HAMMER 61A/Many: Stabilization
dragonfly.git: sys/vfs/hammer/hammer_flusher.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.34 2008/07/10 21:23:58 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
                                        hammer_transaction_t trans);

/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
        struct hammer_mount *hmp;
        thread_t        td;
        int             startit;
        hammer_inode_t  work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;

/*
 * Sync all inodes pending on the flusher.  This routine may have to be
 * called twice to get them all as some may be queued to a later flush group.
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
        int seq;

        if (hmp->flusher.td) {
                seq = hmp->flusher.next;
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
                while ((int)(seq - hmp->flusher.done) > 0)
                        tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
        }
}
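
/*
 * Usage sketch (illustrative, not part of the original call graph): per
 * the note above, a caller that wants every inode queued so far to be
 * flushed can simply invoke the synchronous entry point twice:
 *
 *      hammer_flusher_sync(hmp);       flush the current group
 *      hammer_flusher_sync(hmp);       catch inodes queued to a later group
 *
 * hammer_flusher_async() below performs the same wakeup but returns
 * without waiting for hmp->flusher.done to catch up.
 */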

/*
 * Request a flush of all inodes pending on the flusher - return immediately
 * rather than waiting for completion.
 */
void
hammer_flusher_async(hammer_mount_t hmp)
{
        if (hmp->flusher.td) {
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
        }
}

void
hammer_flusher_create(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        hmp->flusher.signal = 0;
        hmp->flusher.act = 0;
        hmp->flusher.done = 0;
        hmp->flusher.next = 1;
        hmp->flusher.count = 0;
        hammer_ref(&hmp->flusher.finalize_lock);

        lwkt_create(hammer_flusher_master_thread, hmp,
                    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
                info->hmp = hmp;
                ++hmp->flusher.count;
                hmp->flusher.info[i] = info;
                lwkt_create(hammer_flusher_slave_thread, info,
                            &info->td, NULL, 0, -1, "hammer-S%d", i);
        }
}

void
hammer_flusher_destroy(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        /*
         * Kill the master
         */
        hmp->flusher.exiting = 1;
        while (hmp->flusher.td) {
                ++hmp->flusher.signal;
                wakeup(&hmp->flusher.signal);
                tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
        }

        /*
         * Kill the slaves
         */
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                if ((info = hmp->flusher.info[i]) != NULL) {
                        KKASSERT(info->startit == 0);
                        info->startit = -1;
                        wakeup(&info->startit);
                        while (info->td) {
                                tsleep(&info->td, 0, "hmrwwc", 0);
                        }
                        hmp->flusher.info[i] = NULL;
                        kfree(info, M_HAMMER);
                        --hmp->flusher.count;
                }
        }
        KKASSERT(hmp->flusher.count == 0);
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
        hammer_mount_t hmp = arg;

        for (;;) {
                while (hmp->flusher.group_lock)
                        tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
                hmp->flusher.act = hmp->flusher.next;
                ++hmp->flusher.next;
                hammer_flusher_clean_loose_ios(hmp);
                hammer_flusher_flush(hmp);
                hmp->flusher.done = hmp->flusher.act;
                wakeup(&hmp->flusher.done);

                /*
                 * Wait for activity.
                 */
                if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_list))
                        break;

                /*
                 * This is a hack until we can dispose of frontend buffer
                 * cache buffers on the frontend.
                 */
                while (hmp->flusher.signal == 0)
                        tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
                hmp->flusher.signal = 0;
        }

        /*
         * And we are done.
         */
        hmp->flusher.td = NULL;
        wakeup(&hmp->flusher.exiting);
        lwkt_exit();
}
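
/*
 * Sequencing example (illustrative): the flusher counters form a simple
 * monotonic pipeline.  With next = 5, act = 4, done = 3, a caller of
 * hammer_flusher_sync() records seq = 5 and sleeps until the master has
 * run a cycle with act = 5 and published done = 5.  The (int)(seq - done)
 * comparison keeps the test well-behaved even if the counters eventually
 * wrap.
 */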

/*
 * The slave flusher thread pulls work off the master flush_list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
        hammer_flusher_info_t info;
        hammer_mount_t hmp;
        hammer_inode_t ip;
        int c;
        int i;
        int n;

        info = arg;
        hmp = info->hmp;

        for (;;) {
                while (info->startit == 0)
                        tsleep(&info->startit, 0, "hmrssw", 0);
                if (info->startit < 0)
                        break;
                info->startit = 0;

                /*
                 * Try to pull out around 64 inodes at a time to flush.
                 * The idea is to try to avoid deadlocks between the slaves.
                 */
                n = c = 0;
                while ((ip = TAILQ_FIRST(&hmp->flush_list)) != NULL) {
                        if (ip->flush_group != hmp->flusher.act)
                                break;
                        TAILQ_REMOVE(&hmp->flush_list, ip, flush_entry);
                        info->work_array[n++] = ip;
                        c += ip->rsv_recs;
                        if (n < HAMMER_FLUSH_GROUP_SIZE &&
                            c < HAMMER_FLUSH_GROUP_SIZE * 8) {
                                continue;
                        }
                        for (i = 0; i < n; ++i) {
                                hammer_flusher_flush_inode(info->work_array[i],
                                                           &hmp->flusher.trans);
                        }
                        n = c = 0;
                }
                for (i = 0; i < n; ++i) {
                        hammer_flusher_flush_inode(info->work_array[i],
                                                   &hmp->flusher.trans);
                }
                if (--hmp->flusher.running == 0)
                        wakeup(&hmp->flusher.running);
        }
        info->td = NULL;
        wakeup(&info->td);
        lwkt_exit();
}
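
/*
 * Batching example (illustrative): with HAMMER_FLUSH_GROUP_SIZE at 64 a
 * slave dispatches its work_array either when it has collected 64 inodes
 * or when the accumulated reserved-record count (c) reaches 64 * 8 = 512,
 * whichever comes first.  Inodes carrying many reserved records therefore
 * go out in smaller batches, which bounds how much buffer space a single
 * dispatch can pin.
 */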

void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
        hammer_buffer_t buffer;
        hammer_io_t io;

        /*
         * loose ends - buffers without bp's aren't tracked by the kernel
         * and can build up, so clean them out.  This can occur when an
         * IO completes on a buffer with no references left.
         */
        if ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                crit_enter();   /* biodone() race */
                while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                        KKASSERT(io->mod_list == &hmp->lose_list);
                        TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
                        io->mod_list = NULL;
                        if (io->lock.refs == 0)
                                ++hammer_count_refedbufs;
                        hammer_ref(&io->lock);
                        buffer = (void *)io;
                        hammer_rel_buffer(buffer, 0);
                }
                crit_exit();
        }
}
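
/*
 * Note (illustrative): the unlocked TAILQ_FIRST() test above is only a
 * cheap hint; the list is re-checked inside crit_enter()/crit_exit()
 * because, per the "biodone() race" comment, I/O completion can
 * manipulate lose_list concurrently.  The critical section keeps the
 * removal and the mod_list update atomic with respect to that
 * completion path.
 */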

/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        hammer_reserve_t resv;
        int i;
        int n;

        hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

        /*
         * If the previous flush cycle just about exhausted our UNDO space
         * we may have to do a dummy cycle to move the first_offset up
         * before actually digging into a new cycle, or the new cycle will
         * not have sufficient undo space.
         */
        if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
                hammer_flusher_finalize(&hmp->flusher.trans, 0);

        /*
         * Start work threads.
         */
        i = 0;
        n = hmp->count_iqueued / HAMMER_FLUSH_GROUP_SIZE;
        if (TAILQ_FIRST(&hmp->flush_list)) {
                for (i = 0; i <= n; ++i) {
                        if (i == HAMMER_MAX_FLUSHERS ||
                            hmp->flusher.info[i] == NULL) {
                                break;
                        }
                        info = hmp->flusher.info[i];
                        if (info->startit == 0) {
                                ++hmp->flusher.running;
                                info->startit = 1;
                                wakeup(&info->startit);
                        }
                }
        }
        while (hmp->flusher.running)
                tsleep(&hmp->flusher.running, 0, "hmrfcc", 0);

        hammer_flusher_finalize(&hmp->flusher.trans, 1);
        hmp->flusher.tid = hmp->flusher.trans.tid;

        /*
         * Clean up any freed big-blocks (typically zone-2).
         * resv->flush_group is typically set several flush groups ahead
         * of the free to ensure that the freed block is not reused until
         * it is safe to reuse it.
         */
        while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
                if (resv->flush_group != hmp->flusher.act)
                        break;
                hammer_reserve_clrdelay(hmp, resv);
        }
        hammer_done_transaction(&hmp->flusher.trans);
}
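
/*
 * Sizing example (illustrative): the master wakes roughly one slave per
 * HAMMER_FLUSH_GROUP_SIZE queued inodes.  With 200 inodes queued and a
 * group size of 64, n = 200 / 64 = 3, so slaves 0..3 (four threads) are
 * signalled, capped at HAMMER_MAX_FLUSHERS and at however many slave
 * threads were actually created.
 */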

/*
 * Flush a single inode that is part of a flush group.
 *
 * NOTE!  The sync code can return EWOULDBLOCK if the flush operation
 * would otherwise blow out the buffer cache.  hammer_flush_inode_done()
 * will re-queue the inode for the next flush sequence and force the
 * flusher to run again if this occurs.
 */
static
void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
        hammer_mount_t hmp = ip->hmp;
        int error;

        hammer_flusher_clean_loose_ios(hmp);
        hammer_lock_sh(&hmp->flusher.finalize_lock);
        error = hammer_sync_inode(ip);
        if (error != EWOULDBLOCK)
                ip->error = error;
        hammer_flush_inode_done(ip);
        hammer_unlock(&hmp->flusher.finalize_lock);
        while (hmp->flusher.finalize_want)
                tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
        if (hammer_flusher_undo_exhausted(trans, 1)) {
                kprintf("HAMMER: Warning: UNDO area too small!\n");
                hammer_flusher_finalize(trans, 1);
        } else if (hammer_flusher_meta_limit(trans->hmp)) {
                hammer_flusher_finalize(trans, 0);
        }
}
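
/*
 * Locking note (sketch of the intended interaction): hammer_sync_inode()
 * runs under a shared hold of flusher.finalize_lock, while
 * hammer_flusher_finalize() takes the same lock exclusively.  The
 * finalize_want counter plus the tsleep above keep additional slaves
 * from piling onto the shared lock while a finalization is waiting for
 * exclusive access.
 */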

/*
 * Return non-zero if the UNDO area has less than (quarter / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *       will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *       running in parallel with a flush and cannot be allowed to drop
 *       available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *       to give the flush plenty of runway to work in.
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
        if (hammer_undo_space(trans) <
            hammer_undo_max(trans->hmp) * quarter / 4) {
                kprintf("%c", '0' + quarter);
                return(1);
        } else {
                return(0);
        }
}
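
/*
 * Worked example (illustrative): if hammer_undo_max() reports a 1GB UNDO
 * FIFO, quarter=1 returns non-zero when less than ~256MB is available,
 * quarter=2 below ~512MB, and quarter=3 below ~768MB.  The pre-flight
 * test in hammer_flusher_flush() uses quarter=3; the per-inode emergency
 * test in hammer_flusher_flush_inode() uses quarter=1.
 */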

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t cundomap, dundomap;
        hammer_mount_t hmp;
        hammer_io_t io;
        int count;
        int i;

        hmp = trans->hmp;
        root_volume = trans->rootvol;

        /*
         * Exclusively lock the flusher.  This guarantees that all dirty
         * buffers will be idled (have a mod-count of 0).
         */
        ++hmp->flusher.finalize_want;
        hammer_lock_ex(&hmp->flusher.finalize_lock);

        /*
         * If this isn't the final sync several threads may have hit the
         * meta-limit at the same time and raced.  Only sync if we really
         * have to, after acquiring the lock.
         */
        if (final == 0 && !hammer_flusher_meta_limit(hmp))
                goto done;

        /*
         * Flush data buffers.  This can occur asynchronously and at any
         * time.  We must interlock against the frontend direct-data write
         * but do not have to acquire the sync-lock yet.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                hammer_io_write_interlock(io);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_io_done_interlock(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * The sync-lock is required for the remaining sequence.  This lock
         * prevents meta-data from being modified.
         */
        hammer_sync_lock_ex(trans);

        /*
         * If we have been asked to finalize the volume header, sync the
         * cached blockmap to the on-disk blockmap.  Generate an UNDO
         * record for the update.
         */
        if (final) {
                cundomap = &hmp->blockmap[0];
                dundomap = &root_volume->ondisk->vol0_blockmap[0];
                if (root_volume->io.modified) {
                        hammer_modify_volume(trans, root_volume,
                                             dundomap, sizeof(hmp->blockmap));
                        for (i = 0; i < HAMMER_MAX_ZONES; ++i)
                                hammer_crc_set_blockmap(&cundomap[i]);
                        bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
                        hammer_modify_volume_done(root_volume);
                }
        }

        /*
         * Flush UNDOs
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }
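
        /*
         * Ordering note (illustrative): the UNDO buffers queued above are
         * written out here and waited on below, before the volume header
         * is updated and before any meta-data is allowed to go out.  A
         * crash after this point is recoverable because the UNDO records
         * needed to roll the meta-data back are already on the media.
         */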

        /*
         * Wait for I/Os to complete
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl1");

        /*
         * Update the on-disk volume header with the new UNDO FIFO end
         * position (do not generate new UNDO records for this change).
         * We have to do this for the UNDO FIFO whether (final) is set
         * or not.
         *
         * Also update the on-disk next_tid field.  This does not require
         * an UNDO.  However, because our TID is generated before we get
         * the sync lock another sync may have beat us to the punch.
         *
         * This also has the side effect of updating first_offset based on
         * a prior finalization when the first finalization of the next flush
         * cycle occurs, removing any undo info from the prior finalization
         * from consideration.
         *
         * The volume header will be flushed out synchronously.
         */
        dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

        if (dundomap->first_offset != cundomap->first_offset ||
            dundomap->next_offset != cundomap->next_offset) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                dundomap->first_offset = cundomap->first_offset;
                dundomap->next_offset = cundomap->next_offset;
                hammer_crc_set_blockmap(dundomap);
                hammer_modify_volume_done(root_volume);
        }

        if (root_volume->io.modified) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                if (root_volume->ondisk->vol0_next_tid < trans->tid)
                        root_volume->ondisk->vol0_next_tid = trans->tid;
                hammer_crc_set_volume(root_volume->ondisk);
                hammer_modify_volume_done(root_volume);
                hammer_io_flush(&root_volume->io);
        }

        /*
         * Wait for I/Os to complete
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl2");

        /*
         * Flush meta-data.  The meta-data will be undone if we crash
         * so we can safely flush it asynchronously.
         *
         * Repeated catchups will wind up flushing this update's meta-data
         * and the UNDO buffers for the next update simultaneously.  This
         * is ok.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * If this is the final finalization for the flush group, set
         * up for the next sequence by setting a new first_offset in
         * our cached blockmap and clearing the undo history.
         *
         * Even though we have updated our cached first_offset, the on-disk
         * first_offset still governs available-undo-space calculations.
         */
        if (final) {
                cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
                cundomap->first_offset = cundomap->next_offset;
                hammer_clear_undo_history(hmp);
        }

        hammer_sync_unlock(trans);

done:
        hammer_unlock(&hmp->flusher.finalize_lock);
        if (--hmp->flusher.finalize_want == 0)
                wakeup(&hmp->flusher.finalize_want);
}
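
/*
 * Summary of the finalization sequence (illustrative recap of the code
 * above):
 *
 *      1. Flush dirty data buffers (no sync-lock needed yet).
 *      2. Acquire the sync-lock; if final, copy the cached blockmap to
 *         the volume header under an UNDO record.
 *      3. Flush the UNDO buffers and wait for them to reach the media.
 *      4. Update the volume header's UNDO FIFO pointers and next_tid,
 *         flush the header synchronously, then wait again.
 *      5. Flush meta-data asynchronously; a crash at this point is
 *         recoverable from the UNDOs already on disk.
 *      6. If final, advance the cached first_offset and clear the undo
 *         history for the next flush group.
 */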

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Since such buffers cannot be flushed until the UNDOs have been dealt
 * with, letting too many accumulate risks deadlocking the kernel's
 * buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
        if (hmp->locked_dirty_space + hmp->io_running_space >
            hammer_limit_dirtybufspace) {
                return(1);
        }
        return(0);
}
