/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.30 2008/06/27 20:56:59 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
                                        hammer_transaction_t trans);
static void hammer_flusher_finalize(hammer_transaction_t trans, int final);

/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
        struct hammer_mount *hmp;
        thread_t        td;
        int             startit;
        hammer_inode_t  work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;

/*
 * Sync all inodes pending on the flusher.  This routine may have to be
 * called twice to get them all as some may be queued to a later flush group.
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
        int seq;

        if (hmp->flusher.td) {
                seq = hmp->flusher.next;
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
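                /*
                 * Wait for the flusher's completion count to catch up to
                 * the sequence number we sampled.  The signed difference
                 * keeps the comparison correct across sequence wrap.
                 */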
                while ((int)(seq - hmp->flusher.done) > 0)
                        tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
        }
}

/*
 * Sync all inodes pending on the flusher - return immediately.
 */
void
hammer_flusher_async(hammer_mount_t hmp)
{
        if (hmp->flusher.td) {
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
        }
}

void
hammer_flusher_create(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        hmp->flusher.signal = 0;
        hmp->flusher.act = 0;
        hmp->flusher.done = 0;
        hmp->flusher.next = 1;
        hmp->flusher.count = 0;
        hammer_ref(&hmp->flusher.finalize_lock);

        lwkt_create(hammer_flusher_master_thread, hmp,
                    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
                info->hmp = hmp;
                ++hmp->flusher.count;
                hmp->flusher.info[i] = info;
                lwkt_create(hammer_flusher_slave_thread, info,
                            &info->td, NULL, 0, -1, "hammer-S%d", i);
        }
}

void
hammer_flusher_destroy(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        /*
         * Kill the master
         */
        hmp->flusher.exiting = 1;
        while (hmp->flusher.td) {
                ++hmp->flusher.signal;
                wakeup(&hmp->flusher.signal);
                tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
        }

        /*
         * Kill the slaves
         */
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                if ((info = hmp->flusher.info[i]) != NULL) {
                        KKASSERT(info->startit == 0);
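                        /*
                         * A negative startit tells the slave thread to
                         * exit its service loop.
                         */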
                        info->startit = -1;
                        wakeup(&info->startit);
                        while (info->td) {
                                tsleep(&info->td, 0, "hmrwwc", 0);
                        }
                        hmp->flusher.info[i] = NULL;
                        kfree(info, M_HAMMER);
                        --hmp->flusher.count;
                }
        }
        KKASSERT(hmp->flusher.count == 0);
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
        hammer_mount_t hmp = arg;

        for (;;) {
                while (hmp->flusher.group_lock)
                        tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
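                /*
                 * Advance the flush group sequence.  Inodes queued with
                 * flush_group == flusher.act are flushed in this pass.
                 */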
                hmp->flusher.act = hmp->flusher.next;
                ++hmp->flusher.next;
                hammer_flusher_clean_loose_ios(hmp);
                hammer_flusher_flush(hmp);
                hmp->flusher.done = hmp->flusher.act;
                wakeup(&hmp->flusher.done);

                /*
                 * Wait for activity.
                 */
                if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_list))
                        break;

                /*
                 * This is a hack until we can dispose of frontend buffer
                 * cache buffers on the frontend.
                 */
                while (hmp->flusher.signal == 0)
                        tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
                hmp->flusher.signal = 0;
        }

        /*
         * And we are done.
         */
        hmp->flusher.td = NULL;
        wakeup(&hmp->flusher.exiting);
        lwkt_exit();
}

/*
 * The slave flusher thread pulls work off the master flush_list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
        hammer_flusher_info_t info;
        hammer_mount_t hmp;
        hammer_inode_t ip;
        int c;
        int i;
        int n;

        info = arg;
        hmp = info->hmp;

        for (;;) {
                while (info->startit == 0)
                        tsleep(&info->startit, 0, "hmrssw", 0);
                if (info->startit < 0)
                        break;
                info->startit = 0;

                /*
                 * Try to pull out around 64 inodes at a time to flush.
                 * The idea is to try to avoid deadlocks between the slaves.
                 */
                n = c = 0;
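                /*
                 * n counts inodes in the current batch and c counts their
                 * reserved records; hitting either limit flushes the batch
                 * before more inodes are pulled off the list.
                 */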
                while ((ip = TAILQ_FIRST(&hmp->flush_list)) != NULL) {
                        if (ip->flush_group != hmp->flusher.act)
                                break;
                        TAILQ_REMOVE(&hmp->flush_list, ip, flush_entry);
                        info->work_array[n++] = ip;
                        c += ip->rsv_recs;
                        if (n < HAMMER_FLUSH_GROUP_SIZE &&
                            c < HAMMER_FLUSH_GROUP_SIZE * 8) {
                                continue;
                        }
                        for (i = 0; i < n; ++i) {
                                hammer_flusher_flush_inode(info->work_array[i],
                                                           &hmp->flusher.trans);
                        }
                        n = c = 0;
                }
                for (i = 0; i < n; ++i) {
                        hammer_flusher_flush_inode(info->work_array[i],
                                                   &hmp->flusher.trans);
                }
                if (--hmp->flusher.running == 0)
                        wakeup(&hmp->flusher.running);
        }
        info->td = NULL;
        wakeup(&info->td);
        lwkt_exit();
}

static void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
        hammer_buffer_t buffer;
        hammer_io_t io;
        int panic_count = 1000000;

        /*
         * Loose ends - buffers without bp's aren't tracked by the kernel
         * and can build up, so clean them out.  This can occur when an
         * I/O completes on a buffer with no references left.
         */
        crit_enter();   /* biodone() race */
        while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                KKASSERT(--panic_count > 0);
                KKASSERT(io->mod_list == &hmp->lose_list);
                TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
                io->mod_list = NULL;
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                buffer = (void *)io;
                hammer_rel_buffer(buffer, 0);
        }
        crit_exit();
}

/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        hammer_reserve_t resv;
        int i;
        int n;

        hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

        /*
         * If the previous flush cycle just about exhausted our UNDO space
         * we may have to do a dummy cycle to move the first_offset up
         * before actually digging into a new cycle, or the new cycle will
         * not have sufficient undo space.
         */
        if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3)) {
                hammer_lock_ex(&hmp->flusher.finalize_lock);
                hammer_flusher_finalize(&hmp->flusher.trans, 0);
                hammer_unlock(&hmp->flusher.finalize_lock);
        }

        /*
         * Start work threads.
         */
        i = 0;
        n = hmp->count_iqueued / HAMMER_FLUSH_GROUP_SIZE;
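        /*
         * Wake roughly one slave per HAMMER_FLUSH_GROUP_SIZE queued
         * inodes (at least one), capped by the number of slave threads
         * actually created.
         */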
        if (TAILQ_FIRST(&hmp->flush_list)) {
                for (i = 0; i <= n; ++i) {
                        if (i == HAMMER_MAX_FLUSHERS ||
                            hmp->flusher.info[i] == NULL) {
                                break;
                        }
                        info = hmp->flusher.info[i];
                        if (info->startit == 0) {
                                ++hmp->flusher.running;
                                info->startit = 1;
                                wakeup(&info->startit);
                        }
                }
        }
        while (hmp->flusher.running)
                tsleep(&hmp->flusher.running, 0, "hmrfcc", 0);

        hammer_flusher_finalize(&hmp->flusher.trans, 1);
        hmp->flusher.tid = hmp->flusher.trans.tid;

        /*
         * Clean up any freed big-blocks (typically zone-2).
         * resv->flush_group is typically set several flush groups ahead
         * of the free, delaying reuse of the freed big-block until it is
         * safe to reuse.
         */
        while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
                if (resv->flush_group != hmp->flusher.act)
                        break;
                hammer_reserve_clrdelay(hmp, resv);
        }
        hammer_done_transaction(&hmp->flusher.trans);
}

/*
 * Flush a single inode that is part of a flush group.
 *
 * NOTE!  The sync code can return EWOULDBLOCK if the flush operation
 * would otherwise blow out the buffer cache.  hammer_flush_inode_done()
 * will re-queue the inode for the next flush sequence and force the
 * flusher to run again if this occurs.
 */
static
void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
        hammer_mount_t hmp = ip->hmp;
        int error;

        hammer_lock_sh(&hmp->flusher.finalize_lock);
        error = hammer_sync_inode(ip);
        if (error != EWOULDBLOCK)
                ip->error = error;
        hammer_flush_inode_done(ip);
        hammer_unlock(&hmp->flusher.finalize_lock);
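        /*
         * If another thread wants to do an emergency finalization, wait
         * for it to finish before testing the thresholds ourselves.
         */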
        while (hmp->flusher.finalize_want)
                tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
        if (hammer_flusher_undo_exhausted(trans, 1)) {
                hmp->flusher.finalize_want = 1;
                hammer_lock_ex(&hmp->flusher.finalize_lock);
                kprintf("HAMMER: Warning: UNDO area too small!\n");
                hammer_flusher_finalize(trans, 1);
                hammer_unlock(&hmp->flusher.finalize_lock);
                hmp->flusher.finalize_want = 0;
                wakeup(&hmp->flusher.finalize_want);
        } else if (hammer_flusher_meta_limit(trans->hmp)) {
                hmp->flusher.finalize_want = 1;
                hammer_lock_ex(&hmp->flusher.finalize_lock);
                hammer_flusher_finalize(trans, 0);
                hammer_unlock(&hmp->flusher.finalize_lock);
                hmp->flusher.finalize_want = 0;
                wakeup(&hmp->flusher.finalize_want);
        }
}

/*
 * Return non-zero if the UNDO area has less than (quarter / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *       will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *       running in parallel with a flush and cannot be allowed to drop
 *       available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *       to give the flush plenty of runway to work in.
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
        if (hammer_undo_space(trans) <
            hammer_undo_max(trans->hmp) * quarter / 4) {
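                /* Note on the console which quarter threshold tripped */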
                kprintf("%c", '0' + quarter);
                return(1);
        } else {
                return(0);
        }
}

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 */
static
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t cundomap, dundomap;
        hammer_mount_t hmp;
        hammer_io_t io;
        int count;
        int i;

        hmp = trans->hmp;
        root_volume = trans->rootvol;

        /*
         * Flush data buffers.  This can occur asynchronously and at any
         * time.  We must interlock against the frontend direct-data write
         * but do not have to acquire the sync-lock yet.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                hammer_io_write_interlock(io);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_io_done_interlock(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * The sync-lock is required for the remaining sequence.  This lock
         * prevents meta-data from being modified.
         */
        hammer_sync_lock_ex(trans);

        /*
         * If we have been asked to finalize the volume header, sync the
         * cached blockmap to the on-disk blockmap.  Generate an UNDO
         * record for the update.
         */
        if (final) {
                cundomap = &hmp->blockmap[0];
                dundomap = &root_volume->ondisk->vol0_blockmap[0];
                if (root_volume->io.modified) {
                        hammer_modify_volume(trans, root_volume,
                                             dundomap, sizeof(hmp->blockmap));
                        for (i = 0; i < HAMMER_MAX_ZONES; ++i)
                                hammer_crc_set_blockmap(&cundomap[i]);
                        bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
                        hammer_modify_volume_done(root_volume);
                }
        }

        /*
         * Flush UNDOs
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * Wait for I/Os to complete
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl1");

        /*
         * Update the on-disk volume header with new UNDO FIFO end position
         * (do not generate new UNDO records for this change).  We have to
         * do this for the UNDO FIFO whether (final) is set or not.
         *
         * Also update the on-disk next_tid field.  This does not require
         * an UNDO.  However, because our TID is generated before we get
         * the sync lock another sync may have beat us to the punch.
         *
         * This also has the side effect of updating first_offset based on
         * a prior finalization when the first finalization of the next flush
         * cycle occurs, removing any undo info from the prior finalization
         * from consideration.
         *
         * The volume header will be flushed out synchronously.
         */
        dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

        if (dundomap->first_offset != cundomap->first_offset ||
            dundomap->next_offset != cundomap->next_offset) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                dundomap->first_offset = cundomap->first_offset;
                dundomap->next_offset = cundomap->next_offset;
                hammer_crc_set_blockmap(dundomap);
                hammer_crc_set_volume(root_volume->ondisk);
                if (root_volume->ondisk->vol0_next_tid < trans->tid)
                        root_volume->ondisk->vol0_next_tid = trans->tid;
                hammer_modify_volume_done(root_volume);
        }

        if (root_volume->io.modified) {
                hammer_io_flush(&root_volume->io);
        }

        /*
         * Wait for I/Os to complete
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl2");

        /*
         * Flush meta-data.  The meta-data will be undone if we crash
         * so we can safely flush it asynchronously.
         *
         * Repeated catchups will wind up flushing this update's meta-data
         * and the UNDO buffers for the next update simultaneously.  This
         * is ok.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * If this is the final finalization for the flush group, set
         * up for the next sequence by setting a new first_offset in
         * our cached blockmap and clearing the undo history.
         *
         * Even though we have updated our cached first_offset, the on-disk
         * first_offset still governs available-undo-space calculations.
         */
        if (final) {
                cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
                cundomap->first_offset = cundomap->next_offset;
                hammer_clear_undo_history(hmp);
        }

        hammer_sync_unlock(trans);
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Since we cannot allow such buffers to flush until we have dealt with
 * the UNDOs, we risk deadlocking the kernel's buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
        if (hmp->locked_dirty_count + hmp->io_running_count >
            hammer_limit_dirtybufs) {
                return(1);
        }
        return(0);
}
