/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.27 2008/06/14 01:42:13 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
                                        hammer_transaction_t trans);
static int hammer_must_finalize_undo(hammer_mount_t hmp);
static void hammer_flusher_finalize(hammer_transaction_t trans, int final);

/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
        struct hammer_mount *hmp;
        thread_t        td;
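        /* -1 = request thread exit, 0 = idle, >0 = flush work pending */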
        int             startit;
        TAILQ_HEAD(,hammer_inode) work_list;
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;

/*
 * Sync all inodes pending on the flusher.  This routine may have to be
 * called twice to get them all as some may be queued to a later flush group.
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
        int seq;

        if (hmp->flusher.td) {
                seq = hmp->flusher.next;
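                /*
                 * Wake the master flusher only on the 0->1 transition
                 * of the signal count, then wait until the flusher's
                 * done sequence catches up to the sequence we captured.
                 */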
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
                while ((int)(seq - hmp->flusher.done) > 0)
                        tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
        }
}

/*
 * Sync all inodes pending on the flusher - return immediately.
 */
void
hammer_flusher_async(hammer_mount_t hmp)
{
        if (hmp->flusher.td) {
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
        }
}

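/*
 * Create the flusher threads for a mount: one master thread which owns
 * the flush sequencing, plus HAMMER_MAX_FLUSHERS slave threads which
 * perform the per-inode work.
 */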
void
hammer_flusher_create(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        hmp->flusher.signal = 0;
        hmp->flusher.act = 0;
        hmp->flusher.done = 0;
        hmp->flusher.next = 1;
        hmp->flusher.count = 0;
        hammer_ref(&hmp->flusher.finalize_lock);

        lwkt_create(hammer_flusher_master_thread, hmp,
                    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
                info->hmp = hmp;
                TAILQ_INIT(&info->work_list);
                ++hmp->flusher.count;
                hmp->flusher.info[i] = info;
                lwkt_create(hammer_flusher_slave_thread, info,
                            &info->td, NULL, 0, -1, "hammer-S%d", i);
        }
}

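/*
 * Tear the flusher threads down on unmount: ask the master to exit,
 * then terminate and free each slave thread.
 */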
void
hammer_flusher_destroy(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        /*
         * Kill the master
         */
        hmp->flusher.exiting = 1;
        while (hmp->flusher.td) {
                ++hmp->flusher.signal;
                wakeup(&hmp->flusher.signal);
                tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
        }

        /*
         * Kill the slaves
         */
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                if ((info = hmp->flusher.info[i]) != NULL) {
                        KKASSERT(info->startit == 0);
                        info->startit = -1;
                        wakeup(&info->startit);
                        while (info->td) {
                                tsleep(&info->td, 0, "hmrwwc", 0);
                        }
                        hmp->flusher.info[i] = NULL;
                        kfree(info, M_HAMMER);
                        --hmp->flusher.count;
                }
        }
        KKASSERT(hmp->flusher.count == 0);
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
        hammer_mount_t hmp = arg;

        for (;;) {
                while (hmp->flusher.group_lock)
                        tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
                kprintf("S");
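                /*
                 * Take ownership of the next flush sequence number,
                 * run the flush, then advertise completion via
                 * flusher.done so hammer_flusher_sync() can wake up.
                 */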
                hmp->flusher.act = hmp->flusher.next;
                ++hmp->flusher.next;
                hammer_flusher_clean_loose_ios(hmp);
                hammer_flusher_flush(hmp);
                hmp->flusher.done = hmp->flusher.act;
                wakeup(&hmp->flusher.done);

                /*
                 * Wait for activity.
                 */
                if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_list))
                        break;

                /*
                 * This is a hack until the frontend can dispose of its
                 * own buffer cache buffers.
                 */
                while (hmp->flusher.signal == 0)
                        tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
                hmp->flusher.signal = 0;
        }

        /*
         * And we are done.
         */
        hmp->flusher.td = NULL;
        wakeup(&hmp->flusher.exiting);
        lwkt_exit();
}

/*
 * The slave flusher thread pulls work off the master flush_list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
        hammer_flusher_info_t info;
        hammer_mount_t hmp;
        hammer_inode_t ip;

        info = arg;
        hmp = info->hmp;

        for (;;) {
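                /*
                 * Wait for the master to assign work.  A negative
                 * startit requests termination; a positive startit
                 * means the active flush group (flusher.act) is ready
                 * to be flushed.
                 */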
                while (info->startit == 0)
                        tsleep(&info->startit, 0, "hmrssw", 0);
                if (info->startit < 0)
                        break;
                info->startit = 0;
                while ((ip = TAILQ_FIRST(&hmp->flush_list)) != NULL) {
                        if (ip->flush_group != hmp->flusher.act)
                                break;
                        TAILQ_REMOVE(&hmp->flush_list, ip, flush_entry);
                        hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
                }
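                /*
                 * The last slave to finish wakes the master, which is
                 * waiting on flusher.running in hammer_flusher_flush().
                 */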
                if (--hmp->flusher.running == 0)
                        wakeup(&hmp->flusher.running);
        }
        info->td = NULL;
        wakeup(&info->td);
        lwkt_exit();
}

static void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
        hammer_buffer_t buffer;
        hammer_io_t io;
        int panic_count = 1000000;

        /*
         * loose ends - buffers without bp's aren't tracked by the kernel
         * and can build up, so clean them out.  This can occur when an
         * IO completes on a buffer with no references left.
         */
        crit_enter();   /* biodone() race */
        while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                KKASSERT(--panic_count > 0);
                KKASSERT(io->mod_list == &hmp->lose_list);
                TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
                io->mod_list = NULL;
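                /*
                 * Take a temporary reference so hammer_rel_buffer()
                 * runs the normal disposal path on the buffer.
                 */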
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                buffer = (void *)io;
                hammer_rel_buffer(buffer, 0);
        }
        crit_exit();
}

/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        hammer_reserve_t resv;
        int i;
        int n;

        hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

        /*
         * Start work threads.
         */
        i = 0;
        n = hmp->count_iqueued / 64;
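        /*
         * Wake roughly one slave thread per 64 queued inodes, bounded
         * by the number of configured flusher threads, then wait for
         * all of them to finish draining the active flush group.
         */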
        if (TAILQ_FIRST(&hmp->flush_list)) {
                for (i = 0; i <= hmp->count_iqueued / 64; ++i) {
                        if (i == HAMMER_MAX_FLUSHERS ||
                            hmp->flusher.info[i] == NULL) {
                                break;
                        }
                        info = hmp->flusher.info[i];
                        if (info->startit == 0) {
                                ++hmp->flusher.running;
                                info->startit = 1;
                                wakeup(&info->startit);
                        }
                }
        }
        while (hmp->flusher.running)
                tsleep(&hmp->flusher.running, 0, "hmrfcc", 0);

        hammer_flusher_finalize(&hmp->flusher.trans, 1);
        hmp->flusher.tid = hmp->flusher.trans.tid;

        /*
         * Clean up any freed big-blocks (typically zone-2).
         * resv->flush_group is typically set several flush groups ahead
         * of the free so the freed big-block is not made available for
         * reuse until it is safe to do so.
         */
        while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
                if (resv->flush_group != hmp->flusher.act)
                        break;
                TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
                hammer_blockmap_reserve_complete(hmp, resv);
        }
        hammer_done_transaction(&hmp->flusher.trans);
}

/*
 * Flush a single inode that is part of a flush group.
 */
static
void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
        hammer_mount_t hmp = ip->hmp;

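        /*
         * Sync the inode while holding the finalize lock shared so a
         * finalization cannot run in the middle of the sync.  If the
         * UNDO FIFO is getting too full, or too many dirty buffers
         * have accumulated, take the lock exclusively and run a
         * finalization here.
         */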
        hammer_lock_sh(&hmp->flusher.finalize_lock);
        ip->error = hammer_sync_inode(ip);
        hammer_flush_inode_done(ip);
        hammer_unlock(&hmp->flusher.finalize_lock);
        while (hmp->flusher.finalize_want)
                tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
        if (hammer_must_finalize_undo(hmp)) {
                hmp->flusher.finalize_want = 1;
                hammer_lock_ex(&hmp->flusher.finalize_lock);
                kprintf("HAMMER: Warning: UNDO area too small!\n");
                hammer_flusher_finalize(trans, 1);
                hammer_unlock(&hmp->flusher.finalize_lock);
                hmp->flusher.finalize_want = 0;
                wakeup(&hmp->flusher.finalize_want);
        } else if (trans->hmp->locked_dirty_count +
                   trans->hmp->io_running_count > hammer_limit_dirtybufs) {
                hmp->flusher.finalize_want = 1;
                hammer_lock_ex(&hmp->flusher.finalize_lock);
                kprintf("t");
                hammer_flusher_finalize(trans, 0);
                hammer_unlock(&hmp->flusher.finalize_lock);
                hmp->flusher.finalize_want = 0;
                wakeup(&hmp->flusher.finalize_want);
        }
}

/*
 * If the UNDO area gets over half full we have to flush it.  We can't
 * afford the UNDO area becoming completely full as that would break
 * the crash recovery atomicity.
 */
static
int
hammer_must_finalize_undo(hammer_mount_t hmp)
{
        if (hammer_undo_space(hmp) < hammer_undo_max(hmp) / 2) {
                hkprintf("*");
                return(1);
        } else {
                return(0);
        }
}

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 */
static
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t cundomap, dundomap;
        hammer_mount_t hmp;
        hammer_io_t io;
        int count;
        int i;

        hmp = trans->hmp;
        root_volume = trans->rootvol;

        /*
         * Flush data buffers.  This can occur asynchronously and at any
         * time.  We must interlock against the frontend direct-data write
         * but do not have to acquire the sync-lock yet.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                hammer_io_write_interlock(io);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_io_done_interlock(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * The sync-lock is required for the remaining sequence.  This lock
         * prevents meta-data from being modified.
         */
        hammer_sync_lock_ex(trans);

        /*
         * If we have been asked to finalize the volume header, sync the
         * cached blockmap to the on-disk blockmap.  Generate an UNDO
         * record for the update.
         */
        if (final) {
                cundomap = &hmp->blockmap[0];
                dundomap = &root_volume->ondisk->vol0_blockmap[0];
                if (root_volume->io.modified) {
                        hammer_modify_volume(trans, root_volume,
                                             dundomap, sizeof(hmp->blockmap));
                        for (i = 0; i < HAMMER_MAX_ZONES; ++i)
                                hammer_crc_set_blockmap(&cundomap[i]);
                        bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
                        hammer_modify_volume_done(root_volume);
                }
        }

        /*
         * Flush UNDOs
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * Wait for I/Os to complete
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl1");

        /*
         * Update the on-disk volume header with the new UNDO FIFO end
         * position (do not generate new UNDO records for this change).
         * We have to do this for the UNDO FIFO whether (final) is set
         * or not.
         *
         * Also update the on-disk next_tid field.  This does not require
         * an UNDO.  However, because our TID is generated before we get
         * the sync lock, another sync may have beaten us to the punch.
         *
         * The volume header will be flushed out synchronously.
         */
        dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

        if (dundomap->first_offset != cundomap->first_offset ||
            dundomap->next_offset != cundomap->next_offset) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                dundomap->first_offset = cundomap->first_offset;
                dundomap->next_offset = cundomap->next_offset;
                hammer_crc_set_blockmap(dundomap);
                hammer_crc_set_volume(root_volume->ondisk);
                if (root_volume->ondisk->vol0_next_tid < trans->tid)
                        root_volume->ondisk->vol0_next_tid = trans->tid;
                hammer_modify_volume_done(root_volume);
        }

        if (root_volume->io.modified) {
                hammer_io_flush(&root_volume->io);
        }

        /*
         * Wait for I/Os to complete
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl2");

        /*
         * Flush meta-data.  The meta-data will be undone if we crash,
         * so we can safely flush it asynchronously.
         *
         * Repeated catchups will wind up flushing this update's meta-data
         * and the UNDO buffers for the next update simultaneously.  This
         * is ok.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * If this is the final finalization for the flush group, set
         * up for the next sequence by setting a new first_offset in
         * our cached blockmap and clearing the undo history.
         */
        if (final) {
                cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
                cundomap->first_offset = cundomap->next_offset;
                hammer_clear_undo_history(hmp);
        }

        hammer_sync_unlock(trans);
}