HAMMER 57/Many: Pseudofs support
[dragonfly.git] / sys / vfs / hammer / hammer_flusher.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 * 
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 * 
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 * 
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 * 
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.29 2008/06/23 07:31:14 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
                                        hammer_transaction_t trans);
static int hammer_must_finalize_undo(hammer_mount_t hmp);
static void hammer_flusher_finalize(hammer_transaction_t trans, int final);

/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
        struct hammer_mount *hmp;       /* mount we are flushing for */
        thread_t        td;             /* slave thread */
        int             startit;        /* 0=idle, 1=run, -1=exit request */
        /* inodes pulled off hmp->flush_list for this slave to sync */
        hammer_inode_t  work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;

/*
 * Sync all inodes pending on the flusher.  This routine may have to be
 * called twice to get them all as some may be queued to a later flush group.
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
        int seq;

        if (hmp->flusher.td) {
                seq = hmp->flusher.next;
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
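                /*
                 * The signed difference handles wrap of the sequence
                 * number space.
                 */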
                while ((int)(seq - hmp->flusher.done) > 0)
                        tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
        }
}

/*
 * Sync all inodes pending on the flusher - return immediately.
 */
void
hammer_flusher_async(hammer_mount_t hmp)
{
        if (hmp->flusher.td) {
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
        }
}

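/*
 * Create the flusher threads for a mount: one master thread plus
 * HAMMER_MAX_FLUSHERS slave work threads.
 */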
void
hammer_flusher_create(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        hmp->flusher.signal = 0;
        hmp->flusher.act = 0;
        hmp->flusher.done = 0;
        hmp->flusher.next = 1;
        hmp->flusher.count = 0;
        hammer_ref(&hmp->flusher.finalize_lock);

        lwkt_create(hammer_flusher_master_thread, hmp,
                    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
                info->hmp = hmp;
                ++hmp->flusher.count;
                hmp->flusher.info[i] = info;
                lwkt_create(hammer_flusher_slave_thread, info,
                            &info->td, NULL, 0, -1, "hammer-S%d", i);
        }
}

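/*
 * Shut down the master and slave flusher threads and release their
 * support structures.
 */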
void
hammer_flusher_destroy(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        /*
         * Kill the master
         */
        hmp->flusher.exiting = 1;
        while (hmp->flusher.td) {
                ++hmp->flusher.signal;
                wakeup(&hmp->flusher.signal);
                tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
        }

        /*
         * Kill the slaves
         */
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                if ((info = hmp->flusher.info[i]) != NULL) {
                        KKASSERT(info->startit == 0);
                        info->startit = -1;
                        wakeup(&info->startit);
                        while (info->td) {
                                tsleep(&info->td, 0, "hmrwwc", 0);
                        }
                        hmp->flusher.info[i] = NULL;
                        kfree(info, M_HAMMER);
                        --hmp->flusher.count;
                }
        }
        KKASSERT(hmp->flusher.count == 0);
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
        hammer_mount_t hmp = arg;

        for (;;) {
                while (hmp->flusher.group_lock)
                        tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
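                /*
                 * Open the next flush group.  hammer_flusher_sync()
                 * callers block until flusher.done catches up to the
                 * sequence number they sampled from flusher.next.
                 */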
                hmp->flusher.act = hmp->flusher.next;
                ++hmp->flusher.next;
                hammer_flusher_clean_loose_ios(hmp);
                hammer_flusher_flush(hmp);
                hmp->flusher.done = hmp->flusher.act;
                wakeup(&hmp->flusher.done);

                /*
                 * Wait for activity.
                 */
                if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_list))
                        break;

                /*
                 * This is a hack until we can dispose of frontend buffer
                 * cache buffers on the frontend.
                 */
                while (hmp->flusher.signal == 0)
                        tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
                hmp->flusher.signal = 0;
        }

        /*
         * And we are done.
         */
        hmp->flusher.td = NULL;
        wakeup(&hmp->flusher.exiting);
        lwkt_exit();
}

/*
 * The slave flusher thread pulls work off the master flush_list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
        hammer_flusher_info_t info;
        hammer_mount_t hmp;
        hammer_inode_t ip;
        int c;
        int i;
        int n;

        info = arg;
        hmp = info->hmp;

        for (;;) {
                while (info->startit == 0)
                        tsleep(&info->startit, 0, "hmrssw", 0);
                if (info->startit < 0)
                        break;
                info->startit = 0;

                /*
                 * Try to pull out ~64 inodes at a time to flush.  The
                 * idea is to avoid deadlocks between the slaves.
                 */
                n = c = 0;
                while ((ip = TAILQ_FIRST(&hmp->flush_list)) != NULL) {
                        if (ip->flush_group != hmp->flusher.act)
                                break;
                        TAILQ_REMOVE(&hmp->flush_list, ip, flush_entry);
                        info->work_array[n++] = ip;
                        c += ip->rsv_recs;
                        if (n < HAMMER_FLUSH_GROUP_SIZE &&
                            c < HAMMER_FLUSH_GROUP_SIZE * 8) {
                                continue;
                        }
                        for (i = 0; i < n; ++i) {
                                hammer_flusher_flush_inode(info->work_array[i],
                                                        &hmp->flusher.trans);
                        }
                        n = c = 0;
                }
                for (i = 0; i < n; ++i) {
                        hammer_flusher_flush_inode(info->work_array[i],
                                                   &hmp->flusher.trans);
                }
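                /*
                 * The last slave to finish drops flusher.running to 0
                 * and wakes the master, which waits for the group in
                 * hammer_flusher_flush().
                 */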
                if (--hmp->flusher.running == 0)
                        wakeup(&hmp->flusher.running);
        }
        info->td = NULL;
        wakeup(&info->td);
        lwkt_exit();
}

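/*
 * Clean up "loose" buffers queued on hmp->lose_list by I/O completions
 * so they do not accumulate.
 */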
static void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
        hammer_buffer_t buffer;
        hammer_io_t io;
        int panic_count = 1000000;

        /*
         * loose ends - buffers without bp's aren't tracked by the kernel
         * and can build up, so clean them out.  This can occur when an
         * IO completes on a buffer with no references left.
         */
        crit_enter();   /* biodone() race */
        while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                KKASSERT(--panic_count > 0);
                KKASSERT(io->mod_list == &hmp->lose_list);
                TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
                io->mod_list = NULL;
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                buffer = (void *)io;
                hammer_rel_buffer(buffer, 0);
        }
        crit_exit();
}

/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        hammer_reserve_t resv;
        int i;
        int n;

        hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

        /*
         * Start work threads, roughly one per HAMMER_FLUSH_GROUP_SIZE
         * queued inodes, up to HAMMER_MAX_FLUSHERS.
         */
        i = 0;
        n = hmp->count_iqueued / HAMMER_FLUSH_GROUP_SIZE;
        if (TAILQ_FIRST(&hmp->flush_list)) {
                for (i = 0; i <= n; ++i) {
                        if (i == HAMMER_MAX_FLUSHERS ||
                            hmp->flusher.info[i] == NULL) {
                                break;
                        }
                        info = hmp->flusher.info[i];
                        if (info->startit == 0) {
                                ++hmp->flusher.running;
                                info->startit = 1;
                                wakeup(&info->startit);
                        }
                }
        }
        while (hmp->flusher.running)
                tsleep(&hmp->flusher.running, 0, "hmrfcc", 0);

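        /*
         * All slaves have finished the flush group.  Perform the final
         * finalization for this group and record its transaction id.
         */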
        hammer_flusher_finalize(&hmp->flusher.trans, 1);
        hmp->flusher.tid = hmp->flusher.trans.tid;

        /*
         * Clean up any freed big-blocks (typically zone-2).
         * resv->flush_group is typically set several flush groups ahead
         * of the free so the freed big-block is not reused until the
         * free can no longer be undone.
         */
        while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
                if (resv->flush_group != hmp->flusher.act)
                        break;
                hammer_reserve_clrdelay(hmp, resv);
        }
        hammer_done_transaction(&hmp->flusher.trans);
}

/*
 * Flush a single inode that is part of a flush group.
 */
static
void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
        hammer_mount_t hmp = ip->hmp;

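        /*
         * Sync the inode while holding the finalize_lock shared so a
         * finalization cannot begin in the middle of the sync.
         */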
        hammer_lock_sh(&hmp->flusher.finalize_lock);
        ip->error = hammer_sync_inode(ip);
        hammer_flush_inode_done(ip);
        hammer_unlock(&hmp->flusher.finalize_lock);
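        /*
         * Wait for any finalization already in progress.  Then, if the
         * UNDO FIFO is getting too full or too many dirty buffers have
         * accumulated, take the finalize_lock exclusively and finalize
         * early ourselves.
         */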
        while (hmp->flusher.finalize_want)
                tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
        if (hammer_must_finalize_undo(hmp)) {
                hmp->flusher.finalize_want = 1;
                hammer_lock_ex(&hmp->flusher.finalize_lock);
                kprintf("HAMMER: Warning: UNDO area too small!\n");
                hammer_flusher_finalize(trans, 1);
                hammer_unlock(&hmp->flusher.finalize_lock);
                hmp->flusher.finalize_want = 0;
                wakeup(&hmp->flusher.finalize_want);
        } else if (trans->hmp->locked_dirty_count +
                   trans->hmp->io_running_count > hammer_limit_dirtybufs) {
                hmp->flusher.finalize_want = 1;
                hammer_lock_ex(&hmp->flusher.finalize_lock);
                hammer_flusher_finalize(trans, 0);
                hammer_unlock(&hmp->flusher.finalize_lock);
                hmp->flusher.finalize_want = 0;
                wakeup(&hmp->flusher.finalize_want);
        }
}

/*
 * If the UNDO area gets over half full we have to flush it.  We can't
 * afford the UNDO area becoming completely full as that would break
 * the crash recovery atomicity.
 */
static
int
hammer_must_finalize_undo(hammer_mount_t hmp)
{
        if (hammer_undo_space(hmp) < hammer_undo_max(hmp) / 2) {
                hkprintf("*");
                return(1);
        } else {
                return(0);
        }
}

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 */
static
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t cundomap, dundomap;
        hammer_mount_t hmp;
        hammer_io_t io;
        int count;
        int i;

        hmp = trans->hmp;
        root_volume = trans->rootvol;

        /*
         * Flush data buffers.  This can occur asynchronously and at any
         * time.  We must interlock against the frontend direct-data write
         * but do not have to acquire the sync-lock yet.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                hammer_io_write_interlock(io);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_io_done_interlock(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * The sync-lock is required for the remaining sequence.  This lock
         * prevents meta-data from being modified.
         */
        hammer_sync_lock_ex(trans);

        /*
         * If we have been asked to finalize the volume header, sync the
         * cached blockmap to the on-disk blockmap.  Generate an UNDO
         * record for the update.
         */
        if (final) {
                cundomap = &hmp->blockmap[0];
                dundomap = &root_volume->ondisk->vol0_blockmap[0];
                if (root_volume->io.modified) {
                        hammer_modify_volume(trans, root_volume,
                                             dundomap, sizeof(hmp->blockmap));
                        for (i = 0; i < HAMMER_MAX_ZONES; ++i)
                                hammer_crc_set_blockmap(&cundomap[i]);
                        bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
                        hammer_modify_volume_done(root_volume);
                }
        }

        /*
         * Flush UNDOs
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * Wait for I/Os to complete
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl1");

        /*
         * Update the on-disk volume header with new UNDO FIFO end position
         * (do not generate new UNDO records for this change).  We have to
         * do this for the UNDO FIFO whether (final) is set or not.
         *
         * Also update the on-disk next_tid field.  This does not require
         * an UNDO.  However, because our TID is generated before we get
         * the sync lock another sync may have beaten us to the punch.
         *
         * The volume header will be flushed out synchronously.
         */
        dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

        if (dundomap->first_offset != cundomap->first_offset ||
            dundomap->next_offset != cundomap->next_offset) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                dundomap->first_offset = cundomap->first_offset;
                dundomap->next_offset = cundomap->next_offset;
                hammer_crc_set_blockmap(dundomap);
                hammer_crc_set_volume(root_volume->ondisk);
                if (root_volume->ondisk->vol0_next_tid < trans->tid)
                        root_volume->ondisk->vol0_next_tid = trans->tid;
                hammer_modify_volume_done(root_volume);
        }

        if (root_volume->io.modified) {
                hammer_io_flush(&root_volume->io);
        }

        /*
         * Wait for I/Os to complete
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl2");

        /*
         * Flush meta-data.  The meta-data will be undone if we crash
         * so we can safely flush it asynchronously.
         *
         * Repeated catchups will wind up flushing this update's meta-data
         * and the UNDO buffers for the next update simultaneously.  This
         * is ok.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * If this is the final finalization for the flush group, set up
         * for the next sequence by setting a new first_offset in our
         * cached blockmap and clearing the undo history.
         */
        if (final) {
                cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
                cundomap->first_offset = cundomap->next_offset;
                hammer_clear_undo_history(hmp);
        }

        hammer_sync_unlock(trans);
}