HAMMER 53G/Many: Performance tuning.
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.23 2008/06/10 08:51:01 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_clean_loose_ios(hammer_mount_t hmp);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
                                        hammer_transaction_t trans);
static int hammer_must_finalize_undo(hammer_mount_t hmp);
static void hammer_flusher_finalize(hammer_transaction_t trans, int final);

#define HAMMER_FLUSHER_IMMEDIATE        16

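/*
 * Trigger a flush cycle and wait for it to complete.  The sequence
 * number comparison is rollover-safe: we latch flusher.next and sleep
 * until flusher.done catches up to it.
 */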
void
hammer_flusher_sync(hammer_mount_t hmp)
{
        int seq;

        if (hmp->flusher.td) {
                seq = hmp->flusher.next;
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
                while ((int)(seq - hmp->flusher.done) > 0)
                        tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
        }
}

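/*
 * Trigger a flush cycle without waiting for it to complete.
 */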
void
hammer_flusher_async(hammer_mount_t hmp)
{
        if (hmp->flusher.td) {
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
        }
}

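/*
 * Create the master flusher thread and its pool of slave threads.
 * Each slave gets its own work queue; the master distributes the
 * inodes of a flush group across those queues.
 */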
void
hammer_flusher_create(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        hmp->flusher.signal = 0;
        hmp->flusher.act = 0;
        hmp->flusher.done = 0;
        hmp->flusher.next = 1;
        hmp->flusher.count = 0;
        hammer_ref(&hmp->flusher.finalize_lock);

        lwkt_create(hammer_flusher_master_thread, hmp,
                    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
                info->hmp = hmp;
                TAILQ_INIT(&info->work_list);
                ++hmp->flusher.count;
                hmp->flusher.info[i] = info;
                lwkt_create(hammer_flusher_slave_thread, info,
                            &info->td, NULL, 0, -1, "hammer-S%d", i);
        }
}

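/*
 * Tear down the flusher threads.  The master is signalled to exit
 * first, then each slave is told to exit by setting its running
 * state to -1.
 */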
void
hammer_flusher_destroy(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        /*
         * Kill the master
         */
        hmp->flusher.exiting = 1;
        while (hmp->flusher.td) {
                ++hmp->flusher.signal;
                wakeup(&hmp->flusher.signal);
                tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
        }

        /*
         * Kill the slaves
         */
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                if ((info = hmp->flusher.info[i]) != NULL) {
                        KKASSERT(info->running == 0);
                        info->running = -1;
                        wakeup(&info->running);
                        while (info->td) {
                                tsleep(&info->td, 0, "hmrwwc", 0);
                        }
                        hmp->flusher.info[i] = NULL;
                        kfree(info, M_HAMMER);
                        --hmp->flusher.count;
                }
        }
        KKASSERT(hmp->flusher.count == 0);
}

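/*
 * The master flusher thread advances the flush group sequence, runs
 * each flush, then sleeps until signalled again.
 */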
static void
hammer_flusher_master_thread(void *arg)
{
        hammer_mount_t hmp = arg;

        for (;;) {
                while (hmp->flusher.group_lock)
                        tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
                kprintf("S");
                hmp->flusher.act = hmp->flusher.next;
                ++hmp->flusher.next;
                hammer_flusher_clean_loose_ios(hmp);
                hammer_flusher_flush(hmp);
                hammer_flusher_clean_loose_ios(hmp);
                hmp->flusher.done = hmp->flusher.act;
                wakeup(&hmp->flusher.done);

                /*
                 * Wait for activity.
                 */
                if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_list))
                        break;

                /*
                 * This is a hack until we can dispose of frontend buffer
                 * cache buffers on the frontend.
                 */
                while (hmp->flusher.signal == 0)
                        tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
                hmp->flusher.signal = 0;
        }

        /*
         * And we are done.
         */
        hmp->flusher.td = NULL;
        wakeup(&hmp->flusher.exiting);
        lwkt_exit();
}

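/*
 * Slave flusher threads pull inodes off their work queue and sync
 * them one at a time, then notify the master when the last running
 * slave goes idle.
 */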
static void
hammer_flusher_slave_thread(void *arg)
{
        hammer_flusher_info_t info;
        hammer_mount_t hmp;
        hammer_inode_t ip;

        info = arg;
        hmp = info->hmp;

        for (;;) {
                while (info->running == 0)
                        tsleep(&info->running, 0, "hmrssw", 0);
                if (info->running < 0)
                        break;
                while ((ip = TAILQ_FIRST(&info->work_list)) != NULL) {
                        TAILQ_REMOVE(&info->work_list, ip, flush_entry);
                        hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
                }
                info->running = 0;
                if (--hmp->flusher.running == 0)
                        wakeup(&hmp->flusher.running);
        }
        info->td = NULL;
        wakeup(&info->td);
        lwkt_exit();
}

static void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
        hammer_buffer_t buffer;
        hammer_io_t io;

        /*
         * Loose ends - buffers without bp's aren't tracked by the kernel
         * and can build up, so clean them out.  This can occur when an
         * IO completes on a buffer with no references left.
         */
        while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                KKASSERT(io->mod_list == &hmp->lose_list);
                TAILQ_REMOVE(io->mod_list, io, mod_entry);
                io->mod_list = NULL;
                hammer_ref(&io->lock);
                buffer = (void *)io;
                hammer_rel_buffer(buffer, 0);
        }
}

/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        hammer_inode_t ip;
        hammer_reserve_t resv;
        int i;

        /*
         * Flush the inodes
         */
        hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
        i = 0;
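
        /*
         * Distribute the inodes in this flush group round-robin across
         * the slave work queues, waking any idle slaves as we go.
         */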
        while ((ip = TAILQ_FIRST(&hmp->flush_list)) != NULL) {
                if (ip->flush_group != hmp->flusher.act)
                        break;
                TAILQ_REMOVE(&hmp->flush_list, ip, flush_entry);
                info = hmp->flusher.info[i];
                TAILQ_INSERT_TAIL(&info->work_list, ip, flush_entry);
                if (info->running == 0) {
                        ++hmp->flusher.running;
                        info->running = 1;
                        wakeup(&info->running);
                }
                ++i;
                if (i == HAMMER_MAX_FLUSHERS || hmp->flusher.info[i] == NULL)
                        i = 0;
        }
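
        /*
         * Wait for all slaves to drain their queues before finalizing
         * the flush group.
         */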
        while (hmp->flusher.running)
                tsleep(&hmp->flusher.running, 0, "hmrfcc", 0);

        hammer_flusher_finalize(&hmp->flusher.trans, 1);
        hmp->flusher.tid = hmp->flusher.trans.tid;

        /*
         * Clean up any freed big-blocks (typically zone-2).
         * resv->flush_group is typically set several flush groups ahead
         * of the free, delaying reuse of the freed block until any
         * stale references to it have been retired.
         */
        while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
                if (resv->flush_group != hmp->flusher.act)
                        break;
                TAILQ_REMOVE(&hmp->delay_list, resv, delay_entry);
                hammer_blockmap_reserve_complete(hmp, resv);
        }
        hammer_done_transaction(&hmp->flusher.trans);
}

/*
 * Flush a single inode that is part of a flush group.
 */
static
void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
        hammer_mount_t hmp = ip->hmp;

        hammer_lock_sh(&hmp->flusher.finalize_lock);
        ip->error = hammer_sync_inode(ip);
        hammer_flush_inode_done(ip);
        hammer_unlock(&hmp->flusher.finalize_lock);
        while (hmp->flusher.finalize_want)
                tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
        if (hammer_must_finalize_undo(hmp)) {
                hmp->flusher.finalize_want = 1;
                hammer_lock_ex(&hmp->flusher.finalize_lock);
                kprintf("HAMMER: Warning: UNDO area too small!\n");
                hammer_flusher_finalize(trans, 1);
                hammer_unlock(&hmp->flusher.finalize_lock);
                hmp->flusher.finalize_want = 0;
                wakeup(&hmp->flusher.finalize_want);
        } else if (trans->hmp->locked_dirty_count +
                   trans->hmp->io_running_count > hammer_limit_dirtybufs) {
                hmp->flusher.finalize_want = 1;
                hammer_lock_ex(&hmp->flusher.finalize_lock);
                kprintf("t");
                hammer_flusher_finalize(trans, 0);
                hammer_unlock(&hmp->flusher.finalize_lock);
                hmp->flusher.finalize_want = 0;
                wakeup(&hmp->flusher.finalize_want);
        }
}

/*
 * If the UNDO area gets over half full we have to flush it.  We can't
 * afford the UNDO area becoming completely full as that would break
 * the crash recovery atomicity.
 */
static
int
hammer_must_finalize_undo(hammer_mount_t hmp)
{
        if (hammer_undo_space(hmp) < hammer_undo_max(hmp) / 2) {
                hkprintf("*");
                return(1);
        } else {
                return(0);
        }
}

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 */
static
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t cundomap, dundomap;
        hammer_mount_t hmp;
        hammer_io_t io;
        int count;
        int i;

        hmp = trans->hmp;
        root_volume = trans->rootvol;

        /*
         * Flush data buffers.  This can occur asynchronously and at any
         * time.  We must interlock against the frontend direct-data write
         * but do not have to acquire the sync-lock yet.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
                hammer_ref(&io->lock);
                hammer_io_write_interlock(io);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_io_done_interlock(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * The sync-lock is required for the remaining sequence.  This lock
         * prevents meta-data from being modified.
         */
        hammer_sync_lock_ex(trans);

        /*
         * If we have been asked to finalize the volume header, sync the
         * cached blockmap to the on-disk blockmap.  Generate an UNDO
         * record for the update.
         */
        if (final) {
                cundomap = &hmp->blockmap[0];
                dundomap = &root_volume->ondisk->vol0_blockmap[0];
                if (root_volume->io.modified) {
                        hammer_modify_volume(trans, root_volume,
                                             dundomap, sizeof(hmp->blockmap));
                        for (i = 0; i < HAMMER_MAX_ZONES; ++i)
                                hammer_crc_set_blockmap(&cundomap[i]);
                        bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
                        hammer_modify_volume_done(root_volume);
                }
        }

        /*
         * Flush UNDOs
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * Wait for I/Os to complete
         */
        crit_enter();
        while (hmp->io_running_count)
                tsleep(&hmp->io_running_count, 0, "hmrfl1", 0);
        crit_exit();

        /*
         * Update the on-disk volume header with the new UNDO FIFO end
         * position (do not generate new UNDO records for this change).
         * We have to do this for the UNDO FIFO whether (final) is set
         * or not.
         *
         * Also update the on-disk next_tid field.  This does not require
         * an UNDO.  However, because our TID is generated before we get
         * the sync lock another sync may have beat us to the punch.
         *
         * The volume header will be flushed out synchronously.
         */
        dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

        if (dundomap->first_offset != cundomap->first_offset ||
            dundomap->next_offset != cundomap->next_offset) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                dundomap->first_offset = cundomap->first_offset;
                dundomap->next_offset = cundomap->next_offset;
                hammer_crc_set_blockmap(dundomap);
                hammer_crc_set_volume(root_volume->ondisk);
                if (root_volume->ondisk->vol0_next_tid < trans->tid)
                        root_volume->ondisk->vol0_next_tid = trans->tid;
                hammer_modify_volume_done(root_volume);
        }

        if (root_volume->io.modified) {
                hammer_io_flush(&root_volume->io);
        }

        /*
         * Wait for I/Os to complete
         */
        crit_enter();
        while (hmp->io_running_count)
                tsleep(&hmp->io_running_count, 0, "hmrfl2", 0);
        crit_exit();

        /*
         * Flush meta-data.  The meta-data will be undone if we crash
         * so we can safely flush it asynchronously.
         *
         * Repeated catchups will wind up flushing this update's meta-data
         * and the UNDO buffers for the next update simultaneously.  This
         * is ok.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * If this is the final finalization for the flush group set
         * up for the next sequence by setting a new first_offset in
         * our cached blockmap and clearing the undo history.
         */
        if (final) {
                cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
                cundomap->first_offset = cundomap->next_offset;
                hammer_clear_undo_history(hmp);
        }

        hammer_sync_unlock(trans);
}