HAMMER 61E/Many: Stabilization, Performance
sys/vfs/hammer/hammer_flusher.c
/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.37 2008/07/12 23:04:50 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
                                        hammer_transaction_t trans);

/*
 * Support structures for the flusher threads.
 */
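/*
 * One of these structures exists for each slave flusher thread.  The
 * master fills work_array[] with up to HAMMER_FLUSH_GROUP_SIZE inodes
 * and sets runstate to start the slave; a runstate of -1 tells the
 * slave to exit.
 */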
struct hammer_flusher_info {
        TAILQ_ENTRY(hammer_flusher_info) entry;
        struct hammer_mount *hmp;
        thread_t        td;
        int             runstate;
        int             count;
        hammer_flush_group_t flg;
        hammer_inode_t  work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;

/*
 * Sync all inodes pending on the flusher.
 *
 * All flush groups will be flushed.  This does not queue dirty inodes
 * to the flush groups, it just flushes out what has already been queued!
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
        int seq;

        seq = hammer_flusher_async(hmp, NULL);
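        /*
         * Sequence numbers are compared as a signed difference so the
         * test remains correct if the 32 bit sequence counter wraps.
         */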
        while ((int)(seq - hmp->flusher.done) > 0)
                tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
}

/*
 * Sync all inodes pending on the flusher - return immediately.
 *
 * All flush groups will be flushed.  Returns the sequence number the
 * caller can pass to hammer_flusher_wait().
 */
int
hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
{
        hammer_flush_group_t flg;
        int seq = hmp->flusher.next;

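        /*
         * Each flush group that is not already running will consume one
         * flusher sequence number, so advance seq past all of them and
         * close the groups so no new inodes can be added to them.
         */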
        TAILQ_FOREACH(flg, &hmp->flush_group_list, flush_entry) {
                if (flg->running == 0)
                        ++seq;
                flg->closed = 1;
                if (flg == close_flg)
                        break;
        }
        if (hmp->flusher.td) {
                if (hmp->flusher.signal++ == 0)
                        wakeup(&hmp->flusher.signal);
        } else {
                seq = hmp->flusher.done;
        }
        return(seq);
}

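/*
 * Wait for the flusher to finish the cycle identified by seq, which is
 * typically the value returned by hammer_flusher_async().
 */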
void
hammer_flusher_wait(hammer_mount_t hmp, int seq)
{
        while ((int)(seq - hmp->flusher.done) > 0)
                tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
}

void
hammer_flusher_create(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        int i;

        hmp->flusher.signal = 0;
        hmp->flusher.act = 0;
        hmp->flusher.done = 0;
        hmp->flusher.next = 1;
        hammer_ref(&hmp->flusher.finalize_lock);
        TAILQ_INIT(&hmp->flusher.run_list);
        TAILQ_INIT(&hmp->flusher.ready_list);

        lwkt_create(hammer_flusher_master_thread, hmp,
                    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
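        /*
         * Create the pool of slave threads up front.  All slaves start
         * out parked on the ready_list; the master hands each one a
         * batch of inodes when a flush group runs.
         */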
        for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
                info = kmalloc(sizeof(*info), M_HAMMER, M_WAITOK|M_ZERO);
                info->hmp = hmp;
                TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
                lwkt_create(hammer_flusher_slave_thread, info,
                            &info->td, NULL, 0, -1, "hammer-S%d", i);
        }
}

void
hammer_flusher_destroy(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;

        /*
         * Kill the master
         */
        hmp->flusher.exiting = 1;
        while (hmp->flusher.td) {
                ++hmp->flusher.signal;
                wakeup(&hmp->flusher.signal);
                tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
        }

        /*
         * Kill the slaves
         */
        while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
                KKASSERT(info->runstate == 0);
                TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
                info->runstate = -1;    /* tell the slave to exit */
                wakeup(&info->runstate);
                while (info->td)
                        tsleep(&info->td, 0, "hmrwwc", 0);
                kfree(info, M_HAMMER);
        }
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
 */
static void
hammer_flusher_master_thread(void *arg)
{
        hammer_flush_group_t flg;
        hammer_mount_t hmp;

        hmp = arg;

        for (;;) {
                /*
                 * Do at least one flush cycle.  We may have to update the
                 * UNDO FIFO even if no inodes are queued.
                 */
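                /*
                 * flusher.act is the sequence number of the cycle in
                 * progress and flusher.done is the last completed cycle;
                 * waiters compare their ticket against flusher.done and
                 * are woken at the end of each cycle.
                 */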
                for (;;) {
                        while (hmp->flusher.group_lock)
                                tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
                        hmp->flusher.act = hmp->flusher.next;
                        ++hmp->flusher.next;
                        hammer_flusher_clean_loose_ios(hmp);
                        hammer_flusher_flush(hmp);
                        hmp->flusher.done = hmp->flusher.act;
                        wakeup(&hmp->flusher.done);
                        flg = TAILQ_FIRST(&hmp->flush_group_list);
                        if (flg == NULL || flg->closed == 0)
                                break;
                }

                /*
                 * Wait for activity.
                 */
                if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
                        break;
                while (hmp->flusher.signal == 0)
                        tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
                hmp->flusher.signal = 0;
        }

        /*
         * And we are done.
         */
        hmp->flusher.td = NULL;
        wakeup(&hmp->flusher.exiting);
        lwkt_exit();
}

/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
        hammer_flusher_info_t info;
        hammer_flush_group_t flg;
        hammer_reserve_t resv;
        hammer_inode_t ip;
        hammer_inode_t next_ip;
        int slave_index;

        /*
         * Just in case there's a flush race on mount
         */
        if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL)
                return;

        /*
         * We only do one flg but we may have to loop/retry.
         */
        while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
                if (hammer_debug_general & 0x0001) {
                        kprintf("hammer_flush %d ttl=%d recs=%d\n",
                                hmp->flusher.act,
                                flg->total_count, flg->refs);
                }
                hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

                /*
                 * If the previous flush cycle just about exhausted our
                 * UNDO space we may have to do a dummy cycle to move the
                 * first_offset up before actually digging into a new cycle,
                 * or the new cycle will not have sufficient undo space.
                 */
                if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
                        hammer_flusher_finalize(&hmp->flusher.trans, 0);

                /*
                 * Iterate the inodes in the flg's flush_list and assign
                 * them to slaves.
                 */
                flg->running = 1;
                slave_index = 0;
                info = TAILQ_FIRST(&hmp->flusher.ready_list);
                next_ip = TAILQ_FIRST(&flg->flush_list);

                while ((ip = next_ip) != NULL) {
                        next_ip = TAILQ_NEXT(ip, flush_entry);

                        /*
                         * Add ip to the slave's work array.  The slave is
                         * not currently running.
                         */
                        info->work_array[info->count++] = ip;
                        if (info->count != HAMMER_FLUSH_GROUP_SIZE)
                                continue;

                        /*
                         * Get the slave running
                         */
                        TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
                        TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
                        info->flg = flg;
                        info->runstate = 1;
                        wakeup(&info->runstate);

                        /*
                         * Get a new slave.  We may have to wait for one to
                         * finish running.
                         */
                        while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) == NULL) {
                                tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
                        }
                }

                /*
                 * Run the current slave if necessary
                 */
                if (info->count) {
                        TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
                        TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
                        info->flg = flg;
                        info->runstate = 1;
                        wakeup(&info->runstate);
                }

                /*
                 * Wait for all slaves to finish running.  Each slave
                 * wakes up &hmp->flusher.ready_list when it moves itself
                 * back onto the ready list, which is why we sleep on
                 * that address here.
                 */
                while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
                        tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

                /*
                 * Do the final finalization, clean up
                 */
                hammer_flusher_finalize(&hmp->flusher.trans, 1);
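                /*
                 * Record the TID of the flush we just completed;
                 * presumably other parts of HAMMER use this to tell how
                 * far the media has been synchronized.
                 */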
                hmp->flusher.tid = hmp->flusher.trans.tid;

                hammer_done_transaction(&hmp->flusher.trans);

                /*
                 * Loop up on the same flg.  If the flg is done clean it up
                 * and break out.  We only flush one flg.
                 */
                if (TAILQ_FIRST(&flg->flush_list) == NULL) {
                        KKASSERT(TAILQ_EMPTY(&flg->flush_list));
                        KKASSERT(flg->refs == 0);
                        TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
                        kfree(flg, M_HAMMER);
                        break;
                }
        }

        /*
         * Clean up any freed big-blocks (typically zone-2).
         * resv->flush_group is typically set several flush groups ahead
         * of the free so that the freed block is not reused until it is
         * safe to do so.
         */
        while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
                if (resv->flush_group != hmp->flusher.act)
                        break;
                hammer_reserve_clrdelay(hmp, resv);
        }
}

/*
 * The slave flusher thread pulls work off the master flush_list until no
 * work is left.
 */
static void
hammer_flusher_slave_thread(void *arg)
{
        hammer_flush_group_t flg;
        hammer_flusher_info_t info;
        hammer_mount_t hmp;
        hammer_inode_t ip;
        int i;

        info = arg;
        hmp = info->hmp;

        for (;;) {
                while (info->runstate == 0)
                        tsleep(&info->runstate, 0, "hmrssw", 0);
                if (info->runstate < 0)
                        break;
                flg = info->flg;

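                /*
                 * Flush each inode assigned to this slave, then move
                 * back from the run_list to the ready_list and wake the
                 * master, which may be waiting for a free slave or for
                 * the run_list to drain.
                 */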
                for (i = 0; i < info->count; ++i) {
                        ip = info->work_array[i];
                        hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
                }
                info->count = 0;
                info->runstate = 0;
                TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
                TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
                wakeup(&hmp->flusher.ready_list);
        }
        info->td = NULL;
        wakeup(&info->td);
        lwkt_exit();
}

void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
        hammer_buffer_t buffer;
        hammer_io_t io;

        /*
         * loose ends - buffers without bp's aren't tracked by the kernel
         * and can build up, so clean them out.  This can occur when an
         * IO completes on a buffer with no references left.
         */
        if ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                crit_enter();   /* biodone() race */
                while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
                        KKASSERT(io->mod_list == &hmp->lose_list);
                        TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
                        io->mod_list = NULL;
                        if (io->lock.refs == 0)
                                ++hammer_count_refedbufs;
                        hammer_ref(&io->lock);
                        buffer = (void *)io;
                        hammer_rel_buffer(buffer, 0);
                }
                crit_exit();
        }
}

/*
 * Flush a single inode that is part of a flush group.
 *
 * NOTE!  The sync code can return EWOULDBLOCK if the flush operation
 * would otherwise blow out the buffer cache.  hammer_flush_inode_done()
 * will re-queue the inode for the next flush sequence and force the
 * flusher to run again if this occurs.
 */
static
void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
        hammer_mount_t hmp = ip->hmp;
        int error;

        hammer_flusher_clean_loose_ios(hmp);
        error = hammer_sync_inode(ip);
        if (error != EWOULDBLOCK)
                ip->error = error;
        hammer_flush_inode_done(ip);
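        /*
         * If a finalization is in progress do not generate more dirty
         * meta-data; stall until it completes.  Then finalize ourselves
         * if the UNDO FIFO is nearly exhausted or too much dirty
         * meta-data has built up.
         */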
        while (hmp->flusher.finalize_want)
                tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
        if (hammer_flusher_undo_exhausted(trans, 1)) {
                kprintf("HAMMER: Warning: UNDO area too small!\n");
                hammer_flusher_finalize(trans, 1);
        } else if (hammer_flusher_meta_limit(trans->hmp)) {
                hammer_flusher_finalize(trans, 0);
        }
}

/*
 * Return non-zero if the UNDO area has less than (quarter / 4) of its
 * space left.  For example, a quarter of 3 returns non-zero when less
 * than three quarters of the maximum undo space remains available.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *       will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *       running in parallel with a flush and cannot be allowed to drop
 *       available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *       to give the flush plenty of runway to work in.
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
        if (hammer_undo_space(trans) <
            hammer_undo_max(trans->hmp) * quarter / 4) {
                return(1);
        } else {
                return(0);
        }
}

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
        hammer_volume_t root_volume;
        hammer_blockmap_t cundomap, dundomap;
        hammer_mount_t hmp;
        hammer_io_t io;
        int count;
        int i;

        hmp = trans->hmp;
        root_volume = trans->rootvol;

        /*
         * Exclusively lock the flusher.  This guarantees that all dirty
         * buffers will be idled (have a mod-count of 0).
         */
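        /*
         * Bumping finalize_want also stalls the slave threads between
         * inodes in hammer_flusher_flush_inode(), so no new meta-data
         * is generated while we finalize.
         */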
        ++hmp->flusher.finalize_want;
        hammer_lock_ex(&hmp->flusher.finalize_lock);

        /*
         * If this isn't the final sync several threads may have hit the
         * meta-limit at the same time and raced.  Only sync if we really
         * have to, after acquiring the lock.
         */
        if (final == 0 && !hammer_flusher_meta_limit(hmp))
                goto done;

        /*
         * Flush data buffers.  This can occur asynchronously and at any
         * time.  We must interlock against the frontend direct-data write
         * but do not have to acquire the sync-lock yet.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                hammer_io_write_interlock(io);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_io_done_interlock(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * The sync-lock is required for the remaining sequence.  This lock
         * prevents meta-data from being modified.
         */
        hammer_sync_lock_ex(trans);

        /*
         * If we have been asked to finalize the volume header sync the
         * cached blockmap to the on-disk blockmap.  Generate an UNDO
         * record for the update.
         */
        if (final) {
                cundomap = &hmp->blockmap[0];
                dundomap = &root_volume->ondisk->vol0_blockmap[0];
                if (root_volume->io.modified) {
                        hammer_modify_volume(trans, root_volume,
                                             dundomap, sizeof(hmp->blockmap));
                        for (i = 0; i < HAMMER_MAX_ZONES; ++i)
                                hammer_crc_set_blockmap(&cundomap[i]);
                        bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
                        hammer_modify_volume_done(root_volume);
                }
        }

        /*
         * Flush UNDOs.  The UNDO records must reach the media before
         * any of the meta-data they cover is allowed to be flushed.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * Wait for I/Os to complete
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl1");

        /*
         * Update the on-disk volume header with new UNDO FIFO end position
         * (do not generate new UNDO records for this change).  We have to
         * do this for the UNDO FIFO whether (final) is set or not.
         *
         * Also update the on-disk next_tid field.  This does not require
         * an UNDO.  However, because our TID is generated before we get
         * the sync lock another sync may have beat us to the punch.
         *
         * This also has the side effect of updating first_offset based on
         * a prior finalization when the first finalization of the next flush
         * cycle occurs, removing any undo info from the prior finalization
         * from consideration.
         *
         * The volume header will be flushed out synchronously.
         */
        dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
        cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

        if (dundomap->first_offset != cundomap->first_offset ||
            dundomap->next_offset != cundomap->next_offset) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                dundomap->first_offset = cundomap->first_offset;
                dundomap->next_offset = cundomap->next_offset;
                hammer_crc_set_blockmap(dundomap);
                hammer_modify_volume_done(root_volume);
        }

        if (root_volume->io.modified) {
                hammer_modify_volume(NULL, root_volume, NULL, 0);
                if (root_volume->ondisk->vol0_next_tid < trans->tid)
                        root_volume->ondisk->vol0_next_tid = trans->tid;
                hammer_crc_set_volume(root_volume->ondisk);
                hammer_modify_volume_done(root_volume);
                hammer_io_flush(&root_volume->io);
        }

        /*
         * Wait for I/Os to complete
         */
        hammer_flusher_clean_loose_ios(hmp);
        hammer_io_wait_all(hmp, "hmrfl2");

        /*
         * Flush meta-data.  The meta-data will be undone if we crash
         * so we can safely flush it asynchronously.
         *
         * Repeated catchups will wind up flushing this update's meta-data
         * and the UNDO buffers for the next update simultaneously.  This
         * is ok.
         */
        count = 0;
        while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
                KKASSERT(io->modify_refs == 0);
                if (io->lock.refs == 0)
                        ++hammer_count_refedbufs;
                hammer_ref(&io->lock);
                KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
                hammer_io_flush(io);
                hammer_rel_buffer((hammer_buffer_t)io, 0);
                ++count;
        }

        /*
         * If this is the final finalization for the flush group set
         * up for the next sequence by setting a new first_offset in
         * our cached blockmap and clearing the undo history.
         *
         * Even though we have updated our cached first_offset, the on-disk
         * first_offset still governs available-undo-space calculations.
         */
        if (final) {
                cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
                cundomap->first_offset = cundomap->next_offset;
                hammer_clear_undo_history(hmp);
        }

        hammer_sync_unlock(trans);

done:
        hammer_unlock(&hmp->flusher.finalize_lock);
        if (--hmp->flusher.finalize_want == 0)
                wakeup(&hmp->flusher.finalize_want);
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Because such buffers cannot be flushed until the UNDOs covering them
 * have been dealt with, letting them accumulate risks deadlocking the
 * kernel's buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
        if (hmp->locked_dirty_space + hmp->io_running_space >
            hammer_limit_dirtybufspace) {
                return(1);
        }
        return(0);
}

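/*
 * Return non-zero if the dirty meta-data has built up past half of the
 * limit.  Callers can use this as an earlier, softer threshold than
 * hammer_flusher_meta_limit().
 */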
int
hammer_flusher_meta_halflimit(hammer_mount_t hmp)
{
        if (hmp->locked_dirty_space + hmp->io_running_space >
            hammer_limit_dirtybufspace / 2) {
                return(1);
        }
        return(0);
}