kernel - VM PAGER part 2/2 - Expand vinitvmio() and vnode_pager_alloc()
sys/vfs/hammer/hammer_inode.c (dragonfly.git)
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.114 2008/09/24 00:53:51 dillon Exp $
 */

#include "hammer.h"
#include <vm/vm_extern.h>

static int      hammer_unload_inode(struct hammer_inode *ip);
static void     hammer_free_inode(hammer_inode_t ip);
static void     hammer_flush_inode_core(hammer_inode_t ip,
                                        hammer_flush_group_t flg, int flags);
static int      hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int      hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int      hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
                                        hammer_flush_group_t flg);
static int      hammer_setup_parent_inodes_helper(hammer_record_t record,
                                        int depth, hammer_flush_group_t flg);
static void     hammer_inode_wakereclaims(hammer_inode_t ip);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif

/*
 * RB-Tree support for inode structures
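 *
 * Inodes sort by (obj_localization, obj_id, obj_asof), compared in that
 * priority order.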
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_localization < ip2->obj_localization)
                return(-1);
        if (ip1->obj_localization > ip2->obj_localization)
                return(1);
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}

int
hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->redo_fifo_start < ip2->redo_fifo_start)
                return(-1);
        if (ip1->redo_fifo_start > ip2->redo_fifo_start)
                return(1);
        return(0);
}

/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_localization < ip->obj_localization)
                return(-1);
        if (info->obj_localization > ip->obj_localization)
                return(1);
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}

/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
        hammer_inode_info_t info = data;

        if (ip->obj_localization > info->obj_localization)
                return(1);
        if (ip->obj_localization < info->obj_localization)
                return(-1);
        if (ip->obj_id > info->obj_id)
                return(1);
        if (ip->obj_id < info->obj_id)
                return(-1);
        return(0);
}

/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
        u_int32_t localization = *(u_int32_t *)data;
        if (ip->obj_localization > localization)
                return(1);
        if (ip->obj_localization < localization)
                return(-1);
        return(0);
}

/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
        if (p1->localization < p2->localization)
                return(-1);
        if (p1->localization > p2->localization)
                return(1);
        return(0);
}


RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
             hammer_pfs_rb_compare, u_int32_t, localization);
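
/*
 * The RB_GENERATE*() invocations above emit the tree operations used
 * throughout this file: hammer_ino_rb_tree_RB_LOOKUP_INFO() and
 * hammer_ino_rb_tree_RB_SCAN() for the inode tree, plus the RB_LOOKUP()/
 * RB_INSERT()/RB_REMOVE() support used for the pseudofs tree.
 */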

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 *
 * MPALMOSTSAFE
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(ap->a_vp);
                return(0);
        }

        /*
         * If the inode no longer has visibility in the filesystem try to
         * recycle it immediately, even if the inode is dirty.  Recycling
         * it quickly allows the system to reclaim buffer cache and VM
         * resources which can matter a lot in a heavily loaded system.
         *
         * This can deadlock in vfsync() if we aren't careful.
         *
         * Do not queue the inode to the flusher if we still have visibility,
         * otherwise namespace calls such as chmod will unnecessarily generate
         * multiple inode updates.
         */
        if (ip->ino_data.nlinks == 0) {
                get_mplock();
                hammer_inode_unloadable_check(ip, 0);
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
                vrecycle(ap->a_vp);
                rel_mplock();
        }
        return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 *
 * We must interlock ip->vp so hammer_get_vnode() can avoid races.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct vnode *vp;

        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
                hammer_lock_ex(&ip->lock);
                vp->v_data = NULL;
                ip->vp = NULL;

                if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
                        ++hammer_count_reclaiming;
                        ++hmp->inode_reclaims;
                        ip->flags |= HAMMER_INODE_RECLAIM;
                }
                hammer_unlock(&ip->lock);
                hammer_rel_inode(ip, 1);
        }
        return(0);
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
        hammer_mount_t hmp;
        struct vnode *vp;
        int error = 0;
        u_int8_t obj_type;

        hmp = ip->hmp;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp = *vpp;
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;

                        obj_type = ip->ino_data.obj_type;
                        vp->v_type = hammer_get_vnode_type(obj_type);

                        hammer_inode_wakereclaims(ip);

                        switch(ip->ino_data.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
                                break;
                        case HAMMER_OBJTYPE_REGFILE:
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         *
                         * Pseudo-filesystem roots can be accessed via
                         * non-root filesystem paths and setting VROOT may
                         * confuse the namecache.  Set VPFSROOT instead.
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT &&
                            ip->obj_asof == hmp->asof) {
                                if (ip->obj_localization == 0)
                                        vsetflags(vp, VROOT);
                                else
                                        vsetflags(vp, VPFSROOT);
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
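                        /*
                         * This commit expands vinitvmio() to take the
                         * file size plus the HAMMER blocksize and block
                         * offset at EOF, presumably so vnode_pager_alloc()
                         * can size the VM object to the correct
                         * block-aligned EOF.
                         */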
                        if (vp->v_type == VREG) {
                                vinitvmio(vp, ip->ino_data.size,
                                          hammer_blocksize(ip->ino_data.size),
                                          hammer_blockoff(ip->ino_data.size));
                        }
                        break;
                }

                /*
                 * Interlock vnode clearing.  This does not prevent the
                 * vnode from going into a reclaimed state but it does
                 * prevent it from being destroyed or reused so the vget()
                 * will properly fail.
                 */
                hammer_lock_ex(&ip->lock);
                if ((vp = ip->vp) == NULL) {
                        hammer_unlock(&ip->lock);
                        continue;
                }
                vhold_interlocked(vp);
                hammer_unlock(&ip->lock);

                /*
                 * loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp) {
                                vdrop(vp);
                                break;
                        }
                        vput(vp);
                }
                vdrop(vp);
        }
        *vpp = vp;
        return(error);
}

/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
                            int (*callback)(hammer_inode_t ip, void *data),
                            void *data)
{
        hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
                                   hammer_inode_info_cmp_all_history,
                                   callback, iinfo);
}

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
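 *
 * Illustrative sketch only (not code from this file): a typical frontend
 * caller pairs this with hammer_get_vnode() and hammer_rel_inode():
 *
 *      ip = hammer_get_inode(trans, dip, obj_id, asof, localization,
 *                            0, &error);
 *      if (ip) {
 *              error = hammer_get_vnode(ip, &vp);
 *              hammer_rel_inode(ip, 0);
 *      }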
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_node_cache *cachep;
        struct hammer_inode_info iinfo;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;


        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a dummy inode we return a failure so dounlink
         * (which does another lookup) doesn't try to mess with the
         * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
         * to ref dummy inodes.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        if (hmp->ronly)
                ip->flags |= HAMMER_INODE_RO;
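        /*
         * 0x7FFFFFFFFFFFFFFF is the maximum positive 64-bit offset, used
         * here as a sentinel meaning no truncation is pending.
         */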
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Locate the on-disk inode.  If this is a PFS root we always
         * access the current version of the root inode and (if it is not
         * a master) always access information under it with a snapshot
         * TID.
         *
         * We cache recent inode lookups in this directory in dip->cache[2].
         * If we can't find it we assume the inode we are looking for is
         * close to the directory inode.
         */
retry:
        cachep = NULL;
        if (dip) {
                if (dip->cache[2].node)
                        cachep = &dip->cache[2];
                else
                        cachep = &dip->cache[0];
        }
        hammer_init_cursor(trans, &cursor, cachep, NULL);
        cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;

        cursor.asof = iinfo.obj_asof;
        cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
                       HAMMER_CURSOR_ASOF;

        *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
                ip->ino_data = cursor.data->inode;

                /*
                 * cache[0] tries to cache the location of the object inode.
                 * The assumption is that it is near the directory inode.
                 *
                 * cache[1] tries to cache the location of the object data.
                 * We might have something in the governing directory from
                 * scan optimizations (see the strategy code in
                 * hammer_vnops.c).
                 *
                 * We update dip->cache[2], if possible, with the location
                 * of the object inode for future directory shortcuts.
                 */
                hammer_cache_node(&ip->cache[0], cursor.node);
                if (dip) {
                        if (dip->cache[3].node) {
                                hammer_cache_node(&ip->cache[1],
                                                  dip->cache[3].node);
                        }
                        hammer_cache_node(&dip->cache[2], cursor.node);
                }

                /*
                 * The file should not contain any data past the file size
                 * stored in the inode.  Setting save_trunc_off to the
                 * file size instead of max reduces B-Tree lookup overheads
                 * on append by allowing the flusher to avoid checking for
                 * record overwrites.
                 */
                ip->save_trunc_off = ip->ino_data.size;

                /*
                 * Locate and assign the pseudofs management structure to
                 * the inode.
                 */
                if (dip && dip->obj_localization == ip->obj_localization) {
                        ip->pfsm = dip->pfsm;
                        hammer_ref(&ip->pfsm->lock);
                } else {
                        ip->pfsm = hammer_load_pseudofs(trans,
                                                        ip->obj_localization,
                                                        errorp);
                        *errorp = 0;    /* ignore ENOENT */
                }
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }

                hammer_free_inode(ip);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);
        trans->flags |= HAMMER_TRANSF_NEWINODE;
        return (ip);
}

/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a non-fake inode we return an error.  Only fake
         * inodes can be returned by this routine.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        *errorp = 0;
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Populate the dummy inode.  Leave everything zero'd out.
         *
         * (ip->ino_leaf and ip->ino_data)
         *
         * Make the dummy inode a FIFO object which most copy programs
         * will properly ignore.
         */
        ip->save_trunc_off = ip->ino_data.size;
        ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

        /*
         * Locate and assign the pseudofs management structure to
         * the inode.
         */
        if (dip && dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
        } else {
                ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
                                                errorp);
                *errorp = 0;    /* ignore ENOENT */
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         *
         * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        goto loop;
                }
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }
                hammer_free_inode(ip);
                ip = NULL;
        }
        trans->flags |= HAMMER_TRANSF_NEWINODE;
        return (ip);
}

/*
 * Return a referenced inode only if it is in our inode cache.
 *
 * Dummy inodes do not count.
 */
struct hammer_inode *
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
                  hammer_tid_t asof, u_int32_t localization)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;

        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY)
                        ip = NULL;
                else
                        hammer_ref(&ip->lock);
        }
        return(ip);
}

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred,
                    hammer_inode_t dip, const char *name, int namelen,
                    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;
        int error;
        int64_t namekey;
        u_int32_t dummy;

        hmp = trans->hmp;

        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        trans->flags |= HAMMER_TRANSF_NEWINODE;

        if (pfsm) {
                KKASSERT(pfsm->localization != 0);
                ip->obj_id = HAMMER_OBJID_ROOT;
                ip->obj_localization = pfsm->localization;
        } else {
                KKASSERT(dip != NULL);
                namekey = hammer_directory_namekey(dip, name, namelen, &dummy);
                ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
                ip->obj_localization = dip->obj_localization;
        }

        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY |
                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;

        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        /* ip->save_trunc_off = 0; (already zero) */
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        ip->ino_data.atime = trans->time;
        ip->ino_data.mtime = trans->time;
        ip->ino_data.size = 0;
        ip->ino_data.nlinks = 0;

        /*
         * A nohistory designator on the parent directory is inherited by
         * the child.  We will do this even for pseudo-fs creation... the
         * sysadmin can turn it off.
         */
        if (dip) {
                ip->ino_data.uflags = dip->ino_data.uflags &
                                      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
        }

        ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_leaf.base.localization = ip->obj_localization +
                                         HAMMER_LOCALIZE_INODE;
        ip->ino_leaf.base.obj_id = ip->obj_id;
        ip->ino_leaf.base.key = 0;
        ip->ino_leaf.base.create_tid = 0;
        ip->ino_leaf.base.delete_tid = 0;
        ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;

        /*
         * If we are running version 2 or greater directory entries are
         * inode-localized instead of data-localized.
         */
        if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
                if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        ip->ino_data.cap_flags |=
                                HAMMER_INODE_CAP_DIR_LOCAL_INO;
                }
        }

        /*
         * Setup the ".." pointer.  This only needs to be done for directories
         * but we do it for all objects as a recovery aid.
         */
        if (dip)
                ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
#if 0
        /*
         * The parent_obj_localization field only applies to pseudo-fs roots.
         * XXX this is no longer applicable, PFSs are no longer directly
         * tied into the parent's directory structure.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
            ip->obj_id == HAMMER_OBJID_ROOT) {
                ip->ino_data.ext.obj.parent_obj_localization =
                                                dip->obj_localization;
        }
#endif

        switch(ip->ino_leaf.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        if (dip) {
                xuid = hammer_to_unix_xid(&dip->ino_data.uid);
                xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
                                             xuid, cred, &vap->va_mode);
        } else {
                xuid = 0;
        }
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
        else
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
        else if (dip)
                ip->ino_data.gid = dip->ino_data.gid;

        hammer_ref(&ip->lock);

        if (pfsm) {
                ip->pfsm = pfsm;
                hammer_ref(&pfsm->lock);
                error = 0;
        } else if (dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
                error = 0;
        } else {
                ip->pfsm = hammer_load_pseudofs(trans,
                                                ip->obj_localization,
                                                &error);
                error = 0;      /* ignore ENOENT */
        }

        if (error) {
                hammer_free_inode(ip);
                ip = NULL;
        } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                panic("hammer_create_inode: duplicate obj_id %llx",
                      (long long)ip->obj_id);
                /* not reached */
                hammer_free_inode(ip);
        }
        *ipp = ip;
        return(error);
}

/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
        struct hammer_mount *hmp;

        hmp = ip->hmp;
        KKASSERT(ip->lock.refs == 1);
        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        hammer_uncache_node(&ip->cache[2]);
        hammer_uncache_node(&ip->cache[3]);
        hammer_inode_wakereclaims(ip);
        if (ip->objid_cache)
                hammer_clear_objid(ip);
        --hammer_count_inodes;
        --hmp->count_inodes;
        if (ip->pfsm) {
                hammer_rel_pseudofs(hmp, ip->pfsm);
                ip->pfsm = NULL;
        }
        kfree(ip, hmp->m_inodes);
        ip = NULL;
}

/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
                     u_int32_t localization, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_inode_t ip;
        hammer_pseudofs_inmem_t pfsm;
        struct hammer_cursor cursor;
        int bytes;

retry:
        pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
        if (pfsm) {
                hammer_ref(&pfsm->lock);
                *errorp = 0;
                return(pfsm);
        }

        /*
         * PFS records are stored in the root inode (not the PFS root inode,
         * but the real root).  Avoid an infinite recursion if loading
         * the PFS for the real root.
         */
        if (localization) {
                ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
                                      HAMMER_MAX_TID,
                                      HAMMER_DEF_LOCALIZATION, 0, errorp);
        } else {
                ip = NULL;
        }

        pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
        pfsm->localization = localization;
        pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
        pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

        hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
        cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        if (ip)
                *errorp = hammer_ip_lookup(&cursor);
        else
                *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == 0) {
                *errorp = hammer_ip_resolve_data(&cursor);
                if (*errorp == 0) {
                        if (cursor.data->pfsd.mirror_flags &
                            HAMMER_PFSD_DELETED) {
                                *errorp = ENOENT;
                        } else {
                                bytes = cursor.leaf->data_len;
                                if (bytes > sizeof(pfsm->pfsd))
                                        bytes = sizeof(pfsm->pfsd);
                                bcopy(cursor.data, &pfsm->pfsd, bytes);
                        }
                }
        }
        hammer_done_cursor(&cursor);

        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_ref(&pfsm->lock);
        if (ip)
                hammer_rel_inode(ip, 0);
        if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
                kfree(pfsm, hmp->m_misc);
                goto retry;
        }
        return(pfsm);
}

/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
        struct hammer_cursor cursor;
        hammer_record_t record;
        hammer_inode_t ip;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = pfsm->localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        /*
         * Replace any in-memory version of the record.
         */
        error = hammer_ip_lookup(&cursor);
        if (error == 0 && hammer_cursor_inmem(&cursor)) {
                record = cursor.iprec;
                if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
                        KKASSERT(cursor.deadlk_rec == NULL);
                        hammer_ref(&record->lock);
                        cursor.deadlk_rec = record;
                        error = EDEADLK;
                } else {
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        error = 0;
                }
        }

        /*
         * Allocate replacement general record.  The backend flush will
         * delete any on-disk version of the record.
         */
        if (error == 0 || error == ENOENT) {
                record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
                record->type = HAMMER_MEM_RECORD_GENERAL;

                record->leaf.base.localization = ip->obj_localization +
                                                 HAMMER_LOCALIZE_MISC;
                record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
                record->leaf.base.key = pfsm->localization;
                record->leaf.data_len = sizeof(pfsm->pfsd);
                bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
                error = hammer_ip_add_record(trans, record);
        }
        hammer_done_cursor(&cursor);
        if (error == EDEADLK)
                goto retry;
        hammer_rel_inode(ip, 0);
        return(error);
}

/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
                       hammer_pseudofs_inmem_t pfsm)
{
        hammer_inode_t ip;
        struct vattr vap;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              pfsm->localization, 0, &error);
        if (ip == NULL) {
                vattr_null(&vap);
                vap.va_mode = 0755;
                vap.va_type = VDIR;
                error = hammer_create_inode(trans, &vap, cred,
                                            NULL, NULL, 0,
                                            pfsm, &ip);
                if (error == 0) {
                        ++ip->ino_data.nlinks;
                        hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
                }
        }
        if (ip)
                hammer_rel_inode(ip, 0);
        return(error);
}

/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static
int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
        int res;

        hammer_ref(&ip->lock);
        if (ip->lock.refs == 2 && ip->vp)
                vclean_unlocked(ip->vp);
        if (ip->lock.refs == 1 && ip->vp == NULL)
                res = 0;
        else
                res = -1;       /* stop, someone is using the inode */
        hammer_rel_inode(ip, 0);
        return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
        int res;
        int try;

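        /*
         * Make several passes.  The intervening flusher syncs give the
         * backend a chance to finish disassociating the PFS's inodes.
         */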
        for (try = res = 0; try < 4; ++try) {
                res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
                                           hammer_inode_pfs_cmp,
                                           hammer_unload_pseudofs_callback,
                                           &localization);
                if (res == 0 && try > 1)
                        break;
                hammer_flusher_sync(trans->hmp);
        }
        if (res != 0)
                res = ENOTEMPTY;
        return(res);
}


/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
        hammer_unref(&pfsm->lock);
        if (pfsm->lock.refs == 0) {
                RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
                kfree(pfsm, hmp->m_misc);
        }
}

/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        hammer_record_t record;
        int error;
        int redirty;

retry:
        error = 0;

        /*
         * If the inode has a presence on-disk then locate it and mark
         * it deleted, setting DELONDISK.
         *
         * The record may or may not be physically deleted, depending on
         * the retention policy.
         */
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
            HAMMER_INODE_ONDISK) {
                hammer_normalize_cursor(cursor);
                cursor->key_beg.localization = ip->obj_localization +
                                               HAMMER_LOCALIZE_INODE;
                cursor->key_beg.obj_id = ip->obj_id;
                cursor->key_beg.key = 0;
                cursor->key_beg.create_tid = 0;
                cursor->key_beg.delete_tid = 0;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
                cursor->key_beg.obj_type = 0;
                cursor->asof = ip->obj_asof;
                cursor->flags &= ~HAMMER_CURSOR_INITMASK;
                cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
                cursor->flags |= HAMMER_CURSOR_BACKEND;

                error = hammer_btree_lookup(cursor);
                if (hammer_debug_inode)
                        kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

                if (error == 0) {
                        error = hammer_ip_delete_record(cursor, ip, trans->tid);
                        if (hammer_debug_inode)
                                kprintf(" error %d\n", error);
                        if (error == 0) {
                                ip->flags |= HAMMER_INODE_DELONDISK;
                        }
                        if (cursor->node)
                                hammer_cache_node(&ip->cache[0], cursor->node);
                }
                if (error == EDEADLK) {
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("IPDED %p %d\n", ip, error);
                        if (error == 0)
                                goto retry;
                }
        }

        /*
         * Ok, write out the initial record or a new record (after deleting
         * the old one), unless the DELETED flag is set.  This routine will
         * clear DELONDISK if it writes out a record.
         *
         * Update our inode statistics if this is the first application of
         * the inode on-disk.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
                /*
                 * Generate a record and write it to the media.  We clean-up
                 * the state before releasing so we do not have to set-up
                 * a flush_group.
                 */
                record = hammer_alloc_mem_record(ip, 0);
                record->type = HAMMER_MEM_RECORD_INODE;
                record->flush_state = HAMMER_FST_FLUSH;
                record->leaf = ip->sync_ino_leaf;
                record->leaf.base.create_tid = trans->tid;
                record->leaf.data_len = sizeof(ip->sync_ino_data);
                record->leaf.create_ts = trans->time32;
                record->data = (void *)&ip->sync_ino_data;
                record->flags |= HAMMER_RECF_INTERLOCK_BE;

                /*
                 * If this flag is set we cannot sync the new file size
                 * because we haven't finished related truncations.  The
                 * inode will be flushed in another flush group to finish
                 * the job.
                 */
                if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
                    ip->sync_ino_data.size != ip->ino_data.size) {
                        redirty = 1;
                        ip->sync_ino_data.size = ip->ino_data.size;
                } else {
                        redirty = 0;
                }

                for (;;) {
                        error = hammer_ip_sync_record_cursor(cursor, record);
                        if (hammer_debug_inode)
                                kprintf("GENREC %p rec %08x %d\n",
                                        ip, record->flags, error);
                        if (error != EDEADLK)
                                break;
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("GENREC reinit %d\n", error);
                        if (error)
                                break;
                }

                /*
                 * Note:  The record was never on the inode's record tree
                 * so just wave our hands importantly and destroy it.
                 */
                record->flags |= HAMMER_RECF_COMMITTED;
                record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
                record->flush_state = HAMMER_FST_IDLE;
                ++ip->rec_generation;
                hammer_rel_mem_record(record);

                /*
                 * Finish up.
                 */
                if (error == 0) {
                        if (hammer_debug_inode)
                                kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
                        ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                            HAMMER_INODE_SDIRTY |
                                            HAMMER_INODE_ATIME |
                                            HAMMER_INODE_MTIME);
                        ip->flags &= ~HAMMER_INODE_DELONDISK;
                        if (redirty)
                                ip->sync_flags |= HAMMER_INODE_DDIRTY;

                        /*
                         * Root volume count of inodes
                         */
                        hammer_sync_lock_sh(trans);
                        if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
                                hammer_modify_volume_field(trans,
                                                           trans->rootvol,
                                                           vol0_stat_inodes);
                                ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans->rootvol);
                                ip->flags |= HAMMER_INODE_ONDISK;
                                if (hammer_debug_inode)
                                        kprintf("NOWONDISK %p\n", ip);
                        }
                        hammer_sync_unlock(trans);
                }
        }

        /*
         * If the inode has been destroyed, clean out any left-over flags
         * that may have been set by the frontend.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                    HAMMER_INODE_SDIRTY |
                                    HAMMER_INODE_ATIME |
                                    HAMMER_INODE_MTIME);
        }
        return(error);
}

/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
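 *
 * (The practical consequence: a crash may lose a recent atime update,
 * which is harmless, while mtime is covered by UNDO and so always
 * recovers consistently.)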
1337  */
1338 static int
1339 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
1340 {
1341         hammer_transaction_t trans = cursor->trans;
1342         int error;
1343
1344 retry:
1345         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
1346             HAMMER_INODE_ONDISK) {
1347                 return(0);
1348         }
1349
1350         hammer_normalize_cursor(cursor);
1351         cursor->key_beg.localization = ip->obj_localization + 
1352                                        HAMMER_LOCALIZE_INODE;
1353         cursor->key_beg.obj_id = ip->obj_id;
1354         cursor->key_beg.key = 0;
1355         cursor->key_beg.create_tid = 0;
1356         cursor->key_beg.delete_tid = 0;
1357         cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
1358         cursor->key_beg.obj_type = 0;
1359         cursor->asof = ip->obj_asof;
1360         cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1361         cursor->flags |= HAMMER_CURSOR_ASOF;
1362         cursor->flags |= HAMMER_CURSOR_GET_LEAF;
1363         cursor->flags |= HAMMER_CURSOR_GET_DATA;
1364         cursor->flags |= HAMMER_CURSOR_BACKEND;
1365
1366         error = hammer_btree_lookup(cursor);
1367         if (error == 0) {
1368                 hammer_cache_node(&ip->cache[0], cursor->node);
1369                 if (ip->sync_flags & HAMMER_INODE_MTIME) {
1370                         /*
1371                          * Updating MTIME requires an UNDO.  Just cover
1372                          * both atime and mtime.
1373                          */
1374                         hammer_sync_lock_sh(trans);
1375                         hammer_modify_buffer(trans, cursor->data_buffer,
1376                                      HAMMER_ITIMES_BASE(&cursor->data->inode),
1377                                      HAMMER_ITIMES_BYTES);
1378                         cursor->data->inode.atime = ip->sync_ino_data.atime;
1379                         cursor->data->inode.mtime = ip->sync_ino_data.mtime;
1380                         hammer_modify_buffer_done(cursor->data_buffer);
1381                         hammer_sync_unlock(trans);
1382                 } else if (ip->sync_flags & HAMMER_INODE_ATIME) {
1383                         /*
1384                          * Updating atime only can be done in-place with
1385                          * no UNDO.
1386                          */
1387                         hammer_sync_lock_sh(trans);
1388                         hammer_modify_buffer(trans, cursor->data_buffer,
1389                                              NULL, 0);
1390                         cursor->data->inode.atime = ip->sync_ino_data.atime;
1391                         hammer_modify_buffer_done(cursor->data_buffer);
1392                         hammer_sync_unlock(trans);
1393                 }
1394                 ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
1395         }
1396         if (error == EDEADLK) {
1397                 hammer_done_cursor(cursor);
1398                 error = hammer_init_cursor(trans, cursor,
1399                                            &ip->cache[0], ip);
1400                 if (error == 0)
1401                         goto retry;
1402         }
1403         return(error);
1404 }
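/*
 * Illustrative only: a minimal sketch of how a backend-side caller might
 * drive hammer_update_itimes(), using the cursor helpers seen elsewhere
 * in this file.  The transaction/cursor setup is an assumption for the
 * sketch, not quoted from the real call site.
 */
#if 0
static void
example_update_itimes(hammer_transaction_t trans, hammer_inode_t ip)
{
        struct hammer_cursor cursor;
        int error;

        /* cursor is keyed against the inode's cached B-Tree position */
        error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
        if (error)
                return;
        if (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))
                error = hammer_update_itimes(&cursor, ip);
        /* EDEADLK retries are handled inside hammer_update_itimes() */
        hammer_done_cursor(&cursor);
}
#endif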
1405
1406 /*
1407  * Release a reference on an inode, flush as requested.
1408  *
1409  * On the last reference we queue the inode to the flusher for its final
1410  * disposition.
1411  */
1412 void
1413 hammer_rel_inode(struct hammer_inode *ip, int flush)
1414 {
1415         /*hammer_mount_t hmp = ip->hmp;*/
1416
1417         /*
1418          * Handle disposition when dropping the last ref.
1419          */
1420         for (;;) {
1421                 if (ip->lock.refs == 1) {
1422                         /*
1423                          * Determine whether on-disk action is needed for
1424                          * the inode's final disposition.
1425                          */
1426                         KKASSERT(ip->vp == NULL);
1427                         hammer_inode_unloadable_check(ip, 0);
1428                         if (ip->flags & HAMMER_INODE_MODMASK) {
1429                                 hammer_flush_inode(ip, 0);
1430                         } else if (ip->lock.refs == 1) {
1431                                 hammer_unload_inode(ip);
1432                                 break;
1433                         }
1434                 } else {
1435                         if (flush)
1436                                 hammer_flush_inode(ip, 0);
1437
1438                         /*
1439                          * The inode still has multiple refs, try to drop
1440                          * one ref.
1441                          */
1442                         KKASSERT(ip->lock.refs >= 1);
1443                         if (ip->lock.refs > 1) {
1444                                 hammer_unref(&ip->lock);
1445                                 break;
1446                         }
1447                 }
1448         }
1449 }
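/*
 * Illustrative only: the usual frontend pattern is to pair each inode
 * reference with a hammer_rel_inode() once the caller is done, letting
 * the loop above decide the final disposition.  The helper name below is
 * hypothetical.
 */
#if 0
        error = do_something_with(ip);  /* hypothetical per-inode work */
        hammer_rel_inode(ip, 0);        /* drop ref, no forced flush */
#endif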
1450
1451 /*
1452  * Unload and destroy the specified inode.  Must be called with one remaining
1453  * reference.  The reference is disposed of.
1454  *
1455  * The inode must be completely clean.
1456  */
1457 static int
1458 hammer_unload_inode(struct hammer_inode *ip)
1459 {
1460         hammer_mount_t hmp = ip->hmp;
1461
1462         KASSERT(ip->lock.refs == 1,
1463                 ("hammer_unload_inode: %d refs\n", ip->lock.refs));
1464         KKASSERT(ip->vp == NULL);
1465         KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
1466         KKASSERT(ip->cursor_ip_refs == 0);
1467         KKASSERT(hammer_notlocked(&ip->lock));
1468         KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);
1469
1470         KKASSERT(RB_EMPTY(&ip->rec_tree));
1471         KKASSERT(TAILQ_EMPTY(&ip->target_list));
1472
1473         if (ip->flags & HAMMER_INODE_RDIRTY) {
1474                 RB_REMOVE(hammer_redo_rb_tree, &hmp->rb_redo_root, ip);
1475                 ip->flags &= ~HAMMER_INODE_RDIRTY;
1476         }
1477         RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);
1478
1479         hammer_free_inode(ip);
1480         return(0);
1481 }
1482
1483 /*
1484  * Called during unmounting if a critical error occurred.  The in-memory
1485  * inode and all related structures are destroyed.
1486  *
1487  * If a critical error did not occur the unmount code calls the standard
1488  * release and asserts that the inode is gone.
1489  */
1490 int
1491 hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
1492 {
1493         hammer_record_t rec;
1494
1495         /*
1496          * Get rid of the inode's in-memory records, regardless of their
1497          * state, and clear the mod-mask.
1498          */
1499         while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
1500                 TAILQ_REMOVE(&ip->target_list, rec, target_entry);
1501                 rec->target_ip = NULL;
1502                 if (rec->flush_state == HAMMER_FST_SETUP)
1503                         rec->flush_state = HAMMER_FST_IDLE;
1504         }
1505         while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
1506                 if (rec->flush_state == HAMMER_FST_FLUSH)
1507                         --rec->flush_group->refs;
1508                 else
1509                         hammer_ref(&rec->lock);
1510                 KKASSERT(rec->lock.refs == 1);
1511                 rec->flush_state = HAMMER_FST_IDLE;
1512                 rec->flush_group = NULL;
1513                 rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
1514                 rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
1515                 ++ip->rec_generation;
1516                 hammer_rel_mem_record(rec);
1517         }
1518         ip->flags &= ~HAMMER_INODE_MODMASK;
1519         ip->sync_flags &= ~HAMMER_INODE_MODMASK;
1520         KKASSERT(ip->vp == NULL);
1521
1522         /*
1523          * Remove the inode from any flush group, force it idle.  FLUSH
1524          * and SETUP states have an inode ref.
1525          */
1526         switch(ip->flush_state) {
1527         case HAMMER_FST_FLUSH:
1528                 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
1529                 --ip->flush_group->refs;
1530                 ip->flush_group = NULL;
1531                 /* fall through */
1532         case HAMMER_FST_SETUP:
1533                 hammer_unref(&ip->lock);
1534                 ip->flush_state = HAMMER_FST_IDLE;
1535                 /* fall through */
1536         case HAMMER_FST_IDLE:
1537                 break;
1538         }
1539
1540         /*
1541          * There shouldn't be any associated vnode.  The unload needs at
1542  * least one ref; if we do have a vp, steal its ip ref.
1543          */
1544         if (ip->vp) {
1545                 kprintf("hammer_destroy_inode_callback: Unexpected "
1546                         "vnode association ip %p vp %p\n", ip, ip->vp);
1547                 ip->vp->v_data = NULL;
1548                 ip->vp = NULL;
1549         } else {
1550                 hammer_ref(&ip->lock);
1551         }
1552         hammer_unload_inode(ip);
1553         return(0);
1554 }
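/*
 * Illustrative only: this callback is meant to be driven by an RB_SCAN
 * over the mount's inode tree during an errored unmount, in the same
 * style as the RB_SCAN calls used later in this file (call site assumed):
 */
#if 0
        RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
                hammer_destroy_inode_callback, NULL);
#endif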
1555
1556 /*
1557  * Called on mount -u when switching from RW to RO or vice versa.  Adjust
1558  * the read-only flag for cached inodes.
1559  *
1560  * This routine is called from a RB_SCAN().
1561  */
1562 int
1563 hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
1564 {
1565         hammer_mount_t hmp = ip->hmp;
1566
1567         if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
1568                 ip->flags |= HAMMER_INODE_RO;
1569         else
1570                 ip->flags &= ~HAMMER_INODE_RO;
1571         return(0);
1572 }
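/*
 * Illustrative only: per the comment above this is an RB_SCAN callback,
 * so the mount -u path presumably invokes it along these lines:
 */
#if 0
        RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
                hammer_reload_inode, NULL);
#endif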
1573
1574 /*
1575  * A transaction has modified an inode, requiring updates as specified by
1576  * the passed flags.
1577  *
1578  * HAMMER_INODE_DDIRTY: Inode data has been updated, not incl mtime/atime,
1579  *                      and not including size changes due to write-append
1580  *                      (but other size changes are included).
1581  * HAMMER_INODE_SDIRTY: Inode data has been updated, size changes due to
1582  *                      write-append.
1583  * HAMMER_INODE_XDIRTY: Dirty in-memory records
1584  * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
1585  * HAMMER_INODE_DELETED: Inode record/data must be deleted
1586  * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
1587  */
1588 void
1589 hammer_modify_inode(hammer_inode_t ip, int flags)
1590 {
1591         /*
1592          * A ronly value of 0 or 2 does not trigger the assertion;
1593          * 2 is a special error state.
1594          */
1595         KKASSERT(ip->hmp->ronly != 1 ||
1596                   (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 
1597                             HAMMER_INODE_SDIRTY |
1598                             HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
1599                             HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
1600         if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
1601                 ip->flags |= HAMMER_INODE_RSV_INODES;
1602                 ++ip->hmp->rsv_inodes;
1603         }
1604
1605         ip->flags |= flags;
1606 }
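/*
 * Illustrative only: a frontend operation changing inode meta-data (a
 * chmod-style attribute update, say) would dirty the inode with the
 * appropriate bits from the list above.  new_mode is hypothetical.
 */
#if 0
        ip->ino_data.mode = new_mode;
        hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
#endif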
1607
1608 /*
1609  * Request that an inode be flushed.  This whole mess cannot block and may
1610  * recurse (if not synchronous).  Once requested HAMMER will attempt to
1611  * actively flush the inode until the flush can be done.
1612  *
1613  * The inode may already be flushing, or may be in a setup state.  We can
1614  * place the inode in a flushing state if it is currently idle and flag it
1615  * to reflush if it is currently flushing.
1616  *
1617  * Upon return if the inode could not be flushed due to a setup
1618  * dependency, then it will be automatically flushed when the dependency
1619  * is satisfied.
1620  */
1621 void
1622 hammer_flush_inode(hammer_inode_t ip, int flags)
1623 {
1624         hammer_mount_t hmp;
1625         hammer_flush_group_t flg;
1626         int good;
1627
1628         /*
1629          * next_flush_group is the first flush group we can place the inode
1630          * in.  It may be NULL.  If it becomes full we append a new flush
1631          * group and make that the next_flush_group.
1632          */
1633         hmp = ip->hmp;
1634         while ((flg = hmp->next_flush_group) != NULL) {
1635                 KKASSERT(flg->running == 0);
1636                 if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
1637                         break;
1638                 hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
1639                 hammer_flusher_async(ip->hmp, flg);
1640         }
1641         if (flg == NULL) {
1642                 flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
1643                 hmp->next_flush_group = flg;
1644                 RB_INIT(&flg->flush_tree);
1645                 TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
1646         }
1647
1648         /*
1649          * Trivial 'nothing to flush' case.  If the inode is in a SETUP
1650          * state we have to put it back into an IDLE state so we can
1651          * drop the extra ref.
1652          *
1653          * If we have a parent dependency we must still fall through
1654          * so we can run it.
1655          */
1656         if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
1657                 if (ip->flush_state == HAMMER_FST_SETUP &&
1658                     TAILQ_EMPTY(&ip->target_list)) {
1659                         ip->flush_state = HAMMER_FST_IDLE;
1660                         hammer_rel_inode(ip, 0);
1661                 }
1662                 if (ip->flush_state == HAMMER_FST_IDLE)
1663                         return;
1664         }
1665
1666         /*
1667          * Our flush action will depend on the current state.
1668          */
1669         switch(ip->flush_state) {
1670         case HAMMER_FST_IDLE:
1671                 /*
1672                  * We have no dependencies and can flush immediately.  Some
1673                  * of our children may not be flushable, so we have to
1674                  * re-test with that additional knowledge.
1675                  */
1676                 hammer_flush_inode_core(ip, flg, flags);
1677                 break;
1678         case HAMMER_FST_SETUP:
1679                 /*
1680                  * Recurse upwards through dependencies via target_list
1681                  * and start their flusher actions going if possible.
1682                  *
1683                  * 'good' is our connectivity.  -1 means we have none and
1684                  * can't flush, 0 means there weren't any dependencies, and
1685                  * 1 means we have good connectivity.
1686                  */
1687                 good = hammer_setup_parent_inodes(ip, 0, flg);
1688
1689                 if (good >= 0) {
1690                         /*
1691                          * We can continue if good >= 0.  Determine how 
1692                          * many records under our inode can be flushed (and
1693                          * mark them).
1694                          */
1695                         hammer_flush_inode_core(ip, flg, flags);
1696                 } else {
1697                         /*
1698                          * Parent has no connectivity, tell it to flush
1699                          * us as soon as it does.
1700                          *
1701                          * The REFLUSH flag is also needed to trigger
1702                          * dependency wakeups.
1703                          */
1704                         ip->flags |= HAMMER_INODE_CONN_DOWN |
1705                                      HAMMER_INODE_REFLUSH;
1706                         if (flags & HAMMER_FLUSH_SIGNAL) {
1707                                 ip->flags |= HAMMER_INODE_RESIGNAL;
1708                                 hammer_flusher_async(ip->hmp, flg);
1709                         }
1710                 }
1711                 break;
1712         case HAMMER_FST_FLUSH:
1713                 /*
1714                  * We are already flushing, flag the inode to reflush
1715                  * if needed after it completes its current flush.
1716                  *
1717                  * The REFLUSH flag is also needed to trigger
1718                          * dependency wakeups.
1719                  */
1720                 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
1721                         ip->flags |= HAMMER_INODE_REFLUSH;
1722                 if (flags & HAMMER_FLUSH_SIGNAL) {
1723                         ip->flags |= HAMMER_INODE_RESIGNAL;
1724                         hammer_flusher_async(ip->hmp, flg);
1725                 }
1726                 break;
1727         }
1728 }
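/*
 * Illustrative only: per the flags handling above a caller either queues
 * the inode passively or asks for the flusher to be signalled:
 */
#if 0
        hammer_flush_inode(ip, 0);                      /* passive queue */
        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);    /* kick flusher */
#endif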
1729
1730 /*
1731  * Scan ip->target_list, which is a list of records owned by PARENTS to our
1732  * ip which reference our ip.
1733  *
1734  * XXX This is a huge mess of recursive code, but not one bit of it blocks
1735  *     so for now do not ref/deref the structures.  Note that if we use the
1736  *     ref/rel code later, the rel CAN block.
1737  */
1738 static int
1739 hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
1740                            hammer_flush_group_t flg)
1741 {
1742         hammer_record_t depend;
1743         int good;
1744         int r;
1745
1746         /*
1747          * If we hit our recursion limit and we have parent dependencies,
1748          * we cannot continue.  Returning < 0 will cause us to be flagged
1749          * for reflush.  Returning -2 cuts off additional dependency checks
1750          * because they are likely to also hit the depth limit.
1751          *
1752          * We cannot return < 0 if there are no dependencies, since there
1753          * might then be nothing to wake up (ip).
1754          */
1755         if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
1756                 kprintf("HAMMER Warning: depth limit reached on "
1757                         "setup recursion, inode %p %016llx\n",
1758                         ip, (long long)ip->obj_id);
1759                 return(-2);
1760         }
1761
1762         /*
1763          * Scan dependencies
1764          */
1765         good = 0;
1766         TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
1767                 r = hammer_setup_parent_inodes_helper(depend, depth, flg);
1768                 KKASSERT(depend->target_ip == ip);
1769                 if (r < 0 && good == 0)
1770                         good = -1;
1771                 if (r > 0)
1772                         good = 1;
1773
1774                 /*
1775                  * If we failed due to the recursion depth limit then stop
1776                  * now.
1777                  */
1778                 if (r == -2)
1779                         break;
1780         }
1781         return(good);
1782 }
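/*
 * Illustrative only: the connectivity merge in the loop above reduces to
 * the rule below, factored out purely as a sketch (this helper does not
 * exist in the code).
 */
#if 0
static __inline int
merge_connectivity(int good, int r)
{
        if (r < 0 && good == 0)
                good = -1;      /* a dependency reported no connectivity */
        if (r > 0)
                good = 1;       /* any one good path gives connectivity */
        return(good);
}
#endif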
1783
1784 /*
1785  * This helper function takes a record representing the dependency between
1786  * the parent inode and child inode.
1787  *
1788  * record->ip           = parent inode
1789  * record->target_ip    = child inode
1790  * 
1791  * We are asked to recurse upwards and convert the record from SETUP
1792  * to FLUSH if possible.
1793  *
1794  * Return 1 if the record gives us connectivity
1795  *
1796  * Return 0 if the record is not relevant 
1797  *
1798  * Return -1 if we can't resolve the dependency and there is no connectivity.
1799  */
1800 static int
1801 hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
1802                                   hammer_flush_group_t flg)
1803 {
1804         hammer_mount_t hmp;
1805         hammer_inode_t pip;
1806         int good;
1807
1808         KKASSERT(record->flush_state != HAMMER_FST_IDLE);
1809         pip = record->ip;
1810         hmp = pip->hmp;
1811
1812         /*
1813          * If the record is already flushing, is it in our flush group?
1814          *
1815          * If it is in our flush group but it is a general record or a 
1816          * delete-on-disk, it does not improve our connectivity (return 0),
1817          * and if the target inode is not trying to destroy itself we can't
1818          * allow the operation yet anyway (the second return -1).
1819          */
1820         if (record->flush_state == HAMMER_FST_FLUSH) {
1821                 /*
1822                  * If not in our flush group ask the parent to reflush
1823                  * us as soon as possible.
1824                  */
1825                 if (record->flush_group != flg) {
1826                         pip->flags |= HAMMER_INODE_REFLUSH;
1827                         record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1828                         return(-1);
1829                 }
1830
1831                 /*
1832                  * If in our flush group everything is already set up,
1833                  * just return whether the record will improve our
1834                  * visibility or not.
1835                  */
1836                 if (record->type == HAMMER_MEM_RECORD_ADD)
1837                         return(1);
1838                 return(0);
1839         }
1840
1841         /*
1842          * It must be a setup record.  Try to resolve the setup dependencies
1843          * by recursing upwards so we can place ip on the flush list.
1844          *
1845          * Limit ourselves to 20 levels of recursion to avoid blowing out
1846          * the kernel stack.  If we hit the recursion limit we can't flush
1847          * until the parent flushes.  The parent will flush independently
1848          * on its own and ultimately a deep recursion will be resolved.
1849          */
1850         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1851
1852         good = hammer_setup_parent_inodes(pip, depth + 1, flg);
1853
1854         /*
1855          * If good < 0 the parent has no connectivity and we cannot safely
1856          * flush the directory entry, which also means we can't flush our
1857          * ip.  Flag us for downward recursion once the parent's
1858          * connectivity is resolved.  Flag the parent for [re]flush or it
1859          * may not check for downward recursions.
1860          */
1861         if (good < 0) {
1862                 pip->flags |= HAMMER_INODE_REFLUSH;
1863                 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1864                 return(good);
1865         }
1866
1867         /*
1868          * We are go, place the parent inode in a flushing state so we can
1869          * place its record in a flushing state.  Note that the parent
1870          * may already be flushing.  The record must be in the same flush
1871          * group as the parent.
1872          */
1873         if (pip->flush_state != HAMMER_FST_FLUSH)
1874                 hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
1875         KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
1876         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1877
1878 #if 0
1879         if (record->type == HAMMER_MEM_RECORD_DEL &&
1880             (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
1881                 /*
1882                  * Regardless of flushing state we cannot sync this path if the
1883                  * record represents a delete-on-disk but the target inode
1884                  * is not ready to sync its own deletion.
1885                  *
1886                  * XXX need to count effective nlinks to determine whether
1887                  * the flush is ok, otherwise removing a hardlink will
1888                  * just leave the DEL record to rot.
1889                  */
1890                 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
1891                 return(-1);
1892         } else
1893 #endif
1894         if (pip->flush_group == flg) {
1895                 /*
1896                  * Because we have not calculated nlinks yet we can just
1897                  * set records to the flush state if the parent is in
1898                  * the same flush group as we are.
1899                  */
1900                 record->flush_state = HAMMER_FST_FLUSH;
1901                 record->flush_group = flg;
1902                 ++record->flush_group->refs;
1903                 hammer_ref(&record->lock);
1904
1905                 /*
1906                  * A general directory-add contributes to our visibility.
1907                  *
1908                  * Otherwise it is probably a directory-delete or 
1909                  * delete-on-disk record and does not contribute to our
1910                  * visibility (but we can still flush it).
1911                  */
1912                 if (record->type == HAMMER_MEM_RECORD_ADD)
1913                         return(1);
1914                 return(0);
1915         } else {
1916                 /*
1917                  * If the parent is not in our flush group we cannot
1918                  * flush this record yet, there is no visibility.
1919                  * We tell the parent to reflush and mark ourselves
1920                  * so the parent knows it should flush us too.
1921                  */
1922                 pip->flags |= HAMMER_INODE_REFLUSH;
1923                 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1924                 return(-1);
1925         }
1926 }
1927
1928 /*
1929  * This is the core routine placing an inode into the FST_FLUSH state.
1930  */
1931 static void
1932 hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
1933 {
1934         int go_count;
1935
1936         /*
1937          * Set flush state and prevent the flusher from cycling into
1938          * the next flush group.  Do not place the ip on the list yet.
1939          * Inodes not in the idle state get an extra reference.
1940          */
1941         KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
1942         if (ip->flush_state == HAMMER_FST_IDLE)
1943                 hammer_ref(&ip->lock);
1944         ip->flush_state = HAMMER_FST_FLUSH;
1945         ip->flush_group = flg;
1946         ++ip->hmp->flusher.group_lock;
1947         ++ip->hmp->count_iqueued;
1948         ++hammer_count_iqueued;
1949         ++flg->total_count;
1950         hammer_redo_fifo_start_flush(ip);
1951
1952         /*
1953          * If the flush group reaches the autoflush limit we want to signal
1954          * the flusher.  This is particularly important for remove()s.
1955          *
1956          * If the default hammer_limit_reclaim is changed via sysctl
1957          * make sure we don't hit a degenerate case where we don't start
1958          * a flush but are blocked on further inode ops.
1959          */
1960         if (flg->total_count == hammer_autoflush ||
1961             flg->total_count >= hammer_limit_reclaim / 4)
1962                 flags |= HAMMER_FLUSH_SIGNAL;
1963
1964 #if 0
1965         /*
1966          * We need to be able to vfsync/truncate from the backend.
1967          *
1968          * XXX Any truncation from the backend will acquire the vnode
1969          *     independently.
1970          */
1971         KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
1972         if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
1973                 ip->flags |= HAMMER_INODE_VHELD;
1974                 vref(ip->vp);
1975         }
1976 #endif
1977
1978         /*
1979          * Figure out how many in-memory records we can actually flush
1980          * (not including inode meta-data, buffers, etc).
1981          */
1982         KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
1983         if (flags & HAMMER_FLUSH_RECURSION) {
1984                 /*
1985                  * If this is an upwards recursion we do not want to
1986                  * recurse down again!
1987                  */
1988                 go_count = 1;
1989 #if 0
1990         } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
1991                 /*
1992                  * No new records are added if we must complete a flush
1993                  * from a previous cycle, but we do have to move the records
1994                  * from the previous cycle to the current one.
1995                  */
1996 #if 0
1997                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1998                                    hammer_syncgrp_child_callback, NULL);
1999 #endif
2000                 go_count = 1;
2001 #endif
2002         } else {
2003                 /*
2004                  * Normal flush, scan records and bring them into the flush.
2005                  * Directory adds and deletes are usually skipped (they are
2006                  * grouped with the related inode rather than with the
2007                  * directory).
2008                  *
2009                  * go_count can be negative, which means the scan aborted
2010                  * due to the flush group being over-full and we should
2011                  * flush what we have.
2012                  */
2013                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2014                                    hammer_setup_child_callback, NULL);
2015         }
2016
2017         /*
2018          * This is a more involved test that includes go_count.  If we
2019          * can't flush, flag the inode and return.  If go_count is 0 we
2020          * are unable to flush any records in our rec_tree and
2021          * must ignore the XDIRTY flag.
2022          */
2023         if (go_count == 0) {
2024                 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
2025                         --ip->hmp->count_iqueued;
2026                         --hammer_count_iqueued;
2027
2028                         --flg->total_count;
2029                         ip->flush_state = HAMMER_FST_SETUP;
2030                         ip->flush_group = NULL;
2031 #if 0
2032                         if (ip->flags & HAMMER_INODE_VHELD) {
2033                                 ip->flags &= ~HAMMER_INODE_VHELD;
2034                                 vrele(ip->vp);
2035                         }
2036 #endif
2037
2038                         /*
2039                          * REFLUSH is needed to trigger dependency wakeups
2040                          * when an inode is in SETUP.
2041                          */
2042                         ip->flags |= HAMMER_INODE_REFLUSH;
2043                         if (flags & HAMMER_FLUSH_SIGNAL) {
2044                                 ip->flags |= HAMMER_INODE_RESIGNAL;
2045                                 hammer_flusher_async(ip->hmp, flg);
2046                         }
2047                         if (--ip->hmp->flusher.group_lock == 0)
2048                                 wakeup(&ip->hmp->flusher.group_lock);
2049                         return;
2050                 }
2051         }
2052
2053         /*
2054          * Snapshot the state of the inode for the backend flusher.
2055          *
2056          * We continue to retain save_trunc_off even when all truncations
2057          * have been resolved as an optimization to determine if we can
2058          * skip the B-Tree lookup for overwrite deletions.
2059          *
2060          * NOTE: The DELETING flag is a mod flag, but it is also sticky,
2061          * and stays in ip->flags.  Once set, it stays set until the
2062          * inode is destroyed.
2063          */
2064         if (ip->flags & HAMMER_INODE_TRUNCATED) {
2065                 KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
2066                 ip->sync_trunc_off = ip->trunc_off;
2067                 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
2068                 ip->flags &= ~HAMMER_INODE_TRUNCATED;
2069                 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
2070
2071                 /*
2072                  * The save_trunc_off used to cache whether the B-Tree
2073                  * holds any records past that point is not used until
2074                  * after the truncation has succeeded, so we can safely
2075                  * set it now.
2076                  */
2077                 if (ip->save_trunc_off > ip->sync_trunc_off)
2078                         ip->save_trunc_off = ip->sync_trunc_off;
2079         }
2080         ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
2081                            ~HAMMER_INODE_TRUNCATED);
2082         ip->sync_ino_leaf = ip->ino_leaf;
2083         ip->sync_ino_data = ip->ino_data;
2084         ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
2085 #ifdef DEBUG_TRUNCATE
2086         if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
2087                 kprintf("truncateS %016llx\n", ip->sync_trunc_off);
2088 #endif
2089
2090         /*
2091          * The flusher list inherits our inode and reference.
2092          */
2093         KKASSERT(flg->running == 0);
2094         RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip);
2095         if (--ip->hmp->flusher.group_lock == 0)
2096                 wakeup(&ip->hmp->flusher.group_lock);
2097
2098         if (flags & HAMMER_FLUSH_SIGNAL) {
2099                 hammer_flusher_async(ip->hmp, flg);
2100         }
2101 }
2102
2103 /*
2104  * Callback for scan of ip->rec_tree.  Try to include each record in our
2105  * flush.  ip->flush_group has been set but the inode has not yet been
2106  * moved into a flushing state.
2107  *
2108  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
2109  * both inodes.
2110  *
2111  * We return 1 for any record placed or found in FST_FLUSH, which prevents
2112  * the caller from shortcutting the flush.
2113  */
2114 static int
2115 hammer_setup_child_callback(hammer_record_t rec, void *data)
2116 {
2117         hammer_flush_group_t flg;
2118         hammer_inode_t target_ip;
2119         hammer_inode_t ip;
2120         int r;
2121
2122         /*
2123          * Records deleted or committed by the backend are ignored.
2124          * Note that the flush detects deleted frontend records at
2125          * multiple points to deal with races.  This is just the first
2126          * line of defense.  The only time HAMMER_RECF_DELETED_FE cannot
2127          * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
2128          * messes up link-count calculations.
2129          *
2130          * NOTE: Don't get confused between record deletion and, say,
2131          * directory entry deletion.  The deletion of a directory entry
2132          * which is on-media has nothing to do with the record deletion
2133          * flags.
2134          */
2135         if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
2136                           HAMMER_RECF_COMMITTED)) {
2137                 if (rec->flush_state == HAMMER_FST_FLUSH) {
2138                         KKASSERT(rec->flush_group == rec->ip->flush_group);
2139                         r = 1;
2140                 } else {
2141                         r = 0;
2142                 }
2143                 return(r);
2144         }
2145
2146         /*
2147          * If the record is in an idle state it has no dependencies and
2148          * can be flushed.
2149          */
2150         ip = rec->ip;
2151         flg = ip->flush_group;
2152         r = 0;
2153
2154         switch(rec->flush_state) {
2155         case HAMMER_FST_IDLE:
2156                 /*
2157                  * The record has no setup dependency; we can flush it.
2158                  */
2159                 KKASSERT(rec->target_ip == NULL);
2160                 rec->flush_state = HAMMER_FST_FLUSH;
2161                 rec->flush_group = flg;
2162                 ++flg->refs;
2163                 hammer_ref(&rec->lock);
2164                 r = 1;
2165                 break;
2166         case HAMMER_FST_SETUP:
2167                 /*
2168                  * The record has a setup dependency.  These are typically
2169                  * directory entry adds and deletes.  Such entries will be
2170                  * flushed when their inodes are flushed so we do not
2171                  * usually have to add them to the flush here.  However,
2172                  * if the target_ip has set HAMMER_INODE_CONN_DOWN then
2173                  * it is asking us to flush this record (and it).
2174                  */
2175                 target_ip = rec->target_ip;
2176                 KKASSERT(target_ip != NULL);
2177                 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
2178
2179                 /*
2180                  * If the target IP is already flushing in our group
2181                  * we could associate the record, but target_ip has
2182                  * already synced ino_data to sync_ino_data and we
2183                  * would also have to adjust nlinks.   Plus there are
2184                  * ordering issues for adds and deletes.
2185                  *
2186                  * Reflush downward if this is an ADD, and upward if
2187                  * this is a DEL.
2188                  */
2189                 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
2190                         if (rec->type == HAMMER_MEM_RECORD_ADD)
2191                                 ip->flags |= HAMMER_INODE_REFLUSH;
2192                         else
2193                                 target_ip->flags |= HAMMER_INODE_REFLUSH;
2194                         break;
2195                 } 
2196
2197                 /*
2198                  * Target IP is not yet flushing.  This can get complex
2199                  * because we have to be careful about the recursion.
2200                  *
2201                  * Directories create an issue for us in that if a flush
2202                  * of a directory is requested the expectation is to flush
2203                  * any pending directory entries, but this will cause the
2204                  * related inodes to recursively flush as well.  We can't
2205                  * really defer the operation, so we just pull in as many
2206                  * as we can and flush them along with the directory.
2207                  */
2208 #if 0
2209                 if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
2210                     (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
2211                         /*
2212                          * We aren't reclaiming and the target ip was not
2213                          * previously prevented from flushing due to this
2214                          * record dependency.  Do not flush this record.
2215                          */
2216                         /*r = 0;*/
2217                 } else
2218 #endif
2219                 if (flg->total_count + flg->refs >
2220                            ip->hmp->undo_rec_limit) {
2221                         /*
2222                          * Our flush group is over-full and we risk blowing
2223                          * out the UNDO FIFO.  Stop the scan, flush what we
2224                          * have, then reflush the directory.
2225                          *
2226                          * The directory may be forced through multiple
2227                          * flush groups before it can be completely
2228                          * flushed.
2229                          */
2230                         ip->flags |= HAMMER_INODE_RESIGNAL |
2231                                      HAMMER_INODE_REFLUSH;
2232                         r = -1;
2233                 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
2234                         /*
2235                          * If the target IP is not flushing we can force
2236                          * it to flush, even if it is unable to write out
2237                          * any of its own records we have at least one in
2238                          * hand that we CAN deal with.
2239                          */
2240                         rec->flush_state = HAMMER_FST_FLUSH;
2241                         rec->flush_group = flg;
2242                         ++flg->refs;
2243                         hammer_ref(&rec->lock);
2244                         hammer_flush_inode_core(target_ip, flg,
2245                                                 HAMMER_FLUSH_RECURSION);
2246                         r = 1;
2247                 } else {
2248                         /*
2249                          * General or delete-on-disk record.
2250                          *
2251                          * XXX this needs help.  If a delete-on-disk we could
2252                          * disconnect the target.  If the target has its own
2253                          * dependencies they really need to be flushed.
2254                          *
2255                          * XXX
2256                          */
2257                         rec->flush_state = HAMMER_FST_FLUSH;
2258                         rec->flush_group = flg;
2259                         ++flg->refs;
2260                         hammer_ref(&rec->lock);
2261                         hammer_flush_inode_core(target_ip, flg,
2262                                                 HAMMER_FLUSH_RECURSION);
2263                         r = 1;
2264                 }
2265                 break;
2266         case HAMMER_FST_FLUSH:
2267                 /* 
2268                  * The flush_group should already match.
2269                  */
2270                 KKASSERT(rec->flush_group == flg);
2271                 r = 1;
2272                 break;
2273         }
2274         return(r);
2275 }
2276
2277 #if 0
2278 /*
2279  * This version just moves records already in a flush state to the new
2280  * flush group and that is it.
2281  */
2282 static int
2283 hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
2284 {
2285         hammer_inode_t ip = rec->ip;
2286
2287         switch(rec->flush_state) {
2288         case HAMMER_FST_FLUSH:
2289                 KKASSERT(rec->flush_group == ip->flush_group);
2290                 break;
2291         default:
2292                 break;
2293         }
2294         return(0);
2295 }
2296 #endif
2297
2298 /*
2299  * Wait for a previously queued flush to complete.
2300  *
2301  * If a critical error occurred we don't try to wait.
2302  */
2303 void
2304 hammer_wait_inode(hammer_inode_t ip)
2305 {
2306         hammer_flush_group_t flg;
2307
2308         flg = NULL;
2309         if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
2310                 while (ip->flush_state != HAMMER_FST_IDLE &&
2311                        (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
2312                         if (ip->flush_state == HAMMER_FST_SETUP)
2313                                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2314                         if (ip->flush_state != HAMMER_FST_IDLE) {
2315                                 ip->flags |= HAMMER_INODE_FLUSHW;
2316                                 tsleep(&ip->flags, 0, "hmrwin", 0);
2317                         }
2318                 }
2319         }
2320 }
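/*
 * Illustrative only: an fsync-style caller would typically pair a
 * signalled flush with the wait above (caller assumed to hold a
 * referenced ip):
 */
#if 0
        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
        hammer_wait_inode(ip);
#endif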
2321
2322 /*
2323  * Called by the backend code when a flush has been completed.
2324  * The inode has already been removed from the flush list.
2325  *
2326  * A pipelined flush can occur, in which case we must re-enter the
2327  * inode on the list and re-copy its fields.
2328  */
2329 void
2330 hammer_flush_inode_done(hammer_inode_t ip, int error)
2331 {
2332         hammer_mount_t hmp;
2333         int dorel;
2334
2335         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
2336
2337         hmp = ip->hmp;
2338
2339         /*
2340          * Auto-reflush if the backend could not completely flush
2341          * the inode.  This fixes a case where a deferred buffer flush
2342          * could cause fsync to return early.
2343          */
2344         if (ip->sync_flags & HAMMER_INODE_MODMASK)
2345                 ip->flags |= HAMMER_INODE_REFLUSH;
2346
2347         /*
2348          * Merge left-over flags back into the frontend and fix the state.
2349          * Incomplete truncations are retained by the backend.
2350          */
2351         ip->error = error;
2352         ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
2353         ip->sync_flags &= HAMMER_INODE_TRUNCATED;
2354
2355         /*
2356          * The backend may have adjusted nlinks, so if the adjusted nlinks
2357          * does not match the frontend's, set the frontend's DDIRTY flag again.
2358          */
2359         if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
2360                 ip->flags |= HAMMER_INODE_DDIRTY;
2361
2362         /*
2363          * Fix up the dirty buffer status.
2364          */
2365         if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
2366                 ip->flags |= HAMMER_INODE_BUFS;
2367         }
2368         hammer_redo_fifo_end_flush(ip);
2369
2370         /*
2371          * Re-set the XDIRTY flag if some of the inode's in-memory records
2372          * could not be flushed.
2373          */
2374         KKASSERT((RB_EMPTY(&ip->rec_tree) &&
2375                   (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
2376                  (!RB_EMPTY(&ip->rec_tree) &&
2377                   (ip->flags & HAMMER_INODE_XDIRTY) != 0));
2378
2379         /*
2380          * Do not lose track of inodes which no longer have vnode
2381          * associations, otherwise they may never get flushed again.
2382          *
2383          * The reflush flag can be set superfluously, causing extra pain
2384          * for no reason.  If the inode is no longer modified it no longer
2385          * needs to be flushed.
2386          */
2387         if (ip->flags & HAMMER_INODE_MODMASK) {
2388                 if (ip->vp == NULL)
2389                         ip->flags |= HAMMER_INODE_REFLUSH;
2390         } else {
2391                 ip->flags &= ~HAMMER_INODE_REFLUSH;
2392         }
2393
2394         /*
2395          * Adjust the flush state.
2396          */
2397         if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2398                 /*
2399                  * We were unable to flush out all our records, leave the
2400                  * inode in a flush state and in the current flush group.
2401                  * The flush group will be re-run.
2402                  *
2403                  * This occurs if the UNDO block gets too full or there is
2404                  * too much dirty meta-data and allows the flusher to
2405                  * finalize the UNDO block and then re-flush.
2406                  */
2407                 ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
2408                 dorel = 0;
2409         } else {
2410                 /*
2411                  * Remove from the flush_group
2412                  */
2413                 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
2414                 ip->flush_group = NULL;
2415
2416 #if 0
2417                 /*
2418                  * Clean up the vnode ref and tracking counts.
2419                  */
2420                 if (ip->flags & HAMMER_INODE_VHELD) {
2421                         ip->flags &= ~HAMMER_INODE_VHELD;
2422                         vrele(ip->vp);
2423                 }
2424 #endif
2425                 --hmp->count_iqueued;
2426                 --hammer_count_iqueued;
2427
2428                 /*
2429                  * And adjust the state.
2430                  */
2431                 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
2432                         ip->flush_state = HAMMER_FST_IDLE;
2433                         dorel = 1;
2434                 } else {
2435                         ip->flush_state = HAMMER_FST_SETUP;
2436                         dorel = 0;
2437                 }
2438
2439                 /*
2440                  * If the frontend is waiting for a flush to complete,
2441                  * wake it up.
2442                  */
2443                 if (ip->flags & HAMMER_INODE_FLUSHW) {
2444                         ip->flags &= ~HAMMER_INODE_FLUSHW;
2445                         wakeup(&ip->flags);
2446                 }
2447
2448                 /*
2449                  * If the frontend made more changes and requested another
2450                  * flush, then try to get it running.
2451                  *
2452                  * Reflushes are aborted when the inode is errored out.
2453                  */
2454                 if (ip->flags & HAMMER_INODE_REFLUSH) {
2455                         ip->flags &= ~HAMMER_INODE_REFLUSH;
2456                         if (ip->flags & HAMMER_INODE_RESIGNAL) {
2457                                 ip->flags &= ~HAMMER_INODE_RESIGNAL;
2458                                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2459                         } else {
2460                                 hammer_flush_inode(ip, 0);
2461                         }
2462                 }
2463         }
2464
2465         /*
2466          * If we have no parent dependencies we can clear CONN_DOWN
2467          */
2468         if (TAILQ_EMPTY(&ip->target_list))
2469                 ip->flags &= ~HAMMER_INODE_CONN_DOWN;
2470
2471         /*
2472          * If the inode is now clean drop the space reservation.
2473          */
2474         if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
2475             (ip->flags & HAMMER_INODE_RSV_INODES)) {
2476                 ip->flags &= ~HAMMER_INODE_RSV_INODES;
2477                 --hmp->rsv_inodes;
2478         }
2479
2480         if (dorel)
2481                 hammer_rel_inode(ip, 0);
2482 }
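/*
 * Illustrative only: the backend flusher is assumed to bracket each inode
 * roughly as follows; this is a sketch, not the real flusher loop.
 */
#if 0
        error = hammer_sync_inode(trans, ip);   /* sync records to media */
        hammer_flush_inode_done(ip, error);     /* re-enter or idle the ip */
#endif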
2483
2484 /*
2485  * Called from hammer_sync_inode() to synchronize in-memory records
2486  * to the media.
2487  */
2488 static int
2489 hammer_sync_record_callback(hammer_record_t record, void *data)
2490 {
2491         hammer_cursor_t cursor = data;
2492         hammer_transaction_t trans = cursor->trans;
2493         hammer_mount_t hmp = trans->hmp;
2494         int error;
2495
2496         /*
2497          * Skip records that do not belong to the current flush.
2498          */
2499         ++hammer_stats_record_iterations;
2500         if (record->flush_state != HAMMER_FST_FLUSH)
2501                 return(0);
2502
2503 #if 1
2504         if (record->flush_group != record->ip->flush_group) {
2505                 kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group, record->ip->flush_group);
2506                 if (hammer_debug_critical)
2507                         Debugger("blah2");
2508                 return(0);
2509         }
2510 #endif
2511         KKASSERT(record->flush_group == record->ip->flush_group);
2512
2513         /*
2514          * Interlock the record using the BE flag.  Once BE is set the
2515          * frontend cannot change the state of FE.
2516          *
2517          * NOTE: If FE is set prior to us setting BE we still sync the
2518          * record out, but the flush completion code converts it to 
2519          * a delete-on-disk record instead of destroying it.
2520          */
2521         KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
2522         record->flags |= HAMMER_RECF_INTERLOCK_BE;
2523
2524         /*
2525          * The backend has already disposed of the record.
2526          */
2527         if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
2528                 error = 0;
2529                 goto done;
2530         }
2531
2532         /*
2533          * If the whole inode is being deleted, all on-disk records will
2534          * be deleted very soon and we can't sync any new records to disk
2535          * because they will be deleted in the same transaction they were
2536          * created in (delete_tid == create_tid), which will assert.
2537          *
2538          * XXX There may be a case with RECORD_ADD with DELETED_FE set
2539          * that we currently panic on.
2540          */
2541         if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
2542                 switch(record->type) {
2543                 case HAMMER_MEM_RECORD_DATA:
2544                         /*
2545                          * We don't have to do anything, if the record was
2546                          * committed the space will have been accounted for
2547                          * in the blockmap.
2548                          */
2549                         /* fall through */
2550                 case HAMMER_MEM_RECORD_GENERAL:
2551                         /*
2552                          * Set deleted-by-backend flag.  Do not set the
2553                          * backend committed flag, because we are throwing
2554                          * the record away.
2555                          */
2556                         record->flags |= HAMMER_RECF_DELETED_BE;
2557                         ++record->ip->rec_generation;
2558                         error = 0;
2559                         goto done;
2560                 case HAMMER_MEM_RECORD_ADD:
2561                         panic("hammer_sync_record_callback: illegal add "
2562                               "during inode deletion record %p", record);
2563                         break; /* NOT REACHED */
2564                 case HAMMER_MEM_RECORD_INODE:
2565                         panic("hammer_sync_record_callback: attempt to "
2566                               "sync inode record %p?", record);
2567                         break; /* NOT REACHED */
2568                 case HAMMER_MEM_RECORD_DEL:
2569                         /* 
2570                          * Follow through and issue the on-disk deletion
2571                          */
2572                         break;
2573                 }
2574         }
2575
2576         /*
2577          * If DELETED_FE is set special handling is needed for directory
2578          * entries.  Dependent pieces related to the directory entry may
2579          * have already been synced to disk.  If this occurs we have to
2580          * sync the directory entry and then change the in-memory record
2581          * from an ADD to a DELETE to cover the fact that it's been
2582          * deleted by the frontend.
2583          *
2584          * A directory delete covering record (MEM_RECORD_DEL) can never
2585          * be deleted by the frontend.
2586          *
2587          * Any other record type (aka DATA) can be deleted by the frontend.
2588          * XXX At the moment the flusher must skip it because there may
2589          * be another data record in the flush group for the same block,
2590          * meaning that some frontend data changes can leak into the backend's
2591          * synchronization point.
2592          */
2593         if (record->flags & HAMMER_RECF_DELETED_FE) {
2594                 if (record->type == HAMMER_MEM_RECORD_ADD) {
2595                         /*
2596                          * Convert a front-end deleted directory-add to
2597                          * a directory-delete entry later.
2598                          */
2599                         record->flags |= HAMMER_RECF_CONVERT_DELETE;
2600                 } else {
2601                         /*
2602                          * Dispose of the record (race case).  Mark as
2603                          * deleted by backend (and not committed).
2604                          */
2605                         KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
2606                         record->flags |= HAMMER_RECF_DELETED_BE;
2607                         ++record->ip->rec_generation;
2608                         error = 0;
2609                         goto done;
2610                 }
2611         }
2612
2613         /*
2614          * Assign the create_tid for new records.  Deletions already
2615          * have the record's entire key properly set up.
2616          */
2617         if (record->type != HAMMER_MEM_RECORD_DEL) {
2618                 record->leaf.base.create_tid = trans->tid;
2619                 record->leaf.create_ts = trans->time32;
2620         }
2621
2622         /*
2623          * This actually moves the record to the on-media B-Tree.  We
2624          * must also generate REDO_TERM entries in the UNDO/REDO FIFO
2625          * indicating that the related REDO_WRITE(s) have been committed.
2626          *
2627          * During recovery any REDO_TERM's within the nominal recovery span
2628          * are ignored since the related meta-data is being undone, causing
2629          * any matching REDO_WRITEs to execute.  The REDO_TERMs outside
2630          * the nominal recovery span will match against REDO_WRITEs and
2631          * prevent them from being executed (because the meta-data has
2632          * already been synchronized).
2633          */
2634         if (record->flags & HAMMER_RECF_REDO) {
2635                 KKASSERT(record->type == HAMMER_MEM_RECORD_DATA);
2636                 hammer_generate_redo(trans, record->ip,
2637                                      record->leaf.base.key -
2638                                          record->leaf.data_len,
2639                                      HAMMER_REDO_TERM_WRITE,
2640                                      NULL,
2641                                      record->leaf.data_len);
2642         }
2643         for (;;) {
2644                 error = hammer_ip_sync_record_cursor(cursor, record);
2645                 if (error != EDEADLK)
2646                         break;
2647                 hammer_done_cursor(cursor);
2648                 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
2649                                            record->ip);
2650                 if (error)
2651                         break;
2652         }
2653         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
2654
2655         if (error)
2656                 error = -error;
2657 done:
2658         hammer_flush_record_done(record, error);
2659
2660         /*
2661          * Do partial finalization if we have built up too many dirty
2662          * buffers.  Otherwise a buffer cache deadlock can occur when
2663          * doing things like creating tens of thousands of tiny files.
2664          *
2665          * We must release our cursor lock to avoid a 3-way deadlock
2666          * due to the exclusive sync lock the finalizer must get.
2667          *
2668          * WARNING: See warnings in hammer_unlock_cursor() function.
2669          */
2670         if (hammer_flusher_meta_limit(hmp)) {
2671                 hammer_unlock_cursor(cursor);
2672                 hammer_flusher_finalize(trans, 0);
2673                 hammer_lock_cursor(cursor);
2674         }
2675
2676         return(error);
2677 }
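/*
 * Illustrative only: the callback above is designed to be driven by an
 * RB_SCAN over ip->rec_tree with the cursor as the callback data, in the
 * same style as the other RB_SCAN calls in this file (call site assumed):
 */
#if 0
        error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
                        hammer_sync_record_callback, &cursor);
#endif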
2678
2679 /*
2680  * Backend function called by the flusher to sync an inode to media.
2681  */
2682 int
2683 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
2684 {
2685         struct hammer_cursor cursor;
2686         hammer_node_t tmp_node;
2687         hammer_record_t depend;
2688         hammer_record_t next;
2689         int error, tmp_error;
2690         u_int64_t nlinks;
2691
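             /*
              * Nothing to do if no dirty state is queued for the backend.
              */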
2692         if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
2693                 return(0);
2694
2695         error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2696         if (error)
2697                 goto done;
2698
2699         /*
2700          * Any directory records referencing this inode which are not in
2701          * our current flush group must adjust our nlink count for the
2702          * purposes of synchronizing to disk.
2703          *
2704          * Records which are in our flush group can be unlinked from our
2705          * inode now, potentially allowing the inode to be physically
2706          * deleted.
2707          *
2708          * This cannot block.
2709          */
2710         nlinks = ip->ino_data.nlinks;
2711         next = TAILQ_FIRST(&ip->target_list);
2712         while ((depend = next) != NULL) {
2713                 next = TAILQ_NEXT(depend, target_entry);
2714                 if (depend->flush_state == HAMMER_FST_FLUSH &&
2715                     depend->flush_group == ip->flush_group) {
2716                         /*
2717                          * If this is an ADD that was deleted by the frontend
2718                          * the frontend nlinks count will have already been
2719                          * decremented, but the backend is going to sync its
2720                          * directory entry and must account for it.  The
2721                          * record will be converted to a delete-on-disk when
2722                          * it gets synced.
2723                          *
2724                          * If the ADD was not deleted by the frontend we
2725          * can remove the dependency from our target_list.
2726                          */
2727                         if (depend->flags & HAMMER_RECF_DELETED_FE) {
2728                                 ++nlinks;
2729                         } else {
2730                                 TAILQ_REMOVE(&ip->target_list, depend,
2731                                              target_entry);
2732                                 depend->target_ip = NULL;
2733                         }
2734                 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
2735                         /*
2736                          * Not part of our flush group and not deleted by
2737                          * the front-end, adjust the link count synced to
2738                          * the media (undo what the frontend did when it
2739                          * queued the record).
2740                          */
2741                         KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
2742                         switch(depend->type) {
2743                         case HAMMER_MEM_RECORD_ADD:
2744                                 --nlinks;
2745                                 break;
2746                         case HAMMER_MEM_RECORD_DEL:
2747                                 ++nlinks;
2748                                 break;
2749                         default:
2750                                 break;
2751                         }
2752                 }
2753         }
2754
2755         /*
2756          * Set dirty if we had to modify the link count.
2757          */
2758         if (ip->sync_ino_data.nlinks != nlinks) {
2759                 KKASSERT((int64_t)nlinks >= 0);
2760                 ip->sync_ino_data.nlinks = nlinks;
2761                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
2762         }
2763
2764         /*
2765          * If there is a truncation queued, destroy any data past the (aligned)
2766          * truncation point.  Userland will have dealt with the buffer
2767          * containing the truncation point for us.
2768          *
2769          * We don't flush pending frontend data buffers until after we've
2770          * dealt with the truncation.
2771          */
2772         if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2773                 /*
2774                  * Interlock trunc_off.  The VOP front-end may continue to
2775                  * make adjustments to it while we are blocked.
2776                  */
2777                 off_t trunc_off;
2778                 off_t aligned_trunc_off;
2779                 int blkmask;
2780
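                     /*
                      * Round the truncation point up to a block boundary.
                      * The frontend has already taken care of any partial
                      * block at the truncation point itself.
                      */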
2781                 trunc_off = ip->sync_trunc_off;
2782                 blkmask = hammer_blocksize(trunc_off) - 1;
2783                 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
2784
2785                 /*
2786                  * Delete any whole blocks on-media.  The front-end has
2787                  * already cleaned out any partial block and made it
2788                  * pending.  The front-end may have updated trunc_off
2789                  * while we were blocked so we only use sync_trunc_off.
2790                  *
2791                  * This operation can blow out the buffer cache; EWOULDBLOCK
2792                  * means we were unable to complete the deletion.  The
2793                  * deletion will update sync_trunc_off in that case.
2794                  */
2795                 error = hammer_ip_delete_range(&cursor, ip,
2796                                                 aligned_trunc_off,
2797                                                 0x7FFFFFFFFFFFFFFFLL, 2);
2798                 if (error == EWOULDBLOCK) {
2799                         ip->flags |= HAMMER_INODE_WOULDBLOCK;
2800                         error = 0;
2801                         goto defer_buffer_flush;
2802                 }
2803
2804                 if (error)
2805                         goto done;
2806
2807                 /*
2808                  * Generate a REDO_TERM_TRUNC entry in the UNDO/REDO FIFO.
2809                  *
2810                  * XXX we do this even if we did not previously generate
2811                  * a REDO_TRUNC record.  This operation may enclose the
2812                  * range for multiple prior truncation entries in the REDO
2813                  * log.
2814                  */
2815                 if (trans->hmp->version >= HAMMER_VOL_VERSION_FOUR &&
2816                     (ip->flags & HAMMER_INODE_RDIRTY)) {
2817                         hammer_generate_redo(trans, ip, aligned_trunc_off,
2818                                              HAMMER_REDO_TERM_TRUNC,
2819                                              NULL, 0);
2820                 }
2821
2822                 /*
2823                  * Clear the truncation flag on the backend after we have
2824                  * completed the deletions.  Backend data is now good again
2825                  * (including new records we are about to sync, below).
2826                  *
2827                  * Leave sync_trunc_off intact.  As we write additional
2828                  * records the backend will update sync_trunc_off.  This
2829                  * tells the backend whether it can skip the overwrite
2830                  * test.  This should work properly even when the backend
2831                  * writes full blocks where the truncation point straddles
2832                  * the block because the comparison is against the base
2833                  * offset of the record.
2834                  */
2835                 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2836                 /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
2837         } else {
2838                 error = 0;
2839         }
2840
2841         /*
2842          * Now sync related records.  These will typically be directory
2843          * entries, records tracking direct-writes, or delete-on-disk records.
2844          */
2845         if (error == 0) {
2846                 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2847                                     hammer_sync_record_callback, &cursor);
2848                 if (tmp_error < 0)
2849                         tmp_error = -tmp_error;
2850                 if (tmp_error)
2851                         error = tmp_error;
2852         }
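             /*
              * Remember where the record scan left off so the next sync of
              * this inode can start its cursor nearby.
              */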
2853         hammer_cache_node(&ip->cache[1], cursor.node);
2854
2855         /*
2856          * Re-seek for inode update, assuming our cache hasn't been ripped
2857          * out from under us.
2858          */
2859         if (error == 0) {
2860                 tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
2861                 if (tmp_node) {
2862                         hammer_cursor_downgrade(&cursor);
2863                         hammer_lock_sh(&tmp_node->lock);
2864                         if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
2865                                 hammer_cursor_seek(&cursor, tmp_node, 0);
2866                         hammer_unlock(&tmp_node->lock);
2867                         hammer_rel_node(tmp_node);
2868                 }
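                     /*
                      * The re-seek is advisory only; ignore any error from
                      * the node lookup.
                      */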
2869                 error = 0;
2870         }
2871
2872         /*
2873          * If we are deleting the inode the frontend had better not have
2874          * any active references on elements making up the inode.
2875          *
2876          * The call to hammer_ip_delete_clean() cleans up auxiliary records
2877          * but not DB or DATA records.  Those must have already been deleted
2878          * by the normal truncation mechanic.
2879          */
2880         if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
2881             RB_EMPTY(&ip->rec_tree) &&
2882             (ip->sync_flags & HAMMER_INODE_DELETING) &&
2883             (ip->flags & HAMMER_INODE_DELETED) == 0) {
2884                 int count1 = 0;
2885
2886                 error = hammer_ip_delete_clean(&cursor, ip, &count1);
2887                 if (error == 0) {
2888                         ip->flags |= HAMMER_INODE_DELETED;
2889                         ip->sync_flags &= ~HAMMER_INODE_DELETING;
2890                         ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2891                         KKASSERT(RB_EMPTY(&ip->rec_tree));
2892
2893                         /*
2894                          * Set delete_tid in both the frontend and backend
2895                          * copy of the inode record.  The DELETED flag handles
2896                          * this; do not set DDIRTY.
2897                          */
2898                         ip->ino_leaf.base.delete_tid = trans->tid;
2899                         ip->sync_ino_leaf.base.delete_tid = trans->tid;
2900                         ip->ino_leaf.delete_ts = trans->time32;
2901                         ip->sync_ino_leaf.delete_ts = trans->time32;
2902
2904                         /*
2905                          * Adjust the inode count in the volume header
2906                          */
2907                         hammer_sync_lock_sh(trans);
2908                         if (ip->flags & HAMMER_INODE_ONDISK) {
2909                                 hammer_modify_volume_field(trans,
2910                                                            trans->rootvol,
2911                                                            vol0_stat_inodes);
2912                                 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
2913                                 hammer_modify_volume_done(trans->rootvol);
2914                         }
2915                         hammer_sync_unlock(trans);
2916                 }
2917         }
2918
2919         if (error)
2920                 goto done;
2921         ip->sync_flags &= ~HAMMER_INODE_BUFS;
2922
2923 defer_buffer_flush:
2924         /*
2925          * Now update the inode's on-disk inode-data and/or on-disk record.
2926          * DELETED and ONDISK are managed only in ip->flags.
2927          *
2928          * In the case of a deferred buffer flush we still update the on-disk
2929          * inode to satisfy visibility requirements if there happen to be
2930          * directory dependencies.
2931          */
2932         switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
2933         case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
2934                 /*
2935                  * If deleted and on-disk, don't set any additional flags;
2936                  * the delete flag takes care of things.
2937                  *
2938                  * Clear flags which may have been set by the frontend.
2939                  */
2940                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2941                                     HAMMER_INODE_SDIRTY |
2942                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2943                                     HAMMER_INODE_DELETING);
2944                 break;
2945         case HAMMER_INODE_DELETED:
2946                 /*
2947                  * Take care of the case where a deleted inode was never
2948                  * flushed to the disk in the first place.
2949                  *
2950                  * Clear flags which may have been set by the frontend.
2951                  */
2952                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2953                                     HAMMER_INODE_SDIRTY |
2954                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2955                                     HAMMER_INODE_DELETING);
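                     /*
                      * Throw away any in-memory records still queued to the
                      * inode; they will never make it to the media.
                      */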
2956                 while (RB_ROOT(&ip->rec_tree)) {
2957                         hammer_record_t record = RB_ROOT(&ip->rec_tree);
2958                         hammer_ref(&record->lock);
2959                         KKASSERT(record->lock.refs == 1);
2960                         record->flags |= HAMMER_RECF_DELETED_BE;
2961                         ++record->ip->rec_generation;
2962                         hammer_rel_mem_record(record);
2963                 }
2964                 break;
2965         case HAMMER_INODE_ONDISK:
2966                 /*
2967                  * If already on-disk, do not set any additional flags.
2968                  */
2969                 break;
2970         default:
2971                 /*
2972                  * If not on-disk and not deleted, set DDIRTY to force
2973                  * an initial record to be written.
2974                  *
2975                  * Also set the create_tid in both the frontend and backend
2976                  * copy of the inode record.
2977                  */
2978                 ip->ino_leaf.base.create_tid = trans->tid;
2979                 ip->ino_leaf.create_ts = trans->time32;
2980                 ip->sync_ino_leaf.base.create_tid = trans->tid;
2981                 ip->sync_ino_leaf.create_ts = trans->time32;
2982                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
2983                 break;
2984         }
2985
2986         /*
2987          * If DDIRTY or SDIRTY is set, write out a new record.
2988          * If the inode is already on-disk the old record is marked as
2989          * deleted.
2990          *
2991          * If DELETED is set hammer_update_inode() will delete the existing
2992          * record without writing out a new one.
2993          *
2994          * If *ONLY* the ITIMES flag is set we can update the record in-place.
2995          */
2996         if (ip->flags & HAMMER_INODE_DELETED) {
2997                 error = hammer_update_inode(&cursor, ip);
2998         } else 
2999         if (!(ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY)) &&
3000             (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
3001                 error = hammer_update_itimes(&cursor, ip);
3002         } else
3003         if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY |
3004                               HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
3005                 error = hammer_update_inode(&cursor, ip);
3006         }
3007 done:
3008         if (error) {
3009                 hammer_critical_error(ip->hmp, ip, error,
3010                                       "while syncing inode");
3011         }
3012         hammer_done_cursor(&cursor);
3013         return(error);
3014 }
3015
3016 /*
3017  * This routine is called when the OS is no longer actively referencing
3018  * the inode (but might still be keeping it cached), or when releasing
3019  * the last reference to an inode.
3020  *
3021  * At this point if the inode's nlinks count is zero we want to destroy
3022  * it, which may mean destroying it on-media too.
3023  */
3024 void
3025 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
3026 {
3027         struct vnode *vp;
3028
3029         /*
3030          * Set the DELETING flag when the link count drops to 0 and the
3031          * OS no longer has any opens on the inode.
3032          *
3033          * The backend will clear DELETING (a mod flag) and set DELETED
3034          * (a state flag) when it is actually able to perform the
3035          * operation.
3036          *
3037          * Don't reflag the deletion if the flusher is currently syncing
3038          * one that was already flagged.  A previously set DELETING flag
3039          * may move back and forth between ip->flags and ip->sync_flags
3040          * until the operation is completely done.
3041          */
3042         if (ip->ino_data.nlinks == 0 &&
3043             ((ip->flags | ip->sync_flags) & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
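                     /*
                      * Queue the deletion along with a truncation to offset 0
                      * so the backend destroys all of the inode's data.
                      */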
3044                 ip->flags |= HAMMER_INODE_DELETING;
3045                 ip->flags |= HAMMER_INODE_TRUNCATED;
3046                 ip->trunc_off = 0;
3047                 vp = NULL;
3048                 if (getvp) {
3049                         if (hammer_get_vnode(ip, &vp) != 0)
3050                                 return;
3051                 }
3052
3053                 /*
3054                  * Final cleanup: toss buffer cache data, release the vnode.
3055                  */
3056                 if (ip->vp)
3057                         nvtruncbuf(ip->vp, 0, HAMMER_BUFSIZE, 0);
3058                 if (getvp)
3059                         vput(vp);
3060         }
3061 }
3062
3063 /*
3064  * After potentially resolving a dependency the inode is tested
3065  * to determine whether it needs to be reflushed.
3066  */
3067 void
3068 hammer_test_inode(hammer_inode_t ip)
3069 {
3070         if (ip->flags & HAMMER_INODE_REFLUSH) {
3071                 ip->flags &= ~HAMMER_INODE_REFLUSH;
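                     /*
                      * Hold an extra reference across the flush so the inode
                      * cannot be reclaimed out from under us.
                      */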
3072                 hammer_ref(&ip->lock);
3073                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
3074                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
3075                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
3076                 } else {
3077                         hammer_flush_inode(ip, 0);
3078                 }
3079                 hammer_rel_inode(ip, 0);
3080         }
3081 }
3082
3083 /*
3084  * Clear the RECLAIM flag on an inode.  This occurs when the inode is
3085  * reassociated with a vp or just before it gets freed.
3086  *
3087  * Pipeline wakeups to threads blocked due to an excessive number of
3088  * detached inodes.  This typically occurs when atime updates accumulate
3089  * while scanning a directory tree.
3090  */
3091 static void
3092 hammer_inode_wakereclaims(hammer_inode_t ip)
3093 {
3094         struct hammer_reclaim *reclaim;
3095         hammer_mount_t hmp = ip->hmp;
3096
3097         if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
3098                 return;
3099
3100         --hammer_count_reclaiming;
3101         --hmp->inode_reclaims;
3102         ip->flags &= ~HAMMER_INODE_RECLAIM;
3103
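             /*
              * Wake pipelined waiters in FIFO order, but only keep draining
              * the queue while the backlog is at or below half the limit.
              */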
3104         while ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
3105                 if (reclaim->count > 0 && --reclaim->count == 0) {
3106                         TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
3107                         wakeup(reclaim);
3108                 }
3109                 if (hmp->inode_reclaims > hammer_limit_reclaim / 2)
3110                         break;
3111         }
3112 }
3113
3114 /*
3115  * Set up our reclaim pipeline.  We only let so many detached (and dirty)
3116  * inodes build up before we start blocking.  This routine is called
3117  * if a new inode is created or an inode is loaded from media.
3118  *
3119  * When we block we don't care *which* inode has finished reclaiming,
3120  * as long as one does.
3121  */
3122 void
3123 hammer_inode_waitreclaims(hammer_mount_t hmp)
3124 {
3125         struct hammer_reclaim reclaim;
3126
3127         if (hmp->inode_reclaims < hammer_limit_reclaim)
3128                 return;
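             /*
              * Queue ourselves and wait for a wakeup from the pipeline or a
              * one-second timeout.  On timeout our entry is still queued and
              * must be removed here.
              */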
3129         reclaim.count = 1;
3130         TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
3131         tsleep(&reclaim, 0, "hmrrcm", hz);
3132         if (reclaim.count > 0)
3133                 TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
3134 }
3135
3136 #if 0
3137
3138 /*
3139  * XXX not used, doesn't work very well due to the large batching nature
3140  * of flushes.
3141  *
3142  * A larger-than-normal backlog of inodes is sitting in the flusher;
3143  * enforce a general slowdown to let it catch up.  This routine is only
3144  * called on completion of a non-flusher-related transaction which
3145  * performed B-Tree node I/O.
3146  *
3147  * It is possible for the flusher to stall in a continuous load.
3148  * blogbench -i1000 -o seems to do a good job generating this sort of load.
3149  * If the flusher is unable to catch up the inode count can bloat until
3150  * we run out of kvm.
3151  *
3152  * This is a bit of a hack.
3153  */
3154 void
3155 hammer_inode_waithard(hammer_mount_t hmp)
3156 {
3157         /*
3158          * Hysteresis.
3159          */
3160         if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
3161                 if (hmp->inode_reclaims < hammer_limit_reclaim / 2 &&
3162                     hmp->count_iqueued < hmp->count_inodes / 20) {
3163                         hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
3164                         return;
3165                 }
3166         } else {
3167                 if (hmp->inode_reclaims < hammer_limit_reclaim ||
3168                     hmp->count_iqueued < hmp->count_inodes / 10) {
3169                         return;
3170                 }
3171                 hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
3172         }
3173
3174         /*
3175          * Block for one flush cycle.
3176          */
3177         hammer_flusher_wait_next(hmp);
3178 }
3179
3180 #endif