sys/vfs/hammer: Fix and add comments on root inode
sys/vfs/hammer/hammer_inode.c
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "hammer.h"
#include <vm/vm_extern.h>

static int      hammer_unload_inode(struct hammer_inode *ip);
static void     hammer_free_inode(hammer_inode_t ip);
static void     hammer_flush_inode_core(hammer_inode_t ip,
                                        hammer_flush_group_t flg, int flags);
static int      hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int      hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int      hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
                                        hammer_flush_group_t flg);
static int      hammer_setup_parent_inodes_helper(hammer_record_t record,
                                        int depth, hammer_flush_group_t flg);
static void     hammer_inode_wakereclaims(hammer_inode_t ip);
static struct hammer_inostats *hammer_inode_inostats(hammer_mount_t hmp,
                                        pid_t pid);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif

struct krate hammer_gen_krate = { 1 };

/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_localization < ip2->obj_localization)
                return(-1);
        if (ip1->obj_localization > ip2->obj_localization)
                return(1);
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}

int
hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->redo_fifo_start < ip2->redo_fifo_start)
                return(-1);
        if (ip1->redo_fifo_start > ip2->redo_fifo_start)
                return(1);
        return(0);
}

/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_localization < ip->obj_localization)
                return(-1);
        if (info->obj_localization > ip->obj_localization)
                return(1);
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}

/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
        hammer_inode_info_t info = data;

        if (ip->obj_localization > info->obj_localization)
                return(1);
        if (ip->obj_localization < info->obj_localization)
                return(-1);
        if (ip->obj_id > info->obj_id)
                return(1);
        if (ip->obj_id < info->obj_id)
                return(-1);
        return(0);
}

/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
        u_int32_t localization = *(u_int32_t *)data;
        if (ip->obj_localization > localization)
                return(1);
        if (ip->obj_localization < localization)
                return(-1);
        return(0);
}

/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
        if (p1->localization < p2->localization)
                return(-1);
        if (p1->localization > p2->localization)
                return(1);
        return(0);
}


RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
             hammer_pfs_rb_compare, u_int32_t, localization);
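
/*
 * Illustrative sketch (not part of the build): the generated trees are
 * keyed as shown above, so an exact in-memory inode lookup supplies the
 * full (localization, obj_id, asof) triple while a PFS lookup needs only
 * the localization key.  The values below are hypothetical; note the
 * returned ip is NOT referenced -- callers such as hammer_get_inode()
 * must hammer_ref() it themselves.
 */
#if 0
static hammer_inode_t
example_ino_lookup(hammer_mount_t hmp)
{
        struct hammer_inode_info iinfo;

        iinfo.obj_id = HAMMER_OBJID_ROOT;       /* root object */
        iinfo.obj_asof = HAMMER_MAX_TID;        /* current version */
        iinfo.obj_localization = 0;             /* root PFS */
        return(hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo));
}
#endif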

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 *
 * MPALMOSTSAFE
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);
        hammer_mount_t hmp;

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(ap->a_vp);
                return(0);
        }

        /*
         * If the inode no longer has visibility in the filesystem try to
         * recycle it immediately, even if the inode is dirty.  Recycling
         * it quickly allows the system to reclaim buffer cache and VM
         * resources which can matter a lot in a heavily loaded system.
         *
         * This can deadlock in vfsync() if we aren't careful.
         *
         * Do not queue the inode to the flusher if we still have visibility,
         * otherwise namespace calls such as chmod will unnecessarily generate
         * multiple inode updates.
         */
        if (ip->ino_data.nlinks == 0) {
                hmp = ip->hmp;
                lwkt_gettoken(&hmp->fs_token);
                hammer_inode_unloadable_check(ip, 0);
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
                lwkt_reltoken(&hmp->fs_token);
                vrecycle(ap->a_vp);
        }
        return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 *
 * We must interlock ip->vp so hammer_get_vnode() can avoid races.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct vnode *vp;

        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
                lwkt_gettoken(&hmp->fs_token);
                hammer_lock_ex(&ip->lock);
                vp->v_data = NULL;
                ip->vp = NULL;

                if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
                        ++hammer_count_reclaims;
                        ++hmp->count_reclaims;
                        ip->flags |= HAMMER_INODE_RECLAIM;
                }
                hammer_unlock(&ip->lock);
                vclrisdirty(vp);
                hammer_rel_inode(ip, 1);
                lwkt_reltoken(&hmp->fs_token);
        }
        return(0);
}

/*
 * Inform the kernel that the inode is dirty.  This will be checked
 * by vn_unlock().
 *
 * Theoretically in order to reclaim a vnode the hammer_vop_reclaim()
 * must be called which will interlock against our inode lock, so
 * if VRECLAIMED is not set vp->v_mount (as used by vsetisdirty())
 * should be stable without having to acquire any new locks.
 */
void
hammer_inode_dirty(struct hammer_inode *ip)
{
        struct vnode *vp;

        if ((ip->flags & HAMMER_INODE_MODMASK) &&
            (vp = ip->vp) != NULL &&
            (vp->v_flag & (VRECLAIMED | VISDIRTY)) == 0) {
                vsetisdirty(vp);
        }
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
        hammer_mount_t hmp;
        struct vnode *vp;
        int error = 0;
        u_int8_t obj_type;

        hmp = ip->hmp;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp = *vpp;
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;

                        obj_type = ip->ino_data.obj_type;
                        vp->v_type = hammer_get_vnode_type(obj_type);

                        hammer_inode_wakereclaims(ip);

                        switch(ip->ino_data.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
                                break;
                        case HAMMER_OBJTYPE_REGFILE:
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         *
                         * Pseudo-filesystem roots can be accessed via
                         * non-root filesystem paths and setting VROOT may
                         * confuse the namecache.  Set VPFSROOT instead.
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT &&
                            ip->obj_asof == hmp->asof) {
                                if (ip->obj_localization == 0)
                                        vsetflags(vp, VROOT);
                                else
                                        vsetflags(vp, VPFSROOT);
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
                        if (vp->v_type == VREG) {
                                vinitvmio(vp, ip->ino_data.size,
                                          hammer_blocksize(ip->ino_data.size),
                                          hammer_blockoff(ip->ino_data.size));
                        }
                        break;
                }

                /*
                 * Interlock vnode clearing.  This does not prevent the
                 * vnode from going into a reclaimed state but it does
                 * prevent it from being destroyed or reused so the vget()
                 * will properly fail.
                 */
                hammer_lock_ex(&ip->lock);
                if ((vp = ip->vp) == NULL) {
                        hammer_unlock(&ip->lock);
                        continue;
                }
                vhold(vp);
                hammer_unlock(&ip->lock);

                /*
                 * loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp) {
                                vdrop(vp);
                                break;
                        }
                        vput(vp);
                }
                vdrop(vp);
        }
        *vpp = vp;
        return(error);
}
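
/*
 * Illustrative sketch (not part of the build): the usual frontend
 * pattern is to obtain a referenced inode first and then attach the
 * vnode.  hammer_get_vnode() returns the vnode locked and referenced,
 * and the vnode association holds its own reference on the inode, so
 * the caller's inode reference can be dropped afterwards.  Error
 * handling and transaction setup are elided; the fragment assumes a
 * stack transaction as used elsewhere in the HAMMER frontend.
 */
#if 0
        struct vnode *vp;

        ip = hammer_get_inode(&trans, NULL, obj_id, hmp->asof,
                              localization, 0, &error);
        if (ip) {
                error = hammer_get_vnode(ip, &vp);
                hammer_rel_inode(ip, 0);        /* vp holds its own ref */
                if (error == 0)
                        vput(vp);               /* unlock + release */
        }
#endif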

/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
                            int (*callback)(hammer_inode_t ip, void *data),
                            void *data)
{
        hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
                                   hammer_inode_info_cmp_all_history,
                                   callback, iinfo);
}
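
/*
 * Illustrative sketch (not part of the build): a caller fills in the
 * (localization, obj_id) pair and supplies a callback which is invoked
 * once for each cached snapshot copy of the object.  The callback name
 * here is hypothetical.  Note that asof is ignored by the comparator
 * and that the scan passes iinfo, not the data argument, to the
 * callback.
 */
#if 0
static int
invalidate_callback(hammer_inode_t ip, void *data)
{
        /* act on one snapshot copy of the object */
        return(0);              /* continue the scan */
}

        iinfo.obj_id = obj_id;
        iinfo.obj_asof = 0;     /* unused by the comparator */
        iinfo.obj_localization = localization;
        hammer_scan_inode_snapshots(hmp, &iinfo, invalidate_callback, NULL);
#endif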

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_node_cache *cachep;
        struct hammer_inode_info iinfo;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;


        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a dummy inode we return a failure so dounlink
         * (which does another lookup) doesn't try to mess with the
         * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
         * to ref dummy inodes.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        if (hmp->ronly)
                ip->flags |= HAMMER_INODE_RO;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Locate the on-disk inode.  If this is a PFS root we always
         * access the current version of the root inode and (if it is not
         * a master) always access information under it with a snapshot
         * TID.
         *
         * We cache recent inode lookups in this directory in dip->cache[2].
         * If we can't find it we assume the inode we are looking for is
         * close to the directory inode.
         */
retry:
        cachep = NULL;
        if (dip) {
                if (dip->cache[2].node)
                        cachep = &dip->cache[2];
                else
                        cachep = &dip->cache[0];
        }
        hammer_init_cursor(trans, &cursor, cachep, NULL);
        cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;

        cursor.asof = iinfo.obj_asof;
        cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
                       HAMMER_CURSOR_ASOF;

        *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
                ip->ino_data = cursor.data->inode;

                /*
                 * cache[0] tries to cache the location of the object inode.
                 * The assumption is that it is near the directory inode.
                 *
                 * cache[1] tries to cache the location of the object data.
                 * We might have something in the governing directory from
                 * scan optimizations (see the strategy code in
                 * hammer_vnops.c).
                 *
                 * We update dip->cache[2], if possible, with the location
                 * of the object inode for future directory shortcuts.
                 */
                hammer_cache_node(&ip->cache[0], cursor.node);
                if (dip) {
                        if (dip->cache[3].node) {
                                hammer_cache_node(&ip->cache[1],
                                                  dip->cache[3].node);
                        }
                        hammer_cache_node(&dip->cache[2], cursor.node);
                }

                /*
                 * The file should not contain any data past the file size
                 * stored in the inode.  Setting save_trunc_off to the
                 * file size instead of max reduces B-Tree lookup overheads
                 * on append by allowing the flusher to avoid checking for
                 * record overwrites.
                 */
                ip->save_trunc_off = ip->ino_data.size;

                /*
                 * Locate and assign the pseudofs management structure to
                 * the inode.
                 */
                if (dip && dip->obj_localization == ip->obj_localization) {
                        ip->pfsm = dip->pfsm;
                        hammer_ref(&ip->pfsm->lock);
                } else {
                        ip->pfsm = hammer_load_pseudofs(trans,
                                                        ip->obj_localization,
                                                        errorp);
                        *errorp = 0;    /* ignore ENOENT */
                }
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }

                hammer_free_inode(ip);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);

        /*
         * NEWINODE is only set if the inode becomes dirty later,
         * setting it here just leads to unnecessary stalls.
         *
         * trans->flags |= HAMMER_TRANSF_NEWINODE;
         */
        return (ip);
}
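
/*
 * Illustrative sketch (not part of the build): looking up the current
 * (non-historical) version of a PFS root inode, mirroring the call in
 * hammer_mkroot_pseudofs() below.  *errorp is set to ENOENT and NULL is
 * returned if no such inode exists on-media; the reference obtained
 * here must eventually be dropped with hammer_rel_inode().
 */
#if 0
        hammer_inode_t ip;
        int error;

        ip = hammer_get_inode(&trans, NULL, HAMMER_OBJID_ROOT,
                              HAMMER_MAX_TID, localization, 0, &error);
        if (ip) {
                /* ... use ip ... */
                hammer_rel_inode(ip, 0);
        }
#endif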

/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a non-fake inode we return an error.  Only fake
         * inodes can be returned by this routine.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        *errorp = 0;
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Populate the dummy inode.  Leave everything zero'd out.
         *
         * (ip->ino_leaf and ip->ino_data)
         *
         * Make the dummy inode a FIFO object which most copy programs
         * will properly ignore.
         */
        ip->save_trunc_off = ip->ino_data.size;
        ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

        /*
         * Locate and assign the pseudofs management structure to
         * the inode.
         */
        if (dip && dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
        } else {
                ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
                                                errorp);
                *errorp = 0;    /* ignore ENOENT */
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         *
         * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        goto loop;
                }
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }
                hammer_free_inode(ip);
                ip = NULL;
        }
        trans->flags |= HAMMER_TRANSF_NEWINODE;
        return (ip);
}

/*
 * Return a referenced inode only if it is in our inode cache.
 *
 * Dummy inodes do not count.
 */
struct hammer_inode *
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
                  hammer_tid_t asof, u_int32_t localization)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;

        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY)
                        ip = NULL;
                else
                        hammer_ref(&ip->lock);
        }
        return(ip);
}

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred,
                    hammer_inode_t dip, const char *name, int namelen,
                    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;
        int error;
        int64_t namekey;
        u_int32_t dummy;

        hmp = trans->hmp;

        /*
         * Disallow the creation of new inodes in directories which
         * have been deleted.  In HAMMER, this will cause a record
         * syncing assertion later on in the flush code.
         */
        if (dip && dip->ino_data.nlinks == 0) {
                *ipp = NULL;
                return (EINVAL);
        }

        /*
         * Allocate inode
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        trans->flags |= HAMMER_TRANSF_NEWINODE;

        if (pfsm) {
                KKASSERT(pfsm->localization != 0);
                ip->obj_id = HAMMER_OBJID_ROOT;
                ip->obj_localization = pfsm->localization;
        } else {
                KKASSERT(dip != NULL);
                namekey = hammer_directory_namekey(dip, name, namelen, &dummy);
                ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
                ip->obj_localization = dip->obj_localization;
        }

        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY |
                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;

        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        /* ip->save_trunc_off = 0; (already zero) */
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        ip->ino_data.atime = trans->time;
        ip->ino_data.mtime = trans->time;
        ip->ino_data.size = 0;
        ip->ino_data.nlinks = 0;

        /*
         * A nohistory designator on the parent directory is inherited by
         * the child.  We will do this even for pseudo-fs creation... the
         * sysad can turn it off.
         */
        if (dip) {
                ip->ino_data.uflags = dip->ino_data.uflags &
                                      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
        }

        ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_leaf.base.localization = ip->obj_localization +
                                         HAMMER_LOCALIZE_INODE;
        ip->ino_leaf.base.obj_id = ip->obj_id;
        ip->ino_leaf.base.key = 0;
        ip->ino_leaf.base.create_tid = 0;
        ip->ino_leaf.base.delete_tid = 0;
        ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;

        /*
         * If we are running version 2 or greater directory entries are
         * inode-localized instead of data-localized.
         */
        if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
                if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        ip->ino_data.cap_flags |=
                                HAMMER_INODE_CAP_DIR_LOCAL_INO;
                }
        }
        if (trans->hmp->version >= HAMMER_VOL_VERSION_SIX) {
                if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        ip->ino_data.cap_flags |=
                                HAMMER_INODE_CAP_DIRHASH_ALG1;
                }
        }

        /*
         * Setup the ".." pointer.  This only needs to be done for directories
         * but we do it for all objects as a recovery aid.
         */
        if (dip)
                ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
#if 0
        /*
         * The parent_obj_localization field only applies to pseudo-fs roots.
         * XXX this is no longer applicable, PFSs are no longer directly
         * tied into the parent's directory structure.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
            ip->obj_id == HAMMER_OBJID_ROOT) {
                ip->ino_data.ext.obj.parent_obj_localization =
                                                dip->obj_localization;
        }
#endif

        switch(ip->ino_leaf.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        if (dip) {
                xuid = hammer_to_unix_xid(&dip->ino_data.uid);
                xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
                                             xuid, cred, &vap->va_mode);
        } else {
                xuid = 0;
        }
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
        else
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
        else if (dip)
                ip->ino_data.gid = dip->ino_data.gid;

        hammer_ref(&ip->lock);

        if (pfsm) {
                ip->pfsm = pfsm;
                hammer_ref(&pfsm->lock);
                error = 0;
        } else if (dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
                error = 0;
        } else {
                ip->pfsm = hammer_load_pseudofs(trans,
                                                ip->obj_localization,
                                                &error);
                error = 0;      /* ignore ENOENT */
        }

        if (error) {
                hammer_free_inode(ip);
                ip = NULL;
        } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                panic("hammer_create_inode: duplicate obj_id %llx",
                      (long long)ip->obj_id);
                /* not reached */
                hammer_free_inode(ip);
        }
        *ipp = ip;
        return(error);
}
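
/*
 * Illustrative sketch (not part of the build): a typical in-memory
 * creation of a directory under dip, modeled on hammer_mkroot_pseudofs()
 * below.  The name "subdir" is hypothetical, and the directory-entry
 * record and nlinks bump normally performed by the namespace code are
 * elided.
 */
#if 0
        struct vattr vap;
        hammer_inode_t nip;
        int error;

        vattr_null(&vap);
        vap.va_mode = 0755;
        vap.va_type = VDIR;
        error = hammer_create_inode(&trans, &vap, cred, dip,
                                    "subdir", 6, NULL, &nip);
        if (error == 0)
                hammer_rel_inode(nip, 0);
#endif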

/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
        struct hammer_mount *hmp;

        hmp = ip->hmp;
        KKASSERT(hammer_oneref(&ip->lock));
        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        hammer_uncache_node(&ip->cache[2]);
        hammer_uncache_node(&ip->cache[3]);
        hammer_inode_wakereclaims(ip);
        if (ip->objid_cache)
                hammer_clear_objid(ip);
        --hammer_count_inodes;
        --hmp->count_inodes;
        if (ip->pfsm) {
                hammer_rel_pseudofs(hmp, ip->pfsm);
                ip->pfsm = NULL;
        }
        kfree(ip, hmp->m_inodes);
        ip = NULL;
}

/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
                     u_int32_t localization, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_inode_t ip;
        hammer_pseudofs_inmem_t pfsm;
        struct hammer_cursor cursor;
        int bytes;

retry:
        pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
        if (pfsm) {
                hammer_ref(&pfsm->lock);
                *errorp = 0;
                return(pfsm);
        }

        /*
         * PFS records are associated with the root inode (not the PFS root
         * inode, but the real root).  Avoid an infinite recursion if loading
         * the PFS for the real root.
         */
        if (localization) {
                ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
                                      HAMMER_MAX_TID,
                                      HAMMER_DEF_LOCALIZATION, 0, errorp);
        } else {
                ip = NULL;
        }

        pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
        pfsm->localization = localization;
        pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
        pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

        hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
        cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        if (ip)
                *errorp = hammer_ip_lookup(&cursor);
        else
                *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == 0) {
                *errorp = hammer_ip_resolve_data(&cursor);
                if (*errorp == 0) {
                        if (cursor.data->pfsd.mirror_flags &
                            HAMMER_PFSD_DELETED) {
                                *errorp = ENOENT;
                        } else {
                                bytes = cursor.leaf->data_len;
                                if (bytes > sizeof(pfsm->pfsd))
                                        bytes = sizeof(pfsm->pfsd);
                                bcopy(cursor.data, &pfsm->pfsd, bytes);
                        }
                }
        }
        hammer_done_cursor(&cursor);

        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_ref(&pfsm->lock);
        if (ip)
                hammer_rel_inode(ip, 0);
        if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
                kfree(pfsm, hmp->m_misc);
                goto retry;
        }
        return(pfsm);
}
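
/*
 * Illustrative sketch (not part of the build): hammer_load_pseudofs()
 * always returns a referenced pfsm, even on error (a default template
 * with *errorp set, typically to ENOENT).  The reference is dropped
 * with hammer_rel_pseudofs(), which frees the structure on last
 * release.
 */
#if 0
        pfsm = hammer_load_pseudofs(&trans, localization, &error);
        /* ... inspect pfsm->pfsd ... */
        hammer_rel_pseudofs(hmp, pfsm);
#endif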

/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
        struct hammer_cursor cursor;
        hammer_record_t record;
        hammer_inode_t ip;
        int error;

        /*
         * PFS records are associated with the root inode (not the PFS root
         * inode, but the real root).
         */
        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = pfsm->localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        /*
         * Replace any in-memory version of the record.
         */
        error = hammer_ip_lookup(&cursor);
        if (error == 0 && hammer_cursor_inmem(&cursor)) {
                record = cursor.iprec;
                if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
                        KKASSERT(cursor.deadlk_rec == NULL);
                        hammer_ref(&record->lock);
                        cursor.deadlk_rec = record;
                        error = EDEADLK;
                } else {
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        error = 0;
                }
        }

        /*
         * Allocate replacement general record.  The backend flush will
         * delete any on-disk version of the record.
         */
        if (error == 0 || error == ENOENT) {
                record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
                record->type = HAMMER_MEM_RECORD_GENERAL;

                record->leaf.base.localization = ip->obj_localization +
                                                 HAMMER_LOCALIZE_MISC;
                record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
                record->leaf.base.key = pfsm->localization;
                record->leaf.data_len = sizeof(pfsm->pfsd);
                bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
                error = hammer_ip_add_record(trans, record);
        }
        hammer_done_cursor(&cursor);
        if (error == EDEADLK)
                goto retry;
        hammer_rel_inode(ip, 0);
        return(error);
}
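
/*
 * Illustrative sketch (not part of the build): the usual update flow is
 * load, modify the in-memory pfsd, then save.  The mirror_flags update
 * shown here is hypothetical.
 */
#if 0
        pfsm = hammer_load_pseudofs(&trans, localization, &error);
        pfsm->pfsd.mirror_flags |= HAMMER_PFSD_SLAVE;   /* hypothetical */
        error = hammer_save_pseudofs(&trans, pfsm);
        hammer_rel_pseudofs(trans.hmp, pfsm);
#endif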
1125
1126 /*
1127  * Create a root directory for a PFS if one does not alredy exist.
1128  *
1129  * The PFS root stands alone so we must also bump the nlinks count
1130  * to prevent it from being destroyed on release.
1131  */
1132 int
1133 hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
1134                        hammer_pseudofs_inmem_t pfsm)
1135 {
1136         hammer_inode_t ip;
1137         struct vattr vap;
1138         int error;
1139
1140         ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
1141                               pfsm->localization, 0, &error);
1142         if (ip == NULL) {
1143                 vattr_null(&vap);
1144                 vap.va_mode = 0755;
1145                 vap.va_type = VDIR;
1146                 error = hammer_create_inode(trans, &vap, cred,
1147                                             NULL, NULL, 0,
1148                                             pfsm, &ip);
1149                 if (error == 0) {
1150                         ++ip->ino_data.nlinks;
1151                         hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
1152                 }
1153         }
1154         if (ip)
1155                 hammer_rel_inode(ip, 0);
1156         return(error);
1157 }
1158
1159 /*
1160  * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
1161  * if we are unable to disassociate all the inodes.
1162  */
1163 static
1164 int
1165 hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
1166 {
1167         int res;
1168
1169         hammer_ref(&ip->lock);
1170         if (hammer_isactive(&ip->lock) == 2 && ip->vp)
1171                 vclean_unlocked(ip->vp);
1172         if (hammer_isactive(&ip->lock) == 1 && ip->vp == NULL)
1173                 res = 0;
1174         else
1175                 res = -1;       /* stop, someone is using the inode */
1176         hammer_rel_inode(ip, 0);
1177         return(res);
1178 }
1179
1180 int
1181 hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
1182 {
1183         int res;
1184         int try;
1185
1186         for (try = res = 0; try < 4; ++try) {
1187                 res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
1188                                            hammer_inode_pfs_cmp,
1189                                            hammer_unload_pseudofs_callback,
1190                                            &localization);
1191                 if (res == 0 && try > 1)
1192                         break;
1193                 hammer_flusher_sync(trans->hmp);
1194         }
1195         if (res != 0)
1196                 res = ENOTEMPTY;
1197         return(res);
1198 }
1199
1200
1201 /*
1202  * Release a reference on a PFS
1203  */
1204 void
1205 hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
1206 {
1207         hammer_rel(&pfsm->lock);
1208         if (hammer_norefs(&pfsm->lock)) {
1209                 RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
1210                 kfree(pfsm, hmp->m_misc);
1211         }
1212 }
1213
1214 /*
1215  * Called by hammer_sync_inode().
1216  */
1217 static int
1218 hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
1219 {
1220         hammer_transaction_t trans = cursor->trans;
1221         hammer_record_t record;
1222         int error;
1223         int redirty;
1224
1225 retry:
1226         error = 0;
1227
1228         /*
1229          * If the inode has a presence on-disk then locate it and mark
1230          * it deleted, setting DELONDISK.
1231          *
1232          * The record may or may not be physically deleted, depending on
1233          * the retention policy.
1234          */
1235         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
1236             HAMMER_INODE_ONDISK) {
1237                 hammer_normalize_cursor(cursor);
1238                 cursor->key_beg.localization = ip->obj_localization + 
1239                                                HAMMER_LOCALIZE_INODE;
1240                 cursor->key_beg.obj_id = ip->obj_id;
1241                 cursor->key_beg.key = 0;
1242                 cursor->key_beg.create_tid = 0;
1243                 cursor->key_beg.delete_tid = 0;
1244                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
1245                 cursor->key_beg.obj_type = 0;
1246                 cursor->asof = ip->obj_asof;
1247                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1248                 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
1249                 cursor->flags |= HAMMER_CURSOR_BACKEND;
1250
1251                 error = hammer_btree_lookup(cursor);
1252                 if (hammer_debug_inode)
1253                         kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
1254
1255                 if (error == 0) {
1256                         error = hammer_ip_delete_record(cursor, ip, trans->tid);
1257                         if (hammer_debug_inode)
1258                                 kprintf(" error %d\n", error);
1259                         if (error == 0) {
1260                                 ip->flags |= HAMMER_INODE_DELONDISK;
1261                         }
1262                         if (cursor->node)
1263                                 hammer_cache_node(&ip->cache[0], cursor->node);
1264                 }
1265                 if (error == EDEADLK) {
1266                         hammer_done_cursor(cursor);
1267                         error = hammer_init_cursor(trans, cursor,
1268                                                    &ip->cache[0], ip);
1269                         if (hammer_debug_inode)
1270                                 kprintf("IPDED %p %d\n", ip, error);
1271                         if (error == 0)
1272                                 goto retry;
1273                 }
1274         }
1275
1276         /*
1277          * Ok, write out the initial record or a new record (after deleting
1278          * the old one), unless the DELETED flag is set.  This routine will
1279          * clear DELONDISK if it writes out a record.
1280          *
1281          * Update our inode statistics if this is the first application of
1282          * the inode on-disk.
1283          */
1284         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
1285                 /*
1286                  * Generate a record and write it to the media.  We clean-up
1287                  * the state before releasing so we do not have to set-up
1288                  * a flush_group.
1289                  */
1290                 record = hammer_alloc_mem_record(ip, 0);
1291                 record->type = HAMMER_MEM_RECORD_INODE;
1292                 record->flush_state = HAMMER_FST_FLUSH;
1293                 record->leaf = ip->sync_ino_leaf;
1294                 record->leaf.base.create_tid = trans->tid;
1295                 record->leaf.data_len = sizeof(ip->sync_ino_data);
1296                 record->leaf.create_ts = trans->time32;
1297                 record->data = (void *)&ip->sync_ino_data;
1298                 record->flags |= HAMMER_RECF_INTERLOCK_BE;
1299
1300                 /*
1301                  * If this flag is set we cannot sync the new file size
1302                  * because we haven't finished related truncations.  The
1303                  * inode will be flushed in another flush group to finish
1304                  * the job.
1305                  */
1306                 if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
1307                     ip->sync_ino_data.size != ip->ino_data.size) {
1308                         redirty = 1;
1309                         ip->sync_ino_data.size = ip->ino_data.size;
1310                 } else {
1311                         redirty = 0;
1312                 }
1313
1314                 for (;;) {
1315                         error = hammer_ip_sync_record_cursor(cursor, record);
1316                         if (hammer_debug_inode)
1317                                 kprintf("GENREC %p rec %08x %d\n",      
1318                                         ip, record->flags, error);
1319                         if (error != EDEADLK)
1320                                 break;
1321                         hammer_done_cursor(cursor);
1322                         error = hammer_init_cursor(trans, cursor,
1323                                                    &ip->cache[0], ip);
1324                         if (hammer_debug_inode)
1325                                 kprintf("GENREC reinit %d\n", error);
1326                         if (error)
1327                                 break;
1328                 }
1329
1330                 /*
1331                  * Note:  The record was never on the inode's record tree
1332                  * so just wave our hands importantly and destroy it.
1333                  */
1334                 record->flags |= HAMMER_RECF_COMMITTED;
1335                 record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
1336                 record->flush_state = HAMMER_FST_IDLE;
1337                 ++ip->rec_generation;
1338                 hammer_rel_mem_record(record);
1339
1340                 /*
1341                  * Finish up.
1342                  */
1343                 if (error == 0) {
1344                         if (hammer_debug_inode)
1345                                 kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
1346                         ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
1347                                             HAMMER_INODE_SDIRTY |
1348                                             HAMMER_INODE_ATIME |
1349                                             HAMMER_INODE_MTIME);
1350                         ip->flags &= ~HAMMER_INODE_DELONDISK;
1351                         if (redirty)
1352                                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1353
1354                         /*
1355                          * Root volume count of inodes
1356                          */
1357                         hammer_sync_lock_sh(trans);
1358                         if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
1359                                 hammer_modify_volume_field(trans,
1360                                                            trans->rootvol,
1361                                                            vol0_stat_inodes);
1362                                 ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
1363                                 hammer_modify_volume_done(trans->rootvol);
1364                                 ip->flags |= HAMMER_INODE_ONDISK;
1365                                 if (hammer_debug_inode)
1366                                         kprintf("NOWONDISK %p\n", ip);
1367                         }
1368                         hammer_sync_unlock(trans);
1369                 }
1370         }
1371
1372         /*
1373          * If the inode has been destroyed, clean out any left-over flags
1374          * that may have been set by the frontend.
1375          */
1376         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) { 
1377                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
1378                                     HAMMER_INODE_SDIRTY |
1379                                     HAMMER_INODE_ATIME |
1380                                     HAMMER_INODE_MTIME);
1381         }
1382         return(error);
1383 }
1384
1385 /*
1386  * Update only the itimes fields.
1387  *
1388  * ATIME can be updated without generating any UNDO.  MTIME is updated
1389  * with UNDO so it is guaranteed to be synchronized properly in case of
1390  * a crash.
1391  *
1392  * Neither field is included in the B-Tree leaf element's CRC, which is how
1393  * we can get away with updating ATIME the way we do.
1394  */
1395 static int
1396 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
1397 {
1398         hammer_transaction_t trans = cursor->trans;
1399         int error;
1400
1401 retry:
1402         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
1403             HAMMER_INODE_ONDISK) {
1404                 return(0);
1405         }
1406
1407         hammer_normalize_cursor(cursor);
1408         cursor->key_beg.localization = ip->obj_localization + 
1409                                        HAMMER_LOCALIZE_INODE;
1410         cursor->key_beg.obj_id = ip->obj_id;
1411         cursor->key_beg.key = 0;
1412         cursor->key_beg.create_tid = 0;
1413         cursor->key_beg.delete_tid = 0;
1414         cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
1415         cursor->key_beg.obj_type = 0;
1416         cursor->asof = ip->obj_asof;
1417         cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1418         cursor->flags |= HAMMER_CURSOR_ASOF;
1419         cursor->flags |= HAMMER_CURSOR_GET_LEAF;
1420         cursor->flags |= HAMMER_CURSOR_GET_DATA;
1421         cursor->flags |= HAMMER_CURSOR_BACKEND;
1422
1423         error = hammer_btree_lookup(cursor);
1424         if (error == 0) {
1425                 hammer_cache_node(&ip->cache[0], cursor->node);
1426                 if (ip->sync_flags & HAMMER_INODE_MTIME) {
1427                         /*
1428                          * Updating MTIME requires an UNDO.  Just cover
1429                          * both atime and mtime.
1430                          */
1431                         hammer_sync_lock_sh(trans);
1432                         hammer_modify_buffer(trans, cursor->data_buffer,
1433                                      HAMMER_ITIMES_BASE(&cursor->data->inode),
1434                                      HAMMER_ITIMES_BYTES);
1435                         cursor->data->inode.atime = ip->sync_ino_data.atime;
1436                         cursor->data->inode.mtime = ip->sync_ino_data.mtime;
1437                         hammer_modify_buffer_done(cursor->data_buffer);
1438                         hammer_sync_unlock(trans);
1439                 } else if (ip->sync_flags & HAMMER_INODE_ATIME) {
1440                         /*
1441                          * Updating atime only can be done in-place with
1442                          * no UNDO.
1443                          */
1444                         hammer_sync_lock_sh(trans);
1445                         hammer_modify_buffer(trans, cursor->data_buffer,
1446                                              NULL, 0);
1447                         cursor->data->inode.atime = ip->sync_ino_data.atime;
1448                         hammer_modify_buffer_done(cursor->data_buffer);
1449                         hammer_sync_unlock(trans);
1450                 }
1451                 ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
1452         }
1453         if (error == EDEADLK) {
1454                 hammer_done_cursor(cursor);
1455                 error = hammer_init_cursor(trans, cursor,
1456                                            &ip->cache[0], ip);
1457                 if (error == 0)
1458                         goto retry;
1459         }
1460         return(error);
1461 }
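
/*
 * The EDEADLK handling above is the standard cursor retry idiom used
 * throughout the backend: a B-Tree deadlock invalidates the cursor, so
 * it is torn down and the lookup is retried from scratch.  A minimal
 * sketch of the idiom (surrounding logic elided):
 *
 *	retry:
 *		error = hammer_btree_lookup(cursor);
 *		...
 *		if (error == EDEADLK) {
 *			hammer_done_cursor(cursor);
 *			error = hammer_init_cursor(trans, cursor,
 *						   &ip->cache[0], ip);
 *			if (error == 0)
 *				goto retry;
 *		}
 */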
1462
1463 /*
1464  * Release a reference on an inode, flush as requested.
1465  *
1466  * On the last reference we queue the inode to the flusher for its final
1467  * disposition.
1468  */
1469 void
1470 hammer_rel_inode(struct hammer_inode *ip, int flush)
1471 {
1472         /*hammer_mount_t hmp = ip->hmp;*/
1473
1474         /*
1475          * Handle disposition when dropping the last ref.
1476          */
1477         for (;;) {
1478                 if (hammer_oneref(&ip->lock)) {
1479                         /*
1480                          * Determine whether on-disk action is needed for
1481                          * the inode's final disposition.
1482                          */
1483                         KKASSERT(ip->vp == NULL);
1484                         hammer_inode_unloadable_check(ip, 0);
1485                         if (ip->flags & HAMMER_INODE_MODMASK) {
1486                                 hammer_flush_inode(ip, 0);
1487                         } else if (hammer_oneref(&ip->lock)) {
1488                                 hammer_unload_inode(ip);
1489                                 break;
1490                         }
1491                 } else {
1492                         if (flush)
1493                                 hammer_flush_inode(ip, 0);
1494
1495                         /*
1496                          * The inode still has multiple refs, try to drop
1497                          * one ref.
1498                          */
1499                         KKASSERT(hammer_isactive(&ip->lock) >= 1);
1500                         if (hammer_isactive(&ip->lock) > 1) {
1501                                 hammer_rel(&ip->lock);
1502                                 break;
1503                         }
1504                 }
1505         }
1506 }
1507
1508 /*
1509  * Unload and destroy the specified inode.  Must be called with one remaining
1510  * reference.  The reference is disposed of.
1511  *
1512  * The inode must be completely clean.
1513  */
1514 static int
1515 hammer_unload_inode(struct hammer_inode *ip)
1516 {
1517         hammer_mount_t hmp = ip->hmp;
1518
1519         KASSERT(hammer_oneref(&ip->lock),
1520                 ("hammer_unload_inode: %d refs", hammer_isactive(&ip->lock)));
1521         KKASSERT(ip->vp == NULL);
1522         KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
1523         KKASSERT(ip->cursor_ip_refs == 0);
1524         KKASSERT(hammer_notlocked(&ip->lock));
1525         KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);
1526
1527         KKASSERT(RB_EMPTY(&ip->rec_tree));
1528         KKASSERT(TAILQ_EMPTY(&ip->target_list));
1529
1530         if (ip->flags & HAMMER_INODE_RDIRTY) {
1531                 RB_REMOVE(hammer_redo_rb_tree, &hmp->rb_redo_root, ip);
1532                 ip->flags &= ~HAMMER_INODE_RDIRTY;
1533         }
1534         RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);
1535
1536         hammer_free_inode(ip);
1537         return(0);
1538 }
1539
1540 /*
1541  * Called during unmounting if a critical error occurred.  The in-memory
1542  * inode and all related structures are destroyed.
1543  *
1544  * If a critical error did not occur the unmount code calls the standard
1545  * release and asserts that the inode is gone.
1546  */
1547 int
1548 hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
1549 {
1550         hammer_record_t rec;
1551
1552         /*
1553          * Get rid of the inode's in-memory records, regardless of their
1554          * state, and clear the mod-mask.
1555          */
1556         while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
1557                 TAILQ_REMOVE(&ip->target_list, rec, target_entry);
1558                 rec->target_ip = NULL;
1559                 if (rec->flush_state == HAMMER_FST_SETUP)
1560                         rec->flush_state = HAMMER_FST_IDLE;
1561         }
1562         while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
1563                 if (rec->flush_state == HAMMER_FST_FLUSH)
1564                         --rec->flush_group->refs;
1565                 else
1566                         hammer_ref(&rec->lock);
1567                 KKASSERT(hammer_oneref(&rec->lock));
1568                 rec->flush_state = HAMMER_FST_IDLE;
1569                 rec->flush_group = NULL;
1570                 rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
1571                 rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
1572                 ++ip->rec_generation;
1573                 hammer_rel_mem_record(rec);
1574         }
1575         ip->flags &= ~HAMMER_INODE_MODMASK;
1576         ip->sync_flags &= ~HAMMER_INODE_MODMASK;
1577         KKASSERT(ip->vp == NULL);
1578
1579         /*
1580          * Remove the inode from any flush group, force it idle.  FLUSH
1581          * and SETUP states have an inode ref.
1582          */
1583         switch(ip->flush_state) {
1584         case HAMMER_FST_FLUSH:
1585                 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
1586                 --ip->flush_group->refs;
1587                 ip->flush_group = NULL;
1588                 /* fall through */
1589         case HAMMER_FST_SETUP:
1590                 hammer_rel(&ip->lock);
1591                 ip->flush_state = HAMMER_FST_IDLE;
1592                 /* fall through */
1593         case HAMMER_FST_IDLE:
1594                 break;
1595         }
1596
1597         /*
1598          * There shouldn't be any associated vnode.  The unload needs at
1599          * least one ref; if we do have a vp, steal its ip ref.
1600          */
1601         if (ip->vp) {
1602                 kprintf("hammer_destroy_inode_callback: Unexpected "
1603                         "vnode association ip %p vp %p\n", ip, ip->vp);
1604                 ip->vp->v_data = NULL;
1605                 ip->vp = NULL;
1606         } else {
1607                 hammer_ref(&ip->lock);
1608         }
1609         hammer_unload_inode(ip);
1610         return(0);
1611 }
1612
1613 /*
1614  * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
1615  * the read-only flag for cached inodes.
1616  *
1617  * This routine is called from a RB_SCAN().
1618  */
1619 int
1620 hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
1621 {
1622         hammer_mount_t hmp = ip->hmp;
1623
1624         if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
1625                 ip->flags |= HAMMER_INODE_RO;
1626         else
1627                 ip->flags &= ~HAMMER_INODE_RO;
1628         return(0);
1629 }
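
/*
 * Illustrative call site (a sketch, not the authoritative invocation):
 * the mount-update path applies this callback to every cached inode via
 * an unfiltered RB_SCAN of the in-memory inode tree:
 *
 *	RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
 *		hammer_reload_inode, NULL);
 */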
1630
1631 /*
1632  * A transaction has modified an inode, requiring updates as specified by
1633  * the passed flags.
1634  *
1635  * HAMMER_INODE_DDIRTY: Inode data has been updated, not incl mtime/atime,
1636  *                      and not including size changes due to write-append
1637  *                      (but other size changes are included).
1638  * HAMMER_INODE_SDIRTY: Inode data has been updated, size changes due to
1639  *                      write-append.
1640  * HAMMER_INODE_XDIRTY: Dirty in-memory records
1641  * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
1642  * HAMMER_INODE_DELETED: Inode record/data must be deleted
1643  * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
1644  */
1645 void
1646 hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
1647 {
1648         /*
1649          * A ronly value of 0 or 2 does not trigger the assertion;
1650          * 2 is a special error state.
1651          */
1652         KKASSERT(ip->hmp->ronly != 1 ||
1653                   (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 
1654                             HAMMER_INODE_SDIRTY |
1655                             HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
1656                             HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
1657         if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
1658                 ip->flags |= HAMMER_INODE_RSV_INODES;
1659                 ++ip->hmp->rsv_inodes;
1660         }
1661
1662         /*
1663          * Set the NEWINODE flag in the transaction if the inode
1664          * transitions to a dirty state.  This is used to track
1665          * the load on the inode cache.
1666          */
1667         if (trans &&
1668             (ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1669             (flags & HAMMER_INODE_MODMASK)) {
1670                 trans->flags |= HAMMER_TRANSF_NEWINODE;
1671         }
1672         if (flags & HAMMER_INODE_MODMASK)
1673                 hammer_inode_dirty(ip);
1674         ip->flags |= flags;
1675 }
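
/*
 * Typical usage (an illustrative sketch): a frontend operation which
 * touched the inode's mtime under an open transaction would flag the
 * inode with:
 *
 *	hammer_modify_inode(trans, ip, HAMMER_INODE_MTIME);
 *
 * A NULL trans is tolerated; only the HAMMER_TRANSF_NEWINODE accounting
 * above depends on it.
 */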
1676
1677 /*
1678  * Attempt to quickly update the atime for a hammer inode.  Return 0 on
1679  * success, -1 on failure.
1680  *
1681  * We attempt to update the atime with only the ip lock and not the
1682  * whole filesystem lock in order to improve concurrency.  We can only
1683  * do this safely if the ATIME flag is already pending on the inode.
1684  *
1685  * This function is called via a vnops path (ip pointer is stable) without
1686  * fs_token held.
1687  */
1688 int
1689 hammer_update_atime_quick(hammer_inode_t ip)
1690 {
1691         struct timeval tv;
1692         int res = -1;
1693
1694         if ((ip->flags & HAMMER_INODE_RO) ||
1695             (ip->hmp->mp->mnt_flag & MNT_NOATIME)) {
1696                 /*
1697                  * Silently indicate success on read-only mount/snap
1698                  */
1699                 res = 0;
1700         } else if (ip->flags & HAMMER_INODE_ATIME) {
1701                 /*
1702                  * Double check with inode lock held against backend.  This
1703                  * is only safe if all we need to do is update
1704                  * ino_data.atime.
1705                  */
1706                 getmicrotime(&tv);
1707                 hammer_lock_ex(&ip->lock);
1708                 if (ip->flags & HAMMER_INODE_ATIME) {
1709                         ip->ino_data.atime =
1710                             (unsigned long)tv.tv_sec * 1000000ULL + tv.tv_usec;
1711                         res = 0;
1712                 }
1713                 hammer_unlock(&ip->lock);
1714         }
1715         return res;
1716 }
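
/*
 * The flag test above is a double-check: the cheap unlocked test avoids
 * taking ip->lock in the common case, and ATIME is re-verified under
 * the lock before ino_data.atime is written.  Caller sketch
 * (illustrative):
 *
 *	if (hammer_update_atime_quick(ip) < 0)
 *		... fall back to the fully locked update path ...
 */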
1717
1718 /*
1719  * Request that an inode be flushed.  This whole mess cannot block and may
1720  * recurse (if not synchronous).  Once requested HAMMER will attempt to
1721  * actively flush the inode until the flush can be done.
1722  *
1723  * The inode may already be flushing, or may be in a setup state.  We can
1724  * place the inode in a flushing state if it is currently idle and flag it
1725  * to reflush if it is currently flushing.
1726  *
1727  * Upon return, if the inode could not be flushed due to a setup
1728  * dependency, it will be automatically flushed when the dependency
1729  * is satisfied.
1730  */
1731 void
1732 hammer_flush_inode(hammer_inode_t ip, int flags)
1733 {
1734         hammer_mount_t hmp;
1735         hammer_flush_group_t flg;
1736         int good;
1737
1738         /*
1739          * fill_flush_group is the first flush group we may be able to
1740          * continue filling, it may be open or closed but it will always
1741          * be past the currently flushing (running) flg.
1742          *
1743          * next_flush_group is the next open flush group.
1744          */
1745         hmp = ip->hmp;
1746         while ((flg = hmp->fill_flush_group) != NULL) {
1747                 KKASSERT(flg->running == 0);
1748                 if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit &&
1749                     flg->total_count <= hammer_autoflush) {
1750                         break;
1751                 }
1752                 hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry);
1753                 hammer_flusher_async(ip->hmp, flg);
1754         }
1755         if (flg == NULL) {
1756                 flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
1757                 flg->seq = hmp->flusher.next++;
1758                 if (hmp->next_flush_group == NULL)
1759                         hmp->next_flush_group = flg;
1760                 if (hmp->fill_flush_group == NULL)
1761                         hmp->fill_flush_group = flg;
1762                 RB_INIT(&flg->flush_tree);
1763                 TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
1764         }
1765
1766         /*
1767          * Trivial 'nothing to flush' case.  If the inode is in a SETUP
1768          * state we have to put it back into an IDLE state so we can
1769          * drop the extra ref.
1770          *
1771          * If we have a parent dependency we must still fall through
1772          * so we can run it.
1773          */
1774         if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
1775                 if (ip->flush_state == HAMMER_FST_SETUP &&
1776                     TAILQ_EMPTY(&ip->target_list)) {
1777                         ip->flush_state = HAMMER_FST_IDLE;
1778                         hammer_rel_inode(ip, 0);
1779                 }
1780                 if (ip->flush_state == HAMMER_FST_IDLE)
1781                         return;
1782         }
1783
1784         /*
1785          * Our flush action will depend on the current state.
1786          */
1787         switch(ip->flush_state) {
1788         case HAMMER_FST_IDLE:
1789                 /*
1790                  * We have no dependencies and can flush immediately.  Some
1791                  * of our children may not be flushable so we have to re-test
1792                  * with that additional knowledge.
1793                  */
1794                 hammer_flush_inode_core(ip, flg, flags);
1795                 break;
1796         case HAMMER_FST_SETUP:
1797                 /*
1798                  * Recurse upwards through dependencies via target_list
1799                  * and start their flusher actions going if possible.
1800                  *
1801                  * 'good' is our connectivity.  -1 means we have none and
1802                  * can't flush, 0 means there weren't any dependencies, and
1803                  * 1 means we have good connectivity.
1804                  */
1805                 good = hammer_setup_parent_inodes(ip, 0, flg);
1806
1807                 if (good >= 0) {
1808                         /*
1809                          * We can continue if good >= 0.  Determine how 
1810                          * many records under our inode can be flushed (and
1811                          * mark them).
1812                          */
1813                         hammer_flush_inode_core(ip, flg, flags);
1814                 } else {
1815                         /*
1816                          * Parent has no connectivity, tell it to flush
1817                          * us as soon as it does.
1818                          *
1819                          * The REFLUSH flag is also needed to trigger
1820                          * dependency wakeups.
1821                          */
1822                         ip->flags |= HAMMER_INODE_CONN_DOWN |
1823                                      HAMMER_INODE_REFLUSH;
1824                         if (flags & HAMMER_FLUSH_SIGNAL) {
1825                                 ip->flags |= HAMMER_INODE_RESIGNAL;
1826                                 hammer_flusher_async(ip->hmp, flg);
1827                         }
1828                 }
1829                 break;
1830         case HAMMER_FST_FLUSH:
1831                 /*
1832                  * We are already flushing, flag the inode to reflush
1833                  * if needed after it completes its current flush.
1834                  *
1835                  * The REFLUSH flag is also needed to trigger
1836                  * dependency wakeups.
1837                  */
1838                 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
1839                         ip->flags |= HAMMER_INODE_REFLUSH;
1840                 if (flags & HAMMER_FLUSH_SIGNAL) {
1841                         ip->flags |= HAMMER_INODE_RESIGNAL;
1842                         hammer_flusher_async(ip->hmp, flg);
1843                 }
1844                 break;
1845         }
1846 }
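
/*
 * Summary of the flush_state transitions driven above (descriptive
 * only):
 *
 *	IDLE  -- no dependencies ----------> FLUSH (via _core)
 *	SETUP -- parents resolvable -------> FLUSH (via _core)
 *	SETUP -- no connectivity ----------> SETUP + CONN_DOWN|REFLUSH
 *	FLUSH -- flush requested again ----> FLUSH + REFLUSH[|RESIGNAL]
 */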
1847
1848 /*
1849  * Scan ip->target_list, which is a list of records owned by our PARENT
1850  * inodes that reference our ip.
1851  *
1852  * XXX This is a huge mess of recursive code, but not one bit of it blocks
1853  *     so for now do not ref/deref the structures.  Note that if we use the
1854  *     ref/rel code later, the rel CAN block.
1855  */
1856 static int
1857 hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
1858                            hammer_flush_group_t flg)
1859 {
1860         hammer_record_t depend;
1861         int good;
1862         int r;
1863
1864         /*
1865          * If we hit our recursion limit and we have parent dependencies
1866          * we cannot continue.  Returning < 0 will cause us to be flagged
1867          * for reflush.  Returning -2 cuts off additional dependency checks
1868          * because they are likely to also hit the depth limit.
1869          *
1870          * We cannot return < 0 if there are no dependencies or there might
1871          * not be anything to wakeup (ip).
1872          */
1873         if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
1874                 if (hammer_debug_general & 0x10000)
1875                         krateprintf(&hammer_gen_krate,
1876                             "HAMMER Warning: depth limit reached on "
1877                             "setup recursion, inode %p %016llx\n",
1878                             ip, (long long)ip->obj_id);
1879                 return(-2);
1880         }
1881
1882         /*
1883          * Scan dependencies
1884          */
1885         good = 0;
1886         TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
1887                 r = hammer_setup_parent_inodes_helper(depend, depth, flg);
1888                 KKASSERT(depend->target_ip == ip);
1889                 if (r < 0 && good == 0)
1890                         good = -1;
1891                 if (r > 0)
1892                         good = 1;
1893
1894                 /*
1895                  * If we failed due to the recursion depth limit then stop
1896                  * now.
1897                  */
1898                 if (r == -2)
1899                         break;
1900         }
1901         return(good);
1902 }
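
/*
 * Aggregation rule used by the loop above: a dependency reporting good
 * connectivity (r > 0) wins and sticks, a failure (r < 0) is recorded
 * only while no success has been seen, and r == -2 (the depth limit)
 * stops the scan early.
 */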
1903
1904 /*
1905  * This helper function takes a record representing the dependency between
1906  * the parent inode and child inode.
1907  *
1908  * record->ip           = parent inode
1909  * record->target_ip    = child inode
1910  * 
1911  * We are asked to recurse upwards and convert the record from SETUP
1912  * to FLUSH if possible.
1913  *
1914  * Return 1 if the record gives us connectivity
1915  *
1916  * Return 0 if the record is not relevant 
1917  *
1918  * Return -1 if we can't resolve the dependency and there is no connectivity.
1919  */
1920 static int
1921 hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
1922                                   hammer_flush_group_t flg)
1923 {
1924         hammer_inode_t pip;
1925         int good;
1926
1927         KKASSERT(record->flush_state != HAMMER_FST_IDLE);
1928         pip = record->ip;
1929
1930         /*
1931          * If the record is already flushing, is it in our flush group?
1932          *
1933          * If it is in our flush group but it is a general record or a 
1934          * delete-on-disk, it does not improve our connectivity (return 0),
1935          * and if the target inode is not trying to destroy itself we can't
1936          * allow the operation yet anyway (the second return -1).
1937          */
1938         if (record->flush_state == HAMMER_FST_FLUSH) {
1939                 /*
1940                  * If not in our flush group ask the parent to reflush
1941                  * us as soon as possible.
1942                  */
1943                 if (record->flush_group != flg) {
1944                         pip->flags |= HAMMER_INODE_REFLUSH;
1945                         record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1946                         return(-1);
1947                 }
1948
1949                 /*
1950                  * If in our flush group everything is already set up,
1951                  * just return whether the record will improve our
1952                  * visibility or not.
1953                  */
1954                 if (record->type == HAMMER_MEM_RECORD_ADD)
1955                         return(1);
1956                 return(0);
1957         }
1958
1959         /*
1960          * It must be a setup record.  Try to resolve the setup dependencies
1961          * by recursing upwards so we can place ip on the flush list.
1962          *
1963          * Limit ourselves to 20 levels of recursion to avoid blowing out
1964          * the kernel stack.  If we hit the recursion limit we can't flush
1965          * until the parent flushes.  The parent will flush independently
1966          * on its own and ultimately a deep recursion will be resolved.
1967          */
1968         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1969
1970         good = hammer_setup_parent_inodes(pip, depth + 1, flg);
1971
1972         /*
1973          * If good < 0 the parent has no connectivity and we cannot safely
1974          * flush the directory entry, which also means we can't flush our
1975          * ip.  Flag us for downward recursion once the parent's
1976          * connectivity is resolved.  Flag the parent for [re]flush or it
1977          * may not check for downward recursions.
1978          */
1979         if (good < 0) {
1980                 pip->flags |= HAMMER_INODE_REFLUSH;
1981                 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1982                 return(good);
1983         }
1984
1985         /*
1986          * We are go, place the parent inode in a flushing state so we can
1987          * place its record in a flushing state.  Note that the parent
1988          * may already be flushing.  The record must be in the same flush
1989          * group as the parent.
1990          */
1991         if (pip->flush_state != HAMMER_FST_FLUSH)
1992                 hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
1993         KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
1994
1995         /*
1996          * It is possible for a rename to create a loop in the recursion
1997          * and revisit a record.  This will result in the record being
1998          * placed in a flush state unexpectedly.  This check deals with
1999          * the case.
2000          */
2001         if (record->flush_state == HAMMER_FST_FLUSH) {
2002                 if (record->type == HAMMER_MEM_RECORD_ADD)
2003                         return(1);
2004                 return(0);
2005         }
2006
2007         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
2008
2009 #if 0
2010         if (record->type == HAMMER_MEM_RECORD_DEL &&
2011             (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
2012                 /*
2013                  * Regardless of flushing state we cannot sync this path if the
2014                  * record represents a delete-on-disk but the target inode
2015                  * is not ready to sync its own deletion.
2016                  *
2017                  * XXX need to count effective nlinks to determine whether
2018                  * the flush is ok, otherwise removing a hardlink will
2019                  * just leave the DEL record to rot.
2020                  */
2021                 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
2022                 return(-1);
2023         } else
2024 #endif
2025         if (pip->flush_group == flg) {
2026                 /*
2027                  * Because we have not calculated nlinks yet we can just
2028                  * set records to the flush state if the parent is in
2029                  * the same flush group as we are.
2030                  */
2031                 record->flush_state = HAMMER_FST_FLUSH;
2032                 record->flush_group = flg;
2033                 ++record->flush_group->refs;
2034                 hammer_ref(&record->lock);
2035
2036                 /*
2037                  * A general directory-add contributes to our visibility.
2038                  *
2039                  * Otherwise it is probably a directory-delete or 
2040                  * delete-on-disk record and does not contribute to our
2041                  * visbility (but we can still flush it).
2042                  */
2043                 if (record->type == HAMMER_MEM_RECORD_ADD)
2044                         return(1);
2045                 return(0);
2046         } else {
2047                 /*
2048                  * If the parent is not in our flush group we cannot
2049                  * flush this record yet, there is no visibility.
2050                  * We tell the parent to reflush and mark ourselves
2051                  * so the parent knows it should flush us too.
2052                  */
2053                 pip->flags |= HAMMER_INODE_REFLUSH;
2054                 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
2055                 return(-1);
2056         }
2057 }
2058
2059 /*
2060  * This is the core routine placing an inode into the FST_FLUSH state.
2061  */
2062 static void
2063 hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
2064 {
2065         hammer_mount_t hmp = ip->hmp;
2066         int go_count;
2067
2068         /*
2069          * Set flush state and prevent the flusher from cycling into
2070          * the next flush group.  Do not place the ip on the list yet.
2071          * Inodes not in the idle state get an extra reference.
2072          */
2073         KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
2074         if (ip->flush_state == HAMMER_FST_IDLE)
2075                 hammer_ref(&ip->lock);
2076         ip->flush_state = HAMMER_FST_FLUSH;
2077         ip->flush_group = flg;
2078         ++hmp->flusher.group_lock;
2079         ++hmp->count_iqueued;
2080         ++hammer_count_iqueued;
2081         ++flg->total_count;
2082         hammer_redo_fifo_start_flush(ip);
2083
2084 #if 0
2085         /*
2086          * We need to be able to vfsync/truncate from the backend.
2087          *
2088          * XXX Any truncation from the backend will acquire the vnode
2089          *     independently.
2090          */
2091         KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
2092         if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
2093                 ip->flags |= HAMMER_INODE_VHELD;
2094                 vref(ip->vp);
2095         }
2096 #endif
2097
2098         /*
2099          * Figure out how many in-memory records we can actually flush
2100          * (not including inode meta-data, buffers, etc).
2101          */
2102         KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
2103         if (flags & HAMMER_FLUSH_RECURSION) {
2104                 /*
2105                  * If this is an upwards recursion we do not want to
2106                  * recurse down again!
2107                  */
2108                 go_count = 1;
2109 #if 0
2110         } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2111                 /*
2112                  * No new records are added if we must complete a flush
2113                  * from a previous cycle, but we do have to move the records
2114                  * from the previous cycle to the current one.
2115                  */
2116 #if 0
2117                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2118                                    hammer_syncgrp_child_callback, NULL);
2119 #endif
2120                 go_count = 1;
2121 #endif
2122         } else {
2123                 /*
2124                  * Normal flush, scan records and bring them into the flush.
2125                  * Directory adds and deletes are usually skipped (they are
2126                  * grouped with the related inode rather than with the
2127                  * directory).
2128                  *
2129                  * go_count can be negative, which means the scan aborted
2130                  * due to the flush group being over-full and we should
2131                  * flush what we have.
2132                  */
2133                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2134                                    hammer_setup_child_callback, NULL);
2135         }
2136
2137         /*
2138          * This is a more involved test that includes go_count.  If we
2139          * can't flush, flag the inode and return.  If go_count is 0 we
2140          * were unable to flush any records in our rec_tree and
2141          * must ignore the XDIRTY flag.
2142          */
2143         if (go_count == 0) {
2144                 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
2145                         --hmp->count_iqueued;
2146                         --hammer_count_iqueued;
2147
2148                         --flg->total_count;
2149                         ip->flush_state = HAMMER_FST_SETUP;
2150                         ip->flush_group = NULL;
2151                         if (flags & HAMMER_FLUSH_SIGNAL) {
2152                                 ip->flags |= HAMMER_INODE_REFLUSH |
2153                                              HAMMER_INODE_RESIGNAL;
2154                         } else {
2155                                 ip->flags |= HAMMER_INODE_REFLUSH;
2156                         }
2157 #if 0
2158                         if (ip->flags & HAMMER_INODE_VHELD) {
2159                                 ip->flags &= ~HAMMER_INODE_VHELD;
2160                                 vrele(ip->vp);
2161                         }
2162 #endif
2163
2164                         /*
2165                          * REFLUSH is needed to trigger dependency wakeups
2166                          * when an inode is in SETUP.
2167                          */
2168                         ip->flags |= HAMMER_INODE_REFLUSH;
2169                         if (--hmp->flusher.group_lock == 0)
2170                                 wakeup(&hmp->flusher.group_lock);
2171                         return;
2172                 }
2173         }
2174
2175         /*
2176          * Snapshot the state of the inode for the backend flusher.
2177          *
2178          * We continue to retain save_trunc_off even when all truncations
2179          * have been resolved as an optimization to determine if we can
2180          * skip the B-Tree lookup for overwrite deletions.
2181          *
2182          * NOTE: The DELETING flag is a mod flag, but it is also sticky,
2183          * and stays in ip->flags.  Once set, it stays set until the
2184          * inode is destroyed.
2185          */
2186         if (ip->flags & HAMMER_INODE_TRUNCATED) {
2187                 KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
2188                 ip->sync_trunc_off = ip->trunc_off;
2189                 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
2190                 ip->flags &= ~HAMMER_INODE_TRUNCATED;
2191                 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
2192
2193                 /*
2194                  * The save_trunc_off used to cache whether the B-Tree
2195                  * holds any records past that point is not used until
2196                  * after the truncation has succeeded, so we can safely
2197                  * set it now.
2198                  */
2199                 if (ip->save_trunc_off > ip->sync_trunc_off)
2200                         ip->save_trunc_off = ip->sync_trunc_off;
2201         }
2202         ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
2203                            ~HAMMER_INODE_TRUNCATED);
2204         ip->sync_ino_leaf = ip->ino_leaf;
2205         ip->sync_ino_data = ip->ino_data;
2206         ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
2207 #ifdef DEBUG_TRUNCATE
2208         if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
2209                 kprintf("truncateS %016llx\n", ip->sync_trunc_off);
2210 #endif
2211
2212         /*
2213          * The flusher list inherits our inode and reference.
2214          */
2215         KKASSERT(flg->running == 0);
2216         RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip);
2217         if (--hmp->flusher.group_lock == 0)
2218                 wakeup(&hmp->flusher.group_lock);
2219
2220         /*
2221          * Auto-flush the group if it grows too large.  Make sure the
2222          * inode reclaim wait pipeline continues to work.
2223          */
2224         if (flg->total_count >= hammer_autoflush ||
2225             flg->total_count >= hammer_limit_reclaims / 4) {
2226                 if (hmp->fill_flush_group == flg)
2227                         hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry);
2228                 hammer_flusher_async(hmp, flg);
2229         }
2230 }
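
/*
 * From this point on the inode carries two parallel copies of its
 * state: the frontend continues to modify ip->flags, ip->ino_data and
 * ip->trunc_off while the backend flushes the snapshot taken above via
 * ip->sync_flags, ip->sync_ino_leaf, ip->sync_ino_data and
 * ip->sync_trunc_off.
 */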
2231
2232 /*
2233  * Callback for scan of ip->rec_tree.  Try to include each record in our
2234  * flush.  ip->flush_group has been set but the inode has not yet been
2235  * moved into a flushing state.
2236  *
2237  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
2238  * both inodes.
2239  *
2240  * We return 1 for any record placed or found in FST_FLUSH, which prevents
2241  * the caller from shortcutting the flush.
2242  */
2243 static int
2244 hammer_setup_child_callback(hammer_record_t rec, void *data)
2245 {
2246         hammer_flush_group_t flg;
2247         hammer_inode_t target_ip;
2248         hammer_inode_t ip;
2249         int r;
2250
2251         /*
2252          * Records deleted or committed by the backend are ignored.
2253          * Note that the flush detects deleted frontend records at
2254          * multiple points to deal with races.  This is just the first
2255          * line of defense.  The only time HAMMER_RECF_DELETED_FE cannot
2256          * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
2257          * messes up link-count calculations.
2258          *
2259          * NOTE: Don't get confused between record deletion and, say,
2260          * directory entry deletion.  The deletion of a directory entry
2261          * which is on-media has nothing to do with the record deletion
2262          * flags.
2263          */
2264         if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
2265                           HAMMER_RECF_COMMITTED)) {
2266                 if (rec->flush_state == HAMMER_FST_FLUSH) {
2267                         KKASSERT(rec->flush_group == rec->ip->flush_group);
2268                         r = 1;
2269                 } else {
2270                         r = 0;
2271                 }
2272                 return(r);
2273         }
2274
2275         /*
2276          * If the record is in an idle state it has no dependencies and
2277          * can be flushed.
2278          */
2279         ip = rec->ip;
2280         flg = ip->flush_group;
2281         r = 0;
2282
2283         switch(rec->flush_state) {
2284         case HAMMER_FST_IDLE:
2285                 /*
2286                  * The record has no setup dependency; we can flush it.
2287                  */
2288                 KKASSERT(rec->target_ip == NULL);
2289                 rec->flush_state = HAMMER_FST_FLUSH;
2290                 rec->flush_group = flg;
2291                 ++flg->refs;
2292                 hammer_ref(&rec->lock);
2293                 r = 1;
2294                 break;
2295         case HAMMER_FST_SETUP:
2296                 /*
2297                  * The record has a setup dependency.  These are typically
2298                  * directory entry adds and deletes.  Such entries will be
2299                  * flushed when their inodes are flushed so we do not
2300                  * usually have to add them to the flush here.  However,
2301                  * if the target_ip has set HAMMER_INODE_CONN_DOWN then
2302                  * it is asking us to flush this record (and it).
2303                  */
2304                 target_ip = rec->target_ip;
2305                 KKASSERT(target_ip != NULL);
2306                 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
2307
2308                 /*
2309                  * If the target IP is already flushing in our group
2310                  * we could associate the record, but target_ip has
2311                  * already synced ino_data to sync_ino_data and we
2312                  * would also have to adjust nlinks.   Plus there are
2313                  * ordering issues for adds and deletes.
2314                  *
2315                  * Reflush downward if this is an ADD, and upward if
2316                  * this is a DEL.
2317                  */
2318                 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
2319                         if (rec->type == HAMMER_MEM_RECORD_ADD)
2320                                 ip->flags |= HAMMER_INODE_REFLUSH;
2321                         else
2322                                 target_ip->flags |= HAMMER_INODE_REFLUSH;
2323                         break;
2324                 } 
2325
2326                 /*
2327                  * Target IP is not yet flushing.  This can get complex
2328                  * because we have to be careful about the recursion.
2329                  *
2330                  * Directories create an issue for us in that if a flush
2331                  * of a directory is requested the expectation is to flush
2332                  * any pending directory entries, but this will cause the
2333                  * related inodes to recursively flush as well.  We can't
2334                  * really defer the operation, so just flush as many
2335                  * records as we can and let a reflush pick up the rest.
2336                  */
2337 #if 0
2338                 if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
2339                     (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
2340                         /*
2341                          * We aren't reclaiming and the target ip was not
2342                          * previously prevented from flushing due to this
2343                          * record dependency.  Do not flush this record.
2344                          */
2345                         /*r = 0;*/
2346                 } else
2347 #endif
2348                 if (flg->total_count + flg->refs >
2349                            ip->hmp->undo_rec_limit) {
2350                         /*
2351                          * Our flush group is over-full and we risk blowing
2352                          * out the UNDO FIFO.  Stop the scan, flush what we
2353                          * have, then reflush the directory.
2354                          *
2355                          * The directory may be forced through multiple
2356                          * flush groups before it can be completely
2357                          * flushed.
2358                          */
2359                         ip->flags |= HAMMER_INODE_RESIGNAL |
2360                                      HAMMER_INODE_REFLUSH;
2361                         r = -1;
2362                 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
2363                         /*
2364                          * If the target IP is not flushing we can force
2365                          * it to flush, even if it is unable to write out
2366                          * any of its own records we have at least one in
2367                          * hand that we CAN deal with.
2368                          */
2369                         rec->flush_state = HAMMER_FST_FLUSH;
2370                         rec->flush_group = flg;
2371                         ++flg->refs;
2372                         hammer_ref(&rec->lock);
2373                         hammer_flush_inode_core(target_ip, flg,
2374                                                 HAMMER_FLUSH_RECURSION);
2375                         r = 1;
2376                 } else {
2377                         /*
2378                          * General or delete-on-disk record.
2379                          *
2380                          * XXX this needs help.  If a delete-on-disk we could
2381                          * disconnect the target.  If the target has its own
2382                          * dependencies they really need to be flushed.
2383                          *
2384                          * XXX
2385                          */
2386                         rec->flush_state = HAMMER_FST_FLUSH;
2387                         rec->flush_group = flg;
2388                         ++flg->refs;
2389                         hammer_ref(&rec->lock);
2390                         hammer_flush_inode_core(target_ip, flg,
2391                                                 HAMMER_FLUSH_RECURSION);
2392                         r = 1;
2393                 }
2394                 break;
2395         case HAMMER_FST_FLUSH:
2396                 /* 
2397                  * The record could be part of a previous flush group if the
2398                  * inode is a directory (the record being a directory entry).
2399                  * Once the flush group was closed a hammer_test_inode()
2400                  * function can cause a new flush group to be setup, placing
2401                  * the directory inode itself in a new flush group.
2402                  *
2403                  * When associated with a previous flush group we count it
2404                  * as if it were in our current flush group, since it will
2405                  * effectively be flushed by the time we flush our current
2406                  * flush group.
2407                  */
2408                 KKASSERT(
2409                     rec->ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY ||
2410                     rec->flush_group == flg);
2411                 r = 1;
2412                 break;
2413         }
2414         return(r);
2415 }
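
/*
 * The RB_SCAN in hammer_flush_inode_core() accumulates the return
 * values above into go_count; a -1 return aborts the scan, leaving
 * go_count negative, which the core routine treats as "flush what we
 * have and reflush the directory".
 */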
2416
2417 #if 0
2418 /*
2419  * This version just moves records already in a flush state to the new
2420  * flush group and that is it.
2421  */
2422 static int
2423 hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
2424 {
2425         hammer_inode_t ip = rec->ip;
2426
2427         switch(rec->flush_state) {
2428         case HAMMER_FST_FLUSH:
2429                 KKASSERT(rec->flush_group == ip->flush_group);
2430                 break;
2431         default:
2432                 break;
2433         }
2434         return(0);
2435 }
2436 #endif
2437
2438 /*
2439  * Wait for a previously queued flush to complete.
2440  *
2441  * If a critical error occurred we don't try to wait.
2442  */
2443 void
2444 hammer_wait_inode(hammer_inode_t ip)
2445 {
2446         /*
2447          * The inode can be in a SETUP state in which case RESIGNAL
2448          * should be set.  If RESIGNAL is not set then the previous
2449          * flush completed and a later operation placed the inode
2450          * in a passive setup state again, so we're done.
2451          *
2452          * The inode can be in a FLUSH state in which case we
2453          * can just wait for completion.
2454          */
2455         while (ip->flush_state == HAMMER_FST_FLUSH ||
2456             (ip->flush_state == HAMMER_FST_SETUP &&
2457              (ip->flags & HAMMER_INODE_RESIGNAL))) {
2458                 /*
2459                  * Don't try to flush on a critical error
2460                  */
2461                 if (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
2462                         break;
2463
2464                 /*
2465                  * If the inode was already being flushed its flg
2466                  * may not have been queued to the backend.  We
2467                  * have to make sure it gets queued or we can wind
2468                  * up blocked or deadlocked (particularly if we are
2469                  * the vnlru thread).
2470                  */
2471                 if (ip->flush_state == HAMMER_FST_FLUSH) {
2472                         KKASSERT(ip->flush_group);
2473                         if (ip->flush_group->closed == 0) {
2474                                 if (hammer_debug_inode) {
2475                                         kprintf("hammer: debug: forcing "
2476                                                 "async flush ip %016jx\n",
2477                                                 (intmax_t)ip->obj_id);
2478                                 }
2479                                 hammer_flusher_async(ip->hmp,
2480                                                      ip->flush_group);
2481                                 continue; /* retest */
2482                         }
2483                 }
2484
2485                 /*
2486                  * In a flush state with the flg queued to the backend
2487                  * or in a setup state with RESIGNAL set, we can safely
2488                  * wait.
2489                  */
2490                 ip->flags |= HAMMER_INODE_FLUSHW;
2491                 tsleep(&ip->flags, 0, "hmrwin", 0);
2492         }
2493
2494 #if 0
2495         /*
2496          * The inode may have been in a passive setup state,
2497          * call flush to make sure we get signaled.
2498          */
2499         if (ip->flush_state == HAMMER_FST_SETUP)
2500                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2501 #endif
2502
2503 }
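
/*
 * The HAMMER_INODE_FLUSHW flag set above pairs with the backend
 * completion path: hammer_flush_inode_done() clears FLUSHW and issues
 * wakeup(&ip->flags) when it finishes with the inode.
 */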
2504
2505 /*
2506  * Called by the backend code when a flush has been completed.
2507  * The inode has already been removed from the flush list.
2508  *
2509  * A pipelined flush can occur, in which case we must re-enter the
2510  * inode on the list and re-copy its fields.
2511  */
2512 void
2513 hammer_flush_inode_done(hammer_inode_t ip, int error)
2514 {
2515         hammer_mount_t hmp;
2516         int dorel;
2517
2518         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
2519
2520         hmp = ip->hmp;
2521
2522         /*
2523          * Auto-reflush if the backend could not completely flush
2524          * the inode.  This fixes a case where a deferred buffer flush
2525          * could cause fsync to return early.
2526          */
2527         if (ip->sync_flags & HAMMER_INODE_MODMASK)
2528                 ip->flags |= HAMMER_INODE_REFLUSH;
2529
2530         /*
2531          * Merge left-over flags back into the frontend and fix the state.
2532          * Incomplete truncations are retained by the backend.
2533          */
2534         ip->error = error;
2535         ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
2536         ip->sync_flags &= HAMMER_INODE_TRUNCATED;
2537
2538         /*
2539          * The backend may have adjusted nlinks, so if the adjusted nlinks
2540          * does not match the frontend's, set the frontend's DDIRTY flag again.
2541          */
2542         if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
2543                 ip->flags |= HAMMER_INODE_DDIRTY;
2544
2545         /*
2546          * Fix up the dirty buffer status.
2547          */
2548         if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
2549                 ip->flags |= HAMMER_INODE_BUFS;
2550         }
2551         hammer_redo_fifo_end_flush(ip);
2552
2553         /*
2554          * Re-set the XDIRTY flag if some of the inode's in-memory records
2555          * could not be flushed.
2556          */
2557         KKASSERT((RB_EMPTY(&ip->rec_tree) &&
2558                   (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
2559                  (!RB_EMPTY(&ip->rec_tree) &&
2560                   (ip->flags & HAMMER_INODE_XDIRTY) != 0));
2561
2562         /*
2563          * Do not lose track of inodes which no longer have vnode
2564          * associations, otherwise they may never get flushed again.
2565          *
2566          * The reflush flag can be set superfluously, causing extra pain
2567          * for no reason.  If the inode is no longer modified it no longer
2568          * needs to be flushed.
2569          */
2570         if (ip->flags & HAMMER_INODE_MODMASK) {
2571                 if (ip->vp == NULL)
2572                         ip->flags |= HAMMER_INODE_REFLUSH;
2573         } else {
2574                 ip->flags &= ~HAMMER_INODE_REFLUSH;
2575         }
2576         if (ip->flags & HAMMER_INODE_MODMASK)
2577                 hammer_inode_dirty(ip);
2578
2579         /*
2580          * Adjust the flush state.
2581          */
2582         if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2583                 /*
2584                  * We were unable to flush out all our records, leave the
2585                  * inode in a flush state and in the current flush group.
2586                  * The flush group will be re-run.
2587                  *
2588                  * This occurs if the UNDO block gets too full or there is
2589                  * too much dirty meta-data and allows the flusher to
2590                  * finalize the UNDO block and then re-flush.
2591                  */
2592                 ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
2593                 dorel = 0;
2594         } else {
2595                 /*
2596                  * Remove from the flush_group
2597                  */
2598                 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
2599                 ip->flush_group = NULL;
2600
2601 #if 0
2602                 /*
2603                  * Clean up the vnode ref and tracking counts.
2604                  */
2605                 if (ip->flags & HAMMER_INODE_VHELD) {
2606                         ip->flags &= ~HAMMER_INODE_VHELD;
2607                         vrele(ip->vp);
2608                 }
2609 #endif
2610                 --hmp->count_iqueued;
2611                 --hammer_count_iqueued;
2612
2613                 /*
2614                  * And adjust the state.
2615                  */
2616                 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
2617                         ip->flush_state = HAMMER_FST_IDLE;
2618                         dorel = 1;
2619                 } else {
2620                         ip->flush_state = HAMMER_FST_SETUP;
2621                         dorel = 0;
2622                 }
2623
2624                 /*
2625                  * If the frontend is waiting for a flush to complete,
2626                  * wake it up.
2627                  */
2628                 if (ip->flags & HAMMER_INODE_FLUSHW) {
2629                         ip->flags &= ~HAMMER_INODE_FLUSHW;
2630                         wakeup(&ip->flags);
2631                 }
2632
2633                 /*
2634                  * If the frontend made more changes and requested another
2635                  * flush, then try to get it running.
2636                  *
2637                  * Reflushes are aborted when the inode is errored out.
2638                  */
2639                 if (ip->flags & HAMMER_INODE_REFLUSH) {
2640                         ip->flags &= ~HAMMER_INODE_REFLUSH;
2641                         if (ip->flags & HAMMER_INODE_RESIGNAL) {
2642                                 ip->flags &= ~HAMMER_INODE_RESIGNAL;
2643                                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2644                         } else {
2645                                 hammer_flush_inode(ip, 0);
2646                         }
2647                 }
2648         }
2649
2650         /*
2651          * If we have no parent dependencies we can clear CONN_DOWN
2652          */
2653         if (TAILQ_EMPTY(&ip->target_list))
2654                 ip->flags &= ~HAMMER_INODE_CONN_DOWN;
2655
2656         /*
2657          * If the inode is now clean drop the space reservation.
2658          */
2659         if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
2660             (ip->flags & HAMMER_INODE_RSV_INODES)) {
2661                 ip->flags &= ~HAMMER_INODE_RSV_INODES;
2662                 --hmp->rsv_inodes;
2663         }
2664
2665         ip->flags &= ~HAMMER_INODE_SLAVEFLUSH;
2666
2667         if (dorel)
2668                 hammer_rel_inode(ip, 0);
2669 }
2670
2671 /*
2672  * Called from hammer_sync_inode() to synchronize in-memory records
2673  * to the media.
2674  */
2675 static int
2676 hammer_sync_record_callback(hammer_record_t record, void *data)
2677 {
2678         hammer_cursor_t cursor = data;
2679         hammer_transaction_t trans = cursor->trans;
2680         hammer_mount_t hmp = trans->hmp;
2681         int error;
2682
2683         /*
2684          * Skip records that do not belong to the current flush.
2685          */
2686         ++hammer_stats_record_iterations;
2687         if (record->flush_state != HAMMER_FST_FLUSH)
2688                 return(0);
2689
2690 #if 1
2691         if (record->flush_group != record->ip->flush_group) {
2692                 kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group, record->ip->flush_group);
2693                 if (hammer_debug_critical)
2694                         Debugger("blah2");
2695                 return(0);
2696         }
2697 #endif
2698         KKASSERT(record->flush_group == record->ip->flush_group);
2699
2700         /*
2701          * Interlock the record using the BE flag.  Once BE is set the
2702          * frontend cannot change the state of FE.
2703          *
2704          * NOTE: If FE is set prior to us setting BE we still sync the
2705          * record out, but the flush completion code converts it to 
2706          * a delete-on-disk record instead of destroying it.
2707          */
2708         KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
2709         record->flags |= HAMMER_RECF_INTERLOCK_BE;
2710
2711         /*
2712          * The backend has already disposed of the record.
2713          */
2714         if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
2715                 error = 0;
2716                 goto done;
2717         }
2718
2719         /*
2720          * If the whole inode is being deleted and all on-disk records will
2721          * be deleted very soon, we can't sync any new records to disk
2722          * because they will be deleted in the same transaction they were
2723          * created in (delete_tid == create_tid), which will assert.
2724          *
2725          * XXX There may be a case with RECORD_ADD with DELETED_FE set
2726          * that we currently panic on.
2727          */
2728         if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
2729                 switch(record->type) {
2730                 case HAMMER_MEM_RECORD_DATA:
2731                         /*
2732                          * We don't have to do anything; if the record was
2733                          * committed the space will have been accounted for
2734                          * in the blockmap.
2735                          */
2736                         /* fall through */
2737                 case HAMMER_MEM_RECORD_GENERAL:
2738                         /*
2739                          * Set deleted-by-backend flag.  Do not set the
2740                          * backend committed flag, because we are throwing
2741                          * the record away.
2742                          */
2743                         record->flags |= HAMMER_RECF_DELETED_BE;
2744                         ++record->ip->rec_generation;
2745                         error = 0;
2746                         goto done;
2747                 case HAMMER_MEM_RECORD_ADD:
2748                         panic("hammer_sync_record_callback: illegal add "
2749                               "during inode deletion record %p", record);
2750                         break; /* NOT REACHED */
2751                 case HAMMER_MEM_RECORD_INODE:
2752                         panic("hammer_sync_record_callback: attempt to "
2753                               "sync inode record %p?", record);
2754                         break; /* NOT REACHED */
2755                 case HAMMER_MEM_RECORD_DEL:
2756                         /* 
2757                          * Follow through and issue the on-disk deletion
2758                          */
2759                         break;
2760                 }
2761         }
2762
2763         /*
2764          * If DELETED_FE is set, special handling is needed for directory
2765          * entries.  Dependent pieces related to the directory entry may
2766          * have already been synced to disk.  If this occurs we have to
2767          * sync the directory entry and then change the in-memory record
2768          * from an ADD to a DELETE to cover the fact that it's been
2769          * deleted by the frontend.
2770          *
2771          * A directory delete covering record (MEM_RECORD_DEL) can never
2772          * be deleted by the frontend.
2773          *
2774          * Any other record type (aka DATA) can be deleted by the frontend.
2775          * XXX At the moment the flusher must skip it because there may
2776          * be another data record in the flush group for the same block,
2777          * meaning that some frontend data changes can leak into the backend's
2778          * synchronization point.
2779          */
2780         if (record->flags & HAMMER_RECF_DELETED_FE) {
2781                 if (record->type == HAMMER_MEM_RECORD_ADD) {
2782                         /*
2783                          * Convert a front-end deleted directory-add to
2784                          * a directory-delete entry later.
2785                          */
2786                         record->flags |= HAMMER_RECF_CONVERT_DELETE;
2787                 } else {
2788                         /*
2789                          * Dispose of the record (race case).  Mark as
2790                          * deleted by backend (and not committed).
2791                          */
2792                         KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
2793                         record->flags |= HAMMER_RECF_DELETED_BE;
2794                         ++record->ip->rec_generation;
2795                         error = 0;
2796                         goto done;
2797                 }
2798         }
2799
2800         /*
2801          * Assign the create_tid for new records.  Deletions already
2802          * have the record's entire key properly set up.
2803          */
2804         if (record->type != HAMMER_MEM_RECORD_DEL) {
2805                 record->leaf.base.create_tid = trans->tid;
2806                 record->leaf.create_ts = trans->time32;
2807         }
2808
2809         /*
2810          * This actually moves the record to the on-media B-Tree.  We
2811          * must also generate REDO_TERM entries in the UNDO/REDO FIFO
2812          * indicating that the related REDO_WRITE(s) have been committed.
2813          *
2814          * During recovery any REDO_TERMs within the nominal recovery span
2815          * are ignored since the related meta-data is being undone, causing
2816          * any matching REDO_WRITEs to execute.  The REDO_TERMs outside
2817          * the nominal recovery span will match against REDO_WRITEs and
2818          * prevent them from being executed (because the meta-data has
2819          * already been synchronized).
2820          */
2821         if (record->flags & HAMMER_RECF_REDO) {
2822                 KKASSERT(record->type == HAMMER_MEM_RECORD_DATA);
2823                 hammer_generate_redo(trans, record->ip,
2824                                      record->leaf.base.key -
2825                                          record->leaf.data_len,
2826                                      HAMMER_REDO_TERM_WRITE,
2827                                      NULL,
2828                                      record->leaf.data_len);
2829         }
2830
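             /*
              * Sync the record to the media B-Tree.  An EDEADLK from the
              * cursor code requires us to tear down and reinitialize the
              * cursor and retry the operation.
              */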
2831         for (;;) {
2832                 error = hammer_ip_sync_record_cursor(cursor, record);
2833                 if (error != EDEADLK)
2834                         break;
2835                 hammer_done_cursor(cursor);
2836                 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
2837                                            record->ip);
2838                 if (error)
2839                         break;
2840         }
2841         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
2842
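             /*
              * Negate any error so the RB_SCAN in hammer_sync_inode()
              * aborts; the caller converts it back to a positive errno.
              */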
2843         if (error)
2844                 error = -error;
2845 done:
2846         hammer_flush_record_done(record, error);
2847
2848         /*
2849          * Do partial finalization if we have built up too many dirty
2850          * buffers.  Otherwise a buffer cache deadlock can occur when
2851          * doing things like creating tens of thousands of tiny files.
2852          *
2853          * We must release our cursor lock to avoid a 3-way deadlock
2854          * due to the exclusive sync lock the finalizer must get.
2855          *
2856          * WARNING: See warnings in hammer_unlock_cursor() function.
2857          */
2858         if (hammer_flusher_meta_limit(hmp) ||
2859             vm_page_count_severe()) {
2860                 hammer_unlock_cursor(cursor);
2861                 hammer_flusher_finalize(trans, 0);
2862                 hammer_lock_cursor(cursor);
2863         }
2864         return(error);
2865 }
2866
2867 /*
2868  * Backend function called by the flusher to sync an inode to media.
2869  */
2870 int
2871 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
2872 {
2873         struct hammer_cursor cursor;
2874         hammer_node_t tmp_node;
2875         hammer_record_t depend;
2876         hammer_record_t next;
2877         int error, tmp_error;
2878         u_int64_t nlinks;
2879
2880         if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
2881                 return(0);
2882
2883         error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2884         if (error)
2885                 goto done;
2886
2887         /*
2888          * Any directory records referencing this inode which are not in
2889          * our current flush group must adjust our nlink count for the
2890          * purposes of synchronizing to disk.
2891          *
2892          * Records which are in our flush group can be unlinked from our
2893          * inode now, potentially allowing the inode to be physically
2894          * deleted.
2895          *
2896          * This cannot block.
2897          */
2898         nlinks = ip->ino_data.nlinks;
2899         next = TAILQ_FIRST(&ip->target_list);
2900         while ((depend = next) != NULL) {
2901                 next = TAILQ_NEXT(depend, target_entry);
2902                 if (depend->flush_state == HAMMER_FST_FLUSH &&
2903                     depend->flush_group == ip->flush_group) {
2904                         /*
2905                          * If this is an ADD that was deleted by the frontend
2906                          * the frontend nlinks count will have already been
2907                          * decremented, but the backend is going to sync its
2908                          * directory entry and must account for it.  The
2909                          * record will be converted to a delete-on-disk when
2910                          * it gets synced.
2911                          *
2912                          * If the ADD was not deleted by the frontend we
2913          * can remove the dependency from our target_list.
2914                          */
2915                         if (depend->flags & HAMMER_RECF_DELETED_FE) {
2916                                 ++nlinks;
2917                         } else {
2918                                 TAILQ_REMOVE(&ip->target_list, depend,
2919                                              target_entry);
2920                                 depend->target_ip = NULL;
2921                         }
2922                 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
2923                         /*
2924                          * Not part of our flush group and not deleted by
2925                          * the front-end, adjust the link count synced to
2926                          * the media (undo what the frontend did when it
2927                          * queued the record).
2928                          */
2929                         KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
2930                         switch(depend->type) {
2931                         case HAMMER_MEM_RECORD_ADD:
2932                                 --nlinks;
2933                                 break;
2934                         case HAMMER_MEM_RECORD_DEL:
2935                                 ++nlinks;
2936                                 break;
2937                         default:
2938                                 break;
2939                         }
2940                 }
2941         }
2942
2943         /*
2944          * Set dirty if we had to modify the link count.
2945          */
2946         if (ip->sync_ino_data.nlinks != nlinks) {
2947                 KKASSERT((int64_t)nlinks >= 0);
2948                 ip->sync_ino_data.nlinks = nlinks;
2949                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
2950         }
2951
2952         /*
2953          * If there is a truncation queued, destroy any data past the (aligned)
2954          * truncation point.  Userland will have dealt with the buffer
2955          * containing the truncation point for us.
2956          *
2957          * We don't flush pending frontend data buffers until after we've
2958          * dealt with the truncation.
2959          */
2960         if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2961                 /*
2962                  * Interlock trunc_off.  The VOP front-end may continue to
2963                  * make adjustments to it while we are blocked.
2964                  */
2965                 off_t trunc_off;
2966                 off_t aligned_trunc_off;
2967                 int blkmask;
2968
2969                 trunc_off = ip->sync_trunc_off;
2970                 blkmask = hammer_blocksize(trunc_off) - 1;
2971                 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
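                     /*
                      * e.g. with a 16K block size (blkmask 0x3FFF), a
                      * trunc_off of 0x4200 rounds up to an
                      * aligned_trunc_off of 0x8000.
                      */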
2972
2973                 /*
2974                  * Delete any whole blocks on-media.  The front-end has
2975                  * already cleaned out any partial block and made it
2976                  * pending.  The front-end may have updated trunc_off
2977                  * while we were blocked so we only use sync_trunc_off.
2978                  *
2979                  * This operation can blow out the buffer cache, EWOULDBLOCK
2980                  * means we were unable to complete the deletion.  The
2981                  * deletion will update sync_trunc_off in that case.
2982                  */
2983                 error = hammer_ip_delete_range(&cursor, ip,
2984                                                 aligned_trunc_off,
2985                                                 0x7FFFFFFFFFFFFFFFLL, 2);
2986                 if (error == EWOULDBLOCK) {
2987                         ip->flags |= HAMMER_INODE_WOULDBLOCK;
2988                         error = 0;
2989                         goto defer_buffer_flush;
2990                 }
2991
2992                 if (error)
2993                         goto done;
2994
2995                 /*
2996                  * Generate a REDO_TERM_TRUNC entry in the UNDO/REDO FIFO.
2997                  *
2998                  * XXX we do this even if we did not previously generate
2999                  * a REDO_TRUNC record.  This operation may enclose the
3000                  * range for multiple prior truncation entries in the REDO
3001                  * log.
3002                  */
3003                 if (trans->hmp->version >= HAMMER_VOL_VERSION_FOUR &&
3004                     (ip->flags & HAMMER_INODE_RDIRTY)) {
3005                         hammer_generate_redo(trans, ip, aligned_trunc_off,
3006                                              HAMMER_REDO_TERM_TRUNC,
3007                                              NULL, 0);
3008                 }
3009
3010                 /*
3011                  * Clear the truncation flag on the backend after we have
3012                  * completed the deletions.  Backend data is now good again
3013                  * (including new records we are about to sync, below).
3014                  *
3015                  * Leave sync_trunc_off intact.  As we write additional
3016                  * records the backend will update sync_trunc_off.  This
3017                  * tells the backend whether it can skip the overwrite
3018                  * test.  This should work properly even when the backend
3019                  * writes full blocks where the truncation point straddles
3020                  * the block because the comparison is against the base
3021                  * offset of the record.
3022                  */
3023                 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
3024                 /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
3025         } else {
3026                 error = 0;
3027         }
3028
3029         /*
3030          * Now sync related records.  These will typically be directory
3031          * entries, records tracking direct-writes, or delete-on-disk records.
3032          */
3033         if (error == 0) {
3034                 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
3035                                     hammer_sync_record_callback, &cursor);
3036                 if (tmp_error < 0)
3037                         tmp_error = -tmp_error;
3038                 if (tmp_error)
3039                         error = tmp_error;
3040         }
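             /* Remember the B-Tree position to speed up the next sync */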
3041         hammer_cache_node(&ip->cache[1], cursor.node);
3042
3043         /*
3044          * Re-seek for inode update, assuming our cache hasn't been ripped
3045          * out from under us.
3046          */
3047         if (error == 0) {
3048                 tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
3049                 if (tmp_node) {
3050                         hammer_cursor_downgrade(&cursor);
3051                         hammer_lock_sh(&tmp_node->lock);
3052                         if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
3053                                 hammer_cursor_seek(&cursor, tmp_node, 0);
3054                         hammer_unlock(&tmp_node->lock);
3055                         hammer_rel_node(tmp_node);
3056                 }
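                     /* A failed re-seek is not fatal */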
3057                 error = 0;
3058         }
3059
3060         /*
3061          * If we are deleting the inode the frontend had better not have
3062          * any active references on elements making up the inode.
3063          *
3064          * The call to hammer_ip_delete_clean() cleans up auxiliary records
3065          * but not DB or DATA records.  Those must have already been deleted
3066          * by the normal truncation mechanic.
3067          */
3068         if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
3069             RB_EMPTY(&ip->rec_tree) &&
3070             (ip->sync_flags & HAMMER_INODE_DELETING) &&
3071             (ip->flags & HAMMER_INODE_DELETED) == 0) {
3072                 int count1 = 0;
3073
3074                 error = hammer_ip_delete_clean(&cursor, ip, &count1);
3075                 if (error == 0) {
3076                         ip->flags |= HAMMER_INODE_DELETED;
3077                         ip->sync_flags &= ~HAMMER_INODE_DELETING;
3078                         ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
3079                         KKASSERT(RB_EMPTY(&ip->rec_tree));
3080
3081                         /*
3082                          * Set delete_tid in both the frontend and backend
3083                          * copy of the inode record.  The DELETED flag handles
3084                          * this; do not set DDIRTY.
3085                          */
3086                         ip->ino_leaf.base.delete_tid = trans->tid;
3087                         ip->sync_ino_leaf.base.delete_tid = trans->tid;
3088                         ip->ino_leaf.delete_ts = trans->time32;
3089                         ip->sync_ino_leaf.delete_ts = trans->time32;
3090
3092                         /*
3093                          * Adjust the inode count in the volume header
3094                          */
3095                         hammer_sync_lock_sh(trans);
3096                         if (ip->flags & HAMMER_INODE_ONDISK) {
3097                                 hammer_modify_volume_field(trans,
3098                                                            trans->rootvol,
3099                                                            vol0_stat_inodes);
3100                                 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
3101                                 hammer_modify_volume_done(trans->rootvol);
3102                         }
3103                         hammer_sync_unlock(trans);
3104                 }
3105         }
3106
3107         if (error)
3108                 goto done;
3109         ip->sync_flags &= ~HAMMER_INODE_BUFS;
3110
3111 defer_buffer_flush:
3112         /*
3113          * Now update the inode's on-disk inode-data and/or on-disk record.
3114          * DELETED and ONDISK are managed only in ip->flags.
3115          *
3116          * In the case of a deferred buffer flush we still update the on-disk
3117          * inode to satisfy visibility requirements if there happen to be
3118          * directory dependencies.
3119          */
3120         switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
3121         case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
3122                 /*
3123                  * If deleted and on-disk, don't set any additional flags.
3124                  * The delete flag takes care of things.
3125                  *
3126                  * Clear flags which may have been set by the frontend.
3127                  */
3128                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
3129                                     HAMMER_INODE_SDIRTY |
3130                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
3131                                     HAMMER_INODE_DELETING);
3132                 break;
3133         case HAMMER_INODE_DELETED:
3134                 /*
3135                  * Take care of the case where a deleted inode was never
3136                  * flushed to the disk in the first place.
3137                  *
3138                  * Clear flags which may have been set by the frontend.
3139                  */
3140                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
3141                                     HAMMER_INODE_SDIRTY |
3142                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
3143                                     HAMMER_INODE_DELETING);
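                     /*
                      * Throw away any remaining in-memory records.  The
                      * reference and oneref assertion guarantee each
                      * record is destroyed when we release it.
                      */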
3144                 while (RB_ROOT(&ip->rec_tree)) {
3145                         hammer_record_t record = RB_ROOT(&ip->rec_tree);
3146                         hammer_ref(&record->lock);
3147                         KKASSERT(hammer_oneref(&record->lock));
3148                         record->flags |= HAMMER_RECF_DELETED_BE;
3149                         ++record->ip->rec_generation;
3150                         hammer_rel_mem_record(record);
3151                 }
3152                 break;
3153         case HAMMER_INODE_ONDISK:
3154                 /*
3155                  * If already on-disk, do not set any additional flags.
3156                  */
3157                 break;
3158         default:
3159                 /*
3160                  * If not on-disk and not deleted, set DDIRTY to force
3161                  * an initial record to be written.
3162                  *
3163                  * Also set the create_tid in both the frontend and backend
3164                  * copy of the inode record.
3165                  */
3166                 ip->ino_leaf.base.create_tid = trans->tid;
3167                 ip->ino_leaf.create_ts = trans->time32;
3168                 ip->sync_ino_leaf.base.create_tid = trans->tid;
3169                 ip->sync_ino_leaf.create_ts = trans->time32;
3170                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
3171                 break;
3172         }
3173
3174         /*
3175          * If DDIRTY or SDIRTY is set, write out a new record.
3176          * If the inode is already on-disk the old record is marked as
3177          * deleted.
3178          *
3179          * If DELETED is set hammer_update_inode() will delete the existing
3180          * record without writing out a new one.
3181          *
3182          * If *ONLY* the ITIMES flag is set we can update the record in-place.
3183          */
3184         if (ip->flags & HAMMER_INODE_DELETED) {
3185                 error = hammer_update_inode(&cursor, ip);
3186         } else 
3187         if (!(ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY)) &&
3188             (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
3189                 error = hammer_update_itimes(&cursor, ip);
3190         } else
3191         if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY |
3192                               HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
3193                 error = hammer_update_inode(&cursor, ip);
3194         }
3195 done:
3196         if (ip->flags & HAMMER_INODE_MODMASK)
3197                 hammer_inode_dirty(ip);
3198         if (error) {
3199                 hammer_critical_error(ip->hmp, ip, error,
3200                                       "while syncing inode");
3201         }
3202         hammer_done_cursor(&cursor);
3203         return(error);
3204 }
3205
3206 /*
3207  * This routine is called when the OS is no longer actively referencing
3208  * the inode (but might still be keeping it cached), or when releasing
3209  * the last reference to an inode.
3210  *
3211  * At this point if the inode's nlinks count is zero we want to destroy
3212  * it, which may mean destroying it on-media too.
3213  */
3214 void
3215 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
3216 {
3217         struct vnode *vp;
3218
3219         /*
3220          * Set the DELETING flag when the link count drops to 0 and the
3221          * OS no longer has any opens on the inode.
3222          *
3223          * The backend will clear DELETING (a mod flag) and set DELETED
3224          * (a state flag) when it is actually able to perform the
3225          * operation.
3226          *
3227          * Don't reflag the deletion if the flusher is currently syncing
3228          * one that was already flagged.  A previously set DELETING flag
3229          * may bounce around flags and sync_flags until the operation is
3230          * completely done.
3231          *
3232          * Do not attempt to modify a snapshot inode (one set to read-only).
3233          */
3234         if (ip->ino_data.nlinks == 0 &&
3235             ((ip->flags | ip->sync_flags) & (HAMMER_INODE_RO|HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
3236                 ip->flags |= HAMMER_INODE_DELETING;
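                     /*
                      * Truncating to 0 causes the backend to destroy
                      * all of the inode's data records before the
                      * physical deletion is performed.
                      */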
3237                 ip->flags |= HAMMER_INODE_TRUNCATED;
3238                 ip->trunc_off = 0;
3239                 vp = NULL;
3240                 if (getvp) {
3241                         if (hammer_get_vnode(ip, &vp) != 0)
3242                                 return;
3243                 }
3244
3245                 /*
3246                  * Final cleanup
3247                  */
3248                 if (ip->vp)
3249                         nvtruncbuf(ip->vp, 0, HAMMER_BUFSIZE, 0, 0);
3250                 if (ip->flags & HAMMER_INODE_MODMASK)
3251                         hammer_inode_dirty(ip);
3252                 if (getvp)
3253                         vput(vp);
3254         }
3255 }
3256
3257 /*
3258  * After potentially resolving a dependency the inode is tested
3259  * to determine whether it needs to be reflushed.
3260  */
3261 void
3262 hammer_test_inode(hammer_inode_t ip)
3263 {
3264         if (ip->flags & HAMMER_INODE_REFLUSH) {
3265                 ip->flags &= ~HAMMER_INODE_REFLUSH;
3266                 hammer_ref(&ip->lock);
3267                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
3268                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
3269                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
3270                 } else {
3271                         hammer_flush_inode(ip, 0);
3272                 }
3273                 hammer_rel_inode(ip, 0);
3274         }
3275 }
3276
3277 /*
3278  * Clear the RECLAIM flag on an inode.  This occurs when the inode is
3279  * reassociated with a vp or just before it gets freed.
3280  *
3281  * Pipeline wakeups to threads blocked due to an excessive number of
3282  * detached inodes.  This typically occurs when atime updates accumulate
3283  * while scanning a directory tree.
3284  */
3285 static void
3286 hammer_inode_wakereclaims(hammer_inode_t ip)
3287 {
3288         struct hammer_reclaim *reclaim;
3289         hammer_mount_t hmp = ip->hmp;
3290
3291         if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
3292                 return;
3293
3294         --hammer_count_reclaims;
3295         --hmp->count_reclaims;
3296         ip->flags &= ~HAMMER_INODE_RECLAIM;
3297
3298         if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
3299                 KKASSERT(reclaim->count > 0);
3300                 if (--reclaim->count == 0) {
3301                         TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
3302                         wakeup(reclaim);
3303                 }
3304         }
3305 }
3306
3307 /*
3308  * Setup our reclaim pipeline.  We only let so many detached (and dirty)
3309  * inodes build up before we start blocking.  This routine is called
3310  * if a new inode is created or an inode is loaded from media.
3311  *
3312  * When we block we don't care *which* inode has finished reclaiming,
3313  * as long as one does.
3314  *
3315  * The reclaim pipeline is primarily governed by the auto-flush which is
3316  * 1/4 hammer_limit_reclaims.  We don't want to block if the count is
3317  * less than 1/2 hammer_limit_reclaims.  From 1/2 to full count is
3318  * dynamically governed.
3319  */
3320 void
3321 hammer_inode_waitreclaims(hammer_transaction_t trans)
3322 {
3323         hammer_mount_t hmp = trans->hmp;
3324         struct hammer_reclaim reclaim;
3325         int lower_limit;
3326
3327         /*
3328          * Track inode load, delay if the number of reclaiming inodes is
3329          * between 1/2 and all of hammer_limit_reclaims, depending on load.
3330          */
3331         if (curthread->td_proc) {
3332                 struct hammer_inostats *stats;
3333
3334                 stats = hammer_inode_inostats(hmp, curthread->td_proc->p_pid);
3335                 ++stats->count;
3336
3337                 if (stats->count > hammer_limit_reclaims / 2)
3338                         stats->count = hammer_limit_reclaims / 2;
3339                 lower_limit = hammer_limit_reclaims - stats->count;
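                     /*
                      * e.g. a process at the stats cap blocks once the
                      * reclaim count reaches 1/2 the limit, while a
                      * process with no history does not block until the
                      * full limit is reached.
                      */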
3340                 if (hammer_debug_general & 0x10000) {
3341                         kprintf("pid %5d limit %d\n",
3342                                 (int)curthread->td_proc->p_pid, lower_limit);
3343                 }
3344         } else {
3345                 lower_limit = hammer_limit_reclaims * 3 / 4;
3346         }
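             /*
              * Block for up to one second or until a completed reclaim
              * causes hammer_inode_wakereclaims() to remove our entry
              * from the list, whichever comes first.
              */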
3347         if (hmp->count_reclaims >= lower_limit) {
3348                 reclaim.count = 1;
3349                 TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
3350                 tsleep(&reclaim, 0, "hmrrcm", hz);
3351                 if (reclaim.count > 0)
3352                         TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
3353         }
3354 }
3355
3356 /*
3357  * Keep track of reclaim statistics on a per-pid basis using a loose
3358  * 4-way set associative hash table.  Collisions inherit the count of
3359  * the previous entry.
3360  *
3361  * NOTE: We want to be careful here to limit the chain size.  If the chain
3362  *       size is too large a pid will spread its stats out over too many
3363  *       entries under certain types of heavy filesystem activity and
3364  *       wind up not delaying long enough.
3365  */
3366 static
3367 struct hammer_inostats *
3368 hammer_inode_inostats(hammer_mount_t hmp, pid_t pid)
3369 {
3370         struct hammer_inostats *stats;
3371         int delta;
3372         int chain;
3373         static volatile int iterator;   /* we don't care about MP races */
3374
3375         /*
3376          * Chain up to 4 times to find our entry.
3377          */
3378         for (chain = 0; chain < 4; ++chain) {
3379                 stats = &hmp->inostats[(pid + chain) & HAMMER_INOSTATS_HMASK];
3380                 if (stats->pid == pid)
3381                         break;
3382         }
3383
3384         /*
3385          * Replace one of the four chaining entries with our new entry.
3386          */
3387         if (chain == 4) {
3388                 stats = &hmp->inostats[(pid + (iterator++ & 3)) &
3389                                        HAMMER_INOSTATS_HMASK];
3390                 stats->pid = pid;
3391         }
3392
3393         /*
3394          * Decay the entry (roughly halved per second; zeroed after 60s)
3395          */
3396         if (stats->count && stats->ltick != ticks) {
3397                 delta = ticks - stats->ltick;
3398                 stats->ltick = ticks;
3399                 if (delta <= 0 || delta > hz * 60)
3400                         stats->count = 0;
3401                 else
3402                         stats->count = stats->count * hz / (hz + delta);
3403         }
3404         if (hammer_debug_general & 0x10000)
3405                 kprintf("pid %5d stats %d\n", (int)pid, stats->count);
3406         return (stats);
3407 }
3408
3409 #if 0
3410
3411 /*
3412  * XXX not used, doesn't work very well due to the large batching nature
3413  * of flushes.
3414  *
3415  * A larger-than-normal backlog of inodes is sitting in the flusher,
3416  * enforce a general slowdown to let it catch up.  This routine is only
3417  * called on completion of a non-flusher-related transaction which
3418  * performed B-Tree node I/O.
3419  *
3420  * It is possible for the flusher to stall in a continuous load.
3421  * blogbench -i1000 -o seems to do a good job generating this sort of load.
3422  * If the flusher is unable to catch up the inode count can bloat until
3423  * we run out of kvm.
3424  *
3425  * This is a bit of a hack.
3426  */
3427 void
3428 hammer_inode_waithard(hammer_mount_t hmp)
3429 {
3430         /*
3431          * Hysteresis.
3432          */
3433         if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
3434                 if (hmp->count_reclaims < hammer_limit_reclaims / 2 &&
3435                     hmp->count_iqueued < hmp->count_inodes / 20) {
3436                         hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
3437                         return;
3438                 }
3439         } else {
3440                 if (hmp->count_reclaims < hammer_limit_reclaims ||
3441                     hmp->count_iqueued < hmp->count_inodes / 10) {
3442                         return;
3443                 }
3444                 hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
3445         }
3446
3447         /*
3448          * Block for one flush cycle.
3449          */
3450         hammer_flusher_wait_next(hmp);
3451 }
3452
3453 #endif