/* sys/vfs/hammer/hammer_inode.c */
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "hammer.h"
#include <vm/vm_extern.h>

static int      hammer_unload_inode(struct hammer_inode *ip);
static void     hammer_free_inode(hammer_inode_t ip);
static void     hammer_flush_inode_core(hammer_inode_t ip,
                                        hammer_flush_group_t flg, int flags);
static int      hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int      hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int      hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
                                        hammer_flush_group_t flg);
static int      hammer_setup_parent_inodes_helper(hammer_record_t record,
                                        int depth, hammer_flush_group_t flg);
static void     hammer_inode_wakereclaims(hammer_inode_t ip);
static struct hammer_inostats *hammer_inode_inostats(hammer_mount_t hmp,
                                        pid_t pid);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif

struct krate hammer_gen_krate = { 1 };

/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_localization < ip2->obj_localization)
                return(-1);
        if (ip1->obj_localization > ip2->obj_localization)
                return(1);
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}

int
hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->redo_fifo_start < ip2->redo_fifo_start)
                return(-1);
        if (ip1->redo_fifo_start > ip2->redo_fifo_start)
                return(1);
        return(0);
}

/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_localization < ip->obj_localization)
                return(-1);
        if (info->obj_localization > ip->obj_localization)
                return(1);
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}

/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
        hammer_inode_info_t info = data;

        if (ip->obj_localization > info->obj_localization)
                return(1);
        if (ip->obj_localization < info->obj_localization)
                return(-1);
        if (ip->obj_id > info->obj_id)
                return(1);
        if (ip->obj_id < info->obj_id)
                return(-1);
        return(0);
}

/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
        u_int32_t localization = *(u_int32_t *)data;
        if (ip->obj_localization > localization)
                return(1);
        if (ip->obj_localization < localization)
                return(-1);
        return(0);
}

/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
        if (p1->localization < p2->localization)
                return(-1);
        if (p1->localization > p2->localization)
                return(1);
        return(0);
}


RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
             hammer_pfs_rb_compare, u_int32_t, localization);
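
/*
 * Illustrative sketch (not compiled in): the lookup side of the trees
 * generated above.  hammer_ino_rb_tree_RB_LOOKUP_INFO() is produced by
 * RB_GENERATE_XLOOKUP() and matches in the same priority order as the
 * comparators: localization, then obj_id, then asof.  The helper name
 * is hypothetical; the iinfo setup mirrors hammer_get_inode() below.
 */
#if 0
static hammer_inode_t
example_ino_lookup(hammer_mount_t hmp, int64_t obj_id, hammer_tid_t asof,
                   u_int32_t localization)
{
        struct hammer_inode_info iinfo;

        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
        return(hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo));
}
#endif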

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 *
 * MPALMOSTSAFE
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);
        hammer_mount_t hmp;

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(ap->a_vp);
                return(0);
        }

        /*
         * If the inode no longer has visibility in the filesystem try to
         * recycle it immediately, even if the inode is dirty.  Recycling
         * it quickly allows the system to reclaim buffer cache and VM
         * resources which can matter a lot in a heavily loaded system.
         *
         * This can deadlock in vfsync() if we aren't careful.
         *
         * Do not queue the inode to the flusher if we still have visibility,
         * otherwise namespace calls such as chmod will unnecessarily generate
         * multiple inode updates.
         */
        if (ip->ino_data.nlinks == 0) {
                hmp = ip->hmp;
                lwkt_gettoken(&hmp->fs_token);
                hammer_inode_unloadable_check(ip, 0);
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
                lwkt_reltoken(&hmp->fs_token);
                vrecycle(ap->a_vp);
        }
        return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 *
 * We must interlock ip->vp so hammer_get_vnode() can avoid races.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct vnode *vp;

        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
                lwkt_gettoken(&hmp->fs_token);
                hammer_lock_ex(&ip->lock);
                vp->v_data = NULL;
                ip->vp = NULL;

                if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
                        ++hammer_count_reclaims;
                        ++hmp->count_reclaims;
                        ip->flags |= HAMMER_INODE_RECLAIM;
                }
                hammer_unlock(&ip->lock);
                vclrisdirty(vp);
                hammer_rel_inode(ip, 1);
                lwkt_reltoken(&hmp->fs_token);
        }
        return(0);
}

/*
 * Inform the kernel that the inode is dirty.  This will be checked
 * by vn_unlock().
 *
 * Theoretically in order to reclaim a vnode the hammer_vop_reclaim()
 * must be called which will interlock against our inode lock, so
 * if VRECLAIMED is not set vp->v_mount (as used by vsetisdirty())
 * should be stable without having to acquire any new locks.
 */
void
hammer_inode_dirty(struct hammer_inode *ip)
{
        struct vnode *vp;

        if ((ip->flags & HAMMER_INODE_MODMASK) &&
            (vp = ip->vp) != NULL &&
            (vp->v_flag & (VRECLAIMED | VISDIRTY)) == 0) {
                vsetisdirty(vp);
        }
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
        hammer_mount_t hmp;
        struct vnode *vp;
        int error = 0;
        u_int8_t obj_type;

        hmp = ip->hmp;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp = *vpp;
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;

                        obj_type = ip->ino_data.obj_type;
                        vp->v_type = hammer_get_vnode_type(obj_type);

                        hammer_inode_wakereclaims(ip);

                        switch(ip->ino_data.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
                                break;
                        case HAMMER_OBJTYPE_REGFILE:
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         *
                         * Pseudo-filesystem roots can be accessed via
                         * non-root filesystem paths and setting VROOT may
                         * confuse the namecache.  Set VPFSROOT instead.
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT) {
                                if (ip->obj_asof == hmp->asof) {
                                        if (ip->obj_localization == 0)
                                                vsetflags(vp, VROOT);
                                        else
                                                vsetflags(vp, VPFSROOT);
                                } else {
                                        vsetflags(vp, VPFSROOT);
                                }
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
                        if (vp->v_type == VREG) {
                                vinitvmio(vp, ip->ino_data.size,
                                          hammer_blocksize(ip->ino_data.size),
                                          hammer_blockoff(ip->ino_data.size));
                        }
                        break;
                }

                /*
                 * Interlock vnode clearing.  This does not prevent the
                 * vnode from going into a reclaimed state but it does
                 * prevent it from being destroyed or reused so the vget()
                 * will properly fail.
                 */
                hammer_lock_ex(&ip->lock);
                if ((vp = ip->vp) == NULL) {
                        hammer_unlock(&ip->lock);
                        continue;
                }
                vhold(vp);
                hammer_unlock(&ip->lock);

                /*
                 * loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp) {
                                vdrop(vp);
                                break;
                        }
                        vput(vp);
                }
                vdrop(vp);
        }
        *vpp = vp;
        return(error);
}

/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
                            int (*callback)(hammer_inode_t ip, void *data),
                            void *data)
{
        hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
                                   hammer_inode_info_cmp_all_history,
                                   callback, iinfo);
}
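
/*
 * Hedged usage sketch (not compiled in): a caller of the scan above.
 * Note that the scan passes iinfo itself through to the callback as its
 * data argument; the callback and helper names are illustrative only.
 * Returning 0 continues the scan; a negative return stops it, as in
 * hammer_unload_pseudofs_callback() later in this file.
 */
#if 0
static int
example_snapshot_callback(hammer_inode_t ip, void *data)
{
        /* data points at the iinfo used to key the scan */
        return(0);      /* continue scanning */
}

static void
example_scan_snapshots(hammer_mount_t hmp, int64_t obj_id,
                       u_int32_t localization)
{
        struct hammer_inode_info iinfo;

        iinfo.obj_id = obj_id;
        iinfo.obj_asof = 0;     /* ignored by the all-history compare */
        iinfo.obj_localization = localization;
        hammer_scan_inode_snapshots(hmp, &iinfo,
                                    example_snapshot_callback, NULL);
}
#endif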

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_node_cache *cachep;
        struct hammer_inode_info iinfo;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;


        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a dummy inode we return a failure so dounlink
         * (which does another lookup) doesn't try to mess with the
         * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
         * to ref dummy inodes.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        if (hmp->ronly)
                ip->flags |= HAMMER_INODE_RO;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Locate the on-disk inode.  If this is a PFS root we always
         * access the current version of the root inode and (if it is not
         * a master) always access information under it with a snapshot
         * TID.
         *
         * We cache recent inode lookups in this directory in dip->cache[2].
         * If we can't find it we assume the inode we are looking for is
         * close to the directory inode.
         */
retry:
        cachep = NULL;
        if (dip) {
                if (dip->cache[2].node)
                        cachep = &dip->cache[2];
                else
                        cachep = &dip->cache[0];
        }
        hammer_init_cursor(trans, &cursor, cachep, NULL);
        cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;

        cursor.asof = iinfo.obj_asof;
        cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
                       HAMMER_CURSOR_ASOF;

        *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
                ip->ino_data = cursor.data->inode;

                /*
                 * cache[0] tries to cache the location of the object inode.
                 * The assumption is that it is near the directory inode.
                 *
                 * cache[1] tries to cache the location of the object data.
                 * We might have something in the governing directory from
                 * scan optimizations (see the strategy code in
                 * hammer_vnops.c).
                 *
                 * We update dip->cache[2], if possible, with the location
                 * of the object inode for future directory shortcuts.
                 */
                hammer_cache_node(&ip->cache[0], cursor.node);
                if (dip) {
                        if (dip->cache[3].node) {
                                hammer_cache_node(&ip->cache[1],
                                                  dip->cache[3].node);
                        }
                        hammer_cache_node(&dip->cache[2], cursor.node);
                }

                /*
                 * The file should not contain any data past the file size
                 * stored in the inode.  Setting save_trunc_off to the
                 * file size instead of max reduces B-Tree lookup overheads
                 * on append by allowing the flusher to avoid checking for
                 * record overwrites.
                 */
                ip->save_trunc_off = ip->ino_data.size;

                /*
                 * Locate and assign the pseudofs management structure to
                 * the inode.
                 */
                if (dip && dip->obj_localization == ip->obj_localization) {
                        ip->pfsm = dip->pfsm;
                        hammer_ref(&ip->pfsm->lock);
                } else {
                        ip->pfsm = hammer_load_pseudofs(trans,
                                                        ip->obj_localization,
                                                        errorp);
                        *errorp = 0;    /* ignore ENOENT */
                }
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }

                hammer_free_inode(ip);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);

        /*
         * NEWINODE is only set if the inode becomes dirty later,
         * setting it here just leads to unnecessary stalls.
         *
         * trans->flags |= HAMMER_TRANSF_NEWINODE;
         */
        return (ip);
}
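
/*
 * Hedged usage sketch (not compiled in): the typical frontend pairing of
 * hammer_get_inode() with hammer_get_vnode().  The helper name is
 * hypothetical and error handling is abbreviated.
 */
#if 0
static int
example_frontend_lookup(hammer_transaction_t trans, hammer_inode_t dip,
                        int64_t obj_id, struct vnode **vpp)
{
        struct hammer_inode *ip;
        int error;

        ip = hammer_get_inode(trans, dip, obj_id, dip->obj_asof,
                              dip->obj_localization, 0, &error);
        if (ip == NULL)
                return(error);
        error = hammer_get_vnode(ip, vpp);      /* *vpp returned locked */
        hammer_rel_inode(ip, 0);                /* drop our inode ref */
        return(error);
}
#endif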

/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a non-fake inode we return an error.  Only fake
         * inodes can be returned by this routine.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        *errorp = 0;
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Populate the dummy inode.  Leave everything zero'd out.
         *
         * (ip->ino_leaf and ip->ino_data)
         *
         * Make the dummy inode a FIFO object which most copy programs
         * will properly ignore.
         */
        ip->save_trunc_off = ip->ino_data.size;
        ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

        /*
         * Locate and assign the pseudofs management structure to
         * the inode.
         */
        if (dip && dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
        } else {
                ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
                                                errorp);
                *errorp = 0;    /* ignore ENOENT */
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         *
         * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        goto loop;
                }
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }
                hammer_free_inode(ip);
                ip = NULL;
        }
        trans->flags |= HAMMER_TRANSF_NEWINODE;
        return (ip);
}

/*
 * Return a referenced inode only if it is in our inode cache.
 *
 * Dummy inodes do not count.
 */
struct hammer_inode *
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
                  hammer_tid_t asof, u_int32_t localization)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;

        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY)
                        ip = NULL;
                else
                        hammer_ref(&ip->lock);
        }
        return(ip);
}

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred,
                    hammer_inode_t dip, const char *name, int namelen,
                    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;
        int error;
        int64_t namekey;
        u_int32_t dummy;

        hmp = trans->hmp;

        /*
         * Disallow the creation of new inodes in directories which
         * have been deleted.  In HAMMER, this will cause a record
         * syncing assertion later on in the flush code.
         */
        if (dip && dip->ino_data.nlinks == 0) {
                *ipp = NULL;
                return (EINVAL);
        }

        /*
         * Allocate inode
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        trans->flags |= HAMMER_TRANSF_NEWINODE;

        if (pfsm) {
                KKASSERT(pfsm->localization != 0);
                ip->obj_id = HAMMER_OBJID_ROOT;
                ip->obj_localization = pfsm->localization;
        } else {
                KKASSERT(dip != NULL);
                namekey = hammer_directory_namekey(dip, name, namelen, &dummy);
                ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
                ip->obj_localization = dip->obj_localization;
        }

        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY |
                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;

        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        /* ip->save_trunc_off = 0; (already zero) */
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        ip->ino_data.atime = trans->time;
        ip->ino_data.mtime = trans->time;
        ip->ino_data.size = 0;
        ip->ino_data.nlinks = 0;

        /*
         * A nohistory designator on the parent directory is inherited by
         * the child.  We will do this even for pseudo-fs creation... the
         * sysad can turn it off.
         */
        if (dip) {
                ip->ino_data.uflags = dip->ino_data.uflags &
                                      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
        }

        ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_leaf.base.localization = ip->obj_localization +
                                         HAMMER_LOCALIZE_INODE;
        ip->ino_leaf.base.obj_id = ip->obj_id;
        ip->ino_leaf.base.key = 0;
        ip->ino_leaf.base.create_tid = 0;
        ip->ino_leaf.base.delete_tid = 0;
        ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;

        /*
         * If we are running version 2 or greater, directory entries are
         * inode-localized instead of data-localized.
         */
        if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
                if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        ip->ino_data.cap_flags |=
                                HAMMER_INODE_CAP_DIR_LOCAL_INO;
                }
        }
        if (trans->hmp->version >= HAMMER_VOL_VERSION_SIX) {
                if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        ip->ino_data.cap_flags |=
                                HAMMER_INODE_CAP_DIRHASH_ALG1;
                }
        }

        /*
         * Setup the ".." pointer.  This only needs to be done for directories
         * but we do it for all objects as a recovery aid if dip exists.
         * The inode is probably a PFS root if dip is NULL.
         */
        if (dip)
                ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
#if 0
        /*
         * The parent_obj_localization field only applies to pseudo-fs roots.
         * XXX this is no longer applicable, PFSs are no longer directly
         * tied into the parent's directory structure.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
            ip->obj_id == HAMMER_OBJID_ROOT) {
                ip->ino_data.ext.obj.parent_obj_localization =
                                                dip->obj_localization;
        }
#endif

        switch(ip->ino_leaf.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        if (dip) {
                xuid = hammer_to_unix_xid(&dip->ino_data.uid);
                xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
                                             xuid, cred, &vap->va_mode);
        } else {
                xuid = 0;
        }
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
        else
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
        else if (dip)
                ip->ino_data.gid = dip->ino_data.gid;

        hammer_ref(&ip->lock);

        if (pfsm) {
                ip->pfsm = pfsm;
                hammer_ref(&pfsm->lock);
                error = 0;
        } else if (dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
                error = 0;
        } else {
                ip->pfsm = hammer_load_pseudofs(trans,
                                                ip->obj_localization,
                                                &error);
                error = 0;      /* ignore ENOENT */
        }

        if (error) {
                hammer_free_inode(ip);
                ip = NULL;
        } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                panic("hammer_create_inode: duplicate obj_id %llx",
                      (long long)ip->obj_id);
                /* not reached */
                hammer_free_inode(ip);
        }
        *ipp = ip;
        return(error);
}

/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
        struct hammer_mount *hmp;

        hmp = ip->hmp;
        KKASSERT(hammer_oneref(&ip->lock));
        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        hammer_uncache_node(&ip->cache[2]);
        hammer_uncache_node(&ip->cache[3]);
        hammer_inode_wakereclaims(ip);
        if (ip->objid_cache)
                hammer_clear_objid(ip);
        --hammer_count_inodes;
        --hmp->count_inodes;
        if (ip->pfsm) {
                hammer_rel_pseudofs(hmp, ip->pfsm);
                ip->pfsm = NULL;
        }
        kfree(ip, hmp->m_inodes);
        ip = NULL;
}

/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
                     u_int32_t localization, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_inode_t ip;
        hammer_pseudofs_inmem_t pfsm;
        struct hammer_cursor cursor;
        int bytes;

retry:
        pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
        if (pfsm) {
                hammer_ref(&pfsm->lock);
                *errorp = 0;
                return(pfsm);
        }

        /*
         * PFS records are associated with the root inode (not the PFS root
         * inode, but the real root).  Avoid an infinite recursion if loading
         * the PFS for the real root.
         */
        if (localization) {
                ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
                                      HAMMER_MAX_TID,
                                      HAMMER_DEF_LOCALIZATION, 0, errorp);
        } else {
                ip = NULL;
        }

        pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
        pfsm->localization = localization;
        pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
        pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

        hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
        cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        if (ip)
                *errorp = hammer_ip_lookup(&cursor);
        else
                *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == 0) {
                *errorp = hammer_ip_resolve_data(&cursor);
                if (*errorp == 0) {
                        if (cursor.data->pfsd.mirror_flags &
                            HAMMER_PFSD_DELETED) {
                                *errorp = ENOENT;
                        } else {
                                bytes = cursor.leaf->data_len;
                                if (bytes > sizeof(pfsm->pfsd))
                                        bytes = sizeof(pfsm->pfsd);
                                bcopy(cursor.data, &pfsm->pfsd, bytes);
                        }
                }
        }
        hammer_done_cursor(&cursor);

        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_ref(&pfsm->lock);
        if (ip)
                hammer_rel_inode(ip, 0);
        if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
                kfree(pfsm, hmp->m_misc);
                goto retry;
        }
        return(pfsm);
}
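
/*
 * Hedged usage sketch (not compiled in): the in-memory PFS structure is
 * reference counted.  hammer_load_pseudofs() always returns a referenced
 * pfsm (a default template on error), which the caller drops with
 * hammer_rel_pseudofs().  The helper name is hypothetical.
 */
#if 0
static void
example_inspect_pfs(hammer_transaction_t trans, u_int32_t localization)
{
        hammer_pseudofs_inmem_t pfsm;
        int error;

        pfsm = hammer_load_pseudofs(trans, localization, &error);
        /* ... inspect pfsm->pfsd; error is typically 0 or ENOENT ... */
        hammer_rel_pseudofs(trans->hmp, pfsm);
}
#endif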

/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
        struct hammer_cursor cursor;
        hammer_record_t record;
        hammer_inode_t ip;
        int error;

        /*
         * PFS records are associated with the root inode (not the PFS root
         * inode, but the real root).
         */
        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = pfsm->localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        /*
         * Replace any in-memory version of the record.
         */
        error = hammer_ip_lookup(&cursor);
        if (error == 0 && hammer_cursor_inmem(&cursor)) {
                record = cursor.iprec;
                if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
                        KKASSERT(cursor.deadlk_rec == NULL);
                        hammer_ref(&record->lock);
                        cursor.deadlk_rec = record;
                        error = EDEADLK;
                } else {
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        error = 0;
                }
        }

        /*
         * Allocate replacement general record.  The backend flush will
         * delete any on-disk version of the record.
         */
        if (error == 0 || error == ENOENT) {
                record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
                record->type = HAMMER_MEM_RECORD_GENERAL;

                record->leaf.base.localization = ip->obj_localization +
                                                 HAMMER_LOCALIZE_MISC;
                record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
                record->leaf.base.key = pfsm->localization;
                record->leaf.data_len = sizeof(pfsm->pfsd);
                bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
                error = hammer_ip_add_record(trans, record);
        }
        hammer_done_cursor(&cursor);
        if (error == EDEADLK)
                goto retry;
        hammer_rel_inode(ip, 0);
        return(error);
}

/*
 * Create a root directory for a PFS if one does not already exist.
1132  *
1133  * The PFS root stands alone so we must also bump the nlinks count
1134  * to prevent it from being destroyed on release.
1135  */
1136 int
1137 hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
1138                        hammer_pseudofs_inmem_t pfsm)
1139 {
1140         hammer_inode_t ip;
1141         struct vattr vap;
1142         int error;
1143
1144         ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
1145                               pfsm->localization, 0, &error);
1146         if (ip == NULL) {
1147                 vattr_null(&vap);
1148                 vap.va_mode = 0755;
1149                 vap.va_type = VDIR;
1150                 error = hammer_create_inode(trans, &vap, cred,
1151                                             NULL, NULL, 0,
1152                                             pfsm, &ip);
1153                 if (error == 0) {
1154                         ++ip->ino_data.nlinks;
1155                         hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
1156                 }
1157         }
1158         if (ip)
1159                 hammer_rel_inode(ip, 0);
1160         return(error);
1161 }
1162
1163 /*
1164  * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
1165  * if we are unable to disassociate all the inodes.
1166  */
1167 static
1168 int
1169 hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
1170 {
1171         int res;
1172
1173         hammer_ref(&ip->lock);
1174         if (hammer_isactive(&ip->lock) == 2 && ip->vp)
1175                 vclean_unlocked(ip->vp);
1176         if (hammer_isactive(&ip->lock) == 1 && ip->vp == NULL)
1177                 res = 0;
1178         else
1179                 res = -1;       /* stop, someone is using the inode */
1180         hammer_rel_inode(ip, 0);
1181         return(res);
1182 }
1183
1184 int
1185 hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
1186 {
1187         int res;
1188         int try;
1189
1190         for (try = res = 0; try < 4; ++try) {
1191                 res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
1192                                            hammer_inode_pfs_cmp,
1193                                            hammer_unload_pseudofs_callback,
1194                                            &localization);
1195                 if (res == 0 && try > 1)
1196                         break;
1197                 hammer_flusher_sync(trans->hmp);
1198         }
1199         if (res != 0)
1200                 res = ENOTEMPTY;
1201         return(res);
1202 }
1203
1204
1205 /*
1206  * Release a reference on a PFS
1207  */
1208 void
1209 hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
1210 {
1211         hammer_rel(&pfsm->lock);
1212         if (hammer_norefs(&pfsm->lock)) {
1213                 RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
1214                 kfree(pfsm, hmp->m_misc);
1215         }
1216 }
1217
1218 /*
1219  * Called by hammer_sync_inode().
1220  */
1221 static int
1222 hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
1223 {
1224         hammer_transaction_t trans = cursor->trans;
1225         hammer_record_t record;
1226         int error;
1227         int redirty;
1228
1229 retry:
1230         error = 0;
1231
1232         /*
1233          * If the inode has a presence on-disk then locate it and mark
1234          * it deleted, setting DELONDISK.
1235          *
1236          * The record may or may not be physically deleted, depending on
1237          * the retention policy.
1238          */
1239         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
1240             HAMMER_INODE_ONDISK) {
1241                 hammer_normalize_cursor(cursor);
1242                 cursor->key_beg.localization = ip->obj_localization + 
1243                                                HAMMER_LOCALIZE_INODE;
1244                 cursor->key_beg.obj_id = ip->obj_id;
1245                 cursor->key_beg.key = 0;
1246                 cursor->key_beg.create_tid = 0;
1247                 cursor->key_beg.delete_tid = 0;
1248                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
1249                 cursor->key_beg.obj_type = 0;
1250                 cursor->asof = ip->obj_asof;
1251                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1252                 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
1253                 cursor->flags |= HAMMER_CURSOR_BACKEND;
1254
1255                 error = hammer_btree_lookup(cursor);
1256                 if (hammer_debug_inode)
1257                         kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
1258
1259                 if (error == 0) {
1260                         error = hammer_ip_delete_record(cursor, ip, trans->tid);
1261                         if (hammer_debug_inode)
1262                                 kprintf(" error %d\n", error);
1263                         if (error == 0) {
1264                                 ip->flags |= HAMMER_INODE_DELONDISK;
1265                         }
1266                         if (cursor->node)
1267                                 hammer_cache_node(&ip->cache[0], cursor->node);
1268                 }
1269                 if (error == EDEADLK) {
1270                         hammer_done_cursor(cursor);
1271                         error = hammer_init_cursor(trans, cursor,
1272                                                    &ip->cache[0], ip);
1273                         if (hammer_debug_inode)
1274                                 kprintf("IPDED %p %d\n", ip, error);
1275                         if (error == 0)
1276                                 goto retry;
1277                 }
1278         }
1279
1280         /*
1281          * Ok, write out the initial record or a new record (after deleting
1282          * the old one), unless the DELETED flag is set.  This routine will
1283          * clear DELONDISK if it writes out a record.
1284          *
1285          * Update our inode statistics if this is the first application of
1286          * the inode on-disk.
1287          */
1288         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
1289                 /*
1290                  * Generate a record and write it to the media.  We clean-up
1291                  * the state before releasing so we do not have to set-up
1292                  * a flush_group.
1293                  */
1294                 record = hammer_alloc_mem_record(ip, 0);
1295                 record->type = HAMMER_MEM_RECORD_INODE;
1296                 record->flush_state = HAMMER_FST_FLUSH;
1297                 record->leaf = ip->sync_ino_leaf;
1298                 record->leaf.base.create_tid = trans->tid;
1299                 record->leaf.data_len = sizeof(ip->sync_ino_data);
1300                 record->leaf.create_ts = trans->time32;
1301                 record->data = (void *)&ip->sync_ino_data;
1302                 record->flags |= HAMMER_RECF_INTERLOCK_BE;
1303
1304                 /*
1305                  * If this flag is set we cannot sync the new file size
1306                  * because we haven't finished related truncations.  The
1307                  * inode will be flushed in another flush group to finish
1308                  * the job.
1309                  */
1310                 if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
1311                     ip->sync_ino_data.size != ip->ino_data.size) {
1312                         redirty = 1;
1313                         ip->sync_ino_data.size = ip->ino_data.size;
1314                 } else {
1315                         redirty = 0;
1316                 }
1317
1318                 for (;;) {
1319                         error = hammer_ip_sync_record_cursor(cursor, record);
1320                         if (hammer_debug_inode)
1321                                 kprintf("GENREC %p rec %08x %d\n",      
1322                                         ip, record->flags, error);
1323                         if (error != EDEADLK)
1324                                 break;
1325                         hammer_done_cursor(cursor);
1326                         error = hammer_init_cursor(trans, cursor,
1327                                                    &ip->cache[0], ip);
1328                         if (hammer_debug_inode)
1329                                 kprintf("GENREC reinit %d\n", error);
1330                         if (error)
1331                                 break;
1332                 }
1333
1334                 /*
1335                  * Note:  The record was never on the inode's record tree
1336                  * so just wave our hands importantly and destroy it.
1337                  */
1338                 record->flags |= HAMMER_RECF_COMMITTED;
1339                 record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
1340                 record->flush_state = HAMMER_FST_IDLE;
1341                 ++ip->rec_generation;
1342                 hammer_rel_mem_record(record);
1343
1344                 /*
1345                  * Finish up.
1346                  */
1347                 if (error == 0) {
1348                         if (hammer_debug_inode)
1349                                 kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
1350                         ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
1351                                             HAMMER_INODE_SDIRTY |
1352                                             HAMMER_INODE_ATIME |
1353                                             HAMMER_INODE_MTIME);
1354                         ip->flags &= ~HAMMER_INODE_DELONDISK;
1355                         if (redirty)
1356                                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1357
1358                         /*
1359                          * Root volume count of inodes
1360                          */
1361                         hammer_sync_lock_sh(trans);
1362                         if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
1363                                 hammer_modify_volume_field(trans,
1364                                                            trans->rootvol,
1365                                                            vol0_stat_inodes);
1366                                 ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
1367                                 hammer_modify_volume_done(trans->rootvol);
1368                                 ip->flags |= HAMMER_INODE_ONDISK;
1369                                 if (hammer_debug_inode)
1370                                         kprintf("NOWONDISK %p\n", ip);
1371                         }
1372                         hammer_sync_unlock(trans);
1373                 }
1374         }
1375
1376         /*
1377          * If the inode has been destroyed, clean out any left-over flags
1378          * that may have been set by the frontend.
1379          */
1380         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) { 
1381                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
1382                                     HAMMER_INODE_SDIRTY |
1383                                     HAMMER_INODE_ATIME |
1384                                     HAMMER_INODE_MTIME);
1385         }
1386         return(error);
1387 }
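
#if 0
/*
 * Illustrative sketch, not part of the original source: the EDEADLK
 * retry idiom used above.  A backend operation that deadlocks against
 * another cursor drops its cursor and re-acquires it before retrying.
 * edeadlk_op() is a hypothetical stand-in for any cursor-based
 * operation that can return EDEADLK.
 */
static int
example_retry_edeadlk(hammer_transaction_t trans, hammer_cursor_t cursor,
                      hammer_inode_t ip)
{
        int error;

        for (;;) {
                error = edeadlk_op(cursor);     /* hypothetical operation */
                if (error != EDEADLK)
                        break;
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor, &ip->cache[0], ip);
                if (error)
                        break;
        }
        return(error);
}
#endif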
1388
1389 /*
1390  * Update only the itimes fields.
1391  *
1392  * ATIME can be updated without generating any UNDO.  MTIME is updated
1393  * with UNDO so it is guaranteed to be synchronized properly in case of
1394  * a crash.
1395  *
1396  * Neither field is included in the B-Tree leaf element's CRC, which is how
1397  * we can get away with updating ATIME the way we do.
1398  */
1399 static int
1400 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
1401 {
1402         hammer_transaction_t trans = cursor->trans;
1403         int error;
1404
1405 retry:
1406         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
1407             HAMMER_INODE_ONDISK) {
1408                 return(0);
1409         }
1410
1411         hammer_normalize_cursor(cursor);
1412         cursor->key_beg.localization = ip->obj_localization + 
1413                                        HAMMER_LOCALIZE_INODE;
1414         cursor->key_beg.obj_id = ip->obj_id;
1415         cursor->key_beg.key = 0;
1416         cursor->key_beg.create_tid = 0;
1417         cursor->key_beg.delete_tid = 0;
1418         cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
1419         cursor->key_beg.obj_type = 0;
1420         cursor->asof = ip->obj_asof;
1421         cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1422         cursor->flags |= HAMMER_CURSOR_ASOF;
1423         cursor->flags |= HAMMER_CURSOR_GET_LEAF;
1424         cursor->flags |= HAMMER_CURSOR_GET_DATA;
1425         cursor->flags |= HAMMER_CURSOR_BACKEND;
1426
1427         error = hammer_btree_lookup(cursor);
1428         if (error == 0) {
1429                 hammer_cache_node(&ip->cache[0], cursor->node);
1430                 if (ip->sync_flags & HAMMER_INODE_MTIME) {
1431                         /*
1432                          * Updating MTIME requires an UNDO.  Just cover
1433                          * both atime and mtime.
1434                          */
1435                         hammer_sync_lock_sh(trans);
1436                         hammer_modify_buffer(trans, cursor->data_buffer,
1437                                      HAMMER_ITIMES_BASE(&cursor->data->inode),
1438                                      HAMMER_ITIMES_BYTES);
1439                         cursor->data->inode.atime = ip->sync_ino_data.atime;
1440                         cursor->data->inode.mtime = ip->sync_ino_data.mtime;
1441                         hammer_modify_buffer_done(cursor->data_buffer);
1442                         hammer_sync_unlock(trans);
1443                 } else if (ip->sync_flags & HAMMER_INODE_ATIME) {
1444                         /*
1445                          * Updating atime only can be done in-place with
1446                          * no UNDO.
1447                          */
1448                         hammer_sync_lock_sh(trans);
1449                         hammer_modify_buffer(trans, cursor->data_buffer,
1450                                              NULL, 0);
1451                         cursor->data->inode.atime = ip->sync_ino_data.atime;
1452                         hammer_modify_buffer_done(cursor->data_buffer);
1453                         hammer_sync_unlock(trans);
1454                 }
1455                 ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
1456         }
1457         if (error == EDEADLK) {
1458                 hammer_done_cursor(cursor);
1459                 error = hammer_init_cursor(trans, cursor,
1460                                            &ip->cache[0], ip);
1461                 if (error == 0)
1462                         goto retry;
1463         }
1464         return(error);
1465 }
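
#if 0
/*
 * Illustrative sketch, not part of the original source: how a backend
 * sync path might take the cheap itimes-only route when nothing but
 * the timestamp flags remain dirty.  The flag names match the code
 * above; the surrounding sync logic is simplified.
 */
static int
example_sync_itimes_only(hammer_cursor_t cursor, hammer_inode_t ip)
{
        if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
            (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
                /* only atime/mtime are dirty, take the cheap path */
                return(hammer_update_itimes(cursor, ip));
        }
        return(0);
}
#endif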
1466
1467 /*
1468  * Release a reference on an inode, flush as requested.
1469  *
1470  * On the last reference we queue the inode to the flusher for its final
1471  * disposition.
1472  */
1473 void
1474 hammer_rel_inode(struct hammer_inode *ip, int flush)
1475 {
1476         /*hammer_mount_t hmp = ip->hmp;*/
1477
1478         /*
1479          * Handle disposition when dropping the last ref.
1480          */
1481         for (;;) {
1482                 if (hammer_oneref(&ip->lock)) {
1483                         /*
1484                          * Determine whether on-disk action is needed for
1485                          * the inode's final disposition.
1486                          */
1487                         KKASSERT(ip->vp == NULL);
1488                         hammer_inode_unloadable_check(ip, 0);
1489                         if (ip->flags & HAMMER_INODE_MODMASK) {
1490                                 hammer_flush_inode(ip, 0);
1491                         } else if (hammer_oneref(&ip->lock)) {
1492                                 hammer_unload_inode(ip);
1493                                 break;
1494                         }
1495                 } else {
1496                         if (flush)
1497                                 hammer_flush_inode(ip, 0);
1498
1499                         /*
1500                          * The inode still has multiple refs, try to drop
1501                          * one ref.
1502                          */
1503                         KKASSERT(hammer_isactive(&ip->lock) >= 1);
1504                         if (hammer_isactive(&ip->lock) > 1) {
1505                                 hammer_rel(&ip->lock);
1506                                 break;
1507                         }
1508                 }
1509         }
1510 }
1511
1512 /*
1513  * Unload and destroy the specified inode.  Must be called with one remaining
1514  * reference.  The reference is disposed of.
1515  *
1516  * The inode must be completely clean.
1517  */
1518 static int
1519 hammer_unload_inode(struct hammer_inode *ip)
1520 {
1521         hammer_mount_t hmp = ip->hmp;
1522
1523         KASSERT(hammer_oneref(&ip->lock),
1524                 ("hammer_unload_inode: %d refs", hammer_isactive(&ip->lock)));
1525         KKASSERT(ip->vp == NULL);
1526         KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
1527         KKASSERT(ip->cursor_ip_refs == 0);
1528         KKASSERT(hammer_notlocked(&ip->lock));
1529         KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);
1530
1531         KKASSERT(RB_EMPTY(&ip->rec_tree));
1532         KKASSERT(TAILQ_EMPTY(&ip->target_list));
1533
1534         if (ip->flags & HAMMER_INODE_RDIRTY) {
1535                 RB_REMOVE(hammer_redo_rb_tree, &hmp->rb_redo_root, ip);
1536                 ip->flags &= ~HAMMER_INODE_RDIRTY;
1537         }
1538         RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);
1539
1540         hammer_free_inode(ip);
1541         return(0);
1542 }
1543
1544 /*
1545  * Called during unmounting if a critical error occurred.  The in-memory
1546  * inode and all related structures are destroyed.
1547  *
1548  * If a critical error did not occur the unmount code calls the standard
1549  * release and asserts that the inode is gone.
1550  */
1551 int
1552 hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
1553 {
1554         hammer_record_t rec;
1555
1556         /*
1557          * Get rid of the inode's in-memory records, regardless of their
1558          * state, and clear the mod-mask.
1559          */
1560         while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
1561                 TAILQ_REMOVE(&ip->target_list, rec, target_entry);
1562                 rec->target_ip = NULL;
1563                 if (rec->flush_state == HAMMER_FST_SETUP)
1564                         rec->flush_state = HAMMER_FST_IDLE;
1565         }
1566         while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
1567                 if (rec->flush_state == HAMMER_FST_FLUSH)
1568                         --rec->flush_group->refs;
1569                 else
1570                         hammer_ref(&rec->lock);
1571                 KKASSERT(hammer_oneref(&rec->lock));
1572                 rec->flush_state = HAMMER_FST_IDLE;
1573                 rec->flush_group = NULL;
1574                 rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
1575                 rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
1576                 ++ip->rec_generation;
1577                 hammer_rel_mem_record(rec);
1578         }
1579         ip->flags &= ~HAMMER_INODE_MODMASK;
1580         ip->sync_flags &= ~HAMMER_INODE_MODMASK;
1581         KKASSERT(ip->vp == NULL);
1582
1583         /*
1584          * Remove the inode from any flush group, force it idle.  FLUSH
1585          * and SETUP states have an inode ref.
1586          */
1587         switch(ip->flush_state) {
1588         case HAMMER_FST_FLUSH:
1589                 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
1590                 --ip->flush_group->refs;
1591                 ip->flush_group = NULL;
1592                 /* fall through */
1593         case HAMMER_FST_SETUP:
1594                 hammer_rel(&ip->lock);
1595                 ip->flush_state = HAMMER_FST_IDLE;
1596                 /* fall through */
1597         case HAMMER_FST_IDLE:
1598                 break;
1599         }
1600
1601         /*
1602          * There shouldn't be any associated vnode.  The unload needs at
1603          * least one ref; if we do have a vp, steal its ip ref.
1604          */
1605         if (ip->vp) {
1606                 kprintf("hammer_destroy_inode_callback: Unexpected "
1607                         "vnode association ip %p vp %p\n", ip, ip->vp);
1608                 ip->vp->v_data = NULL;
1609                 ip->vp = NULL;
1610         } else {
1611                 hammer_ref(&ip->lock);
1612         }
1613         hammer_unload_inode(ip);
1614         return(0);
1615 }
1616
1617 /*
1618  * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
1619  * the read-only flag for cached inodes.
1620  *
1621  * This routine is called from a RB_SCAN().
1622  */
1623 int
1624 hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
1625 {
1626         hammer_mount_t hmp = ip->hmp;
1627
1628         if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
1629                 ip->flags |= HAMMER_INODE_RO;
1630         else
1631                 ip->flags &= ~HAMMER_INODE_RO;
1632         return(0);
1633 }
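
#if 0
/*
 * Illustrative sketch, not part of the original source: a mount
 * update walks every cached inode with the callback above via an
 * RB_SCAN of the inode tree so the cached RO flags track the new
 * mount state.
 */
static void
example_reload_all_inodes(hammer_mount_t hmp)
{
        RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
                hammer_reload_inode, NULL);
}
#endif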
1634
1635 /*
1636  * A transaction has modified an inode, requiring updates as specified by
1637  * the passed flags.
1638  *
1639  * HAMMER_INODE_DDIRTY: Inode data has been updated, not incl mtime/atime,
1640  *                      and not including size changes due to write-append
1641  *                      (but other size changes are included).
1642  * HAMMER_INODE_SDIRTY: Inode data has been updated, size changes due to
1643  *                      write-append.
1644  * HAMMER_INODE_XDIRTY: Dirty in-memory records
1645  * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
1646  * HAMMER_INODE_DELETED: Inode record/data must be deleted
1647  * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
1648  */
1649 void
1650 hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
1651 {
1652         /*
1653          * A ronly value of 0 or 2 does not trigger the assertion;
1654          * 2 is a special error state.
1655          */
1656         KKASSERT(ip->hmp->ronly != 1 ||
1657                   (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 
1658                             HAMMER_INODE_SDIRTY |
1659                             HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
1660                             HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
1661         if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
1662                 ip->flags |= HAMMER_INODE_RSV_INODES;
1663                 ++ip->hmp->rsv_inodes;
1664         }
1665
1666         /*
1667          * Set the NEWINODE flag in the transaction if the inode
1668          * transitions to a dirty state.  This is used to track
1669          * the load on the inode cache.
1670          */
1671         if (trans &&
1672             (ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1673             (flags & HAMMER_INODE_MODMASK)) {
1674                 trans->flags |= HAMMER_TRANSF_NEWINODE;
1675         }
1676         if (flags & HAMMER_INODE_MODMASK)
1677                 hammer_inode_dirty(ip);
1678         ip->flags |= flags;
1679 }
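
#if 0
/*
 * Illustrative sketch, not part of the original source: a typical
 * frontend call after a setattr-style change, marking the inode data
 * dirty together with mtime using the flags documented above.
 */
static void
example_dirty_inode(hammer_transaction_t trans, hammer_inode_t ip)
{
        hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY |
                                       HAMMER_INODE_MTIME);
}
#endif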
1680
1681 /*
1682  * Attempt to quickly update the atime for a hammer inode.  Return 0 on
1683  * success, -1 on failure.
1684  *
1685  * We attempt to update the atime with only the ip lock and not the
1686  * whole filesystem lock in order to improve concurrency.  We can only
1687  * do this safely if the ATIME flag is already pending on the inode.
1688  *
1689  * This function is called via a vnops path (ip pointer is stable) without
1690  * fs_token held.
1691  */
1692 int
1693 hammer_update_atime_quick(hammer_inode_t ip)
1694 {
1695         struct timeval tv;
1696         int res = -1;
1697
1698         if ((ip->flags & HAMMER_INODE_RO) ||
1699             (ip->hmp->mp->mnt_flag & MNT_NOATIME)) {
1700                 /*
1701                  * Silently indicate success on read-only mount/snap
1702                  */
1703                 res = 0;
1704         } else if (ip->flags & HAMMER_INODE_ATIME) {
1705                 /*
1706                  * Double check with inode lock held against backend.  This
1707                  * is only safe if all we need to do is update
1708                  * ino_data.atime.
1709                  */
1710                 getmicrotime(&tv);
1711                 hammer_lock_ex(&ip->lock);
1712                 if (ip->flags & HAMMER_INODE_ATIME) {
1713                         ip->ino_data.atime =
1714                             (unsigned long)tv.tv_sec * 1000000ULL + tv.tv_usec;
1715                         res = 0;
1716                 }
1717                 hammer_unlock(&ip->lock);
1718         }
1719         return res;
1720 }
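
#if 0
/*
 * Illustrative sketch, not part of the original source: the intended
 * caller pattern for the fast path above.  Try the lockless update
 * first; on failure fall back to flagging ATIME via the normal
 * hammer_modify_inode() path with fs_token held.  Simplified; a real
 * slow path would also update ino_data.atime itself.
 */
static void
example_touch_atime(hammer_inode_t ip)
{
        if (hammer_update_atime_quick(ip) < 0) {
                lwkt_gettoken(&ip->hmp->fs_token);
                hammer_modify_inode(NULL, ip, HAMMER_INODE_ATIME);
                lwkt_reltoken(&ip->hmp->fs_token);
        }
}
#endif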
1721
1722 /*
1723  * Request that an inode be flushed.  This whole mess cannot block and may
1724  * recurse (if not synchronous).  Once requested HAMMER will attempt to
1725  * actively flush the inode until the flush can be done.
1726  *
1727  * The inode may already be flushing, or may be in a setup state.  We can
1728  * place the inode in a flushing state if it is currently idle and flag it
1729  * to reflush if it is currently flushing.
1730  *
1731  * Upon return, if the inode could not be flushed due to a setup
1732  * dependency then it will be automatically flushed when the dependency
1733  * is satisfied.
1734  */
1735 void
1736 hammer_flush_inode(hammer_inode_t ip, int flags)
1737 {
1738         hammer_mount_t hmp;
1739         hammer_flush_group_t flg;
1740         int good;
1741
1742         /*
1743          * fill_flush_group is the first flush group we may be able to
1744          * continue filling; it may be open or closed but it will always
1745          * be past the currently flushing (running) flg.
1746          *
1747          * next_flush_group is the next open flush group.
1748          */
1749         hmp = ip->hmp;
1750         while ((flg = hmp->fill_flush_group) != NULL) {
1751                 KKASSERT(flg->running == 0);
1752                 if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit &&
1753                     flg->total_count <= hammer_autoflush) {
1754                         break;
1755                 }
1756                 hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry);
1757                 hammer_flusher_async(ip->hmp, flg);
1758         }
1759         if (flg == NULL) {
1760                 flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
1761                 flg->seq = hmp->flusher.next++;
1762                 if (hmp->next_flush_group == NULL)
1763                         hmp->next_flush_group = flg;
1764                 if (hmp->fill_flush_group == NULL)
1765                         hmp->fill_flush_group = flg;
1766                 RB_INIT(&flg->flush_tree);
1767                 TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
1768         }
1769
1770         /*
1771          * Trivial 'nothing to flush' case.  If the inode is in a SETUP
1772          * state we have to put it back into an IDLE state so we can
1773          * drop the extra ref.
1774          *
1775          * If we have a parent dependency we must still fall through
1776          * so we can run it.
1777          */
1778         if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
1779                 if (ip->flush_state == HAMMER_FST_SETUP &&
1780                     TAILQ_EMPTY(&ip->target_list)) {
1781                         ip->flush_state = HAMMER_FST_IDLE;
1782                         hammer_rel_inode(ip, 0);
1783                 }
1784                 if (ip->flush_state == HAMMER_FST_IDLE)
1785                         return;
1786         }
1787
1788         /*
1789          * Our flush action will depend on the current state.
1790          */
1791         switch(ip->flush_state) {
1792         case HAMMER_FST_IDLE:
1793                 /*
1794                  * We have no dependencies and can flush immediately.  Some
1795                  * of our children may not be flushable so we have to re-test
1796                  * with that additional knowledge.
1797                  */
1798                 hammer_flush_inode_core(ip, flg, flags);
1799                 break;
1800         case HAMMER_FST_SETUP:
1801                 /*
1802                  * Recurse upwards through dependencies via target_list
1803                  * and start their flusher actions going if possible.
1804                  *
1805                  * 'good' is our connectivity.  -1 means we have none and
1806                  * can't flush, 0 means there weren't any dependencies, and
1807                  * 1 means we have good connectivity.
1808                  */
1809                 good = hammer_setup_parent_inodes(ip, 0, flg);
1810
1811                 if (good >= 0) {
1812                         /*
1813                          * We can continue if good >= 0.  Determine how 
1814                          * many records under our inode can be flushed (and
1815                          * mark them).
1816                          */
1817                         hammer_flush_inode_core(ip, flg, flags);
1818                 } else {
1819                         /*
1820                          * Parent has no connectivity, tell it to flush
1821                          * us as soon as it does.
1822                          *
1823                          * The REFLUSH flag is also needed to trigger
1824                          * dependency wakeups.
1825                          */
1826                         ip->flags |= HAMMER_INODE_CONN_DOWN |
1827                                      HAMMER_INODE_REFLUSH;
1828                         if (flags & HAMMER_FLUSH_SIGNAL) {
1829                                 ip->flags |= HAMMER_INODE_RESIGNAL;
1830                                 hammer_flusher_async(ip->hmp, flg);
1831                         }
1832                 }
1833                 break;
1834         case HAMMER_FST_FLUSH:
1835                 /*
1836                  * We are already flushing, flag the inode to reflush
1837                  * if needed after it completes its current flush.
1838                  *
1839                  * The REFLUSH flag is also needed to trigger
1840                  * dependency wakeups.
1841                  */
1842                 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
1843                         ip->flags |= HAMMER_INODE_REFLUSH;
1844                 if (flags & HAMMER_FLUSH_SIGNAL) {
1845                         ip->flags |= HAMMER_INODE_RESIGNAL;
1846                         hammer_flusher_async(ip->hmp, flg);
1847                 }
1848                 break;
1849         }
1850 }
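
#if 0
/*
 * Illustrative sketch, not part of the original source: the classic
 * fsync-style pairing.  Request an actively signaled flush and then
 * wait for the queued flush to complete (hammer_wait_inode() is
 * defined later in this file).
 */
static void
example_fsync_inode(hammer_inode_t ip)
{
        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
        hammer_wait_inode(ip);
}
#endif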
1851
1852 /*
1853  * Scan ip->target_list, which is a list of records owned by PARENTS to our
1854  * ip which reference our ip.
1855  *
1856  * XXX This is a huge mess of recursive code, but not one bit of it blocks
1857  *     so for now do not ref/deref the structures.  Note that if we use the
1858  *     ref/rel code later, the rel CAN block.
1859  */
1860 static int
1861 hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
1862                            hammer_flush_group_t flg)
1863 {
1864         hammer_record_t depend;
1865         int good;
1866         int r;
1867
1868         /*
1869          * If we hit our recursion limit and we have parent dependencies,
1870          * we cannot continue.  Returning < 0 will cause us to be flagged
1871          * for reflush.  Returning -2 cuts off additional dependency checks
1872          * because they are likely to also hit the depth limit.
1873          *
1874          * We cannot return < 0 if there are no dependencies or there might
1875          * not be anything to wakeup (ip).
1876          */
1877         if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
1878                 if (hammer_debug_general & 0x10000)
1879                         krateprintf(&hammer_gen_krate,
1880                             "HAMMER Warning: depth limit reached on "
1881                             "setup recursion, inode %p %016llx\n",
1882                             ip, (long long)ip->obj_id);
1883                 return(-2);
1884         }
1885
1886         /*
1887          * Scan dependencies
1888          */
1889         good = 0;
1890         TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
1891                 r = hammer_setup_parent_inodes_helper(depend, depth, flg);
1892                 KKASSERT(depend->target_ip == ip);
1893                 if (r < 0 && good == 0)
1894                         good = -1;
1895                 if (r > 0)
1896                         good = 1;
1897
1898                 /*
1899                  * If we failed due to the recursion depth limit then stop
1900                  * now.
1901                  */
1902                 if (r == -2)
1903                         break;
1904         }
1905         return(good);
1906 }
1907
1908 /*
1909  * This helper function takes a record representing the dependency between
1910  * the parent inode and child inode.
1911  *
1912  * record->ip           = parent inode
1913  * record->target_ip    = child inode
1914  * 
1915  * We are asked to recurse upwards and convert the record from SETUP
1916  * to FLUSH if possible.
1917  *
1918  * Return 1 if the record gives us connectivity
1919  *
1920  * Return 0 if the record is not relevant 
1921  *
1922  * Return -1 if we can't resolve the dependency and there is no connectivity.
1923  */
1924 static int
1925 hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
1926                                   hammer_flush_group_t flg)
1927 {
1928         hammer_inode_t pip;
1929         int good;
1930
1931         KKASSERT(record->flush_state != HAMMER_FST_IDLE);
1932         pip = record->ip;
1933
1934         /*
1935          * If the record is already flushing, is it in our flush group?
1936          *
1937          * If it is in our flush group but it is a general record or a 
1938          * delete-on-disk, it does not improve our connectivity (return 0),
1939          * and if the target inode is not trying to destroy itself we can't
1940          * allow the operation yet anyway (the second return -1).
1941          */
1942         if (record->flush_state == HAMMER_FST_FLUSH) {
1943                 /*
1944                  * If not in our flush group ask the parent to reflush
1945                  * us as soon as possible.
1946                  */
1947                 if (record->flush_group != flg) {
1948                         pip->flags |= HAMMER_INODE_REFLUSH;
1949                         record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1950                         return(-1);
1951                 }
1952
1953                 /*
1954                  * If in our flush group everything is already set up,
1955                  * just return whether the record will improve our
1956                  * visibility or not.
1957                  */
1958                 if (record->type == HAMMER_MEM_RECORD_ADD)
1959                         return(1);
1960                 return(0);
1961         }
1962
1963         /*
1964          * It must be a setup record.  Try to resolve the setup dependencies
1965          * by recursing upwards so we can place ip on the flush list.
1966          *
1967          * Limit ourselves to 20 levels of recursion to avoid blowing out
1968          * the kernel stack.  If we hit the recursion limit we can't flush
1969          * until the parent flushes.  The parent will flush independently
1970          * on its own and ultimately a deep recursion will be resolved.
1971          */
1972         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1973
1974         good = hammer_setup_parent_inodes(pip, depth + 1, flg);
1975
1976         /*
1977          * If good < 0 the parent has no connectivity and we cannot safely
1978          * flush the directory entry, which also means we can't flush our
1979          * ip.  Flag us for downward recursion once the parent's
1980          * connectivity is resolved.  Flag the parent for [re]flush or it
1981          * may not check for downward recursions.
1982          */
1983         if (good < 0) {
1984                 pip->flags |= HAMMER_INODE_REFLUSH;
1985                 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1986                 return(good);
1987         }
1988
1989         /*
1990          * We are go, place the parent inode in a flushing state so we can
1991          * place its record in a flushing state.  Note that the parent
1992          * may already be flushing.  The record must be in the same flush
1993          * group as the parent.
1994          */
1995         if (pip->flush_state != HAMMER_FST_FLUSH)
1996                 hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
1997         KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
1998
1999         /*
2000          * It is possible for a rename to create a loop in the recursion
2001          * and revisit a record.  This will result in the record being
2002          * placed in a flush state unexpectedly.  This check deals with
2003          * the case.
2004          */
2005         if (record->flush_state == HAMMER_FST_FLUSH) {
2006                 if (record->type == HAMMER_MEM_RECORD_ADD)
2007                         return(1);
2008                 return(0);
2009         }
2010
2011         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
2012
2013 #if 0
2014         if (record->type == HAMMER_MEM_RECORD_DEL &&
2015             (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
2016                 /*
2017                  * Regardless of flushing state we cannot sync this path if the
2018                  * record represents a delete-on-disk but the target inode
2019                  * is not ready to sync its own deletion.
2020                  *
2021                  * XXX need to count effective nlinks to determine whether
2022                  * the flush is ok, otherwise removing a hardlink will
2023                  * just leave the DEL record to rot.
2024                  */
2025                 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
2026                 return(-1);
2027         } else
2028 #endif
2029         if (pip->flush_group == flg) {
2030                 /*
2031                  * Because we have not calculated nlinks yet we can just
2032                  * set records to the flush state if the parent is in
2033                  * the same flush group as we are.
2034                  */
2035                 record->flush_state = HAMMER_FST_FLUSH;
2036                 record->flush_group = flg;
2037                 ++record->flush_group->refs;
2038                 hammer_ref(&record->lock);
2039
2040                 /*
2041                  * A general directory-add contributes to our visibility.
2042                  *
2043                  * Otherwise it is probably a directory-delete or
2044                  * delete-on-disk record and does not contribute to our
2045                  * visibility (but we can still flush it).
2046                  */
2047                 if (record->type == HAMMER_MEM_RECORD_ADD)
2048                         return(1);
2049                 return(0);
2050         } else {
2051                 /*
2052                  * If the parent is not in our flush group we cannot
2053                  * flush this record yet, there is no visibility.
2054                  * We tell the parent to reflush and mark ourselves
2055                  * so the parent knows it should flush us too.
2056                  */
2057                 pip->flags |= HAMMER_INODE_REFLUSH;
2058                 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
2059                 return(-1);
2060         }
2061 }
2062
2063 /*
2064  * This is the core routine placing an inode into the FST_FLUSH state.
2065  */
2066 static void
2067 hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
2068 {
2069         hammer_mount_t hmp = ip->hmp;
2070         int go_count;
2071
2072         /*
2073          * Set flush state and prevent the flusher from cycling into
2074          * the next flush group.  Do not place the ip on the list yet.
2075          * Inodes not in the idle state get an extra reference.
2076          */
2077         KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
2078         if (ip->flush_state == HAMMER_FST_IDLE)
2079                 hammer_ref(&ip->lock);
2080         ip->flush_state = HAMMER_FST_FLUSH;
2081         ip->flush_group = flg;
2082         ++hmp->flusher.group_lock;
2083         ++hmp->count_iqueued;
2084         ++hammer_count_iqueued;
2085         ++flg->total_count;
2086         hammer_redo_fifo_start_flush(ip);
2087
2088 #if 0
2089         /*
2090          * We need to be able to vfsync/truncate from the backend.
2091          *
2092          * XXX Any truncation from the backend will acquire the vnode
2093          *     independently.
2094          */
2095         KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
2096         if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
2097                 ip->flags |= HAMMER_INODE_VHELD;
2098                 vref(ip->vp);
2099         }
2100 #endif
2101
2102         /*
2103          * Figure out how many in-memory records we can actually flush
2104          * (not including inode meta-data, buffers, etc).
2105          */
2106         KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
2107         if (flags & HAMMER_FLUSH_RECURSION) {
2108                 /*
2109                  * If this is an upwards recursion we do not want to
2110                  * recurse down again!
2111                  */
2112                 go_count = 1;
2113 #if 0
2114         } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2115                 /*
2116                  * No new records are added if we must complete a flush
2117                  * from a previous cycle, but we do have to move the records
2118                  * from the previous cycle to the current one.
2119                  */
2120 #if 0
2121                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2122                                    hammer_syncgrp_child_callback, NULL);
2123 #endif
2124                 go_count = 1;
2125 #endif
2126         } else {
2127                 /*
2128                  * Normal flush, scan records and bring them into the flush.
2129                  * Directory adds and deletes are usually skipped (they are
2130                  * grouped with the related inode rather than with the
2131                  * directory).
2132                  *
2133                  * go_count can be negative, which means the scan aborted
2134                  * due to the flush group being over-full and we should
2135                  * flush what we have.
2136                  */
2137                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2138                                    hammer_setup_child_callback, NULL);
2139         }
2140
2141         /*
2142          * This is a more involved test that includes go_count.  If we
2143          * can't flush, flag the inode and return.  If go_count is 0 we
2144                  * are unable to flush any records in our rec_tree and
2145          * must ignore the XDIRTY flag.
2146          */
2147         if (go_count == 0) {
2148                 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
2149                         --hmp->count_iqueued;
2150                         --hammer_count_iqueued;
2151
2152                         --flg->total_count;
2153                         ip->flush_state = HAMMER_FST_SETUP;
2154                         ip->flush_group = NULL;
2155                         if (flags & HAMMER_FLUSH_SIGNAL) {
2156                                 ip->flags |= HAMMER_INODE_REFLUSH |
2157                                              HAMMER_INODE_RESIGNAL;
2158                         } else {
2159                                 ip->flags |= HAMMER_INODE_REFLUSH;
2160                         }
2161 #if 0
2162                         if (ip->flags & HAMMER_INODE_VHELD) {
2163                                 ip->flags &= ~HAMMER_INODE_VHELD;
2164                                 vrele(ip->vp);
2165                         }
2166 #endif
2167
2168                         /*
2169                          * REFLUSH is needed to trigger dependency wakeups
2170                          * when an inode is in SETUP.
2171                          */
2172                         ip->flags |= HAMMER_INODE_REFLUSH;
2173                         if (--hmp->flusher.group_lock == 0)
2174                                 wakeup(&hmp->flusher.group_lock);
2175                         return;
2176                 }
2177         }
2178
2179         /*
2180          * Snapshot the state of the inode for the backend flusher.
2181          *
2182          * We continue to retain save_trunc_off even when all truncations
2183          * have been resolved as an optimization to determine if we can
2184          * skip the B-Tree lookup for overwrite deletions.
2185          *
2186          * NOTE: The DELETING flag is a mod flag, but it is also sticky,
2187          * and stays in ip->flags.  Once set, it stays set until the
2188          * inode is destroyed.
2189          */
2190         if (ip->flags & HAMMER_INODE_TRUNCATED) {
2191                 KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
2192                 ip->sync_trunc_off = ip->trunc_off;
2193                 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
2194                 ip->flags &= ~HAMMER_INODE_TRUNCATED;
2195                 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
2196
2197                 /*
2198                  * The save_trunc_off used to cache whether the B-Tree
2199                  * holds any records past that point is not used until
2200                  * after the truncation has succeeded, so we can safely
2201                  * set it now.
2202                  */
2203                 if (ip->save_trunc_off > ip->sync_trunc_off)
2204                         ip->save_trunc_off = ip->sync_trunc_off;
2205         }
2206         ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
2207                            ~HAMMER_INODE_TRUNCATED);
2208         ip->sync_ino_leaf = ip->ino_leaf;
2209         ip->sync_ino_data = ip->ino_data;
2210         ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
2211 #ifdef DEBUG_TRUNCATE
2212         if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
2213                 kprintf("truncateS %016llx\n", ip->sync_trunc_off);
2214 #endif
2215
2216         /*
2217          * The flusher list inherits our inode and reference.
2218          */
2219         KKASSERT(flg->running == 0);
2220         RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip);
2221         if (--hmp->flusher.group_lock == 0)
2222                 wakeup(&hmp->flusher.group_lock);
2223
2224         /*
2225          * Auto-flush the group if it grows too large.  Make sure the
2226          * inode reclaim wait pipeline continues to work.
2227          */
2228         if (flg->total_count >= hammer_autoflush ||
2229             flg->total_count >= hammer_limit_reclaims / 4) {
2230                 if (hmp->fill_flush_group == flg)
2231                         hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry);
2232                 hammer_flusher_async(hmp, flg);
2233         }
2234 }
2235
2236 /*
2237  * Callback for scan of ip->rec_tree.  Try to include each record in our
2238  * flush.  ip->flush_group has been set but the inode has not yet been
2239  * moved into a flushing state.
2240  *
2241  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
2242  * both inodes.
2243  *
2244  * We return 1 for any record placed or found in FST_FLUSH, which prevents
2245  * the caller from shortcutting the flush.
2246  */
2247 static int
2248 hammer_setup_child_callback(hammer_record_t rec, void *data)
2249 {
2250         hammer_flush_group_t flg;
2251         hammer_inode_t target_ip;
2252         hammer_inode_t ip;
2253         int r;
2254
2255         /*
2256          * Records deleted or committed by the backend are ignored.
2257          * Note that the flush detects deleted frontend records at
2258          * multiple points to deal with races.  This is just the first
2259          * line of defense.  The only time HAMMER_RECF_DELETED_FE cannot
2260          * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
2261          * messes up link-count calculations.
2262          *
2263          * NOTE: Don't get confused between record deletion and, say,
2264          * directory entry deletion.  The deletion of a directory entry
2265          * which is on-media has nothing to do with the record deletion
2266          * flags.
2267          */
2268         if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
2269                           HAMMER_RECF_COMMITTED)) {
2270                 if (rec->flush_state == HAMMER_FST_FLUSH) {
2271                         KKASSERT(rec->flush_group == rec->ip->flush_group);
2272                         r = 1;
2273                 } else {
2274                         r = 0;
2275                 }
2276                 return(r);
2277         }
2278
2279         /*
2280          * If the record is in an idle state it has no dependencies and
2281          * can be flushed.
2282          */
2283         ip = rec->ip;
2284         flg = ip->flush_group;
2285         r = 0;
2286
2287         switch(rec->flush_state) {
2288         case HAMMER_FST_IDLE:
2289                 /*
2290                  * The record has no setup dependency, we can flush it.
2291                  */
2292                 KKASSERT(rec->target_ip == NULL);
2293                 rec->flush_state = HAMMER_FST_FLUSH;
2294                 rec->flush_group = flg;
2295                 ++flg->refs;
2296                 hammer_ref(&rec->lock);
2297                 r = 1;
2298                 break;
2299         case HAMMER_FST_SETUP:
2300                 /*
2301                  * The record has a setup dependency.  These are typically
2302                  * directory entry adds and deletes.  Such entries will be
2303                  * flushed when their inodes are flushed so we do not
2304                  * usually have to add them to the flush here.  However,
2305                  * if the target_ip has set HAMMER_INODE_CONN_DOWN then
2306                  * it is asking us to flush this record (and it).
2307                  */
2308                 target_ip = rec->target_ip;
2309                 KKASSERT(target_ip != NULL);
2310                 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
2311
2312                 /*
2313                  * If the target IP is already flushing in our group
2314                  * we could associate the record, but target_ip has
2315                  * already synced ino_data to sync_ino_data and we
2316                  * would also have to adjust nlinks.   Plus there are
2317                  * ordering issues for adds and deletes.
2318                  *
2319                  * Reflush downward if this is an ADD, and upward if
2320                  * this is a DEL.
2321                  */
2322                 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
2323                         if (rec->type == HAMMER_MEM_RECORD_ADD)
2324                                 ip->flags |= HAMMER_INODE_REFLUSH;
2325                         else
2326                                 target_ip->flags |= HAMMER_INODE_REFLUSH;
2327                         break;
2328                 } 
2329
2330                 /*
2331                  * Target IP is not yet flushing.  This can get complex
2332                  * because we have to be careful about the recursion.
2333                  *
2334                  * Directories create an issue for us in that if a flush
2335                  * of a directory is requested the expectation is to flush
2336                  * any pending directory entries, but this will cause the
2337                  * related inodes to recursively flush as well.  We can't
2338                  * really defer the operation so just get as many as we
2339                  * can and flush what we are able to.
2340                  */
2341 #if 0
2342                 if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
2343                     (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
2344                         /*
2345                          * We aren't reclaiming and the target ip was not
2346                          * previously prevented from flushing due to this
2347                          * record dependency.  Do not flush this record.
2348                          */
2349                         /*r = 0;*/
2350                 } else
2351 #endif
2352                 if (flg->total_count + flg->refs >
2353                            ip->hmp->undo_rec_limit) {
2354                         /*
2355                          * Our flush group is over-full and we risk blowing
2356                          * out the UNDO FIFO.  Stop the scan, flush what we
2357                          * have, then reflush the directory.
2358                          *
2359                          * The directory may be forced through multiple
2360                          * flush groups before it can be completely
2361                          * flushed.
2362                          */
2363                         ip->flags |= HAMMER_INODE_RESIGNAL |
2364                                      HAMMER_INODE_REFLUSH;
2365                         r = -1;
2366                 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
2367                         /*
2368                          * If the target IP is not flushing we can force
2369                          * it to flush, even if it is unable to write out
2370                          * any of its own records we have at least one in
2371                          * hand that we CAN deal with.
2372                          */
2373                         rec->flush_state = HAMMER_FST_FLUSH;
2374                         rec->flush_group = flg;
2375                         ++flg->refs;
2376                         hammer_ref(&rec->lock);
2377                         hammer_flush_inode_core(target_ip, flg,
2378                                                 HAMMER_FLUSH_RECURSION);
2379                         r = 1;
2380                 } else {
2381                         /*
2382                          * General or delete-on-disk record.
2383                          *
2384                          * XXX this needs help.  If a delete-on-disk we could
2385                          * disconnect the target.  If the target has its own
2386                          * dependencies they really need to be flushed.
2387                          *
2388                          * XXX
2389                          */
2390                         rec->flush_state = HAMMER_FST_FLUSH;
2391                         rec->flush_group = flg;
2392                         ++flg->refs;
2393                         hammer_ref(&rec->lock);
2394                         hammer_flush_inode_core(target_ip, flg,
2395                                                 HAMMER_FLUSH_RECURSION);
2396                         r = 1;
2397                 }
2398                 break;
2399         case HAMMER_FST_FLUSH:
2400                 /* 
2401                  * The record could be part of a previous flush group if the
2402                  * inode is a directory (the record being a directory entry).
2403                  * Once the flush group was closed a hammer_test_inode()
2404                  * function can cause a new flush group to be setup, placing
2405                  * the directory inode itself in a new flush group.
2406                  *
2407                  * When associated with a previous flush group we count it
2408                  * as if it were in our current flush group, since it will
2409                  * effectively be flushed by the time we flush our current
2410                  * flush group.
2411                  */
2412                 KKASSERT(
2413                     rec->ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY ||
2414                     rec->flush_group == flg);
2415                 r = 1;
2416                 break;
2417         }
2418         return(r);
2419 }
2420
2421 #if 0
2422 /*
2423  * This version just moves records already in a flush state to the new
2424  * flush group and that is it.
2425  */
2426 static int
2427 hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
2428 {
2429         hammer_inode_t ip = rec->ip;
2430
2431         switch(rec->flush_state) {
2432         case HAMMER_FST_FLUSH:
2433                 KKASSERT(rec->flush_group == ip->flush_group);
2434                 break;
2435         default:
2436                 break;
2437         }
2438         return(0);
2439 }
2440 #endif
2441
2442 /*
2443  * Wait for a previously queued flush to complete.
2444  *
2445  * If a critical error occurred we don't try to wait.
2446  */
2447 void
2448 hammer_wait_inode(hammer_inode_t ip)
2449 {
2450         /*
2451          * The inode can be in a SETUP state in which case RESIGNAL
2452          * should be set.  If RESIGNAL is not set then the previous
2453          * flush completed and a later operation placed the inode
2454          * in a passive setup state again, so we're done.
2455          *
2456          * The inode can be in a FLUSH state in which case we
2457          * can just wait for completion.
2458          */
2459         while (ip->flush_state == HAMMER_FST_FLUSH ||
2460             (ip->flush_state == HAMMER_FST_SETUP &&
2461              (ip->flags & HAMMER_INODE_RESIGNAL))) {
2462                 /*
2463                  * Don't try to flush on a critical error
2464                  */
2465                 if (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
2466                         break;
2467
2468                 /*
2469                  * If the inode was already being flushed its flg
2470                  * may not have been queued to the backend.  We
2471                  * have to make sure it gets queued or we can wind
2472                  * up blocked or deadlocked (particularly if we are
2473                  * the vnlru thread).
2474                  */
2475                 if (ip->flush_state == HAMMER_FST_FLUSH) {
2476                         KKASSERT(ip->flush_group);
2477                         if (ip->flush_group->closed == 0) {
2478                                 if (hammer_debug_inode) {
2479                                         kprintf("hammer: debug: forcing "
2480                                                 "async flush ip %016jx\n",
2481                                                 (intmax_t)ip->obj_id);
2482                                 }
2483                                 hammer_flusher_async(ip->hmp,
2484                                                      ip->flush_group);
2485                                 continue; /* retest */
2486                         }
2487                 }
2488
2489                 /*
2490                  * In a flush state with the flg queued to the backend
2491                  * or in a setup state with RESIGNAL set, we can safely
2492                  * wait.
2493                  */
2494                 ip->flags |= HAMMER_INODE_FLUSHW;
2495                 tsleep(&ip->flags, 0, "hmrwin", 0);
2496         }
2497
2498 #if 0
2499         /*
2500          * The inode may have been in a passive setup state,
2501          * call flush to make sure we get signaled.
2502          */
2503         if (ip->flush_state == HAMMER_FST_SETUP)
2504                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2505 #endif
2506
2507 }
2508
2509 /*
2510  * Called by the backend code when a flush has been completed.
2511  * The inode has already been removed from the flush list.
2512  *
2513  * A pipelined flush can occur, in which case we must re-enter the
2514  * inode on the list and re-copy its fields.
2515  */
2516 void
2517 hammer_flush_inode_done(hammer_inode_t ip, int error)
2518 {
2519         hammer_mount_t hmp;
2520         int dorel;
2521
2522         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
2523
2524         hmp = ip->hmp;
2525
2526         /*
2527          * Auto-reflush if the backend could not completely flush
2528          * the inode.  This fixes a case where a deferred buffer flush
2529          * could cause fsync to return early.
2530          */
2531         if (ip->sync_flags & HAMMER_INODE_MODMASK)
2532                 ip->flags |= HAMMER_INODE_REFLUSH;
2533
2534         /*
2535          * Merge left-over flags back into the frontend and fix the state.
2536          * Incomplete truncations are retained by the backend.
2537          */
2538         ip->error = error;
2539         ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
2540         ip->sync_flags &= HAMMER_INODE_TRUNCATED;
2541
2542         /*
2543          * The backend may have adjusted nlinks, so if the adjusted nlinks
2544          * does not match the frontend's, set the frontend's DDIRTY flag again.
2545          */
2546         if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
2547                 ip->flags |= HAMMER_INODE_DDIRTY;
2548
2549         /*
2550          * Fix up the dirty buffer status.
2551          */
2552         if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
2553                 ip->flags |= HAMMER_INODE_BUFS;
2554         }
2555         hammer_redo_fifo_end_flush(ip);
2556
2557         /*
2558          * Re-set the XDIRTY flag if some of the inode's in-memory records
2559          * could not be flushed.
2560          */
2561         KKASSERT((RB_EMPTY(&ip->rec_tree) &&
2562                   (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
2563                  (!RB_EMPTY(&ip->rec_tree) &&
2564                   (ip->flags & HAMMER_INODE_XDIRTY) != 0));
2565
2566         /*
2567          * Do not lose track of inodes which no longer have vnode
2568          * associations, otherwise they may never get flushed again.
2569          *
2570          * The reflush flag can be set superfluously, causing extra pain
2571          * for no reason.  If the inode is no longer modified it no longer
2572          * needs to be flushed.
2573          */
2574         if (ip->flags & HAMMER_INODE_MODMASK) {
2575                 if (ip->vp == NULL)
2576                         ip->flags |= HAMMER_INODE_REFLUSH;
2577         } else {
2578                 ip->flags &= ~HAMMER_INODE_REFLUSH;
2579         }
2580
2581         /*
2582          * The fs token is held but the inode lock is not held.  Because this
2583          * is a backend flush it is possible that the vnode has no references,
2584          * which can cause a reclaim race inside vsetisdirty() if/when it blocks.
2585          *
2586          * Therefore, we must lock the inode around this particular dirtying
2587          * operation.  We don't have to do so around other dirtying operations
2588          * where the vnode is implicitly or explicitly held.
2589          */
2590         if (ip->flags & HAMMER_INODE_MODMASK) {
2591                 hammer_lock_ex(&ip->lock);
2592                 hammer_inode_dirty(ip);
2593                 hammer_unlock(&ip->lock);
2594         }
2595
2596         /*
2597          * Adjust the flush state.
2598          */
2599         if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2600                 /*
2601                  * We were unable to flush out all our records, leave the
2602                  * inode in a flush state and in the current flush group.
2603                  * The flush group will be re-run.
2604                  *
2605                  * This occurs if the UNDO block gets too full or there is
2606                  * too much dirty meta-data; this allows the flusher to
2607                  * finalize the UNDO block and then re-flush.
2608                  */
2609                 ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
2610                 dorel = 0;
2611         } else {
2612                 /*
2613                  * Remove from the flush_group
2614                  */
2615                 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
2616                 ip->flush_group = NULL;
2617
2618 #if 0
2619                 /*
2620                  * Clean up the vnode ref and tracking counts.
2621                  */
2622                 if (ip->flags & HAMMER_INODE_VHELD) {
2623                         ip->flags &= ~HAMMER_INODE_VHELD;
2624                         vrele(ip->vp);
2625                 }
2626 #endif
2627                 --hmp->count_iqueued;
2628                 --hammer_count_iqueued;
2629
2630                 /*
2631                  * And adjust the state.
2632                  */
2633                 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
2634                         ip->flush_state = HAMMER_FST_IDLE;
2635                         dorel = 1;
2636                 } else {
2637                         ip->flush_state = HAMMER_FST_SETUP;
2638                         dorel = 0;
2639                 }
2640
2641                 /*
2642                  * If the frontend is waiting for a flush to complete,
2643                  * wake it up.
2644                  */
2645                 if (ip->flags & HAMMER_INODE_FLUSHW) {
2646                         ip->flags &= ~HAMMER_INODE_FLUSHW;
2647                         wakeup(&ip->flags);
2648                 }
2649
2650                 /*
2651                  * If the frontend made more changes and requested another
2652                  * flush, then try to get it running.
2653                  *
2654                  * Reflushes are aborted when the inode is errored out.
2655                  */
2656                 if (ip->flags & HAMMER_INODE_REFLUSH) {
2657                         ip->flags &= ~HAMMER_INODE_REFLUSH;
2658                         if (ip->flags & HAMMER_INODE_RESIGNAL) {
2659                                 ip->flags &= ~HAMMER_INODE_RESIGNAL;
2660                                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2661                         } else {
2662                                 hammer_flush_inode(ip, 0);
2663                         }
2664                 }
2665         }
2666
2667         /*
2668          * If we have no parent dependencies we can clear CONN_DOWN
2669          */
2670         if (TAILQ_EMPTY(&ip->target_list))
2671                 ip->flags &= ~HAMMER_INODE_CONN_DOWN;
2672
2673         /*
2674          * If the inode is now clean drop the space reservation.
2675          */
2676         if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
2677             (ip->flags & HAMMER_INODE_RSV_INODES)) {
2678                 ip->flags &= ~HAMMER_INODE_RSV_INODES;
2679                 --hmp->rsv_inodes;
2680         }
2681
2682         ip->flags &= ~HAMMER_INODE_SLAVEFLUSH;
2683
2684         if (dorel)
2685                 hammer_rel_inode(ip, 0);
2686 }
2687
2688 /*
2689  * Called from hammer_sync_inode() to synchronize in-memory records
2690  * to the media.
2691  */
2692 static int
2693 hammer_sync_record_callback(hammer_record_t record, void *data)
2694 {
2695         hammer_cursor_t cursor = data;
2696         hammer_transaction_t trans = cursor->trans;
2697         hammer_mount_t hmp = trans->hmp;
2698         int error;
2699
2700         /*
2701          * Skip records that do not belong to the current flush.
2702          */
2703         ++hammer_stats_record_iterations;
2704         if (record->flush_state != HAMMER_FST_FLUSH)
2705                 return(0);
2706
2707 #if 1
2708         if (record->flush_group != record->ip->flush_group) {
2709                 kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group ,record->ip->flush_group);
2710                 if (hammer_debug_critical)
2711                         Debugger("blah2");
2712                 return(0);
2713         }
2714 #endif
2715         KKASSERT(record->flush_group == record->ip->flush_group);
2716
2717         /*
2718          * Interlock the record using the BE flag.  Once BE is set the
2719          * frontend cannot change the state of FE.
2720          *
2721          * NOTE: If FE is set prior to us setting BE we still sync the
2722          * record out, but the flush completion code converts it to 
2723          * a delete-on-disk record instead of destroying it.
2724          */
2725         KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
2726         record->flags |= HAMMER_RECF_INTERLOCK_BE;
2727
2728         /*
2729          * The backend has already disposed of the record.
2730          */
2731         if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
2732                 error = 0;
2733                 goto done;
2734         }
2735
2736         /*
2737          * If the whole inode is being deleted and all on-disk records will
2738          * be deleted very soon, we can't sync any new records to disk
2739          * because they will be deleted in the same transaction they were
2740          * created in (delete_tid == create_tid), which will assert.
2741          *
2742          * XXX There may be a case with RECORD_ADD and DELETED_FE both
2743          * set that we currently panic on.
2744          */
2745         if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
2746                 switch(record->type) {
2747                 case HAMMER_MEM_RECORD_DATA:
2748                         /*
2749                          * We don't have to do anything, if the record was
2750                          * committed the space will have been accounted for
2751                          * in the blockmap.
2752                          */
2753                         /* fall through */
2754                 case HAMMER_MEM_RECORD_GENERAL:
2755                         /*
2756                          * Set deleted-by-backend flag.  Do not set the
2757                          * backend committed flag, because we are throwing
2758                          * the record away.
2759                          */
2760                         record->flags |= HAMMER_RECF_DELETED_BE;
2761                         ++record->ip->rec_generation;
2762                         error = 0;
2763                         goto done;
2764                 case HAMMER_MEM_RECORD_ADD:
2765                         panic("hammer_sync_record_callback: illegal add "
2766                               "during inode deletion record %p", record);
2767                         break; /* NOT REACHED */
2768                 case HAMMER_MEM_RECORD_INODE:
2769                         panic("hammer_sync_record_callback: attempt to "
2770                               "sync inode record %p?", record);
2771                         break; /* NOT REACHED */
2772                 case HAMMER_MEM_RECORD_DEL:
2773                         /* 
2774                          * Follow through and issue the on-disk deletion
2775                          */
2776                         break;
2777                 }
2778         }
2779
2780         /*
2781          * If DELETED_FE is set special handling is needed for directory
2782          * entries.  Dependent pieces related to the directory entry may
2783          * have already been synced to disk.  If this occurs we have to
2784          * sync the directory entry and then change the in-memory record
2785          * from an ADD to a DELETE to cover the fact that it's been
2786          * deleted by the frontend.
2787          *
2788          * A directory delete covering record (MEM_RECORD_DEL) can never
2789          * be deleted by the frontend.
2790          *
2791          * Any other record type (aka DATA) can be deleted by the frontend.
2792          * XXX At the moment the flusher must skip it because there may
2793          * be another data record in the flush group for the same block,
2794          * meaning that some frontend data changes can leak into the backend's
2795          * synchronization point.
2796          */
2797         if (record->flags & HAMMER_RECF_DELETED_FE) {
2798                 if (record->type == HAMMER_MEM_RECORD_ADD) {
2799                         /*
2800                          * Convert a front-end deleted directory-add to
2801                          * a directory-delete entry later.
2802                          */
2803                         record->flags |= HAMMER_RECF_CONVERT_DELETE;
2804                 } else {
2805                         /*
2806                          * Dispose of the record (race case).  Mark as
2807                          * deleted by backend (and not committed).
2808                          */
2809                         KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
2810                         record->flags |= HAMMER_RECF_DELETED_BE;
2811                         ++record->ip->rec_generation;
2812                         error = 0;
2813                         goto done;
2814                 }
2815         }
2816
2817         /*
2818          * Assign the create_tid for new records.  Deletions already
2819          * have the record's entire key properly set up.
2820          */
2821         if (record->type != HAMMER_MEM_RECORD_DEL) {
2822                 record->leaf.base.create_tid = trans->tid;
2823                 record->leaf.create_ts = trans->time32;
2824         }
2825
2826         /*
2827          * This actually moves the record to the on-media B-Tree.  We
2828          * must also generate REDO_TERM entries in the UNDO/REDO FIFO
2829          * indicating that the related REDO_WRITE(s) have been committed.
2830          *
2831          * During recovery any REDO_TERM's within the nominal recovery span
2832          * are ignored since the related meta-data is being undone, causing
2833          * any matching REDO_WRITEs to execute.  The REDO_TERMs outside
2834          * the nominal recovery span will match against REDO_WRITEs and
2835          * prevent them from being executed (because the meta-data has
2836          * already been synchronized).
2837          */
2838         if (record->flags & HAMMER_RECF_REDO) {
2839                 KKASSERT(record->type == HAMMER_MEM_RECORD_DATA);
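                /*
                 * NOTE: For a data record leaf.base.key is the file offset
                 * of the *end* of the data, so subtracting data_len (as
                 * done below) yields the starting offset this
                 * REDO_TERM_WRITE covers.
                 */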
2840                 hammer_generate_redo(trans, record->ip,
2841                                      record->leaf.base.key -
2842                                          record->leaf.data_len,
2843                                      HAMMER_REDO_TERM_WRITE,
2844                                      NULL,
2845                                      record->leaf.data_len);
2846         }
2847
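        /*
         * Sync the record to media.  hammer_ip_sync_record_cursor()
         * returns EDEADLK when it hits a B-Tree deadlock; recover by
         * tearing the cursor down, re-initializing it, and retrying.
         */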
2848         for (;;) {
2849                 error = hammer_ip_sync_record_cursor(cursor, record);
2850                 if (error != EDEADLK)
2851                         break;
2852                 hammer_done_cursor(cursor);
2853                 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
2854                                            record->ip);
2855                 if (error)
2856                         break;
2857         }
2858         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
2859
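        /*
         * Return a negative error code so the RB_SCAN in the caller
         * aborts; hammer_sync_inode() negates it back to a positive
         * errno.
         */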
2860         if (error)
2861                 error = -error;
2862 done:
2863         hammer_flush_record_done(record, error);
2864
2865         /*
2866          * Do partial finalization if we have built up too many dirty
2867          * buffers.  Otherwise a buffer cache deadlock can occur when
2868          * doing things like creating tens of thousands of tiny files.
2869          *
2870          * We must release our cursor lock to avoid a 3-way deadlock
2871          * due to the exclusive sync lock the finalizer must get.
2872          *
2873          * WARNING: See warnings in hammer_unlock_cursor() function.
2874          */
2875         if (hammer_flusher_meta_limit(hmp) ||
2876             vm_page_count_severe()) {
2877                 hammer_unlock_cursor(cursor);
2878                 hammer_flusher_finalize(trans, 0);
2879                 hammer_lock_cursor(cursor);
2880         }
2881         return(error);
2882 }
2883
2884 /*
2885  * Backend function called by the flusher to sync an inode to media.
2886  */
2887 int
2888 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
2889 {
2890         struct hammer_cursor cursor;
2891         hammer_node_t tmp_node;
2892         hammer_record_t depend;
2893         hammer_record_t next;
2894         int error, tmp_error;
2895         u_int64_t nlinks;
2896
2897         if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
2898                 return(0);
2899
2900         error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2901         if (error)
2902                 goto done;
2903
2904         /*
2905          * Any directory records referencing this inode which are not in
2906          * our current flush group must adjust our nlink count for the
2907          * purposes of synchronizing to disk.
2908          *
2909          * Records which are in our flush group can be unlinked from our
2910          * inode now, potentially allowing the inode to be physically
2911          * deleted.
2912          *
2913          * This cannot block.
2914          */
2915         nlinks = ip->ino_data.nlinks;
2916         next = TAILQ_FIRST(&ip->target_list);
2917         while ((depend = next) != NULL) {
2918                 next = TAILQ_NEXT(depend, target_entry);
2919                 if (depend->flush_state == HAMMER_FST_FLUSH &&
2920                     depend->flush_group == ip->flush_group) {
2921                         /*
2922                          * If this is an ADD that was deleted by the frontend
2923                          * the frontend nlinks count will have already been
2924                          * decremented, but the backend is going to sync its
2925                          * directory entry and must account for it.  The
2926                          * record will be converted to a delete-on-disk when
2927                          * it gets synced.
2928                          *
2929                          * If the ADD was not deleted by the frontend we
2930                          * can remove the dependency from our target_list.
2931                          */
2932                         if (depend->flags & HAMMER_RECF_DELETED_FE) {
2933                                 ++nlinks;
2934                         } else {
2935                                 TAILQ_REMOVE(&ip->target_list, depend,
2936                                              target_entry);
2937                                 depend->target_ip = NULL;
2938                         }
2939                 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
2940                         /*
2941                          * Not part of our flush group and not deleted by
2942                          * the front-end, adjust the link count synced to
2943                          * the media (undo what the frontend did when it
2944                          * queued the record).
2945                          */
2946                         KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
2947                         switch(depend->type) {
2948                         case HAMMER_MEM_RECORD_ADD:
2949                                 --nlinks;
2950                                 break;
2951                         case HAMMER_MEM_RECORD_DEL:
2952                                 ++nlinks;
2953                                 break;
2954                         default:
2955                                 break;
2956                         }
2957                 }
2958         }
2959
2960         /*
2961          * Set dirty if we had to modify the link count.
2962          */
2963         if (ip->sync_ino_data.nlinks != nlinks) {
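                /* nlinks is unsigned; the signed cast catches underflow */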
2964                 KKASSERT((int64_t)nlinks >= 0);
2965                 ip->sync_ino_data.nlinks = nlinks;
2966                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
2967         }
2968
2969         /*
2970          * If there is a truncation queued, destroy any data past the (aligned)
2971          * truncation point.  Userland will have dealt with the buffer
2972          * containing the truncation point for us.
2973          *
2974          * We don't flush pending frontend data buffers until after we've
2975          * dealt with the truncation.
2976          */
2977         if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2978                 /*
2979                  * Interlock trunc_off.  The VOP front-end may continue to
2980                  * make adjustments to it while we are blocked.
2981                  */
2982                 off_t trunc_off;
2983                 off_t aligned_trunc_off;
2984                 int blkmask;
2985
2986                 trunc_off = ip->sync_trunc_off;
2987                 blkmask = hammer_blocksize(trunc_off) - 1;
2988                 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
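                /*
                 * e.g. assuming a 16KB block size, trunc_off 0x4100 gives
                 * blkmask 0x3FFF and aligned_trunc_off 0x8000.
                 */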
2989
2990                 /*
2991                  * Delete any whole blocks on-media.  The front-end has
2992                  * already cleaned out any partial block and made it
2993                  * pending.  The front-end may have updated trunc_off
2994                  * while we were blocked so we only use sync_trunc_off.
2995                  *
2996                  * This operation can blow out the buffer cache, EWOULDBLOCK
2997                  * means we were unable to complete the deletion.  The
2998                  * deletion will update sync_trunc_off in that case.
2999                  */
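                /*
                 * 0x7FFFFFFFFFFFFFFFLL is the maximum 64-bit offset,
                 * i.e. delete every whole block from aligned_trunc_off
                 * through the end of the file.
                 */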
3000                 error = hammer_ip_delete_range(&cursor, ip,
3001                                                 aligned_trunc_off,
3002                                                 0x7FFFFFFFFFFFFFFFLL, 2);
3003                 if (error == EWOULDBLOCK) {
3004                         ip->flags |= HAMMER_INODE_WOULDBLOCK;
3005                         error = 0;
3006                         goto defer_buffer_flush;
3007                 }
3008
3009                 if (error)
3010                         goto done;
3011
3012                 /*
3013                  * Generate a REDO_TERM_TRUNC entry in the UNDO/REDO FIFO.
3014                  *
3015                  * XXX we do this even if we did not previously generate
3016                  * a REDO_TRUNC record.  This operation may enclose the
3017                  * range covered by multiple prior truncation entries in
3018                  * the REDO log.
3019                  */
3020                 if (trans->hmp->version >= HAMMER_VOL_VERSION_FOUR &&
3021                     (ip->flags & HAMMER_INODE_RDIRTY)) {
3022                         hammer_generate_redo(trans, ip, aligned_trunc_off,
3023                                              HAMMER_REDO_TERM_TRUNC,
3024                                              NULL, 0);
3025                 }
3026
3027                 /*
3028                  * Clear the truncation flag on the backend after we have
3029                  * completed the deletions.  Backend data is now good again
3030                  * (including new records we are about to sync, below).
3031                  *
3032                  * Leave sync_trunc_off intact.  As we write additional
3033                  * records the backend will update sync_trunc_off.  This
3034                  * tells the backend whether it can skip the overwrite
3035                  * test.  This should work properly even when the backend
3036                  * writes full blocks where the truncation point straddles
3037                  * the block because the comparison is against the base
3038                  * offset of the record.
3039                  */
3040                 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
3041                 /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
3042         } else {
3043                 error = 0;
3044         }
3045
3046         /*
3047          * Now sync related records.  These will typically be directory
3048          * entries, records tracking direct-writes, or delete-on-disk records.
3049          */
3050         if (error == 0) {
3051                 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
3052                                     hammer_sync_record_callback, &cursor);
3053                 if (tmp_error < 0)
3054                         tmp_error = -tmp_error;
3055                 if (tmp_error)
3056                         error = tmp_error;
3057         }
3058         hammer_cache_node(&ip->cache[1], cursor.node);
3059
3060         /*
3061          * Re-seek for inode update, assuming our cache hasn't been ripped
3062          * out from under us.
3063          */
3064         if (error == 0) {
3065                 tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
3066                 if (tmp_node) {
3067                         hammer_cursor_downgrade(&cursor);
3068                         hammer_lock_sh(&tmp_node->lock);
3069                         if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
3070                                 hammer_cursor_seek(&cursor, tmp_node, 0);
3071                         hammer_unlock(&tmp_node->lock);
3072                         hammer_rel_node(tmp_node);
3073                 }
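                /*
                 * A failed re-seek is not fatal; discard any error and
                 * let the inode update below re-position the cursor.
                 */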
3074                 error = 0;
3075         }
3076
3077         /*
3078          * If we are deleting the inode the frontend had better not have
3079          * any active references on elements making up the inode.
3080          *
3081          * The call to hammer_ip_delete_clean() cleans up auxiliary records
3082          * but not DB or DATA records.  Those must have already been deleted
3083          * by the normal truncation mechanic.
3084          */
3085         if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
3086             RB_EMPTY(&ip->rec_tree) &&
3087             (ip->sync_flags & HAMMER_INODE_DELETING) &&
3088             (ip->flags & HAMMER_INODE_DELETED) == 0) {
3089                 int count1 = 0;
3090
3091                 error = hammer_ip_delete_clean(&cursor, ip, &count1);
3092                 if (error == 0) {
3093                         ip->flags |= HAMMER_INODE_DELETED;
3094                         ip->sync_flags &= ~HAMMER_INODE_DELETING;
3095                         ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
3096                         KKASSERT(RB_EMPTY(&ip->rec_tree));
3097
3098                         /*
3099                          * Set delete_tid in both the frontend and backend
3100                          * copy of the inode record.  The DELETED flag handles
3101                          * this, do not set DDIRTY.
3102                          */
3103                         ip->ino_leaf.base.delete_tid = trans->tid;
3104                         ip->sync_ino_leaf.base.delete_tid = trans->tid;
3105                         ip->ino_leaf.delete_ts = trans->time32;
3106                         ip->sync_ino_leaf.delete_ts = trans->time32;
3107
3109                         /*
3110                          * Adjust the inode count in the volume header
3111                          */
3112                         hammer_sync_lock_sh(trans);
3113                         if (ip->flags & HAMMER_INODE_ONDISK) {
3114                                 hammer_modify_volume_field(trans,
3115                                                            trans->rootvol,
3116                                                            vol0_stat_inodes);
3117                                 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
3118                                 hammer_modify_volume_done(trans->rootvol);
3119                         }
3120                         hammer_sync_unlock(trans);
3121                 }
3122         }
3123
3124         if (error)
3125                 goto done;
3126         ip->sync_flags &= ~HAMMER_INODE_BUFS;
3127
3128 defer_buffer_flush:
3129         /*
3130          * Now update the inode's on-disk inode-data and/or on-disk record.
3131          * DELETED and ONDISK are managed only in ip->flags.
3132          *
3133          * In the case of a deferred buffer flush we still update the on-disk
3134          * inode to satisfy visibility requirements if there happen to be
3135          * directory dependencies.
3136          */
3137         switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
3138         case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
3139                 /*
3140                  * If deleted and on-disk, don't set any additional flags.
3141                  * The delete flag takes care of things.
3142                  *
3143                  * Clear flags which may have been set by the frontend.
3144                  */
3145                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
3146                                     HAMMER_INODE_SDIRTY |
3147                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
3148                                     HAMMER_INODE_DELETING);
3149                 break;
3150         case HAMMER_INODE_DELETED:
3151                 /*
3152                  * Take care of the case where a deleted inode was never
3153                  * flushed to the disk in the first place.
3154                  *
3155                  * Clear flags which may have been set by the frontend.
3156                  */
3157                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
3158                                     HAMMER_INODE_SDIRTY |
3159                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
3160                                     HAMMER_INODE_DELETING);
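                /*
                 * Throw away any in-memory records still queued to the
                 * inode; it never made it to media so nothing on-disk
                 * can reference them.
                 */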
3161                 while (RB_ROOT(&ip->rec_tree)) {
3162                         hammer_record_t record = RB_ROOT(&ip->rec_tree);
3163                         hammer_ref(&record->lock);
3164                         KKASSERT(hammer_oneref(&record->lock));
3165                         record->flags |= HAMMER_RECF_DELETED_BE;
3166                         ++record->ip->rec_generation;
3167                         hammer_rel_mem_record(record);
3168                 }
3169                 break;
3170         case HAMMER_INODE_ONDISK:
3171                 /*
3172                  * If already on-disk, do not set any additional flags.
3173                  */
3174                 break;
3175         default:
3176                 /*
3177                  * If not on-disk and not deleted, set DDIRTY to force
3178                  * an initial record to be written.
3179                  *
3180                  * Also set the create_tid in both the frontend and backend
3181                  * copy of the inode record.
3182                  */
3183                 ip->ino_leaf.base.create_tid = trans->tid;
3184                 ip->ino_leaf.create_ts = trans->time32;
3185                 ip->sync_ino_leaf.base.create_tid = trans->tid;
3186                 ip->sync_ino_leaf.create_ts = trans->time32;
3187                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
3188                 break;
3189         }
3190
3191         /*
3192          * If DDIRTY or SDIRTY is set, write out a new record.
3193          * If the inode is already on-disk the old record is marked as
3194          * deleted.
3195          *
3196          * If DELETED is set hammer_update_inode() will delete the existing
3197          * record without writing out a new one.
3198          *
3199          * If *ONLY* the ITIMES flag is set we can update the record in-place.
3200          */
3201         if (ip->flags & HAMMER_INODE_DELETED) {
3202                 error = hammer_update_inode(&cursor, ip);
3203         } else 
3204         if (!(ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY)) &&
3205             (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
3206                 error = hammer_update_itimes(&cursor, ip);
3207         } else
3208         if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY |
3209                               HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
3210                 error = hammer_update_inode(&cursor, ip);
3211         }
3212 done:
3213         if (ip->flags & HAMMER_INODE_MODMASK)
3214                 hammer_inode_dirty(ip);
3215         if (error) {
3216                 hammer_critical_error(ip->hmp, ip, error,
3217                                       "while syncing inode");
3218         }
3219         hammer_done_cursor(&cursor);
3220         return(error);
3221 }
3222
3223 /*
3224  * This routine is called when the OS is no longer actively referencing
3225  * the inode (but might still be keeping it cached), or when releasing
3226  * the last reference to an inode.
3227  *
3228  * At this point if the inode's nlinks count is zero we want to destroy
3229  * it, which may mean destroying it on-media too.
3230  */
3231 void
3232 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
3233 {
3234         struct vnode *vp;
3235
3236         /*
3237          * Set the DELETING flag when the link count drops to 0 and the
3238          * OS no longer has any opens on the inode.
3239          *
3240          * The backend will clear DELETING (a mod flag) and set DELETED
3241          * (a state flag) when it is actually able to perform the
3242          * operation.
3243          *
3244          * Don't reflag the deletion if the flusher is currently syncing
3245          * one that was already flagged.  A previously set DELETING flag
3246          * may bounce around flags and sync_flags until the operation is
3247          * completely done.
3248          *
3249          * Do not attempt to modify a snapshot inode (one set to read-only).
3250          */
3251         if (ip->ino_data.nlinks == 0 &&
3252             ((ip->flags | ip->sync_flags) & (HAMMER_INODE_RO|HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
3253                 ip->flags |= HAMMER_INODE_DELETING;
3254                 ip->flags |= HAMMER_INODE_TRUNCATED;
3255                 ip->trunc_off = 0;
3256                 vp = NULL;
3257                 if (getvp) {
3258                         if (hammer_get_vnode(ip, &vp) != 0)
3259                                 return;
3260                 }
3261
3262                 /*
3263                  * Final cleanup
3264                  */
3265                 if (ip->vp)
3266                         nvtruncbuf(ip->vp, 0, HAMMER_BUFSIZE, 0, 0);
3267                 if (ip->flags & HAMMER_INODE_MODMASK)
3268                         hammer_inode_dirty(ip);
3269                 if (getvp)
3270                         vput(vp);
3271         }
3272 }
3273
3274 /*
3275  * After potentially resolving a dependency the inode is tested
3276  * to determine whether it needs to be reflushed.
3277  */
3278 void
3279 hammer_test_inode(hammer_inode_t ip)
3280 {
3281         if (ip->flags & HAMMER_INODE_REFLUSH) {
3282                 ip->flags &= ~HAMMER_INODE_REFLUSH;
3283                 hammer_ref(&ip->lock);
3284                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
3285                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
3286                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
3287                 } else {
3288                         hammer_flush_inode(ip, 0);
3289                 }
3290                 hammer_rel_inode(ip, 0);
3291         }
3292 }
3293
3294 /*
3295  * Clear the RECLAIM flag on an inode.  This occurs when the inode is
3296  * reassociated with a vp or just before it gets freed.
3297  *
3298  * Pipeline wakeups to threads blocked due to an excessive number of
3299  * detached inodes.  This typically occurs when atime updates accumulate
3300  * while scanning a directory tree.
3301  */
3302 static void
3303 hammer_inode_wakereclaims(hammer_inode_t ip)
3304 {
3305         struct hammer_reclaim *reclaim;
3306         hammer_mount_t hmp = ip->hmp;
3307
3308         if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
3309                 return;
3310
3311         --hammer_count_reclaims;
3312         --hmp->count_reclaims;
3313         ip->flags &= ~HAMMER_INODE_RECLAIM;
3314
3315         if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
3316                 KKASSERT(reclaim->count > 0);
3317                 if (--reclaim->count == 0) {
3318                         TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
3319                         wakeup(reclaim);
3320                 }
3321         }
3322 }
3323
3324 /*
3325  * Setup our reclaim pipeline.  We only let so many detached (and dirty)
3326  * inodes build up before we start blocking.  This routine is called
3327  * if a new inode is created or an inode is loaded from media.
3328  *
3329  * When we block we don't care *which* inode has finished reclaiming,
3330  * as long as one does.
3331  *
3332  * The reclaim pipeline is primarily governed by the auto-flush which is
3333  * 1/4 hammer_limit_reclaims.  We don't want to block if the count is
3334  * less than 1/2 hammer_limit_reclaims.  From 1/2 to full count is
3335  * dynamically governed.
3336  */
3337 void
3338 hammer_inode_waitreclaims(hammer_transaction_t trans)
3339 {
3340         hammer_mount_t hmp = trans->hmp;
3341         struct hammer_reclaim reclaim;
3342         int lower_limit;
3343
3344         /*
3345          * Track inode load, delay if the number of reclaiming inodes is
3346          * between 2/4 and 4/4 hammer_limit_reclaims, depending on the
3347          * calling process's recent inode load.
3347          */
3348         if (curthread->td_proc) {
3349                 struct hammer_inostats *stats;
3350
3351                 stats = hammer_inode_inostats(hmp, curthread->td_proc->p_pid);
3352                 ++stats->count;
3353
3354                 if (stats->count > hammer_limit_reclaims / 2)
3355                         stats->count = hammer_limit_reclaims / 2;
3356                 lower_limit = hammer_limit_reclaims - stats->count;
3357                 if (hammer_debug_general & 0x10000) {
3358                         kprintf("pid %5d limit %d\n",
3359                                 (int)curthread->td_proc->p_pid, lower_limit);
3360                 }
3361         } else {
3362                 lower_limit = hammer_limit_reclaims * 3 / 4;
3363         }
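        /*
         * Example (hypothetical numbers): with hammer_limit_reclaims at
         * 4000, a process which recently loaded many inodes is clamped
         * to stats->count 2000 and starts blocking at lower_limit 2000,
         * while a quiet process does not block until nearly 4000.
         */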
3364         if (hmp->count_reclaims >= lower_limit) {
3365                 reclaim.count = 1;
3366                 TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
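                /*
                 * Sleep at most one second; hammer_inode_wakereclaims()
                 * normally wakes us sooner when an inode finishes its
                 * reclaim.
                 */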
3367                 tsleep(&reclaim, 0, "hmrrcm", hz);
3368                 if (reclaim.count > 0)
3369                         TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
3370         }
3371 }
3372
3373 /*
3374  * Keep track of reclaim statistics on a per-pid basis using a loose
3375  * 4-way set associative hash table.  Collisions inherit the count of
3376  * the previous entry.
3377  *
3378  * NOTE: We want to be careful here to limit the chain size.  If the chain
3379  *       size is too large a pid will spread its stats out over too many
3380  *       entries under certain types of heavy filesystem activity and
3381  *       wind up not delaying long enough.
3382  */
3383 static
3384 struct hammer_inostats *
3385 hammer_inode_inostats(hammer_mount_t hmp, pid_t pid)
3386 {
3387         struct hammer_inostats *stats;
3388         int delta;
3389         int chain;
3390         static volatile int iterator;   /* we don't care about MP races */
3391
3392         /*
3393          * Chain up to 4 times to find our entry.
3394          */
3395         for (chain = 0; chain < 4; ++chain) {
3396                 stats = &hmp->inostats[(pid + chain) & HAMMER_INOSTATS_HMASK];
3397                 if (stats->pid == pid)
3398                         break;
3399         }
3400
3401         /*
3402          * Replace one of the four chaining entries with our new entry.
3403          */
3404         if (chain == 4) {
3405                 stats = &hmp->inostats[(pid + (iterator++ & 3)) &
3406                                        HAMMER_INOSTATS_HMASK];
3407                 stats->pid = pid;
3408         }
3409
3410         /*
3411          * Decay the entry: 1s idle halves it, >60s idle zeroes it.
3412          */
3413         if (stats->count && stats->ltick != ticks) {
3414                 delta = ticks - stats->ltick;
3415                 stats->ltick = ticks;
3416                 if (delta <= 0 || delta > hz * 60)
3417                         stats->count = 0;
3418                 else
3419                         stats->count = stats->count * hz / (hz + delta);
3420         }
3421         if (hammer_debug_general & 0x10000)
3422                 kprintf("pid %5d stats %d\n", (int)pid, stats->count);
3423         return (stats);
3424 }
3425
3426 #if 0
3427
3428 /*
3429  * XXX not used, doesn't work very well due to the large batching nature
3430  * of flushes.
3431  *
3432  * A larger than normal backlog of inodes is sitting in the flusher,
3433  * enforce a general slowdown to let it catch up.  This routine is only
3434  * called on completion of a non-flusher-related transaction which
3435  * performed B-Tree node I/O.
3436  *
3437  * It is possible for the flusher to stall in a continuous load.
3438  * blogbench -i1000 -o seems to do a good job generating this sort of load.
3439  * If the flusher is unable to catch up the inode count can bloat until
3440  * we run out of kvm.
3441  *
3442  * This is a bit of a hack.
3443  */
3444 void
3445 hammer_inode_waithard(hammer_mount_t hmp)
3446 {
3447         /*
3448          * Hysteresis.
3449          */
3450         if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
3451                 if (hmp->count_reclaims < hammer_limit_reclaims / 2 &&
3452                     hmp->count_iqueued < hmp->count_inodes / 20) {
3453                         hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
3454                         return;
3455                 }
3456         } else {
3457                 if (hmp->count_reclaims < hammer_limit_reclaims ||
3458                     hmp->count_iqueued < hmp->count_inodes / 10) {
3459                         return;
3460                 }
3461                 hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
3462         }
3463
3464         /*
3465          * Block for one flush cycle.
3466          */
3467         hammer_flusher_wait_next(hmp);
3468 }
3469
3470 #endif