/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.114 2008/09/24 00:53:51 dillon Exp $
 */

#include "hammer.h"
#include <vm/vm_extern.h>

static int      hammer_unload_inode(struct hammer_inode *ip);
static void     hammer_free_inode(hammer_inode_t ip);
static void     hammer_flush_inode_core(hammer_inode_t ip,
                                        hammer_flush_group_t flg, int flags);
static int      hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int      hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int      hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
                                        hammer_flush_group_t flg);
static int      hammer_setup_parent_inodes_helper(hammer_record_t record,
                                        int depth, hammer_flush_group_t flg);
static void     hammer_inode_wakereclaims(hammer_inode_t ip);
static struct hammer_inostats *hammer_inode_inostats(hammer_mount_t hmp,
                                        pid_t pid);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif

/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_localization < ip2->obj_localization)
                return(-1);
        if (ip1->obj_localization > ip2->obj_localization)
                return(1);
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}
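
/*
 * Illustrative sketch (not part of the build): the comparator above
 * induces a lexicographic ordering on (obj_localization, obj_id,
 * obj_asof), so all snapshots of the same object sort adjacently and
 * differ only in the asof key.  The function name is hypothetical.
 */
#if 0
static void
example_ino_ordering(void)
{
        struct hammer_inode a, b;

        bzero(&a, sizeof(a));
        bzero(&b, sizeof(b));
        a.obj_localization = b.obj_localization = 1;
        a.obj_id = b.obj_id = 100;
        a.obj_asof = 10;
        b.obj_asof = 20;

        /* equal localization and obj_id, so the smaller asof sorts first */
        KKASSERT(hammer_ino_rb_compare(&a, &b) == -1);
}
#endif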

int
hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->redo_fifo_start < ip2->redo_fifo_start)
                return(-1);
        if (ip1->redo_fifo_start > ip2->redo_fifo_start)
                return(1);
        return(0);
}

/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_localization < ip->obj_localization)
                return(-1);
        if (info->obj_localization > ip->obj_localization)
                return(1);
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}

/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
        hammer_inode_info_t info = data;

        if (ip->obj_localization > info->obj_localization)
                return(1);
        if (ip->obj_localization < info->obj_localization)
                return(-1);
        if (ip->obj_id > info->obj_id)
                return(1);
        if (ip->obj_id < info->obj_id)
                return(-1);
        return(0);
}

/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
        u_int32_t localization = *(u_int32_t *)data;
        if (ip->obj_localization > localization)
                return(1);
        if (ip->obj_localization < localization)
                return(-1);
        return(0);
}

/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
        if (p1->localization < p2->localization)
                return(-1);
        if (p1->localization > p2->localization)
                return(1);
        return(0);
}


RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
             hammer_pfs_rb_compare, u_int32_t, localization);
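
/*
 * Illustrative sketch (not part of the build) of the lookup helpers the
 * RB_GENERATE* macros above emit, as they are used elsewhere in this
 * file.  The function and variable names here are hypothetical.
 */
#if 0
static void
example_rb_lookups(hammer_mount_t hmp, hammer_inode_info_t iinfo,
                   u_int32_t localization)
{
        hammer_inode_t ip;
        hammer_pseudofs_inmem_t pfsm;

        /* XLOOKUP variant: keyed by (localization, obj_id, asof) */
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, iinfo);

        /* RB_GENERATE2 variant: direct lookup by the localization key */
        pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
}
#endif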

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 *
 * MPALMOSTSAFE
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);
        hammer_mount_t hmp;

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(ap->a_vp);
                return(0);
        }

        /*
         * If the inode no longer has visibility in the filesystem try to
         * recycle it immediately, even if the inode is dirty.  Recycling
         * it quickly allows the system to reclaim buffer cache and VM
         * resources which can matter a lot in a heavily loaded system.
         *
         * This can deadlock in vfsync() if we aren't careful.
         *
         * Do not queue the inode to the flusher if we still have visibility,
         * otherwise namespace calls such as chmod will unnecessarily generate
         * multiple inode updates.
         */
        if (ip->ino_data.nlinks == 0) {
                hmp = ip->hmp;
                lwkt_gettoken(&hmp->fs_token);
                hammer_inode_unloadable_check(ip, 0);
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
                lwkt_reltoken(&hmp->fs_token);
                vrecycle(ap->a_vp);
        }
        return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 *
 * We must interlock ip->vp so hammer_get_vnode() can avoid races.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct vnode *vp;

        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
                lwkt_gettoken(&hmp->fs_token);
                hammer_lock_ex(&ip->lock);
                vp->v_data = NULL;
                ip->vp = NULL;

                if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
                        ++hammer_count_reclaiming;
                        ++hmp->inode_reclaims;
                        ip->flags |= HAMMER_INODE_RECLAIM;
                }
                hammer_unlock(&ip->lock);
                hammer_rel_inode(ip, 1);
                lwkt_reltoken(&hmp->fs_token);
        }
        return(0);
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
        hammer_mount_t hmp;
        struct vnode *vp;
        int error = 0;
        u_int8_t obj_type;

        hmp = ip->hmp;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp = *vpp;
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;

                        obj_type = ip->ino_data.obj_type;
                        vp->v_type = hammer_get_vnode_type(obj_type);

                        hammer_inode_wakereclaims(ip);

                        switch(ip->ino_data.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
                                break;
                        case HAMMER_OBJTYPE_REGFILE:
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         *
                         * Pseudo-filesystem roots can be accessed via
                         * non-root filesystem paths and setting VROOT may
                         * confuse the namecache.  Set VPFSROOT instead.
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT &&
                            ip->obj_asof == hmp->asof) {
                                if (ip->obj_localization == 0)
                                        vsetflags(vp, VROOT);
                                else
                                        vsetflags(vp, VPFSROOT);
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
                        if (vp->v_type == VREG) {
                                vinitvmio(vp, ip->ino_data.size,
                                          hammer_blocksize(ip->ino_data.size),
                                          hammer_blockoff(ip->ino_data.size));
                        }
                        break;
                }

                /*
                 * Interlock vnode clearing.  This does not prevent the
                 * vnode from going into a reclaimed state but it does
                 * prevent it from being destroyed or reused so the vget()
                 * will properly fail.
                 */
                hammer_lock_ex(&ip->lock);
                if ((vp = ip->vp) == NULL) {
                        hammer_unlock(&ip->lock);
                        continue;
                }
                vhold_interlocked(vp);
                hammer_unlock(&ip->lock);

                /*
                 * loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp) {
                                vdrop(vp);
                                break;
                        }
                        vput(vp);
                }
                vdrop(vp);
        }
        *vpp = vp;
        return(error);
}
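
/*
 * Illustrative sketch (not part of the build): a frontend caller
 * typically obtains a referenced inode with hammer_get_inode() and then
 * attaches a locked vnode with hammer_get_vnode().  The vnode holds its
 * own inode reference, so the caller can drop its own ref afterwards.
 * The function name is hypothetical and error handling is minimal.
 */
#if 0
static int
example_get_locked_vnode(hammer_transaction_t trans, hammer_inode_t dip,
                         int64_t obj_id, struct vnode **vpp)
{
        struct hammer_inode *ip;
        int error;

        ip = hammer_get_inode(trans, dip, obj_id, trans->hmp->asof,
                              dip->obj_localization, 0, &error);
        if (ip == NULL)
                return(error);
        error = hammer_get_vnode(ip, vpp);      /* *vpp returned locked */
        hammer_rel_inode(ip, 0);                /* drop our reference */
        return(error);
}
#endif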

/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
                            int (*callback)(hammer_inode_t ip, void *data),
                            void *data)
{
        hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
                                   hammer_inode_info_cmp_all_history,
                                   callback, iinfo);
}
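
/*
 * Illustrative callback sketch (not part of the build) for the scan
 * above.  The callback runs once per matching in-memory inode;
 * returning 0 continues the scan (compare
 * hammer_unload_pseudofs_callback() below, which returns -1 to stop).
 * Names are hypothetical; obj_asof is left 0 because the compare
 * function above does not test it.
 */
#if 0
static int
example_snapshot_callback(hammer_inode_t ip, void *data)
{
        /* e.g. invalidate cached buffers for this snapshot of the object */
        return(0);
}

static void
example_scan_snapshots(hammer_mount_t hmp, int64_t obj_id,
                       u_int32_t localization)
{
        struct hammer_inode_info iinfo;

        iinfo.obj_id = obj_id;
        iinfo.obj_asof = 0;             /* not tested by the scan */
        iinfo.obj_localization = localization;
        hammer_scan_inode_snapshots(hmp, &iinfo,
                                    example_snapshot_callback, NULL);
}
#endif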

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_node_cache *cachep;
        struct hammer_inode_info iinfo;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;


        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a dummy inode we return a failure so dounlink
         * (which does another lookup) doesn't try to mess with the
         * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
         * to ref dummy inodes.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        if (hmp->ronly)
                ip->flags |= HAMMER_INODE_RO;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Locate the on-disk inode.  If this is a PFS root we always
         * access the current version of the root inode and (if it is not
         * a master) always access information under it with a snapshot
         * TID.
         *
         * We cache recent inode lookups in this directory in dip->cache[2].
         * If we can't find it we assume the inode we are looking for is
         * close to the directory inode.
         */
retry:
        cachep = NULL;
        if (dip) {
                if (dip->cache[2].node)
                        cachep = &dip->cache[2];
                else
                        cachep = &dip->cache[0];
        }
        hammer_init_cursor(trans, &cursor, cachep, NULL);
        cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;

        cursor.asof = iinfo.obj_asof;
        cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
                       HAMMER_CURSOR_ASOF;

        *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
                ip->ino_data = cursor.data->inode;

                /*
                 * cache[0] tries to cache the location of the object inode.
                 * The assumption is that it is near the directory inode.
                 *
                 * cache[1] tries to cache the location of the object data.
                 * We might have something in the governing directory from
                 * scan optimizations (see the strategy code in
                 * hammer_vnops.c).
                 *
                 * We update dip->cache[2], if possible, with the location
                 * of the object inode for future directory shortcuts.
                 */
                hammer_cache_node(&ip->cache[0], cursor.node);
                if (dip) {
                        if (dip->cache[3].node) {
                                hammer_cache_node(&ip->cache[1],
                                                  dip->cache[3].node);
                        }
                        hammer_cache_node(&dip->cache[2], cursor.node);
                }

                /*
                 * The file should not contain any data past the file size
                 * stored in the inode.  Setting save_trunc_off to the
                 * file size instead of max reduces B-Tree lookup overheads
                 * on append by allowing the flusher to avoid checking for
                 * record overwrites.
                 */
                ip->save_trunc_off = ip->ino_data.size;

                /*
                 * Locate and assign the pseudofs management structure to
                 * the inode.
                 */
                if (dip && dip->obj_localization == ip->obj_localization) {
                        ip->pfsm = dip->pfsm;
                        hammer_ref(&ip->pfsm->lock);
                } else {
                        ip->pfsm = hammer_load_pseudofs(trans,
                                                        ip->obj_localization,
                                                        errorp);
                        *errorp = 0;    /* ignore ENOENT */
                }
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }

                hammer_free_inode(ip);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);

        /*
         * NEWINODE is only set if the inode becomes dirty later;
         * setting it here just leads to unnecessary stalls.
         *
         * trans->flags |= HAMMER_TRANSF_NEWINODE;
         */
        return (ip);
}

/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a non-fake inode we return an error.  Only fake
         * inodes can be returned by this routine.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        *errorp = 0;
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Populate the dummy inode.  Leave everything zero'd out.
         *
         * (ip->ino_leaf and ip->ino_data)
         *
         * Make the dummy inode a FIFO object which most copy programs
         * will properly ignore.
         */
        ip->save_trunc_off = ip->ino_data.size;
        ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

        /*
         * Locate and assign the pseudofs management structure to
         * the inode.
         */
        if (dip && dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
        } else {
                ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
                                                errorp);
                *errorp = 0;    /* ignore ENOENT */
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         *
         * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        goto loop;
                }
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }
                hammer_free_inode(ip);
                ip = NULL;
        }
        trans->flags |= HAMMER_TRANSF_NEWINODE;
        return (ip);
}

/*
 * Return a referenced inode only if it is in our inode cache.
 *
 * Dummy inodes do not count.
 */
struct hammer_inode *
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
                  hammer_tid_t asof, u_int32_t localization)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;

        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY)
                        ip = NULL;
                else
                        hammer_ref(&ip->lock);
        }
        return(ip);
}

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred,
                    hammer_inode_t dip, const char *name, int namelen,
                    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;
        int error;
        int64_t namekey;
        u_int32_t dummy;

        hmp = trans->hmp;

        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        trans->flags |= HAMMER_TRANSF_NEWINODE;

        if (pfsm) {
                KKASSERT(pfsm->localization != 0);
                ip->obj_id = HAMMER_OBJID_ROOT;
                ip->obj_localization = pfsm->localization;
        } else {
                KKASSERT(dip != NULL);
                namekey = hammer_directory_namekey(dip, name, namelen, &dummy);
                ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
                ip->obj_localization = dip->obj_localization;
        }

        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY |
                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;

        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        /* ip->save_trunc_off = 0; (already zero) */
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        ip->ino_data.atime = trans->time;
        ip->ino_data.mtime = trans->time;
        ip->ino_data.size = 0;
        ip->ino_data.nlinks = 0;

        /*
         * A nohistory designator on the parent directory is inherited by
         * the child.  We will do this even for pseudo-fs creation... the
         * sysad can turn it off.
         */
        if (dip) {
                ip->ino_data.uflags = dip->ino_data.uflags &
                                      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
        }

        ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_leaf.base.localization = ip->obj_localization +
                                         HAMMER_LOCALIZE_INODE;
        ip->ino_leaf.base.obj_id = ip->obj_id;
        ip->ino_leaf.base.key = 0;
        ip->ino_leaf.base.create_tid = 0;
        ip->ino_leaf.base.delete_tid = 0;
        ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;

        /*
         * If we are running version 2 or greater directory entries are
         * inode-localized instead of data-localized.
         */
        if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
                if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        ip->ino_data.cap_flags |=
                                HAMMER_INODE_CAP_DIR_LOCAL_INO;
                }
        }

        /*
         * Setup the ".." pointer.  This only needs to be done for directories
         * but we do it for all objects as a recovery aid.
         */
        if (dip)
                ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
#if 0
        /*
         * The parent_obj_localization field only applies to pseudo-fs roots.
         * XXX this is no longer applicable, PFSs are no longer directly
         * tied into the parent's directory structure.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
            ip->obj_id == HAMMER_OBJID_ROOT) {
                ip->ino_data.ext.obj.parent_obj_localization =
                                                dip->obj_localization;
        }
#endif

        switch(ip->ino_leaf.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        if (dip) {
                xuid = hammer_to_unix_xid(&dip->ino_data.uid);
                xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
                                             xuid, cred, &vap->va_mode);
        } else {
                xuid = 0;
        }
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
        else
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
        else if (dip)
                ip->ino_data.gid = dip->ino_data.gid;

        hammer_ref(&ip->lock);

        if (pfsm) {
                ip->pfsm = pfsm;
                hammer_ref(&pfsm->lock);
                error = 0;
        } else if (dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
                error = 0;
        } else {
                ip->pfsm = hammer_load_pseudofs(trans,
                                                ip->obj_localization,
                                                &error);
                error = 0;      /* ignore ENOENT */
        }

        if (error) {
                hammer_free_inode(ip);
                ip = NULL;
        } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                panic("hammer_create_inode: duplicate obj_id %llx",
                      (long long)ip->obj_id);
                /* not reached */
                hammer_free_inode(ip);
        }
        *ipp = ip;
        return(error);
}

/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
        struct hammer_mount *hmp;

        hmp = ip->hmp;
        KKASSERT(hammer_oneref(&ip->lock));
        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        hammer_uncache_node(&ip->cache[2]);
        hammer_uncache_node(&ip->cache[3]);
        hammer_inode_wakereclaims(ip);
        if (ip->objid_cache)
                hammer_clear_objid(ip);
        --hammer_count_inodes;
        --hmp->count_inodes;
        if (ip->pfsm) {
                hammer_rel_pseudofs(hmp, ip->pfsm);
                ip->pfsm = NULL;
        }
        kfree(ip, hmp->m_inodes);
        ip = NULL;
}

/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
                     u_int32_t localization, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_inode_t ip;
        hammer_pseudofs_inmem_t pfsm;
        struct hammer_cursor cursor;
        int bytes;

retry:
        pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
        if (pfsm) {
                hammer_ref(&pfsm->lock);
                *errorp = 0;
                return(pfsm);
        }

        /*
         * PFS records are stored in the root inode (not the PFS root inode,
         * but the real root).  Avoid an infinite recursion if loading
         * the PFS for the real root.
         */
        if (localization) {
                ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
                                      HAMMER_MAX_TID,
                                      HAMMER_DEF_LOCALIZATION, 0, errorp);
        } else {
                ip = NULL;
        }

        pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
        pfsm->localization = localization;
        pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
        pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

        hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
        cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        if (ip)
                *errorp = hammer_ip_lookup(&cursor);
        else
                *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == 0) {
                *errorp = hammer_ip_resolve_data(&cursor);
                if (*errorp == 0) {
                        if (cursor.data->pfsd.mirror_flags &
                            HAMMER_PFSD_DELETED) {
                                *errorp = ENOENT;
                        } else {
                                bytes = cursor.leaf->data_len;
                                if (bytes > sizeof(pfsm->pfsd))
                                        bytes = sizeof(pfsm->pfsd);
                                bcopy(cursor.data, &pfsm->pfsd, bytes);
                        }
                }
        }
        hammer_done_cursor(&cursor);

        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_ref(&pfsm->lock);
        if (ip)
                hammer_rel_inode(ip, 0);
        if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
                kfree(pfsm, hmp->m_misc);
                goto retry;
        }
        return(pfsm);
}
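
/*
 * Illustrative sketch (not part of the build) of the reference pairing:
 * each successful hammer_load_pseudofs() is balanced by a
 * hammer_rel_pseudofs(), which frees the in-memory structure on the
 * last release.  The function name is hypothetical.
 */
#if 0
static void
example_pfs_refcycle(hammer_transaction_t trans, u_int32_t localization)
{
        hammer_pseudofs_inmem_t pfsm;
        int error;

        pfsm = hammer_load_pseudofs(trans, localization, &error);
        /* pfsm is never NULL; error may be ENOENT for a default template */
        hammer_rel_pseudofs(trans->hmp, pfsm);
}
#endif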

/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
        struct hammer_cursor cursor;
        hammer_record_t record;
        hammer_inode_t ip;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = pfsm->localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        /*
         * Replace any in-memory version of the record.
         */
        error = hammer_ip_lookup(&cursor);
        if (error == 0 && hammer_cursor_inmem(&cursor)) {
                record = cursor.iprec;
                if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
                        KKASSERT(cursor.deadlk_rec == NULL);
                        hammer_ref(&record->lock);
                        cursor.deadlk_rec = record;
                        error = EDEADLK;
                } else {
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        error = 0;
                }
        }

        /*
         * Allocate replacement general record.  The backend flush will
         * delete any on-disk version of the record.
         */
        if (error == 0 || error == ENOENT) {
                record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
                record->type = HAMMER_MEM_RECORD_GENERAL;

                record->leaf.base.localization = ip->obj_localization +
                                                 HAMMER_LOCALIZE_MISC;
                record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
                record->leaf.base.key = pfsm->localization;
                record->leaf.data_len = sizeof(pfsm->pfsd);
                bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
                error = hammer_ip_add_record(trans, record);
        }
        hammer_done_cursor(&cursor);
        if (error == EDEADLK)
                goto retry;
        hammer_rel_inode(ip, 0);
        return(error);
}
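
/*
 * Illustrative sketch (not part of the build): a caller updates the
 * in-memory pfsd and then pushes it to the media with
 * hammer_save_pseudofs(), which replaces any prior version of the
 * record.  The function name is hypothetical.
 */
#if 0
static int
example_pfs_update(hammer_transaction_t trans, u_int32_t localization)
{
        hammer_pseudofs_inmem_t pfsm;
        int error;

        pfsm = hammer_load_pseudofs(trans, localization, &error);
        /* ... modify pfsm->pfsd fields here ... */
        error = hammer_save_pseudofs(trans, pfsm);
        hammer_rel_pseudofs(trans->hmp, pfsm);
        return(error);
}
#endif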

/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
1087 int
1088 hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
1089                        hammer_pseudofs_inmem_t pfsm)
1090 {
1091         hammer_inode_t ip;
1092         struct vattr vap;
1093         int error;
1094
1095         ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
1096                               pfsm->localization, 0, &error);
1097         if (ip == NULL) {
1098                 vattr_null(&vap);
1099                 vap.va_mode = 0755;
1100                 vap.va_type = VDIR;
1101                 error = hammer_create_inode(trans, &vap, cred,
1102                                             NULL, NULL, 0,
1103                                             pfsm, &ip);
1104                 if (error == 0) {
1105                         ++ip->ino_data.nlinks;
1106                         hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
1107                 }
1108         }
1109         if (ip)
1110                 hammer_rel_inode(ip, 0);
1111         return(error);
1112 }
1113
1114 /*
1115  * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
1116  * if we are unable to disassociate all the inodes.
1117  */
1118 static
1119 int
1120 hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
1121 {
1122         int res;
1123
1124         hammer_ref(&ip->lock);
1125         if (hammer_isactive(&ip->lock) == 2 && ip->vp)
1126                 vclean_unlocked(ip->vp);
1127         if (hammer_isactive(&ip->lock) == 1 && ip->vp == NULL)
1128                 res = 0;
1129         else
1130                 res = -1;       /* stop, someone is using the inode */
1131         hammer_rel_inode(ip, 0);
1132         return(res);
1133 }
1134
1135 int
1136 hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
1137 {
1138         int res;
1139         int try;
1140
1141         for (try = res = 0; try < 4; ++try) {
1142                 res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
1143                                            hammer_inode_pfs_cmp,
1144                                            hammer_unload_pseudofs_callback,
1145                                            &localization);
1146                 if (res == 0 && try > 1)
1147                         break;
1148                 hammer_flusher_sync(trans->hmp);
1149         }
1150         if (res != 0)
1151                 res = ENOTEMPTY;
1152         return(res);
1153 }
1154
1155
1156 /*
1157  * Release a reference on a PFS
1158  */
1159 void
1160 hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
1161 {
1162         hammer_rel(&pfsm->lock);
1163         if (hammer_norefs(&pfsm->lock)) {
1164                 RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
1165                 kfree(pfsm, hmp->m_misc);
1166         }
1167 }
1168
1169 /*
1170  * Called by hammer_sync_inode().
1171  */
1172 static int
1173 hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
1174 {
1175         hammer_transaction_t trans = cursor->trans;
1176         hammer_record_t record;
1177         int error;
1178         int redirty;
1179
1180 retry:
1181         error = 0;
1182
1183         /*
1184          * If the inode has a presence on-disk then locate it and mark
1185          * it deleted, setting DELONDISK.
1186          *
1187          * The record may or may not be physically deleted, depending on
1188          * the retention policy.
1189          */
1190         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
1191             HAMMER_INODE_ONDISK) {
1192                 hammer_normalize_cursor(cursor);
1193                 cursor->key_beg.localization = ip->obj_localization + 
1194                                                HAMMER_LOCALIZE_INODE;
1195                 cursor->key_beg.obj_id = ip->obj_id;
1196                 cursor->key_beg.key = 0;
1197                 cursor->key_beg.create_tid = 0;
1198                 cursor->key_beg.delete_tid = 0;
1199                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
1200                 cursor->key_beg.obj_type = 0;
1201                 cursor->asof = ip->obj_asof;
1202                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1203                 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
1204                 cursor->flags |= HAMMER_CURSOR_BACKEND;
1205
1206                 error = hammer_btree_lookup(cursor);
1207                 if (hammer_debug_inode)
1208                         kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
1209
1210                 if (error == 0) {
1211                         error = hammer_ip_delete_record(cursor, ip, trans->tid);
1212                         if (hammer_debug_inode)
1213                                 kprintf(" error %d\n", error);
1214                         if (error == 0) {
1215                                 ip->flags |= HAMMER_INODE_DELONDISK;
1216                         }
1217                         if (cursor->node)
1218                                 hammer_cache_node(&ip->cache[0], cursor->node);
1219                 }
1220                 if (error == EDEADLK) {
1221                         hammer_done_cursor(cursor);
1222                         error = hammer_init_cursor(trans, cursor,
1223                                                    &ip->cache[0], ip);
1224                         if (hammer_debug_inode)
1225                                 kprintf("IPDED %p %d\n", ip, error);
1226                         if (error == 0)
1227                                 goto retry;
1228                 }
1229         }
1230
1231         /*
1232          * Ok, write out the initial record or a new record (after deleting
1233          * the old one), unless the DELETED flag is set.  This routine will
1234          * clear DELONDISK if it writes out a record.
1235          *
1236          * Update our inode statistics if this is the first application of
1237          * the inode on-disk.
1238          */
1239         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
1240                 /*
1241                  * Generate a record and write it to the media.  We clean-up
1242                  * the state before releasing so we do not have to set-up
1243                  * a flush_group.
1244                  */
1245                 record = hammer_alloc_mem_record(ip, 0);
1246                 record->type = HAMMER_MEM_RECORD_INODE;
1247                 record->flush_state = HAMMER_FST_FLUSH;
1248                 record->leaf = ip->sync_ino_leaf;
1249                 record->leaf.base.create_tid = trans->tid;
1250                 record->leaf.data_len = sizeof(ip->sync_ino_data);
1251                 record->leaf.create_ts = trans->time32;
1252                 record->data = (void *)&ip->sync_ino_data;
1253                 record->flags |= HAMMER_RECF_INTERLOCK_BE;
1254
1255                 /*
1256                  * If this flag is set we cannot sync the new file size
1257                  * because we haven't finished related truncations.  The
1258                  * inode will be flushed in another flush group to finish
1259                  * the job.
1260                  */
1261                 if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
1262                     ip->sync_ino_data.size != ip->ino_data.size) {
1263                         redirty = 1;
1264                         ip->sync_ino_data.size = ip->ino_data.size;
1265                 } else {
1266                         redirty = 0;
1267                 }
1268
1269                 for (;;) {
1270                         error = hammer_ip_sync_record_cursor(cursor, record);
1271                         if (hammer_debug_inode)
1272                                 kprintf("GENREC %p rec %08x %d\n",      
1273                                         ip, record->flags, error);
1274                         if (error != EDEADLK)
1275                                 break;
1276                         hammer_done_cursor(cursor);
1277                         error = hammer_init_cursor(trans, cursor,
1278                                                    &ip->cache[0], ip);
1279                         if (hammer_debug_inode)
1280                                 kprintf("GENREC reinit %d\n", error);
1281                         if (error)
1282                                 break;
1283                 }
1284
1285                 /*
1286                  * Note:  The record was never on the inode's record tree
1287                  * so just wave our hands importantly and destroy it.
1288                  */
1289                 record->flags |= HAMMER_RECF_COMMITTED;
1290                 record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
1291                 record->flush_state = HAMMER_FST_IDLE;
1292                 ++ip->rec_generation;
1293                 hammer_rel_mem_record(record);
1294
1295                 /*
1296                  * Finish up.
1297                  */
1298                 if (error == 0) {
1299                         if (hammer_debug_inode)
1300                                 kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
1301                         ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
1302                                             HAMMER_INODE_SDIRTY |
1303                                             HAMMER_INODE_ATIME |
1304                                             HAMMER_INODE_MTIME);
1305                         ip->flags &= ~HAMMER_INODE_DELONDISK;
1306                         if (redirty)
1307                                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1308
1309                         /*
1310                          * Root volume count of inodes
1311                          */
1312                         hammer_sync_lock_sh(trans);
1313                         if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
1314                                 hammer_modify_volume_field(trans,
1315                                                            trans->rootvol,
1316                                                            vol0_stat_inodes);
1317                                 ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
1318                                 hammer_modify_volume_done(trans->rootvol);
1319                                 ip->flags |= HAMMER_INODE_ONDISK;
1320                                 if (hammer_debug_inode)
1321                                         kprintf("NOWONDISK %p\n", ip);
1322                         }
1323                         hammer_sync_unlock(trans);
1324                 }
1325         }
1326
1327         /*
1328          * If the inode has been destroyed, clean out any left-over flags
1329          * that may have been set by the frontend.
1330          */
1331         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) { 
1332                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
1333                                     HAMMER_INODE_SDIRTY |
1334                                     HAMMER_INODE_ATIME |
1335                                     HAMMER_INODE_MTIME);
1336         }
1337         return(error);
1338 }
1339
1340 /*
1341  * Update only the itimes fields.
1342  *
1343  * ATIME can be updated without generating any UNDO.  MTIME is updated
1344  * with UNDO so it is guaranteed to be synchronized properly in case of
1345  * a crash.
1346  *
1347  * Neither field is included in the B-Tree leaf element's CRC, which is how
1348  * we can get away with updating ATIME the way we do.
1349  */
1350 static int
1351 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
1352 {
1353         hammer_transaction_t trans = cursor->trans;
1354         int error;
1355
1356 retry:
1357         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
1358             HAMMER_INODE_ONDISK) {
1359                 return(0);
1360         }
1361
1362         hammer_normalize_cursor(cursor);
1363         cursor->key_beg.localization = ip->obj_localization + 
1364                                        HAMMER_LOCALIZE_INODE;
1365         cursor->key_beg.obj_id = ip->obj_id;
1366         cursor->key_beg.key = 0;
1367         cursor->key_beg.create_tid = 0;
1368         cursor->key_beg.delete_tid = 0;
1369         cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
1370         cursor->key_beg.obj_type = 0;
1371         cursor->asof = ip->obj_asof;
1372         cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1373         cursor->flags |= HAMMER_CURSOR_ASOF;
1374         cursor->flags |= HAMMER_CURSOR_GET_LEAF;
1375         cursor->flags |= HAMMER_CURSOR_GET_DATA;
1376         cursor->flags |= HAMMER_CURSOR_BACKEND;
1377
1378         error = hammer_btree_lookup(cursor);
1379         if (error == 0) {
1380                 hammer_cache_node(&ip->cache[0], cursor->node);
1381                 if (ip->sync_flags & HAMMER_INODE_MTIME) {
1382                         /*
1383                          * Updating MTIME requires an UNDO.  Just cover
1384                          * both atime and mtime.
1385                          */
1386                         hammer_sync_lock_sh(trans);
1387                         hammer_modify_buffer(trans, cursor->data_buffer,
1388                                      HAMMER_ITIMES_BASE(&cursor->data->inode),
1389                                      HAMMER_ITIMES_BYTES);
1390                         cursor->data->inode.atime = ip->sync_ino_data.atime;
1391                         cursor->data->inode.mtime = ip->sync_ino_data.mtime;
1392                         hammer_modify_buffer_done(cursor->data_buffer);
1393                         hammer_sync_unlock(trans);
1394                 } else if (ip->sync_flags & HAMMER_INODE_ATIME) {
1395                         /*
1396                          * Updating atime only can be done in-place with
1397                          * no UNDO.
1398                          */
1399                         hammer_sync_lock_sh(trans);
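                        /* a NULL base with zero length generates no UNDO */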
1400                         hammer_modify_buffer(trans, cursor->data_buffer,
1401                                              NULL, 0);
1402                         cursor->data->inode.atime = ip->sync_ino_data.atime;
1403                         hammer_modify_buffer_done(cursor->data_buffer);
1404                         hammer_sync_unlock(trans);
1405                 }
1406                 ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
1407         }
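        /*
         * A competing cursor may have deadlocked us.  If so, tear the
         * cursor down, reinitialize it, and retry the lookup from the top.
         */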
1408         if (error == EDEADLK) {
1409                 hammer_done_cursor(cursor);
1410                 error = hammer_init_cursor(trans, cursor,
1411                                            &ip->cache[0], ip);
1412                 if (error == 0)
1413                         goto retry;
1414         }
1415         return(error);
1416 }
1417
1418 /*
1419  * Release a reference on an inode, flush as requested.
1420  *
1421  * On the last reference we queue the inode to the flusher for its final
1422  * disposition.
1423  */
1424 void
1425 hammer_rel_inode(struct hammer_inode *ip, int flush)
1426 {
1427         /*hammer_mount_t hmp = ip->hmp;*/
1428
1429         /*
1430          * Handle disposition when dropping the last ref.
1431          */
1432         for (;;) {
1433                 if (hammer_oneref(&ip->lock)) {
1434                         /*
1435                          * Determine whether on-disk action is needed for
1436                          * the inode's final disposition.
1437                          */
1438                         KKASSERT(ip->vp == NULL);
1439                         hammer_inode_unloadable_check(ip, 0);
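                        /*
                         * A dirty inode is handed to the flusher, which
                         * holds its own reference while the inode is
                         * queued.  Otherwise, if we still hold the only
                         * reference and the inode is clean, unload it.
                         * Loop to re-test the ref count.
                         */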
1440                         if (ip->flags & HAMMER_INODE_MODMASK) {
1441                                 hammer_flush_inode(ip, 0);
1442                         } else if (hammer_oneref(&ip->lock)) {
1443                                 hammer_unload_inode(ip);
1444                                 break;
1445                         }
1446                 } else {
1447                         if (flush)
1448                                 hammer_flush_inode(ip, 0);
1449
1450                         /*
1451                          * The inode still has multiple refs, try to drop
1452                          * one ref.
1453                          */
1454                         KKASSERT(hammer_isactive(&ip->lock) >= 1);
1455                         if (hammer_isactive(&ip->lock) > 1) {
1456                                 hammer_rel(&ip->lock);
1457                                 break;
1458                         }
1459                 }
1460         }
1461 }
1462
1463 /*
1464  * Unload and destroy the specified inode.  Must be called with one remaining
1465  * reference.  The reference is disposed of.
1466  *
1467  * The inode must be completely clean.
1468  */
1469 static int
1470 hammer_unload_inode(struct hammer_inode *ip)
1471 {
1472         hammer_mount_t hmp = ip->hmp;
1473
1474         KASSERT(hammer_oneref(&ip->lock),
1475                 ("hammer_unload_inode: %d refs\n", hammer_isactive(&ip->lock)));
1476         KKASSERT(ip->vp == NULL);
1477         KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
1478         KKASSERT(ip->cursor_ip_refs == 0);
1479         KKASSERT(hammer_notlocked(&ip->lock));
1480         KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);
1481
1482         KKASSERT(RB_EMPTY(&ip->rec_tree));
1483         KKASSERT(TAILQ_EMPTY(&ip->target_list));
1484
1485         if (ip->flags & HAMMER_INODE_RDIRTY) {
1486                 RB_REMOVE(hammer_redo_rb_tree, &hmp->rb_redo_root, ip);
1487                 ip->flags &= ~HAMMER_INODE_RDIRTY;
1488         }
1489         RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);
1490
1491         hammer_free_inode(ip);
1492         return(0);
1493 }
1494
1495 /*
1496  * Called during unmounting if a critical error occurred.  The in-memory
1497  * inode and all related structures are destroyed.
1498  *
1499  * If a critical error did not occur the unmount code calls the standard
1500  * release and asserts that the inode is gone.
1501  */
1502 int
1503 hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
1504 {
1505         hammer_record_t rec;
1506
1507         /*
1508          * Get rid of the inode's in-memory records, regardless of their
1509          * state, and clear the mod-mask.
1510          */
1511         while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
1512                 TAILQ_REMOVE(&ip->target_list, rec, target_entry);
1513                 rec->target_ip = NULL;
1514                 if (rec->flush_state == HAMMER_FST_SETUP)
1515                         rec->flush_state = HAMMER_FST_IDLE;
1516         }
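        /*
         * Normalize each record to exactly one reference before disposing
         * of it.  Records already in a flush group hold a reference from
         * the flush; all others must be referenced here.
         */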
1517         while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
1518                 if (rec->flush_state == HAMMER_FST_FLUSH)
1519                         --rec->flush_group->refs;
1520                 else
1521                         hammer_ref(&rec->lock);
1522                 KKASSERT(hammer_oneref(&rec->lock));
1523                 rec->flush_state = HAMMER_FST_IDLE;
1524                 rec->flush_group = NULL;
1525                 rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
1526                 rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
1527                 ++ip->rec_generation;
1528                 hammer_rel_mem_record(rec);
1529         }
1530         ip->flags &= ~HAMMER_INODE_MODMASK;
1531         ip->sync_flags &= ~HAMMER_INODE_MODMASK;
1532         KKASSERT(ip->vp == NULL);
1533
1534         /*
1535          * Remove the inode from any flush group, force it idle.  FLUSH
1536          * and SETUP states have an inode ref.
1537          */
1538         switch(ip->flush_state) {
1539         case HAMMER_FST_FLUSH:
1540                 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
1541                 --ip->flush_group->refs;
1542                 ip->flush_group = NULL;
1543                 /* fall through */
1544         case HAMMER_FST_SETUP:
1545                 hammer_rel(&ip->lock);
1546                 ip->flush_state = HAMMER_FST_IDLE;
1547                 /* fall through */
1548         case HAMMER_FST_IDLE:
1549                 break;
1550         }
1551
1552         /*
1553          * There shouldn't be any associated vnode.  The unload needs at
1554          * least one ref; if we do have a vp, steal its ip ref.
1555          */
1556         if (ip->vp) {
1557                 kprintf("hammer_destroy_inode_callback: Unexpected "
1558                         "vnode association ip %p vp %p\n", ip, ip->vp);
1559                 ip->vp->v_data = NULL;
1560                 ip->vp = NULL;
1561         } else {
1562                 hammer_ref(&ip->lock);
1563         }
1564         hammer_unload_inode(ip);
1565         return(0);
1566 }
1567
1568 /*
1569  * Called on mount -u when switching from RW to RO or vice versa.  Adjust
1570  * the read-only flag for cached inodes.
1571  *
1572  * This routine is called from a RB_SCAN().
1573  */
1574 int
1575 hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
1576 {
1577         hammer_mount_t hmp = ip->hmp;
1578
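        /*
         * An as-of (historical) mount is implicitly read-only, so either
         * condition forces the RO flag.
         */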
1579         if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
1580                 ip->flags |= HAMMER_INODE_RO;
1581         else
1582                 ip->flags &= ~HAMMER_INODE_RO;
1583         return(0);
1584 }
1585
1586 /*
1587  * A transaction has modified an inode, requiring updates as specified by
1588  * the passed flags.
1589  *
1590  * HAMMER_INODE_DDIRTY: Inode data has been updated, not incl mtime/atime,
1591  *                      and not including size changes due to write-append
1592  *                      (but other size changes are included).
1593  * HAMMER_INODE_SDIRTY: Inode data has been updated, size changes due to
1594  *                      write-append.
1595  * HAMMER_INODE_XDIRTY: Dirty in-memory records
1596  * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
1597  * HAMMER_INODE_DELETED: Inode record/data must be deleted
1598  * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
1599  */
1600 void
1601 hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
1602 {
1603         /*
1604          * A ronly value of 0 or 2 does not trigger the assertion;
1605          * 2 is a special error state.
1606          */
1607         KKASSERT(ip->hmp->ronly != 1 ||
1608                   (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 
1609                             HAMMER_INODE_SDIRTY |
1610                             HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
1611                             HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
1612         if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
1613                 ip->flags |= HAMMER_INODE_RSV_INODES;
1614                 ++ip->hmp->rsv_inodes;
1615         }
1616
1617         /*
1618          * Set the NEWINODE flag in the transaction if the inode
1619          * transitions to a dirty state.  This is used to track
1620          * the load on the inode cache.
1621          */
1622         if (trans &&
1623             (ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1624             (flags & HAMMER_INODE_MODMASK)) {
1625                 trans->flags |= HAMMER_TRANSF_NEWINODE;
1626         }
1627
1628         ip->flags |= flags;
1629 }
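
/*
 * Illustrative sketch (not part of the original source): a frontend
 * attribute change, e.g. in a setattr-style path, would typically pair
 * the in-memory update with the matching mod flag ("new_mode" below is
 * a hypothetical local):
 *
 *	ip->ino_data.mode = new_mode;
 *	hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
 */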
1630
1631 /*
1632  * Request that an inode be flushed.  This whole mess cannot block and may
1633  * recurse (if not synchronous).  Once requested HAMMER will attempt to
1634  * actively flush the inode until the flush can be done.
1635  *
1636  * The inode may already be flushing, or may be in a setup state.  We can
1637  * place the inode in a flushing state if it is currently idle and flag it
1638  * to reflush if it is currently flushing.
1639  *
1640  * Upon return, if the inode could not be flushed due to a setup
1641  * dependency, it will be flushed automatically when the dependency
1642  * is satisfied.
1643  */
1644 void
1645 hammer_flush_inode(hammer_inode_t ip, int flags)
1646 {
1647         hammer_mount_t hmp;
1648         hammer_flush_group_t flg;
1649         int good;
1650
1651         /*
1652          * next_flush_group is the first flush group we can place the inode
1653          * in.  It may be NULL.  If it becomes full we append a new flush
1654          * group and make that the next_flush_group.
1655          */
1656         hmp = ip->hmp;
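        /*
         * A flush group is considered full once flg->total_count plus
         * flg->refs exceeds the UNDO FIFO record limit (undo_rec_limit).
         */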
1657         while ((flg = hmp->next_flush_group) != NULL) {
1658                 KKASSERT(flg->running == 0);
1659                 if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
1660                         break;
1661                 hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
1662                 hammer_flusher_async(ip->hmp, flg);
1663         }
1664         if (flg == NULL) {
1665                 flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
1666                 hmp->next_flush_group = flg;
1667                 RB_INIT(&flg->flush_tree);
1668                 TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
1669         }
1670
1671         /*
1672          * Trivial 'nothing to flush' case.  If the inode is in a SETUP
1673          * state we have to put it back into an IDLE state so we can
1674          * drop the extra ref.
1675          *
1676          * If we have a parent dependency we must still fall through
1677          * so we can run it.
1678          */
1679         if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
1680                 if (ip->flush_state == HAMMER_FST_SETUP &&
1681                     TAILQ_EMPTY(&ip->target_list)) {
1682                         ip->flush_state = HAMMER_FST_IDLE;
1683                         hammer_rel_inode(ip, 0);
1684                 }
1685                 if (ip->flush_state == HAMMER_FST_IDLE)
1686                         return;
1687         }
1688
1689         /*
1690          * Our flush action will depend on the current state.
1691          */
1692         switch(ip->flush_state) {
1693         case HAMMER_FST_IDLE:
1694                 /*
1695                  * We have no dependencies and can flush immediately.  Some of
1696                  * our children may not be flushable so we have to re-test
1697                  * with that additional knowledge.
1698                  */
1699                 hammer_flush_inode_core(ip, flg, flags);
1700                 break;
1701         case HAMMER_FST_SETUP:
1702                 /*
1703                  * Recurse upwards through dependencies via target_list
1704                  * and start their flusher actions going if possible.
1705                  *
1706                  * 'good' is our connectivity.  -1 means we have none and
1707                  * can't flush, 0 means there weren't any dependencies, and
1708                  * 1 means we have good connectivity.
1709                  */
1710                 good = hammer_setup_parent_inodes(ip, 0, flg);
1711
1712                 if (good >= 0) {
1713                         /*
1714                          * We can continue if good >= 0.  Determine how 
1715                          * many records under our inode can be flushed (and
1716                          * mark them).
1717                          */
1718                         hammer_flush_inode_core(ip, flg, flags);
1719                 } else {
1720                         /*
1721                          * Parent has no connectivity, tell it to flush
1722                          * us as soon as it does.
1723                          *
1724                          * The REFLUSH flag is also needed to trigger
1725                          * dependency wakeups.
1726                          */
1727                         ip->flags |= HAMMER_INODE_CONN_DOWN |
1728                                      HAMMER_INODE_REFLUSH;
1729                         if (flags & HAMMER_FLUSH_SIGNAL) {
1730                                 ip->flags |= HAMMER_INODE_RESIGNAL;
1731                                 hammer_flusher_async(ip->hmp, flg);
1732                         }
1733                 }
1734                 break;
1735         case HAMMER_FST_FLUSH:
1736                 /*
1737                  * We are already flushing, flag the inode to reflush
1738                  * if needed after it completes its current flush.
1739                  *
1740                  * The REFLUSH flag is also needed to trigger
1741                  * dependency wakeups.
1742                  */
1743                 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
1744                         ip->flags |= HAMMER_INODE_REFLUSH;
1745                 if (flags & HAMMER_FLUSH_SIGNAL) {
1746                         ip->flags |= HAMMER_INODE_RESIGNAL;
1747                         hammer_flusher_async(ip->hmp, flg);
1748                 }
1749                 break;
1750         }
1751 }
1752
1753 /*
1754  * Scan ip->target_list, which is a list of records owned by PARENT
1755  * inodes which reference our ip.
1756  *
1757  * XXX This is a huge mess of recursive code, but not one bit of it blocks
1758  *     so for now do not ref/deref the structures.  Note that if we use the
1759  *     ref/rel code later, the rel CAN block.
1760  */
1761 static int
1762 hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
1763                            hammer_flush_group_t flg)
1764 {
1765         hammer_record_t depend;
1766         int good;
1767         int r;
1768
1769         /*
1770          * If we hit our recursion limit and we have parent dependencies
1771          * we cannot continue.  Returning < 0 will cause us to be flagged
1772          * for reflush.  Returning -2 cuts off additional dependency checks
1773          * because they are likely to also hit the depth limit.
1774          *
1775          * We cannot return < 0 if there are no dependencies or there might
1776          * not be anything to wakeup (ip).
1777          */
1778         if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
1779                 kprintf("HAMMER Warning: depth limit reached on "
1780                         "setup recursion, inode %p %016llx\n",
1781                         ip, (long long)ip->obj_id);
1782                 return(-2);
1783         }
1784
1785         /*
1786          * Scan dependencies
1787          */
1788         good = 0;
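        /*
         * Combine the per-dependency results: any dependency providing
         * connectivity (r > 0) overrides a failure, while a failure
         * (r < 0) only sticks if nothing better has been found.
         */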
1789         TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
1790                 r = hammer_setup_parent_inodes_helper(depend, depth, flg);
1791                 KKASSERT(depend->target_ip == ip);
1792                 if (r < 0 && good == 0)
1793                         good = -1;
1794                 if (r > 0)
1795                         good = 1;
1796
1797                 /*
1798                  * If we failed due to the recursion depth limit then stop
1799                  * now.
1800                  */
1801                 if (r == -2)
1802                         break;
1803         }
1804         return(good);
1805 }
1806
1807 /*
1808  * This helper function takes a record representing the dependency between
1809  * the parent inode and child inode.
1810  *
1811  * record->ip           = parent inode
1812  * record->target_ip    = child inode
1813  * 
1814  * We are asked to recurse upwards and convert the record from SETUP
1815  * to FLUSH if possible.
1816  *
1817  * Return 1 if the record gives us connectivity
1818  *
1819  * Return 0 if the record is not relevant.
1820  *
1821  * Return -1 if we can't resolve the dependency and there is no connectivity.
1822  */
1823 static int
1824 hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
1825                                   hammer_flush_group_t flg)
1826 {
1827         hammer_mount_t hmp;
1828         hammer_inode_t pip;
1829         int good;
1830
1831         KKASSERT(record->flush_state != HAMMER_FST_IDLE);
1832         pip = record->ip;
1833         hmp = pip->hmp;
1834
1835         /*
1836          * If the record is already flushing, is it in our flush group?
1837          *
1838          * If it is in our flush group but it is a general record or a 
1839          * delete-on-disk, it does not improve our connectivity (return 0),
1840          * and if the target inode is not trying to destroy itself we can't
1841          * allow the operation yet anyway (the second return -1).
1842          */
1843         if (record->flush_state == HAMMER_FST_FLUSH) {
1844                 /*
1845                  * If not in our flush group ask the parent to reflush
1846                  * us as soon as possible.
1847                  */
1848                 if (record->flush_group != flg) {
1849                         pip->flags |= HAMMER_INODE_REFLUSH;
1850                         record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1851                         return(-1);
1852                 }
1853
1854                 /*
1855                  * If in our flush group everything is already set up,
1856                  * just return whether the record will improve our
1857                  * visibility or not.
1858                  */
1859                 if (record->type == HAMMER_MEM_RECORD_ADD)
1860                         return(1);
1861                 return(0);
1862         }
1863
1864         /*
1865          * It must be a setup record.  Try to resolve the setup dependencies
1866          * by recursing upwards so we can place ip on the flush list.
1867          *
1868          * Limit ourselves to 20 levels of recursion to avoid blowing out
1869          * the kernel stack.  If we hit the recursion limit we can't flush
1870          * until the parent flushes.  The parent will flush independently
1871          * on its own and ultimately a deep recursion will be resolved.
1872          */
1873         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1874
1875         good = hammer_setup_parent_inodes(pip, depth + 1, flg);
1876
1877         /*
1878          * If good < 0 the parent has no connectivity and we cannot safely
1879          * flush the directory entry, which also means we can't flush our
1880          * ip.  Flag us for downward recursion once the parent's
1881          * connectivity is resolved.  Flag the parent for [re]flush or it
1882          * may not check for downward recursions.
1883          */
1884         if (good < 0) {
1885                 pip->flags |= HAMMER_INODE_REFLUSH;
1886                 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1887                 return(good);
1888         }
1889
1890         /*
1891          * We are go, place the parent inode in a flushing state so we can
1892          * place its record in a flushing state.  Note that the parent
1893          * may already be flushing.  The record must be in the same flush
1894          * group as the parent.
1895          */
1896         if (pip->flush_state != HAMMER_FST_FLUSH)
1897                 hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
1898         KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
1899
1900         /*
1901          * It is possible for a rename to create a loop in the recursion
1902          * and revisit a record.  This will result in the record being
1903          * placed in a flush state unexpectedly.  This check deals with
1904          * the case.
1905          */
1906         if (record->flush_state == HAMMER_FST_FLUSH) {
1907                 if (record->type == HAMMER_MEM_RECORD_ADD)
1908                         return(1);
1909                 return(0);
1910         }
1911
1912         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1913
1914 #if 0
1915         if (record->type == HAMMER_MEM_RECORD_DEL &&
1916             (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
1917                 /*
1918                  * Regardless of flushing state we cannot sync this path if the
1919                  * record represents a delete-on-disk but the target inode
1920                  * is not ready to sync its own deletion.
1921                  *
1922                  * XXX need to count effective nlinks to determine whether
1923                  * the flush is ok, otherwise removing a hardlink will
1924                  * just leave the DEL record to rot.
1925                  */
1926                 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
1927                 return(-1);
1928         } else
1929 #endif
1930         if (pip->flush_group == flg) {
1931                 /*
1932                  * Because we have not calculated nlinks yet we can just
1933                  * set records to the flush state if the parent is in
1934                  * the same flush group as we are.
1935                  */
1936                 record->flush_state = HAMMER_FST_FLUSH;
1937                 record->flush_group = flg;
1938                 ++record->flush_group->refs;
1939                 hammer_ref(&record->lock);
1940
1941                 /*
1942                  * A general directory-add contributes to our visibility.
1943                  *
1944                  * Otherwise it is probably a directory-delete or 
1945                  * delete-on-disk record and does not contribute to our
1946                  * visibility (but we can still flush it).
1947                  */
1948                 if (record->type == HAMMER_MEM_RECORD_ADD)
1949                         return(1);
1950                 return(0);
1951         } else {
1952                 /*
1953                  * If the parent is not in our flush group we cannot
1954                  * flush this record yet, there is no visibility.
1955                  * We tell the parent to reflush and mark ourselves
1956                  * so the parent knows it should flush us too.
1957                  */
1958                 pip->flags |= HAMMER_INODE_REFLUSH;
1959                 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1960                 return(-1);
1961         }
1962 }
1963
1964 /*
1965  * This is the core routine placing an inode into the FST_FLUSH state.
1966  */
1967 static void
1968 hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
1969 {
1970         int go_count;
1971
1972         /*
1973          * Set flush state and prevent the flusher from cycling into
1974          * the next flush group.  Do not place the ip on the list yet.
1975          * Inodes not in the idle state get an extra reference.
1976          */
1977         KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
1978         if (ip->flush_state == HAMMER_FST_IDLE)
1979                 hammer_ref(&ip->lock);
1980         ip->flush_state = HAMMER_FST_FLUSH;
1981         ip->flush_group = flg;
1982         ++ip->hmp->flusher.group_lock;
1983         ++ip->hmp->count_iqueued;
1984         ++hammer_count_iqueued;
1985         ++flg->total_count;
1986         hammer_redo_fifo_start_flush(ip);
1987
1988         /*
1989          * If the flush group reaches the autoflush limit we want to signal
1990          * the flusher.  This is particularly important for remove()s.
1991          *
1992          * If the default hammer_limit_reclaim is changed via sysctl,
1993          * make sure we don't hit a degenerate case where we don't start
1994          * a flush but end up blocked on further inode ops.
1995          */
1996         if (flg->total_count == hammer_autoflush ||
1997             flg->total_count >= hammer_limit_reclaim / 4)
1998                 flags |= HAMMER_FLUSH_SIGNAL;
1999
2000 #if 0
2001         /*
2002          * We need to be able to vfsync/truncate from the backend.
2003          *
2004          * XXX Any truncation from the backend will acquire the vnode
2005          *     independently.
2006          */
2007         KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
2008         if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
2009                 ip->flags |= HAMMER_INODE_VHELD;
2010                 vref(ip->vp);
2011         }
2012 #endif
2013
2014         /*
2015          * Figure out how many in-memory records we can actually flush
2016          * (not including inode meta-data, buffers, etc).
2017          */
2018         KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
2019         if (flags & HAMMER_FLUSH_RECURSION) {
2020                 /*
2021                  * If this is an upwards recursion we do not want to
2022                  * recurse down again!
2023                  */
2024                 go_count = 1;
2025 #if 0
2026         } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2027                 /*
2028                  * No new records are added if we must complete a flush
2029                  * from a previous cycle, but we do have to move the records
2030                  * from the previous cycle to the current one.
2031                  */
2032 #if 0
2033                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2034                                    hammer_syncgrp_child_callback, NULL);
2035 #endif
2036                 go_count = 1;
2037 #endif
2038         } else {
2039                 /*
2040                  * Normal flush, scan records and bring them into the flush.
2041                  * Directory adds and deletes are usually skipped (they are
2042                  * grouped with the related inode rather than with the
2043                  * directory).
2044                  *
2045                  * go_count can be negative, which means the scan aborted
2046                  * due to the flush group being over-full and we should
2047                  * flush what we have.
2048                  */
2049                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2050                                    hammer_setup_child_callback, NULL);
2051         }
2052
2053         /*
2054          * This is a more involved test that includes go_count.  If we
2055          * can't flush, flag the inode and return.  If go_count is 0 we
2056          * were unable to flush any records in our rec_tree and
2057          * must ignore the XDIRTY flag.
2058          */
2059         if (go_count == 0) {
2060                 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
2061                         --ip->hmp->count_iqueued;
2062                         --hammer_count_iqueued;
2063
2064                         --flg->total_count;
2065                         ip->flush_state = HAMMER_FST_SETUP;
2066                         ip->flush_group = NULL;
2067 #if 0
2068                         if (ip->flags & HAMMER_INODE_VHELD) {
2069                                 ip->flags &= ~HAMMER_INODE_VHELD;
2070                                 vrele(ip->vp);
2071                         }
2072 #endif
2073
2074                         /*
2075                          * REFLUSH is needed to trigger dependency wakeups
2076                          * when an inode is in SETUP.
2077                          */
2078                         ip->flags |= HAMMER_INODE_REFLUSH;
2079                         if (flags & HAMMER_FLUSH_SIGNAL) {
2080                                 ip->flags |= HAMMER_INODE_RESIGNAL;
2081                                 hammer_flusher_async(ip->hmp, flg);
2082                         }
2083                         if (--ip->hmp->flusher.group_lock == 0)
2084                                 wakeup(&ip->hmp->flusher.group_lock);
2085                         return;
2086                 }
2087         }
2088
2089         /*
2090          * Snapshot the state of the inode for the backend flusher.
2091          *
2092          * We continue to retain save_trunc_off even when all truncations
2093          * have been resolved as an optimization to determine if we can
2094          * skip the B-Tree lookup for overwrite deletions.
2095          *
2096          * NOTE: The DELETING flag is a mod flag, but it is also sticky,
2097          * and stays in ip->flags.  Once set, it stays set until the
2098          * inode is destroyed.
2099          */
2100         if (ip->flags & HAMMER_INODE_TRUNCATED) {
2101                 KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
2102                 ip->sync_trunc_off = ip->trunc_off;
2103                 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
2104                 ip->flags &= ~HAMMER_INODE_TRUNCATED;
2105                 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
2106
2107                 /*
2108                  * The save_trunc_off used to cache whether the B-Tree
2109                  * holds any records past that point is not used until
2110                  * after the truncation has succeeded, so we can safely
2111                  * set it now.
2112                  */
2113                 if (ip->save_trunc_off > ip->sync_trunc_off)
2114                         ip->save_trunc_off = ip->sync_trunc_off;
2115         }
2116         ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
2117                            ~HAMMER_INODE_TRUNCATED);
2118         ip->sync_ino_leaf = ip->ino_leaf;
2119         ip->sync_ino_data = ip->ino_data;
2120         ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
2121 #ifdef DEBUG_TRUNCATE
2122         if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
2123                 kprintf("truncateS %016llx\n", ip->sync_trunc_off);
2124 #endif
2125
2126         /*
2127          * The flusher list inherits our inode and reference.
2128          */
2129         KKASSERT(flg->running == 0);
2130         RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip);
2131         if (--ip->hmp->flusher.group_lock == 0)
2132                 wakeup(&ip->hmp->flusher.group_lock);
2133
2134         if (flags & HAMMER_FLUSH_SIGNAL) {
2135                 hammer_flusher_async(ip->hmp, flg);
2136         }
2137 }
2138
2139 /*
2140  * Callback for scan of ip->rec_tree.  Try to include each record in our
2141  * flush.  ip->flush_group has been set but the inode has not yet been
2142  * moved into a flushing state.
2143  *
2144  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
2145  * both inodes.
2146  *
2147  * We return 1 for any record placed or found in FST_FLUSH, which prevents
2148  * the caller from shortcutting the flush.
2149  */
2150 static int
2151 hammer_setup_child_callback(hammer_record_t rec, void *data)
2152 {
2153         hammer_flush_group_t flg;
2154         hammer_inode_t target_ip;
2155         hammer_inode_t ip;
2156         int r;
2157
2158         /*
2159          * Records deleted or committed by the backend are ignored.
2160          * Note that the flush detects deleted frontend records at
2161          * multiple points to deal with races.  This is just the first
2162          * line of defense.  The only time HAMMER_RECF_DELETED_FE cannot
2163          * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
2164          * messes up link-count calculations.
2165          *
2166          * NOTE: Don't get confused between record deletion and, say,
2167          * directory entry deletion.  The deletion of a directory entry
2168          * which is on-media has nothing to do with the record deletion
2169          * flags.
2170          */
2171         if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
2172                           HAMMER_RECF_COMMITTED)) {
2173                 if (rec->flush_state == HAMMER_FST_FLUSH) {
2174                         KKASSERT(rec->flush_group == rec->ip->flush_group);
2175                         r = 1;
2176                 } else {
2177                         r = 0;
2178                 }
2179                 return(r);
2180         }
2181
2182         /*
2183          * If the record is in an idle state it has no dependencies and
2184          * can be flushed.
2185          */
2186         ip = rec->ip;
2187         flg = ip->flush_group;
2188         r = 0;
2189
2190         switch(rec->flush_state) {
2191         case HAMMER_FST_IDLE:
2192                 /*
2193                  * The record has no setup dependency; we can flush it.
2194                  */
2195                 KKASSERT(rec->target_ip == NULL);
2196                 rec->flush_state = HAMMER_FST_FLUSH;
2197                 rec->flush_group = flg;
2198                 ++flg->refs;
2199                 hammer_ref(&rec->lock);
2200                 r = 1;
2201                 break;
2202         case HAMMER_FST_SETUP:
2203                 /*
2204                  * The record has a setup dependency.  These are typically
2205                  * directory entry adds and deletes.  Such entries will be
2206                  * flushed when their inodes are flushed so we do not
2207                  * usually have to add them to the flush here.  However,
2208                  * if the target_ip has set HAMMER_INODE_CONN_DOWN then
2209                  * it is asking us to flush this record (and it).
2210                  */
2211                 target_ip = rec->target_ip;
2212                 KKASSERT(target_ip != NULL);
2213                 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
2214
2215                 /*
2216                  * If the target IP is already flushing in our group
2217                  * we could associate the record, but target_ip has
2218                  * already synced ino_data to sync_ino_data and we
2219                  * would also have to adjust nlinks.   Plus there are
2220                  * ordering issues for adds and deletes.
2221                  *
2222                  * Reflush downward if this is an ADD, and upward if
2223                  * this is a DEL.
2224                  */
2225                 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
2226                         if (rec->type == HAMMER_MEM_RECORD_ADD)
2227                                 ip->flags |= HAMMER_INODE_REFLUSH;
2228                         else
2229                                 target_ip->flags |= HAMMER_INODE_REFLUSH;
2230                         break;
2231                 } 
2232
2233                 /*
2234                  * Target IP is not yet flushing.  This can get complex
2235                  * because we have to be careful about the recursion.
2236                  *
2237                  * Directories create an issue for us in that if a flush
2238                  * of a directory is requested the expectation is to flush
2239                  * any pending directory entries, but this will cause the
2240                  * related inodes to recursively flush as well.  We can't
2241                  * really defer the operation so just get as many as we
2242                  * can into the flush and pick up the rest on a reflush.
2243                  */
2244 #if 0
2245                 if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
2246                     (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
2247                         /*
2248                          * We aren't reclaiming and the target ip was not
2249                          * previously prevented from flushing due to this
2250                          * record dependency.  Do not flush this record.
2251                          */
2252                         /*r = 0;*/
2253                 } else
2254 #endif
2255                 if (flg->total_count + flg->refs >
2256                            ip->hmp->undo_rec_limit) {
2257                         /*
2258                          * Our flush group is over-full and we risk blowing
2259                          * out the UNDO FIFO.  Stop the scan, flush what we
2260                          * have, then reflush the directory.
2261                          *
2262                          * The directory may be forced through multiple
2263                          * flush groups before it can be completely
2264                          * flushed.
2265                          */
2266                         ip->flags |= HAMMER_INODE_RESIGNAL |
2267                                      HAMMER_INODE_REFLUSH;
2268                         r = -1;
2269                 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
2270                         /*
2271                          * If the target IP is not flushing we can force
2272                          * it to flush.  Even if it is unable to write out
2273                          * any of its own records we have at least one in
2274                          * hand that we CAN deal with.
2275                          */
2276                         rec->flush_state = HAMMER_FST_FLUSH;
2277                         rec->flush_group = flg;
2278                         ++flg->refs;
2279                         hammer_ref(&rec->lock);
2280                         hammer_flush_inode_core(target_ip, flg,
2281                                                 HAMMER_FLUSH_RECURSION);
2282                         r = 1;
2283                 } else {
2284                         /*
2285                          * General or delete-on-disk record.
2286                          *
2287                          * XXX this needs help.  If this is a delete-on-disk
2288                          * we could disconnect the target.  If the target has
2289                          * its own dependencies they really need to be flushed.
2290                          *
2291                          * XXX
2292                          */
2293                         rec->flush_state = HAMMER_FST_FLUSH;
2294                         rec->flush_group = flg;
2295                         ++flg->refs;
2296                         hammer_ref(&rec->lock);
2297                         hammer_flush_inode_core(target_ip, flg,
2298                                                 HAMMER_FLUSH_RECURSION);
2299                         r = 1;
2300                 }
2301                 break;
2302         case HAMMER_FST_FLUSH:
2303                 /* 
2304                  * The flush_group should already match.
2305                  */
2306                 KKASSERT(rec->flush_group == flg);
2307                 r = 1;
2308                 break;
2309         }
2310         return(r);
2311 }
2312
2313 #if 0
2314 /*
2315  * This version just moves records already in a flush state to the new
2316  * flush group and that is it.
2317  */
2318 static int
2319 hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
2320 {
2321         hammer_inode_t ip = rec->ip;
2322
2323         switch(rec->flush_state) {
2324         case HAMMER_FST_FLUSH:
2325                 KKASSERT(rec->flush_group == ip->flush_group);
2326                 break;
2327         default:
2328                 break;
2329         }
2330         return(0);
2331 }
2332 #endif
2333
2334 /*
2335  * Wait for a previously queued flush to complete.
2336  *
2337  * If a critical error occurred we don't try to wait.
2338  */
2339 void
2340 hammer_wait_inode(hammer_inode_t ip)
2341 {
2342         hammer_flush_group_t flg;
2343
2344         flg = NULL;
2345         if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
2346                 while (ip->flush_state != HAMMER_FST_IDLE &&
2347                        (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
2348                         if (ip->flush_state == HAMMER_FST_SETUP)
2349                                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2350                         if (ip->flush_state != HAMMER_FST_IDLE) {
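                                /*
                                 * FLUSHW asks hammer_flush_inode_done()
                                 * to wakeup(&ip->flags) when the flush
                                 * finishes.
                                 */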
2351                                 ip->flags |= HAMMER_INODE_FLUSHW;
2352                                 tsleep(&ip->flags, 0, "hmrwin", 0);
2353                         }
2354                 }
2355         }
2356 }
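
/*
 * Illustrative sketch (not part of the original source): a synchronous,
 * fsync-style caller would typically pair a signalled flush with a wait:
 *
 *	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
 *	hammer_wait_inode(ip);
 */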
2357
2358 /*
2359  * Called by the backend code when a flush has been completed.
2360  * The inode has already been removed from the flush list.
2361  *
2362  * A pipelined flush can occur, in which case we must re-enter the
2363  * inode on the list and re-copy its fields.
2364  */
2365 void
2366 hammer_flush_inode_done(hammer_inode_t ip, int error)
2367 {
2368         hammer_mount_t hmp;
2369         int dorel;
2370
2371         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
2372
2373         hmp = ip->hmp;
2374
2375         /*
2376          * Auto-reflush if the backend could not completely flush
2377          * the inode.  This fixes a case where a deferred buffer flush
2378          * could cause fsync to return early.
2379          */
2380         if (ip->sync_flags & HAMMER_INODE_MODMASK)
2381                 ip->flags |= HAMMER_INODE_REFLUSH;
2382
2383         /*
2384          * Merge left-over flags back into the frontend and fix the state.
2385          * Incomplete truncations are retained by the backend.
2386          */
2387         ip->error = error;
2388         ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
2389         ip->sync_flags &= HAMMER_INODE_TRUNCATED;
2390
2391         /*
2392          * The backend may have adjusted nlinks, so if the adjusted nlinks
2393          * does not match the frontend's, set the frontend's DDIRTY flag again.
2394          */
2395         if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
2396                 ip->flags |= HAMMER_INODE_DDIRTY;
2397
2398         /*
2399          * Fix up the dirty buffer status.
2400          */
2401         if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
2402                 ip->flags |= HAMMER_INODE_BUFS;
2403         }
2404         hammer_redo_fifo_end_flush(ip);
2405
2406         /*
2407          * Re-set the XDIRTY flag if some of the inode's in-memory records
2408          * could not be flushed.
2409          */
2410         KKASSERT((RB_EMPTY(&ip->rec_tree) &&
2411                   (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
2412                  (!RB_EMPTY(&ip->rec_tree) &&
2413                   (ip->flags & HAMMER_INODE_XDIRTY) != 0));
2414
2415         /*
2416          * Do not lose track of inodes which no longer have vnode
2417          * associations, otherwise they may never get flushed again.
2418          *
2419          * The reflush flag can be set superfluously, causing extra pain
2420          * for no reason.  If the inode is no longer modified it no longer
2421          * needs to be flushed.
2422          */
2423         if (ip->flags & HAMMER_INODE_MODMASK) {
2424                 if (ip->vp == NULL)
2425                         ip->flags |= HAMMER_INODE_REFLUSH;
2426         } else {
2427                 ip->flags &= ~HAMMER_INODE_REFLUSH;
2428         }
2429
2430         /*
2431          * Adjust the flush state.
2432          */
2433         if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2434                 /*
2435                  * We were unable to flush out all our records, leave the
2436                  * inode in a flush state and in the current flush group.
2437                  * The flush group will be re-run.
2438                  *
2439                  * This occurs if the UNDO block gets too full or there is
2440                  * too much dirty meta-data; it allows the flusher to
2441                  * finalize the UNDO block and then re-flush.
2442                  */
2443                 ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
2444                 dorel = 0;
2445         } else {
2446                 /*
2447                  * Remove from the flush_group
2448                  */
2449                 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
2450                 ip->flush_group = NULL;
2451
2452 #if 0
2453                 /*
2454                  * Clean up the vnode ref and tracking counts.
2455                  */
2456                 if (ip->flags & HAMMER_INODE_VHELD) {
2457                         ip->flags &= ~HAMMER_INODE_VHELD;
2458                         vrele(ip->vp);
2459                 }
2460 #endif
2461                 --hmp->count_iqueued;
2462                 --hammer_count_iqueued;
2463
2464                 /*
2465                  * And adjust the state.
2466                  */
2467                 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
2468                         ip->flush_state = HAMMER_FST_IDLE;
2469                         dorel = 1;
2470                 } else {
2471                         ip->flush_state = HAMMER_FST_SETUP;
2472                         dorel = 0;
2473                 }
2474
2475                 /*
2476                  * If the frontend is waiting for a flush to complete,
2477                  * wake it up.
2478                  */
2479                 if (ip->flags & HAMMER_INODE_FLUSHW) {
2480                         ip->flags &= ~HAMMER_INODE_FLUSHW;
2481                         wakeup(&ip->flags);
2482                 }
2483
2484                 /*
2485                  * If the frontend made more changes and requested another
2486                  * flush, then try to get it running.
2487                  *
2488                  * Reflushes are aborted when the inode is errored out.
2489                  */
2490                 if (ip->flags & HAMMER_INODE_REFLUSH) {
2491                         ip->flags &= ~HAMMER_INODE_REFLUSH;
2492                         if (ip->flags & HAMMER_INODE_RESIGNAL) {
2493                                 ip->flags &= ~HAMMER_INODE_RESIGNAL;
2494                                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2495                         } else {
2496                                 hammer_flush_inode(ip, 0);
2497                         }
2498                 }
2499         }
2500
2501         /*
2502          * If we have no parent dependencies we can clear CONN_DOWN
2503          */
2504         if (TAILQ_EMPTY(&ip->target_list))
2505                 ip->flags &= ~HAMMER_INODE_CONN_DOWN;
2506
2507         /*
2508          * If the inode is now clean drop the space reservation.
2509          */
2510         if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
2511             (ip->flags & HAMMER_INODE_RSV_INODES)) {
2512                 ip->flags &= ~HAMMER_INODE_RSV_INODES;
2513                 --hmp->rsv_inodes;
2514         }
2515
2516         if (dorel)
2517                 hammer_rel_inode(ip, 0);
2518 }
2519
2520 /*
2521  * Called from hammer_sync_inode() to synchronize in-memory records
2522  * to the media.
2523  */
2524 static int
2525 hammer_sync_record_callback(hammer_record_t record, void *data)
2526 {
2527         hammer_cursor_t cursor = data;
2528         hammer_transaction_t trans = cursor->trans;
2529         hammer_mount_t hmp = trans->hmp;
2530         int error;
2531
2532         /*
2533          * Skip records that do not belong to the current flush.
2534          */
2535         ++hammer_stats_record_iterations;
2536         if (record->flush_state != HAMMER_FST_FLUSH)
2537                 return(0);
2538
2539 #if 1
2540         if (record->flush_group != record->ip->flush_group) {
2541                 kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group, record->ip->flush_group);
2542                 if (hammer_debug_critical)
2543                         Debugger("blah2");
2544                 return(0);
2545         }
2546 #endif
2547         KKASSERT(record->flush_group == record->ip->flush_group);
2548
2549         /*
2550          * Interlock the record using the BE flag.  Once BE is set the
2551          * frontend cannot change the state of FE.
2552          *
2553          * NOTE: If FE is set prior to us setting BE we still sync the
2554          * record out, but the flush completion code converts it to 
2555          * a delete-on-disk record instead of destroying it.
2556          */
2557         KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
2558         record->flags |= HAMMER_RECF_INTERLOCK_BE;
2559
2560         /*
2561          * The backend has already disposed of the record.
2562          */
2563         if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
2564                 error = 0;
2565                 goto done;
2566         }
2567
2568         /*
2569          * If the whole inode is being deleted, all on-disk records will
2570          * be deleted very soon and we can't sync any new records to disk
2571          * because they will be deleted in the same transaction they were
2572          * created in (delete_tid == create_tid), which will assert.
2573          *
2574          * XXX There may be a case with RECORD_ADD with DELETED_FE set
2575          * that we currently panic on.
2576          */
2577         if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
2578                 switch(record->type) {
2579                 case HAMMER_MEM_RECORD_DATA:
2580                         /*
2581                          * We don't have to do anything, if the record was
2582                          * committed the space will have been accounted for
2583                          * in the blockmap.
2584                          */
2585                         /* fall through */
2586                 case HAMMER_MEM_RECORD_GENERAL:
2587                         /*
2588                          * Set deleted-by-backend flag.  Do not set the
2589                          * backend committed flag, because we are throwing
2590                          * the record away.
2591                          */
2592                         record->flags |= HAMMER_RECF_DELETED_BE;
2593                         ++record->ip->rec_generation;
2594                         error = 0;
2595                         goto done;
2596                 case HAMMER_MEM_RECORD_ADD:
2597                         panic("hammer_sync_record_callback: illegal add "
2598                               "during inode deletion record %p", record);
2599                         break; /* NOT REACHED */
2600                 case HAMMER_MEM_RECORD_INODE:
2601                         panic("hammer_sync_record_callback: attempt to "
2602                               "sync inode record %p?", record);
2603                         break; /* NOT REACHED */
2604                 case HAMMER_MEM_RECORD_DEL:
2605                         /* 
2606                          * Follow through and issue the on-disk deletion
2607                          */
2608                         break;
2609                 }
2610         }
2611
2612         /*
2613          * If DELETED_FE is set special handling is needed for directory
2614          * entries.  Dependent pieces related to the directory entry may
2615          * have already been synced to disk.  If this occurs we have to
2616          * sync the directory entry and then change the in-memory record
2617          * from an ADD to a DELETE to cover the fact that it's been
2618          * deleted by the frontend.
2619          *
2620          * A directory delete covering record (MEM_RECORD_DEL) can never
2621          * be deleted by the frontend.
2622          *
2623          * Any other record type (aka DATA) can be deleted by the frontend.
2624          * XXX At the moment the flusher must skip it because there may
2625          * be another data record in the flush group for the same block,
2626          * meaning that some frontend data changes can leak into the backend's
2627          * synchronization point.
2628          */
2629         if (record->flags & HAMMER_RECF_DELETED_FE) {
2630                 if (record->type == HAMMER_MEM_RECORD_ADD) {
2631                         /*
2632                          * Convert a front-end deleted directory-add to
2633                          * a directory-delete entry later.
2634                          */
2635                         record->flags |= HAMMER_RECF_CONVERT_DELETE;
2636                 } else {
2637                         /*
2638                          * Dispose of the record (race case).  Mark as
2639                          * deleted by backend (and not committed).
2640                          */
2641                         KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
2642                         record->flags |= HAMMER_RECF_DELETED_BE;
2643                         ++record->ip->rec_generation;
2644                         error = 0;
2645                         goto done;
2646                 }
2647         }
2648
2649         /*
2650          * Assign the create_tid for new records.  Deletions already
2651          * have the record's entire key properly set up.
2652          */
2653         if (record->type != HAMMER_MEM_RECORD_DEL) {
2654                 record->leaf.base.create_tid = trans->tid;
2655                 record->leaf.create_ts = trans->time32;
2656         }
2657
2658         /*
2659          * This actually moves the record to the on-media B-Tree.  We
2660          * must also generate REDO_TERM entries in the UNDO/REDO FIFO
2661          * indicating that the related REDO_WRITE(s) have been committed.
2662          *
2663          * During recovery any REDO_TERM's within the nominal recovery span
2664          * are ignored since the related meta-data is being undone, causing
2665          * any matching REDO_WRITEs to execute.  The REDO_TERMs outside
2666          * the nominal recovery span will match against REDO_WRITEs and
2667          * prevent them from being executed (because the meta-data has
2668          * already been synchronized).
2669          */
2670         if (record->flags & HAMMER_RECF_REDO) {
2671                 KKASSERT(record->type == HAMMER_MEM_RECORD_DATA);
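                /*
                 * base.key is the ending offset of the data, so back up
                 * by data_len to get the starting offset for the
                 * REDO_TERM entry.
                 */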
2672                 hammer_generate_redo(trans, record->ip,
2673                                      record->leaf.base.key -
2674                                          record->leaf.data_len,
2675                                      HAMMER_REDO_TERM_WRITE,
2676                                      NULL,
2677                                      record->leaf.data_len);
2678         }
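        /*
         * Push the record into the on-media B-Tree, re-initializing the
         * cursor and retrying if we deadlock against another cursor.
         */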
2679         for (;;) {
2680                 error = hammer_ip_sync_record_cursor(cursor, record);
2681                 if (error != EDEADLK)
2682                         break;
2683                 hammer_done_cursor(cursor);
2684                 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
2685                                            record->ip);
2686                 if (error)
2687                         break;
2688         }
2689         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
2690
2691         if (error)
2692                 error = -error;
2693 done:
2694         hammer_flush_record_done(record, error);
2695
2696         /*
2697          * Do partial finalization if we have built up too many dirty
2698          * buffers.  Otherwise a buffer cache deadlock can occur when
2699          * doing things like creating tens of thousands of tiny files.
2700          *
2701          * We must release our cursor lock to avoid a 3-way deadlock
2702          * due to the exclusive sync lock the finalizer must get.
2703          *
2704          * WARNING: See warnings in hammer_unlock_cursor() function.
2705          */
2706         if (hammer_flusher_meta_limit(hmp)) {
2707                 hammer_unlock_cursor(cursor);
2708                 hammer_flusher_finalize(trans, 0);
2709                 hammer_lock_cursor(cursor);
2710         }
2711
2712         return(error);
2713 }
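
#if 0
/*
 * Illustrative sketch only, never compiled: the generic shape of the
 * deadlock-retry loop used by hammer_sync_record_callback() above.
 * All names below are placeholders invented for the sketch, not HAMMER
 * APIs.  The key invariant is that an operation failing with EDEADLK
 * has already backed out its B-Tree locks, so the caller may safely
 * tear down and rebuild the cursor before retrying.
 */
static int
sync_with_retry(struct example_ctx *ctx)
{
	int error;

	for (;;) {
		error = do_btree_operation(ctx); /* may return EDEADLK */
		if (error != EDEADLK)
			break;			 /* success or hard error */
		teardown_cursor(ctx);		 /* drop remaining cursor state */
		error = rebuild_cursor(ctx);	 /* re-seek via cache hint */
		if (error)
			break;
	}
	return (error);
}
#endif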
2714
2715 /*
2716  * Backend function called by the flusher to sync an inode to media.
2717  */
2718 int
2719 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
2720 {
2721         struct hammer_cursor cursor;
2722         hammer_node_t tmp_node;
2723         hammer_record_t depend;
2724         hammer_record_t next;
2725         int error, tmp_error;
2726         u_int64_t nlinks;
2727
2728         if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
2729                 return(0);
2730
2731         error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2732         if (error)
2733                 goto done;
2734
2735         /*
2736          * Any directory records referencing this inode which are not in
2737          * our current flush group must adjust our nlink count for the
2738          * purposes of synchronizing to disk.
2739          *
2740          * Records which are in our flush group can be unlinked from our
2741          * inode now, potentially allowing the inode to be physically
2742          * deleted.
2743          *
2744          * This cannot block.
2745          */
2746         nlinks = ip->ino_data.nlinks;
2747         next = TAILQ_FIRST(&ip->target_list);
2748         while ((depend = next) != NULL) {
2749                 next = TAILQ_NEXT(depend, target_entry);
2750                 if (depend->flush_state == HAMMER_FST_FLUSH &&
2751                     depend->flush_group == ip->flush_group) {
2752                         /*
2753                          * If this is an ADD that was deleted by the frontend
2754                          * the frontend nlinks count will have already been
2755                          * decremented, but the backend is going to sync its
2756                          * directory entry and must account for it.  The
2757                          * record will be converted to a delete-on-disk when
2758                          * it gets synced.
2759                          *
2760                          * If the ADD was not deleted by the frontend we
2761          * can remove the dependency from our target_list.
2762                          */
2763                         if (depend->flags & HAMMER_RECF_DELETED_FE) {
2764                                 ++nlinks;
2765                         } else {
2766                                 TAILQ_REMOVE(&ip->target_list, depend,
2767                                              target_entry);
2768                                 depend->target_ip = NULL;
2769                         }
2770                 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
2771                         /*
2772                          * Not part of our flush group and not deleted by
2773                          * the front-end, adjust the link count synced to
2774                          * the media (undo what the frontend did when it
2775                          * queued the record).
2776                          */
2777                         KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
2778                         switch(depend->type) {
2779                         case HAMMER_MEM_RECORD_ADD:
2780                                 --nlinks;
2781                                 break;
2782                         case HAMMER_MEM_RECORD_DEL:
2783                                 ++nlinks;
2784                                 break;
2785                         default:
2786                                 break;
2787                         }
2788                 }
2789         }
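
        /*
         * Example of the adjustment above: if the frontend queued a
         * directory-entry ADD referencing this inode but that record is
         * not in the current flush group, the frontend has already
         * incremented nlinks for it.  We subtract one so the media copy
         * reflects only the entries actually being synced; a pending
         * DEL is compensated in the opposite direction.
         */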
2790
2791         /*
2792          * Set dirty if we had to modify the link count.
2793          */
2794         if (ip->sync_ino_data.nlinks != nlinks) {
2795                 KKASSERT((int64_t)nlinks >= 0);
2796                 ip->sync_ino_data.nlinks = nlinks;
2797                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
2798         }
2799
2800         /*
2801          * If there is a truncation queued, destroy any data past the (aligned)
2802          * truncation point.  Userland will have dealt with the buffer
2803          * containing the truncation point for us.
2804          *
2805          * We don't flush pending frontend data buffers until after we've
2806          * dealt with the truncation.
2807          */
2808         if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2809                 /*
2810                  * Interlock trunc_off.  The VOP front-end may continue to
2811                  * make adjustments to it while we are blocked.
2812                  */
2813                 off_t trunc_off;
2814                 off_t aligned_trunc_off;
2815                 int blkmask;
2816
2817                 trunc_off = ip->sync_trunc_off;
2818                 blkmask = hammer_blocksize(trunc_off) - 1;
2819                 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
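
                /*
                 * Round-up example, assuming a 16KB block at this
                 * offset: trunc_off 100000 gives blkmask 0x3FFF and
                 * aligned_trunc_off 114688 (7 x 16384).  Everything at
                 * or beyond the aligned offset consists of whole blocks
                 * which can be deleted on-media; the straddling partial
                 * block was already handled by the frontend.
                 */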
2820
2821                 /*
2822                  * Delete any whole blocks on-media.  The front-end has
2823                  * already cleaned out any partial block and made it
2824                  * pending.  The front-end may have updated trunc_off
2825          * while we were blocked, so we only use sync_trunc_off.
2826                  *
2827          * This operation can blow out the buffer cache; EWOULDBLOCK
2828                  * means we were unable to complete the deletion.  The
2829                  * deletion will update sync_trunc_off in that case.
2830                  */
2831                 error = hammer_ip_delete_range(&cursor, ip,
2832                                                 aligned_trunc_off,
2833                                                 0x7FFFFFFFFFFFFFFFLL, 2);
2834                 if (error == EWOULDBLOCK) {
2835                         ip->flags |= HAMMER_INODE_WOULDBLOCK;
2836                         error = 0;
2837                         goto defer_buffer_flush;
2838                 }
2839
2840                 if (error)
2841                         goto done;
2842
2843                 /*
2844                  * Generate a REDO_TERM_TRUNC entry in the UNDO/REDO FIFO.
2845                  *
2846                  * XXX we do this even if we did not previously generate
2847          * a REDO_TRUNC record.  This operation may enclose the
2848                  * range for multiple prior truncation entries in the REDO
2849                  * log.
2850                  */
2851                 if (trans->hmp->version >= HAMMER_VOL_VERSION_FOUR &&
2852                     (ip->flags & HAMMER_INODE_RDIRTY)) {
2853                         hammer_generate_redo(trans, ip, aligned_trunc_off,
2854                                              HAMMER_REDO_TERM_TRUNC,
2855                                              NULL, 0);
2856                 }
2857
2858                 /*
2859                  * Clear the truncation flag on the backend after we have
2860                  * completed the deletions.  Backend data is now good again
2861                  * (including new records we are about to sync, below).
2862                  *
2863                  * Leave sync_trunc_off intact.  As we write additional
2864                  * records the backend will update sync_trunc_off.  This
2865                  * tells the backend whether it can skip the overwrite
2866                  * test.  This should work properly even when the backend
2867                  * writes full blocks where the truncation point straddles
2868                  * the block because the comparison is against the base
2869                  * offset of the record.
2870                  */
2871                 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2872                 /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
2873         } else {
2874                 error = 0;
2875         }
2876
2877         /*
2878          * Now sync related records.  These will typically be directory
2879          * entries, records tracking direct-writes, or delete-on-disk records.
2880          */
2881         if (error == 0) {
2882                 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2883                                     hammer_sync_record_callback, &cursor);
2884                 if (tmp_error < 0)
2885                         tmp_error = -tmp_error; /* callbacks return -errno */
2886                 if (tmp_error)
2887                         error = tmp_error;
2888         }
2889         hammer_cache_node(&ip->cache[1], cursor.node);
2890
2891         /*
2892          * Re-seek for inode update, assuming our cache hasn't been ripped
2893          * out from under us.
2894          */
2895         if (error == 0) {
2896                 tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
2897                 if (tmp_node) {
2898                         hammer_cursor_downgrade(&cursor);
2899                         hammer_lock_sh(&tmp_node->lock);
2900                         if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
2901                                 hammer_cursor_seek(&cursor, tmp_node, 0);
2902                         hammer_unlock(&tmp_node->lock);
2903                         hammer_rel_node(tmp_node);
2904                 }
2905                 error = 0;
2906         }
2907
2908         /*
2909          * If we are deleting the inode the frontend had better not have
2910          * any active references on elements making up the inode.
2911          *
2912          * The call to hammer_ip_delete_clean() cleans up auxiliary records
2913          * but not DB or DATA records.  Those must have already been deleted
2914          * by the normal truncation mechanism.
2915          */
2916         if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
2917             RB_EMPTY(&ip->rec_tree) &&
2918             (ip->sync_flags & HAMMER_INODE_DELETING) &&
2919             (ip->flags & HAMMER_INODE_DELETED) == 0) {
2920                 int count1 = 0;
2921
2922                 error = hammer_ip_delete_clean(&cursor, ip, &count1);
2923                 if (error == 0) {
2924                         ip->flags |= HAMMER_INODE_DELETED;
2925                         ip->sync_flags &= ~HAMMER_INODE_DELETING;
2926                         ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2927                         KKASSERT(RB_EMPTY(&ip->rec_tree));
2928
2929                         /*
2930                          * Set delete_tid in both the frontend and backend
2931                          * copy of the inode record.  The DELETED flag handles
2932                          * this, do not set DDIRTY.
2933                          */
2934                         ip->ino_leaf.base.delete_tid = trans->tid;
2935                         ip->sync_ino_leaf.base.delete_tid = trans->tid;
2936                         ip->ino_leaf.delete_ts = trans->time32;
2937                         ip->sync_ino_leaf.delete_ts = trans->time32;
2938
2940                         /*
2941                          * Adjust the inode count in the volume header
2942                          */
2943                         hammer_sync_lock_sh(trans);
2944                         if (ip->flags & HAMMER_INODE_ONDISK) {
2945                                 hammer_modify_volume_field(trans,
2946                                                            trans->rootvol,
2947                                                            vol0_stat_inodes);
2948                                 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
2949                                 hammer_modify_volume_done(trans->rootvol);
2950                         }
2951                         hammer_sync_unlock(trans);
2952                 }
2953         }
2954
2955         if (error)
2956                 goto done;
2957         ip->sync_flags &= ~HAMMER_INODE_BUFS;
2958
2959 defer_buffer_flush:
2960         /*
2961          * Now update the inode's on-disk inode-data and/or on-disk record.
2962          * DELETED and ONDISK are managed only in ip->flags.
2963          *
2964          * In the case of a deferred buffer flush we still update the on-disk
2965          * inode to satisfy visibility requirements if there happen to be
2966          * directory dependencies.
2967          */
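        /*
         * State summary for the switch below:
         *
         *      DELETED ONDISK  action
         *      ------- ------  --------------------------------------
         *      yes     yes     delete the existing media record
         *      yes     no      inode never reached media; discard any
         *                      remaining in-memory records
         *      no      yes     normal update of the existing record
         *      no      no      first sync, force DDIRTY so an initial
         *                      record is written
         */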
2968         switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
2969         case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
2970                 /*
2971                  * If deleted and on-disk, don't set any additional flags;
2972                  * the delete flag takes care of things.
2973                  *
2974                  * Clear flags which may have been set by the frontend.
2975                  */
2976                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2977                                     HAMMER_INODE_SDIRTY |
2978                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2979                                     HAMMER_INODE_DELETING);
2980                 break;
2981         case HAMMER_INODE_DELETED:
2982                 /*
2983                  * Take care of the case where a deleted inode was never
2984                  * flushed to the disk in the first place.
2985                  *
2986                  * Clear flags which may have been set by the frontend.
2987                  */
2988                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2989                                     HAMMER_INODE_SDIRTY |
2990                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2991                                     HAMMER_INODE_DELETING);
2992                 while (RB_ROOT(&ip->rec_tree)) {
2993                         hammer_record_t record = RB_ROOT(&ip->rec_tree);
2994                         hammer_ref(&record->lock);
2995                         KKASSERT(hammer_oneref(&record->lock));
2996                         record->flags |= HAMMER_RECF_DELETED_BE;
2997                         ++record->ip->rec_generation;
2998                         hammer_rel_mem_record(record);
2999                 }
3000                 break;
3001         case HAMMER_INODE_ONDISK:
3002                 /*
3003                  * If already on-disk, do not set any additional flags.
3004                  */
3005                 break;
3006         default:
3007                 /*
3008                  * If not on-disk and not deleted, set DDIRTY to force
3009                  * an initial record to be written.
3010                  *
3011                  * Also set the create_tid in both the frontend and backend
3012                  * copy of the inode record.
3013                  */
3014                 ip->ino_leaf.base.create_tid = trans->tid;
3015                 ip->ino_leaf.create_ts = trans->time32;
3016                 ip->sync_ino_leaf.base.create_tid = trans->tid;
3017                 ip->sync_ino_leaf.create_ts = trans->time32;
3018                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
3019                 break;
3020         }
3021
3022         /*
3023          * If DDIRTY or SDIRTY is set, write out a new record.
3024          * If the inode is already on-disk the old record is marked as
3025          * deleted.
3026          *
3027          * If DELETED is set hammer_update_inode() will delete the existing
3028          * record without writing out a new one.
3029          *
3030          * If *ONLY* the ITIMES flag is set we can update the record in-place.
3031          */
3032         if (ip->flags & HAMMER_INODE_DELETED) {
3033                 error = hammer_update_inode(&cursor, ip);
3034         } else 
3035         if (!(ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY)) &&
3036             (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
3037                 error = hammer_update_itimes(&cursor, ip);
3038         } else
3039         if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY |
3040                               HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
3041                 error = hammer_update_inode(&cursor, ip);
3042         }
3043 done:
3044         if (error) {
3045                 hammer_critical_error(ip->hmp, ip, error,
3046                                       "while syncing inode");
3047         }
3048         hammer_done_cursor(&cursor);
3049         return(error);
3050 }
3051
3052 /*
3053  * This routine is called when the OS is no longer actively referencing
3054  * the inode (but might still be keeping it cached), or when releasing
3055  * the last reference to an inode.
3056  *
3057  * At this point if the inode's nlinks count is zero we want to destroy
3058  * it, which may mean destroying it on-media too.
3059  */
3060 void
3061 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
3062 {
3063         struct vnode *vp;
3064
3065         /*
3066          * Set the DELETING flag when the link count drops to 0 and the
3067          * OS no longer has any opens on the inode.
3068          *
3069          * The backend will clear DELETING (a mod flag) and set DELETED
3070          * (a state flag) when it is actually able to perform the
3071          * operation.
3072          *
3073          * Don't reflag the deletion if the flusher is currently syncing
3074          * one that was already flagged.  A previously set DELETING flag
3075          * may bounce around flags and sync_flags until the operation is
3076          * completely done.
3077          */
3078         if (ip->ino_data.nlinks == 0 &&
3079             ((ip->flags | ip->sync_flags) & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
3080                 ip->flags |= HAMMER_INODE_DELETING;
3081                 ip->flags |= HAMMER_INODE_TRUNCATED;
3082                 ip->trunc_off = 0;
3083                 vp = NULL;
3084                 if (getvp) {
3085                         if (hammer_get_vnode(ip, &vp) != 0)
3086                                 return;
3087                 }
3088
3089                 /*
3090                  * Final cleanup
3091                  */
3092                 if (ip->vp)
3093                         nvtruncbuf(ip->vp, 0, HAMMER_BUFSIZE, 0);
3094                 if (getvp)
3095                         vput(vp);
3096         }
3097 }
3098
3099 /*
3100  * After potentially resolving a dependency the inode is tested
3101  * to determine whether it needs to be reflushed.
3102  */
3103 void
3104 hammer_test_inode(hammer_inode_t ip)
3105 {
3106         if (ip->flags & HAMMER_INODE_REFLUSH) {
3107                 ip->flags &= ~HAMMER_INODE_REFLUSH;
3108                 hammer_ref(&ip->lock);
3109                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
3110                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
3111                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
3112                 } else {
3113                         hammer_flush_inode(ip, 0);
3114                 }
3115                 hammer_rel_inode(ip, 0);
3116         }
3117 }
3118
3119 /*
3120  * Clear the RECLAIM flag on an inode.  This occurs when the inode is
3121  * reassociated with a vp or just before it gets freed.
3122  *
3123  * Pipeline wakeups to threads blocked due to an excessive number of
3124  * detached inodes.  This typically occurs when atime updates accumulate
3125  * while scanning a directory tree.
3126  */
3127 static void
3128 hammer_inode_wakereclaims(hammer_inode_t ip)
3129 {
3130         struct hammer_reclaim *reclaim;
3131         hammer_mount_t hmp = ip->hmp;
3132
3133         if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
3134                 return;
3135
3136         --hammer_count_reclaiming;
3137         --hmp->inode_reclaims;
3138         ip->flags &= ~HAMMER_INODE_RECLAIM;
3139
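        /*
         * Wake at least one pipelined waiter.  Additional waiters are
         * woken only while the backlog stays at or below half the
         * reclaim limit, giving the flusher room to catch up before
         * more load is admitted.
         */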
3140         while ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
3141                 if (reclaim->count > 0 && --reclaim->count == 0) {
3142                         TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
3143                         wakeup(reclaim);
3144                 }
3145                 if (hmp->inode_reclaims > hammer_limit_reclaim / 2)
3146                         break;
3147         }
3148 }
3149
3150 /*
3151  * Setup our reclaim pipeline.  We only let so many detached (and dirty)
3152  * inodes build up before we start blocking.  This routine is called
3153  * if a new inode is created or an inode is loaded from media.
3154  *
3155  * When we block we don't care *which* inode has finished reclaiming,
3156  * as long as one does.
3157  */
3158 void
3159 hammer_inode_waitreclaims(hammer_transaction_t trans)
3160 {
3161         hammer_mount_t hmp = trans->hmp;
3162         struct hammer_reclaim reclaim;
3163
3164         /*
3165          * Track inode load
3166          */
3167         if (curthread->td_proc) {
3168                 struct hammer_inostats *stats;
3169                 int lower_limit;
3170
3171                 stats = hammer_inode_inostats(hmp, curthread->td_proc->p_pid);
3172                 ++stats->count;
3173
3174                 if (stats->count > hammer_limit_reclaim / 2)
3175                         stats->count = hammer_limit_reclaim / 2;
3176                 lower_limit = hammer_limit_reclaim - stats->count;
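                /*
                 * e.g. if hammer_limit_reclaim is 4000 and this pid's
                 * decayed count has saturated at 2000, the process
                 * blocks once 2000 inodes await reclaim, while a light
                 * consumer is not blocked until the full 4000.
                 */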
3177                 if (hammer_debug_general & 0x10000)
3178                         kprintf("pid %5d limit %d\n", (int)curthread->td_proc->p_pid, lower_limit);
3179
3180                 if (hmp->inode_reclaims < lower_limit)
3181                         return;
3182         } else {
3183                 /*
3184                  * Default mode
3185                  */
3186                 if (hmp->inode_reclaims < hammer_limit_reclaim)
3187                         return;
3188         }
3189         reclaim.count = 1;
3190         TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
3191         tsleep(&reclaim, 0, "hmrrcm", hz);
3192         if (reclaim.count > 0)
3193                 TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
3194 }
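
#if 0
/*
 * Illustrative userspace analogue only, not part of HAMMER: the same
 * producer-throttle idea expressed with pthreads.  Producers block
 * while the backlog is at the limit; consumers retire items and
 * signal.  This is simplified -- the kernel version above also times
 * out after roughly one second and applies per-pid limits.  All names
 * here are invented for the sketch.
 */
#include <pthread.h>

struct reclaim_throttle {
	pthread_mutex_t	mtx;
	pthread_cond_t	cv;
	int		backlog;	/* analogue of hmp->inode_reclaims */
	int		limit;		/* analogue of hammer_limit_reclaim */
};

/* Producer side: called before adding another reclaimable item. */
static void
throttle_enter(struct reclaim_throttle *t)
{
	pthread_mutex_lock(&t->mtx);
	while (t->backlog >= t->limit)
		pthread_cond_wait(&t->cv, &t->mtx);
	++t->backlog;
	pthread_mutex_unlock(&t->mtx);
}

/* Consumer side: called after an item has been reclaimed. */
static void
throttle_exit(struct reclaim_throttle *t)
{
	pthread_mutex_lock(&t->mtx);
	--t->backlog;
	pthread_cond_signal(&t->cv);
	pthread_mutex_unlock(&t->mtx);
}
#endif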
3195
3196 /*
3197  * Keep track of reclaim statistics on a per-pid basis using a loose
3198  * 4-way set associative hash table.  Collisions inherit the count of
3199  * the previous entry.
3200  *
3201  * NOTE: We want to be careful here to limit the chain size.  If the chain
3202  *       size is too large, a pid will spread its stats out over too many
3203  *       entries under certain types of heavy filesystem activity and
3204  *       wind up not delaying long enough.
3205  */
3206 static
3207 struct hammer_inostats *
3208 hammer_inode_inostats(hammer_mount_t hmp, pid_t pid)
3209 {
3210         struct hammer_inostats *stats;
3211         int delta;
3212         int chain;
3213         static volatile int iterator;   /* we don't care about MP races */
3214
3215         /*
3216          * Chain up to 4 times to find our entry.
3217          */
3218         for (chain = 0; chain < 4; ++chain) {
3219                 stats = &hmp->inostats[(pid + chain) & HAMMER_INOSTATS_HMASK];
3220                 if (stats->pid == pid)
3221                         break;
3222         }
3223
3224         /*
3225          * Replace one of the four chaining entries with our new entry.
3226          */
3227         if (chain == 4) {
3228                 stats = &hmp->inostats[(pid + (iterator++ & 3)) &
3229                                        HAMMER_INOSTATS_HMASK];
3230                 stats->pid = pid;
3231         }
3232
3233         /*
3234          * Decay the entry
3235          */
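        /*
         * e.g. with hz = 100, an entry idle for one second (delta of
         * 100 ticks) decays to half: count * 100 / (100 + 100).  An
         * entry idle for more than 60 seconds is zeroed outright.
         */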
3236         if (stats->count && stats->ltick != ticks) {
3237                 delta = ticks - stats->ltick;
3238                 stats->ltick = ticks;
3239                 if (delta <= 0 || delta > hz * 60)
3240                         stats->count = 0;
3241                 else
3242                         stats->count = stats->count * hz / (hz + delta);
3243         }
3244         if (hammer_debug_general & 0x10000)
3245                 kprintf("pid %5d stats %d\n", (int)pid, stats->count);
3246         return (stats);
3247 }
3248
3249 #if 0
3250
3251 /*
3252  * XXX not used, doesn't work very well due to the large batching nature
3253  * of flushes.
3254  *
3255  * A larger-than-normal backlog of inodes is sitting in the flusher,
3256  * enforce a general slowdown to let it catch up.  This routine is only
3257  * called on completion of a non-flusher-related transaction which
3258  * performed B-Tree node I/O.
3259  *
3260  * It is possible for the flusher to stall in a continuous load.
3261  * blogbench -i1000 -o seems to do a good job generating this sort of load.
3262  * If the flusher is unable to catch up the inode count can bloat until
3263  * we run out of kvm.
3264  *
3265  * This is a bit of a hack.
3266  */
3267 void
3268 hammer_inode_waithard(hammer_mount_t hmp)
3269 {
3270         /*
3271          * Hysteresis.
3272          */
3273         if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
3274                 if (hmp->inode_reclaims < hammer_limit_reclaim / 2 &&
3275                     hmp->count_iqueued < hmp->count_inodes / 20) {
3276                         hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
3277                         return;
3278                 }
3279         } else {
3280                 if (hmp->inode_reclaims < hammer_limit_reclaim ||
3281                     hmp->count_iqueued < hmp->count_inodes / 10) {
3282                         return;
3283                 }
3284                 hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
3285         }
3286
3287         /*
3288          * Block for one flush cycle.
3289          */
3290         hammer_flusher_wait_next(hmp);
3291 }
3292
3293 #endif