[dragonfly.git] / sys/vfs/hammer/hammer_inode.c
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  * 
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  * 
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  * 
34  * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.92 2008/07/03 04:24:51 dillon Exp $
35  */
36
37 #include "hammer.h"
38 #include <vm/vm_extern.h>
39 #include <sys/buf.h>
40 #include <sys/buf2.h>
41
42 static int      hammer_unload_inode(struct hammer_inode *ip);
43 static void     hammer_free_inode(hammer_inode_t ip);
44 static void     hammer_flush_inode_core(hammer_inode_t ip, int flags);
45 static int      hammer_setup_child_callback(hammer_record_t rec, void *data);
46 static int      hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
47 static int      hammer_setup_parent_inodes(hammer_inode_t ip);
48 static int      hammer_setup_parent_inodes_helper(hammer_record_t record);
49 static void     hammer_inode_wakereclaims(hammer_inode_t ip);
50
51 #ifdef DEBUG_TRUNCATE
52 extern struct hammer_inode *HammerTruncIp;
53 #endif
54
55 /*
56  * RB-Tree support for inode structures
57  */
58 int
59 hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
60 {
61         if (ip1->obj_localization < ip2->obj_localization)
62                 return(-1);
63         if (ip1->obj_localization > ip2->obj_localization)
64                 return(1);
65         if (ip1->obj_id < ip2->obj_id)
66                 return(-1);
67         if (ip1->obj_id > ip2->obj_id)
68                 return(1);
69         if (ip1->obj_asof < ip2->obj_asof)
70                 return(-1);
71         if (ip1->obj_asof > ip2->obj_asof)
72                 return(1);
73         return(0);
74 }
75
76 /*
77  * RB-Tree support for inode structures / special LOOKUP_INFO
78  */
79 static int
80 hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
81 {
82         if (info->obj_localization < ip->obj_localization)
83                 return(-1);
84         if (info->obj_localization > ip->obj_localization)
85                 return(1);
86         if (info->obj_id < ip->obj_id)
87                 return(-1);
88         if (info->obj_id > ip->obj_id)
89                 return(1);
90         if (info->obj_asof < ip->obj_asof)
91                 return(-1);
92         if (info->obj_asof > ip->obj_asof)
93                 return(1);
94         return(0);
95 }
96
97 /*
98  * Used by hammer_scan_inode_snapshots() to locate all of an object's
99  * snapshots.  Note that the asof field is not tested, which we can get
100  * away with because it is the lowest-priority field.
101  */
102 static int
103 hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
104 {
105         hammer_inode_info_t info = data;
106
107         if (ip->obj_localization > info->obj_localization)
108                 return(1);
109         if (ip->obj_localization < info->obj_localization)
110                 return(-1);
111         if (ip->obj_id > info->obj_id)
112                 return(1);
113         if (ip->obj_id < info->obj_id)
114                 return(-1);
115         return(0);
116 }
117
118 /*
119  * RB-Tree support for pseudofs structures
120  */
121 static int
122 hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
123 {
124         if (p1->localization < p2->localization)
125                 return(-1);
126         if (p1->localization > p2->localization)
127                 return(1);
128         return(0);
129 }
130
131
132 RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
133 RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
134                 hammer_inode_info_cmp, hammer_inode_info_t);
135 RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
136              hammer_pfs_rb_compare, u_int32_t, localization);
137
138 /*
139  * The kernel is not actively referencing this vnode but is still holding
140  * it cached.
141  *
142  * This is called from the frontend.
143  */
144 int
145 hammer_vop_inactive(struct vop_inactive_args *ap)
146 {
147         struct hammer_inode *ip = VTOI(ap->a_vp);
148
149         /*
150          * Degenerate case
151          */
152         if (ip == NULL) {
153                 vrecycle(ap->a_vp);
154                 return(0);
155         }
156
157         /*
158          * If the inode no longer has visibility in the filesystem try to
159          * recycle it immediately, even if the inode is dirty.  Recycling
160          * it quickly allows the system to reclaim buffer cache and VM
161          * resources which can matter a lot in a heavily loaded system.
162          *
163          * This can deadlock in vfsync() if we aren't careful.
164          * 
165          * Do not queue the inode to the flusher if we still have visibility,
166          * otherwise namespace calls such as chmod will unnecessarily generate
167          * multiple inode updates.
168          */
169         hammer_inode_unloadable_check(ip, 0);
170         if (ip->ino_data.nlinks == 0) {
171                 if (ip->flags & HAMMER_INODE_MODMASK)
172                         hammer_flush_inode(ip, 0);
173                 vrecycle(ap->a_vp);
174         }
175         return(0);
176 }
177
178 /*
179  * Release the vnode association.  This is typically (but not always)
180  * the last reference on the inode.
181  *
182  * Once the association is lost we are on our own with regards to
183  * flushing the inode.
184  */
185 int
186 hammer_vop_reclaim(struct vop_reclaim_args *ap)
187 {
188         struct hammer_inode *ip;
189         hammer_mount_t hmp;
190         struct vnode *vp;
191
192         vp = ap->a_vp;
193
194         if ((ip = vp->v_data) != NULL) {
195                 hmp = ip->hmp;
196                 vp->v_data = NULL;
197                 ip->vp = NULL;
198
199                 if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
200                         ++hammer_count_reclaiming;
201                         ++hmp->inode_reclaims;
202                         ip->flags |= HAMMER_INODE_RECLAIM;
203                         if (hmp->inode_reclaims > HAMMER_RECLAIM_FLUSH &&
204                             (hmp->inode_reclaims & 255) == 0) {
205                                 hammer_flusher_async(hmp);
206                         }
207                 }
208                 hammer_rel_inode(ip, 1);
209         }
210         return(0);
211 }
212
213 /*
214  * Return a locked vnode for the specified inode.  The inode must be
215  * referenced but NOT LOCKED on entry and will remain referenced on
216  * return.
217  *
218  * Called from the frontend.
219  */
220 int
221 hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
222 {
223         hammer_mount_t hmp;
224         struct vnode *vp;
225         int error = 0;
226
227         hmp = ip->hmp;
228
229         for (;;) {
230                 if ((vp = ip->vp) == NULL) {
231                         error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
232                         if (error)
233                                 break;
234                         hammer_lock_ex(&ip->lock);
235                         if (ip->vp != NULL) {
236                                 hammer_unlock(&ip->lock);
237                                 vp->v_type = VBAD;
238                                 vx_put(vp);
239                                 continue;
240                         }
241                         hammer_ref(&ip->lock);
242                         vp = *vpp;
243                         ip->vp = vp;
244                         vp->v_type =
245                                 hammer_get_vnode_type(ip->ino_data.obj_type);
246
247                         hammer_inode_wakereclaims(ip);
248
249                         switch(ip->ino_data.obj_type) {
250                         case HAMMER_OBJTYPE_CDEV:
251                         case HAMMER_OBJTYPE_BDEV:
252                                 vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
253                                 addaliasu(vp, ip->ino_data.rmajor,
254                                           ip->ino_data.rminor);
255                                 break;
256                         case HAMMER_OBJTYPE_FIFO:
257                                 vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
258                                 break;
259                         default:
260                                 break;
261                         }
262
263                         /*
264                          * Only mark as the root vnode if the ip is not
265                          * historical, otherwise the VFS cache will get
266                          * confused.  The other half of the special handling
267                          * is in hammer_vop_nlookupdotdot().
268                          *
269                          * Pseudo-filesystem roots also do not count.
270                          */
271                         if (ip->obj_id == HAMMER_OBJID_ROOT &&
272                             ip->obj_asof == hmp->asof &&
273                             ip->obj_localization == 0) {
274                                 vp->v_flag |= VROOT;
275                         }
276
277                         vp->v_data = (void *)ip;
278                         /* vnode locked by getnewvnode() */
279                         /* make related vnode dirty if inode dirty? */
280                         hammer_unlock(&ip->lock);
281                         if (vp->v_type == VREG)
282                                 vinitvmio(vp, ip->ino_data.size);
283                         break;
284                 }
285
286                 /*
287                  * loop if the vget fails (aka races), or if the vp
288                  * no longer matches ip->vp.
289                  */
290                 if (vget(vp, LK_EXCLUSIVE) == 0) {
291                         if (vp == ip->vp)
292                                 break;
293                         vput(vp);
294                 }
295         }
296         *vpp = vp;
297         return(error);
298 }
299
300 /*
301  * Locate all copies of the inode for obj_id compatible with the specified
302  * asof, reference each one, and issue the related call-back.  This routine
303  * is used for direct-io invalidation and does not create any new inodes.
304  */
305 void
306 hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
307                             int (*callback)(hammer_inode_t ip, void *data),
308                             void *data)
309 {
310         hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
311                                    hammer_inode_info_cmp_all_history,
312                                    callback, iinfo);
313 }
314
315 /*
316  * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
317  * do not attach or detach the related vnode (use hammer_get_vnode() for
318  * that).
319  *
320  * The flags argument is only applied for newly created inodes, and only
321  * certain flags are inherited.
322  *
323  * Called from the frontend.
324  */
325 struct hammer_inode *
326 hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
327                  u_int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
328                  int flags, int *errorp)
329 {
330         hammer_mount_t hmp = trans->hmp;
331         struct hammer_inode_info iinfo;
332         struct hammer_cursor cursor;
333         struct hammer_inode *ip;
334
335
336         /*
337          * Determine if we already have an inode cached.  If we do then
338          * we are golden.
339          */
340         iinfo.obj_id = obj_id;
341         iinfo.obj_asof = asof;
342         iinfo.obj_localization = localization;
343 loop:
344         ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
345         if (ip) {
346                 hammer_ref(&ip->lock);
347                 *errorp = 0;
348                 return(ip);
349         }
350
351         /*
352          * Allocate a new inode structure and deal with races later.
353          */
354         ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
355         ++hammer_count_inodes;
356         ++hmp->count_inodes;
357         ip->obj_id = obj_id;
358         ip->obj_asof = iinfo.obj_asof;
359         ip->obj_localization = localization;
360         ip->hmp = hmp;
361         ip->flags = flags & HAMMER_INODE_RO;
362         ip->cache[0].ip = ip;
363         ip->cache[1].ip = ip;
364         if (hmp->ronly)
365                 ip->flags |= HAMMER_INODE_RO;
366         ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
367                 0x7FFFFFFFFFFFFFFFLL;
368         RB_INIT(&ip->rec_tree);
369         TAILQ_INIT(&ip->target_list);
370         hammer_ref(&ip->lock);
371
372         /*
373          * Locate the on-disk inode.
374          */
375 retry:
376         hammer_init_cursor(trans, &cursor, (dip ? &dip->cache[0] : NULL), NULL);
377         cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
378         cursor.key_beg.obj_id = ip->obj_id;
379         cursor.key_beg.key = 0;
380         cursor.key_beg.create_tid = 0;
381         cursor.key_beg.delete_tid = 0;
382         cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
383         cursor.key_beg.obj_type = 0;
384         cursor.asof = iinfo.obj_asof;
385         cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
386                        HAMMER_CURSOR_ASOF;
387
388         *errorp = hammer_btree_lookup(&cursor);
389         if (*errorp == EDEADLK) {
390                 hammer_done_cursor(&cursor);
391                 goto retry;
392         }
393
394         /*
395          * On success the B-Tree lookup will hold the appropriate
396          * buffer cache buffers and provide a pointer to the requested
397          * information.  Copy the information to the in-memory inode
398          * and cache the B-Tree node to improve future operations.
399          */
400         if (*errorp == 0) {
401                 ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
402                 ip->ino_data = cursor.data->inode;
403
404                 /*
405                  * cache[0] tries to cache the location of the object inode.
406                  * The assumption is that it is near the directory inode.
407                  *
408                  * cache[1] tries to cache the location of the object data.
409                  * The assumption is that it is near the directory data.
410                  */
411                 hammer_cache_node(&ip->cache[0], cursor.node);
412                 if (dip && dip->cache[1].node)
413                         hammer_cache_node(&ip->cache[1], dip->cache[1].node);
414
415                 /*
416                  * The file should not contain any data past the file size
417                  * stored in the inode.  Setting save_trunc_off to the
418                  * file size instead of max reduces B-Tree lookup overheads
419                  * on append by allowing the flusher to avoid checking for
420                  * record overwrites.
421                  */
422                 ip->save_trunc_off = ip->ino_data.size;
423
424                 /*
425                  * Locate and assign the pseudofs management structure to
426                  * the inode.
427                  */
428                 if (dip && dip->obj_localization == ip->obj_localization) {
429                         ip->pfsm = dip->pfsm;
430                         hammer_ref(&ip->pfsm->lock);
431                 } else {
432                         *errorp = hammer_load_pseudofs(trans, ip);
433                 }
434         }
435
436         /*
437          * The inode is placed on the red-black tree and will be synced to
438          * the media when flushed or by the filesystem sync.  If this races
439          * another instantiation/lookup the insertion will fail.
440          */
441         if (*errorp == 0) {
442                 if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
443                         hammer_free_inode(ip);
444                         hammer_done_cursor(&cursor);
445                         goto loop;
446                 }
447                 ip->flags |= HAMMER_INODE_ONDISK;
448         } else {
449                 if (ip->flags & HAMMER_INODE_RSV_INODES) {
450                         ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
451                         --hmp->rsv_inodes;
452                 }
453
454                 hammer_free_inode(ip);
455                 ip = NULL;
456         }
457         hammer_done_cursor(&cursor);
458         return (ip);
459 }
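/*
 * Illustrative sketch only (hypothetical caller, not part of this file):
 * a frontend lookup typically obtains a referenced, unlocked inode with
 * hammer_get_inode(), attaches a vnode with hammer_get_vnode(), and then
 * drops its own inode reference (the vnode holds a reference of its own).
 * The transaction setup, the obj_id/asof/localization values and the error
 * handling are simplified assumptions for illustration.
 */
#if 0
static int
hammer_example_lookup(hammer_inode_t dip, u_int64_t obj_id,
                      hammer_tid_t asof, u_int32_t localization,
                      struct vnode **vpp)
{
        struct hammer_transaction trans;
        struct hammer_inode *ip;
        int error;

        hammer_simple_transaction(&trans, dip->hmp);

        /* referenced but not locked on success, NULL on error */
        ip = hammer_get_inode(&trans, dip, obj_id, asof, localization,
                              dip->flags & HAMMER_INODE_RO, &error);
        if (ip) {
                error = hammer_get_vnode(ip, vpp);      /* *vpp locked */
                hammer_rel_inode(ip, 0);
        }
        hammer_done_transaction(&trans);
        return (error);
}
#endif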
460
461 /*
462  * Create a new filesystem object, returning the inode in *ipp.  The
463  * returned inode will be referenced.
464  *
465  * The inode is created in-memory.
466  */
467 int
468 hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
469                     struct ucred *cred, hammer_inode_t dip,
470                     int pseudofs, struct hammer_inode **ipp)
471 {
472         hammer_mount_t hmp;
473         hammer_inode_t ip;
474         uid_t xuid;
475         u_int32_t localization;
476         int error;
477
478         hmp = trans->hmp;
479
480         /*
481          * Assign the localization domain.  If dip is NULL we are creating
482          * a pseudo-fs and must locate an unused localization domain.
483          */
484         if (pseudofs) {
485                 for (localization = HAMMER_DEF_LOCALIZATION;
486                      localization < HAMMER_LOCALIZE_PSEUDOFS_MASK;
487                      localization += HAMMER_LOCALIZE_PSEUDOFS_INC) {
488                         ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
489                                               hmp->asof, localization,
490                                               0, &error);
491                         if (ip == NULL) {
492                                 if (error != ENOENT)
493                                         return(error);
494                                 break;
495                         }
496                         if (ip)
497                                 hammer_rel_inode(ip, 0);
498                 }
499         } else {
500                 localization = dip->obj_localization;
501         }
502
503         ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
504         ++hammer_count_inodes;
505         ++hmp->count_inodes;
506
507         /*
508          * Allocate a new object id.  If creating a new pseudo-fs the
509          * obj_id is 1.
510          */
511         if (pseudofs)
512                 ip->obj_id = HAMMER_OBJID_ROOT;
513         else
514                 ip->obj_id = hammer_alloc_objid(hmp, dip);
515         ip->obj_localization = localization;
516
517         KKASSERT(ip->obj_id != 0);
518         ip->obj_asof = hmp->asof;
519         ip->hmp = hmp;
520         ip->flush_state = HAMMER_FST_IDLE;
521         ip->flags = HAMMER_INODE_DDIRTY |
522                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
523         ip->cache[0].ip = ip;
524         ip->cache[1].ip = ip;
525
526         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
527         /* ip->save_trunc_off = 0; (already zero) */
528         RB_INIT(&ip->rec_tree);
529         TAILQ_INIT(&ip->target_list);
530
531         ip->ino_data.atime = trans->time;
532         ip->ino_data.mtime = trans->time;
533         ip->ino_data.size = 0;
534         ip->ino_data.nlinks = 0;
535
536         /*
537          * A nohistory designator on the parent directory is inherited by
538          * the child.  We will do this even for pseudo-fs creation... the
539          * sysad can turn it off.
540          */
541         ip->ino_data.uflags = dip->ino_data.uflags &
542                               (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
543
544         ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
545         ip->ino_leaf.base.localization = ip->obj_localization +
546                                          HAMMER_LOCALIZE_INODE;
547         ip->ino_leaf.base.obj_id = ip->obj_id;
548         ip->ino_leaf.base.key = 0;
549         ip->ino_leaf.base.create_tid = 0;
550         ip->ino_leaf.base.delete_tid = 0;
551         ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
552         ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);
553
554         ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
555         ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
556         ip->ino_data.mode = vap->va_mode;
557         ip->ino_data.ctime = trans->time;
558
559         /*
560          * Set up the ".." pointer.  This only needs to be done for directories
561          * but we do it for all objects as a recovery aid.
562          *
563          * The parent_obj_localization field only applies to pseudo-fs roots.
564          */
565         ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
566         if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
567             ip->obj_id == HAMMER_OBJID_ROOT) {
568                 ip->ino_data.ext.obj.parent_obj_localization = 
569                                                 dip->obj_localization;
570         }
571
572         switch(ip->ino_leaf.base.obj_type) {
573         case HAMMER_OBJTYPE_CDEV:
574         case HAMMER_OBJTYPE_BDEV:
575                 ip->ino_data.rmajor = vap->va_rmajor;
576                 ip->ino_data.rminor = vap->va_rminor;
577                 break;
578         default:
579                 break;
580         }
581
582         /*
583          * Calculate default uid/gid and overwrite with information from
584          * the vap.
585          */
586         xuid = hammer_to_unix_xid(&dip->ino_data.uid);
587         xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
588                                      &vap->va_mode);
589         ip->ino_data.mode = vap->va_mode;
590
591         if (vap->va_vaflags & VA_UID_UUID_VALID)
592                 ip->ino_data.uid = vap->va_uid_uuid;
593         else if (vap->va_uid != (uid_t)VNOVAL)
594                 hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
595         else
596                 hammer_guid_to_uuid(&ip->ino_data.uid, xuid);
597
598         if (vap->va_vaflags & VA_GID_UUID_VALID)
599                 ip->ino_data.gid = vap->va_gid_uuid;
600         else if (vap->va_gid != (gid_t)VNOVAL)
601                 hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
602         else
603                 ip->ino_data.gid = dip->ino_data.gid;
604
605         hammer_ref(&ip->lock);
606
607         if (dip->obj_localization == ip->obj_localization) {
608                 ip->pfsm = dip->pfsm;
609                 hammer_ref(&ip->pfsm->lock);
610                 error = 0;
611         } else {
612                 error = hammer_load_pseudofs(trans, ip);
613         }
614
615         if (error) {
616                 hammer_free_inode(ip);
617                 ip = NULL;
618         } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
619                 panic("hammer_create_inode: duplicate obj_id %llx", (long long)ip->obj_id);
620                 /* not reached */
621                 hammer_free_inode(ip);
622         }
623         *ipp = ip;
624         return(error);
625 }
626
627 /*
628  * Final cleanup / freeing of an inode structure
629  */
630 static void
631 hammer_free_inode(hammer_inode_t ip)
632 {
633         KKASSERT(ip->lock.refs == 1);
634         hammer_uncache_node(&ip->cache[0]);
635         hammer_uncache_node(&ip->cache[1]);
636         hammer_inode_wakereclaims(ip);
637         if (ip->objid_cache)
638                 hammer_clear_objid(ip);
639         --hammer_count_inodes;
640         --ip->hmp->count_inodes;
641         if (ip->pfsm) {
642                 hammer_rel_pseudofs(ip->hmp, ip->pfsm);
643                 ip->pfsm = NULL;
644         }
645         kfree(ip, M_HAMMER);
646         ip = NULL;
647 }
648
649 /*
650  * Retrieve pseudo-fs data.
651  */
652 int
653 hammer_load_pseudofs(hammer_transaction_t trans, hammer_inode_t ip)
654 {
655         hammer_mount_t hmp = trans->hmp;
656         hammer_pseudofs_inmem_t pfsm;
657         struct hammer_cursor cursor;
658         int error;
659         int bytes;
660
661 retry:
662         pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root,
663                          ip->obj_localization);
664         if (pfsm) {
665                 KKASSERT(ip->pfsm == NULL);
666                 ip->pfsm = pfsm;
667                 hammer_ref(&pfsm->lock);
668                 return(0);
669         }
670
671         pfsm = kmalloc(sizeof(*pfsm), M_HAMMER, M_WAITOK | M_ZERO);
672         pfsm->localization = ip->obj_localization;
673
674         hammer_init_cursor(trans, &cursor, NULL, NULL);
675         cursor.key_beg.localization = ip->obj_localization +
676                                       HAMMER_LOCALIZE_MISC;
677         cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
678         cursor.key_beg.create_tid = 0;
679         cursor.key_beg.delete_tid = 0;
680         cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
681         cursor.key_beg.obj_type = 0;
682         cursor.key_beg.key = HAMMER_FIXKEY_PSEUDOFS;
683         cursor.asof = HAMMER_MAX_TID;
684         cursor.flags |= HAMMER_CURSOR_ASOF;
685
686         error = hammer_btree_lookup(&cursor);
687         if (error == 0) {
688                 error = hammer_btree_extract(&cursor, HAMMER_CURSOR_GET_DATA);
689                 if (error == 0) {
690                         bytes = cursor.leaf->data_len;
691                         if (bytes > sizeof(pfsm->pfsd))
692                                 bytes = sizeof(pfsm->pfsd);
693                         bcopy(cursor.data, &pfsm->pfsd, bytes);
694                 }
695         } else if (error == ENOENT) {
696                 error = 0;
697         }
698
699         hammer_done_cursor(&cursor);
700
701         if (error == 0) {
702                 hammer_ref(&pfsm->lock);
703                 if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
704                         kfree(pfsm, M_HAMMER);
705                         goto retry;
706                 }
707                 ip->pfsm = pfsm;
708
709                 /*
710                  * Certain aspects of the pseudofs configuration are reflected
711                  * in the inode.
712                  */
713                 if (pfsm->pfsd.mirror_flags & HAMMER_PFSD_SLAVE) {
714                         ip->flags |= HAMMER_INODE_RO;
715                         ip->flags |= HAMMER_INODE_PFSD;
716                         if (ip->obj_asof > pfsm->pfsd.sync_beg_tid)
717                                 ip->obj_asof = pfsm->pfsd.sync_beg_tid;
718                 } else if (pfsm->pfsd.master_id >= 0) {
719                         ip->flags |= HAMMER_INODE_PFSD;
720                 }
721         } else {
722                 kfree(pfsm, M_HAMMER);
723         }
724         return(error);
725 }
726
727 /*
728  * Store pseudo-fs data.  The backend will automatically delete any prior
729  * on-disk pseudo-fs data but we have to delete in-memory versions.
730  */
731 int
732 hammer_save_pseudofs(hammer_transaction_t trans, hammer_inode_t ip)
733 {
734         struct hammer_cursor cursor;
735         hammer_pseudofs_inmem_t pfsm;
736         hammer_record_t record;
737         int error;
738
739 retry:
740         pfsm = ip->pfsm;
741         hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
742         cursor.key_beg.localization = ip->obj_localization +
743                                       HAMMER_LOCALIZE_MISC;
744         cursor.key_beg.obj_id = ip->obj_id;
745         cursor.key_beg.create_tid = 0;
746         cursor.key_beg.delete_tid = 0;
747         cursor.key_beg.rec_type = HAMMER_RECTYPE_FIX;
748         cursor.key_beg.obj_type = 0;
749         cursor.key_beg.key = HAMMER_FIXKEY_PSEUDOFS;
750         cursor.asof = HAMMER_MAX_TID;
751         cursor.flags |= HAMMER_CURSOR_ASOF;
752
753         error = hammer_ip_lookup(&cursor);
754         if (error == 0 && hammer_cursor_inmem(&cursor)) {
755                 record = cursor.iprec;
756                 if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
757                         KKASSERT(cursor.deadlk_rec == NULL);
758                         hammer_ref(&record->lock);
759                         cursor.deadlk_rec = record;
760                         error = EDEADLK;
761                 } else {
762                         record->flags |= HAMMER_RECF_DELETED_FE;
763                         error = 0;
764                 }
765         }
766         if (error == 0 || error == ENOENT) {
767                 record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
768                 record->type = HAMMER_MEM_RECORD_GENERAL;
769
770                 record->leaf.base.localization = ip->obj_localization +
771                                                  HAMMER_LOCALIZE_MISC;
772                 record->leaf.base.rec_type = HAMMER_RECTYPE_FIX;
773                 record->leaf.base.key = HAMMER_FIXKEY_PSEUDOFS;
774                 record->leaf.data_len = sizeof(pfsm->pfsd);
775                 bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
776                 error = hammer_ip_add_record(trans, record);
777         }
778         hammer_done_cursor(&cursor);
779         if (error == EDEADLK)
780                 goto retry;
781         if (error == 0) {
782                 /*
783                  * Certain aspects of the pseudofs configuration are reflected
784                  * in the inode.  Note that we cannot mess with the as-of or
785                  * clear the read-only state.
786                  *
787                  * If this inode represented a slave snapshot its asof will
788                  * be set to a snapshot tid.  When clearing slave mode any
789                  * re-access of the inode via the parent directory will
790                  * wind up using a different asof and thus will instantiate
791                  * a new inode.
792                  */
793                 if (pfsm->pfsd.mirror_flags & HAMMER_PFSD_SLAVE) {
794                         ip->flags |= HAMMER_INODE_RO;
795                         ip->flags |= HAMMER_INODE_PFSD;
796                 } else if (pfsm->pfsd.master_id >= 0) {
797                         ip->flags |= HAMMER_INODE_PFSD;
798                 } else {
799                         ip->flags &= ~HAMMER_INODE_PFSD;
800                 }
801         }
802         return(error);
803 }
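/*
 * Illustrative sketch only (hypothetical, not part of this file): the
 * usual way the pseudo-fs configuration is changed.  hammer_load_pseudofs()
 * attached ip->pfsm when the inode was instantiated; a configuration update
 * edits the in-memory pfsd and writes it back with hammer_save_pseudofs()
 * from within a transaction.  The master_id field and the calling context
 * are assumptions chosen for illustration.
 */
#if 0
static int
hammer_example_set_master(hammer_transaction_t trans, hammer_inode_t ip,
                          int master_id)
{
        hammer_pseudofs_inmem_t pfsm = ip->pfsm;

        pfsm->pfsd.master_id = master_id;         /* modify in-memory copy */
        return (hammer_save_pseudofs(trans, ip)); /* rewrite media record */
}
#endif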
804
805 void
806 hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
807 {
808         hammer_unref(&pfsm->lock);
809         if (pfsm->lock.refs == 0) {
810                 RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
811                 kfree(pfsm, M_HAMMER);
812         }
813 }
814
815 /*
816  * Called by hammer_sync_inode().
817  */
818 static int
819 hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
820 {
821         hammer_transaction_t trans = cursor->trans;
822         hammer_record_t record;
823         int error;
824         int redirty;
825
826 retry:
827         error = 0;
828
829         /*
830          * If the inode has a presence on-disk then locate it and mark
831          * it deleted, setting DELONDISK.
832          *
833          * The record may or may not be physically deleted, depending on
834          * the retention policy.
835          */
836         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
837             HAMMER_INODE_ONDISK) {
838                 hammer_normalize_cursor(cursor);
839                 cursor->key_beg.localization = ip->obj_localization + 
840                                                HAMMER_LOCALIZE_INODE;
841                 cursor->key_beg.obj_id = ip->obj_id;
842                 cursor->key_beg.key = 0;
843                 cursor->key_beg.create_tid = 0;
844                 cursor->key_beg.delete_tid = 0;
845                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
846                 cursor->key_beg.obj_type = 0;
847                 cursor->asof = ip->obj_asof;
848                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
849                 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
850                 cursor->flags |= HAMMER_CURSOR_BACKEND;
851
852                 error = hammer_btree_lookup(cursor);
853                 if (hammer_debug_inode)
854                         kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
855                 if (error) {
856                         kprintf("error %d\n", error);
857                         Debugger("hammer_update_inode");
858                 }
859
860                 if (error == 0) {
861                         error = hammer_ip_delete_record(cursor, ip, trans->tid);
862                         if (hammer_debug_inode)
863                                 kprintf(" error %d\n", error);
864                         if (error && error != EDEADLK) {
865                                 kprintf("error %d\n", error);
866                                 Debugger("hammer_update_inode2");
867                         }
868                         if (error == 0) {
869                                 ip->flags |= HAMMER_INODE_DELONDISK;
870                         }
871                         if (cursor->node)
872                                 hammer_cache_node(&ip->cache[0], cursor->node);
873                 }
874                 if (error == EDEADLK) {
875                         hammer_done_cursor(cursor);
876                         error = hammer_init_cursor(trans, cursor,
877                                                    &ip->cache[0], ip);
878                         if (hammer_debug_inode)
879                                 kprintf("IPDED %p %d\n", ip, error);
880                         if (error == 0)
881                                 goto retry;
882                 }
883         }
884
885         /*
886          * Ok, write out the initial record or a new record (after deleting
887          * the old one), unless the DELETED flag is set.  This routine will
888          * clear DELONDISK if it writes out a record.
889          *
890          * Update our inode statistics if this is the first application of
891          * the inode on-disk.
892          */
893         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
894                 /*
895                  * Generate a record and write it to the media
896                  */
897                 record = hammer_alloc_mem_record(ip, 0);
898                 record->type = HAMMER_MEM_RECORD_INODE;
899                 record->flush_state = HAMMER_FST_FLUSH;
900                 record->leaf = ip->sync_ino_leaf;
901                 record->leaf.base.create_tid = trans->tid;
902                 record->leaf.data_len = sizeof(ip->sync_ino_data);
903                 record->leaf.create_ts = trans->time32;
904                 record->data = (void *)&ip->sync_ino_data;
905                 record->flags |= HAMMER_RECF_INTERLOCK_BE;
906
907                 /*
908                  * If this flag is set we cannot sync the new file size
909                  * because we haven't finished related truncations.  The
910                  * inode will be flushed in another flush group to finish
911                  * the job.
912                  */
913                 if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
914                     ip->sync_ino_data.size != ip->ino_data.size) {
915                         redirty = 1;
916                         ip->sync_ino_data.size = ip->ino_data.size;
917                 } else {
918                         redirty = 0;
919                 }
920
921                 for (;;) {
922                         error = hammer_ip_sync_record_cursor(cursor, record);
923                         if (hammer_debug_inode)
924                                 kprintf("GENREC %p rec %08x %d\n",      
925                                         ip, record->flags, error);
926                         if (error != EDEADLK)
927                                 break;
928                         hammer_done_cursor(cursor);
929                         error = hammer_init_cursor(trans, cursor,
930                                                    &ip->cache[0], ip);
931                         if (hammer_debug_inode)
932                                 kprintf("GENREC reinit %d\n", error);
933                         if (error)
934                                 break;
935                 }
936                 if (error) {
937                         kprintf("error %d\n", error);
938                         Debugger("hammer_update_inode3");
939                 }
940
941                 /*
942                  * The record isn't managed by the inode's record tree,
943                  * destroy it whether we succeed or fail.
944                  */
945                 record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
946                 record->flags |= HAMMER_RECF_DELETED_FE;
947                 record->flush_state = HAMMER_FST_IDLE;
948                 hammer_rel_mem_record(record);
949
950                 /*
951                  * Finish up.
952                  */
953                 if (error == 0) {
954                         if (hammer_debug_inode)
955                                 kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
956                         ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
957                                             HAMMER_INODE_ATIME |
958                                             HAMMER_INODE_MTIME);
959                         ip->flags &= ~HAMMER_INODE_DELONDISK;
960                         if (redirty)
961                                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
962
963                         /*
964                          * Root volume count of inodes
965                          */
966                         if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
967                                 hammer_modify_volume_field(trans,
968                                                            trans->rootvol,
969                                                            vol0_stat_inodes);
970                                 ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
971                                 hammer_modify_volume_done(trans->rootvol);
972                                 ip->flags |= HAMMER_INODE_ONDISK;
973                                 if (hammer_debug_inode)
974                                         kprintf("NOWONDISK %p\n", ip);
975                         }
976                 }
977         }
978
979         /*
980          * If the inode has been destroyed, clean out any left-over flags
981          * that may have been set by the frontend.
982          */
983         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) { 
984                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
985                                     HAMMER_INODE_ATIME |
986                                     HAMMER_INODE_MTIME);
987         }
988         return(error);
989 }
990
991 /*
992  * Update only the itimes fields.
993  *
994  * ATIME can be updated without generating any UNDO.  MTIME is updated
995  * with UNDO so it is guaranteed to be synchronized properly in case of
996  * a crash.
997  *
998  * Neither field is included in the B-Tree leaf element's CRC, which is how
999  * we can get away with updating ATIME the way we do.
1000  */
1001 static int
1002 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
1003 {
1004         hammer_transaction_t trans = cursor->trans;
1005         int error;
1006
1007 retry:
1008         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
1009             HAMMER_INODE_ONDISK) {
1010                 return(0);
1011         }
1012
1013         hammer_normalize_cursor(cursor);
1014         cursor->key_beg.localization = ip->obj_localization + 
1015                                        HAMMER_LOCALIZE_INODE;
1016         cursor->key_beg.obj_id = ip->obj_id;
1017         cursor->key_beg.key = 0;
1018         cursor->key_beg.create_tid = 0;
1019         cursor->key_beg.delete_tid = 0;
1020         cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
1021         cursor->key_beg.obj_type = 0;
1022         cursor->asof = ip->obj_asof;
1023         cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1024         cursor->flags |= HAMMER_CURSOR_ASOF;
1025         cursor->flags |= HAMMER_CURSOR_GET_LEAF;
1026         cursor->flags |= HAMMER_CURSOR_GET_DATA;
1027         cursor->flags |= HAMMER_CURSOR_BACKEND;
1028
1029         error = hammer_btree_lookup(cursor);
1030         if (error) {
1031                 kprintf("error %d\n", error);
1032                 Debugger("hammer_update_itimes1");
1033         }
1034         if (error == 0) {
1035                 hammer_cache_node(&ip->cache[0], cursor->node);
1036                 if (ip->sync_flags & HAMMER_INODE_MTIME) {
1037                         /*
1038                          * Updating MTIME requires an UNDO.  Just cover
1039                          * both atime and mtime.
1040                          */
1041                         hammer_modify_buffer(trans, cursor->data_buffer,
1042                                      HAMMER_ITIMES_BASE(&cursor->data->inode),
1043                                      HAMMER_ITIMES_BYTES);
1044                         cursor->data->inode.atime = ip->sync_ino_data.atime;
1045                         cursor->data->inode.mtime = ip->sync_ino_data.mtime;
1046                         hammer_modify_buffer_done(cursor->data_buffer);
1047                 } else if (ip->sync_flags & HAMMER_INODE_ATIME) {
1048                         /*
1049                          * Updating atime only can be done in-place with
1050                          * no UNDO.
1051                          */
1052                         hammer_modify_buffer(trans, cursor->data_buffer,
1053                                              NULL, 0);
1054                         cursor->data->inode.atime = ip->sync_ino_data.atime;
1055                         hammer_modify_buffer_done(cursor->data_buffer);
1056                 }
1057                 ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
1058         }
1059         if (error == EDEADLK) {
1060                 hammer_done_cursor(cursor);
1061                 error = hammer_init_cursor(trans, cursor,
1062                                            &ip->cache[0], ip);
1063                 if (error == 0)
1064                         goto retry;
1065         }
1066         return(error);
1067 }
1068
1069 /*
1070  * Release a reference on an inode, flush as requested.
1071  *
1072  * On the last reference we queue the inode to the flusher for its final
1073  * disposition.
1074  */
1075 void
1076 hammer_rel_inode(struct hammer_inode *ip, int flush)
1077 {
1078         hammer_mount_t hmp = ip->hmp;
1079
1080         /*
1081          * Handle disposition when dropping the last ref.
1082          */
1083         for (;;) {
1084                 if (ip->lock.refs == 1) {
1085                         /*
1086                          * Determine whether on-disk action is needed for
1087                          * the inode's final disposition.
1088                          */
1089                         KKASSERT(ip->vp == NULL);
1090                         hammer_inode_unloadable_check(ip, 0);
1091                         if (ip->flags & HAMMER_INODE_MODMASK) {
1092                                 if (hmp->rsv_inodes > desiredvnodes) {
1093                                         hammer_flush_inode(ip,
1094                                                            HAMMER_FLUSH_SIGNAL);
1095                                 } else {
1096                                         hammer_flush_inode(ip, 0);
1097                                 }
1098                         } else if (ip->lock.refs == 1) {
1099                                 hammer_unload_inode(ip);
1100                                 break;
1101                         }
1102                 } else {
1103                         if (flush)
1104                                 hammer_flush_inode(ip, 0);
1105
1106                         /*
1107                          * The inode still has multiple refs, try to drop
1108                          * one ref.
1109                          */
1110                         KKASSERT(ip->lock.refs >= 1);
1111                         if (ip->lock.refs > 1) {
1112                                 hammer_unref(&ip->lock);
1113                                 break;
1114                         }
1115                 }
1116         }
1117 }
1118
1119 /*
1120  * Unload and destroy the specified inode.  Must be called with one remaining
1121  * reference.  The reference is disposed of.
1122  *
1123  * This can only be called in the context of the flusher.
1124  */
1125 static int
1126 hammer_unload_inode(struct hammer_inode *ip)
1127 {
1128         hammer_mount_t hmp = ip->hmp;
1129
1130         KASSERT(ip->lock.refs == 1,
1131                 ("hammer_unload_inode: %d refs\n", ip->lock.refs));
1132         KKASSERT(ip->vp == NULL);
1133         KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
1134         KKASSERT(ip->cursor_ip_refs == 0);
1135         KKASSERT(ip->lock.lockcount == 0);
1136         KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);
1137
1138         KKASSERT(RB_EMPTY(&ip->rec_tree));
1139         KKASSERT(TAILQ_EMPTY(&ip->target_list));
1140
1141         RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);
1142
1143         hammer_free_inode(ip);
1144         return(0);
1145 }
1146
1147 /*
1148  * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
1149  * the read-only flag for cached inodes.
1150  *
1151  * This routine is called from a RB_SCAN().
1152  */
1153 int
1154 hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
1155 {
1156         hammer_mount_t hmp = ip->hmp;
1157
1158         if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
1159                 ip->flags |= HAMMER_INODE_RO;
1160         else
1161                 ip->flags &= ~HAMMER_INODE_RO;
1162         return(0);
1163 }
1164
1165 /*
1166  * A transaction has modified an inode, requiring updates as specified by
1167  * the passed flags.
1168  *
1169  * HAMMER_INODE_DDIRTY: Inode data has been updated
1170  * HAMMER_INODE_XDIRTY: Dirty in-memory records
1171  * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
1172  * HAMMER_INODE_DELETED: Inode record/data must be deleted
1173  * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
1174  */
1175 void
1176 hammer_modify_inode(hammer_inode_t ip, int flags)
1177 {
1178         KKASSERT(ip->hmp->ronly == 0 ||
1179                   (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 
1180                             HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
1181                             HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
1182         if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
1183                 ip->flags |= HAMMER_INODE_RSV_INODES;
1184                 ++ip->hmp->rsv_inodes;
1185         }
1186
1187         ip->flags |= flags;
1188 }
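/*
 * Illustrative sketch only (hypothetical, not part of this file): a
 * frontend operation that changes permanent inode data updates the
 * frontend copy in ip->ino_data and then flags the inode with
 * hammer_modify_inode() so the flusher knows a new inode record must be
 * written.  The surrounding transaction and locking are omitted.
 */
#if 0
static void
hammer_example_chmod(hammer_inode_t ip, u_int16_t new_mode)
{
        ip->ino_data.mode = new_mode;
        hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
}
#endif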
1189
1190 /*
1191  * Request that an inode be flushed.  This whole mess cannot block and may
1192  * recurse (if not synchronous).  Once requested HAMMER will attempt to
1193  * actively flush the inode until the flush can be done.
1194  *
1195  * The inode may already be flushing, or may be in a setup state.  We can
1196  * place the inode in a flushing state if it is currently idle and flag it
1197  * to reflush if it is currently flushing.
1198  *
1199  * If the HAMMER_FLUSH_SYNCHRONOUS flag is specified we will attempt to
1200  * flush the inode synchronously using the caller's context.
1201  */
1202 void
1203 hammer_flush_inode(hammer_inode_t ip, int flags)
1204 {
1205         int good;
1206
1207         /*
1208          * Trivial 'nothing to flush' case.  If the inode is in a SETUP
1209          * state we have to put it back into an IDLE state so we can
1210          * drop the extra ref.
1211          */
1212         if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
1213                 if (ip->flush_state == HAMMER_FST_SETUP) {
1214                         ip->flush_state = HAMMER_FST_IDLE;
1215                         hammer_rel_inode(ip, 0);
1216                 }
1217                 return;
1218         }
1219
1220         /*
1221          * Our flush action will depend on the current state.
1222          */
1223         switch(ip->flush_state) {
1224         case HAMMER_FST_IDLE:
1225                 /*
1226                  * We have no dependencies and can flush immediately.  Some of
1227                  * our children may not be flushable so we have to re-test
1228                  * with that additional knowledge.
1229                  */
1230                 hammer_flush_inode_core(ip, flags);
1231                 break;
1232         case HAMMER_FST_SETUP:
1233                 /*
1234                  * Recurse upwards through dependencies via target_list
1235                  * and start their flusher actions going if possible.
1236                  *
1237                  * 'good' is our connectivity.  -1 means we have none and
1238                  * can't flush, 0 means there weren't any dependencies, and
1239                  * 1 means we have good connectivity.
1240                  */
1241                 good = hammer_setup_parent_inodes(ip);
1242
1243                 /*
1244                  * We can continue if good >= 0.  Determine how many records
1245                  * under our inode can be flushed (and mark them).
1246                  */
1247                 if (good >= 0) {
1248                         hammer_flush_inode_core(ip, flags);
1249                 } else {
1250                         ip->flags |= HAMMER_INODE_REFLUSH;
1251                         if (flags & HAMMER_FLUSH_SIGNAL) {
1252                                 ip->flags |= HAMMER_INODE_RESIGNAL;
1253                                 hammer_flusher_async(ip->hmp);
1254                         }
1255                 }
1256                 break;
1257         default:
1258                 /*
1259                  * We are already flushing, flag the inode to reflush
1260                  * if needed after it completes its current flush.
1261                  */
1262                 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
1263                         ip->flags |= HAMMER_INODE_REFLUSH;
1264                 if (flags & HAMMER_FLUSH_SIGNAL) {
1265                         ip->flags |= HAMMER_INODE_RESIGNAL;
1266                         hammer_flusher_async(ip->hmp);
1267                 }
1268                 break;
1269         }
1270 }
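/*
 * Illustrative sketch only (hypothetical, not part of this file): a
 * synchronous fsync-style caller signals the flusher and waits for the
 * inode to finish flushing.  hammer_wait_inode() is assumed to be the
 * wait primitive provided elsewhere in HAMMER; error handling is omitted.
 */
#if 0
static void
hammer_example_fsync(hammer_inode_t ip)
{
        if (ip->flags & HAMMER_INODE_MODMASK) {
                hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                hammer_wait_inode(ip);          /* block until flush completes */
        }
}
#endif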
1271
1272 /*
1273  * Scan ip->target_list, which is a list of records owned by PARENTS of our
1274  * ip, i.e. records which reference our ip.
1275  *
1276  * XXX This is a huge mess of recursive code, but not one bit of it blocks
1277  *     so for now do not ref/deref the structures.  Note that if we use the
1278  *     ref/rel code later, the rel CAN block.
1279  */
1280 static int
1281 hammer_setup_parent_inodes(hammer_inode_t ip)
1282 {
1283         hammer_record_t depend;
1284 #if 0
1285         hammer_record_t next;
1286         hammer_inode_t  pip;
1287 #endif
1288         int good;
1289         int r;
1290
1291         good = 0;
1292         TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
1293                 r = hammer_setup_parent_inodes_helper(depend);
1294                 KKASSERT(depend->target_ip == ip);
1295                 if (r < 0 && good == 0)
1296                         good = -1;
1297                 if (r > 0)
1298                         good = 1;
1299         }
1300         return(good);
1301
1302 #if 0
1303 retry:
1304         good = 0;
1305         next = TAILQ_FIRST(&ip->target_list);
1306         if (next) {
1307                 hammer_ref(&next->lock);
1308                 hammer_ref(&next->ip->lock);
1309         }
1310         while ((depend = next) != NULL) {
1311                 if (depend->target_ip == NULL) {
1312                         pip = depend->ip;
1313                         hammer_rel_mem_record(depend);
1314                         hammer_rel_inode(pip, 0);
1315                         goto retry;
1316                 }
1317                 KKASSERT(depend->target_ip == ip);
1318                 next = TAILQ_NEXT(depend, target_entry);
1319                 if (next) {
1320                         hammer_ref(&next->lock);
1321                         hammer_ref(&next->ip->lock);
1322                 }
1323                 r = hammer_setup_parent_inodes_helper(depend);
1324                 if (r < 0 && good == 0)
1325                         good = -1;
1326                 if (r > 0)
1327                         good = 1;
1328                 pip = depend->ip;
1329                 hammer_rel_mem_record(depend);
1330                 hammer_rel_inode(pip, 0);
1331         }
1332         return(good);
1333 #endif
1334 }
1335
1336 /*
1337  * This helper function takes a record representing the dependency between
1338  * the parent inode and child inode.
1339  *
1340  * record->ip           = parent inode
1341  * record->target_ip    = child inode
1342  * 
1343  * We are asked to recurse upwards and convert the record from SETUP
1344  * to FLUSH if possible.
1345  *
1346  * Return 1 if the record gives us connectivity
1347  *
1348  * Return 0 if the record is not relevant 
1349  *
1350  * Return -1 if we can't resolve the dependency and there is no connectivity.
1351  */
1352 static int
1353 hammer_setup_parent_inodes_helper(hammer_record_t record)
1354 {
1355         hammer_mount_t hmp;
1356         hammer_inode_t pip;
1357         int good;
1358
1359         KKASSERT(record->flush_state != HAMMER_FST_IDLE);
1360         pip = record->ip;
1361         hmp = pip->hmp;
1362
1363         /*
1364          * If the record is already flushing, is it in our flush group?
1365          *
1366          * If it is in our flush group but it is a general record or a 
1367          * delete-on-disk, it does not improve our connectivity (return 0),
1368          * and if the target inode is not trying to destroy itself we can't
1369          * allow the operation yet anyway (the second return -1).
1370          */
1371         if (record->flush_state == HAMMER_FST_FLUSH) {
1372                 if (record->flush_group != hmp->flusher.next) {
1373                         pip->flags |= HAMMER_INODE_REFLUSH;
1374                         return(-1);
1375                 }
1376                 if (record->type == HAMMER_MEM_RECORD_ADD)
1377                         return(1);
1378                 /* GENERAL or DEL */
1379                 return(0);
1380         }
1381
1382         /*
1383          * It must be a setup record.  Try to resolve the setup dependencies
1384          * by recursing upwards so we can place ip on the flush list.
1385          */
1386         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1387
1388         good = hammer_setup_parent_inodes(pip);
1389
1390         /*
1391          * We can't flush ip because it has no connectivity (XXX also check
1392          * nlinks for pre-existing connectivity!).  Flag it so any resolution
1393          * recurses back down.
1394          */
1395         if (good < 0) {
1396                 pip->flags |= HAMMER_INODE_REFLUSH;
1397                 return(good);
1398         }
1399
1400         /*
1401          * We are go, place the parent inode in a flushing state so we can
1402          * place its record in a flushing state.  Note that the parent
1403          * may already be flushing.  The record must be in the same flush
1404          * group as the parent.
1405          */
1406         if (pip->flush_state != HAMMER_FST_FLUSH)
1407                 hammer_flush_inode_core(pip, HAMMER_FLUSH_RECURSION);
1408         KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
1409         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1410
1411 #if 0
1412         if (record->type == HAMMER_MEM_RECORD_DEL &&
1413             (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
1414                 /*
1415                  * Regardless of flushing state we cannot sync this path if the
1416                  * record represents a delete-on-disk but the target inode
1417                  * is not ready to sync its own deletion.
1418                  *
1419                  * XXX need to count effective nlinks to determine whether
1420                  * the flush is ok, otherwise removing a hardlink will
1421                  * just leave the DEL record to rot.
1422                  */
1423                 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
1424                 return(-1);
1425         } else
1426 #endif
1427         if (pip->flush_group == pip->hmp->flusher.next) {
1428                 /*
1429                  * This is the record we wanted to synchronize.  If the
1430                  * record went into a flush state while we blocked it 
1431                  * record went into a flush state while we blocked, it had
1432                  * better be in the correct flush group.
1433                 if (record->flush_state != HAMMER_FST_FLUSH) {
1434                         record->flush_state = HAMMER_FST_FLUSH;
1435                         record->flush_group = pip->flush_group;
1436                         hammer_ref(&record->lock);
1437                 } else {
1438                         KKASSERT(record->flush_group == pip->flush_group);
1439                 }
1440                 if (record->type == HAMMER_MEM_RECORD_ADD)
1441                         return(1);
1442
1443                 /*
1444                  * A general or delete-on-disk record does not contribute
1445                  * to our visibility.  We can still flush it, however.
1446                  */
1447                 return(0);
1448         } else {
1449                 /*
1450                  * We couldn't resolve the dependencies, request that the
1451                  * inode be flushed when the dependencies can be resolved.
1452                  */
1453                 pip->flags |= HAMMER_INODE_REFLUSH;
1454                 return(-1);
1455         }
1456 }
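/*
 * Example of the dependency orientation the helper above relies on
 * (illustrative): creating file B in directory A queues a
 * HAMMER_MEM_RECORD_ADD whose record->ip is A (the parent holding the
 * new directory entry) and whose record->target_ip is B (the child).
 * Flushing B recurses through this helper to pull A into the same
 * flush group, so the directory entry making B reachable is committed
 * in the same flush group as B itself.
 */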
1457
1458 /*
1459  * This is the core routine placing an inode into the FST_FLUSH state.
1460  */
1461 static void
1462 hammer_flush_inode_core(hammer_inode_t ip, int flags)
1463 {
1464         int go_count;
1465
1466         /*
1467          * Set flush state and prevent the flusher from cycling into
1468          * the next flush group.  Do not place the ip on the list yet.
1469          * Inodes not in the idle state get an extra reference.
1470          */
1471         KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
1472         if (ip->flush_state == HAMMER_FST_IDLE)
1473                 hammer_ref(&ip->lock);
1474         ip->flush_state = HAMMER_FST_FLUSH;
1475         ip->flush_group = ip->hmp->flusher.next;
1476         ++ip->hmp->flusher.group_lock;
1477         ++ip->hmp->count_iqueued;
1478         ++hammer_count_iqueued;
1479
1480         /*
1481          * We need to be able to vfsync/truncate from the backend.
1482          */
1483         KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
1484         if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
1485                 ip->flags |= HAMMER_INODE_VHELD;
1486                 vref(ip->vp);
1487         }
1488
1489         /*
1490          * Figure out how many in-memory records we can actually flush
1491          * (not including inode meta-data, buffers, etc).
1492          *
1493          * Do not add new records to the flush if this is a recursion or
1494          * if we must still complete a flush from the previous flush cycle.
1495          */
1496         if (flags & HAMMER_FLUSH_RECURSION) {
1497                 go_count = 1;
1498         } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
1499                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1500                                    hammer_syncgrp_child_callback, NULL);
1501                 go_count = 1;
1502         } else {
1503                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1504                                    hammer_setup_child_callback, NULL);
1505         }
1506
1507         /*
1508          * This is a more involved test that includes go_count.  If we
1509          * can't flush, flag the inode and return.  If go_count is 0 we
1510          * are unable to flush any records in our rec_tree and
1511          * must ignore the XDIRTY flag.
1512          */
1513         if (go_count == 0) {
1514                 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
1515                         ip->flags |= HAMMER_INODE_REFLUSH;
1516
1517                         --ip->hmp->count_iqueued;
1518                         --hammer_count_iqueued;
1519
1520                         ip->flush_state = HAMMER_FST_SETUP;
1521                         if (ip->flags & HAMMER_INODE_VHELD) {
1522                                 ip->flags &= ~HAMMER_INODE_VHELD;
1523                                 vrele(ip->vp);
1524                         }
1525                         if (flags & HAMMER_FLUSH_SIGNAL) {
1526                                 ip->flags |= HAMMER_INODE_RESIGNAL;
1527                                 hammer_flusher_async(ip->hmp);
1528                         }
1529                         if (--ip->hmp->flusher.group_lock == 0)
1530                                 wakeup(&ip->hmp->flusher.group_lock);
1531                         return;
1532                 }
1533         }
1534
1535         /*
1536          * Snapshot the state of the inode for the backend flusher.
1537          *
1538          * We continue to retain save_trunc_off even when all truncations
1539          * have been resolved as an optimization to determine if we can
1540          * skip the B-Tree lookup for overwrite deletions.
1541          *
1542          * NOTE: The DELETING flag is a mod flag, but it is also sticky,
1543          * and stays in ip->flags.  Once set, it stays set until the
1544          * inode is destroyed.
1545          *
1546          * NOTE: If a truncation from a previous flush cycle had to be
1547          * continued into this one, the TRUNCATED flag will still be
1548          * set in sync_flags as will WOULDBLOCK.  When this occurs
1549          * we CANNOT safely integrate a new truncation from the front-end
1550          * because there may be data records in-memory assigned a flush
1551          * state from the previous cycle that are supposed to be flushed
1552          * before the next frontend truncation.
1553          */
1554         if ((ip->flags & (HAMMER_INODE_TRUNCATED | HAMMER_INODE_WOULDBLOCK)) ==
1555             HAMMER_INODE_TRUNCATED) {
1556                 KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
1557                 ip->sync_trunc_off = ip->trunc_off;
1558                 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
1559                 ip->flags &= ~HAMMER_INODE_TRUNCATED;
1560                 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
1561
1562                 /*
1563                  * The save_trunc_off used to cache whether the B-Tree
1564                  * holds any records past that point is not used until
1565                  * after the truncation has succeeded, so we can safely
1566                  * set it now.
1567                  */
1568                 if (ip->save_trunc_off > ip->sync_trunc_off)
1569                         ip->save_trunc_off = ip->sync_trunc_off;
1570         }
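        /*
         * Worked example of the snapshot above (illustrative): if the
         * front-end truncated the file at offset 0x2000, trunc_off is
         * 0x2000 and TRUNCATED is set in ip->flags.  We copy 0x2000
         * into sync_trunc_off for the backend, reset the front-end
         * trunc_off to the maximum offset, and move the TRUNCATED flag
         * into sync_flags so the flusher performs the media deletion.
         */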
1571         ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
1572                            ~HAMMER_INODE_TRUNCATED);
1573         ip->sync_ino_leaf = ip->ino_leaf;
1574         ip->sync_ino_data = ip->ino_data;
1575         ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
1576 #ifdef DEBUG_TRUNCATE
1577         if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
1578                 kprintf("truncateS %016llx\n", ip->sync_trunc_off);
1579 #endif
1580
1581         /*
1582          * The flusher list inherits our inode and reference.
1583          */
1584         TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
1585         if (--ip->hmp->flusher.group_lock == 0)
1586                 wakeup(&ip->hmp->flusher.group_lock);
1587
1588         if (flags & HAMMER_FLUSH_SIGNAL) {
1589                 hammer_flusher_async(ip->hmp);
1590         }
1591 }
1592
1593 /*
1594  * Callback for scan of ip->rec_tree.  Try to include each record in our
1595  * flush.  ip->flush_group has been set but the inode has not yet been
1596  * moved into a flushing state.
1597  *
1598  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
1599  * both inodes.
1600  *
1601  * We return 1 for any record placed or found in FST_FLUSH, which prevents
1602  * the caller from shortcutting the flush.
1603  */
1604 static int
1605 hammer_setup_child_callback(hammer_record_t rec, void *data)
1606 {
1607         hammer_inode_t target_ip;
1608         hammer_inode_t ip;
1609         int r;
1610
1611         /*
1612          * Deleted records are ignored.  Note that the flush detects deleted
1613          * front-end records at multiple points to deal with races.  This is
1614          * just the first line of defense.  The only time DELETED_FE cannot
1615          * be set is when HAMMER_RECF_INTERLOCK_BE is set. 
1616          *
1617          * Don't get confused between record deletion and, say, directory
1618          * entry deletion.  The deletion of a directory entry that is on
1619          * the media has nothing to do with the record deletion flags.
1620          *
1621          * The flush_group for a record already in a flush state must
1622          * be updated.  This case can only occur if an inode that was
1623          * deleting too many records had to be moved to the next flush group.
1624          */
1625         if (rec->flags & (HAMMER_RECF_DELETED_FE|HAMMER_RECF_DELETED_BE)) {
1626                 if (rec->flush_state == HAMMER_FST_FLUSH) {
1627                         KKASSERT(rec->ip->flags & HAMMER_INODE_WOULDBLOCK);
1628                         rec->flush_group = rec->ip->flush_group;
1629                         r = 1;
1630                 } else {
1631                         r = 0;
1632                 }
1633                 return(r);
1634         }
1635
1636         /*
1637          * If the record is in an idle state it has no dependencies and
1638          * can be flushed.
1639          */
1640         ip = rec->ip;
1641         r = 0;
1642
1643         switch(rec->flush_state) {
1644         case HAMMER_FST_IDLE:
1645                 /*
1646                  * Record has no setup dependency, we can flush it.
1647                  */
1648                 KKASSERT(rec->target_ip == NULL);
1649                 rec->flush_state = HAMMER_FST_FLUSH;
1650                 rec->flush_group = ip->flush_group;
1651                 hammer_ref(&rec->lock);
1652                 r = 1;
1653                 break;
1654         case HAMMER_FST_SETUP:
1655                 /*
1656                  * Record has a setup dependency.  Try to include the
1657                  * target ip in the flush. 
1658                  *
1659                  * We have to be careful here; if we do not do the right
1660                  * thing we can lose track of dirty inodes and the system
1661                  * will lock up trying to allocate buffers.
1662                  */
1663                 target_ip = rec->target_ip;
1664                 KKASSERT(target_ip != NULL);
1665                 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
1666                 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
1667                         /*
1668                          * If the target IP is already flushing in our group
1669                          * we are golden, otherwise make sure the target
1670                          * reflushes.
1671                          */
1672                         if (target_ip->flush_group == ip->flush_group) {
1673                                 rec->flush_state = HAMMER_FST_FLUSH;
1674                                 rec->flush_group = ip->flush_group;
1675                                 hammer_ref(&rec->lock);
1676                                 r = 1;
1677                         } else {
1678                                 target_ip->flags |= HAMMER_INODE_REFLUSH;
1679                         }
1680                 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
1681                         /*
1682                          * If the target IP is not flushing we can force
1683                          * it to flush.  Even if it is unable to write out
1684                          * any of its own records, we have at least one in
1685                          * hand that we CAN deal with.
1686                          */
1687                         rec->flush_state = HAMMER_FST_FLUSH;
1688                         rec->flush_group = ip->flush_group;
1689                         hammer_ref(&rec->lock);
1690                         hammer_flush_inode_core(target_ip,
1691                                                 HAMMER_FLUSH_RECURSION);
1692                         r = 1;
1693                 } else {
1694                         /*
1695                          * General or delete-on-disk record.
1696                          *
1697                          * XXX this needs help.  If a delete-on-disk we could
1698                          * disconnect the target.  If the target has its own
1699                          * dependencies they really need to be flushed.
1700                          *
1701                          * XXX
1702                          */
1703                         rec->flush_state = HAMMER_FST_FLUSH;
1704                         rec->flush_group = ip->flush_group;
1705                         hammer_ref(&rec->lock);
1706                         hammer_flush_inode_core(target_ip,
1707                                                 HAMMER_FLUSH_RECURSION);
1708                         r = 1;
1709                 }
1710                 break;
1711         case HAMMER_FST_FLUSH:
1712                 /* 
1713                  * If the WOULDBLOCK flag is set records may have been left
1714                  * over from a previous flush attempt and should be moved
1715                  * to the current flush group.  If it is not set then all
1716                  * such records had better have been flushed already or
1717                  * already associated with the current flush group.
1718                  */
1719                 if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
1720                         rec->flush_group = ip->flush_group;
1721                 } else {
1722                         KKASSERT(rec->flush_group == ip->flush_group);
1723                 }
1724                 r = 1;
1725                 break;
1726         }
1727         return(r);
1728 }
1729
1730 /*
1731  * This version just moves records already in a flush state to the new
1732  * flush group and that is it.
1733  */
1734 static int
1735 hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
1736 {
1737         hammer_inode_t ip = rec->ip;
1738
1739         switch(rec->flush_state) {
1740         case HAMMER_FST_FLUSH:
1741                 if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
1742                         rec->flush_group = ip->flush_group;
1743                 } else {
1744                         KKASSERT(rec->flush_group == ip->flush_group);
1745                 }
1746                 break;
1747         default:
1748                 break;
1749         }
1750         return(0);
1751 }
1752
1753 /*
1754  * Wait for a previously queued flush to complete.  Not only do we need to
1755  * wait for the inode to sync out, we also may have to run the flusher again
1756  * to get it past the UNDO position pertaining to the flush so a crash does
1757  * not 'undo' our flush.
1758  */
1759 void
1760 hammer_wait_inode(hammer_inode_t ip)
1761 {
1762         hammer_mount_t hmp = ip->hmp;
1763         int sync_group;
1764         int waitcount;
1765
1766         sync_group = ip->flush_group;
1767         waitcount = (ip->flags & HAMMER_INODE_REFLUSH) ? 2 : 1;
1768
1769         if (ip->flush_state == HAMMER_FST_SETUP) {
1770                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1771         }
1772         /* XXX can we make this != FST_IDLE ? check SETUP depends */
1773         while (ip->flush_state == HAMMER_FST_FLUSH &&
1774                (ip->flush_group - sync_group) < waitcount) {
1775                 ip->flags |= HAMMER_INODE_FLUSHW;
1776                 tsleep(&ip->flags, 0, "hmrwin", 0);
1777         }
1778         while (hmp->flusher.done - sync_group < waitcount) {
1779                 kprintf("Y");
1780                 hammer_flusher_sync(hmp);
1781         }
1782 }
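/*
 * A minimal sketch (not compiled here) of how a front-end path such as
 * fsync typically drives the flush/wait pair above.  The surrounding
 * function and the MNT_WAIT check are assumptions for illustration,
 * not a verbatim copy of the VOP code.
 */
#if 0
static int
example_fsync_path(hammer_inode_t ip, int waitfor)
{
	/* queue the inode and signal the flusher thread */
	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);

	/* block until the flush group containing ip has been retired */
	if (waitfor == MNT_WAIT)
		hammer_wait_inode(ip);
	return(0);
}
#endif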
1783
1784 /*
1785  * Called by the backend code when a flush has been completed.
1786  * The inode has already been removed from the flush list.
1787  *
1788  * A pipelined flush can occur, in which case we must re-enter the
1789  * inode on the list and re-copy its fields.
1790  */
1791 void
1792 hammer_flush_inode_done(hammer_inode_t ip)
1793 {
1794         hammer_mount_t hmp;
1795         int dorel;
1796
1797         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
1798
1799         hmp = ip->hmp;
1800
1801         /*
1802          * Merge left-over flags back into the frontend and fix the state.
1803          * Incomplete truncations are retained by the backend.
1804          */
1805         ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
1806         ip->sync_flags &= HAMMER_INODE_TRUNCATED;
1807
1808         /*
1809          * The backend may have adjusted nlinks, so if the adjusted nlinks
1810          * does not match the frontend copy, set the frontend's DDIRTY flag again.
1811          */
1812         if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
1813                 ip->flags |= HAMMER_INODE_DDIRTY;
1814
1815         /*
1816          * Fix up the dirty buffer status.
1817          */
1818         if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
1819                 ip->flags |= HAMMER_INODE_BUFS;
1820         }
1821
1822         /*
1823          * Re-set the XDIRTY flag if some of the inode's in-memory records
1824          * could not be flushed.
1825          */
1826         KKASSERT((RB_EMPTY(&ip->rec_tree) &&
1827                   (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
1828                  (!RB_EMPTY(&ip->rec_tree) &&
1829                   (ip->flags & HAMMER_INODE_XDIRTY) != 0));
1830
1831         /*
1832          * Do not lose track of inodes which no longer have vnode
1833          * associations, otherwise they may never get flushed again.
1834          */
1835         if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
1836                 ip->flags |= HAMMER_INODE_REFLUSH;
1837
1838         /*
1839          * Clean up the vnode ref
1840          */
1841         if (ip->flags & HAMMER_INODE_VHELD) {
1842                 ip->flags &= ~HAMMER_INODE_VHELD;
1843                 vrele(ip->vp);
1844         }
1845
1846         /*
1847          * Adjust flush_state.  The target state (idle or setup) shouldn't
1848          * be terribly important since we will reflush if we really need
1849          * to do anything.
1850          *
1851          * If the WOULDBLOCK flag is set we must re-flush immediately
1852          * to continue a potentially large deletion.  The flag also causes
1853          * the hammer_setup_child_callback() to move records in the old
1854          * flush group to the new one.
1855          */
1856         if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
1857                 kprintf("B");
1858                 ip->flush_state = HAMMER_FST_IDLE;
1859                 hammer_flush_inode_core(ip, HAMMER_FLUSH_SIGNAL);
1860                 ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
1861                 dorel = 1;
1862         } else if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
1863                 ip->flush_state = HAMMER_FST_IDLE;
1864                 dorel = 1;
1865         } else {
1866                 ip->flush_state = HAMMER_FST_SETUP;
1867                 dorel = 0;
1868         }
1869
1870         --hmp->count_iqueued;
1871         --hammer_count_iqueued;
1872
1873         /*
1874          * If the frontend made more changes and requested another flush,
1875          * then try to get it running.
1876          */
1877         if (ip->flags & HAMMER_INODE_REFLUSH) {
1878                 ip->flags &= ~HAMMER_INODE_REFLUSH;
1879                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
1880                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
1881                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1882                 } else {
1883                         hammer_flush_inode(ip, 0);
1884                 }
1885         }
1886
1887         /*
1888          * If the inode is now clean drop the space reservation.
1889          */
1890         if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1891             (ip->flags & HAMMER_INODE_RSV_INODES)) {
1892                 ip->flags &= ~HAMMER_INODE_RSV_INODES;
1893                 --hmp->rsv_inodes;
1894         }
1895
1896         /*
1897          * Finally, if the frontend is waiting for a flush to complete,
1898          * wake it up.
1899          */
1900         if (ip->flush_state != HAMMER_FST_FLUSH) {
1901                 if (ip->flags & HAMMER_INODE_FLUSHW) {
1902                         ip->flags &= ~HAMMER_INODE_FLUSHW;
1903                         wakeup(&ip->flags);
1904                 }
1905         }
1906         if (dorel)
1907                 hammer_rel_inode(ip, 0);
1908 }
1909
1910 /*
1911  * Called from hammer_sync_inode() to synchronize in-memory records
1912  * to the media.
1913  */
1914 static int
1915 hammer_sync_record_callback(hammer_record_t record, void *data)
1916 {
1917         hammer_cursor_t cursor = data;
1918         hammer_transaction_t trans = cursor->trans;
1919         int error;
1920
1921         /*
1922          * Skip records that do not belong to the current flush.
1923          */
1924         ++hammer_stats_record_iterations;
1925         if (record->flush_state != HAMMER_FST_FLUSH)
1926                 return(0);
1927
1928 #if 1
1929         if (record->flush_group != record->ip->flush_group) {
1930                 kprintf("sync_record %p ip %p bad flush group %d %d\n", record, record->ip, record->flush_group, record->ip->flush_group);
1931                 Debugger("blah2");
1932                 return(0);
1933         }
1934 #endif
1935         KKASSERT(record->flush_group == record->ip->flush_group);
1936
1937         /*
1938          * Interlock the record using the BE flag.  Once BE is set the
1939          * frontend cannot change the state of FE.
1940          *
1941          * NOTE: If FE is set prior to us setting BE we still sync the
1942          * record out, but the flush completion code converts it to 
1943          * a delete-on-disk record instead of destroying it.
1944          */
1945         KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
1946         record->flags |= HAMMER_RECF_INTERLOCK_BE;
1947
1948         /*
1949          * The backend may have already disposed of the record.
1950          */
1951         if (record->flags & HAMMER_RECF_DELETED_BE) {
1952                 error = 0;
1953                 goto done;
1954         }
1955
1956         /*
1957          * If the whole inode is being deleted, all on-disk records will
1958          * be deleted very soon and we can't sync any new records to disk
1959          * because they will be deleted in the same transaction they were
1960          * created in (delete_tid == create_tid), which will assert.
1961          *
1962          * XXX There may be a case with RECORD_ADD with DELETED_FE set
1963          * that we currently panic on.
1964          */
1965         if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
1966                 switch(record->type) {
1967                 case HAMMER_MEM_RECORD_DATA:
1968                         /*
1969                          * We don't have to do anything, if the record was
1970                          * committed the space will have been accounted for
1971                          * in the blockmap.
1972                          */
1973                         /* fall through */
1974                 case HAMMER_MEM_RECORD_GENERAL:
1975                         record->flags |= HAMMER_RECF_DELETED_FE;
1976                         record->flags |= HAMMER_RECF_DELETED_BE;
1977                         error = 0;
1978                         goto done;
1979                 case HAMMER_MEM_RECORD_ADD:
1980                         panic("hammer_sync_record_callback: illegal add "
1981                               "during inode deletion record %p", record);
1982                         break; /* NOT REACHED */
1983                 case HAMMER_MEM_RECORD_INODE:
1984                         panic("hammer_sync_record_callback: attempt to "
1985                               "sync inode record %p?", record);
1986                         break; /* NOT REACHED */
1987                 case HAMMER_MEM_RECORD_DEL:
1988                         /* 
1989                          * Follow through and issue the on-disk deletion
1990                          */
1991                         break;
1992                 }
1993         }
1994
1995         /*
1996  * If DELETED_FE is set, special handling is needed for directory
1997  * entries.  Dependent pieces related to the directory entry may
1998          * have already been synced to disk.  If this occurs we have to
1999          * sync the directory entry and then change the in-memory record
2000          * from an ADD to a DELETE to cover the fact that it's been
2001          * deleted by the frontend.
2002          *
2003          * A directory delete covering record (MEM_RECORD_DEL) can never
2004          * be deleted by the frontend.
2005          *
2006          * Any other record type (aka DATA) can be deleted by the frontend.
2007          * XXX At the moment the flusher must skip it because there may
2008          * be another data record in the flush group for the same block,
2009          * meaning that some frontend data changes can leak into the backend's
2010          * synchronization point.
2011          */
2012         if (record->flags & HAMMER_RECF_DELETED_FE) {
2013                 if (record->type == HAMMER_MEM_RECORD_ADD) {
2014                         record->flags |= HAMMER_RECF_CONVERT_DELETE;
2015                 } else {
2016                         KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
2017                         record->flags |= HAMMER_RECF_DELETED_BE;
2018                         error = 0;
2019                         goto done;
2020                 }
2021         }
2022
2023         /*
2024          * Assign the create_tid for new records.  Deletions already
2025          * have the record's entire key properly set up.
2026          */
2027         if (record->type != HAMMER_MEM_RECORD_DEL) {
2028                 record->leaf.base.create_tid = trans->tid;
2029                 record->leaf.create_ts = trans->time32;
             }
2030         for (;;) {
2031                 error = hammer_ip_sync_record_cursor(cursor, record);
2032                 if (error != EDEADLK)
2033                         break;
2034                 hammer_done_cursor(cursor);
2035                 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
2036                                            record->ip);
2037                 if (error)
2038                         break;
2039         }
2040         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
2041
2042         if (error) {
2043                 error = -error;
2044                 if (error != -ENOSPC) {
2045                         kprintf("hammer_sync_record_callback: sync failed rec "
2046                                 "%p, error %d\n", record, error);
2047                         Debugger("sync failed rec");
2048                 }
2049         }
2050 done:
2051         hammer_flush_record_done(record, error);
2052         return(error);
2053 }
2054
2055 /*
2056  * XXX error handling
2057  */
2058 int
2059 hammer_sync_inode(hammer_inode_t ip)
2060 {
2061         struct hammer_transaction trans;
2062         struct hammer_cursor cursor;
2063         hammer_node_t tmp_node;
2064         hammer_record_t depend;
2065         hammer_record_t next;
2066         int error, tmp_error;
2067         u_int64_t nlinks;
2068
2069         if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
2070                 return(0);
2071
2072         hammer_start_transaction_fls(&trans, ip->hmp);
2073         error = hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
2074         if (error)
2075                 goto done;
2076
2077         /*
2078          * Any directory records referencing this inode which are not in
2079          * our current flush group must adjust our nlink count for the
2080          * purposes of synchronization to disk.
2081          *
2082          * Records which are in our flush group can be unlinked from our
2083          * inode now, potentially allowing the inode to be physically
2084          * deleted.
2085          *
2086          * This cannot block.
2087          */
2088         nlinks = ip->ino_data.nlinks;
2089         next = TAILQ_FIRST(&ip->target_list);
2090         while ((depend = next) != NULL) {
2091                 next = TAILQ_NEXT(depend, target_entry);
2092                 if (depend->flush_state == HAMMER_FST_FLUSH &&
2093                     depend->flush_group == ip->hmp->flusher.act) {
2094                         /*
2095                          * If this is an ADD that was deleted by the frontend
2096                          * the frontend nlinks count will have already been
2097                          * decremented, but the backend is going to sync its
2098                          * directory entry and must account for it.  The
2099                          * record will be converted to a delete-on-disk when
2100                          * it gets synced.
2101                          *
2102                          * If the ADD was not deleted by the frontend we
2103                          * can remove the dependency from our target_list.
2104                          */
2105                         if (depend->flags & HAMMER_RECF_DELETED_FE) {
2106                                 ++nlinks;
2107                         } else {
2108                                 TAILQ_REMOVE(&ip->target_list, depend,
2109                                              target_entry);
2110                                 depend->target_ip = NULL;
2111                         }
2112                 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
2113                         /*
2114                          * Not part of our flush group
2115                          */
2116                         KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
2117                         switch(depend->type) {
2118                         case HAMMER_MEM_RECORD_ADD:
2119                                 --nlinks;
2120                                 break;
2121                         case HAMMER_MEM_RECORD_DEL:
2122                                 ++nlinks;
2123                                 break;
2124                         default:
2125                                 break;
2126                         }
2127                 }
2128         }
2129
2130         /*
2131          * Set dirty if we had to modify the link count.
2132          */
2133         if (ip->sync_ino_data.nlinks != nlinks) {
2134                 KKASSERT((int64_t)nlinks >= 0);
2135                 ip->sync_ino_data.nlinks = nlinks;
2136                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
2137         }
2138
2139         /*
2140          * If there is a truncation queued, destroy any data past the (aligned)
2141          * truncation point.  Userland will have dealt with the buffer
2142          * containing the truncation point for us.
2143          *
2144          * We don't flush pending frontend data buffers until after we've
2145          * dealt with the truncation.
2146          */
2147         if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2148                 /*
2149                  * Interlock trunc_off.  The VOP front-end may continue to
2150                  * make adjustments to it while we are blocked.
2151                  */
2152                 off_t trunc_off;
2153                 off_t aligned_trunc_off;
2154                 int blkmask;
2155
2156                 trunc_off = ip->sync_trunc_off;
2157                 blkmask = hammer_blocksize(trunc_off) - 1;
2158                 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
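                /*
                 * Illustrative arithmetic, assuming the 16KB small-buffer
                 * case: a sync_trunc_off of 0x6200 gives blkmask 0x3fff,
                 * so aligned_trunc_off rounds up to 0x8000.  Whole blocks
                 * from 0x8000 onward are deleted on-media below; the
                 * partial block at 0x4000-0x7fff was already handled by
                 * the front-end.
                 */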
2159
2160                 /*
2161                  * Delete any whole blocks on-media.  The front-end has
2162                  * already cleaned out any partial block and made it
2163                  * pending.  The front-end may have updated trunc_off
2164                  * while we were blocked so we only use sync_trunc_off.
2165                  *
2166                  * This operation can blow out the buffer cache; EWOULDBLOCK
2167                  * means we were unable to complete the deletion.  The
2168                  * deletion will update sync_trunc_off in that case.
2169                  */
2170                 error = hammer_ip_delete_range(&cursor, ip,
2171                                                 aligned_trunc_off,
2172                                                 0x7FFFFFFFFFFFFFFFLL, 2);
2173                 if (error == EWOULDBLOCK) {
2174                         ip->flags |= HAMMER_INODE_WOULDBLOCK;
2175                         error = 0;
2176                         goto defer_buffer_flush;
2177                 }
2178
2179                 if (error)
2180                         Debugger("hammer_ip_delete_range errored");
2181
2182                 /*
2183                  * Clear the truncation flag on the backend after we have
2184                  * completed the deletions.  Backend data is now good again
2185                  * (including new records we are about to sync, below).
2186                  *
2187                  * Leave sync_trunc_off intact.  As we write additional
2188                  * records the backend will update sync_trunc_off.  This
2189                  * tells the backend whether it can skip the overwrite
2190                  * test.  This should work properly even when the backend
2191                  * writes full blocks where the truncation point straddles
2192                  * the block because the comparison is against the base
2193                  * offset of the record.
2194                  */
2195                 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2196                 /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
2197         } else {
2198                 error = 0;
2199         }
2200
2201         /*
2202          * Now sync related records.  These will typically be directory
2203          * entries or delete-on-disk records.
2204          *
2205          * Not all records will be flushed, but clear XDIRTY anyway.  We
2206          * will set it again in the frontend hammer_flush_inode_done() 
2207          * if records remain.
2208          */
2209         if (error == 0) {
2210                 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2211                                     hammer_sync_record_callback, &cursor);
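                /*
                 * The callback returns a negated errno to abort the
                 * scan; convert it back to a positive errno here.
                 */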
2212                 if (tmp_error < 0)
2213                         tmp_error = -tmp_error;
2214                 if (tmp_error)
2215                         error = tmp_error;
2216         }
2217         hammer_cache_node(&ip->cache[1], cursor.node);
2218
2219         /*
2220          * Re-seek for inode update, assuming our cache hasn't been ripped
2221          * out from under us.
2222          */
2223         if (error == 0) {
2224                 tmp_node = hammer_ref_node_safe(ip->hmp, &ip->cache[0], &error);
2225                 if (tmp_node) {
2226                         hammer_cursor_downgrade(&cursor);
2227                         hammer_lock_sh(&tmp_node->lock);
2228                         if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
2229                                 hammer_cursor_seek(&cursor, tmp_node, 0);
2230                         hammer_unlock(&tmp_node->lock);
2231                         hammer_rel_node(tmp_node);
2232                 }
2233                 error = 0;
2234         }
2235
2236         /*
2237          * If we are deleting the inode the frontend had better not have
2238          * any active references on elements making up the inode.
2239          *
2240          * The call to hammer_ip_delete_clean() cleans up auxiliary records
2241          * but not DB or DATA records.  Those must have already been deleted
2242          * by the normal truncation mechanic.
2243          */
2244         if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
2245             RB_EMPTY(&ip->rec_tree) &&
2246             (ip->sync_flags & HAMMER_INODE_DELETING) &&
2247             (ip->flags & HAMMER_INODE_DELETED) == 0) {
2248                 int count1 = 0;
2249
2250                 error = hammer_ip_delete_clean(&cursor, ip, &count1);
2251                 if (error == 0) {
2252                         ip->flags |= HAMMER_INODE_DELETED;
2253                         ip->sync_flags &= ~HAMMER_INODE_DELETING;
2254                         ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2255                         KKASSERT(RB_EMPTY(&ip->rec_tree));
2256
2257                         /*
2258                          * Set delete_tid in both the frontend and backend
2259                          * copy of the inode record.  The DELETED flag handles
2260                          * this; do not set DDIRTY.
2261                          */
2262                         ip->ino_leaf.base.delete_tid = trans.tid;
2263                         ip->sync_ino_leaf.base.delete_tid = trans.tid;
2264                         ip->ino_leaf.delete_ts = trans.time32;
2265                         ip->sync_ino_leaf.delete_ts = trans.time32;
2266
2267
2268                         /*
2269                          * Adjust the inode count in the volume header
2270                          */
2271                         if (ip->flags & HAMMER_INODE_ONDISK) {
2272                                 hammer_modify_volume_field(&trans,
2273                                                            trans.rootvol,
2274                                                            vol0_stat_inodes);
2275                                 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
2276                                 hammer_modify_volume_done(trans.rootvol);
2277                         }
2278                 } else {
2279                         Debugger("hammer_ip_delete_clean errored");
2280                 }
2281         }
2282
2283         ip->sync_flags &= ~HAMMER_INODE_BUFS;
2284
2285         if (error)
2286                 Debugger("RB_SCAN errored");
2287
2288 defer_buffer_flush:
2289         /*
2290          * Now update the inode's on-disk inode-data and/or on-disk record.
2291          * DELETED and ONDISK are managed only in ip->flags.
2292          *
2293          * In the case of a deferred buffer flush we still update the on-disk
2294          * inode to satisfy visibility requirements if there happen to be
2295          * directory dependencies.
2296          */
2297         switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
2298         case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
2299                 /*
2300                  * If deleted and on-disk, don't set any additional flags.
2301                  * The delete flag takes care of things.
2302                  *
2303                  * Clear flags which may have been set by the frontend.
2304                  */
2305                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2306                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2307                                     HAMMER_INODE_DELETING);
2308                 break;
2309         case HAMMER_INODE_DELETED:
2310                 /*
2311                  * Take care of the case where a deleted inode was never
2312                  * flushed to the disk in the first place.
2313                  *
2314                  * Clear flags which may have been set by the frontend.
2315                  */
2316                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2317                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2318                                     HAMMER_INODE_DELETING);
2319                 while (RB_ROOT(&ip->rec_tree)) {
2320                         hammer_record_t record = RB_ROOT(&ip->rec_tree);
2321                         hammer_ref(&record->lock);
2322                         KKASSERT(record->lock.refs == 1);
2323                         record->flags |= HAMMER_RECF_DELETED_FE;
2324                         record->flags |= HAMMER_RECF_DELETED_BE;
2325                         hammer_rel_mem_record(record);
2326                 }
2327                 break;
2328         case HAMMER_INODE_ONDISK:
2329                 /*
2330                  * If already on-disk, do not set any additional flags.
2331                  */
2332                 break;
2333         default:
2334                 /*
2335                  * If not on-disk and not deleted, set DDIRTY to force
2336                  * an initial record to be written.
2337                  *
2338                  * Also set the create_tid in both the frontend and backend
2339                  * copy of the inode record.
2340                  */
2341                 ip->ino_leaf.base.create_tid = trans.tid;
2342                 ip->ino_leaf.create_ts = trans.time32;
2343                 ip->sync_ino_leaf.base.create_tid = trans.tid;
2344                 ip->sync_ino_leaf.create_ts = trans.time32;
2345                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
2346                 break;
2347         }
2348
2349         /*
2350          * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
2351          * is already on-disk the old record is marked as deleted.
2352          *
2353          * If DELETED is set hammer_update_inode() will delete the existing
2354          * record without writing out a new one.
2355          *
2356          * If *ONLY* the ITIMES flag is set we can update the record in-place.
2357          */
2358         if (ip->flags & HAMMER_INODE_DELETED) {
2359                 error = hammer_update_inode(&cursor, ip);
2360         } else 
2361         if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
2362             (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
2363                 error = hammer_update_itimes(&cursor, ip);
2364         } else
2365         if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
2366                 error = hammer_update_inode(&cursor, ip);
2367         }
2368         if (error)
2369                 Debugger("hammer_update_itimes/inode errored");
2370 done:
2371         /*
2372          * Save the TID we used to sync the inode with to make sure we
2373          * do not improperly reuse it.
2374          */
2375         hammer_done_cursor(&cursor);
2376         hammer_done_transaction(&trans);
2377         return(error);
2378 }
2379
2380 /*
2381  * This routine is called when the OS is no longer actively referencing
2382  * the inode (but might still be keeping it cached), or when releasing
2383  * the last reference to an inode.
2384  *
2385  * At this point if the inode's nlinks count is zero we want to destroy
2386  * it, which may mean destroying it on-media too.
2387  */
2388 void
2389 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
2390 {
2391         struct vnode *vp;
2392
2393         /*
2394          * Set the DELETING flag when the link count drops to 0 and the
2395          * OS no longer has any opens on the inode.
2396          *
2397          * The backend will clear DELETING (a mod flag) and set DELETED
2398          * (a state flag) when it is actually able to perform the
2399          * operation.
2400          */
2401         if (ip->ino_data.nlinks == 0 &&
2402             (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
2403                 ip->flags |= HAMMER_INODE_DELETING;
2404                 ip->flags |= HAMMER_INODE_TRUNCATED;
2405                 ip->trunc_off = 0;
2406                 vp = NULL;
2407                 if (getvp) {
2408                         if (hammer_get_vnode(ip, &vp) != 0)
2409                                 return;
2410                 }
2411
2412                 /*
2413                  * Final cleanup
2414                  */
2415                 if (ip->vp) {
2416                         vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
2417                         vnode_pager_setsize(ip->vp, 0);
2418                 }
2419                 if (getvp) {
2420                         vput(vp);
2421                 }
2422         }
2423 }
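/*
 * A rough sketch (not compiled) of how the inactive/reclaim path is
 * expected to drive the check above; the exact caller and ordering are
 * assumptions for illustration only.
 */
#if 0
static void
example_inactive_path(hammer_inode_t ip)
{
	/* mark for destruction if nlinks == 0 and no opens remain */
	hammer_inode_unloadable_check(ip, 0);

	/* push any remaining dirty state at the flusher */
	if (ip->flags & HAMMER_INODE_MODMASK)
		hammer_flush_inode(ip, 0);
}
#endif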
2424
2425 /*
2426  * Re-test an inode when a dependency has gone away to see if we
2427  * can chain flush it.
2428  */
2429 void
2430 hammer_test_inode(hammer_inode_t ip)
2431 {
2432         if (ip->flags & HAMMER_INODE_REFLUSH) {
2433                 ip->flags &= ~HAMMER_INODE_REFLUSH;
2434                 hammer_ref(&ip->lock);
2435                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
2436                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
2437                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2438                 } else {
2439                         hammer_flush_inode(ip, 0);
2440                 }
2441                 hammer_rel_inode(ip, 0);
2442         }
2443 }
2444
2445 /*
2446  * Clear the RECLAIM flag on an inode.  This occurs when the inode is
2447  * reassociated with a vp or just before it gets freed.
2448  *
2449  * Wake up one thread blocked waiting on reclaims to complete.  Note that
2450  * the inode the thread is waiting on behalf of is a different inode than
2451  * the inode we are called with.  This is to create a pipeline.
2452  */
2453 static void
2454 hammer_inode_wakereclaims(hammer_inode_t ip)
2455 {
2456         struct hammer_reclaim *reclaim;
2457         hammer_mount_t hmp = ip->hmp;
2458
2459         if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
2460                 return;
2461
2462         --hammer_count_reclaiming;
2463         --hmp->inode_reclaims;
2464         ip->flags &= ~HAMMER_INODE_RECLAIM;
2465
2466         if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
2467                 TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
2468                 reclaim->okydoky = 1;
2469                 wakeup(reclaim);
2470         }
2471 }
2472
2473 /*
2474  * Set up our reclaim pipeline.  We only let so many detached (and dirty)
2475  * inodes build up before we start blocking.
2476  *
2477  * When we block we don't care *which* inode has finished reclaiming,
2478  * as lone as one does.  This is somewhat heuristical... we also put a
2479  * as long as one does.  This is somewhat heuristic... we also put a
2480  */
2481 void
2482 hammer_inode_waitreclaims(hammer_mount_t hmp)
2483 {
2484         struct hammer_reclaim reclaim;
2485         int delay;
2486
2487         if (hmp->inode_reclaims > HAMMER_RECLAIM_WAIT) {
2488                 reclaim.okydoky = 0;
2489                 TAILQ_INSERT_TAIL(&hmp->reclaim_list,
2490                                   &reclaim, entry);
2491         } else {
2492                 reclaim.okydoky = 1;
2493         }
2494
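        /*
         * Illustrative arithmetic for the delay computed below: if
         * inode_reclaims sits at 1.5x HAMMER_RECLAIM_WAIT, the excess is
         * half the limit and the delay works out to hz/2 ticks, i.e. we
         * are willing to sleep roughly half a second before giving up
         * and removing ourselves from the list.
         */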
2495         if (reclaim.okydoky == 0) {
2496                 delay = (hmp->inode_reclaims - HAMMER_RECLAIM_WAIT) * hz /
2497                         HAMMER_RECLAIM_WAIT;
2498                 if (delay >= 0)
2499                         tsleep(&reclaim, 0, "hmrrcm", delay + 1);
2500                 if (reclaim.okydoky == 0)
2501                         TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
2502         }
2503 }
2504