/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.109 2008/08/06 15:38:58 dillon Exp $
 */

#include "hammer.h"
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static int      hammer_unload_inode(struct hammer_inode *ip);
static void     hammer_free_inode(hammer_inode_t ip);
static void     hammer_flush_inode_core(hammer_inode_t ip,
                                        hammer_flush_group_t flg, int flags);
static int      hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int      hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int      hammer_setup_parent_inodes(hammer_inode_t ip,
                                        hammer_flush_group_t flg);
static int      hammer_setup_parent_inodes_helper(hammer_record_t record,
                                        hammer_flush_group_t flg);
static void     hammer_inode_wakereclaims(hammer_inode_t ip);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif

/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_localization < ip2->obj_localization)
                return(-1);
        if (ip1->obj_localization > ip2->obj_localization)
                return(1);
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}

/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_localization < ip->obj_localization)
                return(-1);
        if (info->obj_localization > ip->obj_localization)
                return(1);
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}

/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
        hammer_inode_info_t info = data;

        if (ip->obj_localization > info->obj_localization)
                return(1);
        if (ip->obj_localization < info->obj_localization)
                return(-1);
        if (ip->obj_id > info->obj_id)
                return(1);
        if (ip->obj_id < info->obj_id)
                return(-1);
        return(0);
}

/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
        u_int32_t localization = *(u_int32_t *)data;
        if (ip->obj_localization > localization)
                return(1);
        if (ip->obj_localization < localization)
                return(-1);
        return(0);
}

/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
        if (p1->localization < p2->localization)
                return(-1);
        if (p1->localization > p2->localization)
                return(1);
        return(0);
}


RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
             hammer_pfs_rb_compare, u_int32_t, localization);
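
/*
 * The RB_GENERATE* macros above expand the compare routines into typed
 * red-black tree operations.  A minimal usage sketch, patterned on the
 * lookups that appear later in this file:
 *
 *      ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
 *      pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
 */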

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(ap->a_vp);
                return(0);
        }

        /*
         * If the inode no longer has visibility in the filesystem try to
         * recycle it immediately, even if the inode is dirty.  Recycling
         * it quickly allows the system to reclaim buffer cache and VM
         * resources which can matter a lot in a heavily loaded system.
         *
         * This can deadlock in vfsync() if we aren't careful.
         *
         * Do not queue the inode to the flusher if we still have visibility,
         * otherwise namespace calls such as chmod will unnecessarily generate
         * multiple inode updates.
         */
        hammer_inode_unloadable_check(ip, 0);
        if (ip->ino_data.nlinks == 0) {
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
                vrecycle(ap->a_vp);
        }
        return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct vnode *vp;

        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
                vp->v_data = NULL;
                ip->vp = NULL;

                if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
                        ++hammer_count_reclaiming;
                        ++hmp->inode_reclaims;
                        ip->flags |= HAMMER_INODE_RECLAIM;

                        /*
                         * Poke the flusher.  If we don't do this programs
                         * will start to stall on the reclaiming count.
                         */
                        if (hmp->inode_reclaims > HAMMER_RECLAIM_FLUSH &&
                           (hmp->inode_reclaims & 255) == 0) {
                               hammer_flusher_async(hmp, NULL);
                        }
                }
                hammer_rel_inode(ip, 1);
        }
        return(0);
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
        hammer_mount_t hmp;
        struct vnode *vp;
        int error = 0;
        u_int8_t obj_type;

        hmp = ip->hmp;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;

                        obj_type = ip->ino_data.obj_type;
                        vp->v_type = hammer_get_vnode_type(obj_type);

                        hammer_inode_wakereclaims(ip);

                        switch(ip->ino_data.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         *
                         * Pseudo-filesystem roots also do not count.
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT &&
                            ip->obj_asof == hmp->asof &&
                            ip->obj_localization == 0) {
                                vp->v_flag |= VROOT;
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
                        if (vp->v_type == VREG)
                                vinitvmio(vp, ip->ino_data.size);
                        break;
                }

                /*
                 * loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp)
                                break;
                        vput(vp);
                }
        }
        *vpp = vp;
        return(error);
}
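
/*
 * Illustrative call pattern (the actual callers live in the vnops
 * frontend, e.g. hammer_vnops.c, not in this file): once an inode has
 * been obtained via hammer_get_inode(), something like
 *
 *      error = hammer_get_vnode(ip, &vp);
 *
 * attaches and returns the locked vnode.
 */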

/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference them, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
                            int (*callback)(hammer_inode_t ip, void *data),
                            void *data)
{
        hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
                                   hammer_inode_info_cmp_all_history,
                                   callback, iinfo);
}

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;


        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), M_HAMMER_INO, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        if (hmp->ronly)
                ip->flags |= HAMMER_INODE_RO;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Locate the on-disk inode.  If this is a PFS root we always
         * access the current version of the root inode and (if it is not
         * a master) always access information under it with a snapshot
         * TID.
         */
retry:
        hammer_init_cursor(trans, &cursor, (dip ? &dip->cache[0] : NULL), NULL);
        cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;

        cursor.asof = iinfo.obj_asof;
        cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
                       HAMMER_CURSOR_ASOF;

        *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
                ip->ino_data = cursor.data->inode;

                /*
                 * cache[0] tries to cache the location of the object inode.
                 * The assumption is that it is near the directory inode.
                 *
                 * cache[1] tries to cache the location of the object data.
                 * The assumption is that it is near the directory data.
                 */
                hammer_cache_node(&ip->cache[0], cursor.node);
                if (dip && dip->cache[1].node)
                        hammer_cache_node(&ip->cache[1], dip->cache[1].node);

                /*
                 * The file should not contain any data past the file size
                 * stored in the inode.  Setting save_trunc_off to the
                 * file size instead of max reduces B-Tree lookup overheads
                 * on append by allowing the flusher to avoid checking for
                 * record overwrites.
                 */
                ip->save_trunc_off = ip->ino_data.size;

                /*
                 * Locate and assign the pseudofs management structure to
                 * the inode.
                 */
                if (dip && dip->obj_localization == ip->obj_localization) {
                        ip->pfsm = dip->pfsm;
                        hammer_ref(&ip->pfsm->lock);
                } else {
                        ip->pfsm = hammer_load_pseudofs(trans,
                                                        ip->obj_localization,
                                                        errorp);
                        *errorp = 0;    /* ignore ENOENT */
                }
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }

                hammer_free_inode(ip);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);
        return (ip);
}
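
/*
 * A typical call, as used by the PFS support routines later in this
 * file: look up the filesystem root inode as-of the most recent
 * transaction id.
 *
 *      ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
 *                            HAMMER_MAX_TID, HAMMER_DEF_LOCALIZATION,
 *                            0, &error);
 */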

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred, hammer_inode_t dip,
                    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;
        int error;

        hmp = trans->hmp;

        ip = kmalloc(sizeof(*ip), M_HAMMER_INO, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;

        if (pfsm) {
                KKASSERT(pfsm->localization != 0);
                ip->obj_id = HAMMER_OBJID_ROOT;
                ip->obj_localization = pfsm->localization;
        } else {
                KKASSERT(dip != NULL);
                ip->obj_id = hammer_alloc_objid(hmp, dip);
                ip->obj_localization = dip->obj_localization;
        }

        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY |
                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;

        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        /* ip->save_trunc_off = 0; (already zero) */
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        ip->ino_data.atime = trans->time;
        ip->ino_data.mtime = trans->time;
        ip->ino_data.size = 0;
        ip->ino_data.nlinks = 0;

        /*
         * A nohistory designator on the parent directory is inherited by
         * the child.  We will do this even for pseudo-fs creation... the
         * sysad can turn it off.
         */
        if (dip) {
                ip->ino_data.uflags = dip->ino_data.uflags &
                                      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
        }

        ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_leaf.base.localization = ip->obj_localization +
                                         HAMMER_LOCALIZE_INODE;
        ip->ino_leaf.base.obj_id = ip->obj_id;
        ip->ino_leaf.base.key = 0;
        ip->ino_leaf.base.create_tid = 0;
        ip->ino_leaf.base.delete_tid = 0;
        ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;

        /*
         * Setup the ".." pointer.  This only needs to be done for directories
         * but we do it for all objects as a recovery aid.
         */
        if (dip)
                ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
#if 0
        /*
         * The parent_obj_localization field only applies to pseudo-fs roots.
         * XXX this is no longer applicable, PFSs are no longer directly
         * tied into the parent's directory structure.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
            ip->obj_id == HAMMER_OBJID_ROOT) {
                ip->ino_data.ext.obj.parent_obj_localization =
                                                dip->obj_localization;
        }
#endif

        switch(ip->ino_leaf.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        if (dip) {
                xuid = hammer_to_unix_xid(&dip->ino_data.uid);
                xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
                                             xuid, cred, &vap->va_mode);
        } else {
                xuid = 0;
        }
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
        else
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
        else if (dip)
                ip->ino_data.gid = dip->ino_data.gid;

        hammer_ref(&ip->lock);

        if (pfsm) {
                ip->pfsm = pfsm;
                hammer_ref(&pfsm->lock);
                error = 0;
        } else if (dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
                error = 0;
        } else {
                ip->pfsm = hammer_load_pseudofs(trans,
                                                ip->obj_localization,
                                                &error);
                error = 0;      /* ignore ENOENT */
        }

        if (error) {
                hammer_free_inode(ip);
                ip = NULL;
        } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
                /* not reached */
                hammer_free_inode(ip);
        }
        *ipp = ip;
        return(error);
}

/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
        KKASSERT(ip->lock.refs == 1);
        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        hammer_inode_wakereclaims(ip);
        if (ip->objid_cache)
                hammer_clear_objid(ip);
        --hammer_count_inodes;
        --ip->hmp->count_inodes;
        if (ip->pfsm) {
                hammer_rel_pseudofs(ip->hmp, ip->pfsm);
                ip->pfsm = NULL;
        }
        kfree(ip, M_HAMMER_INO);
        ip = NULL;
}

/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
                     u_int32_t localization, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_inode_t ip;
        hammer_pseudofs_inmem_t pfsm;
        struct hammer_cursor cursor;
        int bytes;

retry:
        pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
        if (pfsm) {
                hammer_ref(&pfsm->lock);
                *errorp = 0;
                return(pfsm);
        }

        /*
         * PFS records are stored in the root inode (not the PFS root inode,
         * but the real root).  Avoid an infinite recursion if loading
         * the PFS for the real root.
         */
        if (localization) {
                ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
                                      HAMMER_MAX_TID,
                                      HAMMER_DEF_LOCALIZATION, 0, errorp);
        } else {
                ip = NULL;
        }

        pfsm = kmalloc(sizeof(*pfsm), M_HAMMER, M_WAITOK | M_ZERO);
        pfsm->localization = localization;
        pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
        pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

        hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
        cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        if (ip)
                *errorp = hammer_ip_lookup(&cursor);
        else
                *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == 0) {
                *errorp = hammer_ip_resolve_data(&cursor);
                if (*errorp == 0) {
                        if (cursor.data->pfsd.mirror_flags &
                            HAMMER_PFSD_DELETED) {
                                *errorp = ENOENT;
                        } else {
                                bytes = cursor.leaf->data_len;
                                if (bytes > sizeof(pfsm->pfsd))
                                        bytes = sizeof(pfsm->pfsd);
                                bcopy(cursor.data, &pfsm->pfsd, bytes);
                        }
                }
        }
        hammer_done_cursor(&cursor);

        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_ref(&pfsm->lock);
        if (ip)
                hammer_rel_inode(ip, 0);
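        /*
         * If we raced another thread loading the same PFS the insertion
         * below fails; throw our copy away and retry the in-memory
         * lookup.
         */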
        if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
                kfree(pfsm, M_HAMMER);
                goto retry;
        }
        return(pfsm);
}

/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
        struct hammer_cursor cursor;
        hammer_record_t record;
        hammer_inode_t ip;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = pfsm->localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        error = hammer_ip_lookup(&cursor);
        if (error == 0 && hammer_cursor_inmem(&cursor)) {
                record = cursor.iprec;
                if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
                        KKASSERT(cursor.deadlk_rec == NULL);
                        hammer_ref(&record->lock);
                        cursor.deadlk_rec = record;
                        error = EDEADLK;
                } else {
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        error = 0;
                }
        }
        if (error == 0 || error == ENOENT) {
                record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
                record->type = HAMMER_MEM_RECORD_GENERAL;

                record->leaf.base.localization = ip->obj_localization +
                                                 HAMMER_LOCALIZE_MISC;
                record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
                record->leaf.base.key = pfsm->localization;
                record->leaf.data_len = sizeof(pfsm->pfsd);
                bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
                error = hammer_ip_add_record(trans, record);
        }
        hammer_done_cursor(&cursor);
        if (error == EDEADLK)
                goto retry;
        hammer_rel_inode(ip, 0);
        return(error);
}

/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
                       hammer_pseudofs_inmem_t pfsm)
{
        hammer_inode_t ip;
        struct vattr vap;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              pfsm->localization, 0, &error);
        if (ip == NULL) {
                vattr_null(&vap);
                vap.va_mode = 0755;
                vap.va_type = VDIR;
                error = hammer_create_inode(trans, &vap, cred, NULL, pfsm, &ip);
                if (error == 0) {
                        ++ip->ino_data.nlinks;
                        hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
                }
        }
        if (ip)
                hammer_rel_inode(ip, 0);
        return(error);
}

/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static
int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
        int res;

        hammer_ref(&ip->lock);
        if (ip->lock.refs == 2 && ip->vp)
                vclean_unlocked(ip->vp);
        if (ip->lock.refs == 1 && ip->vp == NULL)
                res = 0;
        else
                res = -1;       /* stop, someone is using the inode */
        hammer_rel_inode(ip, 0);
        return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
        int res;
        int try;

        for (try = res = 0; try < 4; ++try) {
                res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
                                           hammer_inode_pfs_cmp,
                                           hammer_unload_pseudofs_callback,
                                           &localization);
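                /*
                 * Accept a clean scan only on the third or later pass
                 * (try > 1), i.e. after at least two intervening flusher
                 * syncs, presumably to let inodes re-dirtied by earlier
                 * passes settle out.
                 */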
                if (res == 0 && try > 1)
                        break;
                hammer_flusher_sync(trans->hmp);
        }
        if (res != 0)
                res = ENOTEMPTY;
        return(res);
}


/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
        hammer_unref(&pfsm->lock);
        if (pfsm->lock.refs == 0) {
                RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
                kfree(pfsm, M_HAMMER);
        }
}

/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        hammer_record_t record;
        int error;
        int redirty;

retry:
        error = 0;

        /*
         * If the inode has a presence on-disk then locate it and mark
         * it deleted, setting DELONDISK.
         *
         * The record may or may not be physically deleted, depending on
         * the retention policy.
         */
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
            HAMMER_INODE_ONDISK) {
                hammer_normalize_cursor(cursor);
                cursor->key_beg.localization = ip->obj_localization +
                                               HAMMER_LOCALIZE_INODE;
                cursor->key_beg.obj_id = ip->obj_id;
                cursor->key_beg.key = 0;
                cursor->key_beg.create_tid = 0;
                cursor->key_beg.delete_tid = 0;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
                cursor->key_beg.obj_type = 0;
                cursor->asof = ip->obj_asof;
                cursor->flags &= ~HAMMER_CURSOR_INITMASK;
                cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
                cursor->flags |= HAMMER_CURSOR_BACKEND;

                error = hammer_btree_lookup(cursor);
                if (hammer_debug_inode)
                        kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

                if (error == 0) {
                        error = hammer_ip_delete_record(cursor, ip, trans->tid);
                        if (hammer_debug_inode)
                                kprintf(" error %d\n", error);
                        if (error == 0) {
                                ip->flags |= HAMMER_INODE_DELONDISK;
                        }
                        if (cursor->node)
                                hammer_cache_node(&ip->cache[0], cursor->node);
                }
                if (error == EDEADLK) {
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("IPDED %p %d\n", ip, error);
                        if (error == 0)
                                goto retry;
                }
        }

        /*
         * Ok, write out the initial record or a new record (after deleting
         * the old one), unless the DELETED flag is set.  This routine will
         * clear DELONDISK if it writes out a record.
         *
         * Update our inode statistics if this is the first application of
         * the inode on-disk.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
                /*
                 * Generate a record and write it to the media.  We clean-up
                 * the state before releasing so we do not have to set-up
                 * a flush_group.
                 */
                record = hammer_alloc_mem_record(ip, 0);
                record->type = HAMMER_MEM_RECORD_INODE;
                record->flush_state = HAMMER_FST_FLUSH;
                record->leaf = ip->sync_ino_leaf;
                record->leaf.base.create_tid = trans->tid;
                record->leaf.data_len = sizeof(ip->sync_ino_data);
                record->leaf.create_ts = trans->time32;
                record->data = (void *)&ip->sync_ino_data;
                record->flags |= HAMMER_RECF_INTERLOCK_BE;

                /*
                 * If this flag is set we cannot sync the new file size
                 * because we haven't finished related truncations.  The
                 * inode will be flushed in another flush group to finish
                 * the job.
                 */
                if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
                    ip->sync_ino_data.size != ip->ino_data.size) {
                        redirty = 1;
                        ip->sync_ino_data.size = ip->ino_data.size;
                } else {
                        redirty = 0;
                }

                for (;;) {
                        error = hammer_ip_sync_record_cursor(cursor, record);
                        if (hammer_debug_inode)
                                kprintf("GENREC %p rec %08x %d\n",
                                        ip, record->flags, error);
                        if (error != EDEADLK)
                                break;
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("GENREC reinit %d\n", error);
                        if (error)
                                break;
                }

                /*
                 * The record isn't managed by the inode's record tree,
                 * destroy it whether we succeed or fail.
                 */
                record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
                record->flags |= HAMMER_RECF_DELETED_FE | HAMMER_RECF_COMMITTED;
                record->flush_state = HAMMER_FST_IDLE;
                hammer_rel_mem_record(record);

                /*
                 * Finish up.
                 */
                if (error == 0) {
                        if (hammer_debug_inode)
                                kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
                        ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                            HAMMER_INODE_ATIME |
                                            HAMMER_INODE_MTIME);
                        ip->flags &= ~HAMMER_INODE_DELONDISK;
                        if (redirty)
                                ip->sync_flags |= HAMMER_INODE_DDIRTY;

                        /*
                         * Root volume count of inodes
                         */
                        hammer_sync_lock_sh(trans);
                        if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
                                hammer_modify_volume_field(trans,
                                                           trans->rootvol,
                                                           vol0_stat_inodes);
                                ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans->rootvol);
                                ip->flags |= HAMMER_INODE_ONDISK;
                                if (hammer_debug_inode)
                                        kprintf("NOWONDISK %p\n", ip);
                        }
                        hammer_sync_unlock(trans);
                }
        }

        /*
         * If the inode has been destroyed, clean out any left-over flags
         * that may have been set by the frontend.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                    HAMMER_INODE_ATIME |
                                    HAMMER_INODE_MTIME);
        }
        return(error);
}

/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        int error;

retry:
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
            HAMMER_INODE_ONDISK) {
                return(0);
        }

        hammer_normalize_cursor(cursor);
        cursor->key_beg.localization = ip->obj_localization +
                                       HAMMER_LOCALIZE_INODE;
        cursor->key_beg.obj_id = ip->obj_id;
        cursor->key_beg.key = 0;
        cursor->key_beg.create_tid = 0;
        cursor->key_beg.delete_tid = 0;
        cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor->key_beg.obj_type = 0;
        cursor->asof = ip->obj_asof;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_ASOF;
        cursor->flags |= HAMMER_CURSOR_GET_LEAF;
        cursor->flags |= HAMMER_CURSOR_GET_DATA;
        cursor->flags |= HAMMER_CURSOR_BACKEND;

        error = hammer_btree_lookup(cursor);
        if (error == 0) {
                hammer_cache_node(&ip->cache[0], cursor->node);
                if (ip->sync_flags & HAMMER_INODE_MTIME) {
                        /*
                         * Updating MTIME requires an UNDO.  Just cover
                         * both atime and mtime.
                         */
                        hammer_sync_lock_sh(trans);
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                     HAMMER_ITIMES_BASE(&cursor->data->inode),
                                     HAMMER_ITIMES_BYTES);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        cursor->data->inode.mtime = ip->sync_ino_data.mtime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                        hammer_sync_unlock(trans);
                } else if (ip->sync_flags & HAMMER_INODE_ATIME) {
                        /*
                         * Updating atime only can be done in-place with
                         * no UNDO.
                         */
                        hammer_sync_lock_sh(trans);
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                             NULL, 0);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                        hammer_sync_unlock(trans);
                }
                ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
        }
        if (error == EDEADLK) {
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor,
                                           &ip->cache[0], ip);
                if (error == 0)
                        goto retry;
        }
        return(error);
}

/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
        hammer_mount_t hmp = ip->hmp;

        /*
         * Handle disposition when dropping the last ref.
         */
        for (;;) {
                if (ip->lock.refs == 1) {
                        /*
                         * Determine whether on-disk action is needed for
                         * the inode's final disposition.
                         */
                        KKASSERT(ip->vp == NULL);
                        hammer_inode_unloadable_check(ip, 0);
                        if (ip->flags & HAMMER_INODE_MODMASK) {
                                if (hmp->rsv_inodes > desiredvnodes) {
                                        hammer_flush_inode(ip,
                                                           HAMMER_FLUSH_SIGNAL);
                                } else {
                                        hammer_flush_inode(ip, 0);
                                }
                        } else if (ip->lock.refs == 1) {
                                hammer_unload_inode(ip);
                                break;
                        }
                } else {
                        if (flush)
                                hammer_flush_inode(ip, 0);

                        /*
                         * The inode still has multiple refs, try to drop
                         * one ref.
                         */
                        KKASSERT(ip->lock.refs >= 1);
                        if (ip->lock.refs > 1) {
                                hammer_unref(&ip->lock);
                                break;
                        }
                }
        }
}

/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
        hammer_mount_t hmp = ip->hmp;

        KASSERT(ip->lock.refs == 1,
                ("hammer_unload_inode: %d refs\n", ip->lock.refs));
        KKASSERT(ip->vp == NULL);
        KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
        KKASSERT(ip->cursor_ip_refs == 0);
        KKASSERT(ip->lock.lockcount == 0);
        KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

        KKASSERT(RB_EMPTY(&ip->rec_tree));
        KKASSERT(TAILQ_EMPTY(&ip->target_list));

        RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

        hammer_free_inode(ip);
        return(0);
}

/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
{
        hammer_record_t rec;

        /*
         * Get rid of the inodes in-memory records, regardless of their
         * state, and clear the mod-mask.
         */
        while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
                TAILQ_REMOVE(&ip->target_list, rec, target_entry);
                rec->target_ip = NULL;
                if (rec->flush_state == HAMMER_FST_SETUP)
                        rec->flush_state = HAMMER_FST_IDLE;
        }
        while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
                if (rec->flush_state == HAMMER_FST_FLUSH)
                        --rec->flush_group->refs;
                else
                        hammer_ref(&rec->lock);
                KKASSERT(rec->lock.refs == 1);
                rec->flush_state = HAMMER_FST_IDLE;
                rec->flush_group = NULL;
                rec->flags |= HAMMER_RECF_DELETED_FE;
                rec->flags |= HAMMER_RECF_DELETED_BE;
                hammer_rel_mem_record(rec);
        }
        ip->flags &= ~HAMMER_INODE_MODMASK;
        ip->sync_flags &= ~HAMMER_INODE_MODMASK;
        KKASSERT(ip->vp == NULL);

        /*
         * Remove the inode from any flush group, force it idle.  FLUSH
         * and SETUP states have an inode ref.
         */
        switch(ip->flush_state) {
        case HAMMER_FST_FLUSH:
                TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
                --ip->flush_group->refs;
                ip->flush_group = NULL;
                /* fall through */
        case HAMMER_FST_SETUP:
                hammer_unref(&ip->lock);
                ip->flush_state = HAMMER_FST_IDLE;
                /* fall through */
        case HAMMER_FST_IDLE:
                break;
        }

        /*
         * There shouldn't be any associated vnode.  The unload needs at
         * least one ref, if we do have a vp steal its ip ref.
         */
        if (ip->vp) {
                kprintf("hammer_destroy_inode_callback: Unexpected "
                        "vnode association ip %p vp %p\n", ip, ip->vp);
                ip->vp->v_data = NULL;
                ip->vp = NULL;
        } else {
                hammer_ref(&ip->lock);
        }
        hammer_unload_inode(ip);
        return(0);
}

/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
        hammer_mount_t hmp = ip->hmp;

        if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
                ip->flags |= HAMMER_INODE_RO;
        else
                ip->flags &= ~HAMMER_INODE_RO;
        return(0);
}

/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
        /*
         * ronly of 0 or 2 does not trigger assertion.
         * 2 is a special error state
         */
        KKASSERT(ip->hmp->ronly != 1 ||
                  (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
                            HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
                            HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
        if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
                ip->flags |= HAMMER_INODE_RSV_INODES;
                ++ip->hmp->rsv_inodes;
        }

        ip->flags |= flags;
}
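
/*
 * Typical usage, as in hammer_mkroot_pseudofs() above: modify the
 * in-memory inode data, then flag it dirty so the flusher synchronizes
 * it to the media.
 *
 *      ++ip->ino_data.nlinks;
 *      hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
 */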
1353
1354 /*
1355  * Request that an inode be flushed.  This whole mess cannot block and may
1356  * recurse (if not synchronous).  Once requested HAMMER will attempt to
1357  * actively flush the inode until the flush can be done.
1358  *
1359  * The inode may already be flushing, or may be in a setup state.  We can
1360  * place the inode in a flushing state if it is currently idle and flag it
1361  * to reflush if it is currently flushing.
1362  *
1363  * Upon return if the inode could not be flushed due to a setup
1364  * dependancy, then it will be automatically flushed when the dependancy
1365  * is satisfied.
1366  */
1367 void
1368 hammer_flush_inode(hammer_inode_t ip, int flags)
1369 {
1370         hammer_mount_t hmp;
1371         hammer_flush_group_t flg;
1372         int good;
1373
1374         /*
1375          * next_flush_group is the first flush group we can place the inode
1376          * in.  It may be NULL.  If it becomes full we append a new flush
1377          * group and make that the next_flush_group.
1378          */
1379         hmp = ip->hmp;
1380         while ((flg = hmp->next_flush_group) != NULL) {
1381                 KKASSERT(flg->running == 0);
1382                 if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
1383                         break;
1384                 hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
1385                 hammer_flusher_async(ip->hmp, flg);
1386         }
1387         if (flg == NULL) {
1388                 flg = kmalloc(sizeof(*flg), M_HAMMER, M_WAITOK|M_ZERO);
1389                 hmp->next_flush_group = flg;
1390                 TAILQ_INIT(&flg->flush_list);
1391                 TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
1392         }
1393
1394         /*
1395          * Trivial 'nothing to flush' case.  If the inode is in a SETUP
1396          * state we have to put it back into an IDLE state so we can
1397          * drop the extra ref.
1398          *
1399  * If we have a parent dependency we must still fall through
1400          * so we can run it.
1401          */
1402         if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
1403                 if (ip->flush_state == HAMMER_FST_SETUP &&
1404                     TAILQ_EMPTY(&ip->target_list)) {
1405                         ip->flush_state = HAMMER_FST_IDLE;
1406                         hammer_rel_inode(ip, 0);
1407                 }
1408                 if (ip->flush_state == HAMMER_FST_IDLE)
1409                         return;
1410         }
1411
1412         /*
1413          * Our flush action will depend on the current state.
1414          */
1415         switch(ip->flush_state) {
1416         case HAMMER_FST_IDLE:
1417                 /*
1418                  * We have no dependencies and can flush immediately.  Some
1419                  * of our children may not be flushable so we have to re-test
1420                  * with that additional knowledge.
1421                  */
1422                 hammer_flush_inode_core(ip, flg, flags);
1423                 break;
1424         case HAMMER_FST_SETUP:
1425                 /*
1426                  * Recurse upwards through dependencies via target_list
1427                  * and start their flusher actions going if possible.
1428                  *
1429                  * 'good' is our connectivity.  -1 means we have none and
1430                  * can't flush, 0 means there weren't any dependencies, and
1431                  * 1 means we have good connectivity.
1432                  */
1433                 good = hammer_setup_parent_inodes(ip, flg);
1434
1435                 if (good >= 0) {
1436                         /*
1437                          * We can continue if good >= 0.  Determine how 
1438                          * many records under our inode can be flushed (and
1439                          * mark them).
1440                          */
1441                         hammer_flush_inode_core(ip, flg, flags);
1442                 } else {
1443                         /*
1444                          * Parent has no connectivity, tell it to flush
1445                          * us as soon as it does.
1446                          *
1447                          * The REFLUSH flag is also needed to trigger
1448                          * dependency wakeups.
1449                          */
1450                         ip->flags |= HAMMER_INODE_CONN_DOWN |
1451                                      HAMMER_INODE_REFLUSH;
1452                         if (flags & HAMMER_FLUSH_SIGNAL) {
1453                                 ip->flags |= HAMMER_INODE_RESIGNAL;
1454                                 hammer_flusher_async(ip->hmp, flg);
1455                         }
1456                 }
1457                 break;
1458         case HAMMER_FST_FLUSH:
1459                 /*
1460                  * We are already flushing, flag the inode to reflush
1461                  * if needed after it completes its current flush.
1462                  *
1463                  * The REFLUSH flag is also needed to trigger
1464                  * dependency wakeups.
1465                  */
1466                 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
1467                         ip->flags |= HAMMER_INODE_REFLUSH;
1468                 if (flags & HAMMER_FLUSH_SIGNAL) {
1469                         ip->flags |= HAMMER_INODE_RESIGNAL;
1470                         hammer_flusher_async(ip->hmp, flg);
1471                 }
1472                 break;
1473         }
1474 }
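
/*
 * Informational summary of the flush state transitions driven above,
 * derived from the code in this file:
 *
 *	IDLE  -> FLUSH	modified inode with no blocking dependencies
 *	SETUP -> FLUSH	parent connectivity resolved (good >= 0)
 *	FLUSH -> FLUSH	REFLUSH/RESIGNAL re-arm after the current cycle
 *	FLUSH -> SETUP	backend completion with work left over
 *	FLUSH -> IDLE	backend completion, inode fully clean
 *
 * The last two transitions occur in hammer_flush_inode_done().
 */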
1475
1476 /*
1477  * Scan ip->target_list, which is a list of records owned by PARENTS to our
1478  * ip which reference our ip.
1479  *
1480  * XXX This is a huge mess of recursive code, but not one bit of it blocks
1481  *     so for now do not ref/deref the structures.  Note that if we use the
1482  *     ref/rel code later, the rel CAN block.
1483  */
1484 static int
1485 hammer_setup_parent_inodes(hammer_inode_t ip, hammer_flush_group_t flg)
1486 {
1487         hammer_record_t depend;
1488         int good;
1489         int r;
1490
1491         good = 0;
1492         TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
1493                 r = hammer_setup_parent_inodes_helper(depend, flg);
1494                 KKASSERT(depend->target_ip == ip);
1495                 if (r < 0 && good == 0)
1496                         good = -1;
1497                 if (r > 0)
1498                         good = 1;
1499         }
1500         return(good);
1501 }
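
/*
 * Worked example (informational): if three parent records return
 * r = 0, -1, and 1 from the helper, the loop above ends with good = 1.
 * Any record providing connectivity wins; -1 is only reported when no
 * record gives us connectivity at all.
 */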
1502
1503 /*
1504  * This helper function takes a record representing the dependency between
1505  * the parent inode and child inode.
1506  *
1507  * record->ip           = parent inode
1508  * record->target_ip    = child inode
1509  * 
1510  * We are asked to recurse upwards and convert the record from SETUP
1511  * to FLUSH if possible.
1512  *
1513  * Return 1 if the record gives us connectivity
1514  *
1515  * Return 0 if the record is not relevant 
1516  *
1517  * Return -1 if we can't resolve the dependency and there is no connectivity.
1518  */
1519 static int
1520 hammer_setup_parent_inodes_helper(hammer_record_t record,
1521                                   hammer_flush_group_t flg)
1522 {
1523         hammer_mount_t hmp;
1524         hammer_inode_t pip;
1525         int good;
1526
1527         KKASSERT(record->flush_state != HAMMER_FST_IDLE);
1528         pip = record->ip;
1529         hmp = pip->hmp;
1530
1531         /*
1532          * If the record is already flushing, is it in our flush group?
1533          *
1534          * If it is in our flush group but it is a general record or a 
1535          * delete-on-disk, it does not improve our connectivity (return 0),
1536          * and if the target inode is not trying to destroy itself we can't
1537          * allow the operation yet anyway (the second return -1).
1538          */
1539         if (record->flush_state == HAMMER_FST_FLUSH) {
1540                 /*
1541                  * If not in our flush group ask the parent to reflush
1542                  * us as soon as possible.
1543                  */
1544                 if (record->flush_group != flg) {
1545                         pip->flags |= HAMMER_INODE_REFLUSH;
1546                         record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1547                         return(-1);
1548                 }
1549
1550                 /*
1551                  * If in our flush group everything is already set up,
1552                  * just return whether the record will improve our
1553                  * visibility or not.
1554                  */
1555                 if (record->type == HAMMER_MEM_RECORD_ADD)
1556                         return(1);
1557                 return(0);
1558         }
1559
1560         /*
1561  * It must be a setup record.  Try to resolve the setup dependencies
1562          * by recursing upwards so we can place ip on the flush list.
1563          */
1564         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1565
1566         good = hammer_setup_parent_inodes(pip, flg);
1567
1568         /*
1569          * If good < 0 the parent has no connectivity and we cannot safely
1570          * flush the directory entry, which also means we can't flush our
1571          * ip.  Flag the parent and us for downward recursion once the
1572          * parent's connectivity is resolved.
1573          */
1574         if (good < 0) {
1575                 /* pip->flags |= HAMMER_INODE_CONN_DOWN; set by recursion */
1576                 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1577                 return(good);
1578         }
1579
1580         /*
1581          * We are go, place the parent inode in a flushing state so we can
1582          * place its record in a flushing state.  Note that the parent
1583          * may already be flushing.  The record must be in the same flush
1584          * group as the parent.
1585          */
1586         if (pip->flush_state != HAMMER_FST_FLUSH)
1587                 hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
1588         KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
1589         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1590
1591 #if 0
1592         if (record->type == HAMMER_MEM_RECORD_DEL &&
1593             (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
1594                 /*
1595                  * Regardless of flushing state we cannot sync this path if the
1596                  * record represents a delete-on-disk but the target inode
1597                  * is not ready to sync its own deletion.
1598                  *
1599                  * XXX need to count effective nlinks to determine whether
1600                  * the flush is ok, otherwise removing a hardlink will
1601                  * just leave the DEL record to rot.
1602                  */
1603                 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
1604                 return(-1);
1605         } else
1606 #endif
1607         if (pip->flush_group == flg) {
1608                 /*
1609                  * If the parent is in the same flush group as us we can
1610                  * just set the record to a flushing state and we are
1611                  * done.
1612                  */
1613                 record->flush_state = HAMMER_FST_FLUSH;
1614                 record->flush_group = flg;
1615                 ++record->flush_group->refs;
1616                 hammer_ref(&record->lock);
1617
1618                 /*
1619                  * A general directory-add contributes to our visibility.
1620                  *
1621                  * Otherwise it is probably a directory-delete or 
1622                  * delete-on-disk record and does not contribute to our
1623                  * visibility (but we can still flush it).
1624                  */
1625                 if (record->type == HAMMER_MEM_RECORD_ADD)
1626                         return(1);
1627                 return(0);
1628         } else {
1629                 /*
1630                  * If the parent is not in our flush group we cannot
1631                  * flush this record yet, there is no visibility.
1632                  * We tell the parent to reflush and mark ourselves
1633                  * so the parent knows it should flush us too.
1634                  */
1635                 pip->flags |= HAMMER_INODE_REFLUSH;
1636                 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1637                 return(-1);
1638         }
1639 }
1640
1641 /*
1642  * This is the core routine placing an inode into the FST_FLUSH state.
1643  */
1644 static void
1645 hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
1646 {
1647         int go_count;
1648
1649         /*
1650          * Set flush state and prevent the flusher from cycling into
1651          * An inode leaving the idle state gets an extra reference.
1652          * Inodes not in the idle state get an extra reference.
1653          */
1654         KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
1655         if (ip->flush_state == HAMMER_FST_IDLE)
1656                 hammer_ref(&ip->lock);
1657         ip->flush_state = HAMMER_FST_FLUSH;
1658         ip->flush_group = flg;
1659         ++ip->hmp->flusher.group_lock;
1660         ++ip->hmp->count_iqueued;
1661         ++hammer_count_iqueued;
1662         ++flg->total_count;
1663
1664         /*
1665          * We need to be able to vfsync/truncate from the backend.
1666          */
1667         KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
1668         if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
1669                 ip->flags |= HAMMER_INODE_VHELD;
1670                 vref(ip->vp);
1671         }
1672
1673         /*
1674          * Figure out how many in-memory records we can actually flush
1675          * (not including inode meta-data, buffers, etc).
1676          */
1677         if (flags & HAMMER_FLUSH_RECURSION) {
1678                 /*
1679                  * If this is an upwards recursion we do not want to
1680                  * recurse down again!
1681                  */
1682                 go_count = 1;
1683         } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
1684                 /*
1685                  * No new records are added if we must complete a flush
1686                  * from a previous cycle, but we do have to move the records
1687                  * from the previous cycle to the current one.
1688                  */
1689 #if 0
1690                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1691                                    hammer_syncgrp_child_callback, NULL);
1692 #endif
1693                 go_count = 1;
1694         } else {
1695                 /*
1696                  * Normal flush, scan records and bring them into the flush.
1697                  * Directory adds and deletes are usually skipped (they are
1698                  * grouped with the related inode rather than with the
1699                  * directory).
1700                  *
1701                  * go_count can be negative, which means the scan aborted
1702                  * due to the flush group being over-full and we should
1703                  * flush what we have.
1704                  */
1705                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1706                                    hammer_setup_child_callback, NULL);
1707         }
1708
1709         /*
1710          * This is a more involved test that includes go_count.  If we
1711          * can't flush, flag the inode and return.  If go_count is 0 we
1712          * are unable to flush any records in our rec_tree and
1713          * must ignore the XDIRTY flag.
1714          */
1715         if (go_count == 0) {
1716                 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
1717                         --ip->hmp->count_iqueued;
1718                         --hammer_count_iqueued;
1719
1720                         --flg->total_count;
1721                         ip->flush_state = HAMMER_FST_SETUP;
1722                         ip->flush_group = NULL;
1723                         if (ip->flags & HAMMER_INODE_VHELD) {
1724                                 ip->flags &= ~HAMMER_INODE_VHELD;
1725                                 vrele(ip->vp);
1726                         }
1727
1728                         /*
1729                          * REFLUSH is needed to trigger dependency wakeups
1730                          * when an inode is in SETUP.
1731                          */
1732                         ip->flags |= HAMMER_INODE_REFLUSH;
1733                         if (flags & HAMMER_FLUSH_SIGNAL) {
1734                                 ip->flags |= HAMMER_INODE_RESIGNAL;
1735                                 hammer_flusher_async(ip->hmp, flg);
1736                         }
1737                         if (--ip->hmp->flusher.group_lock == 0)
1738                                 wakeup(&ip->hmp->flusher.group_lock);
1739                         return;
1740                 }
1741         }
1742
1743         /*
1744          * Snapshot the state of the inode for the backend flusher.
1745          *
1746          * We continue to retain save_trunc_off even when all truncations
1747          * have been resolved as an optimization to determine if we can
1748          * skip the B-Tree lookup for overwrite deletions.
1749          *
1750          * NOTE: The DELETING flag is a mod flag, but it is also sticky,
1751          * and stays in ip->flags.  Once set, it stays set until the
1752          * inode is destroyed.
1753          *
1754          * NOTE: If a truncation from a previous flush cycle had to be
1755          * continued into this one, the TRUNCATED flag will still be
1756          * set in sync_flags as will WOULDBLOCK.  When this occurs
1757          * we CANNOT safely integrate a new truncation from the front-end
1758          * because there may be data records in-memory assigned a flush
1759          * state from the previous cycle that are supposed to be flushed
1760          * before the next frontend truncation.
1761          */
1762         if ((ip->flags & (HAMMER_INODE_TRUNCATED | HAMMER_INODE_WOULDBLOCK)) ==
1763             HAMMER_INODE_TRUNCATED) {
1764                 KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
1765                 ip->sync_trunc_off = ip->trunc_off;
1766                 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
1767                 ip->flags &= ~HAMMER_INODE_TRUNCATED;
1768                 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
1769
1770                 /*
1771                  * The save_trunc_off used to cache whether the B-Tree
1772                  * holds any records past that point is not used until
1773                  * after the truncation has succeeded, so we can safely
1774                  * set it now.
1775                  */
1776                 if (ip->save_trunc_off > ip->sync_trunc_off)
1777                         ip->save_trunc_off = ip->sync_trunc_off;
1778         }
1779         ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
1780                            ~HAMMER_INODE_TRUNCATED);
1781         ip->sync_ino_leaf = ip->ino_leaf;
1782         ip->sync_ino_data = ip->ino_data;
1783         ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
1784 #ifdef DEBUG_TRUNCATE
1785         if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
1786                 kprintf("truncateS %016llx\n", ip->sync_trunc_off);
1787 #endif
1788
1789         /*
1790          * The flusher list inherits our inode and reference.
1791          */
1792         KKASSERT(flg->running == 0);
1793         TAILQ_INSERT_TAIL(&flg->flush_list, ip, flush_entry);
1794         if (--ip->hmp->flusher.group_lock == 0)
1795                 wakeup(&ip->hmp->flusher.group_lock);
1796
1797         if (flags & HAMMER_FLUSH_SIGNAL) {
1798                 hammer_flusher_async(ip->hmp, flg);
1799         }
1800 }
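
/*
 * Quick reference for the go_count computed above (informational):
 *
 *	> 0	records were placed in or found in FST_FLUSH; proceed.
 *	  0	nothing was flushable; XDIRTY is ignored and the inode
 *		may be bounced back to FST_SETUP.
 *	< 0	the scan aborted because the flush group is over-full;
 *		flush what we have and reflush the rest later.
 */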
1801
1802 /*
1803  * Callback for scan of ip->rec_tree.  Try to include each record in our
1804  * flush.  ip->flush_group has been set but the inode has not yet been
1805  * moved into a flushing state.
1806  *
1807  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
1808  * both inodes.
1809  *
1810  * We return 1 for any record placed or found in FST_FLUSH, which prevents
1811  * the caller from shortcutting the flush.
1812  */
1813 static int
1814 hammer_setup_child_callback(hammer_record_t rec, void *data)
1815 {
1816         hammer_flush_group_t flg;
1817         hammer_inode_t target_ip;
1818         hammer_inode_t ip;
1819         int r;
1820
1821         /*
1822          * Deleted records are ignored.  Note that the flush detects deleted
1823          * front-end records at multiple points to deal with races.  This is
1824          * just the first line of defense.  The only time DELETED_FE cannot
1825          * be set is when HAMMER_RECF_INTERLOCK_BE is set. 
1826          *
1827          * Don't get confused between record deletion and, say, directory
1828          * entry deletion.  The deletion of a directory entry that is on
1829          * the media has nothing to do with the record deletion flags.
1830          */
1831         if (rec->flags & (HAMMER_RECF_DELETED_FE|HAMMER_RECF_DELETED_BE)) {
1832                 if (rec->flush_state == HAMMER_FST_FLUSH) {
1833                         KKASSERT(rec->flush_group == rec->ip->flush_group);
1834                         r = 1;
1835                 } else {
1836                         r = 0;
1837                 }
1838                 return(r);
1839         }
1840
1841         /*
1842          * If the record is in an idle state it has no dependencies and
1843          * can be flushed.
1844          */
1845         ip = rec->ip;
1846         flg = ip->flush_group;
1847         r = 0;
1848
1849         switch(rec->flush_state) {
1850         case HAMMER_FST_IDLE:
1851                 /*
1852                  * The record has no setup dependency; we can flush it.
1853                  */
1854                 KKASSERT(rec->target_ip == NULL);
1855                 rec->flush_state = HAMMER_FST_FLUSH;
1856                 rec->flush_group = flg;
1857                 ++flg->refs;
1858                 hammer_ref(&rec->lock);
1859                 r = 1;
1860                 break;
1861         case HAMMER_FST_SETUP:
1862                 /*
1863                  * The record has a setup dependency.  These are typically
1864                  * directory entry adds and deletes.  Such entries will be
1865                  * flushed when their inodes are flushed so we do not
1866                  * usually have to add them to the flush here.  However,
1867                  * if the target_ip has set HAMMER_INODE_CONN_DOWN then
1868                  * it is asking us to flush this record (and it).
1869                  */
1870                 target_ip = rec->target_ip;
1871                 KKASSERT(target_ip != NULL);
1872                 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
1873
1874                 /*
1875                  * If the target IP is already flushing in our group
1876                  * we could associate the record, but target_ip has
1877                  * already synced ino_data to sync_ino_data and we
1878                  * would also have to adjust nlinks.   Plus there are
1879                  * ordering issues for adds and deletes.
1880                  *
1881                  * Reflush downward if this is an ADD, and upward if
1882                  * this is a DEL.
1883                  */
1884                 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
1885                         if (rec->type == HAMMER_MEM_RECORD_ADD)
1886                                 ip->flags |= HAMMER_INODE_REFLUSH;
1887                         else
1888                                 target_ip->flags |= HAMMER_INODE_REFLUSH;
1889                         break;
1890                 } 
1891
1892                 /*
1893                  * Target IP is not yet flushing.  This can get complex
1894                  * because we have to be careful about the recursion.
1895                  *
1896                  * Directories create an issue for us in that if a flush
1897                  * of a directory is requested the expectation is to flush
1898                  * any pending directory entries, but this will cause the
1899                  * related inodes to recursively flush as well.  We can't
1900                  * really defer the operation, so we just get as many as we
1901                  * can and rely on the reflush logic to pick up the rest.
1902                  */
1903 #if 0
1904                 if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
1905                     (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
1906                         /*
1907                          * We aren't reclaiming and the target ip was not
1908                          * previously prevented from flushing due to this
1909                          * record dependancy.  Do not flush this record.
1910                          * record dependency.  Do not flush this record.
1911                         /*r = 0;*/
1912                 } else
1913 #endif
1914                 if (flg->total_count + flg->refs >
1915                            ip->hmp->undo_rec_limit) {
1916                         /*
1917                          * Our flush group is over-full and we risk blowing
1918                          * out the UNDO FIFO.  Stop the scan, flush what we
1919                          * have, then reflush the directory.
1920                          *
1921                          * The directory may be forced through multiple
1922                          * flush groups before it can be completely
1923                          * flushed.
1924                          */
1925                         ip->flags |= HAMMER_INODE_RESIGNAL |
1926                                      HAMMER_INODE_REFLUSH;
1927                         r = -1;
1928                 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
1929                         /*
1930                          * If the target IP is not flushing we can force
1931                          * it to flush; even if it is unable to write out
1932                          * any of its own records, we have at least one in
1933                          * hand that we CAN deal with.
1934                          */
1935                         rec->flush_state = HAMMER_FST_FLUSH;
1936                         rec->flush_group = flg;
1937                         ++flg->refs;
1938                         hammer_ref(&rec->lock);
1939                         hammer_flush_inode_core(target_ip, flg,
1940                                                 HAMMER_FLUSH_RECURSION);
1941                         r = 1;
1942                 } else {
1943                         /*
1944                          * General or delete-on-disk record.
1945                          *
1946                          * XXX this needs help.  If a delete-on-disk we could
1947                          * disconnect the target.  If the target has its own
1948                          * dependencies they really need to be flushed.
1949                          *
1950                          * XXX
1951                          */
1952                         rec->flush_state = HAMMER_FST_FLUSH;
1953                         rec->flush_group = flg;
1954                         ++flg->refs;
1955                         hammer_ref(&rec->lock);
1956                         hammer_flush_inode_core(target_ip, flg,
1957                                                 HAMMER_FLUSH_RECURSION);
1958                         r = 1;
1959                 }
1960                 break;
1961         case HAMMER_FST_FLUSH:
1962                 /* 
1963                  * If the WOULDBLOCK flag is set records may have been left
1964                  * over from a previous flush attempt.  The flush group will
1965                  * have been left intact - we are probably reflushing it
1966                  * now.
1967                  *
1968                  * If a flush error occurred ip->error will be non-zero.
1969                  */
1970                 KKASSERT(rec->flush_group == flg);
1971                 r = 1;
1972                 break;
1973         }
1974         return(r);
1975 }
1976
1977 #if 0
1978 /*
1979  * This version just moves records already in a flush state to the new
1980  * flush group and that is it.
1981  */
1982 static int
1983 hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
1984 {
1985         hammer_inode_t ip = rec->ip;
1986
1987         switch(rec->flush_state) {
1988         case HAMMER_FST_FLUSH:
1989                 KKASSERT(rec->flush_group == ip->flush_group);
1990                 break;
1991         default:
1992                 break;
1993         }
1994         return(0);
1995 }
1996 #endif
1997
1998 /*
1999  * Wait for a previously queued flush to complete.
2000  *
2001  * If a critical error occurred we don't try to wait.
2002  */
2003 void
2004 hammer_wait_inode(hammer_inode_t ip)
2005 {
2006         hammer_flush_group_t flg;
2007
2008         flg = NULL;
2009         if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
2010                 while (ip->flush_state != HAMMER_FST_IDLE &&
2011                        (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
2012                         if (ip->flush_state == HAMMER_FST_SETUP)
2013                                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2014                         if (ip->flush_state != HAMMER_FST_IDLE) {
2015                                 ip->flags |= HAMMER_INODE_FLUSHW;
2016                                 tsleep(&ip->flags, 0, "hmrwin", 0);
2017                         }
2018                 }
2019         }
2020 }
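
/*
 * Informational: the tsleep() above pairs with the wakeup issued by
 * hammer_flush_inode_done() when it clears HAMMER_INODE_FLUSHW:
 *
 *	if (ip->flags & HAMMER_INODE_FLUSHW) {
 *		ip->flags &= ~HAMMER_INODE_FLUSHW;
 *		wakeup(&ip->flags);
 *	}
 *
 * Both sides use &ip->flags as the sleep/wakeup channel.
 */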
2021
2022 /*
2023  * Called by the backend code when a flush has been completed.
2024  * The inode has already been removed from the flush list.
2025  *
2026  * A pipelined flush can occur, in which case we must re-enter the
2027  * inode on the list and re-copy its fields.
2028  */
2029 void
2030 hammer_flush_inode_done(hammer_inode_t ip, int error)
2031 {
2032         hammer_mount_t hmp;
2033         int dorel;
2034
2035         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
2036
2037         hmp = ip->hmp;
2038
2039         /*
2040          * Merge left-over flags back into the frontend and fix the state.
2041          * Incomplete truncations are retained by the backend.
2042          */
2043         ip->error = error;
2044         ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
2045         ip->sync_flags &= HAMMER_INODE_TRUNCATED;
2046
2047         /*
2048          * The backend may have adjusted nlinks, so if the adjusted nlinks
2049          * does not match the frontend, set the frontend's DDIRTY flag again.
2050          */
2051         if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
2052                 ip->flags |= HAMMER_INODE_DDIRTY;
2053
2054         /*
2055          * Fix up the dirty buffer status.
2056          */
2057         if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
2058                 ip->flags |= HAMMER_INODE_BUFS;
2059         }
2060
2061         /*
2062          * Re-set the XDIRTY flag if some of the inode's in-memory records
2063          * could not be flushed.
2064          */
2065         KKASSERT((RB_EMPTY(&ip->rec_tree) &&
2066                   (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
2067                  (!RB_EMPTY(&ip->rec_tree) &&
2068                   (ip->flags & HAMMER_INODE_XDIRTY) != 0));
2069
2070         /*
2071          * Do not lose track of inodes which no longer have vnode
2072          * associations, otherwise they may never get flushed again.
2073          */
2074         if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
2075                 ip->flags |= HAMMER_INODE_REFLUSH;
2076
2077         /*
2078          * Adjust the flush state.
2079          */
2080         if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2081                 /*
2082                  * We were unable to flush out all our records, leave the
2083                  * inode in a flush state and in the current flush group.
2084                  *
2085                  * This occurs if the UNDO block gets too full or there
2086                  * is too much dirty meta-data; it allows the flusher
2087                  * to finalize the UNDO block and then re-flush.
2088                  */
2089                 ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
2090                 dorel = 0;
2091         } else {
2092                 /*
2093                  * Remove from the flush_group
2094                  */
2095                 TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
2096                 ip->flush_group = NULL;
2097
2098                 /*
2099                  * Clean up the vnode ref and tracking counts.
2100                  */
2101                 if (ip->flags & HAMMER_INODE_VHELD) {
2102                         ip->flags &= ~HAMMER_INODE_VHELD;
2103                         vrele(ip->vp);
2104                 }
2105                 --hmp->count_iqueued;
2106                 --hammer_count_iqueued;
2107
2108                 /*
2109                  * And adjust the state.
2110                  */
2111                 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
2112                         ip->flush_state = HAMMER_FST_IDLE;
2113                         dorel = 1;
2114                 } else {
2115                         ip->flush_state = HAMMER_FST_SETUP;
2116                         dorel = 0;
2117                 }
2118
2119                 /*
2120                  * If the frontend is waiting for a flush to complete,
2121                  * wake it up.
2122                  */
2123                 if (ip->flags & HAMMER_INODE_FLUSHW) {
2124                         ip->flags &= ~HAMMER_INODE_FLUSHW;
2125                         wakeup(&ip->flags);
2126                 }
2127         }
2128
2129         /*
2130          * If the frontend made more changes and requested another flush,
2131          * then try to get it running.
2132          *
2133          * Reflushes are aborted when the inode is errored out.
2134          */
2135         if (ip->flags & HAMMER_INODE_REFLUSH) {
2136                 ip->flags &= ~HAMMER_INODE_REFLUSH;
2137                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
2138                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
2139                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2140                 } else {
2141                         hammer_flush_inode(ip, 0);
2142                 }
2143         }
2144
2145         /*
2146          * If we have no parent dependencies we can clear CONN_DOWN
2147          */
2148         if (TAILQ_EMPTY(&ip->target_list))
2149                 ip->flags &= ~HAMMER_INODE_CONN_DOWN;
2150
2151         /*
2152          * If the inode is now clean drop the space reservation.
2153          */
2154         if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
2155             (ip->flags & HAMMER_INODE_RSV_INODES)) {
2156                 ip->flags &= ~HAMMER_INODE_RSV_INODES;
2157                 --hmp->rsv_inodes;
2158         }
2159
2160         if (dorel)
2161                 hammer_rel_inode(ip, 0);
2162 }
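
/*
 * Informational: dorel is only set on the FLUSH -> IDLE transition
 * above.  It drops the extra flush reference taken by
 * hammer_flush_inode_core() when the inode originally left FST_IDLE.
 */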
2163
2164 /*
2165  * Called from hammer_sync_inode() to synchronize in-memory records
2166  * to the media.
2167  */
2168 static int
2169 hammer_sync_record_callback(hammer_record_t record, void *data)
2170 {
2171         hammer_cursor_t cursor = data;
2172         hammer_transaction_t trans = cursor->trans;
2173         hammer_mount_t hmp = trans->hmp;
2174         int error;
2175
2176         /*
2177          * Skip records that do not belong to the current flush.
2178          */
2179         ++hammer_stats_record_iterations;
2180         if (record->flush_state != HAMMER_FST_FLUSH)
2181                 return(0);
2182
2183 #if 1
2184         if (record->flush_group != record->ip->flush_group) {
2185                 kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group, record->ip->flush_group);
2186                 Debugger("blah2");
2187                 return(0);
2188         }
2189 #endif
2190         KKASSERT(record->flush_group == record->ip->flush_group);
2191
2192         /*
2193          * Interlock the record using the BE flag.  Once BE is set the
2194          * frontend cannot change the state of FE.
2195          *
2196          * NOTE: If FE is set prior to us setting BE we still sync the
2197          * record out, but the flush completion code converts it to 
2198          * a delete-on-disk record instead of destroying it.
2199          */
2200         KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
2201         record->flags |= HAMMER_RECF_INTERLOCK_BE;
2202
2203         /*
2204          * The backend may have already disposed of the record.
2205          */
2206         if (record->flags & HAMMER_RECF_DELETED_BE) {
2207                 error = 0;
2208                 goto done;
2209         }
2210
2211         /*
2212          * If the whole inode is being deleted, all on-disk records will
2213          * be deleted very soon.  We can't sync any new records to disk
2214          * because they will be deleted in the same transaction they were
2215          * created in (delete_tid == create_tid), which will assert.
2216          *
2217          * XXX There may be a case with RECORD_ADD with DELETED_FE set
2218          * that we currently panic on.
2219          */
2220         if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
2221                 switch(record->type) {
2222                 case HAMMER_MEM_RECORD_DATA:
2223                         /*
2224                          * We don't have to do anything, if the record was
2225                          * committed the space will have been accounted for
2226                          * in the blockmap.
2227                          */
2228                         /* fall through */
2229                 case HAMMER_MEM_RECORD_GENERAL:
2230                         record->flags |= HAMMER_RECF_DELETED_FE;
2231                         record->flags |= HAMMER_RECF_DELETED_BE;
2232                         error = 0;
2233                         goto done;
2234                 case HAMMER_MEM_RECORD_ADD:
2235                         panic("hammer_sync_record_callback: illegal add "
2236                               "during inode deletion record %p", record);
2237                         break; /* NOT REACHED */
2238                 case HAMMER_MEM_RECORD_INODE:
2239                         panic("hammer_sync_record_callback: attempt to "
2240                               "sync inode record %p?", record);
2241                         break; /* NOT REACHED */
2242                 case HAMMER_MEM_RECORD_DEL:
2243                         /* 
2244                          * Follow through and issue the on-disk deletion
2245                          */
2246                         break;
2247                 }
2248         }
2249
2250         /*
2251          * If DELETED_FE is set special handling is needed for directory
2252          * entries.  Dependent pieces related to the directory entry may
2253          * have already been synced to disk.  If this occurs we have to
2254          * sync the directory entry and then change the in-memory record
2255          * from an ADD to a DELETE to cover the fact that it's been
2256          * deleted by the frontend.
2257          *
2258          * A directory delete covering record (MEM_RECORD_DEL) can never
2259          * be deleted by the frontend.
2260          *
2261          * Any other record type (aka DATA) can be deleted by the frontend.
2262          * XXX At the moment the flusher must skip it because there may
2263          * be another data record in the flush group for the same block,
2264          * meaning that some frontend data changes can leak into the backend's
2265          * synchronization point.
2266          */
2267         if (record->flags & HAMMER_RECF_DELETED_FE) {
2268                 if (record->type == HAMMER_MEM_RECORD_ADD) {
2269                         record->flags |= HAMMER_RECF_CONVERT_DELETE;
2270                 } else {
2271                         KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
2272                         record->flags |= HAMMER_RECF_DELETED_BE;
2273                         error = 0;
2274                         goto done;
2275                 }
2276         }
2277
2278         /*
2279          * Assign the create_tid for new records.  Deletions already
2280          * have the record's entire key properly set up.
2281          */
2282         if (record->type != HAMMER_MEM_RECORD_DEL) {
2283                 record->leaf.base.create_tid = trans->tid;
2284                 record->leaf.create_ts = trans->time32;
        }
2285         for (;;) {
2286                 error = hammer_ip_sync_record_cursor(cursor, record);
2287                 if (error != EDEADLK)
2288                         break;
2289                 hammer_done_cursor(cursor);
2290                 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
2291                                            record->ip);
2292                 if (error)
2293                         break;
2294         }
2295         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
2296
2297         if (error)
2298                 error = -error;
2299 done:
2300         hammer_flush_record_done(record, error);
2301
2302         /*
2303          * Do partial finalization if we have built up too many dirty
2304          * buffers.  Otherwise a buffer cache deadlock can occur when
2305          * doing things like creating tens of thousands of tiny files.
2306          *
2307          * We must release our cursor lock to avoid a 3-way deadlock
2308          * due to the exclusive sync lock the finalizer must get.
2309          */
2310         if (hammer_flusher_meta_limit(hmp)) {
2311                 hammer_unlock_cursor(cursor, 0);
2312                 hammer_flusher_finalize(trans, 0);
2313                 hammer_lock_cursor(cursor, 0);
2314         }
2315
2316         return(error);
2317 }
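
/*
 * Informational: the interlock used above is a two-flag protocol.  The
 * backend sets HAMMER_RECF_INTERLOCK_BE before touching the media;
 * from then on the frontend may only mark the record DELETED_FE, and
 * the flush completion code converts a committed record into a
 * delete-on-disk instead of destroying it outright.
 */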
2318
2319 /*
2320  * XXX error handling
2321  */
2322 int
2323 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
2324 {
2325         struct hammer_cursor cursor;
2326         hammer_node_t tmp_node;
2327         hammer_record_t depend;
2328         hammer_record_t next;
2329         int error, tmp_error;
2330         u_int64_t nlinks;
2331
2332         if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
2333                 return(0);
2334
2335         error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2336         if (error)
2337                 goto done;
2338
2339         /*
2340          * Any directory records referencing this inode which are not in
2341          * our current flush group must adjust our nlink count for the
2342          * purposes of synchronization to disk.
2343          *
2344          * Records which are in our flush group can be unlinked from our
2345          * inode now, potentially allowing the inode to be physically
2346          * deleted.
2347          *
2348          * This cannot block.
2349          */
2350         nlinks = ip->ino_data.nlinks;
2351         next = TAILQ_FIRST(&ip->target_list);
2352         while ((depend = next) != NULL) {
2353                 next = TAILQ_NEXT(depend, target_entry);
2354                 if (depend->flush_state == HAMMER_FST_FLUSH &&
2355                     depend->flush_group == ip->flush_group) {
2356                         /*
2357                          * If this is an ADD that was deleted by the frontend
2358                          * the frontend nlinks count will have already been
2359                          * decremented, but the backend is going to sync its
2360                          * directory entry and must account for it.  The
2361                          * record will be converted to a delete-on-disk when
2362                          * it gets synced.
2363                          *
2364                          * If the ADD was not deleted by the frontend we
2365                          * can remove the dependency from our target_list.
2366                          */
2367                         if (depend->flags & HAMMER_RECF_DELETED_FE) {
2368                                 ++nlinks;
2369                         } else {
2370                                 TAILQ_REMOVE(&ip->target_list, depend,
2371                                              target_entry);
2372                                 depend->target_ip = NULL;
2373                         }
2374                 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
2375                         /*
2376                          * Not part of our flush group
2377                          */
2378                         KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
2379                         switch(depend->type) {
2380                         case HAMMER_MEM_RECORD_ADD:
2381                                 --nlinks;
2382                                 break;
2383                         case HAMMER_MEM_RECORD_DEL:
2384                                 ++nlinks;
2385                                 break;
2386                         default:
2387                                 break;
2388                         }
2389                 }
2390         }
2391
2392         /*
2393          * Set dirty if we had to modify the link count.
2394          */
2395         if (ip->sync_ino_data.nlinks != nlinks) {
2396                 KKASSERT((int64_t)nlinks >= 0);
2397                 ip->sync_ino_data.nlinks = nlinks;
2398                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
2399         }
2400
2401         /*
2402          * If there is a truncation queued, destroy any data past the (aligned)
2403          * truncation point.  Userland will have dealt with the buffer
2404          * containing the truncation point for us.
2405          *
2406          * We don't flush pending frontend data buffers until after we've
2407          * dealt with the truncation.
2408          */
2409         if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2410                 /*
2411                  * Interlock trunc_off.  The VOP front-end may continue to
2412                  * make adjustments to it while we are blocked.
2413                  */
2414                 off_t trunc_off;
2415                 off_t aligned_trunc_off;
2416                 int blkmask;
2417
2418                 trunc_off = ip->sync_trunc_off;
2419                 blkmask = hammer_blocksize(trunc_off) - 1;
2420                 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
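                /*
                 * Worked example (assuming hammer_blocksize() returns
                 * 16KB here): trunc_off 0x12345 gives blkmask 0x3fff,
                 * so aligned_trunc_off = (0x12345 + 0x3fff) & ~0x3fff
                 * = 0x14000.  Whole blocks at or beyond 0x14000 are
                 * deleted on-media; the partial block below it was
                 * already handled by the frontend.
                 */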
2421
2422                 /*
2423                  * Delete any whole blocks on-media.  The front-end has
2424                  * already cleaned out any partial block and made it
2425                  * pending.  The front-end may have updated trunc_off
2426                  * while we were blocked so we only use sync_trunc_off.
2427                  *
2428                  * This operation can blow out the buffer cache, EWOULDBLOCK
2429                  * means we were unable to complete the deletion.  The
2430                  * deletion will update sync_trunc_off in that case.
2431                  */
2432                 error = hammer_ip_delete_range(&cursor, ip,
2433                                                 aligned_trunc_off,
2434                                                 0x7FFFFFFFFFFFFFFFLL, 2);
2435                 if (error == EWOULDBLOCK) {
2436                         ip->flags |= HAMMER_INODE_WOULDBLOCK;
2437                         error = 0;
2438                         goto defer_buffer_flush;
2439                 }
2440
2441                 if (error)
2442                         goto done;
2443
2444                 /*
2445                  * Clear the truncation flag on the backend after we have
2446                  * completed the deletions.  Backend data is now good again
2447                  * (including new records we are about to sync, below).
2448                  *
2449                  * Leave sync_trunc_off intact.  As we write additional
2450                  * records the backend will update sync_trunc_off.  This
2451                  * tells the backend whether it can skip the overwrite
2452                  * test.  This should work properly even when the backend
2453                  * writes full blocks where the truncation point straddles
2454                  * the block because the comparison is against the base
2455                  * offset of the record.
2456                  */
2457                 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2458                 /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
2459         } else {
2460                 error = 0;
2461         }
2462
2463         /*
2464          * Now sync related records.  These will typically be directory
2465          * entries, records tracking direct-writes, or delete-on-disk records.
2466          */
2467         if (error == 0) {
2468                 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2469                                     hammer_sync_record_callback, &cursor);
2470                 if (tmp_error < 0)
2471                         tmp_error = -tmp_error;
2472                 if (tmp_error)
2473                         error = tmp_error;
2474         }
2475         hammer_cache_node(&ip->cache[1], cursor.node);
2476
2477         /*
2478          * Re-seek for inode update, assuming our cache hasn't been ripped
2479          * out from under us.
2480          */
2481         if (error == 0) {
2482                 tmp_node = hammer_ref_node_safe(ip->hmp, &ip->cache[0], &error);
2483                 if (tmp_node) {
2484                         hammer_cursor_downgrade(&cursor);
2485                         hammer_lock_sh(&tmp_node->lock);
2486                         if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
2487                                 hammer_cursor_seek(&cursor, tmp_node, 0);
2488                         hammer_unlock(&tmp_node->lock);
2489                         hammer_rel_node(tmp_node);
2490                 }
2491                 error = 0;
2492         }
2493
2494         /*
2495          * If we are deleting the inode the frontend had better not have
2496          * any active references on elements making up the inode.
2497          *
2498  * The call to hammer_ip_delete_clean() cleans up auxiliary records
2499          * but not DB or DATA records.  Those must have already been deleted
2500          * by the normal truncation mechanic.
2501          */
2502         if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
2503             RB_EMPTY(&ip->rec_tree) &&
2504             (ip->sync_flags & HAMMER_INODE_DELETING) &&
2505             (ip->flags & HAMMER_INODE_DELETED) == 0) {
2506                 int count1 = 0;
2507
2508                 error = hammer_ip_delete_clean(&cursor, ip, &count1);
2509                 if (error == 0) {
2510                         ip->flags |= HAMMER_INODE_DELETED;
2511                         ip->sync_flags &= ~HAMMER_INODE_DELETING;
2512                         ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2513                         KKASSERT(RB_EMPTY(&ip->rec_tree));
2514
2515                         /*
2516                          * Set delete_tid in both the frontend and backend
2517                          * copy of the inode record.  The DELETED flag handles
2518                          * this; do not set DDIRTY.
2519                          */
2520                         ip->ino_leaf.base.delete_tid = trans->tid;
2521                         ip->sync_ino_leaf.base.delete_tid = trans->tid;
2522                         ip->ino_leaf.delete_ts = trans->time32;
2523                         ip->sync_ino_leaf.delete_ts = trans->time32;
2524
2526                         /*
2527                          * Adjust the inode count in the volume header
2528                          */
2529                         hammer_sync_lock_sh(trans);
2530                         if (ip->flags & HAMMER_INODE_ONDISK) {
2531                                 hammer_modify_volume_field(trans,
2532                                                            trans->rootvol,
2533                                                            vol0_stat_inodes);
2534                                 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
2535                                 hammer_modify_volume_done(trans->rootvol);
2536                         }
2537                         hammer_sync_unlock(trans);
2538                 }
2539         }
2540
2541         if (error)
2542                 goto done;
2543         ip->sync_flags &= ~HAMMER_INODE_BUFS;
2544
2545 defer_buffer_flush:
2546         /*
2547          * Now update the inode's on-disk inode-data and/or on-disk record.
2548          * DELETED and ONDISK are managed only in ip->flags.
2549          *
2550          * In the case of a deferred buffer flush we still update the on-disk
2551          * inode to satisfy visibility requirements if there happen to be
2552          * directory dependencies.
2553          */
2554         switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
2555         case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
2556                 /*
2557                  * If deleted and on-disk, don't set any additional flags.
2558                  * the delete flag takes care of things.
2559                  *
2560                  * Clear flags which may have been set by the frontend.
2561                  */
2562                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2563                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2564                                     HAMMER_INODE_DELETING);
2565                 break;
2566         case HAMMER_INODE_DELETED:
2567                 /*
2568                  * Take care of the case where a deleted inode was never
2569                  * flushed to the disk in the first place.
2570                  *
2571                  * Clear flags which may have been set by the frontend.
2572                  */
2573                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2574                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2575                                     HAMMER_INODE_DELETING);
2576                 while (RB_ROOT(&ip->rec_tree)) {
2577                         hammer_record_t record = RB_ROOT(&ip->rec_tree);
2578                         hammer_ref(&record->lock);
2579                         KKASSERT(record->lock.refs == 1);
2580                         record->flags |= HAMMER_RECF_DELETED_FE;
2581                         record->flags |= HAMMER_RECF_DELETED_BE;
2582                         hammer_rel_mem_record(record);
2583                 }
2584                 break;
2585         case HAMMER_INODE_ONDISK:
2586                 /*
2587                  * If already on-disk, do not set any additional flags.
2588                  */
2589                 break;
2590         default:
2591                 /*
2592                  * If not on-disk and not deleted, set DDIRTY to force
2593                  * an initial record to be written.
2594                  *
2595                  * Also set the create_tid in both the frontend and backend
2596                  * copy of the inode record.
2597                  */
2598                 ip->ino_leaf.base.create_tid = trans->tid;
2599                 ip->ino_leaf.create_ts = trans->time32;
2600                 ip->sync_ino_leaf.base.create_tid = trans->tid;
2601                 ip->sync_ino_leaf.create_ts = trans->time32;
2602                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
2603                 break;
2604         }
2605
2606         /*
2607          * If DDIRTY is set, write out a new record.  If the inode
2608          * is already on-disk the old record is marked as deleted.
2609          *
2610          * If DELETED is set hammer_update_inode() will delete the existing
2611          * record without writing out a new one.
2612          *
2613          * If *ONLY* ATIME/MTIME are set we can update the record in-place.
2614          */
2615         if (ip->flags & HAMMER_INODE_DELETED) {
2616                 error = hammer_update_inode(&cursor, ip);
2617         } else 
2618         if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
2619             (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
2620                 error = hammer_update_itimes(&cursor, ip);
2621         } else
2622         if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
2623                 error = hammer_update_inode(&cursor, ip);
2624         }
2625 done:
2626         if (error) {
2627                 hammer_critical_error(ip->hmp, ip, error,
2628                                       "while syncing inode");
2629         }
2630         hammer_done_cursor(&cursor);
2631         return(error);
2632 }

/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
        struct vnode *vp;

        /*
         * Set the DELETING flag when the link count drops to 0 and the
         * OS no longer has any opens on the inode.
         *
         * The backend will clear DELETING (a mod flag) and set DELETED
         * (a state flag) when it is actually able to perform the
         * operation.
         */
        if (ip->ino_data.nlinks == 0 &&
            (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
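                /*
                 * Flag the deletion.  Setting TRUNCATED with trunc_off
                 * at offset 0 also asks the backend to destroy all of
                 * the inode's data records when the deletion is flushed.
                 */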
                ip->flags |= HAMMER_INODE_DELETING;
                ip->flags |= HAMMER_INODE_TRUNCATED;
                ip->trunc_off = 0;
                vp = NULL;
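                /*
                 * If requested, obtain a vnode reference ourselves so
                 * the buffer cache and VM object can be cleaned out
                 * below even when the caller did not supply a vnode.
                 */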
                if (getvp) {
                        if (hammer_get_vnode(ip, &vp) != 0)
                                return;
                }

                /*
                 * Final cleanup: discard all buffer cache buffers and
                 * shrink the VM object to zero so no pages remain
                 * associated with the vnode.
                 */
                if (ip->vp) {
                        vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
                        vnode_pager_setsize(ip->vp, 0);
                }
                if (getvp) {
                        vput(vp);
                }
        }
}
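
/*
 * A typical caller is the VOP_INACTIVE path, which does roughly:
 *
 *      hammer_inode_unloadable_check(ip, 0);
 *      if (ip->flags & HAMMER_INODE_MODMASK)
 *              hammer_flush_inode(ip, 0);
 *
 * i.e. the inode is only flushed if the check above dirtied it.
 */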

/*
 * After potentially resolving a dependency the inode is tested
 * to determine whether it needs to be reflushed.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
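        /*
         * A REFLUSH request means another flush was asked for while a
         * flush was already in progress.  Requeue the inode now,
         * preserving any pending request that the flusher be signaled.
         */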
        if (ip->flags & HAMMER_INODE_REFLUSH) {
                ip->flags &= ~HAMMER_INODE_REFLUSH;
                hammer_ref(&ip->lock);
                if (ip->flags & HAMMER_INODE_RESIGNAL) {
                        ip->flags &= ~HAMMER_INODE_RESIGNAL;
                        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                } else {
                        hammer_flush_inode(ip, 0);
                }
                hammer_rel_inode(ip, 0);
        }
}

/*
 * Clear the RECLAIM flag on an inode.  This occurs when the inode is
 * reassociated with a vp or just before it gets freed.
 *
 * Wake up one thread blocked waiting on reclaims to complete.  Note that
 * the inode the thread is waiting on behalf of is a different inode than
 * the inode we are called with.  This is to create a pipeline.
 */
static void
hammer_inode_wakereclaims(hammer_inode_t ip)
{
        struct hammer_reclaim *reclaim;
        hammer_mount_t hmp = ip->hmp;

        if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
                return;

        --hammer_count_reclaiming;
        --hmp->inode_reclaims;
        ip->flags &= ~HAMMER_INODE_RECLAIM;

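        /*
         * Wake only the longest-waiting thread.  Releasing one waiter
         * per reclaimed inode is what forms the pipeline described
         * above.
         */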
        if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
                TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
                reclaim->okydoky = 1;
                wakeup(reclaim);
        }
}

/*
 * Set up our reclaim pipeline.  We only let so many detached (and dirty)
 * inodes build up before we start blocking.
 *
 * When we block we don't care *which* inode has finished reclaiming,
 * as long as one does.  This is somewhat heuristic... we also put a
 * cap on how long we are willing to wait.
 */
void
hammer_inode_waitreclaims(hammer_mount_t hmp)
{
        struct hammer_reclaim reclaim;
        int delay;

        if (hmp->inode_reclaims > HAMMER_RECLAIM_WAIT) {
                reclaim.okydoky = 0;
                TAILQ_INSERT_TAIL(&hmp->reclaim_list,
                                  &reclaim, entry);
        } else {
                reclaim.okydoky = 1;
        }

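        /*
         * Scale the delay linearly with the backlog beyond the limit:
         * it reaches a full second (hz ticks) once the backlog hits
         * twice HAMMER_RECLAIM_WAIT.  We sleep for at least one tick
         * and give up waiting if it expires without a wakeup.
         */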
        if (reclaim.okydoky == 0) {
                delay = (hmp->inode_reclaims - HAMMER_RECLAIM_WAIT) * hz /
                        HAMMER_RECLAIM_WAIT;
                if (delay >= 0)
                        tsleep(&reclaim, 0, "hmrrcm", delay + 1);
                if (reclaim.okydoky == 0)
                        TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
        }
}
