/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.114 2008/09/24 00:53:51 dillon Exp $
 */

#include "hammer.h"
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static int      hammer_unload_inode(struct hammer_inode *ip);
static void     hammer_free_inode(hammer_inode_t ip);
static void     hammer_flush_inode_core(hammer_inode_t ip,
                                        hammer_flush_group_t flg, int flags);
static int      hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int      hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int      hammer_setup_parent_inodes(hammer_inode_t ip,
                                        hammer_flush_group_t flg);
static int      hammer_setup_parent_inodes_helper(hammer_record_t record,
                                        hammer_flush_group_t flg);
static void     hammer_inode_wakereclaims(hammer_inode_t ip, int dowake);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif

/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_localization < ip2->obj_localization)
                return(-1);
        if (ip1->obj_localization > ip2->obj_localization)
                return(1);
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}

/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_localization < ip->obj_localization)
                return(-1);
        if (info->obj_localization > ip->obj_localization)
                return(1);
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}

/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
        hammer_inode_info_t info = data;

        if (ip->obj_localization > info->obj_localization)
                return(1);
        if (ip->obj_localization < info->obj_localization)
                return(-1);
        if (ip->obj_id > info->obj_id)
                return(1);
        if (ip->obj_id < info->obj_id)
                return(-1);
        return(0);
}

/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
        u_int32_t localization = *(u_int32_t *)data;
        if (ip->obj_localization > localization)
                return(1);
        if (ip->obj_localization < localization)
                return(-1);
        return(0);
}

/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
        if (p1->localization < p2->localization)
                return(-1);
        if (p1->localization > p2->localization)
                return(1);
        return(0);
}


RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
             hammer_pfs_rb_compare, u_int32_t, localization);

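/*
 * Editor's note: an illustrative sketch (not part of the original
 * source) showing how the generated lookup above is driven.  Given a
 * referenced hammer_mount_t hmp, an in-memory inode keyed by
 * (localization, obj_id, asof) is located via the LOOKUP_INFO variant,
 * which compares with hammer_inode_info_cmp():
 *
 *      struct hammer_inode_info iinfo;
 *      hammer_inode_t ip;
 *
 *      iinfo.obj_id = obj_id;
 *      iinfo.obj_asof = asof;
 *      iinfo.obj_localization = localization;
 *      ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
 *      if (ip)
 *              hammer_ref(&ip->lock);
 *
 * hammer_get_inode() below performs exactly this lookup before falling
 * back to the on-disk B-Tree.
 */
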
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(ap->a_vp);
                return(0);
        }

        /*
         * If the inode no longer has visibility in the filesystem try to
         * recycle it immediately, even if the inode is dirty.  Recycling
         * it quickly allows the system to reclaim buffer cache and VM
         * resources which can matter a lot in a heavily loaded system.
         *
         * This can deadlock in vfsync() if we aren't careful.
         *
         * Do not queue the inode to the flusher if we still have visibility,
         * otherwise namespace calls such as chmod will unnecessarily generate
         * multiple inode updates.
         */
        hammer_inode_unloadable_check(ip, 0);
        if (ip->ino_data.nlinks == 0) {
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
                vrecycle(ap->a_vp);
        }
        return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct vnode *vp;

        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
                vp->v_data = NULL;
                ip->vp = NULL;

                if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
                        ++hammer_count_reclaiming;
                        ++hmp->inode_reclaims;
                        ip->flags |= HAMMER_INODE_RECLAIM;
                }
                hammer_rel_inode(ip, 1);
        }
        return(0);
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
        hammer_mount_t hmp;
        struct vnode *vp;
        int error = 0;
        u_int8_t obj_type;

        hmp = ip->hmp;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;

                        obj_type = ip->ino_data.obj_type;
                        vp->v_type = hammer_get_vnode_type(obj_type);

                        hammer_inode_wakereclaims(ip, 0);

                        switch(ip->ino_data.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         *
                         * Pseudo-filesystem roots can be accessed via
                         * non-root filesystem paths and setting VROOT may
                         * confuse the namecache.  Set VPFSROOT instead.
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT &&
                            ip->obj_asof == hmp->asof) {
                                if (ip->obj_localization == 0)
                                        vp->v_flag |= VROOT;
                                else
                                        vp->v_flag |= VPFSROOT;
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
                        if (vp->v_type == VREG)
                                vinitvmio(vp, ip->ino_data.size);
                        break;
                }

                /*
                 * loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp)
                                break;
                        vput(vp);
                }
        }
        *vpp = vp;
        return(error);
}

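/*
 * Editor's note: a minimal usage sketch (not part of the original
 * source), modeled on the VFS vget path.  A caller first obtains a
 * referenced inode and then attaches a vnode to it; the inode
 * reference is dropped independently of the vnode reference:
 *
 *      ip = hammer_get_inode(&trans, NULL, ino, hmp->asof,
 *                            HAMMER_DEF_LOCALIZATION, 0, &error);
 *      if (ip) {
 *              error = hammer_get_vnode(ip, vpp);
 *              hammer_rel_inode(ip, 0);
 *      }
 *
 * The argument values shown are illustrative only.
 */
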
/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
                            int (*callback)(hammer_inode_t ip, void *data),
                            void *data)
{
        hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
                                   hammer_inode_info_cmp_all_history,
                                   callback, iinfo);
}

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;


        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
#if 0
                if (ip->vp == NULL)
                        trans->flags |= HAMMER_TRANSF_NEWINODE;
#endif
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        if (hmp->ronly)
                ip->flags |= HAMMER_INODE_RO;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Locate the on-disk inode.  If this is a PFS root we always
         * access the current version of the root inode and (if it is not
         * a master) always access information under it with a snapshot
         * TID.
         */
retry:
        hammer_init_cursor(trans, &cursor, (dip ? &dip->cache[0] : NULL), NULL);
        cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;

        cursor.asof = iinfo.obj_asof;
        cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
                       HAMMER_CURSOR_ASOF;

        *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
                ip->ino_data = cursor.data->inode;

                /*
                 * cache[0] tries to cache the location of the object inode.
                 * The assumption is that it is near the directory inode.
                 *
                 * cache[1] tries to cache the location of the object data.
                 * The assumption is that it is near the directory data.
                 */
                hammer_cache_node(&ip->cache[0], cursor.node);
                if (dip && dip->cache[1].node)
                        hammer_cache_node(&ip->cache[1], dip->cache[1].node);

                /*
                 * The file should not contain any data past the file size
                 * stored in the inode.  Setting save_trunc_off to the
                 * file size instead of max reduces B-Tree lookup overheads
                 * on append by allowing the flusher to avoid checking for
                 * record overwrites.
                 */
                ip->save_trunc_off = ip->ino_data.size;

                /*
                 * Locate and assign the pseudofs management structure to
                 * the inode.
                 */
                if (dip && dip->obj_localization == ip->obj_localization) {
                        ip->pfsm = dip->pfsm;
                        hammer_ref(&ip->pfsm->lock);
                } else {
                        ip->pfsm = hammer_load_pseudofs(trans,
                                                        ip->obj_localization,
                                                        errorp);
                        *errorp = 0;    /* ignore ENOENT */
                }
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }

                hammer_free_inode(ip);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);
        trans->flags |= HAMMER_TRANSF_NEWINODE;
        return (ip);
}

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred, hammer_inode_t dip,
                    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;
        int error;

        hmp = trans->hmp;

        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        trans->flags |= HAMMER_TRANSF_NEWINODE;

        if (pfsm) {
                KKASSERT(pfsm->localization != 0);
                ip->obj_id = HAMMER_OBJID_ROOT;
                ip->obj_localization = pfsm->localization;
        } else {
                KKASSERT(dip != NULL);
                ip->obj_id = hammer_alloc_objid(hmp, dip);
                ip->obj_localization = dip->obj_localization;
        }

        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY |
                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;

        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        /* ip->save_trunc_off = 0; (already zero) */
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        ip->ino_data.atime = trans->time;
        ip->ino_data.mtime = trans->time;
        ip->ino_data.size = 0;
        ip->ino_data.nlinks = 0;

        /*
         * A nohistory designator on the parent directory is inherited by
         * the child.  We will do this even for pseudo-fs creation... the
         * sysadmin can turn it off.
         */
        if (dip) {
                ip->ino_data.uflags = dip->ino_data.uflags &
                                      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
        }

        ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_leaf.base.localization = ip->obj_localization +
                                         HAMMER_LOCALIZE_INODE;
        ip->ino_leaf.base.obj_id = ip->obj_id;
        ip->ino_leaf.base.key = 0;
        ip->ino_leaf.base.create_tid = 0;
        ip->ino_leaf.base.delete_tid = 0;
        ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;

        /*
         * If we are running version 2 or greater we use dirhash algorithm #1
         * which is semi-sorted.  Algorithm #0 was just a pure crc.
         */
        if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
                if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        ip->ino_data.cap_flags |= HAMMER_INODE_CAP_DIRHASH_ALG1;
                }
        }

        /*
         * Setup the ".." pointer.  This only needs to be done for directories
         * but we do it for all objects as a recovery aid.
         */
        if (dip)
                ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
#if 0
        /*
         * The parent_obj_localization field only applies to pseudo-fs roots.
         * XXX this is no longer applicable, PFSs are no longer directly
         * tied into the parent's directory structure.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
            ip->obj_id == HAMMER_OBJID_ROOT) {
                ip->ino_data.ext.obj.parent_obj_localization =
                                                dip->obj_localization;
        }
#endif

        switch(ip->ino_leaf.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        if (dip) {
                xuid = hammer_to_unix_xid(&dip->ino_data.uid);
                xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
                                             xuid, cred, &vap->va_mode);
        } else {
                xuid = 0;
        }
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
        else
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
        else if (dip)
                ip->ino_data.gid = dip->ino_data.gid;

        hammer_ref(&ip->lock);

        if (pfsm) {
                ip->pfsm = pfsm;
                hammer_ref(&pfsm->lock);
                error = 0;
        } else if (dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
                error = 0;
        } else {
                ip->pfsm = hammer_load_pseudofs(trans,
                                                ip->obj_localization,
                                                &error);
                error = 0;      /* ignore ENOENT */
        }

        if (error) {
                hammer_free_inode(ip);
                ip = NULL;
        } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
                /* not reached */
                hammer_free_inode(ip);
        }
        *ipp = ip;
        return(error);
}

/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
        struct hammer_mount *hmp;

        hmp = ip->hmp;
        KKASSERT(ip->lock.refs == 1);
        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        hammer_inode_wakereclaims(ip, 1);
        if (ip->objid_cache)
                hammer_clear_objid(ip);
        --hammer_count_inodes;
        --hmp->count_inodes;
        if (ip->pfsm) {
                hammer_rel_pseudofs(hmp, ip->pfsm);
                ip->pfsm = NULL;
        }
        kfree(ip, hmp->m_inodes);
        ip = NULL;
}

/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
                     u_int32_t localization, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_inode_t ip;
        hammer_pseudofs_inmem_t pfsm;
        struct hammer_cursor cursor;
        int bytes;

retry:
        pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
        if (pfsm) {
                hammer_ref(&pfsm->lock);
                *errorp = 0;
                return(pfsm);
        }

        /*
         * PFS records are stored in the root inode (not the PFS root inode,
         * but the real root).  Avoid an infinite recursion if loading
         * the PFS for the real root.
         */
        if (localization) {
                ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
                                      HAMMER_MAX_TID,
                                      HAMMER_DEF_LOCALIZATION, 0, errorp);
        } else {
                ip = NULL;
        }

        pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
        pfsm->localization = localization;
        pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
        pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

        hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
        cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        if (ip)
                *errorp = hammer_ip_lookup(&cursor);
        else
                *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == 0) {
                *errorp = hammer_ip_resolve_data(&cursor);
                if (*errorp == 0) {
                        if (cursor.data->pfsd.mirror_flags &
                            HAMMER_PFSD_DELETED) {
                                *errorp = ENOENT;
                        } else {
                                bytes = cursor.leaf->data_len;
                                if (bytes > sizeof(pfsm->pfsd))
                                        bytes = sizeof(pfsm->pfsd);
                                bcopy(cursor.data, &pfsm->pfsd, bytes);
                        }
                }
        }
        hammer_done_cursor(&cursor);

        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_ref(&pfsm->lock);
        if (ip)
                hammer_rel_inode(ip, 0);
        if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
                kfree(pfsm, hmp->m_misc);
                goto retry;
        }
        return(pfsm);
}

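/*
 * Editor's note: an illustrative pairing sketch (not part of the
 * original source).  hammer_load_pseudofs() always returns a
 * referenced pfsm (a default template is returned even on error), so
 * callers drop the reference via hammer_rel_pseudofs() when done:
 *
 *      pfsm = hammer_load_pseudofs(trans, localization, &error);
 *      ...
 *      hammer_rel_pseudofs(trans->hmp, pfsm);
 */
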
/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
        struct hammer_cursor cursor;
        hammer_record_t record;
        hammer_inode_t ip;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = pfsm->localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        error = hammer_ip_lookup(&cursor);
        if (error == 0 && hammer_cursor_inmem(&cursor)) {
                record = cursor.iprec;
                if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
                        KKASSERT(cursor.deadlk_rec == NULL);
                        hammer_ref(&record->lock);
                        cursor.deadlk_rec = record;
                        error = EDEADLK;
                } else {
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        error = 0;
                }
        }
        if (error == 0 || error == ENOENT) {
                record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
                record->type = HAMMER_MEM_RECORD_GENERAL;

                record->leaf.base.localization = ip->obj_localization +
                                                 HAMMER_LOCALIZE_MISC;
                record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
                record->leaf.base.key = pfsm->localization;
                record->leaf.data_len = sizeof(pfsm->pfsd);
                bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
                error = hammer_ip_add_record(trans, record);
        }
        hammer_done_cursor(&cursor);
        if (error == EDEADLK)
                goto retry;
        hammer_rel_inode(ip, 0);
        return(error);
}

/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
                       hammer_pseudofs_inmem_t pfsm)
{
        hammer_inode_t ip;
        struct vattr vap;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              pfsm->localization, 0, &error);
        if (ip == NULL) {
                vattr_null(&vap);
                vap.va_mode = 0755;
                vap.va_type = VDIR;
                error = hammer_create_inode(trans, &vap, cred, NULL, pfsm, &ip);
                if (error == 0) {
                        ++ip->ino_data.nlinks;
                        hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
                }
        }
        if (ip)
                hammer_rel_inode(ip, 0);
        return(error);
}

/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static
int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
        int res;

        hammer_ref(&ip->lock);
        if (ip->lock.refs == 2 && ip->vp)
                vclean_unlocked(ip->vp);
        if (ip->lock.refs == 1 && ip->vp == NULL)
                res = 0;
        else
                res = -1;       /* stop, someone is using the inode */
        hammer_rel_inode(ip, 0);
        return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
        int res;
        int try;

        for (try = res = 0; try < 4; ++try) {
                res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
                                           hammer_inode_pfs_cmp,
                                           hammer_unload_pseudofs_callback,
                                           &localization);
                if (res == 0 && try > 1)
                        break;
                hammer_flusher_sync(trans->hmp);
        }
        if (res != 0)
                res = ENOTEMPTY;
        return(res);
}


/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
        hammer_unref(&pfsm->lock);
        if (pfsm->lock.refs == 0) {
                RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
                kfree(pfsm, hmp->m_misc);
        }
}

/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        hammer_record_t record;
        int error;
        int redirty;

retry:
        error = 0;

        /*
         * If the inode has a presence on-disk then locate it and mark
         * it deleted, setting DELONDISK.
         *
         * The record may or may not be physically deleted, depending on
         * the retention policy.
         */
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
            HAMMER_INODE_ONDISK) {
                hammer_normalize_cursor(cursor);
                cursor->key_beg.localization = ip->obj_localization +
                                               HAMMER_LOCALIZE_INODE;
                cursor->key_beg.obj_id = ip->obj_id;
                cursor->key_beg.key = 0;
                cursor->key_beg.create_tid = 0;
                cursor->key_beg.delete_tid = 0;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
                cursor->key_beg.obj_type = 0;
                cursor->asof = ip->obj_asof;
                cursor->flags &= ~HAMMER_CURSOR_INITMASK;
                cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
                cursor->flags |= HAMMER_CURSOR_BACKEND;

                error = hammer_btree_lookup(cursor);
                if (hammer_debug_inode)
                        kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

                if (error == 0) {
                        error = hammer_ip_delete_record(cursor, ip, trans->tid);
                        if (hammer_debug_inode)
                                kprintf(" error %d\n", error);
                        if (error == 0) {
                                ip->flags |= HAMMER_INODE_DELONDISK;
                        }
                        if (cursor->node)
                                hammer_cache_node(&ip->cache[0], cursor->node);
                }
                if (error == EDEADLK) {
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("IPDED %p %d\n", ip, error);
                        if (error == 0)
                                goto retry;
                }
        }

        /*
         * Ok, write out the initial record or a new record (after deleting
         * the old one), unless the DELETED flag is set.  This routine will
         * clear DELONDISK if it writes out a record.
         *
         * Update our inode statistics if this is the first application of
         * the inode on-disk.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
                /*
                 * Generate a record and write it to the media.  We clean up
                 * the state before releasing so we do not have to set up
                 * a flush_group.
                 */
                record = hammer_alloc_mem_record(ip, 0);
                record->type = HAMMER_MEM_RECORD_INODE;
                record->flush_state = HAMMER_FST_FLUSH;
                record->leaf = ip->sync_ino_leaf;
                record->leaf.base.create_tid = trans->tid;
                record->leaf.data_len = sizeof(ip->sync_ino_data);
                record->leaf.create_ts = trans->time32;
                record->data = (void *)&ip->sync_ino_data;
                record->flags |= HAMMER_RECF_INTERLOCK_BE;

                /*
                 * If this flag is set we cannot sync the new file size
                 * because we haven't finished related truncations.  The
                 * inode will be flushed in another flush group to finish
                 * the job.
                 */
                if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
                    ip->sync_ino_data.size != ip->ino_data.size) {
                        redirty = 1;
                        ip->sync_ino_data.size = ip->ino_data.size;
                } else {
                        redirty = 0;
                }

                for (;;) {
                        error = hammer_ip_sync_record_cursor(cursor, record);
                        if (hammer_debug_inode)
                                kprintf("GENREC %p rec %08x %d\n",
                                        ip, record->flags, error);
                        if (error != EDEADLK)
                                break;
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("GENREC reinit %d\n", error);
                        if (error)
                                break;
                }

                /*
                 * The record isn't managed by the inode's record tree,
                 * destroy it whether we succeed or fail.
                 */
                record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
                record->flags |= HAMMER_RECF_DELETED_FE | HAMMER_RECF_COMMITTED;
                record->flush_state = HAMMER_FST_IDLE;
                hammer_rel_mem_record(record);

                /*
                 * Finish up.
                 */
                if (error == 0) {
                        if (hammer_debug_inode)
                                kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
                        ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                            HAMMER_INODE_ATIME |
                                            HAMMER_INODE_MTIME);
                        ip->flags &= ~HAMMER_INODE_DELONDISK;
                        if (redirty)
                                ip->sync_flags |= HAMMER_INODE_DDIRTY;

                        /*
                         * Root volume count of inodes
                         */
                        hammer_sync_lock_sh(trans);
                        if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
                                hammer_modify_volume_field(trans,
                                                           trans->rootvol,
                                                           vol0_stat_inodes);
                                ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans->rootvol);
                                ip->flags |= HAMMER_INODE_ONDISK;
                                if (hammer_debug_inode)
                                        kprintf("NOWONDISK %p\n", ip);
                        }
                        hammer_sync_unlock(trans);
                }
        }

        /*
         * If the inode has been destroyed, clean out any left-over flags
         * that may have been set by the frontend.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                    HAMMER_INODE_ATIME |
                                    HAMMER_INODE_MTIME);
        }
        return(error);
}

/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        int error;

retry:
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
            HAMMER_INODE_ONDISK) {
                return(0);
        }

        hammer_normalize_cursor(cursor);
        cursor->key_beg.localization = ip->obj_localization +
                                       HAMMER_LOCALIZE_INODE;
        cursor->key_beg.obj_id = ip->obj_id;
        cursor->key_beg.key = 0;
        cursor->key_beg.create_tid = 0;
        cursor->key_beg.delete_tid = 0;
        cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor->key_beg.obj_type = 0;
        cursor->asof = ip->obj_asof;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_ASOF;
        cursor->flags |= HAMMER_CURSOR_GET_LEAF;
        cursor->flags |= HAMMER_CURSOR_GET_DATA;
        cursor->flags |= HAMMER_CURSOR_BACKEND;

        error = hammer_btree_lookup(cursor);
        if (error == 0) {
                hammer_cache_node(&ip->cache[0], cursor->node);
                if (ip->sync_flags & HAMMER_INODE_MTIME) {
                        /*
                         * Updating MTIME requires an UNDO.  Just cover
                         * both atime and mtime.
                         */
                        hammer_sync_lock_sh(trans);
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                     HAMMER_ITIMES_BASE(&cursor->data->inode),
                                     HAMMER_ITIMES_BYTES);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        cursor->data->inode.mtime = ip->sync_ino_data.mtime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                        hammer_sync_unlock(trans);
                } else if (ip->sync_flags & HAMMER_INODE_ATIME) {
                        /*
                         * Updating atime only can be done in-place with
                         * no UNDO.
                         */
                        hammer_sync_lock_sh(trans);
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                             NULL, 0);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                        hammer_sync_unlock(trans);
                }
                ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
        }
        if (error == EDEADLK) {
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor,
                                           &ip->cache[0], ip);
                if (error == 0)
                        goto retry;
        }
        return(error);
}

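/*
 * Editor's note (illustrative, not part of the original source): in the
 * atime-only path above, passing a NULL base and zero length to
 * hammer_modify_buffer() marks the buffer modified without generating
 * an UNDO record, which is safe because atime is not covered by the
 * B-Tree leaf CRC:
 *
 *      hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);
 *      cursor->data->inode.atime = ip->sync_ino_data.atime;
 *      hammer_modify_buffer_done(cursor->data_buffer);
 */
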
/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
        /*hammer_mount_t hmp = ip->hmp;*/

        /*
         * Handle disposition when dropping the last ref.
         */
        for (;;) {
                if (ip->lock.refs == 1) {
                        /*
                         * Determine whether on-disk action is needed for
                         * the inode's final disposition.
                         */
                        KKASSERT(ip->vp == NULL);
                        hammer_inode_unloadable_check(ip, 0);
                        if (ip->flags & HAMMER_INODE_MODMASK) {
                                hammer_flush_inode(ip, 0);
                        } else if (ip->lock.refs == 1) {
                                hammer_unload_inode(ip);
                                break;
                        }
                } else {
                        if (flush)
                                hammer_flush_inode(ip, 0);

                        /*
                         * The inode still has multiple refs, try to drop
                         * one ref.
                         */
                        KKASSERT(ip->lock.refs >= 1);
                        if (ip->lock.refs > 1) {
                                hammer_unref(&ip->lock);
                                break;
                        }
                }
        }
}

/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
        hammer_mount_t hmp = ip->hmp;

        KASSERT(ip->lock.refs == 1,
                ("hammer_unload_inode: %d refs\n", ip->lock.refs));
        KKASSERT(ip->vp == NULL);
        KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
        KKASSERT(ip->cursor_ip_refs == 0);
        KKASSERT(ip->lock.lockcount == 0);
        KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

        KKASSERT(RB_EMPTY(&ip->rec_tree));
        KKASSERT(TAILQ_EMPTY(&ip->target_list));

        RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

        hammer_free_inode(ip);
        return(0);
}

/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
{
        hammer_record_t rec;

        /*
         * Get rid of the inodes in-memory records, regardless of their
         * state, and clear the mod-mask.
         */
        while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
                TAILQ_REMOVE(&ip->target_list, rec, target_entry);
                rec->target_ip = NULL;
                if (rec->flush_state == HAMMER_FST_SETUP)
                        rec->flush_state = HAMMER_FST_IDLE;
        }
        while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
                if (rec->flush_state == HAMMER_FST_FLUSH)
                        --rec->flush_group->refs;
                else
                        hammer_ref(&rec->lock);
                KKASSERT(rec->lock.refs == 1);
                rec->flush_state = HAMMER_FST_IDLE;
                rec->flush_group = NULL;
                rec->flags |= HAMMER_RECF_DELETED_FE;
                rec->flags |= HAMMER_RECF_DELETED_BE;
                hammer_rel_mem_record(rec);
        }
        ip->flags &= ~HAMMER_INODE_MODMASK;
        ip->sync_flags &= ~HAMMER_INODE_MODMASK;
        KKASSERT(ip->vp == NULL);

        /*
         * Remove the inode from any flush group, force it idle.  FLUSH
         * and SETUP states have an inode ref.
         */
        switch(ip->flush_state) {
        case HAMMER_FST_FLUSH:
                TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
                --ip->flush_group->refs;
                ip->flush_group = NULL;
                /* fall through */
        case HAMMER_FST_SETUP:
                hammer_unref(&ip->lock);
                ip->flush_state = HAMMER_FST_IDLE;
                /* fall through */
        case HAMMER_FST_IDLE:
                break;
        }

        /*
         * There shouldn't be any associated vnode.  The unload needs at
         * least one ref, if we do have a vp steal its ip ref.
         */
        if (ip->vp) {
                kprintf("hammer_destroy_inode_callback: Unexpected "
                        "vnode association ip %p vp %p\n", ip, ip->vp);
                ip->vp->v_data = NULL;
                ip->vp = NULL;
        } else {
                hammer_ref(&ip->lock);
        }
        hammer_unload_inode(ip);
        return(0);
}

/*
 * Called on mount -u when switching from RW to RO or vice versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
        hammer_mount_t hmp = ip->hmp;

        if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
                ip->flags |= HAMMER_INODE_RO;
        else
                ip->flags &= ~HAMMER_INODE_RO;
        return(0);
}

/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: atime/mtime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
        /*
         * ronly of 0 or 2 does not trigger assertion.
         * 2 is a special error state
         */
        KKASSERT(ip->hmp->ronly != 1 ||
                  (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
                            HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
                            HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
        if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
                ip->flags |= HAMMER_INODE_RSV_INODES;
                ++ip->hmp->rsv_inodes;
        }

        ip->flags |= flags;
}

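/*
 * Editor's note: an illustrative usage sketch (not part of the original
 * source).  A frontend operation that changes inode metadata marks the
 * in-memory inode dirty and lets the flusher synchronize it, e.g. as
 * hammer_mkroot_pseudofs() does above after bumping nlinks:
 *
 *      ++ip->ino_data.nlinks;
 *      hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
 */
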
1370 /*
1371  * Request that an inode be flushed.  This whole mess cannot block and may
1372  * recurse (if not synchronous).  Once requested HAMMER will attempt to
1373  * actively flush the inode until the flush can be done.
1374  *
1375  * The inode may already be flushing, or may be in a setup state.  We can
1376  * place the inode in a flushing state if it is currently idle and flag it
1377  * to reflush if it is currently flushing.
1378  *
1379  * Upon return if the inode could not be flushed due to a setup
1380  * dependancy, then it will be automatically flushed when the dependancy
1381  * is satisfied.
1382  */
1383 void
1384 hammer_flush_inode(hammer_inode_t ip, int flags)
1385 {
1386         hammer_mount_t hmp;
1387         hammer_flush_group_t flg;
1388         int good;
1389
1390         /*
1391          * next_flush_group is the first flush group we can place the inode
1392          * in.  It may be NULL.  If it becomes full we append a new flush
1393          * group and make that the next_flush_group.
1394          */
1395         hmp = ip->hmp;
1396         while ((flg = hmp->next_flush_group) != NULL) {
1397                 KKASSERT(flg->running == 0);
1398                 if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
1399                         break;
1400                 hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
1401                 hammer_flusher_async(ip->hmp, flg);
1402         }
1403         if (flg == NULL) {
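                /*
                 * No usable flush group: allocate a fresh one and make
                 * it the designated next_flush_group.
                 */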
1404                 flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
1405                 hmp->next_flush_group = flg;
1406                 TAILQ_INIT(&flg->flush_list);
1407                 TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
1408         }
1409
1410         /*
1411          * Trivial 'nothing to flush' case.  If the inode is in a SETUP
1412          * state we have to put it back into an IDLE state so we can
1413          * drop the extra ref.
1414          *
         * If we have a parent dependency we must still fall through
1416          * so we can run it.
1417          */
1418         if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
1419                 if (ip->flush_state == HAMMER_FST_SETUP &&
1420                     TAILQ_EMPTY(&ip->target_list)) {
1421                         ip->flush_state = HAMMER_FST_IDLE;
1422                         hammer_rel_inode(ip, 0);
1423                 }
1424                 if (ip->flush_state == HAMMER_FST_IDLE)
1425                         return;
1426         }
1427
1428         /*
1429          * Our flush action will depend on the current state.
1430          */
1431         switch(ip->flush_state) {
1432         case HAMMER_FST_IDLE:
1433                 /*
                 * We have no dependencies and can flush immediately.  Some
                 * of our children may not be flushable so we have to re-test
1436                  * with that additional knowledge.
1437                  */
1438                 hammer_flush_inode_core(ip, flg, flags);
1439                 break;
1440         case HAMMER_FST_SETUP:
1441                 /*
                 * Recurse upwards through dependencies via target_list
1443                  * and start their flusher actions going if possible.
1444                  *
1445                  * 'good' is our connectivity.  -1 means we have none and
                 * can't flush, 0 means there weren't any dependencies, and
1447                  * 1 means we have good connectivity.
1448                  */
1449                 good = hammer_setup_parent_inodes(ip, flg);
1450
1451                 if (good >= 0) {
1452                         /*
1453                          * We can continue if good >= 0.  Determine how 
1454                          * many records under our inode can be flushed (and
1455                          * mark them).
1456                          */
1457                         hammer_flush_inode_core(ip, flg, flags);
1458                 } else {
1459                         /*
1460                          * Parent has no connectivity, tell it to flush
1461                          * us as soon as it does.
1462                          *
1463                          * The REFLUSH flag is also needed to trigger
                         * dependency wakeups.
1465                          */
1466                         ip->flags |= HAMMER_INODE_CONN_DOWN |
1467                                      HAMMER_INODE_REFLUSH;
1468                         if (flags & HAMMER_FLUSH_SIGNAL) {
1469                                 ip->flags |= HAMMER_INODE_RESIGNAL;
1470                                 hammer_flusher_async(ip->hmp, flg);
1471                         }
1472                 }
1473                 break;
1474         case HAMMER_FST_FLUSH:
1475                 /*
1476                  * We are already flushing, flag the inode to reflush
1477                  * if needed after it completes its current flush.
1478                  *
1479                  * The REFLUSH flag is also needed to trigger
                 * dependency wakeups.
1481                  */
1482                 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
1483                         ip->flags |= HAMMER_INODE_REFLUSH;
1484                 if (flags & HAMMER_FLUSH_SIGNAL) {
1485                         ip->flags |= HAMMER_INODE_RESIGNAL;
1486                         hammer_flusher_async(ip->hmp, flg);
1487                 }
1488                 break;
1489         }
1490 }
1491
1492 /*
 * Scan ip->target_list, which is a list of records owned by PARENT
 * inodes which reference our ip.
1495  *
1496  * XXX This is a huge mess of recursive code, but not one bit of it blocks
1497  *     so for now do not ref/deref the structures.  Note that if we use the
1498  *     ref/rel code later, the rel CAN block.
1499  */
1500 static int
1501 hammer_setup_parent_inodes(hammer_inode_t ip, hammer_flush_group_t flg)
1502 {
1503         hammer_record_t depend;
1504         int good;
1505         int r;
1506
1507         good = 0;
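        /*
         * Aggregate connectivity over all parent dependencies: any
         * parent providing connectivity (r > 0) wins, otherwise a
         * single failure (r < 0) marks us down.
         */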
1508         TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
1509                 r = hammer_setup_parent_inodes_helper(depend, flg);
1510                 KKASSERT(depend->target_ip == ip);
1511                 if (r < 0 && good == 0)
1512                         good = -1;
1513                 if (r > 0)
1514                         good = 1;
1515         }
1516         return(good);
1517 }
1518
1519 /*
 * This helper function takes a record representing the dependency between
1521  * the parent inode and child inode.
1522  *
1523  * record->ip           = parent inode
1524  * record->target_ip    = child inode
1525  * 
1526  * We are asked to recurse upwards and convert the record from SETUP
1527  * to FLUSH if possible.
1528  *
1529  * Return 1 if the record gives us connectivity
1530  *
1531  * Return 0 if the record is not relevant 
1532  *
 * Return -1 if we can't resolve the dependency and there is no connectivity.
1534  */
1535 static int
1536 hammer_setup_parent_inodes_helper(hammer_record_t record,
1537                                   hammer_flush_group_t flg)
1538 {
1539         hammer_mount_t hmp;
1540         hammer_inode_t pip;
1541         int good;
1542
1543         KKASSERT(record->flush_state != HAMMER_FST_IDLE);
1544         pip = record->ip;
1545         hmp = pip->hmp;
1546
1547         /*
1548          * If the record is already flushing, is it in our flush group?
1549          *
1550          * If it is in our flush group but it is a general record or a 
1551          * delete-on-disk, it does not improve our connectivity (return 0),
1552          * and if the target inode is not trying to destroy itself we can't
1553          * allow the operation yet anyway (the second return -1).
1554          */
1555         if (record->flush_state == HAMMER_FST_FLUSH) {
1556                 /*
1557                  * If not in our flush group ask the parent to reflush
1558                  * us as soon as possible.
1559                  */
1560                 if (record->flush_group != flg) {
1561                         pip->flags |= HAMMER_INODE_REFLUSH;
1562                         record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1563                         return(-1);
1564                 }
1565
1566                 /*
1567                  * If in our flush group everything is already set up,
1568                  * just return whether the record will improve our
1569                  * visibility or not.
1570                  */
1571                 if (record->type == HAMMER_MEM_RECORD_ADD)
1572                         return(1);
1573                 return(0);
1574         }
1575
1576         /*
         * It must be a setup record.  Try to resolve the setup dependencies
1578          * by recursing upwards so we can place ip on the flush list.
1579          */
1580         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1581
1582         good = hammer_setup_parent_inodes(pip, flg);
1583
1584         /*
1585          * If good < 0 the parent has no connectivity and we cannot safely
1586          * flush the directory entry, which also means we can't flush our
1587          * ip.  Flag the parent and us for downward recursion once the
1588          * parent's connectivity is resolved.
1589          */
1590         if (good < 0) {
1591                 /* pip->flags |= HAMMER_INODE_CONN_DOWN; set by recursion */
1592                 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1593                 return(good);
1594         }
1595
1596         /*
1597          * We are go, place the parent inode in a flushing state so we can
1598          * place its record in a flushing state.  Note that the parent
1599          * may already be flushing.  The record must be in the same flush
1600          * group as the parent.
1601          */
1602         if (pip->flush_state != HAMMER_FST_FLUSH)
1603                 hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
1604         KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
1605         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1606
1607 #if 0
1608         if (record->type == HAMMER_MEM_RECORD_DEL &&
1609             (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
1610                 /*
1611                  * Regardless of flushing state we cannot sync this path if the
1612                  * record represents a delete-on-disk but the target inode
1613                  * is not ready to sync its own deletion.
1614                  *
1615                  * XXX need to count effective nlinks to determine whether
1616                  * the flush is ok, otherwise removing a hardlink will
1617                  * just leave the DEL record to rot.
1618                  */
1619                 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
1620                 return(-1);
1621         } else
1622 #endif
1623         if (pip->flush_group == flg) {
1624                 /*
1625                  * Because we have not calculated nlinks yet we can just
1626                  * set records to the flush state if the parent is in
1627                  * the same flush group as we are.
1628                  */
1629                 record->flush_state = HAMMER_FST_FLUSH;
1630                 record->flush_group = flg;
1631                 ++record->flush_group->refs;
1632                 hammer_ref(&record->lock);
1633
1634                 /*
1635                  * A general directory-add contributes to our visibility.
1636                  *
1637                  * Otherwise it is probably a directory-delete or 
1638                  * delete-on-disk record and does not contribute to our
                 * visibility (but we can still flush it).
1640                  */
1641                 if (record->type == HAMMER_MEM_RECORD_ADD)
1642                         return(1);
1643                 return(0);
1644         } else {
1645                 /*
1646                  * If the parent is not in our flush group we cannot
1647                  * flush this record yet, there is no visibility.
1648                  * We tell the parent to reflush and mark ourselves
1649                  * so the parent knows it should flush us too.
1650                  */
1651                 pip->flags |= HAMMER_INODE_REFLUSH;
1652                 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1653                 return(-1);
1654         }
1655 }
1656
1657 /*
1658  * This is the core routine placing an inode into the FST_FLUSH state.
1659  */
1660 static void
1661 hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
1662 {
1663         int go_count;
1664
1665         /*
1666          * Set flush state and prevent the flusher from cycling into
1667          * the next flush group.  Do not place the ip on the list yet.
         * Inodes coming from the idle state are given an extra
         * reference here; inodes already in SETUP retain the one they
         * acquired when they left IDLE.
1669          */
1670         KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
1671         if (ip->flush_state == HAMMER_FST_IDLE)
1672                 hammer_ref(&ip->lock);
1673         ip->flush_state = HAMMER_FST_FLUSH;
1674         ip->flush_group = flg;
1675         ++ip->hmp->flusher.group_lock;
1676         ++ip->hmp->count_iqueued;
1677         ++hammer_count_iqueued;
1678         ++flg->total_count;
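        /*
         * NOTE: these counts are backed out below if the flush cannot
         * proceed; count_iqueued is otherwise decremented when the
         * flush completes in hammer_flush_inode_done().
         */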
1679
1680         /*
1681          * If the flush group reaches the autoflush limit we want to signal
1682          * the flusher.  This is particularly important for remove()s.
1683          */
1684         if (flg->total_count == hammer_autoflush)
1685                 flags |= HAMMER_FLUSH_SIGNAL;
1686
1687         /*
1688          * We need to be able to vfsync/truncate from the backend.
1689          */
1690         KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
1691         if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
1692                 ip->flags |= HAMMER_INODE_VHELD;
1693                 vref(ip->vp);
1694         }
1695
1696         /*
1697          * Figure out how many in-memory records we can actually flush
1698          * (not including inode meta-data, buffers, etc).
1699          */
1700         KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
1701         if (flags & HAMMER_FLUSH_RECURSION) {
1702                 /*
                 * If this is an upwards recursion we do not want to
1704                  * recurse down again!
1705                  */
1706                 go_count = 1;
1707 #if 0
1708         } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
1709                 /*
1710                  * No new records are added if we must complete a flush
1711                  * from a previous cycle, but we do have to move the records
1712                  * from the previous cycle to the current one.
1713                  */
1714 #if 0
1715                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1716                                    hammer_syncgrp_child_callback, NULL);
1717 #endif
1718                 go_count = 1;
1719 #endif
1720         } else {
1721                 /*
1722                  * Normal flush, scan records and bring them into the flush.
1723                  * Directory adds and deletes are usually skipped (they are
                 * grouped with the related inode rather than with the
1725                  * directory).
1726                  *
1727                  * go_count can be negative, which means the scan aborted
1728                  * due to the flush group being over-full and we should
1729                  * flush what we have.
1730                  */
1731                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1732                                    hammer_setup_child_callback, NULL);
1733         }
1734
1735         /*
1736          * This is a more involved test that includes go_count.  If we
1737          * can't flush, flag the inode and return.  If go_count is 0 we
         * were unable to flush any records in our rec_tree and
1739          * must ignore the XDIRTY flag.
1740          */
1741         if (go_count == 0) {
1742                 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
1743                         --ip->hmp->count_iqueued;
1744                         --hammer_count_iqueued;
1745
1746                         --flg->total_count;
1747                         ip->flush_state = HAMMER_FST_SETUP;
1748                         ip->flush_group = NULL;
1749                         if (ip->flags & HAMMER_INODE_VHELD) {
1750                                 ip->flags &= ~HAMMER_INODE_VHELD;
1751                                 vrele(ip->vp);
1752                         }
1753
1754                         /*
                         * REFLUSH is needed to trigger dependency wakeups
1756                          * when an inode is in SETUP.
1757                          */
1758                         ip->flags |= HAMMER_INODE_REFLUSH;
1759                         if (flags & HAMMER_FLUSH_SIGNAL) {
1760                                 ip->flags |= HAMMER_INODE_RESIGNAL;
1761                                 hammer_flusher_async(ip->hmp, flg);
1762                         }
1763                         if (--ip->hmp->flusher.group_lock == 0)
1764                                 wakeup(&ip->hmp->flusher.group_lock);
1765                         return;
1766                 }
1767         }
1768
1769         /*
1770          * Snapshot the state of the inode for the backend flusher.
1771          *
1772          * We continue to retain save_trunc_off even when all truncations
1773          * have been resolved as an optimization to determine if we can
1774          * skip the B-Tree lookup for overwrite deletions.
1775          *
1776          * NOTE: The DELETING flag is a mod flag, but it is also sticky,
1777          * and stays in ip->flags.  Once set, it stays set until the
1778          * inode is destroyed.
1779          */
1780         if (ip->flags & HAMMER_INODE_TRUNCATED) {
1781                 KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
1782                 ip->sync_trunc_off = ip->trunc_off;
1783                 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
1784                 ip->flags &= ~HAMMER_INODE_TRUNCATED;
1785                 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
1786
1787                 /*
1788                  * The save_trunc_off used to cache whether the B-Tree
1789                  * holds any records past that point is not used until
1790                  * after the truncation has succeeded, so we can safely
1791                  * set it now.
1792                  */
1793                 if (ip->save_trunc_off > ip->sync_trunc_off)
1794                         ip->save_trunc_off = ip->sync_trunc_off;
1795         }
1796         ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
1797                            ~HAMMER_INODE_TRUNCATED);
1798         ip->sync_ino_leaf = ip->ino_leaf;
1799         ip->sync_ino_data = ip->ino_data;
1800         ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
1801 #ifdef DEBUG_TRUNCATE
1802         if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
1803                 kprintf("truncateS %016llx\n", ip->sync_trunc_off);
1804 #endif
1805
1806         /*
1807          * The flusher list inherits our inode and reference.
1808          */
1809         KKASSERT(flg->running == 0);
1810         TAILQ_INSERT_TAIL(&flg->flush_list, ip, flush_entry);
1811         if (--ip->hmp->flusher.group_lock == 0)
1812                 wakeup(&ip->hmp->flusher.group_lock);
1813
1814         if (flags & HAMMER_FLUSH_SIGNAL) {
1815                 hammer_flusher_async(ip->hmp, flg);
1816         }
1817 }
1818
1819 /*
1820  * Callback for scan of ip->rec_tree.  Try to include each record in our
1821  * flush.  ip->flush_group has been set but the inode has not yet been
1822  * moved into a flushing state.
1823  *
1824  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
1825  * both inodes.
1826  *
1827  * We return 1 for any record placed or found in FST_FLUSH, which prevents
1828  * the caller from shortcutting the flush.
1829  */
1830 static int
1831 hammer_setup_child_callback(hammer_record_t rec, void *data)
1832 {
1833         hammer_flush_group_t flg;
1834         hammer_inode_t target_ip;
1835         hammer_inode_t ip;
1836         int r;
1837
1838         /*
1839          * Deleted records are ignored.  Note that the flush detects deleted
1840          * front-end records at multiple points to deal with races.  This is
1841          * just the first line of defense.  The only time DELETED_FE cannot
1842          * be set is when HAMMER_RECF_INTERLOCK_BE is set. 
1843          *
1844          * Don't get confused between record deletion and, say, directory
1845          * entry deletion.  The deletion of a directory entry that is on
1846          * the media has nothing to do with the record deletion flags.
1847          */
1848         if (rec->flags & (HAMMER_RECF_DELETED_FE|HAMMER_RECF_DELETED_BE)) {
1849                 if (rec->flush_state == HAMMER_FST_FLUSH) {
1850                         KKASSERT(rec->flush_group == rec->ip->flush_group);
1851                         r = 1;
1852                 } else {
1853                         r = 0;
1854                 }
1855                 return(r);
1856         }
1857
1858         /*
         * If the record is in an idle state it has no dependencies and
1860          * can be flushed.
1861          */
1862         ip = rec->ip;
1863         flg = ip->flush_group;
1864         r = 0;
1865
1866         switch(rec->flush_state) {
1867         case HAMMER_FST_IDLE:
1868                 /*
                 * The record has no setup dependency; we can flush it.
1870                  */
1871                 KKASSERT(rec->target_ip == NULL);
1872                 rec->flush_state = HAMMER_FST_FLUSH;
1873                 rec->flush_group = flg;
1874                 ++flg->refs;
1875                 hammer_ref(&rec->lock);
1876                 r = 1;
1877                 break;
1878         case HAMMER_FST_SETUP:
1879                 /*
                 * The record has a setup dependency.  These are typically
1881                  * directory entry adds and deletes.  Such entries will be
1882                  * flushed when their inodes are flushed so we do not
1883                  * usually have to add them to the flush here.  However,
1884                  * if the target_ip has set HAMMER_INODE_CONN_DOWN then
1885                  * it is asking us to flush this record (and it).
1886                  */
1887                 target_ip = rec->target_ip;
1888                 KKASSERT(target_ip != NULL);
1889                 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
1890
1891                 /*
1892                  * If the target IP is already flushing in our group
1893                  * we could associate the record, but target_ip has
1894                  * already synced ino_data to sync_ino_data and we
1895                  * would also have to adjust nlinks.   Plus there are
1896                  * ordering issues for adds and deletes.
1897                  *
1898                  * Reflush downward if this is an ADD, and upward if
1899                  * this is a DEL.
1900                  */
1901                 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
                        if (rec->type == HAMMER_MEM_RECORD_ADD)
1903                                 ip->flags |= HAMMER_INODE_REFLUSH;
1904                         else
1905                                 target_ip->flags |= HAMMER_INODE_REFLUSH;
1906                         break;
1907                 } 
1908
1909                 /*
1910                  * Target IP is not yet flushing.  This can get complex
1911                  * because we have to be careful about the recursion.
1912                  *
1913                  * Directories create an issue for us in that if a flush
1914                  * of a directory is requested the expectation is to flush
1915                  * any pending directory entries, but this will cause the
1916                  * related inodes to recursively flush as well.  We can't
                 * really defer the operation so just get as many
                 * records into the flush as we can.
1919                  */
1920 #if 0
1921                 if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
1922                     (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
1923                         /*
1924                          * We aren't reclaiming and the target ip was not
1925                          * previously prevented from flushing due to this
                         * record dependency.  Do not flush this record.
1927                          */
1928                         /*r = 0;*/
1929                 } else
1930 #endif
1931                 if (flg->total_count + flg->refs >
1932                            ip->hmp->undo_rec_limit) {
1933                         /*
1934                          * Our flush group is over-full and we risk blowing
1935                          * out the UNDO FIFO.  Stop the scan, flush what we
1936                          * have, then reflush the directory.
1937                          *
1938                          * The directory may be forced through multiple
1939                          * flush groups before it can be completely
1940                          * flushed.
1941                          */
1942                         ip->flags |= HAMMER_INODE_RESIGNAL |
1943                                      HAMMER_INODE_REFLUSH;
1944                         r = -1;
1945                 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
1946                         /*
1947                          * If the target IP is not flushing we can force
1948                          * it to flush, even if it is unable to write out
1949                          * any of its own records we have at least one in
1950                          * hand that we CAN deal with.
1951                          */
1952                         rec->flush_state = HAMMER_FST_FLUSH;
1953                         rec->flush_group = flg;
1954                         ++flg->refs;
1955                         hammer_ref(&rec->lock);
1956                         hammer_flush_inode_core(target_ip, flg,
1957                                                 HAMMER_FLUSH_RECURSION);
1958                         r = 1;
1959                 } else {
1960                         /*
1961                          * General or delete-on-disk record.
1962                          *
1963                          * XXX this needs help.  If a delete-on-disk we could
1964                          * disconnect the target.  If the target has its own
                         * dependencies they really need to be flushed.
1966                          *
1967                          * XXX
1968                          */
1969                         rec->flush_state = HAMMER_FST_FLUSH;
1970                         rec->flush_group = flg;
1971                         ++flg->refs;
1972                         hammer_ref(&rec->lock);
1973                         hammer_flush_inode_core(target_ip, flg,
1974                                                 HAMMER_FLUSH_RECURSION);
1975                         r = 1;
1976                 }
1977                 break;
1978         case HAMMER_FST_FLUSH:
1979                 /* 
1980                  * The flush_group should already match.
1981                  */
1982                 KKASSERT(rec->flush_group == flg);
1983                 r = 1;
1984                 break;
1985         }
1986         return(r);
1987 }
1988
1989 #if 0
1990 /*
1991  * This version just moves records already in a flush state to the new
1992  * flush group and that is it.
1993  */
1994 static int
1995 hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
1996 {
1997         hammer_inode_t ip = rec->ip;
1998
1999         switch(rec->flush_state) {
2000         case HAMMER_FST_FLUSH:
2001                 KKASSERT(rec->flush_group == ip->flush_group);
2002                 break;
2003         default:
2004                 break;
2005         }
2006         return(0);
2007 }
2008 #endif
2009
2010 /*
2011  * Wait for a previously queued flush to complete.
2012  *
 * If a critical error occurred we don't try to wait.
2014  */
2015 void
2016 hammer_wait_inode(hammer_inode_t ip)
2017 {
2018         hammer_flush_group_t flg;
2019
2020         flg = NULL;
2021         if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
2022                 while (ip->flush_state != HAMMER_FST_IDLE &&
2023                        (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
2024                         if (ip->flush_state == HAMMER_FST_SETUP)
2025                                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2026                         if (ip->flush_state != HAMMER_FST_IDLE) {
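                                /*
                                 * Sleep on &ip->flags; the matching
                                 * wakeup() comes from the flush
                                 * completion code when it clears
                                 * HAMMER_INODE_FLUSHW.
                                 */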
2027                                 ip->flags |= HAMMER_INODE_FLUSHW;
2028                                 tsleep(&ip->flags, 0, "hmrwin", 0);
2029                         }
2030                 }
2031         }
2032 }
2033
2034 /*
2035  * Called by the backend code when a flush has been completed.
2036  * The inode has already been removed from the flush list.
2037  *
2038  * A pipelined flush can occur, in which case we must re-enter the
2039  * inode on the list and re-copy its fields.
2040  */
2041 void
2042 hammer_flush_inode_done(hammer_inode_t ip, int error)
2043 {
2044         hammer_mount_t hmp;
2045         int dorel;
2046
2047         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
2048
2049         hmp = ip->hmp;
2050
2051         /*
2052          * Merge left-over flags back into the frontend and fix the state.
2053          * Incomplete truncations are retained by the backend.
2054          */
2055         ip->error = error;
2056         ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
2057         ip->sync_flags &= HAMMER_INODE_TRUNCATED;
2058
2059         /*
2060          * The backend may have adjusted nlinks, so if the adjusted nlinks
         * does not match the frontend's copy, set the DDIRTY flag again.
2062          */
2063         if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
2064                 ip->flags |= HAMMER_INODE_DDIRTY;
2065
2066         /*
2067          * Fix up the dirty buffer status.
2068          */
2069         if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
2070                 ip->flags |= HAMMER_INODE_BUFS;
2071         }
2072
2073         /*
2074          * Re-set the XDIRTY flag if some of the inode's in-memory records
2075          * could not be flushed.
2076          */
2077         KKASSERT((RB_EMPTY(&ip->rec_tree) &&
2078                   (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
2079                  (!RB_EMPTY(&ip->rec_tree) &&
2080                   (ip->flags & HAMMER_INODE_XDIRTY) != 0));
2081
2082         /*
2083          * Do not lose track of inodes which no longer have vnode
         * associations, otherwise they may never get flushed again.
2085          *
2086          * The reflush flag can be set superfluously, causing extra pain
2087          * for no reason.  If the inode is no longer modified it no longer
2088          * needs to be flushed.
2089          */
2090         if (ip->flags & HAMMER_INODE_MODMASK) {
2091                 if (ip->vp == NULL)
2092                         ip->flags |= HAMMER_INODE_REFLUSH;
2093         } else {
2094                 ip->flags &= ~HAMMER_INODE_REFLUSH;
2095         }
2096
2097         /*
2098          * Adjust the flush state.
2099          */
2100         if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2101                 /*
2102                  * We were unable to flush out all our records, leave the
2103                  * inode in a flush state and in the current flush group.
2104                  * The flush group will be re-run.
2105                  *
2106                  * This occurs if the UNDO block gets too full or there is
2107                  * too much dirty meta-data and allows the flusher to
2108                  * finalize the UNDO block and then re-flush.
2109                  */
2110                 ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
2111                 dorel = 0;
2112         } else {
2113                 /*
2114                  * Remove from the flush_group
2115                  */
2116                 TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
2117                 ip->flush_group = NULL;
2118
2119                 /*
2120                  * Clean up the vnode ref and tracking counts.
2121                  */
2122                 if (ip->flags & HAMMER_INODE_VHELD) {
2123                         ip->flags &= ~HAMMER_INODE_VHELD;
2124                         vrele(ip->vp);
2125                 }
2126                 --hmp->count_iqueued;
2127                 --hammer_count_iqueued;
2128
2129                 /*
2130                  * And adjust the state.
2131                  */
2132                 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
2133                         ip->flush_state = HAMMER_FST_IDLE;
2134                         dorel = 1;
2135                 } else {
2136                         ip->flush_state = HAMMER_FST_SETUP;
2137                         dorel = 0;
2138                 }
2139
2140                 /*
2141                  * If the frontend is waiting for a flush to complete,
2142                  * wake it up.
2143                  */
2144                 if (ip->flags & HAMMER_INODE_FLUSHW) {
2145                         ip->flags &= ~HAMMER_INODE_FLUSHW;
2146                         wakeup(&ip->flags);
2147                 }
2148
2149                 /*
2150                  * If the frontend made more changes and requested another
2151                  * flush, then try to get it running.
2152                  *
2153                  * Reflushes are aborted when the inode is errored out.
2154                  */
2155                 if (ip->flags & HAMMER_INODE_REFLUSH) {
2156                         ip->flags &= ~HAMMER_INODE_REFLUSH;
2157                         if (ip->flags & HAMMER_INODE_RESIGNAL) {
2158                                 ip->flags &= ~HAMMER_INODE_RESIGNAL;
2159                                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2160                         } else {
2161                                 hammer_flush_inode(ip, 0);
2162                         }
2163                 }
2164         }
2165
2166         /*
         * If we have no parent dependencies we can clear CONN_DOWN
2168          */
2169         if (TAILQ_EMPTY(&ip->target_list))
2170                 ip->flags &= ~HAMMER_INODE_CONN_DOWN;
2171
2172         /*
2173          * If the inode is now clean drop the space reservation.
2174          */
2175         if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
2176             (ip->flags & HAMMER_INODE_RSV_INODES)) {
2177                 ip->flags &= ~HAMMER_INODE_RSV_INODES;
2178                 --hmp->rsv_inodes;
2179         }
2180
2181         if (dorel)
2182                 hammer_rel_inode(ip, 0);
2183 }
2184
2185 /*
2186  * Called from hammer_sync_inode() to synchronize in-memory records
2187  * to the media.
2188  */
2189 static int
2190 hammer_sync_record_callback(hammer_record_t record, void *data)
2191 {
2192         hammer_cursor_t cursor = data;
2193         hammer_transaction_t trans = cursor->trans;
2194         hammer_mount_t hmp = trans->hmp;
2195         int error;
2196
2197         /*
2198          * Skip records that do not belong to the current flush.
2199          */
2200         ++hammer_stats_record_iterations;
2201         if (record->flush_state != HAMMER_FST_FLUSH)
2202                 return(0);
2203
2204 #if 1
2205         if (record->flush_group != record->ip->flush_group) {
                kprintf("sync_record %p ip %p bad flush group %p %p\n",
                        record, record->ip, record->flush_group,
                        record->ip->flush_group);
2207                 Debugger("blah2");
2208                 return(0);
2209         }
2210 #endif
2211         KKASSERT(record->flush_group == record->ip->flush_group);
2212
2213         /*
2214          * Interlock the record using the BE flag.  Once BE is set the
2215          * frontend cannot change the state of FE.
2216          *
2217          * NOTE: If FE is set prior to us setting BE we still sync the
2218          * record out, but the flush completion code converts it to 
2219          * a delete-on-disk record instead of destroying it.
2220          */
2221         KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
2222         record->flags |= HAMMER_RECF_INTERLOCK_BE;
2223
2224         /*
2225          * The backend may have already disposed of the record.
2226          */
2227         if (record->flags & HAMMER_RECF_DELETED_BE) {
2228                 error = 0;
2229                 goto done;
2230         }
2231
2232         /*
         * If the whole inode is being deleted, all on-disk records will
         * be deleted very soon and we can't sync any new records to disk
2235          * because they will be deleted in the same transaction they were
2236          * created in (delete_tid == create_tid), which will assert.
2237          *
2238          * XXX There may be a case with RECORD_ADD with DELETED_FE set
2239          * that we currently panic on.
2240          */
2241         if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
2242                 switch(record->type) {
2243                 case HAMMER_MEM_RECORD_DATA:
2244                         /*
2245                          * We don't have to do anything, if the record was
2246                          * committed the space will have been accounted for
2247                          * in the blockmap.
2248                          */
2249                         /* fall through */
2250                 case HAMMER_MEM_RECORD_GENERAL:
2251                         record->flags |= HAMMER_RECF_DELETED_FE;
2252                         record->flags |= HAMMER_RECF_DELETED_BE;
2253                         error = 0;
2254                         goto done;
2255                 case HAMMER_MEM_RECORD_ADD:
2256                         panic("hammer_sync_record_callback: illegal add "
2257                               "during inode deletion record %p", record);
2258                         break; /* NOT REACHED */
2259                 case HAMMER_MEM_RECORD_INODE:
2260                         panic("hammer_sync_record_callback: attempt to "
2261                               "sync inode record %p?", record);
2262                         break; /* NOT REACHED */
2263                 case HAMMER_MEM_RECORD_DEL:
2264                         /* 
2265                          * Follow through and issue the on-disk deletion
2266                          */
2267                         break;
2268                 }
2269         }
2270
2271         /*
2272          * If DELETED_FE is set special handling is needed for directory
         * entries.  Dependent pieces related to the directory entry may
2274          * have already been synced to disk.  If this occurs we have to
2275          * sync the directory entry and then change the in-memory record
2276          * from an ADD to a DELETE to cover the fact that it's been
2277          * deleted by the frontend.
2278          *
2279          * A directory delete covering record (MEM_RECORD_DEL) can never
2280          * be deleted by the frontend.
2281          *
2282          * Any other record type (aka DATA) can be deleted by the frontend.
2283          * XXX At the moment the flusher must skip it because there may
2284          * be another data record in the flush group for the same block,
2285          * meaning that some frontend data changes can leak into the backend's
2286          * synchronization point.
2287          */
2288         if (record->flags & HAMMER_RECF_DELETED_FE) {
2289                 if (record->type == HAMMER_MEM_RECORD_ADD) {
2290                         record->flags |= HAMMER_RECF_CONVERT_DELETE;
2291                 } else {
2292                         KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
2293                         record->flags |= HAMMER_RECF_DELETED_BE;
2294                         error = 0;
2295                         goto done;
2296                 }
2297         }
2298
2299         /*
2300          * Assign the create_tid for new records.  Deletions already
2301          * have the record's entire key properly set up.
2302          */
        if (record->type != HAMMER_MEM_RECORD_DEL) {
                record->leaf.base.create_tid = trans->tid;
                record->leaf.create_ts = trans->time32;
        }
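        /*
         * Retry the sync if the B-Tree deadlocks; the cursor must be
         * torn down and re-initialized before each retry.
         */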
2306         for (;;) {
2307                 error = hammer_ip_sync_record_cursor(cursor, record);
2308                 if (error != EDEADLK)
2309                         break;
2310                 hammer_done_cursor(cursor);
2311                 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
2312                                            record->ip);
2313                 if (error)
2314                         break;
2315         }
2316         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
2317
2318         if (error)
2319                 error = -error;
2320 done:
2321         hammer_flush_record_done(record, error);
2322
2323         /*
2324          * Do partial finalization if we have built up too many dirty
2325          * buffers.  Otherwise a buffer cache deadlock can occur when
2326          * doing things like creating tens of thousands of tiny files.
2327          *
2328          * We must release our cursor lock to avoid a 3-way deadlock
2329          * due to the exclusive sync lock the finalizer must get.
2330          */
2331         if (hammer_flusher_meta_limit(hmp)) {
2332                 hammer_unlock_cursor(cursor, 0);
2333                 hammer_flusher_finalize(trans, 0);
2334                 hammer_lock_cursor(cursor, 0);
2335         }
2336
2337         return(error);
2338 }
2339
2340 /*
2341  * Backend function called by the flusher to sync an inode to media.
2342  */
2343 int
2344 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
2345 {
2346         struct hammer_cursor cursor;
2347         hammer_node_t tmp_node;
2348         hammer_record_t depend;
2349         hammer_record_t next;
2350         int error, tmp_error;
2351         u_int64_t nlinks;
2352
2353         if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
2354                 return(0);
2355
2356         error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2357         if (error)
2358                 goto done;
2359
2360         /*
2361          * Any directory records referencing this inode which are not in
2362          * our current flush group must adjust our nlink count for the
2363          * purposes of synchronization to disk.
2364          *
2365          * Records which are in our flush group can be unlinked from our
2366          * inode now, potentially allowing the inode to be physically
2367          * deleted.
2368          *
2369          * This cannot block.
2370          */
2371         nlinks = ip->ino_data.nlinks;
2372         next = TAILQ_FIRST(&ip->target_list);
2373         while ((depend = next) != NULL) {
2374                 next = TAILQ_NEXT(depend, target_entry);
2375                 if (depend->flush_state == HAMMER_FST_FLUSH &&
2376                     depend->flush_group == ip->flush_group) {
2377                         /*
2378                          * If this is an ADD that was deleted by the frontend
2379                          * the frontend nlinks count will have already been
2380                          * decremented, but the backend is going to sync its
2381                          * directory entry and must account for it.  The
2382                          * record will be converted to a delete-on-disk when
2383                          * it gets synced.
2384                          *
2385                          * If the ADD was not deleted by the frontend we
                         * can remove the dependency from our target_list.
2387                          */
2388                         if (depend->flags & HAMMER_RECF_DELETED_FE) {
2389                                 ++nlinks;
2390                         } else {
2391                                 TAILQ_REMOVE(&ip->target_list, depend,
2392                                              target_entry);
2393                                 depend->target_ip = NULL;
2394                         }
2395                 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
2396                         /*
2397                          * Not part of our flush group
2398                          */
2399                         KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
2400                         switch(depend->type) {
2401                         case HAMMER_MEM_RECORD_ADD:
2402                                 --nlinks;
2403                                 break;
2404                         case HAMMER_MEM_RECORD_DEL:
2405                                 ++nlinks;
2406                                 break;
2407                         default:
2408                                 break;
2409                         }
2410                 }
2411         }
2412
2413         /*
2414          * Set dirty if we had to modify the link count.
2415          */
2416         if (ip->sync_ino_data.nlinks != nlinks) {
2417                 KKASSERT((int64_t)nlinks >= 0);
2418                 ip->sync_ino_data.nlinks = nlinks;
2419                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
2420         }
2421
2422         /*
         * If there is a truncation queued, destroy any data past the (aligned)
2424          * truncation point.  Userland will have dealt with the buffer
2425          * containing the truncation point for us.
2426          *
2427          * We don't flush pending frontend data buffers until after we've
2428          * dealt with the truncation.
2429          */
2430         if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2431                 /*
2432                  * Interlock trunc_off.  The VOP front-end may continue to
2433                  * make adjustments to it while we are blocked.
2434                  */
2435                 off_t trunc_off;
2436                 off_t aligned_trunc_off;
2437                 int blkmask;
2438
2439                 trunc_off = ip->sync_trunc_off;
2440                 blkmask = hammer_blocksize(trunc_off) - 1;
2441                 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
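                /*
                 * e.g. for a 16K block, blkmask is 0x3fff and a
                 * trunc_off of 0x4001 yields an aligned_trunc_off of
                 * 0x8000 (hammer_blocksize() selects the block size
                 * appropriate for the offset).
                 */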
2442
2443                 /*
2444                  * Delete any whole blocks on-media.  The front-end has
2445                  * already cleaned out any partial block and made it
2446                  * pending.  The front-end may have updated trunc_off
2447                  * while we were blocked so we only use sync_trunc_off.
2448                  *
                 * This operation can blow out the buffer cache; EWOULDBLOCK
2450                  * means we were unable to complete the deletion.  The
2451                  * deletion will update sync_trunc_off in that case.
2452                  */
2453                 error = hammer_ip_delete_range(&cursor, ip,
2454                                                 aligned_trunc_off,
2455                                                 0x7FFFFFFFFFFFFFFFLL, 2);
2456                 if (error == EWOULDBLOCK) {
2457                         ip->flags |= HAMMER_INODE_WOULDBLOCK;
2458                         error = 0;
2459                         goto defer_buffer_flush;
2460                 }
2461
2462                 if (error)
2463                         goto done;
2464
2465                 /*
2466                  * Clear the truncation flag on the backend after we have
                 * completed the deletions.  Backend data is now good again
2468                  * (including new records we are about to sync, below).
2469                  *
2470                  * Leave sync_trunc_off intact.  As we write additional
2471                  * records the backend will update sync_trunc_off.  This
2472                  * tells the backend whether it can skip the overwrite
2473                  * test.  This should work properly even when the backend
2474                  * writes full blocks where the truncation point straddles
2475                  * the block because the comparison is against the base
2476                  * offset of the record.
2477                  */
2478                 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2479                 /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
2480         } else {
2481                 error = 0;
2482         }
2483
2484         /*
2485          * Now sync related records.  These will typically be directory
2486          * entries, records tracking direct-writes, or delete-on-disk records.
2487          */
2488         if (error == 0) {
2489                 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2490                                     hammer_sync_record_callback, &cursor);
2491                 if (tmp_error < 0)
                        tmp_error = -tmp_error;
2493                 if (tmp_error)
2494                         error = tmp_error;
2495         }
2496         hammer_cache_node(&ip->cache[1], cursor.node);
2497
2498         /*
2499          * Re-seek for inode update, assuming our cache hasn't been ripped
2500          * out from under us.
2501          */
2502         if (error == 0) {
2503                 tmp_node = hammer_ref_node_safe(ip->hmp, &ip->cache[0], &error);
2504                 if (tmp_node) {
2505                         hammer_cursor_downgrade(&cursor);
2506                         hammer_lock_sh(&tmp_node->lock);
2507                         if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
2508                                 hammer_cursor_seek(&cursor, tmp_node, 0);
2509                         hammer_unlock(&tmp_node->lock);
2510                         hammer_rel_node(tmp_node);
2511                 }
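                /*
                 * A stale or missing cached node is not fatal; clear
                 * any error, the inode update below performs its own
                 * positioning.
                 */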
2512                 error = 0;
2513         }
2514
2515         /*
2516          * If we are deleting the inode the frontend had better not have
2517          * any active references on elements making up the inode.
2518          *
         * The call to hammer_ip_delete_clean() cleans up auxiliary records
2520          * but not DB or DATA records.  Those must have already been deleted
2521          * by the normal truncation mechanic.
2522          */
2523         if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
            RB_EMPTY(&ip->rec_tree) &&
2525             (ip->sync_flags & HAMMER_INODE_DELETING) &&
2526             (ip->flags & HAMMER_INODE_DELETED) == 0) {
2527                 int count1 = 0;
2528
2529                 error = hammer_ip_delete_clean(&cursor, ip, &count1);
2530                 if (error == 0) {
2531                         ip->flags |= HAMMER_INODE_DELETED;
2532                         ip->sync_flags &= ~HAMMER_INODE_DELETING;
2533                         ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2534                         KKASSERT(RB_EMPTY(&ip->rec_tree));
2535
2536                         /*
2537                          * Set delete_tid in both the frontend and backend
2538                          * copy of the inode record.  The DELETED flag handles
                         * this, do not set DDIRTY.
2540                          */
2541                         ip->ino_leaf.base.delete_tid = trans->tid;
2542                         ip->sync_ino_leaf.base.delete_tid = trans->tid;
2543                         ip->ino_leaf.delete_ts = trans->time32;
2544                         ip->sync_ino_leaf.delete_ts = trans->time32;
2545
2547                         /*
2548                          * Adjust the inode count in the volume header
2549                          */
2550                         hammer_sync_lock_sh(trans);
2551                         if (ip->flags & HAMMER_INODE_ONDISK) {
2552                                 hammer_modify_volume_field(trans,
2553                                                            trans->rootvol,
2554                                                            vol0_stat_inodes);
2555                                 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
2556                                 hammer_modify_volume_done(trans->rootvol);
2557                         }
2558                         hammer_sync_unlock(trans);
2559                 }
2560         }
2561
2562         if (error)
2563                 goto done;
2564         ip->sync_flags &= ~HAMMER_INODE_BUFS;
2565
2566 defer_buffer_flush:
2567         /*
2568          * Now update the inode's on-disk inode-data and/or on-disk record.
2569          * DELETED and ONDISK are managed only in ip->flags.
2570          *
         * In the case of a deferred buffer flush we still update the on-disk
         * inode to satisfy visibility requirements if there happen to be
         * directory dependencies.
2574          */
2575         switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
2576         case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
2577                 /*
                 * If deleted and on-disk, don't set any additional flags.
                 * The delete flag takes care of things.
2580                  *
2581                  * Clear flags which may have been set by the frontend.
2582                  */
2583                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2584                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2585                                     HAMMER_INODE_DELETING);
2586                 break;
2587         case HAMMER_INODE_DELETED:
2588                 /*
2589                  * Take care of the case where a deleted inode was never
2590                  * flushed to the disk in the first place.
2591                  *
2592                  * Clear flags which may have been set by the frontend.
2593                  */
2594                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2595                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2596                                     HAMMER_INODE_DELETING);
2597                 while (RB_ROOT(&ip->rec_tree)) {
2598                         hammer_record_t record = RB_ROOT(&ip->rec_tree);
2599                         hammer_ref(&record->lock);
2600                         KKASSERT(record->lock.refs == 1);
2601                         record->flags |= HAMMER_RECF_DELETED_FE;
2602                         record->flags |= HAMMER_RECF_DELETED_BE;
2603                         hammer_rel_mem_record(record);
2604                 }
2605                 break;
2606         case HAMMER_INODE_ONDISK:
2607                 /*
2608                  * If already on-disk, do not set any additional flags.
2609                  */
2610                 break;
2611         default:
2612                 /*
2613                  * If not on-disk and not deleted, set DDIRTY to force
2614                  * an initial record to be written.
2615                  *
2616                  * Also set the create_tid in both the frontend and backend
2617                  * copy of the inode record.
2618                  */
2619                 ip->ino_leaf.base.create_tid = trans->tid;
2620                 ip->ino_leaf.create_ts = trans->time32;
2621                 ip->sync_ino_leaf.base.create_tid = trans->tid;
2622                 ip->sync_ino_leaf.create_ts = trans->time32;
2623                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
2624                 break;
2625         }
2626
2627         /*
         * If DDIRTY is set, write out a new record.  If the inode
2629          * is already on-disk the old record is marked as deleted.
2630          *
2631          * If DELETED is set hammer_update_inode() will delete the existing
2632          * record without writing out a new one.
2633          *
2634          * If *ONLY* the ITIMES flag is set we can update the record in-place.
2635          */
2636         if (ip->flags & HAMMER_INODE_DELETED) {
2637                 error = hammer_update_inode(&cursor, ip);
2638         } else 
2639         if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
2640             (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
2641                 error = hammer_update_itimes(&cursor, ip);
2642         } else
2643         if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
2644                 error = hammer_update_inode(&cursor, ip);
2645         }
done:
        if (error) {
                hammer_critical_error(ip->hmp, ip, error,
                                      "while syncing inode");
        }
        hammer_done_cursor(&cursor);
        return(error);
}

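/*
 * Illustrative sketch (not compiled): the update decision above reduces
 * to a small table.  The helper below is hypothetical and only restates
 * the if/else chain in isolation, assuming the same flag definitions.
 */
#if 0
static int
hammer_sync_choose_update(hammer_inode_t ip)
{
        if (ip->flags & HAMMER_INODE_DELETED)
                return(1);      /* full update; deletes existing record */
        if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
            (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)))
                return(2);      /* in-place itimes update only */
        if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ATIME |
                              HAMMER_INODE_MTIME))
                return(1);      /* full update; writes a new record */
        return(0);              /* nothing to do */
}
#endif
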
/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
        struct vnode *vp;

        /*
         * Set the DELETING flag when the link count drops to 0 and the
         * OS no longer has any opens on the inode.
         *
         * The backend will clear DELETING (a mod flag) and set DELETED
         * (a state flag) when it is actually able to perform the
         * operation.
         *
         * Don't reflag the deletion if the flusher is currently syncing
         * one that was already flagged.  A previously set DELETING flag
         * may bounce around flags and sync_flags until the operation is
         * completely done.
         */
        if (ip->ino_data.nlinks == 0 &&
            ((ip->flags | ip->sync_flags) & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
                ip->flags |= HAMMER_INODE_DELETING;
                ip->flags |= HAMMER_INODE_TRUNCATED;
                ip->trunc_off = 0;
                vp = NULL;
                if (getvp) {
                        if (hammer_get_vnode(ip, &vp) != 0)
                                return;
                }

                /*
                 * Final cleanup
                 */
                if (ip->vp) {
                        vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
                        vnode_pager_setsize(ip->vp, 0);
                }
                if (getvp) {
                        vput(vp);
                }
        }
}

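/*
 * Usage sketch (not compiled): a typical caller is a VOP inactive or
 * reclaim path.  The function below is hypothetical and only shows the
 * calling convention: a caller already holding the vnode passes
 * getvp == 0, while a caller without a vnode passes getvp != 0 so the
 * routine can acquire and release one itself.
 */
#if 0
static int
example_vop_inactive(hammer_inode_t ip)
{
        /* vnode already referenced by the caller, so getvp == 0 */
        hammer_inode_unloadable_check(ip, 0);
        return(0);
}
#endif
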
/*
 * After potentially resolving a dependency the inode is tested
 * to determine whether it needs to be reflushed.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
        if (ip->flags & HAMMER_INODE_REFLUSH) {
                ip->flags &= ~HAMMER_INODE_REFLUSH;
                hammer_ref(&ip->lock);
                if (ip->flags & HAMMER_INODE_RESIGNAL) {
                        ip->flags &= ~HAMMER_INODE_RESIGNAL;
                        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                } else {
                        hammer_flush_inode(ip, 0);
                }
                hammer_rel_inode(ip, 0);
        }
}

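/*
 * Sketch (not compiled): one hypothetical way a dependency site could
 * drive the REFLUSH/RESIGNAL protocol that hammer_test_inode() consumes.
 * The helper name is illustrative only.
 */
#if 0
static void
example_defer_reflush(hammer_inode_t ip, int resignal)
{
        ip->flags |= HAMMER_INODE_REFLUSH;
        if (resignal)
                ip->flags |= HAMMER_INODE_RESIGNAL;
        /* ... once the dependency resolves, retest the inode ... */
        hammer_test_inode(ip);
}
#endif
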
/*
 * Clear the RECLAIM flag on an inode.  This occurs when the inode is
 * reassociated with a vp or just before it gets freed.
 *
 * Pipeline wakeups to threads blocked due to an excessive number of
 * detached inodes.  The reclaim count generates a bit of negative
 * feedback.
 */
static void
hammer_inode_wakereclaims(hammer_inode_t ip, int dowake)
{
        struct hammer_reclaim *reclaim;
        hammer_mount_t hmp = ip->hmp;

        if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
                return;

        --hammer_count_reclaiming;
        --hmp->inode_reclaims;
        ip->flags &= ~HAMMER_INODE_RECLAIM;

        if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT || dowake) {
                reclaim = TAILQ_FIRST(&hmp->reclaim_list);
                if (reclaim && reclaim->count > 0 && --reclaim->count == 0) {
                        TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
                        wakeup(reclaim);
                }
        }
}

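/*
 * Note (descriptive, matching the code above): this is the producer
 * side of the reclaim pipeline.  Each blocked thread in
 * hammer_inode_waitreclaims() queues a struct hammer_reclaim with
 * count initialized to 2, so only the head waiter is decremented here
 * and it wakes up after two inodes finish reclaiming (or when its
 * tsleep timeout expires).
 */
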
/*
 * Set up our reclaim pipeline.  We only let so many detached (and dirty)
 * inodes build up before we start blocking.
 *
 * When we block we don't care *which* inode has finished reclaiming,
 * as long as one does.  This is somewhat heuristic... we also put a
 * cap on how long we are willing to wait.
 */
void
hammer_inode_waitreclaims(hammer_mount_t hmp)
{
        struct hammer_reclaim reclaim;
        int delay;

        if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT)
                return;
        delay = (hmp->inode_reclaims - HAMMER_RECLAIM_WAIT) * hz /
                (HAMMER_RECLAIM_WAIT * 3) + 1;
        if (delay > 0) {
                reclaim.count = 2;
                TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
                tsleep(&reclaim, 0, "hmrrcm", delay);
                if (reclaim.count > 0)
                        TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
        }
}

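/*
 * Worked example for the delay computation above (HAMMER_RECLAIM_WAIT
 * assumed to be 4000 purely for illustration): with inode_reclaims ==
 * 8000 the delay is (8000 - 4000) * hz / (4000 * 3) + 1 == hz / 3 + 1
 * ticks, i.e. roughly a third of a second before the thread re-checks
 * the backlog.  Note the formula yields at least 1 tick once the
 * threshold is crossed, so the (delay > 0) test is effectively always
 * true at that point.
 */
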
/*
 * When a larger than normal backlog of inodes is sitting in the flusher,
 * enforce a general slowdown to let it catch up.  This routine is only
 * called on completion of a non-flusher-related transaction which
 * performed B-Tree node I/O.
 *
 * It is possible for the flusher to stall under a continuous load.
 * blogbench -i1000 -o seems to do a good job generating this sort of load.
 * If the flusher is unable to catch up the inode count can bloat until
 * we run out of kvm.
 *
 * This is a bit of a hack.
 */
void
hammer_inode_waithard(hammer_mount_t hmp)
{
        /*
         * Hysteresis.
         */
        if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
                if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT / 2 &&
                    hmp->count_iqueued < hmp->count_inodes / 20) {
                        hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
                        return;
                }
        } else {
                if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT ||
                    hmp->count_iqueued < hmp->count_inodes / 10) {
                        return;
                }
                hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
        }

        /*
         * Block for one flush cycle.
         */
        hammer_flusher_wait_next(hmp);
}

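/*
 * Sketch (restating the hysteresis above, not redefining it): the
 * slowdown engages when inode_reclaims >= HAMMER_RECLAIM_WAIT and
 * count_iqueued >= count_inodes / 10, and disengages only once
 * inode_reclaims drops below HAMMER_RECLAIM_WAIT / 2 and count_iqueued
 * drops below count_inodes / 20.  The gap between the enter and leave
 * thresholds keeps the mount from flapping in and out of recovery mode.
 */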