/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "hammer.h"

static int      hammer_unload_inode(struct hammer_inode *ip);
static void     hammer_free_inode(hammer_inode_t ip);
static void     hammer_flush_inode_core(hammer_inode_t ip,
                                        hammer_flush_group_t flg, int flags);
static int      hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int      hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int      hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
                                        hammer_flush_group_t flg);
static int      hammer_setup_parent_inodes_helper(hammer_record_t record,
                                        int depth, hammer_flush_group_t flg);
static void     hammer_inode_wakereclaims(hammer_inode_t ip);
static struct hammer_inostats *hammer_inode_inostats(hammer_mount_t hmp,
                                        pid_t pid);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif

struct krate hammer_gen_krate = { 1 };

/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_localization < ip2->obj_localization)
                return(-1);
        if (ip1->obj_localization > ip2->obj_localization)
                return(1);
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}

int
hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->redo_fifo_start < ip2->redo_fifo_start)
                return(-1);
        if (ip1->redo_fifo_start > ip2->redo_fifo_start)
                return(1);
        return(0);
}

/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_localization < ip->obj_localization)
                return(-1);
        if (info->obj_localization > ip->obj_localization)
                return(1);
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}

/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
        hammer_inode_info_t info = data;

        if (ip->obj_localization > info->obj_localization)
                return(1);
        if (ip->obj_localization < info->obj_localization)
                return(-1);
        if (ip->obj_id > info->obj_id)
                return(1);
        if (ip->obj_id < info->obj_id)
                return(-1);
        return(0);
}

/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
        u_int32_t localization = *(u_int32_t *)data;
        if (ip->obj_localization > localization)
                return(1);
        if (ip->obj_localization < localization)
                return(-1);
        return(0);
}

/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
        if (p1->localization < p2->localization)
                return(-1);
        if (p1->localization > p2->localization)
                return(1);
        return(0);
}

RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
             hammer_pfs_rb_compare, u_int32_t, localization);
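
/*
 * Illustrative sketch (not part of the build): the XLOOKUP variant
 * generated above lets callers probe the inode tree with a lightweight
 * struct hammer_inode_info key instead of a fully constructed inode.
 * A hypothetical helper mirroring the lookups in hammer_get_inode()
 * below would look like this:
 */
#if 0
static hammer_inode_t
example_ino_lookup(hammer_mount_t hmp, int64_t obj_id, hammer_tid_t asof,
                   u_int32_t localization)
{
        struct hammer_inode_info iinfo;

        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
        return(hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo));
}
#endif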

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 *
 * MPALMOSTSAFE
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);
        hammer_mount_t hmp;

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(ap->a_vp);
                return(0);
        }

        /*
         * If the inode no longer has visibility in the filesystem try to
         * recycle it immediately, even if the inode is dirty.  Recycling
         * it quickly allows the system to reclaim buffer cache and VM
         * resources which can matter a lot in a heavily loaded system.
         *
         * This can deadlock in vfsync() if we aren't careful.
         *
         * Do not queue the inode to the flusher if we still have visibility,
         * otherwise namespace calls such as chmod will unnecessarily generate
         * multiple inode updates.
         */
        if (ip->ino_data.nlinks == 0) {
                hmp = ip->hmp;
                lwkt_gettoken(&hmp->fs_token);
                hammer_inode_unloadable_check(ip, 0);
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
                lwkt_reltoken(&hmp->fs_token);
                vrecycle(ap->a_vp);
        }
        return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 *
 * We must interlock ip->vp so hammer_get_vnode() can avoid races.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct vnode *vp;

        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
                lwkt_gettoken(&hmp->fs_token);
                hammer_lock_ex(&ip->lock);
                vp->v_data = NULL;
                ip->vp = NULL;

                if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
                        ++hammer_count_reclaims;
                        ++hmp->count_reclaims;
                        ip->flags |= HAMMER_INODE_RECLAIM;
                }
                hammer_unlock(&ip->lock);
                vclrisdirty(vp);
                hammer_rel_inode(ip, 1);
                lwkt_reltoken(&hmp->fs_token);
        }
        return(0);
}

/*
 * Inform the kernel that the inode is dirty.  This will be checked
 * by vn_unlock().
 *
 * Theoretically, in order to reclaim a vnode hammer_vop_reclaim()
 * must be called, which will interlock against our inode lock, so
 * if VRECLAIMED is not set vp->v_mount (as used by vsetisdirty())
 * should be stable without having to acquire any new locks.
 */
void
hammer_inode_dirty(struct hammer_inode *ip)
{
        struct vnode *vp;

        if ((ip->flags & HAMMER_INODE_MODMASK) &&
            (vp = ip->vp) != NULL &&
            (vp->v_flag & (VRECLAIMED | VISDIRTY)) == 0) {
                vsetisdirty(vp);
        }
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
        hammer_mount_t hmp;
        struct vnode *vp;
        int error = 0;
        u_int8_t obj_type;

        hmp = ip->hmp;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp = *vpp;
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;

                        obj_type = ip->ino_data.obj_type;
                        vp->v_type = hammer_get_vnode_type(obj_type);

                        hammer_inode_wakereclaims(ip);

                        switch(ip->ino_data.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
                                break;
                        case HAMMER_OBJTYPE_REGFILE:
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         *
                         * Pseudo-filesystem roots can be accessed via
                         * non-root filesystem paths and setting VROOT may
                         * confuse the namecache.  Set VPFSROOT instead.
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT) {
                                if (ip->obj_asof == hmp->asof) {
                                        if (ip->obj_localization == 0)
                                                vsetflags(vp, VROOT);
                                        else
                                                vsetflags(vp, VPFSROOT);
                                } else {
                                        vsetflags(vp, VPFSROOT);
                                }
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
                        if (vp->v_type == VREG) {
                                vinitvmio(vp, ip->ino_data.size,
                                          hammer_blocksize(ip->ino_data.size),
                                          hammer_blockoff(ip->ino_data.size));
                        }
                        break;
                }

                /*
                 * Interlock vnode clearing.  This does not prevent the
                 * vnode from going into a reclaimed state but it does
                 * prevent it from being destroyed or reused so the vget()
                 * will properly fail.
                 */
                hammer_lock_ex(&ip->lock);
                if ((vp = ip->vp) == NULL) {
                        hammer_unlock(&ip->lock);
                        continue;
                }
                vhold(vp);
                hammer_unlock(&ip->lock);

                /*
                 * Loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp) {
                                vdrop(vp);
                                break;
                        }
                        vput(vp);
                }
                vdrop(vp);
        }
        *vpp = vp;
        return(error);
}
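
/*
 * Illustrative sketch (not part of the build): a typical frontend caller
 * obtains a referenced inode first and then attaches the vnode.  The
 * vnode comes back locked and referenced; the names used here (dip,
 * obj_id, asof, localization, vp) are placeholders:
 */
#if 0
        ip = hammer_get_inode(&trans, dip, obj_id, asof, localization,
                              0, &error);
        if (ip) {
                error = hammer_get_vnode(ip, &vp);
                hammer_rel_inode(ip, 0);        /* vp holds its own ref */
                if (error == 0)
                        vput(vp);               /* unlock + release */
        }
#endif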

/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference each, and issue the related call-back.  This routine is
 * used for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
                            int (*callback)(hammer_inode_t ip, void *data),
                            void *data)
{
        hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
                                   hammer_inode_info_cmp_all_history,
                                   callback, iinfo);
}
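
/*
 * Illustrative sketch (not part of the build): a callback suitable for
 * the scan above.  Note that the scan passes the iinfo structure as the
 * callback's data argument; the callback name is hypothetical.  Returning
 * 0 keeps scanning, while returning -1 stops the scan (compare
 * hammer_unload_pseudofs_callback() further below):
 */
#if 0
static int
example_snapshot_callback(hammer_inode_t ip, void *data)
{
        hammer_inode_info_t iinfo = data;

        /* act on one snapshot instance of iinfo->obj_id here */
        return(0);
}
#endif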

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_node_cache *cachep;
        struct hammer_inode_info iinfo;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a dummy inode we return a failure so dounlink
         * (which does another lookup) doesn't try to mess with the
         * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
         * to ref dummy inodes.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        if (hmp->ronly)
                ip->flags |= HAMMER_INODE_RO;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Locate the on-disk inode.  If this is a PFS root we always
         * access the current version of the root inode and (if it is not
         * a master) always access information under it with a snapshot
         * TID.
         *
         * We cache recent inode lookups in this directory in dip->cache[2].
         * If we can't find it we assume the inode we are looking for is
         * close to the directory inode.
         */
retry:
        cachep = NULL;
        if (dip) {
                if (dip->cache[2].node)
                        cachep = &dip->cache[2];
                else
                        cachep = &dip->cache[0];
        }
        hammer_init_cursor(trans, &cursor, cachep, NULL);
        cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;

        cursor.asof = iinfo.obj_asof;
        cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
                       HAMMER_CURSOR_ASOF;

        *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
                ip->ino_data = cursor.data->inode;

                /*
                 * cache[0] tries to cache the location of the object inode.
                 * The assumption is that it is near the directory inode.
                 *
                 * cache[1] tries to cache the location of the object data.
                 * We might have something in the governing directory from
                 * scan optimizations (see the strategy code in
                 * hammer_vnops.c).
                 *
                 * We update dip->cache[2], if possible, with the location
                 * of the object inode for future directory shortcuts.
                 */
                hammer_cache_node(&ip->cache[0], cursor.node);
                if (dip) {
                        if (dip->cache[3].node) {
                                hammer_cache_node(&ip->cache[1],
                                                  dip->cache[3].node);
                        }
                        hammer_cache_node(&dip->cache[2], cursor.node);
                }

                /*
                 * The file should not contain any data past the file size
                 * stored in the inode.  Setting save_trunc_off to the
                 * file size instead of max reduces B-Tree lookup overheads
                 * on append by allowing the flusher to avoid checking for
                 * record overwrites.
                 */
                ip->save_trunc_off = ip->ino_data.size;

                /*
                 * Locate and assign the pseudofs management structure to
                 * the inode.
                 */
                if (dip && dip->obj_localization == ip->obj_localization) {
                        ip->pfsm = dip->pfsm;
                        hammer_ref(&ip->pfsm->lock);
                } else {
                        ip->pfsm = hammer_load_pseudofs(trans,
                                                        ip->obj_localization,
                                                        errorp);
                        *errorp = 0;    /* ignore ENOENT */
                }
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }

                hammer_free_inode(ip);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);

        /*
         * NEWINODE is only set if the inode becomes dirty later,
         * setting it here just leads to unnecessary stalls.
         *
         * trans->flags |= HAMMER_TRANSF_NEWINODE;
         */
        return (ip);
}

/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a non-fake inode we return an error.  Only fake
         * inodes can be returned by this routine.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        *errorp = 0;
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Populate the dummy inode.  Leave everything zero'd out.
         *
         * (ip->ino_leaf and ip->ino_data)
         *
         * Make the dummy inode a FIFO object which most copy programs
         * will properly ignore.
         */
        ip->save_trunc_off = ip->ino_data.size;
        ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

        /*
         * Locate and assign the pseudofs management structure to
         * the inode.
         */
        if (dip && dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
        } else {
                ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
                                                errorp);
                *errorp = 0;    /* ignore ENOENT */
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         *
         * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        goto loop;
                }
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }
                hammer_free_inode(ip);
                ip = NULL;
        }
        trans->flags |= HAMMER_TRANSF_NEWINODE;
        return (ip);
}

/*
 * Return a referenced inode only if it is in our inode cache.
 *
 * Dummy inodes do not count.
 */
struct hammer_inode *
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
                  hammer_tid_t asof, u_int32_t localization)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;

        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY)
                        ip = NULL;
                else
                        hammer_ref(&ip->lock);
        }
        return(ip);
}

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred,
                    hammer_inode_t dip, const char *name, int namelen,
                    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;
        int error;
        int64_t namekey;
        u_int32_t dummy;

        hmp = trans->hmp;

        /*
         * Disallow the creation of new inodes in directories which
         * have been deleted.  In HAMMER, this will cause a record
         * syncing assertion later on in the flush code.
         */
        if (dip && dip->ino_data.nlinks == 0) {
                *ipp = NULL;
                return (EINVAL);
        }

        /*
         * Allocate inode
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        trans->flags |= HAMMER_TRANSF_NEWINODE;

        if (pfsm) {
                KKASSERT(pfsm->localization != 0);
                ip->obj_id = HAMMER_OBJID_ROOT;
                ip->obj_localization = pfsm->localization;
        } else {
                KKASSERT(dip != NULL);
                namekey = hammer_directory_namekey(dip, name, namelen, &dummy);
                ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
                ip->obj_localization = dip->obj_localization;
        }

        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY |
                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;

        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        /* ip->save_trunc_off = 0; (already zero) */
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        ip->ino_data.atime = trans->time;
        ip->ino_data.mtime = trans->time;
        ip->ino_data.size = 0;
        ip->ino_data.nlinks = 0;

        /*
         * A nohistory designator on the parent directory is inherited by
         * the child.  We will do this even for pseudo-fs creation... the
         * sysadmin can turn it off.
         */
        if (dip) {
                ip->ino_data.uflags = dip->ino_data.uflags &
                                      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
        }

        ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_leaf.base.localization = ip->obj_localization +
                                         HAMMER_LOCALIZE_INODE;
        ip->ino_leaf.base.obj_id = ip->obj_id;
        ip->ino_leaf.base.key = 0;
        ip->ino_leaf.base.create_tid = 0;
        ip->ino_leaf.base.delete_tid = 0;
        ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;

        /*
         * If we are running version 2 or greater directory entries are
         * inode-localized instead of data-localized.
         */
        if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
                if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        ip->ino_data.cap_flags |=
                                HAMMER_INODE_CAP_DIR_LOCAL_INO;
                }
        }
        if (trans->hmp->version >= HAMMER_VOL_VERSION_SIX) {
                if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        ip->ino_data.cap_flags |=
                                HAMMER_INODE_CAP_DIRHASH_ALG1;
                }
        }

        /*
         * Setup the ".." pointer.  This only needs to be done for directories
         * but we do it for all objects as a recovery aid if dip exists.
         * The inode is probably a PFS root if dip is NULL.
         */
        if (dip)
                ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
#if 0
        /*
         * The parent_obj_localization field only applies to pseudo-fs roots.
         * XXX this is no longer applicable, PFSs are no longer directly
         * tied into the parent's directory structure.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
            ip->obj_id == HAMMER_OBJID_ROOT) {
                ip->ino_data.ext.obj.parent_obj_localization =
                                                dip->obj_localization;
        }
#endif

        switch(ip->ino_leaf.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        if (dip) {
                xuid = hammer_to_unix_xid(&dip->ino_data.uid);
                xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
                                             xuid, cred, &vap->va_mode);
        } else {
                xuid = 0;
        }
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
        else
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
        else if (dip)
                ip->ino_data.gid = dip->ino_data.gid;

        hammer_ref(&ip->lock);

        if (pfsm) {
                ip->pfsm = pfsm;
                hammer_ref(&pfsm->lock);
                error = 0;
        } else if (dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
                error = 0;
        } else {
                ip->pfsm = hammer_load_pseudofs(trans,
                                                ip->obj_localization,
                                                &error);
                error = 0;      /* ignore ENOENT */
        }

        if (error) {
                hammer_free_inode(ip);
                ip = NULL;
        } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                panic("hammer_create_inode: duplicate obj_id %llx",
                      (long long)ip->obj_id);
                /* not reached */
                hammer_free_inode(ip);
        }
        *ipp = ip;
        return(error);
}
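
/*
 * Illustrative sketch (not part of the build): a create-style VOP
 * roughly pairs hammer_create_inode() with a directory entry add, as
 * hammer_vop_ncreate() does in hammer_vnops.c.  Error handling is
 * elided here and the local names are placeholders:
 */
#if 0
        error = hammer_create_inode(&trans, vap, cred, dip, name, namelen,
                                    NULL, &nip);
        if (error == 0)
                error = hammer_ip_add_directory(&trans, dip, name, namelen,
                                                nip);
#endif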

/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
        struct hammer_mount *hmp;

        hmp = ip->hmp;
        KKASSERT(hammer_oneref(&ip->lock));
        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        hammer_uncache_node(&ip->cache[2]);
        hammer_uncache_node(&ip->cache[3]);
        hammer_inode_wakereclaims(ip);
        if (ip->objid_cache)
                hammer_clear_objid(ip);
        --hammer_count_inodes;
        --hmp->count_inodes;
        if (ip->pfsm) {
                hammer_rel_pseudofs(hmp, ip->pfsm);
                ip->pfsm = NULL;
        }
        kfree(ip, hmp->m_inodes);
        ip = NULL;
}

/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
                     u_int32_t localization, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_inode_t ip;
        hammer_pseudofs_inmem_t pfsm;
        struct hammer_cursor cursor;
        int bytes;

retry:
        pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
        if (pfsm) {
                hammer_ref(&pfsm->lock);
                *errorp = 0;
                return(pfsm);
        }

        /*
         * PFS records are associated with the root inode (not the PFS root
         * inode, but the real root).  Avoid an infinite recursion if loading
         * the PFS for the real root.
         */
        if (localization) {
                ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
                                      HAMMER_MAX_TID,
                                      HAMMER_DEF_LOCALIZATION, 0, errorp);
        } else {
                ip = NULL;
        }

        pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
        pfsm->localization = localization;
        pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
        pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

        hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
        cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        if (ip)
                *errorp = hammer_ip_lookup(&cursor);
        else
                *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == 0) {
                *errorp = hammer_ip_resolve_data(&cursor);
                if (*errorp == 0) {
                        if (cursor.data->pfsd.mirror_flags &
                            HAMMER_PFSD_DELETED) {
                                *errorp = ENOENT;
                        } else {
                                bytes = cursor.leaf->data_len;
                                if (bytes > sizeof(pfsm->pfsd))
                                        bytes = sizeof(pfsm->pfsd);
                                bcopy(cursor.data, &pfsm->pfsd, bytes);
                        }
                }
        }
        hammer_done_cursor(&cursor);

        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_ref(&pfsm->lock);
        if (ip)
                hammer_rel_inode(ip, 0);
        if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
                kfree(pfsm, hmp->m_misc);
                goto retry;
        }
        return(pfsm);
}
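
/*
 * Illustrative sketch (not part of the build): callers pair the load
 * with hammer_rel_pseudofs() once they are done with the structure,
 * mirroring the dip->pfsm handling in hammer_get_inode() above:
 */
#if 0
        pfsm = hammer_load_pseudofs(trans, localization, &error);
        /* ... use pfsm->pfsd ... */
        hammer_rel_pseudofs(trans->hmp, pfsm);
#endif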

/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
        struct hammer_cursor cursor;
        hammer_record_t record;
        hammer_inode_t ip;
        int error;

        /*
         * PFS records are associated with the root inode (not the PFS root
         * inode, but the real root).
         */
        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = pfsm->localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        /*
         * Replace any in-memory version of the record.
         */
        error = hammer_ip_lookup(&cursor);
        if (error == 0 && hammer_cursor_inmem(&cursor)) {
                record = cursor.iprec;
                if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
                        KKASSERT(cursor.deadlk_rec == NULL);
                        hammer_ref(&record->lock);
                        cursor.deadlk_rec = record;
                        error = EDEADLK;
                } else {
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        error = 0;
                }
        }

        /*
         * Allocate replacement general record.  The backend flush will
         * delete any on-disk version of the record.
         */
        if (error == 0 || error == ENOENT) {
                record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
                record->type = HAMMER_MEM_RECORD_GENERAL;

                record->leaf.base.localization = ip->obj_localization +
                                                 HAMMER_LOCALIZE_MISC;
                record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
                record->leaf.base.key = pfsm->localization;
                record->leaf.data_len = sizeof(pfsm->pfsd);
                bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
                error = hammer_ip_add_record(trans, record);
        }
        hammer_done_cursor(&cursor);
        if (error == EDEADLK)
                goto retry;
        hammer_rel_inode(ip, 0);
        return(error);
}
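
/*
 * Illustrative sketch (not part of the build): an update path copies new
 * data into the in-memory PFS structure and then persists it, which is
 * roughly what the set-pseudofs ioctl path does.  new_pfsd is a
 * hypothetical source structure:
 */
#if 0
        bcopy(&new_pfsd, &pfsm->pfsd, sizeof(pfsm->pfsd));
        error = hammer_save_pseudofs(trans, pfsm);
#endif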

/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
                       hammer_pseudofs_inmem_t pfsm)
{
        hammer_inode_t ip;
        struct vattr vap;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              pfsm->localization, 0, &error);
        if (ip == NULL) {
                vattr_null(&vap);
                vap.va_mode = 0755;
                vap.va_type = VDIR;
                error = hammer_create_inode(trans, &vap, cred,
                                            NULL, NULL, 0,
                                            pfsm, &ip);
                if (error == 0) {
                        ++ip->ino_data.nlinks;
                        hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
                }
        }
        if (ip)
                hammer_rel_inode(ip, 0);
        return(error);
}

/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static
int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
        int res;

        hammer_ref(&ip->lock);
        if (ip->vp && (ip->vp->v_flag & VPFSROOT)) {
                /*
                 * The hammer pfs-upgrade directive itself might have the
                 * root of the pfs open.  Just allow it.
                 */
                res = 0;
        } else {
                /*
                 * Don't allow any subdirectories or files to be open.
                 */
                if (hammer_isactive(&ip->lock) == 2 && ip->vp)
                        vclean_unlocked(ip->vp);
                if (hammer_isactive(&ip->lock) == 1 && ip->vp == NULL)
                        res = 0;
                else
                        res = -1;       /* stop, someone is using the inode */
        }
        hammer_rel_inode(ip, 0);
        return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
        int res;
        int try;

        for (try = res = 0; try < 4; ++try) {
                res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
                                           hammer_inode_pfs_cmp,
                                           hammer_unload_pseudofs_callback,
                                           &localization);
                if (res == 0 && try > 1)
                        break;
                hammer_flusher_sync(trans->hmp);
        }
        if (res != 0)
                res = ENOTEMPTY;
        return(res);
}

/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
        hammer_rel(&pfsm->lock);
        if (hammer_norefs(&pfsm->lock)) {
                RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
                kfree(pfsm, hmp->m_misc);
        }
}

/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        hammer_record_t record;
        int error;
        int redirty;

retry:
        error = 0;

        /*
         * If the inode has a presence on-disk then locate it and mark
         * it deleted, setting DELONDISK.
         *
         * The record may or may not be physically deleted, depending on
         * the retention policy.
         */
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
            HAMMER_INODE_ONDISK) {
                hammer_normalize_cursor(cursor);
                cursor->key_beg.localization = ip->obj_localization +
                                               HAMMER_LOCALIZE_INODE;
                cursor->key_beg.obj_id = ip->obj_id;
                cursor->key_beg.key = 0;
                cursor->key_beg.create_tid = 0;
                cursor->key_beg.delete_tid = 0;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
                cursor->key_beg.obj_type = 0;
                cursor->asof = ip->obj_asof;
                cursor->flags &= ~HAMMER_CURSOR_INITMASK;
                cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
                cursor->flags |= HAMMER_CURSOR_BACKEND;

                error = hammer_btree_lookup(cursor);
                if (hammer_debug_inode)
                        kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

                if (error == 0) {
                        error = hammer_ip_delete_record(cursor, ip, trans->tid);
                        if (hammer_debug_inode)
                                kprintf(" error %d\n", error);
                        if (error == 0) {
                                ip->flags |= HAMMER_INODE_DELONDISK;
                        }
                        if (cursor->node)
                                hammer_cache_node(&ip->cache[0], cursor->node);
                }
                if (error == EDEADLK) {
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("IPDED %p %d\n", ip, error);
                        if (error == 0)
                                goto retry;
                }
        }

        /*
         * Ok, write out the initial record or a new record (after deleting
         * the old one), unless the DELETED flag is set.  This routine will
         * clear DELONDISK if it writes out a record.
         *
         * Update our inode statistics if this is the first application of
         * the inode on-disk.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
                /*
                 * Generate a record and write it to the media.  We clean-up
                 * the state before releasing so we do not have to set-up
                 * a flush_group.
                 */
                record = hammer_alloc_mem_record(ip, 0);
                record->type = HAMMER_MEM_RECORD_INODE;
                record->flush_state = HAMMER_FST_FLUSH;
                record->leaf = ip->sync_ino_leaf;
                record->leaf.base.create_tid = trans->tid;
                record->leaf.data_len = sizeof(ip->sync_ino_data);
                record->leaf.create_ts = trans->time32;
                record->data = (void *)&ip->sync_ino_data;
                record->flags |= HAMMER_RECF_INTERLOCK_BE;

                /*
                 * If this flag is set we cannot sync the new file size
                 * because we haven't finished related truncations.  The
                 * inode will be flushed in another flush group to finish
                 * the job.
                 */
                if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
                    ip->sync_ino_data.size != ip->ino_data.size) {
                        redirty = 1;
                        ip->sync_ino_data.size = ip->ino_data.size;
                } else {
                        redirty = 0;
                }

                for (;;) {
                        error = hammer_ip_sync_record_cursor(cursor, record);
                        if (hammer_debug_inode)
                                kprintf("GENREC %p rec %08x %d\n",
                                        ip, record->flags, error);
                        if (error != EDEADLK)
                                break;
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("GENREC reinit %d\n", error);
                        if (error)
                                break;
                }

                /*
                 * Note:  The record was never on the inode's record tree
                 * so just wave our hands importantly and destroy it.
                 */
                record->flags |= HAMMER_RECF_COMMITTED;
                record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
                record->flush_state = HAMMER_FST_IDLE;
                ++ip->rec_generation;
                hammer_rel_mem_record(record);

                /*
                 * Finish up.
                 */
                if (error == 0) {
                        if (hammer_debug_inode)
                                kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
                        ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                            HAMMER_INODE_SDIRTY |
                                            HAMMER_INODE_ATIME |
                                            HAMMER_INODE_MTIME);
                        ip->flags &= ~HAMMER_INODE_DELONDISK;
                        if (redirty)
                                ip->sync_flags |= HAMMER_INODE_DDIRTY;

                        /*
                         * Root volume count of inodes
                         */
                        hammer_sync_lock_sh(trans);
                        if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
                                hammer_modify_volume_field(trans,
                                                           trans->rootvol,
                                                           vol0_stat_inodes);
                                ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans->rootvol);
                                ip->flags |= HAMMER_INODE_ONDISK;
                                if (hammer_debug_inode)
                                        kprintf("NOWONDISK %p\n", ip);
                        }
                        hammer_sync_unlock(trans);
                }
        }

        /*
         * If the inode has been destroyed, clean out any left-over flags
         * that may have been set by the frontend.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                    HAMMER_INODE_SDIRTY |
                                    HAMMER_INODE_ATIME |
                                    HAMMER_INODE_MTIME);
        }
        return(error);
}
1398
1399 /*
1400  * Update only the itimes fields.
1401  *
1402  * ATIME can be updated without generating any UNDO.  MTIME is updated
1403  * with UNDO so it is guaranteed to be synchronized properly in case of
1404  * a crash.
1405  *
1406  * Neither field is included in the B-Tree leaf element's CRC, which is how
1407  * we can get away with updating ATIME the way we do.
1408  */
1409 static int
1410 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
1411 {
1412         hammer_transaction_t trans = cursor->trans;
1413         int error;
1414
1415 retry:
1416         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
1417             HAMMER_INODE_ONDISK) {
1418                 return(0);
1419         }
1420
1421         hammer_normalize_cursor(cursor);
1422         cursor->key_beg.localization = ip->obj_localization +
1423                                        HAMMER_LOCALIZE_INODE;
1424         cursor->key_beg.obj_id = ip->obj_id;
1425         cursor->key_beg.key = 0;
1426         cursor->key_beg.create_tid = 0;
1427         cursor->key_beg.delete_tid = 0;
1428         cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
1429         cursor->key_beg.obj_type = 0;
1430         cursor->asof = ip->obj_asof;
1431         cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1432         cursor->flags |= HAMMER_CURSOR_ASOF;
1433         cursor->flags |= HAMMER_CURSOR_GET_LEAF;
1434         cursor->flags |= HAMMER_CURSOR_GET_DATA;
1435         cursor->flags |= HAMMER_CURSOR_BACKEND;
1436
1437         error = hammer_btree_lookup(cursor);
1438         if (error == 0) {
1439                 hammer_cache_node(&ip->cache[0], cursor->node);
1440                 if (ip->sync_flags & HAMMER_INODE_MTIME) {
1441                         /*
1442                          * Updating MTIME requires an UNDO.  Just cover
1443                          * both atime and mtime.
1444                          */
1445                         hammer_sync_lock_sh(trans);
1446                         hammer_modify_buffer(trans, cursor->data_buffer,
1447                                      HAMMER_ITIMES_BASE(&cursor->data->inode),
1448                                      HAMMER_ITIMES_BYTES);
1449                         cursor->data->inode.atime = ip->sync_ino_data.atime;
1450                         cursor->data->inode.mtime = ip->sync_ino_data.mtime;
1451                         hammer_modify_buffer_done(cursor->data_buffer);
1452                         hammer_sync_unlock(trans);
1453                 } else if (ip->sync_flags & HAMMER_INODE_ATIME) {
1454                         /*
1455                          * Updating atime only can be done in-place with
1456                          * no UNDO.
1457                          */
1458                         hammer_sync_lock_sh(trans);
1459                         hammer_modify_buffer(trans, cursor->data_buffer,
1460                                              NULL, 0);
1461                         cursor->data->inode.atime = ip->sync_ino_data.atime;
1462                         hammer_modify_buffer_done(cursor->data_buffer);
1463                         hammer_sync_unlock(trans);
1464                 }
1465                 ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
1466         }
1467         if (error == EDEADLK) {
1468                 hammer_done_cursor(cursor);
1469                 error = hammer_init_cursor(trans, cursor, &ip->cache[0], ip);
1470                 if (error == 0)
1471                         goto retry;
1472         }
1473         return(error);
1474 }
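
/*
 * Illustrative sketch (not compiled): the EDEADLK retry idiom used by
 * the backend, as in hammer_update_itimes() above, distills to the
 * pattern below.  The cursor is torn down and re-initialized, then the
 * operation is retried from scratch.  The function name is a
 * placeholder; hammer_btree_lookup() stands in for any operation that
 * can return EDEADLK.
 */
#if 0
static int
example_cursor_retry(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        int error;

retry:
        error = hammer_btree_lookup(cursor);
        if (error == EDEADLK) {
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor, &ip->cache[0], ip);
                if (error == 0)
                        goto retry;
        }
        return(error);
}
#endif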
1475
1476 /*
1477  * Release a reference on an inode, flush as requested.
1478  *
1479  * On the last reference we queue the inode to the flusher for its final
1480  * disposition.
1481  */
1482 void
1483 hammer_rel_inode(struct hammer_inode *ip, int flush)
1484 {
1485         /*
1486          * Handle disposition when dropping the last ref.
1487          */
1488         for (;;) {
1489                 if (hammer_oneref(&ip->lock)) {
1490                         /*
1491                          * Determine whether on-disk action is needed for
1492                          * the inode's final disposition.
1493                          */
1494                         KKASSERT(ip->vp == NULL);
1495                         hammer_inode_unloadable_check(ip, 0);
1496                         if (ip->flags & HAMMER_INODE_MODMASK) {
1497                                 hammer_flush_inode(ip, 0);
1498                         } else if (hammer_oneref(&ip->lock)) {
1499                                 hammer_unload_inode(ip);
1500                                 break;
1501                         }
1502                 } else {
1503                         if (flush)
1504                                 hammer_flush_inode(ip, 0);
1505
1506                         /*
1507                          * The inode still has multiple refs, try to drop
1508                          * one ref.
1509                          */
1510                         KKASSERT(hammer_isactive(&ip->lock) >= 1);
1511                         if (hammer_isactive(&ip->lock) > 1) {
1512                                 hammer_rel(&ip->lock);
1513                                 break;
1514                         }
1515                 }
1516         }
1517 }
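
/*
 * Illustrative sketch (not compiled): hammer_ref() and
 * hammer_rel_inode() are used in matched pairs.  A hypothetical caller
 * holding a temporary reference releases it as shown; a non-zero flush
 * argument additionally requests a flush when other references remain.
 */
#if 0
static void
example_inode_ref_cycle(hammer_inode_t ip)
{
        hammer_ref(&ip->lock);          /* temporary reference */
        /* ... operate on the inode ... */
        hammer_rel_inode(ip, 0);        /* may queue final disposition */
}
#endif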
1518
1519 /*
1520  * Unload and destroy the specified inode.  Must be called with one remaining
1521  * reference.  The reference is disposed of.
1522  *
1523  * The inode must be completely clean.
1524  */
1525 static int
1526 hammer_unload_inode(struct hammer_inode *ip)
1527 {
1528         hammer_mount_t hmp = ip->hmp;
1529
1530         KASSERT(hammer_oneref(&ip->lock),
1531                 ("hammer_unload_inode: %d refs", hammer_isactive(&ip->lock)));
1532         KKASSERT(ip->vp == NULL);
1533         KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
1534         KKASSERT(ip->cursor_ip_refs == 0);
1535         KKASSERT(hammer_notlocked(&ip->lock));
1536         KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);
1537
1538         KKASSERT(RB_EMPTY(&ip->rec_tree));
1539         KKASSERT(TAILQ_EMPTY(&ip->target_list));
1540
1541         if (ip->flags & HAMMER_INODE_RDIRTY) {
1542                 RB_REMOVE(hammer_redo_rb_tree, &hmp->rb_redo_root, ip);
1543                 ip->flags &= ~HAMMER_INODE_RDIRTY;
1544         }
1545         RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);
1546
1547         hammer_free_inode(ip);
1548         return(0);
1549 }
1550
1551 /*
1552  * Called during unmounting if a critical error occurred.  The in-memory
1553  * inode and all related structures are destroyed.
1554  *
1555  * If a critical error did not occur the unmount code calls the standard
1556  * release and asserts that the inode is gone.
1557  */
1558 int
1559 hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
1560 {
1561         hammer_record_t rec;
1562
1563         /*
1564          * Get rid of the inode's in-memory records, regardless of their
1565          * state, and clear the mod-mask.
1566          */
1567         while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
1568                 TAILQ_REMOVE(&ip->target_list, rec, target_entry);
1569                 rec->target_ip = NULL;
1570                 if (rec->flush_state == HAMMER_FST_SETUP)
1571                         rec->flush_state = HAMMER_FST_IDLE;
1572         }
1573         while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
1574                 if (rec->flush_state == HAMMER_FST_FLUSH)
1575                         --rec->flush_group->refs;
1576                 else
1577                         hammer_ref(&rec->lock);
1578                 KKASSERT(hammer_oneref(&rec->lock));
1579                 rec->flush_state = HAMMER_FST_IDLE;
1580                 rec->flush_group = NULL;
1581                 rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
1582                 rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
1583                 ++ip->rec_generation;
1584                 hammer_rel_mem_record(rec);
1585         }
1586         ip->flags &= ~HAMMER_INODE_MODMASK;
1587         ip->sync_flags &= ~HAMMER_INODE_MODMASK;
1588         KKASSERT(ip->vp == NULL);
1589
1590         /*
1591          * Remove the inode from any flush group, force it idle.  FLUSH
1592          * and SETUP states have an inode ref.
1593          */
1594         switch(ip->flush_state) {
1595         case HAMMER_FST_FLUSH:
1596                 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
1597                 --ip->flush_group->refs;
1598                 ip->flush_group = NULL;
1599                 /* fall through */
1600         case HAMMER_FST_SETUP:
1601                 hammer_rel(&ip->lock);
1602                 ip->flush_state = HAMMER_FST_IDLE;
1603                 /* fall through */
1604         case HAMMER_FST_IDLE:
1605                 break;
1606         }
1607
1608         /*
1609          * There shouldn't be any associated vnode.  The unload needs at
1610          * least one ref; if we do have a vp, steal its ip ref.
1611          */
1612         if (ip->vp) {
1613                 kprintf("hammer_destroy_inode_callback: Unexpected "
1614                         "vnode association ip %p vp %p\n", ip, ip->vp);
1615                 ip->vp->v_data = NULL;
1616                 ip->vp = NULL;
1617         } else {
1618                 hammer_ref(&ip->lock);
1619         }
1620         hammer_unload_inode(ip);
1621         return(0);
1622 }
1623
1624 /*
1625  * Called on mount -u when switching from RW to RO or vice versa.  Adjust
1626  * the read-only flag for cached inodes.
1627  *
1628  * This routine is called from a RB_SCAN().
1629  */
1630 int
1631 hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
1632 {
1633         hammer_mount_t hmp = ip->hmp;
1634
1635         if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
1636                 ip->flags |= HAMMER_INODE_RO;
1637         else
1638                 ip->flags &= ~HAMMER_INODE_RO;
1639         return(0);
1640 }
1641
1642 /*
1643  * A transaction has modified an inode, requiring updates as specified by
1644  * the passed flags.
1645  *
1646  * HAMMER_INODE_DDIRTY: Inode data has been updated, not incl mtime/atime,
1647  *                      and not including size changes due to write-append
1648  *                      (but other size changes are included).
1649  * HAMMER_INODE_SDIRTY: Inode data has been updated, size changes due to
1650  *                      write-append.
1651  * HAMMER_INODE_XDIRTY: Dirty in-memory records
1652  * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
1653  * HAMMER_INODE_DELETED: Inode record/data must be deleted
1654  * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
1655  */
1656 void
1657 hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
1658 {
1659         /*
1660          * A ronly value of 0 or 2 does not trigger the assertion;
1661          * 2 is a special error state.
1662          */
1663         KKASSERT(ip->hmp->ronly != 1 ||
1664                   (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
1665                             HAMMER_INODE_SDIRTY |
1666                             HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
1667                             HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
1668         if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
1669                 ip->flags |= HAMMER_INODE_RSV_INODES;
1670                 ++ip->hmp->rsv_inodes;
1671         }
1672
1673         /*
1674          * Set the NEWINODE flag in the transaction if the inode
1675          * transitions to a dirty state.  This is used to track
1676          * the load on the inode cache.
1677          */
1678         if (trans &&
1679             (ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1680             (flags & HAMMER_INODE_MODMASK)) {
1681                 trans->flags |= HAMMER_TRANSF_NEWINODE;
1682         }
1683         if (flags & HAMMER_INODE_MODMASK)
1684                 hammer_inode_dirty(ip);
1685         ip->flags |= flags;
1686 }
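
/*
 * Illustrative sketch (not compiled): a typical frontend path dirties
 * an inode by updating the in-memory fields and then calling
 * hammer_modify_inode() with the matching flags.  The helper name and
 * the microsecond timestamp argument are placeholders.
 */
#if 0
static void
example_set_mtime(hammer_transaction_t trans, hammer_inode_t ip,
                  uint64_t mtime_us)
{
        ip->ino_data.mtime = mtime_us;
        hammer_modify_inode(trans, ip, HAMMER_INODE_MTIME);
}
#endif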
1687
1688 /*
1689  * Attempt to quickly update the atime for a hammer inode.  Return 0 on
1690  * success, -1 on failure.
1691  *
1692  * We attempt to update the atime with only the ip lock and not the
1693  * whole filesystem lock in order to improve concurrency.  We can only
1694  * do this safely if the ATIME flag is already pending on the inode.
1695  *
1696  * This function is called via a vnops path (ip pointer is stable) without
1697  * fs_token held.
1698  */
1699 int
1700 hammer_update_atime_quick(hammer_inode_t ip)
1701 {
1702         struct timeval tv;
1703         int res = -1;
1704
1705         if ((ip->flags & HAMMER_INODE_RO) ||
1706             (ip->hmp->mp->mnt_flag & MNT_NOATIME)) {
1707                 /*
1708                  * Silently indicate success on read-only mount/snap
1709                  */
1710                 res = 0;
1711         } else if (ip->flags & HAMMER_INODE_ATIME) {
1712                 /*
1713                  * Double check with inode lock held against backend.  This
1714                  * is only safe if all we need to do is update
1715                  * ino_data.atime.
1716                  */
1717                 getmicrotime(&tv);
1718                 hammer_lock_ex(&ip->lock);
1719                 if (ip->flags & HAMMER_INODE_ATIME) {
1720                         ip->ino_data.atime =
1721                             (unsigned long)tv.tv_sec * 1000000ULL + tv.tv_usec;
1722                         res = 0;
1723                 }
1724                 hammer_unlock(&ip->lock);
1725         }
1726         return res;
1727 }
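
/*
 * Illustrative sketch (not compiled): a read path can try the quick
 * atime update first and only fall back to the heavier fs_token
 * protected dirtying path when it fails.  The fallback is elided;
 * only the calling convention is shown.
 */
#if 0
static void
example_touch_atime(hammer_inode_t ip)
{
        if (hammer_update_atime_quick(ip) == 0)
                return;         /* updated in place, or silently skipped */
        /*
         * Fall back: acquire fs_token, then mark the inode with
         * HAMMER_INODE_ATIME via hammer_modify_inode().
         */
}
#endif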
1728
1729 /*
1730  * Request that an inode be flushed.  This whole mess cannot block and may
1731  * recurse (if not synchronous).  Once requested HAMMER will attempt to
1732  * actively flush the inode until the flush can be done.
1733  *
1734  * The inode may already be flushing, or may be in a setup state.  We can
1735  * place the inode in a flushing state if it is currently idle and flag it
1736  * to reflush if it is currently flushing.
1737  *
1738  * Upon return, if the inode could not be flushed due to a setup
1739  * dependency, it will be automatically flushed when the dependency
1740  * is satisfied.
1741  */
1742 void
1743 hammer_flush_inode(hammer_inode_t ip, int flags)
1744 {
1745         hammer_mount_t hmp;
1746         hammer_flush_group_t flg;
1747         int good;
1748
1749         /*
1750          * fill_flush_group is the first flush group we may be able to
1751          * continue filling; it may be open or closed, but it will always
1752          * be past the currently flushing (running) flg.
1753          *
1754          * next_flush_group is the next open flush group.
1755          */
1756         hmp = ip->hmp;
1757         while ((flg = hmp->fill_flush_group) != NULL) {
1758                 KKASSERT(flg->running == 0);
1759                 if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit &&
1760                     flg->total_count <= hammer_autoflush) {
1761                         break;
1762                 }
1763                 hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry);
1764                 hammer_flusher_async(ip->hmp, flg);
1765         }
1766         if (flg == NULL) {
1767                 flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
1768                 flg->seq = hmp->flusher.next++;
1769                 if (hmp->next_flush_group == NULL)
1770                         hmp->next_flush_group = flg;
1771                 if (hmp->fill_flush_group == NULL)
1772                         hmp->fill_flush_group = flg;
1773                 RB_INIT(&flg->flush_tree);
1774                 TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
1775         }
1776
1777         /*
1778          * Trivial 'nothing to flush' case.  If the inode is in a SETUP
1779          * state we have to put it back into an IDLE state so we can
1780          * drop the extra ref.
1781          *
1782          * If we have a parent dependency we must still fall through
1783          * so we can run it.
1784          */
1785         if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
1786                 if (ip->flush_state == HAMMER_FST_SETUP &&
1787                     TAILQ_EMPTY(&ip->target_list)) {
1788                         ip->flush_state = HAMMER_FST_IDLE;
1789                         hammer_rel_inode(ip, 0);
1790                 }
1791                 if (ip->flush_state == HAMMER_FST_IDLE)
1792                         return;
1793         }
1794
1795         /*
1796          * Our flush action will depend on the current state.
1797          */
1798         switch(ip->flush_state) {
1799         case HAMMER_FST_IDLE:
1800                 /*
1801                  * We have no dependencies and can flush immediately.  Some
1802                  * of our children may not be flushable, so we have to re-test
1803                  * with that additional knowledge.
1804                  */
1805                 hammer_flush_inode_core(ip, flg, flags);
1806                 break;
1807         case HAMMER_FST_SETUP:
1808                 /*
1809                  * Recurse upwards through dependencies via target_list
1810                  * and start their flusher actions going if possible.
1811                  *
1812                  * 'good' is our connectivity.  -1 means we have none and
1813                  * can't flush, 0 means there weren't any dependencies, and
1814                  * 1 means we have good connectivity.
1815                  */
1816                 good = hammer_setup_parent_inodes(ip, 0, flg);
1817
1818                 if (good >= 0) {
1819                         /*
1820                          * We can continue if good >= 0.  Determine how
1821                          * many records under our inode can be flushed (and
1822                          * mark them).
1823                          */
1824                         hammer_flush_inode_core(ip, flg, flags);
1825                 } else {
1826                         /*
1827                          * Parent has no connectivity, tell it to flush
1828                          * us as soon as it does.
1829                          *
1830                          * The REFLUSH flag is also needed to trigger
1831                  * dependency wakeups.
1832                          */
1833                         ip->flags |= HAMMER_INODE_CONN_DOWN |
1834                                      HAMMER_INODE_REFLUSH;
1835                         if (flags & HAMMER_FLUSH_SIGNAL) {
1836                                 ip->flags |= HAMMER_INODE_RESIGNAL;
1837                                 hammer_flusher_async(ip->hmp, flg);
1838                         }
1839                 }
1840                 break;
1841         case HAMMER_FST_FLUSH:
1842                 /*
1843                  * We are already flushing, flag the inode to reflush
1844                  * if needed after it completes its current flush.
1845                  *
1846                  * The REFLUSH flag is also needed to trigger
1847                  * dependency wakeups.
1848                  */
1849                 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
1850                         ip->flags |= HAMMER_INODE_REFLUSH;
1851                 if (flags & HAMMER_FLUSH_SIGNAL) {
1852                         ip->flags |= HAMMER_INODE_RESIGNAL;
1853                         hammer_flusher_async(ip->hmp, flg);
1854                 }
1855                 break;
1856         }
1857 }
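
/*
 * Illustrative sketch (not compiled): an fsync-like caller combines a
 * signaled flush with a wait for completion.  HAMMER_FLUSH_SIGNAL asks
 * the flusher to run asynchronously; hammer_wait_inode() then blocks
 * until the inode leaves its flush/setup state.
 */
#if 0
static void
example_fsync_inode(hammer_inode_t ip)
{
        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
        hammer_wait_inode(ip);
}
#endif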
1858
1859 /*
1860  * Scan ip->target_list, which is a list of records owned by PARENTS of our
1861  * ip which reference our ip.
1862  *
1863  * XXX This is a huge mess of recursive code, but not one bit of it blocks
1864  *     so for now do not ref/deref the structures.  Note that if we use the
1865  *     ref/rel code later, the rel CAN block.
1866  */
1867 static int
1868 hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
1869                            hammer_flush_group_t flg)
1870 {
1871         hammer_record_t depend;
1872         int good;
1873         int r;
1874
1875         /*
1876          * If we hit our recursion limit and we have parent dependencies,
1877          * we cannot continue.  Returning < 0 will cause us to be flagged
1878          * for reflush.  Returning -2 cuts off additional dependency checks
1879          * because they are likely to also hit the depth limit.
1880          *
1881          * We cannot return < 0 if there are no dependencies or there might
1882          * not be anything to wakeup (ip).
1883          */
1884         if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
1885                 if (hammer_debug_general & 0x10000)
1886                         krateprintf(&hammer_gen_krate,
1887                             "HAMMER: Warning: depth limit reached on "
1888                             "setup recursion, inode %p %016llx\n",
1889                             ip, (long long)ip->obj_id);
1890                 return(-2);
1891         }
1892
1893         /*
1894          * Scan dependencies
1895          */
1896         good = 0;
1897         TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
1898                 r = hammer_setup_parent_inodes_helper(depend, depth, flg);
1899                 KKASSERT(depend->target_ip == ip);
1900                 if (r < 0 && good == 0)
1901                         good = -1;
1902                 if (r > 0)
1903                         good = 1;
1904
1905                 /*
1906                  * If we failed due to the recursion depth limit then stop
1907                  * now.
1908                  */
1909                 if (r == -2)
1910                         break;
1911         }
1912         return(good);
1913 }
1914
1915 /*
1916  * This helper function takes a record representing the dependency between
1917  * the parent inode and child inode.
1918  *
1919  * record               = record in question (*rec in below)
1920  * record->ip           = parent inode (*pip in below)
1921  * record->target_ip    = child inode (*ip in below)
1922  *
1923  * *pip--------------\
1924  *    ^               \rec_tree
1925  *     \               \
1926  *      \ip            /\\\\\ rbtree of recs from parent inode's view
1927  *       \            //\\\\\\
1928  *        \          / ........
1929  *         \        /
1930  *          \------*rec------target_ip------>*ip
1931  *               ...target_entry<----...----->target_list<---...
1932  *                                            list of recs from inode's view
1933  *
1934  * We are asked to recurse upwards and convert the record from SETUP
1935  * to FLUSH if possible.
1936  *
1937  * Return 1 if the record gives us connectivity
1938  *
1939  * Return 0 if the record is not relevant
1940  *
1941  * Return -1 if we can't resolve the dependency and there is no connectivity.
1942  */
1943 static int
1944 hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
1945                                   hammer_flush_group_t flg)
1946 {
1947         hammer_inode_t pip;
1948         int good;
1949
1950         KKASSERT(record->flush_state != HAMMER_FST_IDLE);
1951         pip = record->ip;
1952
1953         /*
1954          * If the record is already flushing, is it in our flush group?
1955          *
1956          * If it is in our flush group but it is a general record or a
1957          * delete-on-disk, it does not improve our connectivity (return 0),
1958          * and if the target inode is not trying to destroy itself we can't
1959          * allow the operation yet anyway (the second return -1).
1960          */
1961         if (record->flush_state == HAMMER_FST_FLUSH) {
1962                 /*
1963                  * If not in our flush group ask the parent to reflush
1964                  * us as soon as possible.
1965                  */
1966                 if (record->flush_group != flg) {
1967                         pip->flags |= HAMMER_INODE_REFLUSH;
1968                         record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1969                         return(-1);
1970                 }
1971
1972                 /*
1973                  * If in our flush group everything is already set up,
1974                  * just return whether the record will improve our
1975                  * visibility or not.
1976                  */
1977                 if (record->type == HAMMER_MEM_RECORD_ADD)
1978                         return(1);
1979                 return(0);
1980         }
1981
1982         /*
1983          * It must be a setup record.  Try to resolve the setup dependencies
1984          * by recursing upwards so we can place ip on the flush list.
1985          *
1986          * Limit ourselves to 20 levels of recursion to avoid blowing out
1987          * the kernel stack.  If we hit the recursion limit we can't flush
1988          * until the parent flushes.  The parent will flush independently
1989          * on its own and ultimately a deep recursion will be resolved.
1990          */
1991         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1992
1993         good = hammer_setup_parent_inodes(pip, depth + 1, flg);
1994
1995         /*
1996          * If good < 0 the parent has no connectivity and we cannot safely
1997          * flush the directory entry, which also means we can't flush our
1998          * ip.  Flag us for downward recursion once the parent's
1999          * connectivity is resolved.  Flag the parent for [re]flush or it
2000          * may not check for downward recursions.
2001          */
2002         if (good < 0) {
2003                 pip->flags |= HAMMER_INODE_REFLUSH;
2004                 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
2005                 return(good);
2006         }
2007
2008         /*
2009          * We are go, place the parent inode in a flushing state so we can
2010          * place its record in a flushing state.  Note that the parent
2011          * may already be flushing.  The record must be in the same flush
2012          * group as the parent.
2013          */
2014         if (pip->flush_state != HAMMER_FST_FLUSH)
2015                 hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
2016         KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
2017
2018         /*
2019          * It is possible for a rename to create a loop in the recursion
2020          * and revisit a record.  This will result in the record being
2021          * placed in a flush state unexpectedly.  This check deals with
2022          * the case.
2023          */
2024         if (record->flush_state == HAMMER_FST_FLUSH) {
2025                 if (record->type == HAMMER_MEM_RECORD_ADD)
2026                         return(1);
2027                 return(0);
2028         }
2029
2030         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
2031
2032 #if 0
2033         if (record->type == HAMMER_MEM_RECORD_DEL &&
2034             (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
2035                 /*
2036                  * Regardless of flushing state we cannot sync this path if the
2037                  * record represents a delete-on-disk but the target inode
2038                  * is not ready to sync its own deletion.
2039                  *
2040                  * XXX need to count effective nlinks to determine whether
2041                  * the flush is ok, otherwise removing a hardlink will
2042                  * just leave the DEL record to rot.
2043                  */
2044                 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
2045                 return(-1);
2046         } else
2047 #endif
2048         if (pip->flush_group == flg) {
2049                 /*
2050                  * Because we have not calculated nlinks yet we can just
2051                  * set records to the flush state if the parent is in
2052                  * the same flush group as we are.
2053                  */
2054                 record->flush_state = HAMMER_FST_FLUSH;
2055                 record->flush_group = flg;
2056                 ++record->flush_group->refs;
2057                 hammer_ref(&record->lock);
2058
2059                 /*
2060                  * A general directory-add contributes to our visibility.
2061                  *
2062                  * Otherwise it is probably a directory-delete or
2063                  * delete-on-disk record and does not contribute to our
2064                  * visibility (but we can still flush it).
2065                  */
2066                 if (record->type == HAMMER_MEM_RECORD_ADD)
2067                         return(1);
2068                 return(0);
2069         } else {
2070                 /*
2071                  * If the parent is not in our flush group we cannot
2072                  * flush this record yet, there is no visibility.
2073                  * We tell the parent to reflush and mark ourselves
2074                  * so the parent knows it should flush us too.
2075                  */
2076                 pip->flags |= HAMMER_INODE_REFLUSH;
2077                 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
2078                 return(-1);
2079         }
2080 }
2081
2082 /*
2083  * This is the core routine placing an inode into the FST_FLUSH state.
2084  */
2085 static void
2086 hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
2087 {
2088         hammer_mount_t hmp = ip->hmp;
2089         int go_count;
2090
2091         /*
2092          * Set flush state and prevent the flusher from cycling into
2093          * the next flush group.  Do not place the ip on the list yet.
2094          * Inodes not in the idle state get an extra reference.
2095          */
2096         KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
2097         if (ip->flush_state == HAMMER_FST_IDLE)
2098                 hammer_ref(&ip->lock);
2099         ip->flush_state = HAMMER_FST_FLUSH;
2100         ip->flush_group = flg;
2101         ++hmp->flusher.group_lock;
2102         ++hmp->count_iqueued;
2103         ++hammer_count_iqueued;
2104         ++flg->total_count;
2105         hammer_redo_fifo_start_flush(ip);
2106
2107 #if 0
2108         /*
2109          * We need to be able to vfsync/truncate from the backend.
2110          *
2111          * XXX Any truncation from the backend will acquire the vnode
2112          *     independently.
2113          */
2114         KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
2115         if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
2116                 ip->flags |= HAMMER_INODE_VHELD;
2117                 vref(ip->vp);
2118         }
2119 #endif
2120
2121         /*
2122          * Figure out how many in-memory records we can actually flush
2123          * (not including inode meta-data, buffers, etc).
2124          */
2125         KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
2126         if (flags & HAMMER_FLUSH_RECURSION) {
2127                 /*
2128                  * If this is an upwards recursion we do not want to
2129                  * recurse down again!
2130                  */
2131                 go_count = 1;
2132 #if 0
2133         } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2134                 /*
2135                  * No new records are added if we must complete a flush
2136                  * from a previous cycle, but we do have to move the records
2137                  * from the previous cycle to the current one.
2138                  */
2139 #if 0
2140                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2141                                    hammer_syncgrp_child_callback, NULL);
2142 #endif
2143                 go_count = 1;
2144 #endif
2145         } else {
2146                 /*
2147                  * Normal flush, scan records and bring them into the flush.
2148                  * Directory adds and deletes are usually skipped (they are
2149                  * grouped with the related inode rather than with the
2150                  * directory).
2151                  *
2152                  * go_count can be negative, which means the scan aborted
2153                  * due to the flush group being over-full and we should
2154                  * flush what we have.
2155                  */
2156                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2157                                    hammer_setup_child_callback, NULL);
2158         }
2159
2160         /*
2161          * This is a more involved test that includes go_count.  If we
2162          * can't flush, flag the inode and return.  If go_count is 0 we
2163          * were unable to flush any records in our rec_tree and
2164          * must ignore the XDIRTY flag.
2165          */
2166         if (go_count == 0) {
2167                 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
2168                         --hmp->count_iqueued;
2169                         --hammer_count_iqueued;
2170
2171                         --flg->total_count;
2172                         ip->flush_state = HAMMER_FST_SETUP;
2173                         ip->flush_group = NULL;
2174                         if (flags & HAMMER_FLUSH_SIGNAL) {
2175                                 ip->flags |= HAMMER_INODE_REFLUSH |
2176                                              HAMMER_INODE_RESIGNAL;
2177                         } else {
2178                                 ip->flags |= HAMMER_INODE_REFLUSH;
2179                         }
2180 #if 0
2181                         if (ip->flags & HAMMER_INODE_VHELD) {
2182                                 ip->flags &= ~HAMMER_INODE_VHELD;
2183                                 vrele(ip->vp);
2184                         }
2185 #endif
2186
2187                         /*
2188                          * REFLUSH is needed to trigger dependency wakeups
2189                          * when an inode is in SETUP.
2190                          */
2191                         ip->flags |= HAMMER_INODE_REFLUSH;
2192                         if (--hmp->flusher.group_lock == 0)
2193                                 wakeup(&hmp->flusher.group_lock);
2194                         return;
2195                 }
2196         }
2197
2198         /*
2199          * Snapshot the state of the inode for the backend flusher.
2200          *
2201          * We continue to retain save_trunc_off even when all truncations
2202          * have been resolved as an optimization to determine if we can
2203          * skip the B-Tree lookup for overwrite deletions.
2204          *
2205          * NOTE: The DELETING flag is a mod flag, but it is also sticky,
2206          * and stays in ip->flags.  Once set, it stays set until the
2207          * inode is destroyed.
2208          */
2209         if (ip->flags & HAMMER_INODE_TRUNCATED) {
2210                 KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
2211                 ip->sync_trunc_off = ip->trunc_off;
2212                 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
2213                 ip->flags &= ~HAMMER_INODE_TRUNCATED;
2214                 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
2215
2216                 /*
2217                  * The save_trunc_off used to cache whether the B-Tree
2218                  * holds any records past that point is not used until
2219                  * after the truncation has succeeded, so we can safely
2220                  * set it now.
2221                  */
2222                 if (ip->save_trunc_off > ip->sync_trunc_off)
2223                         ip->save_trunc_off = ip->sync_trunc_off;
2224         }
2225         ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
2226                            ~HAMMER_INODE_TRUNCATED);
2227         ip->sync_ino_leaf = ip->ino_leaf;
2228         ip->sync_ino_data = ip->ino_data;
2229         ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
2230 #ifdef DEBUG_TRUNCATE
2231         if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
2232                 kprintf("truncateS %016llx\n", ip->sync_trunc_off);
2233 #endif
2234
2235         /*
2236          * The flusher list inherits our inode and reference.
2237          */
2238         KKASSERT(flg->running == 0);
2239         RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip);
2240         if (--hmp->flusher.group_lock == 0)
2241                 wakeup(&hmp->flusher.group_lock);
2242
2243         /*
2244          * Auto-flush the group if it grows too large.  Make sure the
2245          * inode reclaim wait pipeline continues to work.
2246          */
2247         if (flg->total_count >= hammer_autoflush ||
2248             flg->total_count >= hammer_limit_reclaims / 4) {
2249                 if (hmp->fill_flush_group == flg)
2250                         hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry);
2251                 hammer_flusher_async(hmp, flg);
2252         }
2253 }
2254
2255 /*
2256  * Callback for scan of ip->rec_tree.  Try to include each record in our
2257  * flush.  ip->flush_group has been set but the inode has not yet been
2258  * moved into a flushing state.
2259  *
2260  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
2261  * both inodes.
2262  *
2263  * We return 1 for any record placed or found in FST_FLUSH, which prevents
2264  * the caller from shortcutting the flush.
2265  */
2266 static int
2267 hammer_setup_child_callback(hammer_record_t rec, void *data)
2268 {
2269         hammer_flush_group_t flg;
2270         hammer_inode_t target_ip;
2271         hammer_inode_t ip;
2272         int r;
2273
2274         /*
2275          * Records deleted or committed by the backend are ignored.
2276          * Note that the flush detects deleted frontend records at
2277          * multiple points to deal with races.  This is just the first
2278          * line of defense.  The only time HAMMER_RECF_DELETED_FE cannot
2279          * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
2280          * messes up link-count calculations.
2281          *
2282          * NOTE: Don't get confused between record deletion and, say,
2283          * directory entry deletion.  The deletion of a directory entry
2284          * which is on-media has nothing to do with the record deletion
2285          * flags.
2286          */
2287         if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
2288                           HAMMER_RECF_COMMITTED)) {
2289                 if (rec->flush_state == HAMMER_FST_FLUSH) {
2290                         KKASSERT(rec->flush_group == rec->ip->flush_group);
2291                         r = 1;
2292                 } else {
2293                         r = 0;
2294                 }
2295                 return(r);
2296         }
2297
2298         /*
2299          * If the record is in an idle state it has no dependencies and
2300          * can be flushed.
2301          */
2302         ip = rec->ip;
2303         flg = ip->flush_group;
2304         r = 0;
2305
2306         switch(rec->flush_state) {
2307         case HAMMER_FST_IDLE:
2308                 /*
2309                  * The record has no setup dependency; we can flush it.
2310                  */
2311                 KKASSERT(rec->target_ip == NULL);
2312                 rec->flush_state = HAMMER_FST_FLUSH;
2313                 rec->flush_group = flg;
2314                 ++flg->refs;
2315                 hammer_ref(&rec->lock);
2316                 r = 1;
2317                 break;
2318         case HAMMER_FST_SETUP:
2319                 /*
2320                  * The record has a setup dependency.  These are typically
2321                  * directory entry adds and deletes.  Such entries will be
2322                  * flushed when their inodes are flushed so we do not
2323                  * usually have to add them to the flush here.  However,
2324                  * if the target_ip has set HAMMER_INODE_CONN_DOWN then
2325                  * it is asking us to flush this record (and it).
2326                  */
2327                 target_ip = rec->target_ip;
2328                 KKASSERT(target_ip != NULL);
2329                 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
2330
2331                 /*
2332                  * If the target IP is already flushing in our group
2333                  * we could associate the record, but target_ip has
2334                  * already synced ino_data to sync_ino_data and we
2335                  * would also have to adjust nlinks.   Plus there are
2336                  * ordering issues for adds and deletes.
2337                  *
2338                  * Reflush downward if this is an ADD, and upward if
2339                  * this is a DEL.
2340                  */
2341                 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
2342                         if (rec->type == HAMMER_MEM_RECORD_ADD)
2343                                 ip->flags |= HAMMER_INODE_REFLUSH;
2344                         else
2345                                 target_ip->flags |= HAMMER_INODE_REFLUSH;
2346                         break;
2347                 }
2348
2349                 /*
2350                  * Target IP is not yet flushing.  This can get complex
2351                  * because we have to be careful about the recursion.
2352                  *
2353                  * Directories create an issue for us in that if a flush
2354                  * of a directory is requested the expectation is to flush
2355                  * any pending directory entries, but this will cause the
2356                  * related inodes to recursively flush as well.  We can't
2357                  * really defer the operation so just get as many as we
2358                  * can and let the reflush logic pick up the remainder.
2359                  */
2360 #if 0
2361                 if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
2362                     (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
2363                         /*
2364                          * We aren't reclaiming and the target ip was not
2365                          * previously prevented from flushing due to this
2366                          * record dependency.  Do not flush this record.
2367                          */
2368                         /*r = 0;*/
2369                 } else
2370 #endif
2371                 if (flg->total_count + flg->refs >
2372                            ip->hmp->undo_rec_limit) {
2373                         /*
2374                          * Our flush group is over-full and we risk blowing
2375                          * out the UNDO FIFO.  Stop the scan, flush what we
2376                          * have, then reflush the directory.
2377                          *
2378                          * The directory may be forced through multiple
2379                          * flush groups before it can be completely
2380                          * flushed.
2381                          */
2382                         ip->flags |= HAMMER_INODE_RESIGNAL |
2383                                      HAMMER_INODE_REFLUSH;
2384                         r = -1;
2385                 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
2386                         /*
2387                          * If the target IP is not flushing we can force
2388                          * it to flush, even if it is unable to write out
2389                          * any of its own records; we have at least one in
2390                          * hand that we CAN deal with.
2391                          */
2392                         rec->flush_state = HAMMER_FST_FLUSH;
2393                         rec->flush_group = flg;
2394                         ++flg->refs;
2395                         hammer_ref(&rec->lock);
2396                         hammer_flush_inode_core(target_ip, flg,
2397                                                 HAMMER_FLUSH_RECURSION);
2398                         r = 1;
2399                 } else {
2400                         /*
2401                          * General or delete-on-disk record.
2402                          *
2403                          * XXX this needs help.  If it is a delete-on-disk we
2404                          * could disconnect the target.  If the target has its
2405                          * own dependencies they really need to be flushed.
2406                          *
2407                          * XXX
2408                          */
2409                         rec->flush_state = HAMMER_FST_FLUSH;
2410                         rec->flush_group = flg;
2411                         ++flg->refs;
2412                         hammer_ref(&rec->lock);
2413                         hammer_flush_inode_core(target_ip, flg,
2414                                                 HAMMER_FLUSH_RECURSION);
2415                         r = 1;
2416                 }
2417                 break;
2418         case HAMMER_FST_FLUSH:
2419                 /*
2420                  * The record could be part of a previous flush group if the
2421                  * inode is a directory (the record being a directory entry).
2422                  * Once the flush group was closed a hammer_test_inode()
2423                  * function can cause a new flush group to be setup, placing
2424                  * the directory inode itself in a new flush group.
2425                  *
2426                  * When associated with a previous flush group we count it
2427                  * as if it were in our current flush group, since it will
2428                  * effectively be flushed by the time we flush our current
2429                  * flush group.
2430                  */
2431                 KKASSERT(
2432                     rec->ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY ||
2433                     rec->flush_group == flg);
2434                 r = 1;
2435                 break;
2436         }
2437         return(r);
2438 }
2439
2440 #if 0
2441 /*
2442  * This version just moves records already in a flush state to the new
2443  * flush group and that is it.
2444  */
2445 static int
2446 hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
2447 {
2448         hammer_inode_t ip = rec->ip;
2449
2450         switch(rec->flush_state) {
2451         case HAMMER_FST_FLUSH:
2452                 KKASSERT(rec->flush_group == ip->flush_group);
2453                 break;
2454         default:
2455                 break;
2456         }
2457         return(0);
2458 }
2459 #endif
2460
2461 /*
2462  * Wait for a previously queued flush to complete.
2463  *
2464  * If a critical error occurred we don't try to wait.
2465  */
2466 void
2467 hammer_wait_inode(hammer_inode_t ip)
2468 {
2469         /*
2470          * The inode can be in a SETUP state in which case RESIGNAL
2471          * should be set.  If RESIGNAL is not set then the previous
2472          * flush completed and a later operation placed the inode
2473          * in a passive setup state again, so we're done.
2474          *
2475          * The inode can be in a FLUSH state in which case we
2476          * can just wait for completion.
2477          */
2478         while (ip->flush_state == HAMMER_FST_FLUSH ||
2479             (ip->flush_state == HAMMER_FST_SETUP &&
2480              (ip->flags & HAMMER_INODE_RESIGNAL))) {
2481                 /*
2482                  * Don't try to flush on a critical error
2483                  */
2484                 if (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
2485                         break;
2486
2487                 /*
2488                  * If the inode was already being flushed its flg
2489                  * may not have been queued to the backend.  We
2490                  * have to make sure it gets queued or we can wind
2491                  * up blocked or deadlocked (particularly if we are
2492                  * the vnlru thread).
2493                  */
2494                 if (ip->flush_state == HAMMER_FST_FLUSH) {
2495                         KKASSERT(ip->flush_group);
2496                         if (ip->flush_group->closed == 0) {
2497                                 if (hammer_debug_inode) {
2498                                         kprintf("HAMMER: debug: forcing "
2499                                                 "async flush ip %016jx\n",
2500                                                 (intmax_t)ip->obj_id);
2501                                 }
2502                                 hammer_flusher_async(ip->hmp, ip->flush_group);
2503                                 continue; /* retest */
2504                         }
2505                 }
2506
2507                 /*
2508                  * In a flush state with the flg queued to the backend
2509                  * or in a setup state with RESIGNAL set, we can safely
2510                  * wait.
2511                  */
2512                 ip->flags |= HAMMER_INODE_FLUSHW;
2513                 tsleep(&ip->flags, 0, "hmrwin", 0);
2514         }
2515
2516 #if 0
2517         /*
2518          * The inode may have been in a passive setup state,
2519          * call flush to make sure we get signaled.
2520          */
2521         if (ip->flush_state == HAMMER_FST_SETUP)
2522                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2523 #endif
2524
2525 }
2526
2527 /*
2528  * Called by the backend code when a flush has been completed.
2529  * The inode has already been removed from the flush list.
2530  *
2531  * A pipelined flush can occur, in which case we must re-enter the
2532  * inode on the list and re-copy its fields.
2533  */
2534 void
2535 hammer_flush_inode_done(hammer_inode_t ip, int error)
2536 {
2537         hammer_mount_t hmp;
2538         int dorel;
2539
2540         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
2541
2542         hmp = ip->hmp;
2543
2544         /*
2545          * Auto-reflush if the backend could not completely flush
2546          * the inode.  This fixes a case where a deferred buffer flush
2547          * could cause fsync to return early.
2548          */
2549         if (ip->sync_flags & HAMMER_INODE_MODMASK)
2550                 ip->flags |= HAMMER_INODE_REFLUSH;
2551
2552         /*
2553          * Merge left-over flags back into the frontend and fix the state.
2554          * Incomplete truncations are retained by the backend.
2555          */
2556         ip->error = error;
2557         ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
2558         ip->sync_flags &= HAMMER_INODE_TRUNCATED;
2559
2560         /*
2561          * The backend may have adjusted nlinks, so if the adjusted nlinks
2562          * does not match the frontend, set the frontend's DDIRTY flag again.
2563          */
2564         if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
2565                 ip->flags |= HAMMER_INODE_DDIRTY;
2566
2567         /*
2568          * Fix up the dirty buffer status.
2569          */
2570         if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
2571                 ip->flags |= HAMMER_INODE_BUFS;
2572         }
2573         hammer_redo_fifo_end_flush(ip);
2574
2575         /*
2576          * Re-set the XDIRTY flag if some of the inode's in-memory records
2577          * could not be flushed.
2578          */
2579         KKASSERT((RB_EMPTY(&ip->rec_tree) &&
2580                   (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
2581                  (!RB_EMPTY(&ip->rec_tree) &&
2582                   (ip->flags & HAMMER_INODE_XDIRTY) != 0));
2583
2584         /*
2585          * Do not lose track of inodes which no longer have vnode
2586          * associations, otherwise they may never get flushed again.
2587          *
2588          * The reflush flag can be set superfluously, causing extra pain
2589          * for no reason.  If the inode is no longer modified it no longer
2590          * needs to be flushed.
2591          */
2592         if (ip->flags & HAMMER_INODE_MODMASK) {
2593                 if (ip->vp == NULL)
2594                         ip->flags |= HAMMER_INODE_REFLUSH;
2595         } else {
2596                 ip->flags &= ~HAMMER_INODE_REFLUSH;
2597         }
2598
2599         /*
2600          * The fs token is held but the inode lock is not held.  Because this
2601          * is a backend flush it is possible that the vnode has no references,
2602          * causing a reclaim race inside vsetisdirty() if/when it blocks.
2603          *
2604          * Therefore, we must lock the inode around this particular dirtying
2605          * operation.  We don't have to do so around other dirtying operations
2606          * where the vnode is implicitly or explicitly held.
2607          */
2608         if (ip->flags & HAMMER_INODE_MODMASK) {
2609                 hammer_lock_ex(&ip->lock);
2610                 hammer_inode_dirty(ip);
2611                 hammer_unlock(&ip->lock);
2612         }
2613
2614         /*
2615          * Adjust the flush state.
2616          */
2617         if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2618                 /*
2619                  * We were unable to flush out all our records, leave the
2620                  * inode in a flush state and in the current flush group.
2621                  * The flush group will be re-run.
2622                  *
2623                  * This occurs if the UNDO block gets too full or there is
2624                  * too much dirty meta-data and allows the flusher to
2625                  * finalize the UNDO block and then re-flush.
2626                  */
2627                 ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
2628                 dorel = 0;
2629         } else {
2630                 /*
2631                  * Remove from the flush_group
2632                  */
2633                 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
2634                 ip->flush_group = NULL;
2635
2636 #if 0
2637                 /*
2638                  * Clean up the vnode ref and tracking counts.
2639                  */
2640                 if (ip->flags & HAMMER_INODE_VHELD) {
2641                         ip->flags &= ~HAMMER_INODE_VHELD;
2642                         vrele(ip->vp);
2643                 }
2644 #endif
2645                 --hmp->count_iqueued;
2646                 --hammer_count_iqueued;
2647
2648                 /*
2649                  * And adjust the state.
2650                  */
2651                 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
2652                         ip->flush_state = HAMMER_FST_IDLE;
2653                         dorel = 1;
2654                 } else {
2655                         ip->flush_state = HAMMER_FST_SETUP;
2656                         dorel = 0;
2657                 }
2658
2659                 /*
2660                  * If the frontend is waiting for a flush to complete,
2661                  * wake it up.
2662                  */
2663                 if (ip->flags & HAMMER_INODE_FLUSHW) {
2664                         ip->flags &= ~HAMMER_INODE_FLUSHW;
2665                         wakeup(&ip->flags);
2666                 }
2667
2668                 /*
2669                  * If the frontend made more changes and requested another
2670                  * flush, then try to get it running.
2671                  *
2672                  * Reflushes are aborted when the inode is errored out.
2673                  */
2674                 if (ip->flags & HAMMER_INODE_REFLUSH) {
2675                         ip->flags &= ~HAMMER_INODE_REFLUSH;
2676                         if (ip->flags & HAMMER_INODE_RESIGNAL) {
2677                                 ip->flags &= ~HAMMER_INODE_RESIGNAL;
2678                                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2679                         } else {
2680                                 hammer_flush_inode(ip, 0);
2681                         }
2682                 }
2683         }
2684
2685         /*
2686          * If we have no parent dependancies we can clear CONN_DOWN
2687          */
2688          * If we have no parent dependencies we can clear CONN_DOWN
2689                 ip->flags &= ~HAMMER_INODE_CONN_DOWN;
2690
2691         /*
2692          * If the inode is now clean drop the space reservation.
2693          */
2694         if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
2695             (ip->flags & HAMMER_INODE_RSV_INODES)) {
2696                 ip->flags &= ~HAMMER_INODE_RSV_INODES;
2697                 --hmp->rsv_inodes;
2698         }
2699
2700         ip->flags &= ~HAMMER_INODE_SLAVEFLUSH;
2701
2702         if (dorel)
2703                 hammer_rel_inode(ip, 0);
2704 }
2705
2706 /*
2707  * Called from hammer_sync_inode() to synchronize in-memory records
2708  * to the media.
2709  */
2710 static int
2711 hammer_sync_record_callback(hammer_record_t record, void *data)
2712 {
2713         hammer_cursor_t cursor = data;
2714         hammer_transaction_t trans = cursor->trans;
2715         hammer_mount_t hmp = trans->hmp;
2716         int error;
2717
2718         /*
2719          * Skip records that do not belong to the current flush.
2720          */
2721         ++hammer_stats_record_iterations;
2722         if (record->flush_state != HAMMER_FST_FLUSH)
2723                 return(0);
2724
2725 #if 1
2726         if (record->flush_group != record->ip->flush_group) {
2727                 kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group, record->ip->flush_group);
2728                 if (hammer_debug_critical)
2729                         Debugger("blah2");
2730                 return(0);
2731         }
2732 #endif
2733         KKASSERT(record->flush_group == record->ip->flush_group);
2734
2735         /*
2736          * Interlock the record using the BE flag.  Once BE is set the
2737          * frontend cannot change the state of FE.
2738          *
2739          * NOTE: If FE is set prior to us setting BE we still sync the
2740          * record out, but the flush completion code converts it to
2741          * a delete-on-disk record instead of destroying it.
2742          */
2743         KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
2744         record->flags |= HAMMER_RECF_INTERLOCK_BE;
2745
2746         /*
2747          * The backend has already disposed of the record.
2748          */
2749         if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
2750                 error = 0;
2751                 goto done;
2752         }
2753
2754         /*
2755          * If the whole inode is being deleted and all on-disk records will
2756          * be deleted very soon, we can't sync any new records to disk
2757          * because they will be deleted in the same transaction they were
2758          * created in (delete_tid == create_tid), which will assert.
2759          *
2760          * XXX There may be a case with RECORD_ADD with DELETED_FE set
2761          * that we currently panic on.
2762          */
2763         if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
2764                 switch(record->type) {
2765                 case HAMMER_MEM_RECORD_DATA:
2766                         /*
2767                          * We don't have to do anything, if the record was
2768                          * committed the space will have been accounted for
2769                          * in the blockmap.
2770                          */
2771                         /* fall through */
2772                 case HAMMER_MEM_RECORD_GENERAL:
2773                         /*
2774                          * Set deleted-by-backend flag.  Do not set the
2775                          * backend committed flag, because we are throwing
2776                          * the record away.
2777                          */
2778                         record->flags |= HAMMER_RECF_DELETED_BE;
2779                         ++record->ip->rec_generation;
2780                         error = 0;
2781                         goto done;
2782                 case HAMMER_MEM_RECORD_ADD:
2783                         panic("hammer_sync_record_callback: illegal add "
2784                               "during inode deletion record %p", record);
2785                         break; /* NOT REACHED */
2786                 case HAMMER_MEM_RECORD_INODE:
2787                         panic("hammer_sync_record_callback: attempt to "
2788                               "sync inode record %p?", record);
2789                         break; /* NOT REACHED */
2790                 case HAMMER_MEM_RECORD_DEL:
2791                         /*
2792                          * Follow through and issue the on-disk deletion
2793                          */
2794                         break;
2795                 }
2796         }
2797
2798         /*
2799          * If DELETED_FE is set special handling is needed for directory
2800          * entries.  Dependent pieces related to the directory entry may
2801          * have already been synced to disk.  If this occurs we have to
2802          * sync the directory entry and then change the in-memory record
2803          * from an ADD to a DELETE to cover the fact that it's been
2804          * deleted by the frontend.
2805          *
2806          * A directory delete covering record (MEM_RECORD_DEL) can never
2807          * be deleted by the frontend.
2808          *
2809          * Any other record type (aka DATA) can be deleted by the frontend.
2810          * XXX At the moment the flusher must skip it because there may
2811          * be another data record in the flush group for the same block,
2812          * meaning that some frontend data changes can leak into the backend's
2813          * synchronization point.
2814          */
2815         if (record->flags & HAMMER_RECF_DELETED_FE) {
2816                 if (record->type == HAMMER_MEM_RECORD_ADD) {
2817                         /*
2818                          * Convert a front-end deleted directory-add to
2819                          * a directory-delete entry later.
2820                          */
2821                         record->flags |= HAMMER_RECF_CONVERT_DELETE;
2822                 } else {
2823                         /*
2824                          * Dispose of the record (race case).  Mark as
2825                          * deleted by backend (and not committed).
2826                          */
2827                         KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
2828                         record->flags |= HAMMER_RECF_DELETED_BE;
2829                         ++record->ip->rec_generation;
2830                         error = 0;
2831                         goto done;
2832                 }
2833         }
2834
2835         /*
2836          * Assign the create_tid for new records.  Deletions already
2837          * have the record's entire key properly set up.
2838          */
2839         if (record->type != HAMMER_MEM_RECORD_DEL) {
2840                 record->leaf.base.create_tid = trans->tid;
2841                 record->leaf.create_ts = trans->time32;
2842         }
2843
2844         /*
2845          * This actually moves the record to the on-media B-Tree.  We
2846          * must also generate REDO_TERM entries in the UNDO/REDO FIFO
2847          * indicating that the related REDO_WRITE(s) have been committed.
2848          *
2849          * During recovery any REDO_TERM's within the nominal recovery span
2850          * are ignored since the related meta-data is being undone, causing
2851          * any matching REDO_WRITEs to execute.  The REDO_TERMs outside
2852          * the nominal recovery span will match against REDO_WRITEs and
2853          * prevent them from being executed (because the meta-data has
2854          * already been synchronized).
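              *
              * NOTE: base.key of a data record is the file offset one byte
              * past the end of the data, so key - data_len below yields the
              * starting offset covered by the REDO_TERM_WRITE entry.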
2855          */
2856         if (record->flags & HAMMER_RECF_REDO) {
2857                 KKASSERT(record->type == HAMMER_MEM_RECORD_DATA);
2858                 hammer_generate_redo(trans, record->ip,
2859                                      record->leaf.base.key -
2860                                          record->leaf.data_len,
2861                                      HAMMER_REDO_TERM_WRITE,
2862                                      NULL,
2863                                      record->leaf.data_len);
2864         }
2865
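             /*
              * Sync the record to the media B-Tree, retrying with a freshly
              * initialized cursor if the attempt deadlocks against another
              * cursor.
              */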
2866         for (;;) {
2867                 error = hammer_ip_sync_record_cursor(cursor, record);
2868                 if (error != EDEADLK)
2869                         break;
2870                 hammer_done_cursor(cursor);
2871                 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
2872                                            record->ip);
2873                 if (error)
2874                         break;
2875         }
2876         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
2877
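             /*
              * Return a negative error to abort the RB_SCAN; the caller is
              * expected to convert it back to a positive error code.
              */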
2878         if (error)
2879                 error = -error;
2880 done:
2881         hammer_flush_record_done(record, error);
2882
2883         /*
2884          * Do partial finalization if we have built up too many dirty
2885          * buffers.  Otherwise a buffer cache deadlock can occur when
2886          * doing things like creating tens of thousands of tiny files.
2887          *
2888          * We must release our cursor lock to avoid a 3-way deadlock
2889          * due to the exclusive sync lock the finalizer must get.
2890          *
2891          * WARNING: See warnings in hammer_unlock_cursor() function.
2892          */
2893         if (hammer_flusher_meta_limit(hmp) ||
2894             vm_page_count_severe()) {
2895                 hammer_unlock_cursor(cursor);
2896                 hammer_flusher_finalize(trans, 0);
2897                 hammer_lock_cursor(cursor);
2898         }
2899         return(error);
2900 }
2901
2902 /*
2903  * Backend function called by the flusher to sync an inode to media.
2904  */
2905 int
2906 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
2907 {
2908         struct hammer_cursor cursor;
2909         hammer_node_t tmp_node;
2910         hammer_record_t depend;
2911         hammer_record_t next;
2912         int error, tmp_error;
2913         u_int64_t nlinks;
2914
2915         if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
2916                 return(0);
2917
2918         error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2919         if (error)
2920                 goto done;
2921
2922         /*
2923          * Any directory records referencing this inode which are not in
2924          * our current flush group must adjust our nlink count for the
2925          * purposes of synchronizing to disk.
2926          *
2927          * Records which are in our flush group can be unlinked from our
2928          * inode now, potentially allowing the inode to be physically
2929          * deleted.
2930          *
2931          * This cannot block.
2932          */
2933         nlinks = ip->ino_data.nlinks;
2934         next = TAILQ_FIRST(&ip->target_list);
2935         while ((depend = next) != NULL) {
2936                 next = TAILQ_NEXT(depend, target_entry);
2937                 if (depend->flush_state == HAMMER_FST_FLUSH &&
2938                     depend->flush_group == ip->flush_group) {
2939                         /*
2940                          * If this is an ADD that was deleted by the frontend
2941                          * the frontend nlinks count will have already been
2942                          * decremented, but the backend is going to sync its
2943                          * directory entry and must account for it.  The
2944                          * record will be converted to a delete-on-disk when
2945                          * it gets synced.
2946                          *
2947                          * If the ADD was not deleted by the frontend we
2948                          * can remove the dependancy from our target_list.
2949          * can remove the dependency from our target_list.
2950                         if (depend->flags & HAMMER_RECF_DELETED_FE) {
2951                                 ++nlinks;
2952                         } else {
2953                                 TAILQ_REMOVE(&ip->target_list, depend,
2954                                              target_entry);
2955                                 depend->target_ip = NULL;
2956                         }
2957                 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
2958                         /*
2959                          * Not part of our flush group and not deleted by
2960                          * the front-end, adjust the link count synced to
2961                          * the media (undo what the frontend did when it
2962                          * queued the record).
2963                          */
2964                         KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
2965                         switch(depend->type) {
2966                         case HAMMER_MEM_RECORD_ADD:
2967                                 --nlinks;
2968                                 break;
2969                         case HAMMER_MEM_RECORD_DEL:
2970                                 ++nlinks;
2971                                 break;
2972                         default:
2973                                 break;
2974                         }
2975                 }
2976         }
2977
2978         /*
2979          * Set dirty if we had to modify the link count.
2980          */
2981         if (ip->sync_ino_data.nlinks != nlinks) {
2982                 KKASSERT((int64_t)nlinks >= 0);
2983                 ip->sync_ino_data.nlinks = nlinks;
2984                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
2985         }
2986
2987         /*
2988          * If there is a truncation queued, destroy any data past the (aligned)
2989          * truncation point.  Userland will have dealt with the buffer
2990          * containing the truncation point for us.
2991          *
2992          * We don't flush pending frontend data buffers until after we've
2993          * dealt with the truncation.
2994          */
2995         if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2996                 /*
2997                  * Interlock trunc_off.  The VOP front-end may continue to
2998                  * make adjustments to it while we are blocked.
2999                  */
3000                 off_t trunc_off;
3001                 off_t aligned_trunc_off;
3002                 int blkmask;
3003
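                     /*
                      * Round the truncation offset up to a block boundary.
                      * For example, with a 16K block (blkmask 0x3fff) a
                      * trunc_off of 0x4001 aligns up to 0x8000.  Only whole
                      * blocks at or past the aligned offset are destroyed
                      * on-media below.
                      */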
3004                 trunc_off = ip->sync_trunc_off;
3005                 blkmask = hammer_blocksize(trunc_off) - 1;
3006                 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
3007
3008                 /*
3009                  * Delete any whole blocks on-media.  The front-end has
3010                  * already cleaned out any partial block and made it
3011                  * pending.  The front-end may have updated trunc_off
3012                  * while we were blocked so we only use sync_trunc_off.
3013                  *
3014                  * This operation can blow out the buffer cache; EWOULDBLOCK
3015                  * means we were unable to complete the deletion.  The
3016                  * deletion will update sync_trunc_off in that case.
3017                  */
3018                 error = hammer_ip_delete_range(&cursor, ip,
3019                                                 aligned_trunc_off,
3020                                                 0x7FFFFFFFFFFFFFFFLL, 2);
3021                 if (error == EWOULDBLOCK) {
3022                         ip->flags |= HAMMER_INODE_WOULDBLOCK;
3023                         error = 0;
3024                         goto defer_buffer_flush;
3025                 }
3026
3027                 if (error)
3028                         goto done;
3029
3030                 /*
3031                  * Generate a REDO_TERM_TRUNC entry in the UNDO/REDO FIFO.
3032                  *
3033                  * XXX we do this even if we did not previously generate
3034                  * a REDO_TRUNC record.  This operation may enclose the
3035                  * range for multiple prior truncation entries in the REDO
3036                  * log.
3037                  */
3038                 if (trans->hmp->version >= HAMMER_VOL_VERSION_FOUR &&
3039                     (ip->flags & HAMMER_INODE_RDIRTY)) {
3040                         hammer_generate_redo(trans, ip, aligned_trunc_off,
3041                                              HAMMER_REDO_TERM_TRUNC,
3042                                              NULL, 0);
3043                 }
3044
3045                 /*
3046                  * Clear the truncation flag on the backend after we have
3047                  * completed the deletions.  Backend data is now good again
3048                  * (including new records we are about to sync, below).
3049                  *
3050                  * Leave sync_trunc_off intact.  As we write additional
3051                  * records the backend will update sync_trunc_off.  This
3052                  * tells the backend whether it can skip the overwrite
3053                  * test.  This should work properly even when the backend
3054                  * writes full blocks where the truncation point straddles
3055                  * the block because the comparison is against the base
3056                  * offset of the record.
3057                  */
3058                 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
3059                 /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
3060         } else {
3061                 error = 0;
3062         }
3063
3064         /*
3065          * Now sync related records.  These will typically be directory
3066          * entries, records tracking direct-writes, or delete-on-disk records.
3067          */
3068         if (error == 0) {
3069                 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
3070                                     hammer_sync_record_callback, &cursor);
3071                 if (tmp_error < 0)
3072                         tmp_error = -tmp_error;
3073                 if (tmp_error)
3074                         error = tmp_error;
3075         }
3076         hammer_cache_node(&ip->cache[1], cursor.node);
3077
3078         /*
3079          * Re-seek for inode update, assuming our cache hasn't been ripped
3080          * out from under us.
3081          */
3082         if (error == 0) {
3083                 tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
3084                 if (tmp_node) {
3085                         hammer_cursor_downgrade(&cursor);
3086                         hammer_lock_sh(&tmp_node->lock);
3087                         if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
3088                                 hammer_cursor_seek(&cursor, tmp_node, 0);
3089                         hammer_unlock(&tmp_node->lock);
3090                         hammer_rel_node(tmp_node);
3091                 }
3092                 error = 0;
3093         }
3094
3095         /*
3096          * If we are deleting the inode the frontend had better not have
3097          * any active references on elements making up the inode.
3098          *
3099          * The call to hammer_ip_delete_clean() cleans up auxiliary records
3100          * but not DB or DATA records.  Those must have already been deleted
3101          * by the normal truncation mechanic.
3102          */
3103         if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
3104             RB_EMPTY(&ip->rec_tree) &&
3105             (ip->sync_flags & HAMMER_INODE_DELETING) &&
3106             (ip->flags & HAMMER_INODE_DELETED) == 0) {
3107                 int count1 = 0;
3108
3109                 error = hammer_ip_delete_clean(&cursor, ip, &count1);
3110                 if (error == 0) {
3111                         ip->flags |= HAMMER_INODE_DELETED;
3112                         ip->sync_flags &= ~HAMMER_INODE_DELETING;
3113                         ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
3114                         KKASSERT(RB_EMPTY(&ip->rec_tree));
3115
3116                         /*
3117                          * Set delete_tid in both the frontend and backend
3118                          * copy of the inode record.  The DELETED flag handles
3119                          * this, do not set DDIRTY.
3120                          */
3121                         ip->ino_leaf.base.delete_tid = trans->tid;
3122                         ip->sync_ino_leaf.base.delete_tid = trans->tid;
3123                         ip->ino_leaf.delete_ts = trans->time32;
3124                         ip->sync_ino_leaf.delete_ts = trans->time32;
3125
3126
3127                         /*
3128                          * Adjust the inode count in the volume header
3129                          */
3130                         hammer_sync_lock_sh(trans);
3131                         if (ip->flags & HAMMER_INODE_ONDISK) {
3132                                 hammer_modify_volume_field(trans,
3133                                                            trans->rootvol,
3134                                                            vol0_stat_inodes);
3135                                 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
3136                                 hammer_modify_volume_done(trans->rootvol);
3137                         }
3138                         hammer_sync_unlock(trans);
3139                 }
3140         }
3141
3142         if (error)
3143                 goto done;
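             /*
              * The record pass above completed without error; clear the
              * dirty-buffer state (BUFS) for this flush cycle.
              */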
3144         ip->sync_flags &= ~HAMMER_INODE_BUFS;
3145
3146 defer_buffer_flush:
3147         /*
3148          * Now update the inode's on-disk inode-data and/or on-disk record.
3149          * DELETED and ONDISK are managed only in ip->flags.
3150          *
3151          * In the case of a deferred buffer flush we still update the on-disk
3152          * inode to satisfy visibility requirements if there happen to be
3153          * directory dependencies.
3154          */
3155         switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
3156         case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
3157                 /*
3158                  * If deleted and on-disk, don't set any additional flags.
3159                  * The delete flag takes care of things.
3160                  *
3161                  * Clear flags which may have been set by the frontend.
3162                  */
3163                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
3164                                     HAMMER_INODE_SDIRTY |
3165                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
3166                                     HAMMER_INODE_DELETING);
3167                 break;
3168         case HAMMER_INODE_DELETED:
3169                 /*
3170                  * Take care of the case where a deleted inode was never
3171                  * flushed to the disk in the first place.
3172                  *
3173                  * Clear flags which may have been set by the frontend.
3174                  */
3175                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
3176                                     HAMMER_INODE_SDIRTY |
3177                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
3178                                     HAMMER_INODE_DELETING);
3179                 while (RB_ROOT(&ip->rec_tree)) {
3180                         hammer_record_t record = RB_ROOT(&ip->rec_tree);
3181                         hammer_ref(&record->lock);
3182                         KKASSERT(hammer_oneref(&record->lock));
3183                         record->flags |= HAMMER_RECF_DELETED_BE;
3184                         ++record->ip->rec_generation;
3185                         hammer_rel_mem_record(record);
3186                 }
3187                 break;
3188         case HAMMER_INODE_ONDISK:
3189                 /*
3190                  * If already on-disk, do not set any additional flags.
3191                  */
3192                 break;
3193         default:
3194                 /*
3195                  * If not on-disk and not deleted, set DDIRTY to force
3196                  * an initial record to be written.
3197                  *
3198                  * Also set the create_tid in both the frontend and backend
3199                  * copy of the inode record.
3200                  */
3201                 ip->ino_leaf.base.create_tid = trans->tid;
3202                 ip->ino_leaf.create_ts = trans->time32;
3203                 ip->sync_ino_leaf.base.create_tid = trans->tid;
3204                 ip->sync_ino_leaf.create_ts = trans->time32;
3205                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
3206                 break;
3207         }
3208
3209         /*
3210          * If DDIRTY or SDIRTY is set, write out a new record.
3211          * If the inode is already on-disk the old record is marked as
3212          * deleted.
3213          *
3214          * If DELETED is set hammer_update_inode() will delete the existing
3215          * record without writing out a new one.
3216          *
3217          * If *ONLY* the ITIMES flag is set we can update the record in-place.
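              *
              * Illustrative summary of the dispatch below:
              *
              *   DELETED            -> hammer_update_inode()  (delete record)
              *   ATIME/MTIME only   -> hammer_update_itimes() (in-place)
              *   DDIRTY or SDIRTY   -> hammer_update_inode()  (new record)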
3218          */
3219         if (ip->flags & HAMMER_INODE_DELETED) {
3220                 error = hammer_update_inode(&cursor, ip);
3221         } else
3222         if (!(ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY)) &&
3223             (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
3224                 error = hammer_update_itimes(&cursor, ip);
3225         } else
3226         if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY |
3227                               HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
3228                 error = hammer_update_inode(&cursor, ip);
3229         }
3230 done:
3231         if (ip->flags & HAMMER_INODE_MODMASK)
3232                 hammer_inode_dirty(ip);
3233         if (error) {
3234                 hammer_critical_error(ip->hmp, ip, error,
3235                                       "while syncing inode");
3236         }
3237         hammer_done_cursor(&cursor);
3238         return(error);
3239 }
3240
3241 /*
3242  * This routine is called when the OS is no longer actively referencing
3243  * the inode (but might still be keeping it cached), or when releasing
3244  * the last reference to an inode.
3245  *
3246  * At this point if the inode's nlinks count is zero we want to destroy
3247  * it, which may mean destroying it on-media too.
3248  */
3249 void
3250 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
3251 {
3252         struct vnode *vp;
3253
3254         /*
3255          * Set the DELETING flag when the link count drops to 0 and the
3256          * OS no longer has any opens on the inode.
3257          *
3258          * The backend will clear DELETING (a mod flag) and set DELETED
3259          * (a state flag) when it is actually able to perform the
3260          * operation.
3261          *
3262          * Don't reflag the deletion if the flusher is currently syncing
3263          * one that was already flagged.  A previously set DELETING flag
3264          * may bounce around flags and sync_flags until the operation is
3265          * completely done.
3266          *
3267          * Do not attempt to modify a snapshot inode (one set to read-only).
3268          */
3269         if (ip->ino_data.nlinks == 0 &&
3270             ((ip->flags | ip->sync_flags) & (HAMMER_INODE_RO|HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
3271                 ip->flags |= HAMMER_INODE_DELETING;
3272                 ip->flags |= HAMMER_INODE_TRUNCATED;
3273                 ip->trunc_off = 0;
3274                 vp = NULL;
3275                 if (getvp) {
3276                         if (hammer_get_vnode(ip, &vp) != 0)
3277                                 return;
3278                 }
3279
3280                 /*
3281                  * Final cleanup
3282                  */
3283                 if (ip->vp)
3284                         nvtruncbuf(ip->vp, 0, HAMMER_BUFSIZE, 0, 0);
3285                 if (ip->flags & HAMMER_INODE_MODMASK)
3286                         hammer_inode_dirty(ip);
3287                 if (getvp)
3288                         vput(vp);
3289         }
3290 }
3291
3292 /*
3293  * After potentially resolving a dependency the inode is tested
3294  * to determine whether it needs to be reflushed.
3295  */
3296 void
3297 hammer_test_inode(hammer_inode_t ip)
3298 {
3299         if (ip->flags & HAMMER_INODE_REFLUSH) {
3300                 ip->flags &= ~HAMMER_INODE_REFLUSH;
3301                 hammer_ref(&ip->lock);
3302                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
3303                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
3304                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
3305                 } else {
3306                         hammer_flush_inode(ip, 0);
3307                 }
3308                 hammer_rel_inode(ip, 0);
3309         }
3310 }
3311
3312 /*
3313  * Clear the RECLAIM flag on an inode.  This occurs when the inode is
3314  * reassociated with a vp or just before it gets freed.
3315  *
3316  * Pipeline wakeups to threads blocked due to an excessive number of
3317  * detached inodes.  This typically occurs when atime updates accumulate
3318  * while scanning a directory tree.
3319  */
3320 static void
3321 hammer_inode_wakereclaims(hammer_inode_t ip)
3322 {
3323         struct hammer_reclaim *reclaim;
3324         hammer_mount_t hmp = ip->hmp;
3325
3326         if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
3327                 return;
3328
3329         --hammer_count_reclaims;
3330         --hmp->count_reclaims;
3331         ip->flags &= ~HAMMER_INODE_RECLAIM;
3332
3333         if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
3334                 KKASSERT(reclaim->count > 0);
3335                 if (--reclaim->count == 0) {
3336                         TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
3337                         wakeup(reclaim);
3338                 }
3339         }
3340 }
3341
3342 /*
3343  * Setup our reclaim pipeline.  We only let so many detached (and dirty)
3344  * inodes build up before we start blocking.  This routine is called
3345  * if a new inode is created or an inode is loaded from media.
3346  *
3347  * When we block we don't care *which* inode has finished reclaiming,
3348  * as long as one does.
3349  *
3350  * The reclaim pipeline is primarily governed by the auto-flush, which
3351  * occurs at 1/4 hammer_limit_reclaims.  We don't want to block if the
3352  * count is less than 1/2 hammer_limit_reclaims.  From 1/2 to the full
3353  * count, blocking is dynamically governed.
3354  */
3355 void
3356 hammer_inode_waitreclaims(hammer_transaction_t trans)
3357 {
3358         hammer_mount_t hmp = trans->hmp;
3359         struct hammer_reclaim reclaim;
3360         int lower_limit;
3361
3362         /*
3363          * Track inode load; delay if the number of reclaiming inodes is
3364          * between 2/4 and 4/4 hammer_limit_reclaims, depending on per-pid load.
3365          */
3366         if (curthread->td_proc) {
3367                 struct hammer_inostats *stats;
3368
3369                 stats = hammer_inode_inostats(hmp, curthread->td_proc->p_pid);
3370                 ++stats->count;
3371
3372                 if (stats->count > hammer_limit_reclaims / 2)
3373                         stats->count = hammer_limit_reclaims / 2;
3374                 lower_limit = hammer_limit_reclaims - stats->count;
3375                 if (hammer_debug_general & 0x10000) {
3376                         kprintf("pid %5d limit %d\n",
3377                                 (int)curthread->td_proc->p_pid, lower_limit);
3378                 }
3379         } else {
3380                 lower_limit = hammer_limit_reclaims * 3 / 4;
3381         }
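             /*
              * Worked example (illustrative): if hammer_limit_reclaims were
              * 4000, a process whose recent inode load saturates the 1/2
              * clamp above gets lower_limit = 2000 and begins blocking once
              * half the limit is reclaiming, while a process with no recent
              * load does not block until the full 4000 is reached.
              */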
3382         if (hmp->count_reclaims >= lower_limit) {
3383                 reclaim.count = 1;
3384                 TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
3385                 tsleep(&reclaim, 0, "hmrrcm", hz);
3386                 if (reclaim.count > 0)
3387                         TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
3388         }
3389 }
3390
3391 /*
3392  * Keep track of reclaim statistics on a per-pid basis using a loose
3393  * 4-way set associative hash table.  Collisions inherit the count of
3394  * the previous entry.
3395  *
3396  * NOTE: We want to be careful here to limit the chain size.  If the chain
3397  *       size is too large a pid will spread its stats out over too many
3398  *       entries under certain types of heavy filesystem activity and
3399  *       wind up not delaying long enough.
3400  */
3401 static
3402 struct hammer_inostats *
3403 hammer_inode_inostats(hammer_mount_t hmp, pid_t pid)
3404 {
3405         struct hammer_inostats *stats;
3406         int delta;
3407         int chain;
3408         static volatile int iterator;   /* we don't care about MP races */
3409
3410         /*
3411          * Chain up to 4 times to find our entry.
3412          */
3413         for (chain = 0; chain < 4; ++chain) {
3414                 stats = &hmp->inostats[(pid + chain) & HAMMER_INOSTATS_HMASK];
3415                 if (stats->pid == pid)
3416                         break;
3417         }
3418
3419         /*
3420          * Replace one of the four chaining entries with our new entry.
3421          */
3422         if (chain == 4) {
3423                 stats = &hmp->inostats[(pid + (iterator++ & 3)) &
3424                                        HAMMER_INOSTATS_HMASK];
3425                 stats->pid = pid;
3426         }
3427
3428         /*
3429          * Decay the entry
3430          */
3431         if (stats->count && stats->ltick != ticks) {
3432                 delta = ticks - stats->ltick;
3433                 stats->ltick = ticks;
3434                 if (delta <= 0 || delta > hz * 60)
3435                         stats->count = 0;
3436                 else
3437                         stats->count = stats->count * hz / (hz + delta);
3438         }
3439         if (hammer_debug_general & 0x10000)
3440                 kprintf("pid %5d stats %d\n", (int)pid, stats->count);
3441         return (stats);
3442 }
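
     #if 0
     /*
      * Hypothetical helper (sketch only, not compiled): the decay applied by
      * hammer_inode_inostats() above, factored out.  A single one-second gap
      * (delta == hz) halves the count; repeated short gaps compound the
      * factor hz / (hz + delta) into roughly exponential decay with a
      * one-second time constant.  Counts idle for over a minute are zeroed.
      */
     static int
     hammer_inostats_decay(int count, int delta)
     {
             if (delta <= 0 || delta > hz * 60)
                     return(0);
             return(count * hz / (hz + delta));
     }
     #endif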
3443
3444 #if 0
3445
3446 /*
3447  * XXX not used, doesn't work very well due to the large batching nature
3448  * of flushes.
3449  *
3450  * A larger than normal backlog of inodes is sitting in the flusher,
3451  * enforce a general slowdown to let it catch up.  This routine is only
3452  * called on completion of a non-flusher-related transaction which
3453  * performed B-Tree node I/O.
3454  *
3455  * It is possible for the flusher to stall in a continuous load.
3456  * blogbench -i1000 -o seems to do a good job generating this sort of load.
3457  * If the flusher is unable to catch up the inode count can bloat until
3458  * we run out of kvm.
3459  *
3460  * This is a bit of a hack.
3461  */
3462 void
3463 hammer_inode_waithard(hammer_mount_t hmp)
3464 {
3465         /*
3466          * Hysteresis.
3467          */
3468         if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
3469                 if (hmp->count_reclaims < hammer_limit_reclaims / 2 &&
3470                     hmp->count_iqueued < hmp->count_inodes / 20) {
3471                         hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
3472                         return;
3473                 }
3474         } else {
3475                 if (hmp->count_reclaims < hammer_limit_reclaims ||
3476                     hmp->count_iqueued < hmp->count_inodes / 10) {
3477                         return;
3478                 }
3479                 hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
3480         }
3481
3482         /*
3483          * Block for one flush cycle.
3484          */
3485         hammer_flusher_wait_next(hmp);
3486 }
3487
3488 #endif