/* dragonfly.git: sys/vfs/hammer/hammer_inode.c @ f63d29e8791fa01095c0fce088b7bc5e6d68493e */
/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <vm/vm_page2.h>

#include "hammer.h"

static int      hammer_unload_inode(struct hammer_inode *ip);
static void     hammer_free_inode(hammer_inode_t ip);
static void     hammer_flush_inode_core(hammer_inode_t ip,
                                        hammer_flush_group_t flg, int flags);
static int      hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int      hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int      hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
                                        hammer_flush_group_t flg);
static int      hammer_setup_parent_inodes_helper(hammer_record_t record,
                                        int depth, hammer_flush_group_t flg);
static void     hammer_inode_wakereclaims(hammer_inode_t ip);
static struct hammer_inostats *hammer_inode_inostats(hammer_mount_t hmp,
                                        pid_t pid);
static struct hammer_inode *__hammer_find_inode(hammer_transaction_t trans,
                                        int64_t obj_id, hammer_tid_t asof,
                                        uint32_t localization);

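/*
 * Rate limiter state (used with krateprintf()) to throttle generic
 * HAMMER console messages to roughly one per second.
 */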
struct krate hammer_gen_krate = { 1 };

/*
 * RB-Tree support for inode structures.  Inodes are ordered by
 * obj_localization, then obj_id, then obj_asof.
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_localization < ip2->obj_localization)
                return(-1);
        if (ip1->obj_localization > ip2->obj_localization)
                return(1);
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}

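/*
 * RB-Tree support for the inode redo FIFO, ordered by each inode's
 * starting offset within the FIFO.
 */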
int
hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->redo_fifo_start < ip2->redo_fifo_start)
                return(-1);
        if (ip1->redo_fifo_start > ip2->redo_fifo_start)
                return(1);
        return(0);
}

/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_localization < ip->obj_localization)
                return(-1);
        if (info->obj_localization > ip->obj_localization)
                return(1);
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}

/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
        hammer_inode_info_t info = data;

        if (ip->obj_localization > info->obj_localization)
                return(1);
        if (ip->obj_localization < info->obj_localization)
                return(-1);
        if (ip->obj_id > info->obj_id)
                return(1);
        if (ip->obj_id < info->obj_id)
                return(-1);
        return(0);
}

/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
        uint32_t localization = *(uint32_t *)data;
        if (ip->obj_localization > localization)
                return(1);
        if (ip->obj_localization < localization)
                return(-1);
        return(0);
}

/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
        if (p1->localization < p2->localization)
                return(-1);
        if (p1->localization > p2->localization)
                return(1);
        return(0);
}


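/*
 * Generate the red-black tree support functions:
 *
 * - hammer_ino_rb_tree indexes all in-memory inodes and additionally
 *   supports lookups keyed by a hammer_inode_info (the INFO XLOOKUP).
 * - hammer_pfs_rb_tree indexes in-memory PFS structures directly by
 *   their localization value.
 */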
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
             hammer_pfs_rb_compare, uint32_t, localization);

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 *
 * MPALMOSTSAFE
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);
        hammer_mount_t hmp;

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(ap->a_vp);
                return(0);
        }

        /*
         * If the inode no longer has visibility in the filesystem try to
         * recycle it immediately, even if the inode is dirty.  Recycling
         * it quickly allows the system to reclaim buffer cache and VM
         * resources which can matter a lot in a heavily loaded system.
         *
         * This can deadlock in vfsync() if we aren't careful.
         *
         * Do not queue the inode to the flusher if we still have visibility,
         * otherwise namespace calls such as chmod will unnecessarily generate
         * multiple inode updates.
         */
        if (ip->ino_data.nlinks == 0) {
                hmp = ip->hmp;
                lwkt_gettoken(&hmp->fs_token);
                hammer_inode_unloadable_check(ip, 0);
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
                lwkt_reltoken(&hmp->fs_token);
                vrecycle(ap->a_vp);
        }
        return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 *
 * We must interlock ip->vp so hammer_get_vnode() can avoid races.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct vnode *vp;

        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
                lwkt_gettoken(&hmp->fs_token);
                hammer_lock_ex(&ip->lock);
                vp->v_data = NULL;
                ip->vp = NULL;

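                /*
                 * Account for this reclaim-in-progress.  The frontend
                 * uses these counters to throttle new inode
                 * instantiations (see hammer_inode_waitreclaims()).
                 */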
                if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
                        ++hammer_count_reclaims;
                        ++hmp->count_reclaims;
                        ip->flags |= HAMMER_INODE_RECLAIM;
                }
                hammer_unlock(&ip->lock);
                vclrisdirty(vp);
                hammer_rel_inode(ip, 1);
                lwkt_reltoken(&hmp->fs_token);
        }
        return(0);
}

/*
 * Inform the kernel that the inode is dirty.  This will be checked
 * by vn_unlock().
 *
 * Theoretically in order to reclaim a vnode the hammer_vop_reclaim()
 * must be called which will interlock against our inode lock, so
 * if VRECLAIMED is not set vp->v_mount (as used by vsetisdirty())
 * should be stable without having to acquire any new locks.
 */
void
hammer_inode_dirty(struct hammer_inode *ip)
{
        struct vnode *vp;

        if ((ip->flags & HAMMER_INODE_MODMASK) &&
            (vp = ip->vp) != NULL &&
            (vp->v_flag & (VRECLAIMED | VISDIRTY)) == 0) {
                vsetisdirty(vp);
        }
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
        hammer_mount_t hmp;
        struct vnode *vp;
        int error = 0;
        uint8_t obj_type;

        hmp = ip->hmp;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp = *vpp;
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;

                        obj_type = ip->ino_data.obj_type;
                        vp->v_type = hammer_get_vnode_type(obj_type);

                        hammer_inode_wakereclaims(ip);

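                        /*
                         * Select type-specific vnode ops.  Device
                         * vnodes are additionally associated with
                         * their device via addaliasu().
                         */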
                        switch(ip->ino_data.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
                                break;
                        case HAMMER_OBJTYPE_REGFILE:
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         *
                         * Pseudo-filesystem roots can be accessed via
                         * non-root filesystem paths and setting VROOT may
                         * confuse the namecache.  Set VPFSROOT instead.
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT) {
                                if (ip->obj_asof == hmp->asof) {
                                        if (ip->obj_localization ==
                                                HAMMER_DEF_LOCALIZATION)
                                                vsetflags(vp, VROOT);
                                        else
                                                vsetflags(vp, VPFSROOT);
                                } else {
                                        vsetflags(vp, VPFSROOT);
                                }
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
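                        /*
                         * Regular files are VM-object backed; initialize
                         * the VM I/O parameters so buffer cache
                         * operations on file data work.
                         */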
                        if (vp->v_type == VREG) {
                                vinitvmio(vp, ip->ino_data.size,
                                          hammer_blocksize(ip->ino_data.size),
                                          hammer_blockoff(ip->ino_data.size));
                        }
                        break;
                }

                /*
                 * Interlock vnode clearing.  This does not prevent the
                 * vnode from going into a reclaimed state but it does
                 * prevent it from being destroyed or reused so the vget()
                 * will properly fail.
                 */
                hammer_lock_ex(&ip->lock);
                if ((vp = ip->vp) == NULL) {
                        hammer_unlock(&ip->lock);
                        continue;
                }
                vhold(vp);
                hammer_unlock(&ip->lock);

                /*
                 * loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp) {
                                vdrop(vp);
                                break;
                        }
                        vput(vp);
                }
                vdrop(vp);
        }
        *vpp = vp;
        return(error);
}

/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference them, and issue the related call-back.  This routine
 * is used for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
                            int (*callback)(hammer_inode_t ip, void *data),
                            void *data)
{
        hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
                                   hammer_inode_info_cmp_all_history,
                                   callback, iinfo);
}

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, uint32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_node_cache *cachep;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;


        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a dummy inode we return a failure so dounlink
         * (which does another lookup) doesn't try to mess with the
         * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
         * to ref dummy inodes.
         */
loop:
        *errorp = 0;
        ip = __hammer_find_inode(trans, obj_id, asof, localization);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        if (hmp->ronly)
                ip->flags |= HAMMER_INODE_RO;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Locate the on-disk inode.  If this is a PFS root we always
         * access the current version of the root inode and (if it is not
         * a master) always access information under it with a snapshot
         * TID.
         *
         * We cache recent inode lookups in this directory in dip->cache[2].
         * If we can't find it we assume the inode we are looking for is
         * close to the directory inode.
         */
retry:
        cachep = NULL;
        if (dip) {
                if (dip->cache[2].node)
                        cachep = &dip->cache[2];
                else
                        cachep = &dip->cache[0];
        }
        hammer_init_cursor(trans, &cursor, cachep, NULL);
        cursor.key_beg.localization = localization | HAMMER_LOCALIZE_INODE;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;

        cursor.asof = asof;
        cursor.flags = HAMMER_CURSOR_GET_DATA | HAMMER_CURSOR_ASOF;

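        /*
         * An EDEADLK return indicates the B-Tree lookup backed out to
         * break a lock conflict.  Tear the cursor down and retry the
         * lookup from scratch.
         */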
        *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
                ip->ino_data = cursor.data->inode;

                /*
                 * cache[0] tries to cache the location of the object inode.
                 * The assumption is that it is near the directory inode.
                 *
                 * cache[1] tries to cache the location of the object data.
                 * We might have something in the governing directory from
                 * scan optimizations (see the strategy code in
                 * hammer_vnops.c).
                 *
                 * We update dip->cache[2], if possible, with the location
                 * of the object inode for future directory shortcuts.
                 */
                hammer_cache_node(&ip->cache[0], cursor.node);
                if (dip) {
                        if (dip->cache[3].node) {
                                hammer_cache_node(&ip->cache[1],
                                                  dip->cache[3].node);
                        }
                        hammer_cache_node(&dip->cache[2], cursor.node);
                }

                /*
                 * The file should not contain any data past the file size
                 * stored in the inode.  Setting save_trunc_off to the
                 * file size instead of max reduces B-Tree lookup overheads
                 * on append by allowing the flusher to avoid checking for
                 * record overwrites.
                 */
                ip->save_trunc_off = ip->ino_data.size;

                /*
                 * Locate and assign the pseudofs management structure to
                 * the inode.
                 */
                if (dip && dip->obj_localization == ip->obj_localization) {
                        ip->pfsm = dip->pfsm;
                        hammer_ref(&ip->pfsm->lock);
                } else {
                        ip->pfsm = hammer_load_pseudofs(trans,
                                                        ip->obj_localization,
                                                        errorp);
                        *errorp = 0;    /* ignore ENOENT */
                }
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }

                hammer_free_inode(ip);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);

        /*
         * NEWINODE is only set if the inode becomes dirty later,
         * setting it here just leads to unnecessary stalls.
         *
         * trans->flags |= HAMMER_TRANSF_NEWINODE;
         */
        return (ip);
}

/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, uint32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a non-fake inode we return an error.  Only fake
         * inodes can be returned by this routine.
         */
loop:
        *errorp = 0;
        ip = __hammer_find_inode(trans, obj_id, asof, localization);
        if (ip) {
                if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Populate the dummy inode.  Leave everything zero'd out.
         *
         * (ip->ino_leaf and ip->ino_data)
         *
         * Make the dummy inode a FIFO object which most copy programs
         * will properly ignore.
         */
        ip->save_trunc_off = ip->ino_data.size;
        ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

        /*
         * Locate and assign the pseudofs management structure to
         * the inode.
         */
        if (dip && dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
        } else {
                ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
                                                errorp);
                *errorp = 0;    /* ignore ENOENT */
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         *
         * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        goto loop;
                }
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }
                hammer_free_inode(ip);
                ip = NULL;
        }
        trans->flags |= HAMMER_TRANSF_NEWINODE;
        return (ip);
}

/*
 * Return a referenced inode only if it is in our inode cache.
 * Dummy inodes do not count.
 */
struct hammer_inode *
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
                  hammer_tid_t asof, uint32_t localization)
{
        struct hammer_inode *ip;

        ip = __hammer_find_inode(trans, obj_id, asof, localization);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY)
                        ip = NULL;
                else
                        hammer_ref(&ip->lock);
        }
        return(ip);
}

/*
 * Return an inode only if it is in our inode cache.  This function
 * does not add a reference to the inode; the caller must reference
 * it if needed.
 */
static struct hammer_inode *
__hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
                  hammer_tid_t asof, uint32_t localization)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;

        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);

        return(ip);
}

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a non-root PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred,
                    hammer_inode_t dip, const char *name, int namelen,
                    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;
        int error;
        int64_t namekey;
        uint32_t dummy;

        hmp = trans->hmp;

        /*
         * Disallow the creation of new inodes in directories which
         * have been deleted.  In HAMMER, this will cause a record
         * syncing assertion later on in the flush code.
         */
        if (dip && dip->ino_data.nlinks == 0) {
                *ipp = NULL;
                return (EINVAL);
        }

        /*
         * Allocate inode
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        trans->flags |= HAMMER_TRANSF_NEWINODE;

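        /*
         * PFS root inodes use the well-known root object id within the
         * PFS's localization domain.  Other inodes are assigned an
         * object id allocated relative to the directory's name hash
         * key, keeping related objects near each other in the B-Tree.
         */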
        if (pfsm) {
                KKASSERT(pfsm->localization != HAMMER_DEF_LOCALIZATION);
                ip->obj_id = HAMMER_OBJID_ROOT;
                ip->obj_localization = pfsm->localization;
        } else {
                KKASSERT(dip != NULL);
                namekey = hammer_direntry_namekey(dip, name, namelen, &dummy);
                ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
                ip->obj_localization = dip->obj_localization;
        }

        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY |
                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;

        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        /* ip->save_trunc_off = 0; (already zero) */
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        ip->ino_data.atime = trans->time;
        ip->ino_data.mtime = trans->time;
        ip->ino_data.size = 0;
        ip->ino_data.nlinks = 0;

        /*
         * A nohistory designator on the parent directory is inherited by
         * the child.  We will do this even for pseudo-fs creation... the
         * sysad can turn it off.
         */
        if (dip) {
                ip->ino_data.uflags = dip->ino_data.uflags &
                                      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
        }

        ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_leaf.base.localization = ip->obj_localization |
                                         HAMMER_LOCALIZE_INODE;
        ip->ino_leaf.base.obj_id = ip->obj_id;
        ip->ino_leaf.base.key = 0;
        ip->ino_leaf.base.create_tid = 0;
        ip->ino_leaf.base.delete_tid = 0;
        ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;

        /*
         * If we are running version 2 or greater directory entries are
         * inode-localized instead of data-localized.
         */
        if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
                if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        ip->ino_data.cap_flags |=
                                HAMMER_INODE_CAP_DIR_LOCAL_INO;
                }
        }
        if (trans->hmp->version >= HAMMER_VOL_VERSION_SIX) {
                if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        ip->ino_data.cap_flags |=
                                HAMMER_INODE_CAP_DIRHASH_ALG1;
                }
        }

        /*
         * Setup the ".." pointer.  This only needs to be done for directories
         * but we do it for all objects as a recovery aid if dip exists.
         * The inode is probably a PFS root if dip is NULL.
         */
        if (dip)
                ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;

        switch(ip->ino_leaf.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        if (dip) {
                xuid = hammer_to_unix_xid(&dip->ino_data.uid);
                xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
                                             xuid, cred, &vap->va_mode);
        } else {
                xuid = 0;
        }
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
        else
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
        else if (dip)
                ip->ino_data.gid = dip->ino_data.gid;

        hammer_ref(&ip->lock);

        if (pfsm) {
                ip->pfsm = pfsm;
                hammer_ref(&pfsm->lock);
                error = 0;
        } else if (dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
                error = 0;
        } else {
                ip->pfsm = hammer_load_pseudofs(trans,
                                                ip->obj_localization,
                                                &error);
                error = 0;      /* ignore ENOENT */
        }

        if (error) {
                hammer_free_inode(ip);
                ip = NULL;
        } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                hpanic("duplicate obj_id %llx", (long long)ip->obj_id);
                /* not reached */
                hammer_free_inode(ip);
        }
        *ipp = ip;
        return(error);
}

/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
        struct hammer_mount *hmp;

        hmp = ip->hmp;
        KKASSERT(hammer_oneref(&ip->lock));
        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        hammer_uncache_node(&ip->cache[2]);
        hammer_uncache_node(&ip->cache[3]);
        hammer_inode_wakereclaims(ip);
        if (ip->objid_cache)
                hammer_clear_objid(ip);
        --hammer_count_inodes;
        --hmp->count_inodes;
        if (ip->pfsm) {
                hammer_rel_pseudofs(hmp, ip->pfsm);
                ip->pfsm = NULL;
        }
        kfree(ip, hmp->m_inodes);
}

/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
                     uint32_t localization, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_inode_t ip;
        hammer_pseudofs_inmem_t pfsm;
        struct hammer_cursor cursor;
        int bytes;

retry:
        pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
        if (pfsm) {
                hammer_ref(&pfsm->lock);
                *errorp = 0;
                return(pfsm);
        }

        /*
         * PFS records are associated with the root inode (not the PFS root
         * inode, but the real root).  Avoid an infinite recursion if loading
         * the PFS for the real root.
         */
        if (localization) {
                ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
                                      HAMMER_MAX_TID,
                                      HAMMER_DEF_LOCALIZATION, 0, errorp);
        } else {
                ip = NULL;
        }

        pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
        pfsm->localization = localization;
        pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
        pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

        hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
        cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION |
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        if (ip)
                *errorp = hammer_ip_lookup(&cursor);
        else
                *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == 0) {
                *errorp = hammer_ip_resolve_data(&cursor);
                if (*errorp == 0) {
                        if (hammer_is_pfs_deleted(&cursor.data->pfsd)) {
                                *errorp = ENOENT;
                        } else {
                                bytes = cursor.leaf->data_len;
                                if (bytes > sizeof(pfsm->pfsd))
                                        bytes = sizeof(pfsm->pfsd);
                                bcopy(cursor.data, &pfsm->pfsd, bytes);
                        }
                }
        }
        hammer_done_cursor(&cursor);

        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_ref(&pfsm->lock);
        if (ip)
                hammer_rel_inode(ip, 0);
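        /*
         * If the insertion collides, another thread loaded the same
         * PFS while we were working; throw our copy away and retry
         * the in-memory lookup.
         */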
        if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
                kfree(pfsm, hmp->m_misc);
                goto retry;
        }
        return(pfsm);
}

/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
        struct hammer_cursor cursor;
        hammer_record_t record;
        hammer_inode_t ip;
        int error;

        /*
         * PFS records are associated with the root inode (not the PFS root
         * inode, but the real root).
         */
        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization |
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = pfsm->localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        /*
         * Replace any in-memory version of the record.
         */
        error = hammer_ip_lookup(&cursor);
        if (error == 0 && hammer_cursor_inmem(&cursor)) {
                record = cursor.iprec;
                if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
                        KKASSERT(cursor.deadlk_rec == NULL);
                        hammer_ref(&record->lock);
                        cursor.deadlk_rec = record;
                        error = EDEADLK;
                } else {
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        error = 0;
                }
        }

        /*
         * Allocate replacement general record.  The backend flush will
         * delete any on-disk version of the record.
         */
        if (error == 0 || error == ENOENT) {
                record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
                record->type = HAMMER_MEM_RECORD_GENERAL;

                record->leaf.base.localization = ip->obj_localization |
                                                 HAMMER_LOCALIZE_MISC;
                record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
                record->leaf.base.key = pfsm->localization;
                record->leaf.data_len = sizeof(pfsm->pfsd);
                bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
                error = hammer_ip_add_record(trans, record);
        }
        hammer_done_cursor(&cursor);
        if (error == EDEADLK)
                goto retry;
        hammer_rel_inode(ip, 0);
        return(error);
}

/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 *
 * Make sure the caller isn't trying to create a PFS from a non-root
 * PFS.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
                       hammer_pseudofs_inmem_t pfsm, hammer_inode_t dip)
{
        hammer_inode_t ip;
        struct vattr vap;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              pfsm->localization, 0, &error);
        if (ip == NULL) {
                if (lo_to_pfs(dip->obj_localization) != HAMMER_ROOT_PFSID) {
                        hmkprintf(trans->hmp,
                                "Warning: creating a PFS from non-root PFS "
                                "is not allowed\n");
                        return(EINVAL);
                }
                vattr_null(&vap);
                vap.va_mode = 0755;
                vap.va_type = VDIR;
                error = hammer_create_inode(trans, &vap, cred,
                                            NULL, NULL, 0,
                                            pfsm, &ip);
                if (error == 0) {
                        ++ip->ino_data.nlinks;
                        hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
                }
        }
        if (ip)
                hammer_rel_inode(ip, 0);
        return(error);
}

/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static
int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
        int res;

        hammer_ref(&ip->lock);
        if (ip->vp && (ip->vp->v_flag & VPFSROOT)) {
                /*
                 * The hammer pfs-upgrade directive itself might have the
                 * root of the pfs open.  Just allow it.
                 */
                res = 0;
        } else {
                /*
                 * Don't allow any subdirectories or files to be open.
                 */
                if (hammer_isactive(&ip->lock) == 2 && ip->vp)
                        vclean_unlocked(ip->vp);
                if (hammer_isactive(&ip->lock) == 1 && ip->vp == NULL)
                        res = 0;
                else
                        res = -1;       /* stop, someone is using the inode */
        }
        hammer_rel_inode(ip, 0);
        return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, uint32_t localization)
{
        int res;
        int try;

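        /*
         * Scan the PFS's inodes up to four times, syncing the flusher
         * between passes.  Accept success only on a clean pass after
         * the early flushes have had a chance to disassociate any
         * dirty inodes.
         */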
1200         for (try = res = 0; try < 4; ++try) {
1201                 res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
1202                                            hammer_inode_pfs_cmp,
1203                                            hammer_unload_pseudofs_callback,
1204                                            &localization);
1205                 if (res == 0 && try > 1)
1206                         break;
1207                 hammer_flusher_sync(trans->hmp);
1208         }
1209         if (res != 0)
1210                 res = ENOTEMPTY;
1211         return(res);
1212 }
1213
1214
1215 /*
1216  * Release a reference on a PFS
1217  */
1218 void
1219 hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
1220 {
1221         hammer_rel(&pfsm->lock);
1222         if (hammer_norefs(&pfsm->lock)) {
1223                 RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
1224                 kfree(pfsm, hmp->m_misc);
1225         }
1226 }
1227
1228 /*
1229  * Called by hammer_sync_inode().
1230  */
1231 static int
1232 hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
1233 {
1234         hammer_transaction_t trans = cursor->trans;
1235         hammer_record_t record;
1236         int error;
1237         int redirty;
1238
1239 retry:
1240         error = 0;
1241
1242         /*
1243          * If the inode has a presence on-disk then locate it and mark
1244          * it deleted, setting DELONDISK.
1245          *
1246          * The record may or may not be physically deleted, depending on
1247          * the retention policy.
1248          */
1249         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
1250             HAMMER_INODE_ONDISK) {
1251                 hammer_normalize_cursor(cursor);
1252                 cursor->key_beg.localization = ip->obj_localization |
1253                                                HAMMER_LOCALIZE_INODE;
1254                 cursor->key_beg.obj_id = ip->obj_id;
1255                 cursor->key_beg.key = 0;
1256                 cursor->key_beg.create_tid = 0;
1257                 cursor->key_beg.delete_tid = 0;
1258                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
1259                 cursor->key_beg.obj_type = 0;
1260                 cursor->asof = ip->obj_asof;
1261                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1262                 cursor->flags |= HAMMER_CURSOR_ASOF;
1263                 cursor->flags |= HAMMER_CURSOR_BACKEND;
1264
1265                 error = hammer_btree_lookup(cursor);
1266                 if (hammer_debug_inode)
1267                         hdkprintf("IPDEL %p %08x %d", ip, ip->flags, error);
1268
1269                 if (error == 0) {
1270                         error = hammer_ip_delete_record(cursor, ip, trans->tid);
1271                         if (hammer_debug_inode)
1272                                 hdkprintf("error %d\n", error);
1273                         if (error == 0) {
1274                                 ip->flags |= HAMMER_INODE_DELONDISK;
1275                         }
1276                         if (cursor->node)
1277                                 hammer_cache_node(&ip->cache[0], cursor->node);
1278                 }
1279                 if (error == EDEADLK) {
1280                         hammer_done_cursor(cursor);
1281                         error = hammer_init_cursor(trans, cursor,
1282                                                    &ip->cache[0], ip);
1283                         if (hammer_debug_inode)
1284                                 hdkprintf("IPDED %p %d\n", ip, error);
1285                         if (error == 0)
1286                                 goto retry;
1287                 }
1288         }
1289
1290         /*
1291          * Ok, write out the initial record or a new record (after deleting
1292          * the old one), unless the DELETED flag is set.  This routine will
1293          * clear DELONDISK if it writes out a record.
1294          *
1295          * Update our inode statistics if this is the first application of
1296          * the inode on-disk.
1297          */
1298         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
1299                 /*
1300                  * Generate a record and write it to the media.  We clean-up
1301                  * the state before releasing so we do not have to set-up
1302                  * a flush_group.
1303                  */
1304                 record = hammer_alloc_mem_record(ip, 0);
1305                 record->type = HAMMER_MEM_RECORD_INODE;
1306                 record->flush_state = HAMMER_FST_FLUSH;
1307                 record->leaf = ip->sync_ino_leaf;
1308                 record->leaf.base.create_tid = trans->tid;
1309                 record->leaf.data_len = sizeof(ip->sync_ino_data);
1310                 record->leaf.create_ts = trans->time32;
1311                 record->data = (void *)&ip->sync_ino_data;
1312                 record->flags |= HAMMER_RECF_INTERLOCK_BE;
1313
1314                 /*
1315                  * If this flag is set we cannot sync the new file size
1316                  * because we haven't finished related truncations.  The
1317                  * inode will be flushed in another flush group to finish
1318                  * the job.
1319                  */
1320                 if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
1321                     ip->sync_ino_data.size != ip->ino_data.size) {
1322                         redirty = 1;
1323                         ip->sync_ino_data.size = ip->ino_data.size;
1324                 } else {
1325                         redirty = 0;
1326                 }
1327
1328                 for (;;) {
1329                         error = hammer_ip_sync_record_cursor(cursor, record);
1330                         if (hammer_debug_inode)
1331                                 hdkprintf("GENREC %p rec %08x %d\n",
1332                                         ip, record->flags, error);
1333                         if (error != EDEADLK)
1334                                 break;
1335                         hammer_done_cursor(cursor);
1336                         error = hammer_init_cursor(trans, cursor,
1337                                                    &ip->cache[0], ip);
1338                         if (hammer_debug_inode)
1339                                 hdkprintf("GENREC reinit %d\n", error);
1340                         if (error)
1341                                 break;
1342                 }
1343
1344                 /*
1345                  * Note:  The record was never on the inode's record tree
1346                  * so just wave our hands importantly and destroy it.
1347                  */
1348                 record->flags |= HAMMER_RECF_COMMITTED;
1349                 record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
1350                 record->flush_state = HAMMER_FST_IDLE;
1351                 ++ip->rec_generation;
1352                 hammer_rel_mem_record(record);
1353
1354                 /*
1355                  * Finish up.
1356                  */
1357                 if (error == 0) {
1358                         if (hammer_debug_inode)
1359                                 hdkprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
1360                         ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
1361                                             HAMMER_INODE_SDIRTY |
1362                                             HAMMER_INODE_ATIME |
1363                                             HAMMER_INODE_MTIME);
1364                         ip->flags &= ~HAMMER_INODE_DELONDISK;
1365                         if (redirty)
1366                                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1367
1368                         /*
1369                          * Root volume count of inodes
1370                          */
1371                         hammer_sync_lock_sh(trans);
1372                         if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
1373                                 hammer_modify_volume_field(trans,
1374                                                            trans->rootvol,
1375                                                            vol0_stat_inodes);
1376                                 ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
1377                                 hammer_modify_volume_done(trans->rootvol);
1378                                 ip->flags |= HAMMER_INODE_ONDISK;
1379                                 if (hammer_debug_inode)
1380                                         hdkprintf("NOWONDISK %p\n", ip);
1381                         }
1382                         hammer_sync_unlock(trans);
1383                 }
1384         }
1385
1386         /*
1387          * If the inode has been destroyed, clean out any left-over flags
1388          * that may have been set by the frontend.
1389          */
1390         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
1391                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
1392                                     HAMMER_INODE_SDIRTY |
1393                                     HAMMER_INODE_ATIME |
1394                                     HAMMER_INODE_MTIME);
1395         }
1396         return(error);
1397 }
1398
1399 /*
1400  * Update only the itimes fields.
1401  *
1402  * ATIME can be updated without generating any UNDO.  MTIME is updated
1403  * with UNDO so it is guaranteed to be synchronized properly in case of
1404  * a crash.
1405  *
1406  * Neither field is included in the B-Tree leaf element's CRC, which is how
1407  * we can get away with updating ATIME the way we do.
1408  */
1409 static int
1410 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
1411 {
1412         hammer_transaction_t trans = cursor->trans;
1413         int error;
1414
1415 retry:
1416         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
1417             HAMMER_INODE_ONDISK) {
1418                 return(0);
1419         }
1420
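        /*
         * Point the cursor at the inode record itself: localized inode
         * key space, key 0, as-of the inode's snapshot TID.  The cursor
         * is flagged as a backend cursor since this runs from the
         * flusher.
         */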
1421         hammer_normalize_cursor(cursor);
1422         cursor->key_beg.localization = ip->obj_localization |
1423                                        HAMMER_LOCALIZE_INODE;
1424         cursor->key_beg.obj_id = ip->obj_id;
1425         cursor->key_beg.key = 0;
1426         cursor->key_beg.create_tid = 0;
1427         cursor->key_beg.delete_tid = 0;
1428         cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
1429         cursor->key_beg.obj_type = 0;
1430         cursor->asof = ip->obj_asof;
1431         cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1432         cursor->flags |= HAMMER_CURSOR_ASOF;
1433         cursor->flags |= HAMMER_CURSOR_GET_DATA;
1434         cursor->flags |= HAMMER_CURSOR_BACKEND;
1435
1436         error = hammer_btree_lookup(cursor);
1437         if (error == 0) {
1438                 hammer_cache_node(&ip->cache[0], cursor->node);
1439                 if (ip->sync_flags & HAMMER_INODE_MTIME) {
1440                         /*
1441                          * Updating MTIME requires an UNDO.  Just cover
1442                          * both atime and mtime.
1443                          */
1444                         hammer_sync_lock_sh(trans);
1445                         hammer_modify_buffer(trans, cursor->data_buffer,
1446                                 &cursor->data->inode.mtime,
1447                                 sizeof(cursor->data->inode.atime) +
1448                                 sizeof(cursor->data->inode.mtime));
1449                         cursor->data->inode.atime = ip->sync_ino_data.atime;
1450                         cursor->data->inode.mtime = ip->sync_ino_data.mtime;
1451                         hammer_modify_buffer_done(cursor->data_buffer);
1452                         hammer_sync_unlock(trans);
1453                 } else if (ip->sync_flags & HAMMER_INODE_ATIME) {
1454                         /*
1455                          * Updating atime only can be done in-place with
1456                          * no UNDO.
1457                          */
1458                         hammer_sync_lock_sh(trans);
1459                         hammer_modify_buffer_noundo(trans, cursor->data_buffer);
1460                         cursor->data->inode.atime = ip->sync_ino_data.atime;
1461                         hammer_modify_buffer_done(cursor->data_buffer);
1462                         hammer_sync_unlock(trans);
1463                 }
1464                 ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
1465         }
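        /*
         * On a cursor deadlock, rebuild the cursor and retry the whole
         * lookup/update from the top.
         */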
1466         if (error == EDEADLK) {
1467                 hammer_done_cursor(cursor);
1468                 error = hammer_init_cursor(trans, cursor, &ip->cache[0], ip);
1469                 if (error == 0)
1470                         goto retry;
1471         }
1472         return(error);
1473 }
1474
1475 /*
1476  * Release a reference on an inode, flush as requested.
1477  *
1478  * On the last reference we queue the inode to the flusher for its final
1479  * disposition.
1480  */
1481 void
1482 hammer_rel_inode(struct hammer_inode *ip, int flush)
1483 {
1484         /*
1485          * Handle disposition when dropping the last ref.
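          *
          * We loop because a flush initiated here can add or drop
          * references; retest until the inode can be unloaded or a
          * remaining reference dropped.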
1486          */
1487         for (;;) {
1488                 if (hammer_oneref(&ip->lock)) {
1489                         /*
1490                          * Determine whether on-disk action is needed for
1491                          * the inode's final disposition.
1492                          */
1493                         KKASSERT(ip->vp == NULL);
1494                         hammer_inode_unloadable_check(ip, 0);
1495                         if (ip->flags & HAMMER_INODE_MODMASK) {
1496                                 hammer_flush_inode(ip, 0);
1497                         } else if (hammer_oneref(&ip->lock)) {
1498                                 hammer_unload_inode(ip);
1499                                 break;
1500                         }
1501                 } else {
1502                         if (flush)
1503                                 hammer_flush_inode(ip, 0);
1504
1505                         /*
1506                          * The inode still has multiple refs, try to drop
1507                          * one ref.
1508                          */
1509                         KKASSERT(hammer_isactive(&ip->lock) >= 1);
1510                         if (hammer_isactive(&ip->lock) > 1) {
1511                                 hammer_rel(&ip->lock);
1512                                 break;
1513                         }
1514                 }
1515         }
1516 }
1517
1518 /*
1519  * Unload and destroy the specified inode.  Must be called with one remaining
1520  * reference.  The reference is disposed of.
1521  *
1522  * The inode must be completely clean.
1523  */
1524 static int
1525 hammer_unload_inode(struct hammer_inode *ip)
1526 {
1527         hammer_mount_t hmp = ip->hmp;
1528
1529         KASSERT(hammer_oneref(&ip->lock),
1530                 ("hammer_unload_inode: %d refs", hammer_isactive(&ip->lock)));
1531         KKASSERT(ip->vp == NULL);
1532         KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
1533         KKASSERT(ip->cursor_ip_refs == 0);
1534         KKASSERT(hammer_notlocked(&ip->lock));
1535         KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);
1536
1537         KKASSERT(RB_EMPTY(&ip->rec_tree));
1538         KKASSERT(TAILQ_EMPTY(&ip->target_list));
1539
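        /*
         * Remove the inode from the redo tree (if it is tracked there)
         * and from the per-mount inode RB tree before freeing it.
         */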
1540         if (ip->flags & HAMMER_INODE_RDIRTY) {
1541                 RB_REMOVE(hammer_redo_rb_tree, &hmp->rb_redo_root, ip);
1542                 ip->flags &= ~HAMMER_INODE_RDIRTY;
1543         }
1544         RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);
1545
1546         hammer_free_inode(ip);
1547         return(0);
1548 }
1549
1550 /*
1551  * Called during unmounting if a critical error occurred.  The in-memory
1552  * inode and all related structures are destroyed.
1553  *
1554  * If a critical error did not occur the unmount code calls the standard
1555  * release and asserts that the inode is gone.
1556  */
1557 int
1558 hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
1559 {
1560         hammer_record_t rec;
1561
1562         /*
1563          * Get rid of the inode's in-memory records, regardless of their
1564          * state, and clear the mod-mask.
1565          */
1566         while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
1567                 TAILQ_REMOVE(&ip->target_list, rec, target_entry);
1568                 rec->target_ip = NULL;
1569                 if (rec->flush_state == HAMMER_FST_SETUP)
1570                         rec->flush_state = HAMMER_FST_IDLE;
1571         }
1572         while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
1573                 if (rec->flush_state == HAMMER_FST_FLUSH)
1574                         --rec->flush_group->refs;
1575                 else
1576                         hammer_ref(&rec->lock);
1577                 KKASSERT(hammer_oneref(&rec->lock));
1578                 rec->flush_state = HAMMER_FST_IDLE;
1579                 rec->flush_group = NULL;
1580                 rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
1581                 rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
1582                 ++ip->rec_generation;
1583                 hammer_rel_mem_record(rec);
1584         }
1585         ip->flags &= ~HAMMER_INODE_MODMASK;
1586         ip->sync_flags &= ~HAMMER_INODE_MODMASK;
1587         KKASSERT(ip->vp == NULL);
1588
1589         /*
1590          * Remove the inode from any flush group, force it idle.  FLUSH
1591          * and SETUP states have an inode ref.
1592          */
1593         switch(ip->flush_state) {
1594         case HAMMER_FST_FLUSH:
1595                 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
1596                 --ip->flush_group->refs;
1597                 ip->flush_group = NULL;
1598                 /* fall through */
1599         case HAMMER_FST_SETUP:
1600                 hammer_rel(&ip->lock);
1601                 ip->flush_state = HAMMER_FST_IDLE;
1602                 /* fall through */
1603         case HAMMER_FST_IDLE:
1604                 break;
1605         }
1606
1607         /*
1608          * There shouldn't be any associated vnode.  The unload needs at
1609          * least one ref; if we do have a vp, steal its ip ref.
1610          */
1611         if (ip->vp) {
1612                 hdkprintf("Unexpected vnode association ip %p vp %p\n",
1613                         ip, ip->vp);
1614                 ip->vp->v_data = NULL;
1615                 ip->vp = NULL;
1616         } else {
1617                 hammer_ref(&ip->lock);
1618         }
1619         hammer_unload_inode(ip);
1620         return(0);
1621 }
1622
1623 /*
1624  * Called on mount -u when switching from RW to RO or vice versa.  Adjust
1625  * the read-only flag for cached inodes.
1626  *
1627  * This routine is called from a RB_SCAN().
1628  */
1629 int
1630 hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
1631 {
1632         hammer_mount_t hmp = ip->hmp;
1633
1634         if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
1635                 ip->flags |= HAMMER_INODE_RO;
1636         else
1637                 ip->flags &= ~HAMMER_INODE_RO;
1638         return(0);
1639 }
1640
1641 /*
1642  * A transaction has modified an inode, requiring updates as specified by
1643  * the passed flags.
1644  *
1645  * HAMMER_INODE_DDIRTY: Inode data has been updated, not including mtime/atime,
1646  *                      and not including size changes due to write-append
1647  *                      (but other size changes are included).
1648  * HAMMER_INODE_SDIRTY: Inode data has been updated, size changes due to
1649  *                      write-append.
1650  * HAMMER_INODE_XDIRTY: Dirty in-memory records
1651  * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
1652  * HAMMER_INODE_DELETED: Inode record/data must be deleted
1653  * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
1654  */
1655 void
1656 hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
1657 {
1658         /*
1659          * A ronly value of 0 or 2 does not trigger the assertion;
1660          * 2 is a special error state.
1661          */
1662         KKASSERT(ip->hmp->ronly != 1 ||
1663                   (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
1664                             HAMMER_INODE_SDIRTY |
1665                             HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
1666                             HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
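        /*
         * Track the first dirtying of the inode in the mount-wide
         * rsv_inodes count, which feeds into free-space estimates.
         */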
1667         if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
1668                 ip->flags |= HAMMER_INODE_RSV_INODES;
1669                 ++ip->hmp->rsv_inodes;
1670         }
1671
1672         /*
1673          * Set the NEWINODE flag in the transaction if the inode
1674          * transitions to a dirty state.  This is used to track
1675          * the load on the inode cache.
1676          */
1677         if (trans &&
1678             (ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1679             (flags & HAMMER_INODE_MODMASK)) {
1680                 trans->flags |= HAMMER_TRANSF_NEWINODE;
1681         }
1682         if (flags & HAMMER_INODE_MODMASK)
1683                 hammer_inode_dirty(ip);
1684         ip->flags |= flags;
1685 }
1686
1687 /*
1688  * Attempt to quickly update the atime for a hammer inode.  Return 0 on
1689  * success, -1 on failure.
1690  *
1691  * We attempt to update the atime with only the ip lock and not the
1692  * whole filesystem lock in order to improve concurrency.  We can only
1693  * do this safely if the ATIME flag is already pending on the inode.
1694  *
1695  * This function is called via a vnops path (ip pointer is stable) without
1696  * fs_token held.
1697  */
1698 int
1699 hammer_update_atime_quick(hammer_inode_t ip)
1700 {
1701         struct timeval tv;
1702         int res = -1;
1703
1704         if ((ip->flags & HAMMER_INODE_RO) ||
1705             (ip->hmp->mp->mnt_flag & MNT_NOATIME)) {
1706                 /*
1707                  * Silently indicate success on read-only mount/snap
1708                  */
1709                 res = 0;
1710         } else if (ip->flags & HAMMER_INODE_ATIME) {
1711                 /*
1712                  * Double check with inode lock held against backend.  This
1713                  * is only safe if all we need to do is update
1714                  * ino_data.atime.
1715                  */
1716                 getmicrotime(&tv);
1717                 hammer_lock_ex(&ip->lock);
1718                 if (ip->flags & HAMMER_INODE_ATIME) {
1719                         ip->ino_data.atime =
1720                             (unsigned long)tv.tv_sec * 1000000ULL + tv.tv_usec;
1721                         res = 0;
1722                 }
1723                 hammer_unlock(&ip->lock);
1724         }
1725         return res;
1726 }
1727
1728 /*
1729  * Request that an inode be flushed.  This whole mess cannot block and may
1730  * recurse (if not synchronous).  Once requested HAMMER will attempt to
1731  * actively flush the inode until the flush can be done.
1732  *
1733  * The inode may already be flushing, or may be in a setup state.  We can
1734  * place the inode in a flushing state if it is currently idle and flag it
1735  * to reflush if it is currently flushing.
1736  *
1737  * Upon return, if the inode could not be flushed due to a setup
1738  * dependency, it will be automatically flushed when the dependency
1739  * is satisfied.
1740  */
1741 void
1742 hammer_flush_inode(hammer_inode_t ip, int flags)
1743 {
1744         hammer_mount_t hmp;
1745         hammer_flush_group_t flg;
1746         int good;
1747
1748         /*
1749          * fill_flush_group is the first flush group we may be able to
1750          * continue filling; it may be open or closed but it will always
1751          * be past the currently flushing (running) flg.
1752          *
1753          * next_flush_group is the next open flush group.
1754          */
1755         hmp = ip->hmp;
1756         while ((flg = hmp->fill_flush_group) != NULL) {
1757                 KKASSERT(flg->running == 0);
1758                 if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit &&
1759                     flg->total_count <= hammer_autoflush) {
1760                         break;
1761                 }
1762                 hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry);
1763                 hammer_flusher_async(ip->hmp, flg);
1764         }
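        /*
         * No suitable flush group exists: allocate a fresh one, assign
         * the next flusher sequence number, and make it the fill/next
         * group if no other group is pending.
         */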
1765         if (flg == NULL) {
1766                 flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
1767                 flg->seq = hmp->flusher.next++;
1768                 if (hmp->next_flush_group == NULL)
1769                         hmp->next_flush_group = flg;
1770                 if (hmp->fill_flush_group == NULL)
1771                         hmp->fill_flush_group = flg;
1772                 RB_INIT(&flg->flush_tree);
1773                 TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
1774         }
1775
1776         /*
1777          * Trivial 'nothing to flush' case.  If the inode is in a SETUP
1778          * state we have to put it back into an IDLE state so we can
1779          * drop the extra ref.
1780          *
1781          * If we have a parent dependency we must still fall through
1782          * so we can run it.
1783          */
1784         if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
1785                 if (ip->flush_state == HAMMER_FST_SETUP &&
1786                     TAILQ_EMPTY(&ip->target_list)) {
1787                         ip->flush_state = HAMMER_FST_IDLE;
1788                         hammer_rel_inode(ip, 0);
1789                 }
1790                 if (ip->flush_state == HAMMER_FST_IDLE)
1791                         return;
1792         }
1793
1794         /*
1795          * Our flush action will depend on the current state.
1796          */
1797         switch(ip->flush_state) {
1798         case HAMMER_FST_IDLE:
1799                 /*
1800                  * We have no dependencies and can flush immediately.  Some
1801                  * of our children may not be flushable, so we have to re-test
1802                  * with that additional knowledge.
1803                  */
1804                 hammer_flush_inode_core(ip, flg, flags);
1805                 break;
1806         case HAMMER_FST_SETUP:
1807                 /*
1808                  * Recurse upwards through dependencies via target_list
1809                  * and start their flusher actions going if possible.
1810                  *
1811                  * 'good' is our connectivity.  -1 means we have none and
1812                  * can't flush, 0 means there weren't any dependencies, and
1813                  * 1 means we have good connectivity.
1814                  */
1815                 good = hammer_setup_parent_inodes(ip, 0, flg);
1816
1817                 if (good >= 0) {
1818                         /*
1819                          * We can continue if good >= 0.  Determine how
1820                          * many records under our inode can be flushed (and
1821                          * mark them).
1822                          */
1823                         hammer_flush_inode_core(ip, flg, flags);
1824                 } else {
1825                         /*
1826                          * Parent has no connectivity, tell it to flush
1827                          * us as soon as it does.
1828                          *
1829                          * The REFLUSH flag is also needed to trigger
1830                  * dependency wakeups.
1831                          */
1832                         ip->flags |= HAMMER_INODE_CONN_DOWN |
1833                                      HAMMER_INODE_REFLUSH;
1834                         if (flags & HAMMER_FLUSH_SIGNAL) {
1835                                 ip->flags |= HAMMER_INODE_RESIGNAL;
1836                                 hammer_flusher_async(ip->hmp, flg);
1837                         }
1838                 }
1839                 break;
1840         case HAMMER_FST_FLUSH:
1841                 /*
1842                  * We are already flushing, flag the inode to reflush
1843                  * if needed after it completes its current flush.
1844                  *
1845                  * The REFLUSH flag is also needed to trigger
1846                  * dependency wakeups.
1847                  */
1848                 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
1849                         ip->flags |= HAMMER_INODE_REFLUSH;
1850                 if (flags & HAMMER_FLUSH_SIGNAL) {
1851                         ip->flags |= HAMMER_INODE_RESIGNAL;
1852                         hammer_flusher_async(ip->hmp, flg);
1853                 }
1854                 break;
1855         }
1856 }
1857
1858 /*
1859  * Scan ip->target_list, which is a list of records owned by PARENTS to our
1860  * ip which reference our ip.
1861  *
1862  * XXX This is a huge mess of recursive code, but not one bit of it blocks
1863  *     so for now do not ref/deref the structures.  Note that if we use the
1864  *     ref/rel code later, the rel CAN block.
1865  */
1866 static int
1867 hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
1868                            hammer_flush_group_t flg)
1869 {
1870         hammer_record_t depend;
1871         int good;
1872         int r;
1873
1874         /*
1875          * If we hit our recursion limit and we have parent dependencies
1876          * we cannot continue.  Returning < 0 will cause us to be flagged
1877          * for reflush.  Returning -2 cuts off additional dependency checks
1878          * because they are likely to also hit the depth limit.
1879          *
1880          * We cannot return < 0 if there are no dependencies, or there
1881          * might not be anything to wake up (ip).
1882          */
1883         if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
1884                 if (hammer_debug_general & 0x10000)
1885                         hkrateprintf(&hammer_gen_krate,
1886                             "Warning: depth limit reached on "
1887                             "setup recursion, inode %p %016llx\n",
1888                             ip, (long long)ip->obj_id);
1889                 return(-2);
1890         }
1891
1892         /*
1893          * Scan dependencies
1894          */
1895         good = 0;
1896         TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
1897                 r = hammer_setup_parent_inodes_helper(depend, depth, flg);
1898                 KKASSERT(depend->target_ip == ip);
1899                 if (r < 0 && good == 0)
1900                         good = -1;
1901                 if (r > 0)
1902                         good = 1;
1903
1904                 /*
1905                  * If we failed due to the recursion depth limit then stop
1906                  * now.
1907                  */
1908                 if (r == -2)
1909                         break;
1910         }
1911         return(good);
1912 }
1913
1914 /*
1915  * This helper function takes a record representing the dependency between
1916  * the parent inode and child inode.
1917  *
1918  * record               = record in question (*rec in below)
1919  * record->ip           = parent inode (*pip in below)
1920  * record->target_ip    = child inode (*ip in below)
1921  *
1922  * *pip--------------\
1923  *    ^               \rec_tree
1924  *     \               \
1925  *      \ip            /\\\\\ rbtree of recs from parent inode's view
1926  *       \            //\\\\\\
1927  *        \          / ........
1928  *         \        /
1929  *          \------*rec------target_ip------>*ip
1930  *               ...target_entry<----...----->target_list<---...
1931  *                                            list of recs from inode's view
1932  *
1933  * We are asked to recurse upwards and convert the record from SETUP
1934  * to FLUSH if possible.
1935  *
1936  * Return 1 if the record gives us connectivity
1937  *
1938  * Return 0 if the record is not relevant
1939  *
1940  * Return -1 if we can't resolve the dependency and there is no connectivity.
1941  */
1942 static int
1943 hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
1944                                   hammer_flush_group_t flg)
1945 {
1946         hammer_inode_t pip;
1947         int good;
1948
1949         KKASSERT(record->flush_state != HAMMER_FST_IDLE);
1950         pip = record->ip;
1951
1952         /*
1953          * If the record is already flushing, is it in our flush group?
1954          *
1955          * If it is in our flush group but it is a general record or a
1956          * delete-on-disk, it does not improve our connectivity (return 0),
1957          * and if the target inode is not trying to destroy itself we can't
1958          * allow the operation yet anyway (the second return -1).
1959          */
1960         if (record->flush_state == HAMMER_FST_FLUSH) {
1961                 /*
1962                  * If not in our flush group ask the parent to reflush
1963                  * us as soon as possible.
1964                  */
1965                 if (record->flush_group != flg) {
1966                         pip->flags |= HAMMER_INODE_REFLUSH;
1967                         record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1968                         return(-1);
1969                 }
1970
1971                 /*
1972                  * If in our flush group everything is already set up,
1973                  * just return whether the record will improve our
1974                  * visibility or not.
1975                  */
1976                 if (record->type == HAMMER_MEM_RECORD_ADD)
1977                         return(1);
1978                 return(0);
1979         }
1980
1981         /*
1982          * It must be a setup record.  Try to resolve the setup dependencies
1983          * by recursing upwards so we can place ip on the flush list.
1984          *
1985          * Limit ourselves to 20 levels of recursion to avoid blowing out
1986          * the kernel stack.  If we hit the recursion limit we can't flush
1987          * until the parent flushes.  The parent will flush independently
1988          * on its own and ultimately a deep recursion will be resolved.
1989          */
1990         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1991
1992         good = hammer_setup_parent_inodes(pip, depth + 1, flg);
1993
1994         /*
1995          * If good < 0 the parent has no connectivity and we cannot safely
1996          * flush the directory entry, which also means we can't flush our
1997          * ip.  Flag us for downward recursion once the parent's
1998          * connectivity is resolved.  Flag the parent for [re]flush or it
1999          * may not check for downward recursions.
2000          */
2001         if (good < 0) {
2002                 pip->flags |= HAMMER_INODE_REFLUSH;
2003                 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
2004                 return(good);
2005         }
2006
2007         /*
2008          * We are go, place the parent inode in a flushing state so we can
2009          * place its record in a flushing state.  Note that the parent
2010          * may already be flushing.  The record must be in the same flush
2011          * group as the parent.
2012          */
2013         if (pip->flush_state != HAMMER_FST_FLUSH)
2014                 hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
2015         KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
2016
2017         /*
2018          * It is possible for a rename to create a loop in the recursion
2019          * and revisit a record.  This will result in the record being
2020          * placed in a flush state unexpectedly.  This check deals with
2021                  * that case.
2022          */
2023         if (record->flush_state == HAMMER_FST_FLUSH) {
2024                 if (record->type == HAMMER_MEM_RECORD_ADD)
2025                         return(1);
2026                 return(0);
2027         }
2028
2029         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
2030
2031 #if 0
2032         if (record->type == HAMMER_MEM_RECORD_DEL &&
2033             (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
2034                 /*
2035                  * Regardless of flushing state we cannot sync this path if the
2036                  * record represents a delete-on-disk but the target inode
2037                  * is not ready to sync its own deletion.
2038                  *
2039                  * XXX need to count effective nlinks to determine whether
2040                  * the flush is ok, otherwise removing a hardlink will
2041                  * just leave the DEL record to rot.
2042                  */
2043                 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
2044                 return(-1);
2045         } else
2046 #endif
2047         if (pip->flush_group == flg) {
2048                 /*
2049                  * Because we have not calculated nlinks yet we can just
2050                  * set records to the flush state if the parent is in
2051                  * the same flush group as we are.
2052                  */
2053                 record->flush_state = HAMMER_FST_FLUSH;
2054                 record->flush_group = flg;
2055                 ++record->flush_group->refs;
2056                 hammer_ref(&record->lock);
2057
2058                 /*
2059                  * A general directory-add contributes to our visibility.
2060                  *
2061                  * Otherwise it is probably a directory-delete or
2062                  * delete-on-disk record and does not contribute to our
2063                  * visbility (but we can still flush it).
2064                  */
2065                 if (record->type == HAMMER_MEM_RECORD_ADD)
2066                         return(1);
2067                 return(0);
2068         } else {
2069                 /*
2070                  * If the parent is not in our flush group we cannot
2071                  * flush this record yet, there is no visibility.
2072                  * We tell the parent to reflush and mark ourselves
2073                  * so the parent knows it should flush us too.
2074                  */
2075                 pip->flags |= HAMMER_INODE_REFLUSH;
2076                 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
2077                 return(-1);
2078         }
2079 }
2080
2081 /*
2082  * This is the core routine placing an inode into the FST_FLUSH state.
2083  */
2084 static void
2085 hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
2086 {
2087         hammer_mount_t hmp = ip->hmp;
2088         int go_count;
2089
2090         /*
2091          * Set flush state and prevent the flusher from cycling into
2092          * the next flush group.  Do not place the ip on the list yet.
2093          * Inodes not in the idle state get an extra reference.
2094          */
2095         KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
2096         if (ip->flush_state == HAMMER_FST_IDLE)
2097                 hammer_ref(&ip->lock);
2098         ip->flush_state = HAMMER_FST_FLUSH;
2099         ip->flush_group = flg;
2100         ++hmp->flusher.group_lock;
2101         ++hmp->count_iqueued;
2102         ++hammer_count_iqueued;
2103         ++flg->total_count;
2104         hammer_redo_fifo_start_flush(ip);
2105
2106 #if 0
2107         /*
2108          * We need to be able to vfsync/truncate from the backend.
2109          *
2110          * XXX Any truncation from the backend will acquire the vnode
2111          *     independently.
2112          */
2113         KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
2114         if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
2115                 ip->flags |= HAMMER_INODE_VHELD;
2116                 vref(ip->vp);
2117         }
2118 #endif
2119
2120         /*
2121          * Figure out how many in-memory records we can actually flush
2122          * (not including inode meta-data, buffers, etc).
2123          */
2124         KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
2125         if (flags & HAMMER_FLUSH_RECURSION) {
2126                 /*
2127                  * If this is an upwards recursion we do not want to
2128                  * recurse down again!
2129                  */
2130                 go_count = 1;
2131 #if 0
2132         } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2133                 /*
2134                  * No new records are added if we must complete a flush
2135                  * from a previous cycle, but we do have to move the records
2136                  * from the previous cycle to the current one.
2137                  */
2138 #if 0
2139                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2140                                    hammer_syncgrp_child_callback, NULL);
2141 #endif
2142                 go_count = 1;
2143 #endif
2144         } else {
2145                 /*
2146                  * Normal flush, scan records and bring them into the flush.
2147                  * Directory adds and deletes are usually skipped (they are
2148                  * grouped with the related inode rather than with the
2149                  * directory).
2150                  *
2151                  * go_count can be negative, which means the scan aborted
2152                  * due to the flush group being over-full and we should
2153                  * flush what we have.
2154                  */
2155                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2156                                    hammer_setup_child_callback, NULL);
2157         }
2158
2159         /*
2160          * This is a more involved test that includes go_count.  If we
2161          * can't flush, flag the inode and return.  If go_count is 0 we
2162          * are unable to flush any records in our rec_tree and
2163          * must ignore the XDIRTY flag.
2164          */
2165         if (go_count == 0) {
2166                 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
2167                         --hmp->count_iqueued;
2168                         --hammer_count_iqueued;
2169
2170                         --flg->total_count;
2171                         ip->flush_state = HAMMER_FST_SETUP;
2172                         ip->flush_group = NULL;
2173                         if (flags & HAMMER_FLUSH_SIGNAL) {
2174                                 ip->flags |= HAMMER_INODE_REFLUSH |
2175                                              HAMMER_INODE_RESIGNAL;
2176                         } else {
2177                                 ip->flags |= HAMMER_INODE_REFLUSH;
2178                         }
2179 #if 0
2180                         if (ip->flags & HAMMER_INODE_VHELD) {
2181                                 ip->flags &= ~HAMMER_INODE_VHELD;
2182                                 vrele(ip->vp);
2183                         }
2184 #endif
2185
2186                         /*
2187                          * REFLUSH is needed to trigger dependency wakeups
2188                          * when an inode is in SETUP.
2189                          */
2190                         ip->flags |= HAMMER_INODE_REFLUSH;
2191                         if (--hmp->flusher.group_lock == 0)
2192                                 wakeup(&hmp->flusher.group_lock);
2193                         return;
2194                 }
2195         }
2196
2197         /*
2198          * Snapshot the state of the inode for the backend flusher.
2199          *
2200          * We continue to retain save_trunc_off even when all truncations
2201          * have been resolved as an optimization to determine if we can
2202          * skip the B-Tree lookup for overwrite deletions.
2203          *
2204          * NOTE: The DELETING flag is a mod flag, but it is also sticky,
2205          * and stays in ip->flags.  Once set, it stays set until the
2206          * inode is destroyed.
2207          */
2208         if (ip->flags & HAMMER_INODE_TRUNCATED) {
2209                 KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
2210                 ip->sync_trunc_off = ip->trunc_off;
2211                 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
2212                 ip->flags &= ~HAMMER_INODE_TRUNCATED;
2213                 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
2214
2215                 /*
2216                  * The save_trunc_off used to cache whether the B-Tree
2217                  * holds any records past that point is not used until
2218                  * after the truncation has succeeded, so we can safely
2219                  * set it now.
2220                  */
2221                 if (ip->save_trunc_off > ip->sync_trunc_off)
2222                         ip->save_trunc_off = ip->sync_trunc_off;
2223         }
2224         ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
2225                            ~HAMMER_INODE_TRUNCATED);
2226         ip->sync_ino_leaf = ip->ino_leaf;
2227         ip->sync_ino_data = ip->ino_data;
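        /*
         * Precedence note: ~ binds tighter than |, so the assignment
         * below clears all MODMASK bits except TRUNCATED.
         */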
2228         ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
2229
2230         /*
2231          * The flusher list inherits our inode and reference.
2232          */
2233         KKASSERT(flg->running == 0);
2234         RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip);
2235         if (--hmp->flusher.group_lock == 0)
2236                 wakeup(&hmp->flusher.group_lock);
2237
2238         /*
2239          * Auto-flush the group if it grows too large.  Make sure the
2240          * inode reclaim wait pipeline continues to work.
2241          */
2242         if (flg->total_count >= hammer_autoflush ||
2243             flg->total_count >= hammer_limit_reclaims / 4) {
2244                 if (hmp->fill_flush_group == flg)
2245                         hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry);
2246                 hammer_flusher_async(hmp, flg);
2247         }
2248 }
2249
2250 /*
2251  * Callback for scan of ip->rec_tree.  Try to include each record in our
2252  * flush.  ip->flush_group has been set but the inode has not yet been
2253  * moved into a flushing state.
2254  *
2255  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
2256  * both inodes.
2257  *
2258  * We return 1 for any record placed or found in FST_FLUSH, which prevents
2259  * the caller from shortcutting the flush.
2260  */
2261 static int
2262 hammer_setup_child_callback(hammer_record_t rec, void *data)
2263 {
2264         hammer_flush_group_t flg;
2265         hammer_inode_t target_ip;
2266         hammer_inode_t ip;
2267         int r;
2268
2269         /*
2270          * Records deleted or committed by the backend are ignored.
2271          * Note that the flush detects deleted frontend records at
2272          * multiple points to deal with races.  This is just the first
2273          * line of defense.  The only time HAMMER_RECF_DELETED_FE cannot
2274          * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
2275          * messes up link-count calculations.
2276          *
2277          * NOTE: Don't get confused between record deletion and, say,
2278          * directory entry deletion.  The deletion of a directory entry
2279          * which is on-media has nothing to do with the record deletion
2280          * flags.
2281          */
2282         if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
2283                           HAMMER_RECF_COMMITTED)) {
2284                 if (rec->flush_state == HAMMER_FST_FLUSH) {
2285                         KKASSERT(rec->flush_group == rec->ip->flush_group);
2286                         r = 1;
2287                 } else {
2288                         r = 0;
2289                 }
2290                 return(r);
2291         }
2292
2293         /*
2294          * If the record is in an idle state it has no dependencies and
2295          * can be flushed.
2296          */
2297         ip = rec->ip;
2298         flg = ip->flush_group;
2299         r = 0;
2300
2301         switch(rec->flush_state) {
2302         case HAMMER_FST_IDLE:
2303                 /*
2304                  * The record has no setup dependency; we can flush it.
2305                  */
2306                 KKASSERT(rec->target_ip == NULL);
2307                 rec->flush_state = HAMMER_FST_FLUSH;
2308                 rec->flush_group = flg;
2309                 ++flg->refs;
2310                 hammer_ref(&rec->lock);
2311                 r = 1;
2312                 break;
2313         case HAMMER_FST_SETUP:
2314                 /*
2315                  * The record has a setup dependency.  These are typically
2316                  * directory entry adds and deletes.  Such entries will be
2317                  * flushed when their inodes are flushed so we do not
2318                  * usually have to add them to the flush here.  However,
2319                  * if the target_ip has set HAMMER_INODE_CONN_DOWN then
2320                  * it is asking us to flush this record (and it).
2321                  */
2322                 target_ip = rec->target_ip;
2323                 KKASSERT(target_ip != NULL);
2324                 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
2325
2326                 /*
2327                  * If the target IP is already flushing in our group
2328                  * we could associate the record, but target_ip has
2329                  * already synced ino_data to sync_ino_data and we
2330                  * would also have to adjust nlinks.   Plus there are
2331                  * ordering issues for adds and deletes.
2332                  *
2333                  * Reflush downward if this is an ADD, and upward if
2334                  * this is a DEL.
2335                  */
2336                 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
2337                         if (rec->type == HAMMER_MEM_RECORD_ADD)
2338                                 ip->flags |= HAMMER_INODE_REFLUSH;
2339                         else
2340                                 target_ip->flags |= HAMMER_INODE_REFLUSH;
2341                         break;
2342                 }
2343
2344                 /*
2345                  * Target IP is not yet flushing.  This can get complex
2346                  * because we have to be careful about the recursion.
2347                  *
2348                  * Directories create an issue for us in that if a flush
2349                  * of a directory is requested the expectation is to flush
2350                  * any pending directory entries, but this will cause the
2351                  * related inodes to recursively flush as well.  We can't
2352                  * really defer the operation, so just get as many as we
2353                  * can into the flush and let a reflush pick up the rest.
2354                  */
2355 #if 0
2356                 if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
2357                     (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
2358                         /*
2359                          * We aren't reclaiming and the target ip was not
2360                          * previously prevented from flushing due to this
2361                          * record dependancy.  Do not flush this record.
2362                          */
2363                         /*r = 0;*/
2364                 } else
2365 #endif
2366                 if (flg->total_count + flg->refs >
2367                            ip->hmp->undo_rec_limit) {
2368                         /*
2369                          * Our flush group is over-full and we risk blowing
2370                          * out the UNDO FIFO.  Stop the scan, flush what we
2371                          * have, then reflush the directory.
2372                          *
2373                          * The directory may be forced through multiple
2374                          * flush groups before it can be completely
2375                          * flushed.
2376                          */
2377                         ip->flags |= HAMMER_INODE_RESIGNAL |
2378                                      HAMMER_INODE_REFLUSH;
2379                         r = -1;
2380                 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
2381                         /*
2382                          * If the target IP is not flushing we can force
2383                          * it to flush, even if it is unable to write out
2384                          * any of its own records we have at least one in
2385                          * hand that we CAN deal with.
2386                          */
2387                         rec->flush_state = HAMMER_FST_FLUSH;
2388                         rec->flush_group = flg;
2389                         ++flg->refs;
2390                         hammer_ref(&rec->lock);
2391                         hammer_flush_inode_core(target_ip, flg,
2392                                                 HAMMER_FLUSH_RECURSION);
2393                         r = 1;
2394                 } else {
2395                         /*
2396                          * General or delete-on-disk record.
2397                          *
2398                          * XXX this needs help.  If a delete-on-disk we could
2399                          * disconnect the target.  If the target has its own
2400                          * dependencies they really need to be flushed.
2401                          *
2402                          * XXX
2403                          */
2404                         rec->flush_state = HAMMER_FST_FLUSH;
2405                         rec->flush_group = flg;
2406                         ++flg->refs;
2407                         hammer_ref(&rec->lock);
2408                         hammer_flush_inode_core(target_ip, flg,
2409                                                 HAMMER_FLUSH_RECURSION);
2410                         r = 1;
2411                 }
2412                 break;
2413         case HAMMER_FST_FLUSH:
2414                 /*
2415                  * The record could be part of a previous flush group if the
2416                  * inode is a directory (the record being a directory entry).
2417                  * Once the flush group was closed a hammer_test_inode()
2418                  * call can cause a new flush group to be set up, placing
2419                  * the directory inode itself in a new flush group.
2420                  *
2421                  * When associated with a previous flush group we count it
2422                  * as if it were in our current flush group, since it will
2423                  * effectively be flushed by the time we flush our current
2424                  * flush group.
2425                  */
2426                 KKASSERT(
2427                     rec->ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY ||
2428                     rec->flush_group == flg);
2429                 r = 1;
2430                 break;
2431         }
2432         return(r);
2433 }
2434
2435 #if 0
2436 /*
2437  * This version just moves records already in a flush state to the new
2438  * flush group and that is it.
2439  */
2440 static int
2441 hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
2442 {
2443         hammer_inode_t ip = rec->ip;
2444
2445         switch(rec->flush_state) {
2446         case HAMMER_FST_FLUSH:
2447                 KKASSERT(rec->flush_group == ip->flush_group);
2448                 break;
2449         default:
2450                 break;
2451         }
2452         return(0);
2453 }
2454 #endif
2455
2456 /*
2457  * Wait for a previously queued flush to complete.
2458  *
2459  * If a critical error occurred we don't try to wait.
2460  */
2461 void
2462 hammer_wait_inode(hammer_inode_t ip)
2463 {
2464         /*
2465          * The inode can be in a SETUP state in which case RESIGNAL
2466          * should be set.  If RESIGNAL is not set then the previous
2467          * flush completed and a later operation placed the inode
2468          * in a passive setup state again, so we're done.
2469          *
2470          * The inode can be in a FLUSH state in which case we
2471          * can just wait for completion.
2472          */
2473         while (ip->flush_state == HAMMER_FST_FLUSH ||
2474             (ip->flush_state == HAMMER_FST_SETUP &&
2475              (ip->flags & HAMMER_INODE_RESIGNAL))) {
2476                 /*
2477                  * Don't try to flush on a critical error
2478                  */
2479                 if (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
2480                         break;
2481
2482                 /*
2483                  * If the inode was already being flushed its flg
2484                  * may not have been queued to the backend.  We
2485                  * have to make sure it gets queued or we can wind
2486                  * up blocked or deadlocked (particularly if we are
2487                  * the vnlru thread).
2488                  */
2489                 if (ip->flush_state == HAMMER_FST_FLUSH) {
2490                         KKASSERT(ip->flush_group);
2491                         if (ip->flush_group->closed == 0) {
2492                                 if (hammer_debug_inode) {
2493                                         hkprintf("debug: forcing "
2494                                                 "async flush ip %016jx\n",
2495                                                 (intmax_t)ip->obj_id);
2496                                 }
2497                                 hammer_flusher_async(ip->hmp, ip->flush_group);
2498                                 continue; /* retest */
2499                         }
2500                 }
2501
2502                 /*
2503                  * In a flush state with the flg queued to the backend
2504                  * or in a setup state with RESIGNAL set, we can safely
2505                  * wait.
2506                  */
2507                 ip->flags |= HAMMER_INODE_FLUSHW;
2508                 tsleep(&ip->flags, 0, "hmrwin", 0);
2509         }
2510
2511 #if 0
2512         /*
2513          * The inode may have been in a passive setup state,
2514          * call flush to make sure we get signaled.
2515          */
2516         if (ip->flush_state == HAMMER_FST_SETUP)
2517                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2518 #endif
2519
2520 }
2521
2522 /*
2523  * Called by the backend code when a flush has been completed.
2524  * The inode has already been removed from the flush list.
2525  *
2526  * A pipelined flush can occur, in which case we must re-enter the
2527  * inode on the list and re-copy its fields.
2528  */
2529 void
2530 hammer_flush_inode_done(hammer_inode_t ip, int error)
2531 {
2532         hammer_mount_t hmp;
2533         int dorel;
2534
2535         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
2536
2537         hmp = ip->hmp;
2538
2539         /*
2540          * Auto-reflush if the backend could not completely flush
2541          * the inode.  This fixes a case where a deferred buffer flush
2542          * could cause fsync to return early.
2543          */
2544         if (ip->sync_flags & HAMMER_INODE_MODMASK)
2545                 ip->flags |= HAMMER_INODE_REFLUSH;
2546
2547         /*
2548          * Merge left-over flags back into the frontend and fix the state.
2549          * Incomplete truncations are retained by the backend.
2550          */
2551         ip->error = error;
2552         ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
2553         ip->sync_flags &= HAMMER_INODE_TRUNCATED;
2554
2555         /*
2556          * The backend may have adjusted nlinks, so if the adjusted nlinks
2557          * does not match the frontend, set the frontend's DDIRTY flag again.
2558          */
2559         if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
2560                 ip->flags |= HAMMER_INODE_DDIRTY;
2561
2562         /*
2563          * Fix up the dirty buffer status.
2564          */
2565         if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
2566                 ip->flags |= HAMMER_INODE_BUFS;
2567         }
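        /*
         * Let the REDO FIFO tracking know this flush cycle has ended.
         */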
2568         hammer_redo_fifo_end_flush(ip);
2569
2570         /*
2571          * Re-set the XDIRTY flag if some of the inode's in-memory records
2572          * could not be flushed.
2573          */
2574         KKASSERT((RB_EMPTY(&ip->rec_tree) &&
2575                   (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
2576                  (!RB_EMPTY(&ip->rec_tree) &&
2577                   (ip->flags & HAMMER_INODE_XDIRTY) != 0));
2578
2579         /*
2580          * Do not lose track of inodes which no longer have vnode
2581          * associations, otherwise they may never get flushed again.
2582          *
2583          * The reflush flag can be set superfluously, causing extra pain
2584          * for no reason.  If the inode is no longer modified it no longer
2585          * needs to be flushed.
2586          */
2587         if (ip->flags & HAMMER_INODE_MODMASK) {
2588                 if (ip->vp == NULL)
2589                         ip->flags |= HAMMER_INODE_REFLUSH;
2590         } else {
2591                 ip->flags &= ~HAMMER_INODE_REFLUSH;
2592         }
2593
2594         /*
2595          * The fs token is held but the inode lock is not held.  Because this
2596          * is a backend flush the vnode may have no references, which can
2597          * cause a reclaim race inside vsetisdirty() if/when it blocks.
2598          *
2599          * Therefore, we must lock the inode around this particular dirtying
2600          * operation.  We don't have to do so around other dirtying operations
2601          * where the vnode is implicitly or explicitly held.
2602          */
2603         if (ip->flags & HAMMER_INODE_MODMASK) {
2604                 hammer_lock_ex(&ip->lock);
2605                 hammer_inode_dirty(ip);
2606                 hammer_unlock(&ip->lock);
2607         }
2608
2609         /*
2610          * Adjust the flush state.
2611          */
2612         if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2613                 /*
2614                  * We were unable to flush out all our records, leave the
2615                  * inode in a flush state and in the current flush group.
2616                  * The flush group will be re-run.
2617                  *
2618                  * This occurs if the UNDO block gets too full or there is
2619                  * too much dirty meta-data and allows the flusher to
2620                  * finalize the UNDO block and then re-flush.
2621                  */
2622                 ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
2623                 dorel = 0;
2624         } else {
2625                 /*
2626                  * Remove from the flush_group
2627                  */
2628                 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
2629                 ip->flush_group = NULL;
2630
2631 #if 0
2632                 /*
2633                  * Clean up the vnode ref and tracking counts.
2634                  */
2635                 if (ip->flags & HAMMER_INODE_VHELD) {
2636                         ip->flags &= ~HAMMER_INODE_VHELD;
2637                         vrele(ip->vp);
2638                 }
2639 #endif
2640                 --hmp->count_iqueued;
2641                 --hammer_count_iqueued;
2642
2643                 /*
2644                  * And adjust the state.
2645                  */
2646                 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
2647                         ip->flush_state = HAMMER_FST_IDLE;
2648                         dorel = 1;
2649                 } else {
2650                         ip->flush_state = HAMMER_FST_SETUP;
2651                         dorel = 0;
2652                 }
2653
2654                 /*
2655                  * If the frontend is waiting for a flush to complete,
2656                  * wake it up.
2657                  */
2658                 if (ip->flags & HAMMER_INODE_FLUSHW) {
2659                         ip->flags &= ~HAMMER_INODE_FLUSHW;
2660                         wakeup(&ip->flags);
2661                 }
2662
2663                 /*
2664                  * If the frontend made more changes and requested another
2665                  * flush, then try to get it running.
2666                  *
2667                  * Reflushes are aborted when the inode is errored out.
2668                  */
2669                 if (ip->flags & HAMMER_INODE_REFLUSH) {
2670                         ip->flags &= ~HAMMER_INODE_REFLUSH;
2671                         if (ip->flags & HAMMER_INODE_RESIGNAL) {
2672                                 ip->flags &= ~HAMMER_INODE_RESIGNAL;
2673                                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2674                         } else {
2675                                 hammer_flush_inode(ip, 0);
2676                         }
2677                 }
2678         }
2679
2680         /*
2681          * If we have no parent dependencies we can clear CONN_DOWN
2682          */
2683         if (TAILQ_EMPTY(&ip->target_list))
2684                 ip->flags &= ~HAMMER_INODE_CONN_DOWN;
2685
2686         /*
2687          * If the inode is now clean drop the space reservation.
2688          */
2689         if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
2690             (ip->flags & HAMMER_INODE_RSV_INODES)) {
2691                 ip->flags &= ~HAMMER_INODE_RSV_INODES;
2692                 --hmp->rsv_inodes;
2693         }
2694
2695         ip->flags &= ~HAMMER_INODE_SLAVEFLUSH;
2696
2697         if (dorel)
2698                 hammer_rel_inode(ip, 0);
2699 }
2700
2701 /*
2702  * Called from hammer_sync_inode() to synchronize in-memory records
2703  * to the media.
2704  */
2705 static int
2706 hammer_sync_record_callback(hammer_record_t record, void *data)
2707 {
2708         hammer_cursor_t cursor = data;
2709         hammer_transaction_t trans = cursor->trans;
2710         hammer_mount_t hmp = trans->hmp;
2711         int error;
2712
2713         /*
2714          * Skip records that do not belong to the current flush.
2715          */
2716         ++hammer_stats_record_iterations;
2717         if (record->flush_state != HAMMER_FST_FLUSH)
2718                 return(0);
2719
2720         if (record->flush_group != record->ip->flush_group) {
2721                 hdkprintf("rec %p ip %p bad flush group %p %p\n",
2722                         record,
2723                         record->ip,
2724                         record->flush_group,
2725                         record->ip->flush_group);
2726                 if (hammer_debug_critical)
2727                         Debugger("blah2");
2728                 return(0);
2729         }
2730         KKASSERT(record->flush_group == record->ip->flush_group);
2731
2732         /*
2733          * Interlock the record using the BE flag.  Once BE is set the
2734          * frontend cannot change the state of FE.
2735          *
2736          * NOTE: If FE is set prior to us setting BE we still sync the
2737          * record out, but the flush completion code converts it to
2738          * a delete-on-disk record instead of destroying it.
2739          */
2740         KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
2741         record->flags |= HAMMER_RECF_INTERLOCK_BE;
2742
2743         /*
2744          * The backend has already disposed of the record.
2745          */
2746         if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
2747                 error = 0;
2748                 goto done;
2749         }
2750
2751         /*
2752          * If the whole inode is being deleted and all on-disk records will
2753          * be deleted very soon, we can't sync any new records to disk
2754          * because they will be deleted in the same transaction they were
2755          * created in (delete_tid == create_tid), which will assert.
2756          *
2757          * XXX There may be a case with RECORD_ADD with DELETED_FE set
2758          * that we currently panic on.
2759          */
2760         if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
2761                 switch(record->type) {
2762                 case HAMMER_MEM_RECORD_DATA:
2763                         /*
2764                          * We don't have to do anything, if the record was
2765                          * committed the space will have been accounted for
2766                          * in the blockmap.
2767                          */
2768                         /* fall through */
2769                 case HAMMER_MEM_RECORD_GENERAL:
2770                         /*
2771                          * Set deleted-by-backend flag.  Do not set the
2772                          * backend committed flag, because we are throwing
2773                          * the record away.
2774                          */
2775                         record->flags |= HAMMER_RECF_DELETED_BE;
2776                         ++record->ip->rec_generation;
2777                         error = 0;
2778                         goto done;
2779                 case HAMMER_MEM_RECORD_ADD:
2780                         hpanic("illegal add during inode deletion record %p",
2781                                 record);
2782                         break; /* NOT REACHED */
2783                 case HAMMER_MEM_RECORD_INODE:
2784                         hpanic("attempt to sync inode record %p?", record);
2785                         break; /* NOT REACHED */
2786                 case HAMMER_MEM_RECORD_DEL:
2787                         /*
2788                          * Follow through and issue the on-disk deletion
2789                          */
2790                         break;
2791                 }
2792         }
2793
2794         /*
2795          * If DELETED_FE is set special handling is needed for directory
2796          * entries.  Dependent pieces related to the directory entry may
2797          * have already been synced to disk.  If this occurs we have to
2798          * sync the directory entry and then change the in-memory record
2799          * from an ADD to a DELETE to cover the fact that it's been
2800          * deleted by the frontend.
2801          *
2802          * A directory delete covering record (MEM_RECORD_DEL) can never
2803          * be deleted by the frontend.
2804          *
2805          * Any other record type (aka DATA) can be deleted by the frontend.
2806          * XXX At the moment the flusher must skip it because there may
2807          * be another data record in the flush group for the same block,
2808          * meaning that some frontend data changes can leak into the backend's
2809          * synchronization point.
2810          */
2811         if (record->flags & HAMMER_RECF_DELETED_FE) {
2812                 if (record->type == HAMMER_MEM_RECORD_ADD) {
2813                         /*
2814                          * Convert a front-end deleted directory-add to
2815                          * a directory-delete entry later.
2816                          */
2817                         record->flags |= HAMMER_RECF_CONVERT_DELETE;
2818                 } else {
2819                         /*
2820                          * Dispose of the record (race case).  Mark as
2821                          * deleted by backend (and not committed).
2822                          */
2823                         KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
2824                         record->flags |= HAMMER_RECF_DELETED_BE;
2825                         ++record->ip->rec_generation;
2826                         error = 0;
2827                         goto done;
2828                 }
2829         }
2830
2831         /*
2832          * Assign the create_tid for new records.  Deletions already
2833          * have the record's entire key properly set up.
2834          */
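        /*
         * e.g. a directory-entry ADD being synced by this flush group
         * is stamped with the group's transaction id below, whereas a
         * MEM_RECORD_DEL already carries the create_tid of the on-disk
         * record it covers and must not be restamped.
         */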
2835         if (record->type != HAMMER_MEM_RECORD_DEL) {
2836                 record->leaf.base.create_tid = trans->tid;
2837                 record->leaf.create_ts = trans->time32;
2838         }
2839
2840         /*
2841          * This actually moves the record to the on-media B-Tree.  We
2842          * must also generate REDO_TERM entries in the UNDO/REDO FIFO
2843          * indicating that the related REDO_WRITE(s) have been committed.
2844          *
2845          * During recovery any REDO_TERMs within the nominal recovery span
2846          * are ignored since the related meta-data is being undone, causing
2847          * any matching REDO_WRITEs to execute.  The REDO_TERMs outside
2848          * the nominal recovery span will match against REDO_WRITEs and
2849          * prevent them from being executed (because the meta-data has
2850          * already been synchronized).
2851          */
2852         if (record->flags & HAMMER_RECF_REDO) {
2853                 KKASSERT(record->type == HAMMER_MEM_RECORD_DATA);
2854                 hammer_generate_redo(trans, record->ip,
2855                                      record->leaf.base.key -
2856                                          record->leaf.data_len,
2857                                      HAMMER_REDO_TERM_WRITE,
2858                                      NULL,
2859                                      record->leaf.data_len);
2860         }
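        /*
         * e.g. the REDO_TERM_WRITE generated above pairs with the
         * REDO_WRITE the frontend issued for the same offset range;
         * once this flush is finalized the TERM falls outside the
         * nominal recovery span and masks the matching REDO_WRITE.
         */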
2861
2862         for (;;) {
2863                 error = hammer_ip_sync_record_cursor(cursor, record);
2864                 if (error != EDEADLK)
2865                         break;
2866                 hammer_done_cursor(cursor);
2867                 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
2868                                            record->ip);
2869                 if (error)
2870                         break;
2871         }
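        /*
         * NOTE: EDEADLK from the record sync forces a full cursor
         * teardown and re-initialization to break the lock cycle
         * before retrying; any other result terminates the retry loop.
         */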
2872         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
2873
2874         if (error)
2875                 error = -error;
2876 done:
2877         hammer_flush_record_done(record, error);
2878
2879         /*
2880          * Do partial finalization if we have built up too many dirty
2881          * buffers.  Otherwise a buffer cache deadlock can occur when
2882          * doing things like creating tens of thousands of tiny files.
2883          *
2884          * We must release our cursor lock to avoid a 3-way deadlock
2885          * due to the exclusive sync lock the finalizer must get.
2886          *
2887          * WARNING: See warnings in hammer_unlock_cursor() function.
2888          */
2889         if (hammer_flusher_meta_limit(hmp) ||
2890             vm_page_count_severe()) {
2891                 hammer_unlock_cursor(cursor);
2892                 hammer_flusher_finalize(trans, 0);
2893                 hammer_lock_cursor(cursor);
2894         }
2895         return(error);
2896 }
2897
2898 /*
2899  * Backend function called by the flusher to sync an inode to media.
2900  */
2901 int
2902 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
2903 {
2904         struct hammer_cursor cursor;
2905         hammer_node_t tmp_node;
2906         hammer_record_t depend;
2907         hammer_record_t next;
2908         int error, tmp_error;
2909         uint64_t nlinks;
2910
2911         if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
2912                 return(0);
2913
2914         error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2915         if (error)
2916                 goto done;
2917
2918         /*
2919          * Any directory records referencing this inode which are not in
2920          * our current flush group require us to adjust the nlink count
2921          * for the purposes of synchronizing to disk.
2922          *
2923          * Records which are in our flush group can be unlinked from our
2924          * inode now, potentially allowing the inode to be physically
2925          * deleted.
2926          *
2927          * This cannot block.
2928          */
2929         nlinks = ip->ino_data.nlinks;
2930         next = TAILQ_FIRST(&ip->target_list);
2931         while ((depend = next) != NULL) {
2932                 next = TAILQ_NEXT(depend, target_entry);
2933                 if (depend->flush_state == HAMMER_FST_FLUSH &&
2934                     depend->flush_group == ip->flush_group) {
2935                         /*
2936                          * If this is an ADD that was deleted by the frontend
2937                          * the frontend nlinks count will have already been
2938                          * decremented, but the backend is going to sync its
2939                          * directory entry and must account for it.  The
2940                          * record will be converted to a delete-on-disk when
2941                          * it gets synced.
2942                          *
2943                          * If the ADD was not deleted by the frontend we
2944                          * can remove the dependency from our target_list.
2945                          */
2946                         if (depend->flags & HAMMER_RECF_DELETED_FE) {
2947                                 ++nlinks;
2948                         } else {
2949                                 TAILQ_REMOVE(&ip->target_list, depend,
2950                                              target_entry);
2951                                 depend->target_ip = NULL;
2952                         }
2953                 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
2954                         /*
2955                          * Not part of our flush group and not deleted by
2956                          * the front-end, adjust the link count synced to
2957                          * the media (undo what the frontend did when it
2958                          * queued the record).
2959                          */
2960                         KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
2961                         switch(depend->type) {
2962                         case HAMMER_MEM_RECORD_ADD:
2963                                 --nlinks;
2964                                 break;
2965                         case HAMMER_MEM_RECORD_DEL:
2966                                 ++nlinks;
2967                                 break;
2968                         default:
2969                                 break;
2970                         }
2971                 }
2972         }
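        /*
         * e.g. in the scan above, an ADD queued to a later flush group
         * means the media must not yet see the new link, so it is
         * backed out of nlinks; a DEL queued to a later flush group is
         * backed in for the same reason.
         */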
2973
2974         /*
2975          * Set dirty if we had to modify the link count.
2976          */
2977         if (ip->sync_ino_data.nlinks != nlinks) {
2978                 KKASSERT((int64_t)nlinks >= 0);
2979                 ip->sync_ino_data.nlinks = nlinks;
2980                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
2981         }
2982
2983         /*
2984          * If there is a truncation queued, destroy any data past the (aligned)
2985          * truncation point.  Userland will have dealt with the buffer
2986          * containing the truncation point for us.
2987          *
2988          * We don't flush pending frontend data buffers until after we've
2989          * dealt with the truncation.
2990          */
2991         if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2992                 /*
2993                  * Interlock trunc_off.  The VOP front-end may continue to
2994                  * make adjustments to it while we are blocked.
2995                  */
2996                 off_t trunc_off;
2997                 off_t aligned_trunc_off;
2998                 int blkmask;
2999
3000                 trunc_off = ip->sync_trunc_off;
3001                 blkmask = hammer_blocksize(trunc_off) - 1;
3002                 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
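                /*
                 * e.g. assuming a 16K block size (offsets below the
                 * large-block demarcation), truncating to 0x5000 gives
                 * blkmask 0x3FFF and aligned_trunc_off 0x8000; the
                 * partial block at [0x4000,0x8000) has already been
                 * handled by the frontend.
                 */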
3003
3004                 /*
3005                  * Delete any whole blocks on-media.  The front-end has
3006                  * already cleaned out any partial block and made it
3007                  * pending.  The front-end may have updated trunc_off
3008                  * while we were blocked so we only use sync_trunc_off.
3009                  *
3010                  * This operation can blow out the buffer cache; EWOULDBLOCK
3011                  * means we were unable to complete the deletion.  The
3012                  * deletion will update sync_trunc_off in that case.
3013                  */
3014                 error = hammer_ip_delete_range(&cursor, ip,
3015                                                 aligned_trunc_off,
3016                                                 0x7FFFFFFFFFFFFFFFLL, 2);
3017                 if (error == EWOULDBLOCK) {
3018                         ip->flags |= HAMMER_INODE_WOULDBLOCK;
3019                         error = 0;
3020                         goto defer_buffer_flush;
3021                 }
3022
3023                 if (error)
3024                         goto done;
3025
3026                 /*
3027                  * Generate a REDO_TERM_TRUNC entry in the UNDO/REDO FIFO.
3028                  *
3029                  * XXX we do this even if we did not previously generate
3030                  * a REDO_TRUNC record.  This operation may enclose the
3031                  * range of multiple prior truncation entries in the REDO
3032                  * log.
3033                  */
3034                 if (trans->hmp->version >= HAMMER_VOL_VERSION_FOUR &&
3035                     (ip->flags & HAMMER_INODE_RDIRTY)) {
3036                         hammer_generate_redo(trans, ip, aligned_trunc_off,
3037                                              HAMMER_REDO_TERM_TRUNC,
3038                                              NULL, 0);
3039                 }
3040
3041                 /*
3042                  * Clear the truncation flag on the backend after we have
3043                  * completed the deletions.  Backend data is now good again
3044                  * (including new records we are about to sync, below).
3045                  *
3046                  * Leave sync_trunc_off intact.  As we write additional
3047                  * records the backend will update sync_trunc_off.  This
3048                  * tells the backend whether it can skip the overwrite
3049                  * test.  This should work properly even when the backend
3050                  * writes full blocks where the truncation point straddles
3051                  * the block because the comparison is against the base
3052                  * offset of the record.
3053                  */
3054                 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
3055                 /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
3056         } else {
3057                 error = 0;
3058         }
3059
3060         /*
3061          * Now sync related records.  These will typically be directory
3062          * entries, records tracking direct-writes, or delete-on-disk records.
3063          */
3064         if (error == 0) {
3065                 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
3066                                     hammer_sync_record_callback, &cursor);
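                /*
                 * The callback returns a negated errno to abort the
                 * RB_SCAN, so flip the sign back before folding the
                 * result into error.
                 */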
3067                 if (tmp_error < 0)
3068                         tmp_error = -tmp_error;
3069                 if (tmp_error)
3070                         error = tmp_error;
3071         }
3072         hammer_cache_node(&ip->cache[1], cursor.node);
3073
3074         /*
3075          * Re-seek for inode update, assuming our cache hasn't been ripped
3076          * out from under us.
3077          */
3078         if (error == 0) {
3079                 tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
3080                 if (tmp_node) {
3081                         hammer_cursor_downgrade(&cursor);
3082                         hammer_lock_sh(&tmp_node->lock);
3083                         if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
3084                                 hammer_cursor_seek(&cursor, tmp_node, 0);
3085                         hammer_unlock(&tmp_node->lock);
3086                         hammer_rel_node(tmp_node);
3087                 }
3088                 error = 0;
3089         }
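        /*
         * NOTE: the re-seek above is opportunistic.  If the cached
         * node was deleted out from under us the cursor is simply
         * left where it is; the inode update below performs its own
         * lookup.
         */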
3090
3091         /*
3092          * If we are deleting the inode the frontend had better not have
3093          * any active references on elements making up the inode.
3094          *
3095          * The call to hammer_ip_delete_clean() cleans up auxiliary records
3096          * but not DB or DATA records.  Those must have already been deleted
3097          * by the normal truncation mechanic.
3098          */
3099         if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
3100             RB_EMPTY(&ip->rec_tree) &&
3101             (ip->sync_flags & HAMMER_INODE_DELETING) &&
3102             (ip->flags & HAMMER_INODE_DELETED) == 0) {
3103                 int count1 = 0;
3104
3105                 error = hammer_ip_delete_clean(&cursor, ip, &count1);
3106                 if (error == 0) {
3107                         ip->flags |= HAMMER_INODE_DELETED;
3108                         ip->sync_flags &= ~HAMMER_INODE_DELETING;
3109                         ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
3110                         KKASSERT(RB_EMPTY(&ip->rec_tree));
3111
3112                         /*
3113                          * Set delete_tid in both the frontend and backend
3114                          * copy of the inode record.  The DELETED flag handles
3115                          * this; do not set DDIRTY.
3116                          */
3117                         ip->ino_leaf.base.delete_tid = trans->tid;
3118                         ip->sync_ino_leaf.base.delete_tid = trans->tid;
3119                         ip->ino_leaf.delete_ts = trans->time32;
3120                         ip->sync_ino_leaf.delete_ts = trans->time32;
3121
3122
3123                         /*
3124                          * Adjust the inode count in the volume header
3125                          */
3126                         hammer_sync_lock_sh(trans);
3127                         if (ip->flags & HAMMER_INODE_ONDISK) {
3128                                 hammer_modify_volume_field(trans,
3129                                                            trans->rootvol,
3130                                                            vol0_stat_inodes);
3131                                 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
3132                                 hammer_modify_volume_done(trans->rootvol);
3133                         }
3134                         hammer_sync_unlock(trans);
3135                 }
3136         }
3137
3138         if (error)
3139                 goto done;
3140         ip->sync_flags &= ~HAMMER_INODE_BUFS;
3141
3142 defer_buffer_flush:
3143         /*
3144          * Now update the inode's on-disk inode-data and/or on-disk record.
3145          * DELETED and ONDISK are managed only in ip->flags.
3146          *
3147          * In the case of a deferred buffer flush we still update the on-disk
3148          * inode to satisfy visibility requirements if there happen to be
3149          * directory dependencies.
3150          */
3151         switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
3152         case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
3153                 /*
3154                  * If deleted and on-disk, don't set any additional flags.
3155                  * The delete flag takes care of things.
3156                  *
3157                  * Clear flags which may have been set by the frontend.
3158                  */
3159                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
3160                                     HAMMER_INODE_SDIRTY |
3161                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
3162                                     HAMMER_INODE_DELETING);
3163                 break;
3164         case HAMMER_INODE_DELETED:
3165                 /*
3166                  * Take care of the case where a deleted inode was never
3167                  * flushed to the disk in the first place.
3168                  *
3169                  * Clear flags which may have been set by the frontend.
3170                  */
3171                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
3172                                     HAMMER_INODE_SDIRTY |
3173                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
3174                                     HAMMER_INODE_DELETING);
3175                 while (RB_ROOT(&ip->rec_tree)) {
3176                         hammer_record_t record = RB_ROOT(&ip->rec_tree);
3177                         hammer_ref(&record->lock);
3178                         KKASSERT(hammer_oneref(&record->lock));
3179                         record->flags |= HAMMER_RECF_DELETED_BE;
3180                         ++record->ip->rec_generation;
3181                         hammer_rel_mem_record(record);
3182                 }
3183                 break;
3184         case HAMMER_INODE_ONDISK:
3185                 /*
3186                  * If already on-disk, do not set any additional flags.
3187                  */
3188                 break;
3189         default:
3190                 /*
3191                  * If not on-disk and not deleted, set DDIRTY to force
3192                  * an initial record to be written.
3193                  *
3194                  * Also set the create_tid in both the frontend and backend
3195                  * copy of the inode record.
3196                  */
3197                 ip->ino_leaf.base.create_tid = trans->tid;
3198                 ip->ino_leaf.create_ts = trans->time32;
3199                 ip->sync_ino_leaf.base.create_tid = trans->tid;
3200                 ip->sync_ino_leaf.create_ts = trans->time32;
3201                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
3202                 break;
3203         }
3204
3205         /*
3206          * If DDIRTY or SDIRTY is set, write out a new record.
3207          * If the inode is already on-disk the old record is marked as
3208          * deleted.
3209          *
3210          * If DELETED is set hammer_update_inode() will delete the existing
3211          * record without writing out a new one.
3212          */
3213         if (ip->flags & HAMMER_INODE_DELETED) {
3214                 error = hammer_update_inode(&cursor, ip);
3215         } else
3216         if (!(ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY)) &&
3217             (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
3218                 error = hammer_update_itimes(&cursor, ip);
3219         } else
3220         if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY |
3221                               HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
3222                 error = hammer_update_inode(&cursor, ip);
3223         }
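        /*
         * i.e. in the dispatch above DELETED wins (the existing record
         * is deleted without a replacement), a pure atime/mtime change
         * takes the cheaper hammer_update_itimes() path, and any other
         * DDIRTY/SDIRTY state rewrites the inode record in full.
         */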
3224 done:
3225         if (ip->flags & HAMMER_INODE_MODMASK)
3226                 hammer_inode_dirty(ip);
3227         if (error) {
3228                 hammer_critical_error(ip->hmp, ip, error,
3229                                       "while syncing inode");
3230         }
3231         hammer_done_cursor(&cursor);
3232         return(error);
3233 }
3234
3235 /*
3236  * This routine is called when the OS is no longer actively referencing
3237  * the inode (but might still be keeping it cached), or when releasing
3238  * the last reference to an inode.
3239  *
3240  * At this point if the inode's nlinks count is zero we want to destroy
3241  * it, which may mean destroying it on-media too.
3242  */
3243 void
3244 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
3245 {
3246         struct vnode *vp;
3247
3248         /*
3249          * Set the DELETING flag when the link count drops to 0 and the
3250          * OS no longer has any opens on the inode.
3251          *
3252          * The backend will clear DELETING (a mod flag) and set DELETED
3253          * (a state flag) when it is actually able to perform the
3254          * operation.
3255          *
3256          * Don't reflag the deletion if the flusher is currently syncing
3257          * one that was already flagged.  A previously set DELETING flag
3258          * may bounce around flags and sync_flags until the operation is
3259          * completely done.
3260          *
3261          * Do not attempt to modify a snapshot inode (one set to read-only).
3262          */
3263         if (ip->ino_data.nlinks == 0 &&
3264             ((ip->flags | ip->sync_flags) & (HAMMER_INODE_RO|HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
3265                 ip->flags |= HAMMER_INODE_DELETING;
3266                 ip->flags |= HAMMER_INODE_TRUNCATED;
3267                 ip->trunc_off = 0;
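                /*
                 * Truncating to offset 0 queues destruction of every
                 * data block, allowing the backend to delete the inode
                 * itself once its record tree empties.
                 */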
3268                 vp = NULL;
3269                 if (getvp) {
3270                         if (hammer_get_vnode(ip, &vp) != 0)
3271                                 return;
3272                 }
3273
3274                 /*
3275                  * Final cleanup
3276                  */
3277                 if (ip->vp)
3278                         nvtruncbuf(ip->vp, 0, HAMMER_BUFSIZE, 0, 0);
3279                 if (ip->flags & HAMMER_INODE_MODMASK)
3280                         hammer_inode_dirty(ip);
3281                 if (getvp)
3282                         vput(vp);
3283         }
3284 }
3285
3286 /*
3287  * After potentially resolving a dependency, the inode is tested
3288  * to determine whether it needs to be reflushed.
3289  */
3290 void
3291 hammer_test_inode(hammer_inode_t ip)
3292 {
3293         if (ip->flags & HAMMER_INODE_REFLUSH) {
3294                 ip->flags &= ~HAMMER_INODE_REFLUSH;
3295                 hammer_ref(&ip->lock);
3296                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
3297                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
3298                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
3299                 } else {
3300                         hammer_flush_inode(ip, 0);
3301                 }
3302                 hammer_rel_inode(ip, 0);
3303         }
3304 }
3305
3306 /*
3307  * Clear the RECLAIM flag on an inode.  This occurs when the inode is
3308  * reassociated with a vp or just before it gets freed.
3309  *
3310  * Pipeline wakeups to threads blocked due to an excessive number of
3311  * detached inodes.  This typically occurs when atime updates accumulate
3312  * while scanning a directory tree.
3313  */
3314 static void
3315 hammer_inode_wakereclaims(hammer_inode_t ip)
3316 {
3317         struct hammer_reclaim *reclaim;
3318         hammer_mount_t hmp = ip->hmp;
3319
3320         if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
3321                 return;
3322
3323         --hammer_count_reclaims;
3324         --hmp->count_reclaims;
3325         ip->flags &= ~HAMMER_INODE_RECLAIM;
3326
3327         if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
3328                 KKASSERT(reclaim->count > 0);
3329                 if (--reclaim->count == 0) {
3330                         TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
3331                         wakeup(reclaim);
3332                 }
3333         }
3334 }
3335
3336 /*
3337  * Setup our reclaim pipeline.  We only let so many detached (and dirty)
3338  * inodes build up before we start blocking.  This routine is called
3339  * if a new inode is created or an inode is loaded from media.
3340  *
3341  * When we block we don't care *which* inode has finished reclaiming,
3342  * as long as one does.
3343  *
3344  * The reclaim pipeline is primarily governed by the auto-flush which is
3345  * 1/4 hammer_limit_reclaims.  We don't want to block if the count is
3346  * less than 1/2 hammer_limit_reclaims.  From 1/2 to full count is
3347  * dynamically governed.
3348  */
3349 void
3350 hammer_inode_waitreclaims(hammer_transaction_t trans)
3351 {
3352         hammer_mount_t hmp = trans->hmp;
3353         struct hammer_reclaim reclaim;
3354         int lower_limit;
3355
3356         /*
3357          * Track inode load, delay if the number of reclaiming inodes is
3358          * between 2/4 and 4/4 hammer_limit_reclaims, depending on per-pid load.
3359          */
3360         if (curthread->td_proc) {
3361                 struct hammer_inostats *stats;
3362
3363                 stats = hammer_inode_inostats(hmp, curthread->td_proc->p_pid);
3364                 ++stats->count;
3365
3366                 if (stats->count > hammer_limit_reclaims / 2)
3367                         stats->count = hammer_limit_reclaims / 2;
3368                 lower_limit = hammer_limit_reclaims - stats->count;
3369                 if (hammer_debug_general & 0x10000) {
3370                         hdkprintf("pid %5d limit %d\n",
3371                                 (int)curthread->td_proc->p_pid, lower_limit);
3372                 }
3373         } else {
3374                 lower_limit = hammer_limit_reclaims * 3 / 4;
3375         }
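        /*
         * e.g. a process whose decayed count has reached the 1/2-limit
         * cap gets lower_limit = hammer_limit_reclaims / 2 and begins
         * blocking at half the normal threshold, while a process with
         * no recent inode load keeps lower_limit at the full limit.
         */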
3376         if (hmp->count_reclaims >= lower_limit) {
3377                 reclaim.count = 1;
3378                 TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
3379                 tsleep(&reclaim, 0, "hmrrcm", hz);
3380                 if (reclaim.count > 0)
3381                         TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
3382         }
3383 }
3384
3385 /*
3386  * Keep track of reclaim statistics on a per-pid basis using a loose
3387  * 4-way set associative hash table.  Collisions inherit the count of
3388  * the previous entry.
3389  *
3390  * NOTE: We want to be careful here to limit the chain size.  If the chain
3391  *       size is too large a pid will spread its stats out over too many
3392  *       entries under certain types of heavy filesystem activity and
3393  *       wind up not delaying long enough.
3394  */
3395 static
3396 struct hammer_inostats *
3397 hammer_inode_inostats(hammer_mount_t hmp, pid_t pid)
3398 {
3399         struct hammer_inostats *stats;
3400         int delta;
3401         int chain;
3402         static volatile int iterator;   /* we don't care about MP races */
3403
3404         /*
3405          * Chain up to 4 times to find our entry.
3406          */
3407         for (chain = 0; chain < 4; ++chain) {
3408                 stats = &hmp->inostats[(pid + chain) & HAMMER_INOSTATS_HMASK];
3409                 if (stats->pid == pid)
3410                         break;
3411         }
3412
3413         /*
3414          * Replace one of the four chaining entries with our new entry.
3415          */
3416         if (chain == 4) {
3417                 stats = &hmp->inostats[(pid + (iterator++ & 3)) &
3418                                        HAMMER_INOSTATS_HMASK];
3419                 stats->pid = pid;
3420         }
3421
3422         /*
3423          * Decay the entry
3424          */
3425         if (stats->count && stats->ltick != ticks) {
3426                 delta = ticks - stats->ltick;
3427                 stats->ltick = ticks;
3428                 if (delta <= 0 || delta > hz * 60)
3429                         stats->count = 0;
3430                 else
3431                         stats->count = stats->count * hz / (hz + delta);
3432         }
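        /*
         * i.e. for a gap of delta ticks the count is scaled by
         * hz / (hz + delta), so a one-second gap (delta == hz) halves
         * it and anything over a minute (or a ticks rollover) zeroes
         * it outright.
         */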
3433         if (hammer_debug_general & 0x10000)
3434                 hdkprintf("pid %5d stats %d\n", (int)pid, stats->count);
3435         return (stats);
3436 }
3437
3438 #if 0
3439
3440 /*
3441  * XXX not used, doesn't work very well due to the large batching nature
3442  * of flushes.
3443  *
3444  * A larger-than-normal backlog of inodes is sitting in the flusher;
3445  * enforce a general slowdown to let it catch up.  This routine is only
3446  * called on completion of a non-flusher-related transaction which
3447  * performed B-Tree node I/O.
3448  *
3449  * It is possible for the flusher to stall in a continuous load.
3450  * blogbench -i1000 -o seems to do a good job generating this sort of load.
3451  * If the flusher is unable to catch up the inode count can bloat until
3452  * we run out of kvm.
3453  *
3454  * This is a bit of a hack.
3455  */
3456 void
3457 hammer_inode_waithard(hammer_mount_t hmp)
3458 {
3459         /*
3460          * Hysteresis.
3461          */
3462         if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
3463                 if (hmp->count_reclaims < hammer_limit_reclaims / 2 &&
3464                     hmp->count_iqueued < hmp->count_inodes / 20) {
3465                         hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
3466                         return;
3467                 }
3468         } else {
3469                 if (hmp->count_reclaims < hammer_limit_reclaims ||
3470                     hmp->count_iqueued < hmp->count_inodes / 10) {
3471                         return;
3472                 }
3473                 hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
3474         }
3475
3476         /*
3477          * Block for one flush cycle.
3478          */
3479         hammer_flusher_wait_next(hmp);
3480 }
3481
3482 #endif