/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.114 2008/09/24 00:53:51 dillon Exp $
 */

#include "hammer.h"
#include <vm/vm_extern.h>

static int      hammer_unload_inode(struct hammer_inode *ip);
static void     hammer_free_inode(hammer_inode_t ip);
static void     hammer_flush_inode_core(hammer_inode_t ip,
                                        hammer_flush_group_t flg, int flags);
static int      hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int      hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int      hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
                                        hammer_flush_group_t flg);
static int      hammer_setup_parent_inodes_helper(hammer_record_t record,
                                        int depth, hammer_flush_group_t flg);
static void     hammer_inode_wakereclaims(hammer_inode_t ip);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif

/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_localization < ip2->obj_localization)
                return(-1);
        if (ip1->obj_localization > ip2->obj_localization)
                return(1);
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}

/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_localization < ip->obj_localization)
                return(-1);
        if (info->obj_localization > ip->obj_localization)
                return(1);
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}

/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
        hammer_inode_info_t info = data;

        if (ip->obj_localization > info->obj_localization)
                return(1);
        if (ip->obj_localization < info->obj_localization)
                return(-1);
        if (ip->obj_id > info->obj_id)
                return(1);
        if (ip->obj_id < info->obj_id)
                return(-1);
        return(0);
}

/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
        u_int32_t localization = *(u_int32_t *)data;
        if (ip->obj_localization > localization)
                return(1);
        if (ip->obj_localization < localization)
                return(-1);
        return(0);
}

/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
        if (p1->localization < p2->localization)
                return(-1);
        if (p1->localization > p2->localization)
                return(1);
        return(0);
}


RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
             hammer_pfs_rb_compare, u_int32_t, localization);
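
/*
 * Illustrative sketch (hypothetical, not part of the original source):
 * the RB_GENERATE* macros above emit the type-safe helpers used by the
 * rest of this file, e.g. hammer_ino_rb_tree_RB_LOOKUP_INFO() and
 * hammer_ino_rb_tree_RB_SCAN().  A minimal cache lookup, keyed the same
 * way hammer_get_inode() keys it below, might look like this:
 */
#if 0
static hammer_inode_t
example_lookup_cached_inode(hammer_mount_t hmp, int64_t obj_id,
                            hammer_tid_t asof, u_int32_t localization)
{
        struct hammer_inode_info iinfo;

        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
        return(hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root,
                                                 &iinfo));
}
#endif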

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 *
 * MPALMOSTSAFE
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(ap->a_vp);
                return(0);
        }

        /*
         * If the inode no longer has visibility in the filesystem try to
         * recycle it immediately, even if the inode is dirty.  Recycling
         * it quickly allows the system to reclaim buffer cache and VM
         * resources which can matter a lot in a heavily loaded system.
         *
         * This can deadlock in vfsync() if we aren't careful.
         *
         * Do not queue the inode to the flusher if we still have visibility,
         * otherwise namespace calls such as chmod will unnecessarily generate
         * multiple inode updates.
         */
        if (ip->ino_data.nlinks == 0) {
                get_mplock();
                hammer_inode_unloadable_check(ip, 0);
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
                vrecycle(ap->a_vp);
                rel_mplock();
        }
        return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct vnode *vp;

        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
                vp->v_data = NULL;
                ip->vp = NULL;

                if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
                        ++hammer_count_reclaiming;
                        ++hmp->inode_reclaims;
                        ip->flags |= HAMMER_INODE_RECLAIM;
                }
                hammer_rel_inode(ip, 1);
        }
        return(0);
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
        hammer_mount_t hmp;
        struct vnode *vp;
        int error = 0;
        u_int8_t obj_type;

        hmp = ip->hmp;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp = *vpp;
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;

                        obj_type = ip->ino_data.obj_type;
                        vp->v_type = hammer_get_vnode_type(obj_type);

                        hammer_inode_wakereclaims(ip);

                        switch(ip->ino_data.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
                                break;
                        case HAMMER_OBJTYPE_REGFILE:
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         *
                         * Pseudo-filesystem roots can be accessed via
                         * non-root filesystem paths and setting VROOT may
                         * confuse the namecache.  Set VPFSROOT instead.
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT &&
                            ip->obj_asof == hmp->asof) {
                                if (ip->obj_localization == 0)
                                        vsetflags(vp, VROOT);
                                else
                                        vsetflags(vp, VPFSROOT);
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
                        if (vp->v_type == VREG)
                                vinitvmio(vp, ip->ino_data.size);
                        break;
                }

                /*
                 * Loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp)
                                break;
                        vput(vp);
                }
        }
        *vpp = vp;
        return(error);
}

/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
                            int (*callback)(hammer_inode_t ip, void *data),
                            void *data)
{
        hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
                                   hammer_inode_info_cmp_all_history,
                                   callback, iinfo);
}
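
/*
 * Illustrative sketch (hypothetical): a caller-supplied callback for
 * hammer_scan_inode_snapshots() above.  Per the RB_SCAN convention used
 * in this file (see hammer_unload_pseudofs_callback() further below),
 * returning 0 continues the scan and a negative return stops it.
 */
#if 0
static int
example_invalidate_callback(hammer_inode_t ip, void *data)
{
        /* ... invalidate cached state for this snapshot of the inode ... */
        return(0);      /* continue scanning the remaining snapshots */
}
#endif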

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_node_cache *cachep;
        struct hammer_inode_info iinfo;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a dummy inode we return a failure so dounlink
         * (which does another lookup) doesn't try to mess with the
         * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
         * to ref dummy inodes.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        if (hmp->ronly)
                ip->flags |= HAMMER_INODE_RO;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Locate the on-disk inode.  If this is a PFS root we always
         * access the current version of the root inode and (if it is not
         * a master) always access information under it with a snapshot
         * TID.
         *
         * We cache recent inode lookups in this directory in dip->cache[2].
         * If we can't find it we assume the inode we are looking for is
         * close to the directory inode.
         */
retry:
        cachep = NULL;
        if (dip) {
                if (dip->cache[2].node)
                        cachep = &dip->cache[2];
                else
                        cachep = &dip->cache[0];
        }
        hammer_init_cursor(trans, &cursor, cachep, NULL);
        cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;

        cursor.asof = iinfo.obj_asof;
        cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
                       HAMMER_CURSOR_ASOF;

        *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
                ip->ino_data = cursor.data->inode;

                /*
                 * cache[0] tries to cache the location of the object inode.
                 * The assumption is that it is near the directory inode.
                 *
                 * cache[1] tries to cache the location of the object data.
                 * We might have something in the governing directory from
                 * scan optimizations (see the strategy code in
                 * hammer_vnops.c).
                 *
                 * We update dip->cache[2], if possible, with the location
                 * of the object inode for future directory shortcuts.
                 */
                hammer_cache_node(&ip->cache[0], cursor.node);
                if (dip) {
                        if (dip->cache[3].node) {
                                hammer_cache_node(&ip->cache[1],
                                                  dip->cache[3].node);
                        }
                        hammer_cache_node(&dip->cache[2], cursor.node);
                }

                /*
                 * The file should not contain any data past the file size
                 * stored in the inode.  Setting save_trunc_off to the
                 * file size instead of max reduces B-Tree lookup overheads
                 * on append by allowing the flusher to avoid checking for
                 * record overwrites.
                 */
                ip->save_trunc_off = ip->ino_data.size;

                /*
                 * Locate and assign the pseudofs management structure to
                 * the inode.
                 */
                if (dip && dip->obj_localization == ip->obj_localization) {
                        ip->pfsm = dip->pfsm;
                        hammer_ref(&ip->pfsm->lock);
                } else {
                        ip->pfsm = hammer_load_pseudofs(trans,
                                                        ip->obj_localization,
                                                        errorp);
                        *errorp = 0;    /* ignore ENOENT */
                }
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }

                hammer_free_inode(ip);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);
        trans->flags |= HAMMER_TRANSF_NEWINODE;
        return (ip);
}
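
/*
 * Illustrative sketch (hypothetical): typical frontend pairing of
 * hammer_get_inode() with hammer_get_vnode().  The inode comes back
 * referenced but unlocked; the vnode association takes its own ref,
 * so the caller's ref can be dropped afterwards.  Error handling is
 * abbreviated.
 */
#if 0
        struct hammer_inode *ip;
        struct vnode *vp;
        int error;

        ip = hammer_get_inode(trans, dip, obj_id, asof, localization,
                              0, &error);
        if (ip) {
                error = hammer_get_vnode(ip, &vp);
                hammer_rel_inode(ip, 1);
        }
#endif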

/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a non-fake inode we return an error.  Only fake
         * inodes can be returned by this routine.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        *errorp = 0;
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Populate the dummy inode.  Leave everything zero'd out.
         *
         * (ip->ino_leaf and ip->ino_data)
         *
         * Make the dummy inode a FIFO object which most copy programs
         * will properly ignore.
         */
        ip->save_trunc_off = ip->ino_data.size;
        ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

        /*
         * Locate and assign the pseudofs management structure to
         * the inode.
         */
        if (dip && dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
        } else {
                ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
                                                errorp);
                *errorp = 0;    /* ignore ENOENT */
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         *
         * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        goto loop;
                }
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }
                hammer_free_inode(ip);
                ip = NULL;
        }
        trans->flags |= HAMMER_TRANSF_NEWINODE;
        return (ip);
}

/*
 * Return a referenced inode only if it is in our inode cache.
 *
 * Dummy inodes do not count.
 */
struct hammer_inode *
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
                  hammer_tid_t asof, u_int32_t localization)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;

        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY)
                        ip = NULL;
                else
                        hammer_ref(&ip->lock);
        }
        return(ip);
}
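
/*
 * Illustrative sketch (hypothetical): hammer_find_inode() only consults
 * the in-memory cache, so a NULL return means "not resident", not
 * "does not exist".  A caller that also needs the on-disk copy would
 * fall back to hammer_get_inode():
 */
#if 0
        ip = hammer_find_inode(trans, obj_id, asof, localization);
        if (ip == NULL)
                ip = hammer_get_inode(trans, NULL, obj_id, asof,
                                      localization, 0, &error);
#endif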

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred,
                    hammer_inode_t dip, const char *name, int namelen,
                    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;
        int error;
        int64_t namekey;
        u_int32_t dummy;

        hmp = trans->hmp;

        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        trans->flags |= HAMMER_TRANSF_NEWINODE;

        if (pfsm) {
                KKASSERT(pfsm->localization != 0);
                ip->obj_id = HAMMER_OBJID_ROOT;
                ip->obj_localization = pfsm->localization;
        } else {
                KKASSERT(dip != NULL);
                namekey = hammer_directory_namekey(dip, name, namelen, &dummy);
                ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
                ip->obj_localization = dip->obj_localization;
        }

        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY |
                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;

        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        /* ip->save_trunc_off = 0; (already zero) */
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        ip->ino_data.atime = trans->time;
        ip->ino_data.mtime = trans->time;
        ip->ino_data.size = 0;
        ip->ino_data.nlinks = 0;

        /*
         * A nohistory designator on the parent directory is inherited by
         * the child.  We will do this even for pseudo-fs creation... the
         * sysad can turn it off.
         */
        if (dip) {
                ip->ino_data.uflags = dip->ino_data.uflags &
                                      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
        }

        ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_leaf.base.localization = ip->obj_localization +
                                         HAMMER_LOCALIZE_INODE;
        ip->ino_leaf.base.obj_id = ip->obj_id;
        ip->ino_leaf.base.key = 0;
        ip->ino_leaf.base.create_tid = 0;
        ip->ino_leaf.base.delete_tid = 0;
        ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;

        /*
         * If we are running version 2 or greater directory entries are
         * inode-localized instead of data-localized.
         */
        if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
                if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        ip->ino_data.cap_flags |=
                                HAMMER_INODE_CAP_DIR_LOCAL_INO;
                }
        }

        /*
         * Setup the ".." pointer.  This only needs to be done for directories
         * but we do it for all objects as a recovery aid.
         */
        if (dip)
                ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
#if 0
        /*
         * The parent_obj_localization field only applies to pseudo-fs roots.
         * XXX this is no longer applicable, PFSs are no longer directly
         * tied into the parent's directory structure.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
            ip->obj_id == HAMMER_OBJID_ROOT) {
                ip->ino_data.ext.obj.parent_obj_localization =
                                                dip->obj_localization;
        }
#endif

        switch(ip->ino_leaf.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        if (dip) {
                xuid = hammer_to_unix_xid(&dip->ino_data.uid);
                xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
                                             xuid, cred, &vap->va_mode);
        } else {
                xuid = 0;
        }
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
        else
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
        else if (dip)
                ip->ino_data.gid = dip->ino_data.gid;

        hammer_ref(&ip->lock);

        if (pfsm) {
                ip->pfsm = pfsm;
                hammer_ref(&pfsm->lock);
                error = 0;
        } else if (dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
                error = 0;
        } else {
                ip->pfsm = hammer_load_pseudofs(trans,
                                                ip->obj_localization,
                                                &error);
                error = 0;      /* ignore ENOENT */
        }

        if (error) {
                hammer_free_inode(ip);
                ip = NULL;
        } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                panic("hammer_create_inode: duplicate obj_id %llx",
                      (long long)ip->obj_id);
                /* not reached */
                hammer_free_inode(ip);
        }
        *ipp = ip;
        return(error);
}
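
/*
 * Illustrative sketch (hypothetical): creating a regular file object
 * under directory dip.  A complete in-tree usage, creating a PFS root
 * directory, can be seen in hammer_mkroot_pseudofs() below.
 */
#if 0
        struct vattr vap;

        vattr_null(&vap);
        vap.va_mode = 0644;
        vap.va_type = VREG;
        error = hammer_create_inode(trans, &vap, cred, dip,
                                    name, namelen, NULL, &ip);
#endif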

/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
        struct hammer_mount *hmp;

        hmp = ip->hmp;
        KKASSERT(ip->lock.refs == 1);
        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        hammer_uncache_node(&ip->cache[2]);
        hammer_uncache_node(&ip->cache[3]);
        hammer_inode_wakereclaims(ip);
        if (ip->objid_cache)
                hammer_clear_objid(ip);
        --hammer_count_inodes;
        --hmp->count_inodes;
        if (ip->pfsm) {
                hammer_rel_pseudofs(hmp, ip->pfsm);
                ip->pfsm = NULL;
        }
        kfree(ip, hmp->m_inodes);
        ip = NULL;
}

/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
                     u_int32_t localization, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_inode_t ip;
        hammer_pseudofs_inmem_t pfsm;
        struct hammer_cursor cursor;
        int bytes;

retry:
        pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
        if (pfsm) {
                hammer_ref(&pfsm->lock);
                *errorp = 0;
                return(pfsm);
        }

        /*
         * PFS records are stored in the root inode (not the PFS root inode,
         * but the real root).  Avoid an infinite recursion if loading
         * the PFS for the real root.
         */
        if (localization) {
                ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
                                      HAMMER_MAX_TID,
                                      HAMMER_DEF_LOCALIZATION, 0, errorp);
        } else {
                ip = NULL;
        }

        pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
        pfsm->localization = localization;
        pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
        pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

        hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
        cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        if (ip)
                *errorp = hammer_ip_lookup(&cursor);
        else
                *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == 0) {
                *errorp = hammer_ip_resolve_data(&cursor);
                if (*errorp == 0) {
                        if (cursor.data->pfsd.mirror_flags &
                            HAMMER_PFSD_DELETED) {
                                *errorp = ENOENT;
                        } else {
                                bytes = cursor.leaf->data_len;
                                if (bytes > sizeof(pfsm->pfsd))
                                        bytes = sizeof(pfsm->pfsd);
                                bcopy(cursor.data, &pfsm->pfsd, bytes);
                        }
                }
        }
        hammer_done_cursor(&cursor);

        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_ref(&pfsm->lock);
        if (ip)
                hammer_rel_inode(ip, 0);
        if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
                kfree(pfsm, hmp->m_misc);
                goto retry;
        }
        return(pfsm);
}
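
/*
 * Illustrative sketch (hypothetical): hammer_load_pseudofs() always
 * returns a referenced pfsm (a default template on error), so every
 * load must eventually be paired with hammer_rel_pseudofs():
 */
#if 0
        hammer_pseudofs_inmem_t pfsm;
        int error;

        pfsm = hammer_load_pseudofs(trans, localization, &error);
        /* ... consult pfsm->pfsd ... */
        hammer_rel_pseudofs(trans->hmp, pfsm);
#endif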

/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
        struct hammer_cursor cursor;
        hammer_record_t record;
        hammer_inode_t ip;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = pfsm->localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        /*
         * Replace any in-memory version of the record.
         */
        error = hammer_ip_lookup(&cursor);
        if (error == 0 && hammer_cursor_inmem(&cursor)) {
                record = cursor.iprec;
                if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
                        KKASSERT(cursor.deadlk_rec == NULL);
                        hammer_ref(&record->lock);
                        cursor.deadlk_rec = record;
                        error = EDEADLK;
                } else {
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        error = 0;
                }
        }

        /*
         * Allocate replacement general record.  The backend flush will
         * delete any on-disk version of the record.
         */
        if (error == 0 || error == ENOENT) {
                record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
                record->type = HAMMER_MEM_RECORD_GENERAL;

                record->leaf.base.localization = ip->obj_localization +
                                                 HAMMER_LOCALIZE_MISC;
                record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
                record->leaf.base.key = pfsm->localization;
                record->leaf.data_len = sizeof(pfsm->pfsd);
                bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
                error = hammer_ip_add_record(trans, record);
        }
        hammer_done_cursor(&cursor);
        if (error == EDEADLK)
                goto retry;
        hammer_rel_inode(ip, 0);
        return(error);
}
/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
                       hammer_pseudofs_inmem_t pfsm)
{
        hammer_inode_t ip;
        struct vattr vap;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              pfsm->localization, 0, &error);
        if (ip == NULL) {
                vattr_null(&vap);
                vap.va_mode = 0755;
                vap.va_type = VDIR;
                error = hammer_create_inode(trans, &vap, cred,
                                            NULL, NULL, 0,
                                            pfsm, &ip);
                if (error == 0) {
                        ++ip->ino_data.nlinks;
                        hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
                }
        }
        if (ip)
                hammer_rel_inode(ip, 0);
        return(error);
}

/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static
int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
        int res;

        hammer_ref(&ip->lock);
        if (ip->lock.refs == 2 && ip->vp)
                vclean_unlocked(ip->vp);
        if (ip->lock.refs == 1 && ip->vp == NULL)
                res = 0;
        else
                res = -1;       /* stop, someone is using the inode */
        hammer_rel_inode(ip, 0);
        return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
        int res;
        int try;

        for (try = res = 0; try < 4; ++try) {
                res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
                                           hammer_inode_pfs_cmp,
                                           hammer_unload_pseudofs_callback,
                                           &localization);
                if (res == 0 && try > 1)
                        break;
                hammer_flusher_sync(trans->hmp);
        }
        if (res != 0)
                res = ENOTEMPTY;
        return(res);
}


/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
        hammer_unref(&pfsm->lock);
        if (pfsm->lock.refs == 0) {
                RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
                kfree(pfsm, hmp->m_misc);
        }
}

/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        hammer_record_t record;
        int error;
        int redirty;

retry:
        error = 0;

        /*
         * If the inode has a presence on-disk then locate it and mark
         * it deleted, setting DELONDISK.
         *
         * The record may or may not be physically deleted, depending on
         * the retention policy.
         */
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
            HAMMER_INODE_ONDISK) {
                hammer_normalize_cursor(cursor);
                cursor->key_beg.localization = ip->obj_localization +
                                               HAMMER_LOCALIZE_INODE;
                cursor->key_beg.obj_id = ip->obj_id;
                cursor->key_beg.key = 0;
                cursor->key_beg.create_tid = 0;
                cursor->key_beg.delete_tid = 0;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
                cursor->key_beg.obj_type = 0;
                cursor->asof = ip->obj_asof;
                cursor->flags &= ~HAMMER_CURSOR_INITMASK;
                cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
                cursor->flags |= HAMMER_CURSOR_BACKEND;

                error = hammer_btree_lookup(cursor);
                if (hammer_debug_inode)
                        kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

                if (error == 0) {
                        error = hammer_ip_delete_record(cursor, ip, trans->tid);
                        if (hammer_debug_inode)
                                kprintf(" error %d\n", error);
                        if (error == 0) {
                                ip->flags |= HAMMER_INODE_DELONDISK;
                        }
                        if (cursor->node)
                                hammer_cache_node(&ip->cache[0], cursor->node);
                }
                if (error == EDEADLK) {
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("IPDED %p %d\n", ip, error);
                        if (error == 0)
                                goto retry;
                }
        }

        /*
         * Ok, write out the initial record or a new record (after deleting
         * the old one), unless the DELETED flag is set.  This routine will
         * clear DELONDISK if it writes out a record.
         *
         * Update our inode statistics if this is the first application of
         * the inode on-disk.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
                /*
                 * Generate a record and write it to the media.  We clean-up
                 * the state before releasing so we do not have to set-up
                 * a flush_group.
                 */
                record = hammer_alloc_mem_record(ip, 0);
                record->type = HAMMER_MEM_RECORD_INODE;
                record->flush_state = HAMMER_FST_FLUSH;
                record->leaf = ip->sync_ino_leaf;
                record->leaf.base.create_tid = trans->tid;
                record->leaf.data_len = sizeof(ip->sync_ino_data);
                record->leaf.create_ts = trans->time32;
                record->data = (void *)&ip->sync_ino_data;
                record->flags |= HAMMER_RECF_INTERLOCK_BE;

                /*
                 * If this flag is set we cannot sync the new file size
                 * because we haven't finished related truncations.  The
                 * inode will be flushed in another flush group to finish
                 * the job.
                 */
                if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
                    ip->sync_ino_data.size != ip->ino_data.size) {
                        redirty = 1;
                        ip->sync_ino_data.size = ip->ino_data.size;
                } else {
                        redirty = 0;
                }

                for (;;) {
                        error = hammer_ip_sync_record_cursor(cursor, record);
                        if (hammer_debug_inode)
                                kprintf("GENREC %p rec %08x %d\n",
                                        ip, record->flags, error);
                        if (error != EDEADLK)
                                break;
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("GENREC reinit %d\n", error);
                        if (error)
                                break;
                }

                /*
                 * Note:  The record was never on the inode's record tree
                 * so just wave our hands importantly and destroy it.
                 */
                record->flags |= HAMMER_RECF_COMMITTED;
                record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
                record->flush_state = HAMMER_FST_IDLE;
                ++ip->rec_generation;
                hammer_rel_mem_record(record);

                /*
                 * Finish up.
                 */
                if (error == 0) {
                        if (hammer_debug_inode)
                                kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
                        ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                            HAMMER_INODE_ATIME |
                                            HAMMER_INODE_MTIME);
                        ip->flags &= ~HAMMER_INODE_DELONDISK;
                        if (redirty)
                                ip->sync_flags |= HAMMER_INODE_DDIRTY;

                        /*
                         * Root volume count of inodes
                         */
                        hammer_sync_lock_sh(trans);
                        if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
                                hammer_modify_volume_field(trans,
                                                           trans->rootvol,
                                                           vol0_stat_inodes);
                                ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans->rootvol);
                                ip->flags |= HAMMER_INODE_ONDISK;
                                if (hammer_debug_inode)
                                        kprintf("NOWONDISK %p\n", ip);
                        }
                        hammer_sync_unlock(trans);
                }
        }

        /*
         * If the inode has been destroyed, clean out any left-over flags
         * that may have been set by the frontend.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                    HAMMER_INODE_ATIME |
                                    HAMMER_INODE_MTIME);
        }
        return(error);
}

/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        int error;

retry:
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
            HAMMER_INODE_ONDISK) {
                return(0);
        }

        hammer_normalize_cursor(cursor);
        cursor->key_beg.localization = ip->obj_localization +
                                       HAMMER_LOCALIZE_INODE;
        cursor->key_beg.obj_id = ip->obj_id;
        cursor->key_beg.key = 0;
        cursor->key_beg.create_tid = 0;
        cursor->key_beg.delete_tid = 0;
        cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor->key_beg.obj_type = 0;
        cursor->asof = ip->obj_asof;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_ASOF;
        cursor->flags |= HAMMER_CURSOR_GET_LEAF;
        cursor->flags |= HAMMER_CURSOR_GET_DATA;
        cursor->flags |= HAMMER_CURSOR_BACKEND;

        error = hammer_btree_lookup(cursor);
        if (error == 0) {
                hammer_cache_node(&ip->cache[0], cursor->node);
                if (ip->sync_flags & HAMMER_INODE_MTIME) {
                        /*
                         * Updating MTIME requires an UNDO.  Just cover
                         * both atime and mtime.
                         */
                        hammer_sync_lock_sh(trans);
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                     HAMMER_ITIMES_BASE(&cursor->data->inode),
                                     HAMMER_ITIMES_BYTES);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        cursor->data->inode.mtime = ip->sync_ino_data.mtime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                        hammer_sync_unlock(trans);
                } else if (ip->sync_flags & HAMMER_INODE_ATIME) {
                        /*
                         * Updating atime only can be done in-place with
                         * no UNDO.
                         */
                        hammer_sync_lock_sh(trans);
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                             NULL, 0);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                        hammer_sync_unlock(trans);
                }
                ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
        }
        if (error == EDEADLK) {
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor,
                                           &ip->cache[0], ip);
                if (error == 0)
                        goto retry;
        }
        return(error);
}
1369
1370 /*
1371  * Release a reference on an inode, flush as requested.
1372  *
1373  * On the last reference we queue the inode to the flusher for its final
1374  * disposition.
1375  */
1376 void
1377 hammer_rel_inode(struct hammer_inode *ip, int flush)
1378 {
1379         /*hammer_mount_t hmp = ip->hmp;*/
1380
1381         /*
1382          * Handle disposition when dropping the last ref.
1383          */
1384         for (;;) {
1385                 if (ip->lock.refs == 1) {
1386                         /*
1387                          * Determine whether on-disk action is needed for
1388                          * the inode's final disposition.
1389                          */
1390                         KKASSERT(ip->vp == NULL);
1391                         hammer_inode_unloadable_check(ip, 0);
1392                         if (ip->flags & HAMMER_INODE_MODMASK) {
1393                                 hammer_flush_inode(ip, 0);
1394                         } else if (ip->lock.refs == 1) {
1395                                 hammer_unload_inode(ip);
1396                                 break;
1397                         }
1398                 } else {
1399                         if (flush)
1400                                 hammer_flush_inode(ip, 0);
1401
1402                         /*
1403                          * The inode still has multiple refs, try to drop
1404                          * one ref.
1405                          */
1406                         KKASSERT(ip->lock.refs >= 1);
1407                         if (ip->lock.refs > 1) {
1408                                 hammer_unref(&ip->lock);
1409                                 break;
1410                         }
1411                 }
1412         }
1413 }
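
/*
 * Typical usage (illustrative sketch, not compiled): a frontend path
 * takes a reference, dirties the inode, and releases it.  The release
 * path above queues the final flush once the last ref drops.
 */
#if 0
        hammer_ref(&ip->lock);
        hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
        /* ... operate on the inode ... */
        hammer_rel_inode(ip, 0);
#endif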
1414
1415 /*
1416  * Unload and destroy the specified inode.  Must be called with one remaining
1417  * reference.  The reference is disposed of.
1418  *
1419  * The inode must be completely clean.
1420  */
1421 static int
1422 hammer_unload_inode(struct hammer_inode *ip)
1423 {
1424         hammer_mount_t hmp = ip->hmp;
1425
1426         KASSERT(ip->lock.refs == 1,
1427                 ("hammer_unload_inode: %d refs\n", ip->lock.refs));
1428         KKASSERT(ip->vp == NULL);
1429         KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
1430         KKASSERT(ip->cursor_ip_refs == 0);
1431         KKASSERT(hammer_notlocked(&ip->lock));
1432         KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);
1433
1434         KKASSERT(RB_EMPTY(&ip->rec_tree));
1435         KKASSERT(TAILQ_EMPTY(&ip->target_list));
1436
1437         RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);
1438
1439         hammer_free_inode(ip);
1440         return(0);
1441 }
1442
1443 /*
1444  * Called during unmounting if a critical error occurred.  The in-memory
1445  * inode and all related structures are destroyed.
1446  *
1447  * If a critical error did not occur the unmount code calls the standard
1448  * release and asserts that the inode is gone.
1449  */
1450 int
1451 hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
1452 {
1453         hammer_record_t rec;
1454
1455         /*
1456          * Get rid of the inode's in-memory records, regardless of their
1457          * state, and clear the mod-mask.
1458          */
1459         while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
1460                 TAILQ_REMOVE(&ip->target_list, rec, target_entry);
1461                 rec->target_ip = NULL;
1462                 if (rec->flush_state == HAMMER_FST_SETUP)
1463                         rec->flush_state = HAMMER_FST_IDLE;
1464         }
1465         while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
1466                 if (rec->flush_state == HAMMER_FST_FLUSH)
1467                         --rec->flush_group->refs;
1468                 else
1469                         hammer_ref(&rec->lock);
1470                 KKASSERT(rec->lock.refs == 1);
1471                 rec->flush_state = HAMMER_FST_IDLE;
1472                 rec->flush_group = NULL;
1473                 rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
1474                 rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
1475                 ++ip->rec_generation;
1476                 hammer_rel_mem_record(rec);
1477         }
1478         ip->flags &= ~HAMMER_INODE_MODMASK;
1479         ip->sync_flags &= ~HAMMER_INODE_MODMASK;
1480         KKASSERT(ip->vp == NULL);
1481
1482         /*
1483          * Remove the inode from any flush group, force it idle.  FLUSH
1484          * and SETUP states have an inode ref.
1485          */
1486         switch(ip->flush_state) {
1487         case HAMMER_FST_FLUSH:
1488                 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
1489                 --ip->flush_group->refs;
1490                 ip->flush_group = NULL;
1491                 /* fall through */
1492         case HAMMER_FST_SETUP:
1493                 hammer_unref(&ip->lock);
1494                 ip->flush_state = HAMMER_FST_IDLE;
1495                 /* fall through */
1496         case HAMMER_FST_IDLE:
1497                 break;
1498         }
1499
1500         /*
1501          * There shouldn't be any associated vnode.  The unload needs at
1502          * least one ref; if we do have a vp, steal its ip ref.
1503          */
1504         if (ip->vp) {
1505                 kprintf("hammer_destroy_inode_callback: Unexpected "
1506                         "vnode association ip %p vp %p\n", ip, ip->vp);
1507                 ip->vp->v_data = NULL;
1508                 ip->vp = NULL;
1509         } else {
1510                 hammer_ref(&ip->lock);
1511         }
1512         hammer_unload_inode(ip);
1513         return(0);
1514 }
1515
1516 /*
1517  * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
1518  * the read-only flag for cached inodes.
1519  *
1520  * This routine is called from a RB_SCAN().
1521  */
1522 int
1523 hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
1524 {
1525         hammer_mount_t hmp = ip->hmp;
1526
1527         if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
1528                 ip->flags |= HAMMER_INODE_RO;
1529         else
1530                 ip->flags &= ~HAMMER_INODE_RO;
1531         return(0);
1532 }
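
/*
 * This callback is driven via RB_SCAN over the mount's inode tree,
 * roughly as sketched below (the actual caller lives in the mount
 * code, not in this file):
 */
#if 0
        RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
                hammer_reload_inode, NULL);
#endif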
1533
1534 /*
1535  * A transaction has modified an inode, requiring updates as specified by
1536  * the passed flags.
1537  *
1538  * HAMMER_INODE_DDIRTY: Inode data has been updated
1539  * HAMMER_INODE_XDIRTY: Dirty in-memory records
1540  * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
1541  * HAMMER_INODE_DELETED: Inode record/data must be deleted
1542  * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
1543  */
1544 void
1545 hammer_modify_inode(hammer_inode_t ip, int flags)
1546 {
1547         /*
1548          * A ronly value of 0 or 2 does not trigger the assertion;
1549          * 2 is a special error state.
1550          */
1551         KKASSERT(ip->hmp->ronly != 1 ||
1552                   (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY | 
1553                             HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
1554                             HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
1555         if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
1556                 ip->flags |= HAMMER_INODE_RSV_INODES;
1557                 ++ip->hmp->rsv_inodes;
1558         }
1559
1560         ip->flags |= flags;
1561 }
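
/*
 * Example usage (illustrative sketch, assuming an open transaction
 * 'trans'): a frontend path that touches the in-memory timestamps
 * flags the inode so the flusher knows it must be synchronized.
 */
#if 0
        ip->ino_data.mtime = trans->time;
        hammer_modify_inode(ip, HAMMER_INODE_MTIME);
#endif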
1562
1563 /*
1564  * Request that an inode be flushed.  This whole mess cannot block and may
1565  * recurse (if not synchronous).  Once requested HAMMER will attempt to
1566  * actively flush the inode until the flush can be done.
1567  *
1568  * The inode may already be flushing, or may be in a setup state.  We can
1569  * place the inode in a flushing state if it is currently idle and flag it
1570  * to reflush if it is currently flushing.
1571  *
1572  * Upon return, if the inode could not be flushed due to a setup
1573  * dependency it will be flushed automatically when the dependency
1574  * is satisfied.
1575  */
1576 void
1577 hammer_flush_inode(hammer_inode_t ip, int flags)
1578 {
1579         hammer_mount_t hmp;
1580         hammer_flush_group_t flg;
1581         int good;
1582
1583         /*
1584          * next_flush_group is the first flush group we can place the inode
1585          * in.  It may be NULL.  If it becomes full we append a new flush
1586          * group and make that the next_flush_group.
1587          */
1588         hmp = ip->hmp;
1589         while ((flg = hmp->next_flush_group) != NULL) {
1590                 KKASSERT(flg->running == 0);
1591                 if (flg->total_count + flg->refs <= hmp->undo_rec_limit)
1592                         break;
1593                 hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
1594                 hammer_flusher_async(hmp, flg);
1595         }
1596         if (flg == NULL) {
1597                 flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
1598                 hmp->next_flush_group = flg;
1599                 RB_INIT(&flg->flush_tree);
1600                 TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
1601         }
1602
1603         /*
1604          * Trivial 'nothing to flush' case.  If the inode is in a SETUP
1605          * state we have to put it back into an IDLE state so we can
1606          * drop the extra ref.
1607          *
1608          * If we have a parent dependency we must still fall through
1609          * so we can run it.
1610          */
1611         if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
1612                 if (ip->flush_state == HAMMER_FST_SETUP &&
1613                     TAILQ_EMPTY(&ip->target_list)) {
1614                         ip->flush_state = HAMMER_FST_IDLE;
1615                         hammer_rel_inode(ip, 0);
1616                 }
1617                 if (ip->flush_state == HAMMER_FST_IDLE)
1618                         return;
1619         }
1620
1621         /*
1622          * Our flush action will depend on the current state.
1623          */
1624         switch(ip->flush_state) {
1625         case HAMMER_FST_IDLE:
1626                 /*
1627                  * We have no dependencies and can flush immediately.  Some of
1628                  * our children may not be flushable, so we have to re-test
1629                  * with that additional knowledge.
1630                  */
1631                 hammer_flush_inode_core(ip, flg, flags);
1632                 break;
1633         case HAMMER_FST_SETUP:
1634                 /*
1635                  * Recurse upwards through dependencies via target_list
1636                  * and start their flusher actions going if possible.
1637                  *
1638                  * 'good' is our connectivity.  -1 means we have none and
1639                  * can't flush, 0 means there weren't any dependencies, and
1640                  * 1 means we have good connectivity.
1641                  */
1642                 good = hammer_setup_parent_inodes(ip, 0, flg);
1643
1644                 if (good >= 0) {
1645                         /*
1646                          * We can continue if good >= 0.  Determine how 
1647                          * many records under our inode can be flushed (and
1648                          * mark them).
1649                          */
1650                         hammer_flush_inode_core(ip, flg, flags);
1651                 } else {
1652                         /*
1653                          * Parent has no connectivity, tell it to flush
1654                          * us as soon as it does.
1655                          *
1656                          * The REFLUSH flag is also needed to trigger
1657                  * dependency wakeups.
1658                          */
1659                         ip->flags |= HAMMER_INODE_CONN_DOWN |
1660                                      HAMMER_INODE_REFLUSH;
1661                         if (flags & HAMMER_FLUSH_SIGNAL) {
1662                                 ip->flags |= HAMMER_INODE_RESIGNAL;
1663                                 hammer_flusher_async(ip->hmp, flg);
1664                         }
1665                 }
1666                 break;
1667         case HAMMER_FST_FLUSH:
1668                 /*
1669                  * We are already flushing, flag the inode to reflush
1670                  * if needed after it completes its current flush.
1671                  *
1672                  * The REFLUSH flag is also needed to trigger
1673                  * dependency wakeups.
1674                  */
1675                 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
1676                         ip->flags |= HAMMER_INODE_REFLUSH;
1677                 if (flags & HAMMER_FLUSH_SIGNAL) {
1678                         ip->flags |= HAMMER_INODE_RESIGNAL;
1679                         hammer_flusher_async(ip->hmp, flg);
1680                 }
1681                 break;
1682         }
1683 }
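
/*
 * Example usage (illustrative sketch, not compiled): fsync-style
 * callers request a signaled flush and then block until the inode
 * cycles back to an idle state.
 */
#if 0
        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
        hammer_wait_inode(ip);
#endif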
1684
1685 /*
1686  * Scan ip->target_list, which is a list of records owned by PARENTS of
1687  * our ip which reference our ip.
1688  *
1689  * XXX This is a huge mess of recursive code, but not one bit of it blocks
1690  *     so for now do not ref/deref the structures.  Note that if we use the
1691  *     ref/rel code later, the rel CAN block.
1692  */
1693 static int
1694 hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
1695                            hammer_flush_group_t flg)
1696 {
1697         hammer_record_t depend;
1698         int good;
1699         int r;
1700
1701         /*
1702          * If we hit our recursion limit and we have parent dependencies
1703          * we cannot continue.  Returning < 0 will cause us to be flagged
1704          * for reflush.  Returning -2 cuts off additional dependency checks
1705          * because they are likely to also hit the depth limit.
1706          *
1707          * We cannot return < 0 if there are no dependencies or there might
1708          * not be anything to wakeup (ip).
1709          */
1710         if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
1711                 kprintf("HAMMER Warning: depth limit reached on "
1712                         "setup recursion, inode %p %016llx\n",
1713                         ip, (long long)ip->obj_id);
1714                 return(-2);
1715         }
1716
1717         /*
1718          * Scan dependencies
1719          */
1720         good = 0;
1721         TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
1722                 r = hammer_setup_parent_inodes_helper(depend, depth, flg);
1723                 KKASSERT(depend->target_ip == ip);
1724                 if (r < 0 && good == 0)
1725                         good = -1;
1726                 if (r > 0)
1727                         good = 1;
1728
1729                 /*
1730                  * If we failed due to the recursion depth limit then stop
1731                  * now.
1732                  */
1733                 if (r == -2)
1734                         break;
1735         }
1736         return(good);
1737 }
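
/*
 * NOTE: Aggregation of 'good' in the scan above: any record returning
 * 1 forces good to 1 and it is never lowered again, a record returning
 * < 0 only drags good down to -1 while it is still 0, and -2
 * additionally terminates the remainder of the scan.
 */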
1738
1739 /*
1740  * This helper function takes a record representing the dependency between
1741  * the parent inode and child inode.
1742  *
1743  * record->ip           = parent inode
1744  * record->target_ip    = child inode
1745  * 
1746  * We are asked to recurse upwards and convert the record from SETUP
1747  * to FLUSH if possible.
1748  *
1749  * Return 1 if the record gives us connectivity.
1750  *
1751  * Return 0 if the record is not relevant.
1752  *
1753  * Return -1 if we can't resolve the dependency and there is no connectivity.
1754  */
1755 static int
1756 hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
1757                                   hammer_flush_group_t flg)
1758 {
1759         hammer_mount_t hmp;
1760         hammer_inode_t pip;
1761         int good;
1762
1763         KKASSERT(record->flush_state != HAMMER_FST_IDLE);
1764         pip = record->ip;
1765         hmp = pip->hmp;
1766
1767         /*
1768          * If the record is already flushing, is it in our flush group?
1769          *
1770          * If it is in our flush group but it is a general record or a 
1771          * delete-on-disk, it does not improve our connectivity (return 0),
1772          * and if the target inode is not trying to destroy itself we can't
1773          * allow the operation yet anyway (the second return -1).
1774          */
1775         if (record->flush_state == HAMMER_FST_FLUSH) {
1776                 /*
1777                  * If not in our flush group ask the parent to reflush
1778                  * us as soon as possible.
1779                  */
1780                 if (record->flush_group != flg) {
1781                         pip->flags |= HAMMER_INODE_REFLUSH;
1782                         record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1783                         return(-1);
1784                 }
1785
1786                 /*
1787                  * If in our flush group everything is already set up,
1788                  * just return whether the record will improve our
1789                  * visibility or not.
1790                  */
1791                 if (record->type == HAMMER_MEM_RECORD_ADD)
1792                         return(1);
1793                 return(0);
1794         }
1795
1796         /*
1797  * It must be a setup record.  Try to resolve the setup dependencies
1798          * by recursing upwards so we can place ip on the flush list.
1799          *
1800          * Limit ourselves to 20 levels of recursion to avoid blowing out
1801          * the kernel stack.  If we hit the recursion limit we can't flush
1802  * until the parent flushes.  The parent will flush independently
1803          * on its own and ultimately a deep recursion will be resolved.
1804          */
1805         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1806
1807         good = hammer_setup_parent_inodes(pip, depth + 1, flg);
1808
1809         /*
1810          * If good < 0 the parent has no connectivity and we cannot safely
1811          * flush the directory entry, which also means we can't flush our
1812          * ip.  Flag us for downward recursion once the parent's
1813          * connectivity is resolved.  Flag the parent for [re]flush or it
1814          * may not check for downward recursions.
1815          */
1816         if (good < 0) {
1817                 pip->flags |= HAMMER_INODE_REFLUSH;
1818                 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1819                 return(good);
1820         }
1821
1822         /*
1823          * We are go, place the parent inode in a flushing state so we can
1824          * place its record in a flushing state.  Note that the parent
1825          * may already be flushing.  The record must be in the same flush
1826          * group as the parent.
1827          */
1828         if (pip->flush_state != HAMMER_FST_FLUSH)
1829                 hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
1830         KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
1831         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1832
1833 #if 0
1834         if (record->type == HAMMER_MEM_RECORD_DEL &&
1835             (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
1836                 /*
1837                  * Regardless of flushing state we cannot sync this path if the
1838                  * record represents a delete-on-disk but the target inode
1839                  * is not ready to sync its own deletion.
1840                  *
1841                  * XXX need to count effective nlinks to determine whether
1842                  * the flush is ok, otherwise removing a hardlink will
1843                  * just leave the DEL record to rot.
1844                  */
1845                 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
1846                 return(-1);
1847         } else
1848 #endif
1849         if (pip->flush_group == flg) {
1850                 /*
1851                  * Because we have not calculated nlinks yet we can just
1852                  * set records to the flush state if the parent is in
1853                  * the same flush group as we are.
1854                  */
1855                 record->flush_state = HAMMER_FST_FLUSH;
1856                 record->flush_group = flg;
1857                 ++record->flush_group->refs;
1858                 hammer_ref(&record->lock);
1859
1860                 /*
1861                  * A general directory-add contributes to our visibility.
1862                  *
1863                  * Otherwise it is probably a directory-delete or 
1864                  * delete-on-disk record and does not contribute to our
1865                  * visibility (but we can still flush it).
1866                  */
1867                 if (record->type == HAMMER_MEM_RECORD_ADD)
1868                         return(1);
1869                 return(0);
1870         } else {
1871                 /*
1872                  * If the parent is not in our flush group we cannot
1873                  * flush this record yet, there is no visibility.
1874                  * We tell the parent to reflush and mark ourselves
1875                  * so the parent knows it should flush us too.
1876                  */
1877                 pip->flags |= HAMMER_INODE_REFLUSH;
1878                 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1879                 return(-1);
1880         }
1881 }
1882
1883 /*
1884  * This is the core routine placing an inode into the FST_FLUSH state.
1885  */
1886 static void
1887 hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
1888 {
1889         int go_count;
1890
1891         /*
1892          * Set flush state and prevent the flusher from cycling into
1893          * the next flush group.  Do not place the ip on the list yet.
1894          * Inodes not in the idle state get an extra reference.
1895          */
1896         KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
1897         if (ip->flush_state == HAMMER_FST_IDLE)
1898                 hammer_ref(&ip->lock);
1899         ip->flush_state = HAMMER_FST_FLUSH;
1900         ip->flush_group = flg;
1901         ++ip->hmp->flusher.group_lock;
1902         ++ip->hmp->count_iqueued;
1903         ++hammer_count_iqueued;
1904         ++flg->total_count;
1905
1906         /*
1907          * If the flush group reaches the autoflush limit we want to signal
1908          * the flusher.  This is particularly important for remove()s.
1909          */
1910         if (flg->total_count == hammer_autoflush)
1911                 flags |= HAMMER_FLUSH_SIGNAL;
1912
1913         /*
1914          * We need to be able to vfsync/truncate from the backend.
1915          */
1916         KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
1917         if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
1918                 ip->flags |= HAMMER_INODE_VHELD;
1919                 vref(ip->vp);
1920         }
1921
1922         /*
1923          * Figure out how many in-memory records we can actually flush
1924          * (not including inode meta-data, buffers, etc).
1925          */
1926         KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
1927         if (flags & HAMMER_FLUSH_RECURSION) {
1928                 /*
1929                  * If this is an upwards recursion we do not want to
1930                  * recurse down again!
1931                  */
1932                 go_count = 1;
1933 #if 0
1934         } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
1935                 /*
1936                  * No new records are added if we must complete a flush
1937                  * from a previous cycle, but we do have to move the records
1938                  * from the previous cycle to the current one.
1939                  */
1940 #if 0
1941                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1942                                    hammer_syncgrp_child_callback, NULL);
1943 #endif
1944                 go_count = 1;
1945 #endif
1946         } else {
1947                 /*
1948                  * Normal flush, scan records and bring them into the flush.
1949                  * Directory adds and deletes are usually skipped (they are
1950                  * grouped with the related inode rather than with the
1951                  * directory).
1952                  *
1953                  * go_count can be negative, which means the scan aborted
1954                  * due to the flush group being over-full and we should
1955                  * flush what we have.
1956                  */
1957                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1958                                    hammer_setup_child_callback, NULL);
1959         }
1960
1961         /*
1962          * This is a more involved test that includes go_count.  If we
1963          * can't flush, flag the inode and return.  If go_count is 0 we
1964                  * are unable to flush any records in our rec_tree and
1965          * must ignore the XDIRTY flag.
1966          */
1967         if (go_count == 0) {
1968                 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
1969                         --ip->hmp->count_iqueued;
1970                         --hammer_count_iqueued;
1971
1972                         --flg->total_count;
1973                         ip->flush_state = HAMMER_FST_SETUP;
1974                         ip->flush_group = NULL;
1975                         if (ip->flags & HAMMER_INODE_VHELD) {
1976                                 ip->flags &= ~HAMMER_INODE_VHELD;
1977                                 vrele(ip->vp);
1978                         }
1979
1980                         /*
1981                  * REFLUSH is needed to trigger dependency wakeups
1982                          * when an inode is in SETUP.
1983                          */
1984                         ip->flags |= HAMMER_INODE_REFLUSH;
1985                         if (flags & HAMMER_FLUSH_SIGNAL) {
1986                                 ip->flags |= HAMMER_INODE_RESIGNAL;
1987                                 hammer_flusher_async(ip->hmp, flg);
1988                         }
1989                         if (--ip->hmp->flusher.group_lock == 0)
1990                                 wakeup(&ip->hmp->flusher.group_lock);
1991                         return;
1992                 }
1993         }
1994
1995         /*
1996          * Snapshot the state of the inode for the backend flusher.
1997          *
1998          * We continue to retain save_trunc_off even when all truncations
1999          * have been resolved as an optimization to determine if we can
2000          * skip the B-Tree lookup for overwrite deletions.
2001          *
2002          * NOTE: The DELETING flag is a mod flag, but it is also sticky,
2003          * and stays in ip->flags.  Once set, it stays set until the
2004          * inode is destroyed.
2005          */
2006         if (ip->flags & HAMMER_INODE_TRUNCATED) {
2007                 KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
2008                 ip->sync_trunc_off = ip->trunc_off;
2009                 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
2010                 ip->flags &= ~HAMMER_INODE_TRUNCATED;
2011                 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
2012
2013                 /*
2014                  * The save_trunc_off used to cache whether the B-Tree
2015                  * holds any records past that point is not used until
2016                  * after the truncation has succeeded, so we can safely
2017                  * set it now.
2018                  */
2019                 if (ip->save_trunc_off > ip->sync_trunc_off)
2020                         ip->save_trunc_off = ip->sync_trunc_off;
2021         }
2022         ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
2023                            ~HAMMER_INODE_TRUNCATED);
2024         ip->sync_ino_leaf = ip->ino_leaf;
2025         ip->sync_ino_data = ip->ino_data;
2026         ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
2027 #ifdef DEBUG_TRUNCATE
2028         if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
2029                 kprintf("truncateS %016llx\n", (long long)ip->sync_trunc_off);
2030 #endif
2031
2032         /*
2033          * The flusher list inherits our inode and reference.
2034          */
2035         KKASSERT(flg->running == 0);
2036         RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip);
2037         if (--ip->hmp->flusher.group_lock == 0)
2038                 wakeup(&ip->hmp->flusher.group_lock);
2039
2040         if (flags & HAMMER_FLUSH_SIGNAL) {
2041                 hammer_flusher_async(ip->hmp, flg);
2042         }
2043 }
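
/*
 * NOTE: State transitions performed above:
 *
 *      IDLE  -> FLUSH  an extra inode ref is taken
 *      SETUP -> FLUSH  the ref taken on entering SETUP is retained
 *      FLUSH -> SETUP  only on the go_count == 0 backout path, when
 *                      nothing but XDIRTY remained and no records
 *                      could be flushed
 */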
2044
2045 /*
2046  * Callback for scan of ip->rec_tree.  Try to include each record in our
2047  * flush.  ip->flush_group has been set but the inode has not yet been
2048  * moved into a flushing state.
2049  *
2050  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
2051  * both inodes.
2052  *
2053  * We return 1 for any record placed or found in FST_FLUSH, which prevents
2054  * the caller from shortcutting the flush.
2055  */
2056 static int
2057 hammer_setup_child_callback(hammer_record_t rec, void *data)
2058 {
2059         hammer_flush_group_t flg;
2060         hammer_inode_t target_ip;
2061         hammer_inode_t ip;
2062         int r;
2063
2064         /*
2065          * Records deleted or committed by the backend are ignored.
2066          * Note that the flush detects deleted frontend records at
2067          * multiple points to deal with races.  This is just the first
2068          * line of defense.  The only time HAMMER_RECF_DELETED_FE cannot
2069          * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
2070          * messes up link-count calculations.
2071          *
2072          * NOTE: Don't get confused between record deletion and, say,
2073          * directory entry deletion.  The deletion of a directory entry
2074          * which is on-media has nothing to do with the record deletion
2075          * flags.
2076          */
2077         if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
2078                           HAMMER_RECF_COMMITTED)) {
2079                 if (rec->flush_state == HAMMER_FST_FLUSH) {
2080                         KKASSERT(rec->flush_group == rec->ip->flush_group);
2081                         r = 1;
2082                 } else {
2083                         r = 0;
2084                 }
2085                 return(r);
2086         }
2087
2088         /*
2089          * If the record is in an idle state it has no dependencies and
2090          * can be flushed.
2091          */
2092         ip = rec->ip;
2093         flg = ip->flush_group;
2094         r = 0;
2095
2096         switch(rec->flush_state) {
2097         case HAMMER_FST_IDLE:
2098                 /*
2099                  * The record has no setup dependency; we can flush it.
2100                  */
2101                 KKASSERT(rec->target_ip == NULL);
2102                 rec->flush_state = HAMMER_FST_FLUSH;
2103                 rec->flush_group = flg;
2104                 ++flg->refs;
2105                 hammer_ref(&rec->lock);
2106                 r = 1;
2107                 break;
2108         case HAMMER_FST_SETUP:
2109                 /*
2110                  * The record has a setup dependency.  These are typically
2111                  * directory entry adds and deletes.  Such entries will be
2112                  * flushed when their inodes are flushed so we do not
2113                  * usually have to add them to the flush here.  However,
2114                  * if the target_ip has set HAMMER_INODE_CONN_DOWN then
2115                  * it is asking us to flush this record (and it).
2116                  */
2117                 target_ip = rec->target_ip;
2118                 KKASSERT(target_ip != NULL);
2119                 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
2120
2121                 /*
2122                  * If the target IP is already flushing in our group
2123                  * we could associate the record, but target_ip has
2124                  * already synced ino_data to sync_ino_data and we
2125                  * would also have to adjust nlinks.   Plus there are
2126                  * ordering issues for adds and deletes.
2127                  *
2128                  * Reflush downward if this is an ADD, and upward if
2129                  * this is a DEL.
2130                  */
2131                 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
2132                         if (rec->type == HAMMER_MEM_RECORD_ADD)
2133                                 ip->flags |= HAMMER_INODE_REFLUSH;
2134                         else
2135                                 target_ip->flags |= HAMMER_INODE_REFLUSH;
2136                         break;
2137                 } 
2138
2139                 /*
2140                  * Target IP is not yet flushing.  This can get complex
2141                  * because we have to be careful about the recursion.
2142                  *
2143                  * Directories create an issue for us in that if a flush
2144                  * of a directory is requested the expectation is to flush
2145                  * any pending directory entries, but this will cause the
2146                  * related inodes to recursively flush as well.  We can't
2147                  * really defer the operation so just get as many as we
2148                  * can and let the REFLUSH/RESIGNAL logic below pick up the rest.
2149                  */
2150 #if 0
2151                 if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
2152                     (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
2153                         /*
2154                          * We aren't reclaiming and the target ip was not
2155                          * previously prevented from flushing due to this
2156                          * record dependency.  Do not flush this record.
2157                          */
2158                         /*r = 0;*/
2159                 } else
2160 #endif
2161                 if (flg->total_count + flg->refs >
2162                            ip->hmp->undo_rec_limit) {
2163                         /*
2164                          * Our flush group is over-full and we risk blowing
2165                          * out the UNDO FIFO.  Stop the scan, flush what we
2166                          * have, then reflush the directory.
2167                          *
2168                          * The directory may be forced through multiple
2169                          * flush groups before it can be completely
2170                          * flushed.
2171                          */
2172                         ip->flags |= HAMMER_INODE_RESIGNAL |
2173                                      HAMMER_INODE_REFLUSH;
2174                         r = -1;
2175                 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
2176                         /*
2177                          * If the target IP is not flushing we can force
2178                          * it to flush, even if it is unable to write out
2179                          * any of its own records we have at least one in
2180                          * hand that we CAN deal with.
2181                          */
2182                         rec->flush_state = HAMMER_FST_FLUSH;
2183                         rec->flush_group = flg;
2184                         ++flg->refs;
2185                         hammer_ref(&rec->lock);
2186                         hammer_flush_inode_core(target_ip, flg,
2187                                                 HAMMER_FLUSH_RECURSION);
2188                         r = 1;
2189                 } else {
2190                         /*
2191                          * General or delete-on-disk record.
2192                          *
2193                          * XXX this needs help.  If a delete-on-disk we could
2194                          * disconnect the target.  If the target has its own
2195                          * dependencies they really need to be flushed.
2196                          *
2197                          * XXX
2198                          */
2199                         rec->flush_state = HAMMER_FST_FLUSH;
2200                         rec->flush_group = flg;
2201                         ++flg->refs;
2202                         hammer_ref(&rec->lock);
2203                         hammer_flush_inode_core(target_ip, flg,
2204                                                 HAMMER_FLUSH_RECURSION);
2205                         r = 1;
2206                 }
2207                 break;
2208         case HAMMER_FST_FLUSH:
2209                 /* 
2210                  * The flush_group should already match.
2211                  */
2212                 KKASSERT(rec->flush_group == flg);
2213                 r = 1;
2214                 break;
2215         }
2216         return(r);
2217 }
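
/*
 * NOTE: The return values above feed the go_count calculation in
 * hammer_flush_inode_core().  1 means the record is (now) in
 * FST_FLUSH, 0 means it was skipped, and -1 aborts the scan so the
 * caller flushes what it already has and reflushes the directory in
 * a later flush group.
 */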
2218
2219 #if 0
2220 /*
2221  * This version just moves records already in a flush state to the new
2222  * flush group and that is it.
2223  */
2224 static int
2225 hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
2226 {
2227         hammer_inode_t ip = rec->ip;
2228
2229         switch(rec->flush_state) {
2230         case HAMMER_FST_FLUSH:
2231                 KKASSERT(rec->flush_group == ip->flush_group);
2232                 break;
2233         default:
2234                 break;
2235         }
2236         return(0);
2237 }
2238 #endif
2239
2240 /*
2241  * Wait for a previously queued flush to complete.
2242  *
2243  * If a critical error occurred we don't try to wait.
2244  */
2245 void
2246 hammer_wait_inode(hammer_inode_t ip)
2247 {
2248         hammer_flush_group_t flg;
2249
2250         flg = NULL;
2251         if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
2252                 while (ip->flush_state != HAMMER_FST_IDLE &&
2253                        (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
2254                         if (ip->flush_state == HAMMER_FST_SETUP)
2255                                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2256                         if (ip->flush_state != HAMMER_FST_IDLE) {
2257                                 ip->flags |= HAMMER_INODE_FLUSHW;
2258                                 tsleep(&ip->flags, 0, "hmrwin", 0);
2259                         }
2260                 }
2261         }
2262 }
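
/*
 * The FLUSHW flag and the tsleep() above pair with the wakeup()
 * issued from hammer_flush_inode_done().  The handshake, reduced to
 * its two sides (illustrative sketch, not compiled):
 */
#if 0
        /* waiter (this function) */
        ip->flags |= HAMMER_INODE_FLUSHW;
        tsleep(&ip->flags, 0, "hmrwin", 0);

        /* waker (flush completion path) */
        if (ip->flags & HAMMER_INODE_FLUSHW) {
                ip->flags &= ~HAMMER_INODE_FLUSHW;
                wakeup(&ip->flags);
        }
#endif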
2263
2264 /*
2265  * Called by the backend code when a flush has been completed.
2266  * The inode has already been removed from the flush list.
2267  *
2268  * A pipelined flush can occur, in which case we must re-enter the
2269  * inode on the list and re-copy its fields.
2270  */
2271 void
2272 hammer_flush_inode_done(hammer_inode_t ip, int error)
2273 {
2274         hammer_mount_t hmp;
2275         int dorel;
2276
2277         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
2278
2279         hmp = ip->hmp;
2280
2281         /*
2282          * Auto-reflush if the backend could not completely flush
2283          * the inode.  This fixes a case where a deferred buffer flush
2284          * could cause fsync to return early.
2285          */
2286         if (ip->sync_flags & HAMMER_INODE_MODMASK)
2287                 ip->flags |= HAMMER_INODE_REFLUSH;
2288
2289         /*
2290          * Merge left-over flags back into the frontend and fix the state.
2291          * Incomplete truncations are retained by the backend.
2292          */
2293         ip->error = error;
2294         ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
2295         ip->sync_flags &= HAMMER_INODE_TRUNCATED;
2296
2297         /*
2298          * The backend may have adjusted nlinks, so if the adjusted nlinks
2299          * does not match the frontend we set the frontend's DDIRTY flag again.
2300          */
2301         if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
2302                 ip->flags |= HAMMER_INODE_DDIRTY;
2303
2304         /*
2305          * Fix up the dirty buffer status.
2306          */
2307         if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
2308                 ip->flags |= HAMMER_INODE_BUFS;
2309         }
2310
2311         /*
2312          * Re-set the XDIRTY flag if some of the inode's in-memory records
2313          * could not be flushed.
2314          */
2315         KKASSERT((RB_EMPTY(&ip->rec_tree) &&
2316                   (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
2317                  (!RB_EMPTY(&ip->rec_tree) &&
2318                   (ip->flags & HAMMER_INODE_XDIRTY) != 0));
2319
2320         /*
2321          * Do not lose track of inodes which no longer have vnode
2322          * associations, otherwise they may never get flushed again.
2323          *
2324          * The reflush flag can be set superfluously, causing extra pain
2325          * for no reason.  If the inode is no longer modified it no longer
2326          * needs to be flushed.
2327          */
2328         if (ip->flags & HAMMER_INODE_MODMASK) {
2329                 if (ip->vp == NULL)
2330                         ip->flags |= HAMMER_INODE_REFLUSH;
2331         } else {
2332                 ip->flags &= ~HAMMER_INODE_REFLUSH;
2333         }
2334
2335         /*
2336          * Adjust the flush state.
2337          */
2338         if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2339                 /*
2340                  * We were unable to flush out all our records, leave the
2341                  * inode in a flush state and in the current flush group.
2342                  * The flush group will be re-run.
2343                  *
2344                  * This occurs if the UNDO block gets too full or there is
2345                  * too much dirty meta-data and allows the flusher to
2346                  * finalize the UNDO block and then re-flush.
2347                  */
2348                 ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
2349                 dorel = 0;
2350         } else {
2351                 /*
2352                  * Remove from the flush_group
2353                  */
2354                 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
2355                 ip->flush_group = NULL;
2356
2357                 /*
2358                  * Clean up the vnode ref and tracking counts.
2359                  */
2360                 if (ip->flags & HAMMER_INODE_VHELD) {
2361                         ip->flags &= ~HAMMER_INODE_VHELD;
2362                         vrele(ip->vp);
2363                 }
2364                 --hmp->count_iqueued;
2365                 --hammer_count_iqueued;
2366
2367                 /*
2368                  * And adjust the state.
2369                  */
2370                 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
2371                         ip->flush_state = HAMMER_FST_IDLE;
2372                         dorel = 1;
2373                 } else {
2374                         ip->flush_state = HAMMER_FST_SETUP;
2375                         dorel = 0;
2376                 }
2377
2378                 /*
2379                  * If the frontend is waiting for a flush to complete,
2380                  * wake it up.
2381                  */
2382                 if (ip->flags & HAMMER_INODE_FLUSHW) {
2383                         ip->flags &= ~HAMMER_INODE_FLUSHW;
2384                         wakeup(&ip->flags);
2385                 }
2386
2387                 /*
2388                  * If the frontend made more changes and requested another
2389                  * flush, then try to get it running.
2390                  *
2391                  * Reflushes are aborted when the inode is errored out.
2392                  */
2393                 if (ip->flags & HAMMER_INODE_REFLUSH) {
2394                         ip->flags &= ~HAMMER_INODE_REFLUSH;
2395                         if (ip->flags & HAMMER_INODE_RESIGNAL) {
2396                                 ip->flags &= ~HAMMER_INODE_RESIGNAL;
2397                                 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2398                         } else {
2399                                 hammer_flush_inode(ip, 0);
2400                         }
2401                 }
2402         }
2403
2404         /*
2405          * If we have no parent dependencies we can clear CONN_DOWN
2406          */
2407         if (TAILQ_EMPTY(&ip->target_list))
2408                 ip->flags &= ~HAMMER_INODE_CONN_DOWN;
2409
2410         /*
2411          * If the inode is now clean drop the space reservation.
2412          */
2413         if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
2414             (ip->flags & HAMMER_INODE_RSV_INODES)) {
2415                 ip->flags &= ~HAMMER_INODE_RSV_INODES;
2416                 --hmp->rsv_inodes;
2417         }
2418
2419         if (dorel)
2420                 hammer_rel_inode(ip, 0);
2421 }
2422
2423 /*
2424  * Called from hammer_sync_inode() to synchronize in-memory records
2425  * to the media.
2426  */
2427 static int
2428 hammer_sync_record_callback(hammer_record_t record, void *data)
2429 {
2430         hammer_cursor_t cursor = data;
2431         hammer_transaction_t trans = cursor->trans;
2432         hammer_mount_t hmp = trans->hmp;
2433         int error;
2434
2435         /*
2436          * Skip records that do not belong to the current flush.
2437          */
2438         ++hammer_stats_record_iterations;
2439         if (record->flush_state != HAMMER_FST_FLUSH)
2440                 return(0);
2441
2442 #if 1
2443         if (record->flush_group != record->ip->flush_group) {
2444                 kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group, record->ip->flush_group);
2445                 if (hammer_debug_critical)
2446                         Debugger("blah2");
2447                 return(0);
2448         }
2449 #endif
2450         KKASSERT(record->flush_group == record->ip->flush_group);
2451
2452         /*
2453          * Interlock the record using the BE flag.  Once BE is set the
2454          * frontend cannot change the state of FE.
2455          *
2456          * NOTE: If FE is set prior to us setting BE we still sync the
2457          * record out, but the flush completion code converts it to 
2458          * a delete-on-disk record instead of destroying it.
2459          */
2460         KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
2461         record->flags |= HAMMER_RECF_INTERLOCK_BE;
2462
2463         /*
2464          * The backend has already disposed of the record.
2465          */
2466         if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
2467                 error = 0;
2468                 goto done;
2469         }
2470
2471         /*
2472          * If the whole inode is being deleted, all on-disk records will
2473          * be deleted very soon, we can't sync any new records to disk
2474          * because they will be deleted in the same transaction they were
2475          * created in (delete_tid == create_tid), which will assert.
2476          *
2477          * XXX There may be a case with RECORD_ADD with DELETED_FE set
2478          * that we currently panic on.
2479          */
2480         if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
2481                 switch(record->type) {
2482                 case HAMMER_MEM_RECORD_DATA:
2483                         /*
2484                          * We don't have to do anything, if the record was
2485                          * committed the space will have been accounted for
2486                          * in the blockmap.
2487                          */
2488                         /* fall through */
2489                 case HAMMER_MEM_RECORD_GENERAL:
2490                         /*
2491                          * Set deleted-by-backend flag.  Do not set the
2492                          * backend committed flag, because we are throwing
2493                          * the record away.
2494                          */
2495                         record->flags |= HAMMER_RECF_DELETED_BE;
2496                         ++record->ip->rec_generation;
2497                         error = 0;
2498                         goto done;
2499                 case HAMMER_MEM_RECORD_ADD:
2500                         panic("hammer_sync_record_callback: illegal add "
2501                               "during inode deletion record %p", record);
2502                         break; /* NOT REACHED */
2503                 case HAMMER_MEM_RECORD_INODE:
2504                         panic("hammer_sync_record_callback: attempt to "
2505                               "sync inode record %p?", record);
2506                         break; /* NOT REACHED */
2507                 case HAMMER_MEM_RECORD_DEL:
2508                         /* 
2509                          * Follow through and issue the on-disk deletion
2510                          */
2511                         break;
2512                 }
2513         }
2514
2515         /*
2516          * If DELETED_FE is set special handling is needed for directory
2517  * entries.  Dependent pieces related to the directory entry may
2518          * have already been synced to disk.  If this occurs we have to
2519          * sync the directory entry and then change the in-memory record
2520          * from an ADD to a DELETE to cover the fact that it's been
2521          * deleted by the frontend.
2522          *
2523          * A directory delete covering record (MEM_RECORD_DEL) can never
2524          * be deleted by the frontend.
2525          *
2526          * Any other record type (aka DATA) can be deleted by the frontend.
2527          * XXX At the moment the flusher must skip it because there may
2528          * be another data record in the flush group for the same block,
2529          * meaning that some frontend data changes can leak into the backend's
2530          * synchronization point.
2531          */
2532         if (record->flags & HAMMER_RECF_DELETED_FE) {
2533                 if (record->type == HAMMER_MEM_RECORD_ADD) {
2534                         /*
2535                          * Convert a front-end deleted directory-add to
2536                          * a directory-delete entry later.
2537                          */
2538                         record->flags |= HAMMER_RECF_CONVERT_DELETE;
2539                 } else {
2540                         /*
2541                          * Dispose of the record (race case).  Mark as
2542                          * deleted by backend (and not committed).
2543                          */
2544                         KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
2545                         record->flags |= HAMMER_RECF_DELETED_BE;
2546                         ++record->ip->rec_generation;
2547                         error = 0;
2548                         goto done;
2549                 }
2550         }
2551
2552         /*
2553          * Assign the create_tid for new records.  Deletions already
2554          * have the record's entire key properly set up.
2555          */
2556         if (record->type != HAMMER_MEM_RECORD_DEL) {
2557                 record->leaf.base.create_tid = trans->tid;
2558                 record->leaf.create_ts = trans->time32;
2559         }
2560         for (;;) {
2561                 error = hammer_ip_sync_record_cursor(cursor, record);
2562                 if (error != EDEADLK)
2563                         break;
2564                 hammer_done_cursor(cursor);
2565                 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
2566                                            record->ip);
2567                 if (error)
2568                         break;
2569         }
2570         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
2571
2572         if (error)
2573                 error = -error;
2574 done:
2575         hammer_flush_record_done(record, error);
2576
2577         /*
2578          * Do partial finalization if we have built up too many dirty
2579          * buffers.  Otherwise a buffer cache deadlock can occur when
2580          * doing things like creating tens of thousands of tiny files.
2581          *
2582          * We must release our cursor lock to avoid a 3-way deadlock
2583          * due to the exclusive sync lock the finalizer must get.
2584          *
2585          * WARNING: See warnings in hammer_unlock_cursor() function.
2586          */
2587         if (hammer_flusher_meta_limit(hmp)) {
2588                 hammer_unlock_cursor(cursor);
2589                 hammer_flusher_finalize(trans, 0);
2590                 hammer_lock_cursor(cursor);
2591         }
2592
2593         return(error);
2594 }
2595
2596 /*
2597  * Backend function called by the flusher to sync an inode to media.
2598  */
2599 int
2600 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
2601 {
2602         struct hammer_cursor cursor;
2603         hammer_node_t tmp_node;
2604         hammer_record_t depend;
2605         hammer_record_t next;
2606         int error, tmp_error;
2607         u_int64_t nlinks;
2608
2609         if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
2610                 return(0);
2611
2612         error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2613         if (error)
2614                 goto done;
2615
2616         /*
2617          * Any directory records referencing this inode which are not in
2618          * our current flush group must adjust our nlink count for the
2619          * purposes of synchronization to disk.
2620          *
2621          * Records which are in our flush group can be unlinked from our
2622          * inode now, potentially allowing the inode to be physically
2623          * deleted.
2624          *
2625          * This cannot block.
2626          */
2627         nlinks = ip->ino_data.nlinks;
2628         next = TAILQ_FIRST(&ip->target_list);
2629         while ((depend = next) != NULL) {
2630                 next = TAILQ_NEXT(depend, target_entry);
2631                 if (depend->flush_state == HAMMER_FST_FLUSH &&
2632                     depend->flush_group == ip->flush_group) {
2633                         /*
2634                          * If this is an ADD that was deleted by the frontend
2635                          * the frontend nlinks count will have already been
2636                          * decremented, but the backend is going to sync its
2637                          * directory entry and must account for it.  The
2638                          * record will be converted to a delete-on-disk when
2639                          * it gets synced.
2640                          *
2641                          * If the ADD was not deleted by the frontend we
2642                          * can remove the dependency from our target_list.
2643                          */
2644                         if (depend->flags & HAMMER_RECF_DELETED_FE) {
2645                                 ++nlinks;
2646                         } else {
2647                                 TAILQ_REMOVE(&ip->target_list, depend,
2648                                              target_entry);
2649                                 depend->target_ip = NULL;
2650                         }
2651                 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
2652                         /*
2653                          * Not part of our flush group and not deleted by
2654                          * the front-end, adjust the link count synced to
2655                          * the media (undo what the frontend did when it
2656                          * queued the record).
2657                          */
2658                         KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
2659                         switch(depend->type) {
2660                         case HAMMER_MEM_RECORD_ADD:
2661                                 --nlinks;
2662                                 break;
2663                         case HAMMER_MEM_RECORD_DEL:
2664                                 ++nlinks;
2665                                 break;
2666                         default:
2667                                 break;
2668                         }
2669                 }
2670         }
2671
2672         /*
2673          * Set dirty if we had to modify the link count.
2674          */
2675         if (ip->sync_ino_data.nlinks != nlinks) {
2676                 KKASSERT((int64_t)nlinks >= 0);
2677                 ip->sync_ino_data.nlinks = nlinks;
2678                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
2679         }
2680
2681         /*
2682          * If there is a truncation queued, destroy any data past the (aligned)
2683          * truncation point.  Userland will have dealt with the buffer
2684          * containing the truncation point for us.
2685          *
2686          * We don't flush pending frontend data buffers until after we've
2687          * dealt with the truncation.
2688          */
2689         if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2690                 /*
2691                  * Interlock trunc_off.  The VOP front-end may continue to
2692                  * make adjustments to it while we are blocked.
2693                  */
2694                 off_t trunc_off;
2695                 off_t aligned_trunc_off;
2696                 int blkmask;
2697
2698                 trunc_off = ip->sync_trunc_off;
2699                 blkmask = hammer_blocksize(trunc_off) - 1;
2700                 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
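                /*
                 * The masking above rounds trunc_off up to the next block
                 * boundary.  For example, assuming a 16KB block size
                 * (blkmask 0x3FFF), a trunc_off of 20000 yields an
                 * aligned_trunc_off of 32768.
                 */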
2701
2702                 /*
2703                  * Delete any whole blocks on-media.  The front-end has
2704                  * already cleaned out any partial block and made it
2705                  * pending.  The front-end may have updated trunc_off
2706                  * while we were blocked so we only use sync_trunc_off.
2707                  *
2708          * This operation can blow out the buffer cache; EWOULDBLOCK
2709                  * means we were unable to complete the deletion.  The
2710                  * deletion will update sync_trunc_off in that case.
2711                  */
2712                 error = hammer_ip_delete_range(&cursor, ip,
2713                                                 aligned_trunc_off,
2714                                                 0x7FFFFFFFFFFFFFFFLL, 2);
2715                 if (error == EWOULDBLOCK) {
2716                         ip->flags |= HAMMER_INODE_WOULDBLOCK;
2717                         error = 0;
2718                         goto defer_buffer_flush;
2719                 }
2720
2721                 if (error)
2722                         goto done;
2723
2724                 /*
2725                  * Clear the truncation flag on the backend after we have
2726          * completed the deletions.  Backend data is now good again
2727                  * (including new records we are about to sync, below).
2728                  *
2729                  * Leave sync_trunc_off intact.  As we write additional
2730                  * records the backend will update sync_trunc_off.  This
2731                  * tells the backend whether it can skip the overwrite
2732                  * test.  This should work properly even when the backend
2733                  * writes full blocks where the truncation point straddles
2734                  * the block because the comparison is against the base
2735                  * offset of the record.
2736                  */
2737                 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2738                 /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
2739         } else {
2740                 error = 0;
2741         }
2742
2743         /*
2744          * Now sync related records.  These will typically be directory
2745          * entries, records tracking direct-writes, or delete-on-disk records.
2746          */
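        /*
         * Note on the error handling below (a reminder of the RB_SCAN
         * convention rather than new mechanism): the callback reports
         * failure by returning a negated errno, which aborts the scan
         * and is flipped back to a positive error here.
         */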
2747         if (error == 0) {
2748                 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2749                                     hammer_sync_record_callback, &cursor);
2750                 if (tmp_error < 0)
2751                         tmp_error = -tmp_error;
2752                 if (tmp_error)
2753                         error = tmp_error;
2754         }
2755         hammer_cache_node(&ip->cache[1], cursor.node);
2756
2757         /*
2758          * Re-seek for inode update, assuming our cache hasn't been ripped
2759          * out from under us.
2760          */
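        /*
         * The re-seek is opportunistic: if the cached node is gone or
         * has been deleted we simply leave the cursor where it is,
         * which is why any error from the lookup is discarded below.
         */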
2761         if (error == 0) {
2762                 tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
2763                 if (tmp_node) {
2764                         hammer_cursor_downgrade(&cursor);
2765                         hammer_lock_sh(&tmp_node->lock);
2766                         if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
2767                                 hammer_cursor_seek(&cursor, tmp_node, 0);
2768                         hammer_unlock(&tmp_node->lock);
2769                         hammer_rel_node(tmp_node);
2770                 }
2771                 error = 0;
2772         }
2773
2774         /*
2775          * If we are deleting the inode, the frontend had better not have
2776          * any active references on elements making up the inode.
2777          *
2778          * The call to hammer_ip_delete_clean() cleans up auxiliary records
2779          * but not DB or DATA records.  Those must have already been deleted
2780          * by the normal truncation mechanic.
2781          */
2782         if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
2783             RB_EMPTY(&ip->rec_tree) &&
2784             (ip->sync_flags & HAMMER_INODE_DELETING) &&
2785             (ip->flags & HAMMER_INODE_DELETED) == 0) {
2786                 int count1 = 0;
2787
2788                 error = hammer_ip_delete_clean(&cursor, ip, &count1);
2789                 if (error == 0) {
2790                         ip->flags |= HAMMER_INODE_DELETED;
2791                         ip->sync_flags &= ~HAMMER_INODE_DELETING;
2792                         ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2793                         KKASSERT(RB_EMPTY(&ip->rec_tree));
2794
2795                         /*
2796                          * Set delete_tid in both the frontend and backend
2797                          * copy of the inode record.  The DELETED flag handles
2798          * this; do not set RDIRTY.
2799                          */
2800                         ip->ino_leaf.base.delete_tid = trans->tid;
2801                         ip->sync_ino_leaf.base.delete_tid = trans->tid;
2802                         ip->ino_leaf.delete_ts = trans->time32;
2803                         ip->sync_ino_leaf.delete_ts = trans->time32;
2804
2805
2806                         /*
2807                          * Adjust the inode count in the volume header
2808                          */
2809                         hammer_sync_lock_sh(trans);
2810                         if (ip->flags & HAMMER_INODE_ONDISK) {
2811                                 hammer_modify_volume_field(trans,
2812                                                            trans->rootvol,
2813                                                            vol0_stat_inodes);
2814                                 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
2815                                 hammer_modify_volume_done(trans->rootvol);
2816                         }
2817                         hammer_sync_unlock(trans);
2818                 }
2819         }
2820
2821         if (error)
2822                 goto done;
2823         ip->sync_flags &= ~HAMMER_INODE_BUFS;
2824
2825 defer_buffer_flush:
2826         /*
2827          * Now update the inode's on-disk inode-data and/or on-disk record.
2828          * DELETED and ONDISK are managed only in ip->flags.
2829          *
2830          * In the case of a deferred buffer flush we still update the on-disk
2831          * inode to satisfy visibility requirements if there happen to be
2832          * directory dependencies.
2833          */
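        /*
         * Summary of the four DELETED/ONDISK combinations handled by
         * the switch below:
         *
         *      DELETED+ONDISK  - the delete flag covers it; clear any
         *                        leftover frontend mod flags.
         *      DELETED only    - never made it to media; throw away
         *                        remaining in-memory records.
         *      ONDISK only     - nothing extra to do.
         *      neither         - first sync; set create_tid and force
         *                        DDIRTY so an initial record is written.
         */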
2834         switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
2835         case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
2836                 /*
2837                  * If deleted and on-disk, don't set any additional flags.
2838                  * The delete flag takes care of things.
2839                  *
2840                  * Clear flags which may have been set by the frontend.
2841                  */
2842                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2843                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2844                                     HAMMER_INODE_DELETING);
2845                 break;
2846         case HAMMER_INODE_DELETED:
2847                 /*
2848                  * Take care of the case where a deleted inode was never
2849                  * flushed to the disk in the first place.
2850                  *
2851                  * Clear flags which may have been set by the frontend.
2852                  */
2853                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2854                                     HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2855                                     HAMMER_INODE_DELETING);
2856                 while (RB_ROOT(&ip->rec_tree)) {
2857                         hammer_record_t record = RB_ROOT(&ip->rec_tree);
2858                         hammer_ref(&record->lock);
2859                         KKASSERT(record->lock.refs == 1);
2860                         record->flags |= HAMMER_RECF_DELETED_BE;
2861                         ++record->ip->rec_generation;
2862                         hammer_rel_mem_record(record);
2863                 }
2864                 break;
2865         case HAMMER_INODE_ONDISK:
2866                 /*
2867                  * If already on-disk, do not set any additional flags.
2868                  */
2869                 break;
2870         default:
2871                 /*
2872                  * If not on-disk and not deleted, set DDIRTY to force
2873                  * an initial record to be written.
2874                  *
2875                  * Also set the create_tid in both the frontend and backend
2876                  * copy of the inode record.
2877                  */
2878                 ip->ino_leaf.base.create_tid = trans->tid;
2879                 ip->ino_leaf.create_ts = trans->time32;
2880                 ip->sync_ino_leaf.base.create_tid = trans->tid;
2881                 ip->sync_ino_leaf.create_ts = trans->time32;
2882                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
2883                 break;
2884         }
2885
2886         /*
2887          * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
2888          * is already on-disk the old record is marked as deleted.
2889          *
2890          * If DELETED is set hammer_update_inode() will delete the existing
2891          * record without writing out a new one.
2892          *
2893          * If *ONLY* the ITIMES flag is set we can update the record in-place.
2894          */
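        /*
         * Decision table for the chain below:
         *
         *      DELETED                         -> hammer_update_inode()
         *      ATIME/MTIME only, no DDIRTY     -> hammer_update_itimes()
         *      DDIRTY (possibly with times)    -> hammer_update_inode()
         */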
2895         if (ip->flags & HAMMER_INODE_DELETED) {
2896                 error = hammer_update_inode(&cursor, ip);
2897         } else 
2898         if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
2899             (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
2900                 error = hammer_update_itimes(&cursor, ip);
2901         } else
2902         if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
2903                 error = hammer_update_inode(&cursor, ip);
2904         }
2905 done:
2906         if (error) {
2907                 hammer_critical_error(ip->hmp, ip, error,
2908                                       "while syncing inode");
2909         }
2910         hammer_done_cursor(&cursor);
2911         return(error);
2912 }
2913
2914 /*
2915  * This routine is called when the OS is no longer actively referencing
2916  * the inode (but might still be keeping it cached), or when releasing
2917  * the last reference to an inode.
2918  *
2919  * At this point if the inode's nlinks count is zero we want to destroy
2920  * it, which may mean destroying it on-media too.
2921  */
2922 void
2923 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
2924 {
2925         struct vnode *vp;
2926
2927         /*
2928          * Set the DELETING flag when the link count drops to 0 and the
2929          * OS no longer has any opens on the inode.
2930          *
2931          * The backend will clear DELETING (a mod flag) and set DELETED
2932          * (a state flag) when it is actually able to perform the
2933          * operation.
2934          *
2935          * Don't reflag the deletion if the flusher is currently syncing
2936          * one that was already flagged.  A previously set DELETING flag
2937          * may bounce between ip->flags and ip->sync_flags until the
2938          * operation is completely done.
2939          */
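        /*
         * Setting TRUNCATED with trunc_off = 0 (below) causes the
         * eventual backend sync to destroy all of the inode's on-media
         * data via the normal truncation path handled earlier in the
         * inode sync code.
         */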
2940         if (ip->ino_data.nlinks == 0 &&
2941             ((ip->flags | ip->sync_flags) & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
2942                 ip->flags |= HAMMER_INODE_DELETING;
2943                 ip->flags |= HAMMER_INODE_TRUNCATED;
2944                 ip->trunc_off = 0;
2945                 vp = NULL;
2946                 if (getvp) {
2947                         if (hammer_get_vnode(ip, &vp) != 0)
2948                                 return;
2949                 }
2950
2951                 /*
2952                  * Final cleanup: toss buffers and zero the VM object size.
2953                  */
2954                 if (ip->vp) {
2955                         vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
2956                         vnode_pager_setsize(ip->vp, 0);
2957                 }
2958                 if (getvp) {
2959                         vput(vp);
2960                 }
2961         }
2962 }
2963
2964 /*
2965  * After potentially resolving a dependency the inode is tested
2966  * to determine whether it needs to be reflushed.
2967  */
2968 void
2969 hammer_test_inode(hammer_inode_t ip)
2970 {
2971         if (ip->flags & HAMMER_INODE_REFLUSH) {
2972                 ip->flags &= ~HAMMER_INODE_REFLUSH;
2973                 hammer_ref(&ip->lock);
2974                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
2975                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
2976                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2977                 } else {
2978                         hammer_flush_inode(ip, 0);
2979                 }
2980                 hammer_rel_inode(ip, 0);
2981         }
2982 }
2983
2984 /*
2985  * Clear the RECLAIM flag on an inode.  This occurs when the inode is
2986  * reassociated with a vp or just before it gets freed.
2987  *
2988  * Pipeline wakeups to threads blocked due to an excessive number of
2989  * detached inodes.  This typically occurs when atime updates accumulate
2990  * while scanning a directory tree.
2991  */
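/*
 * A hedged illustration of the pipelining below: while inode_reclaims
 * remains above half of hammer_limit_reclaim (commonly a few thousand),
 * each reclaimed inode wakes at most the first queued waiter; once the
 * count drops below that threshold the loop drains all remaining
 * waiters in one pass.
 */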
2992 static void
2993 hammer_inode_wakereclaims(hammer_inode_t ip)
2994 {
2995         struct hammer_reclaim *reclaim;
2996         hammer_mount_t hmp = ip->hmp;
2997
2998         if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
2999                 return;
3000
3001         --hammer_count_reclaiming;
3002         --hmp->inode_reclaims;
3003         ip->flags &= ~HAMMER_INODE_RECLAIM;
3004
3005         while ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
3006                 if (reclaim->count > 0 && --reclaim->count == 0) {
3007                         TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
3008                         wakeup(reclaim);
3009                 }
3010                 if (hmp->inode_reclaims > hammer_limit_reclaim / 2)
3011                         break;
3012         }
3013 }
3014
3015 /*
3016  * Set up our reclaim pipeline.  We only let so many detached (and dirty)
3017  * inodes build up before we start blocking.  This routine is called
3018  * if a new inode is created or an inode is loaded from media.
3019  *
3020  * When we block we don't care *which* inode has finished reclaiming,
3021  * as long as one does.
3022  */
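/*
 * Note on the wait below: tsleep() bounds the block to roughly one
 * second (hz ticks).  If no inode finishes reclaiming in that window,
 * reclaim.count is still non-zero and the waiter dequeues itself
 * instead of being removed by hammer_inode_wakereclaims().
 */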
3023 void
3024 hammer_inode_waitreclaims(hammer_mount_t hmp)
3025 {
3026         struct hammer_reclaim reclaim;
3027
3028         if (hmp->inode_reclaims < hammer_limit_reclaim)
3029                 return;
3030         reclaim.count = 1;
3031         TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
3032         tsleep(&reclaim, 0, "hmrrcm", hz);
3033         if (reclaim.count > 0)
3034                 TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
3035 }
3036
3037 #if 0
3038
3039 /*
3040  * XXX not used, doesn't work very well due to the large batching nature
3041  * of flushes.
3042  *
3043  * When a larger-than-normal backlog of inodes is sitting in the flusher,
3044  * enforce a general slowdown to let it catch up.  This routine is only
3045  * called on completion of a non-flusher-related transaction which
3046  * performed B-Tree node I/O.
3047  *
3048  * It is possible for the flusher to stall in a continuous load.
3049  * blogbench -i1000 -o seems to do a good job generating this sort of load.
3050  * If the flusher is unable to catch up the inode count can bloat until
3051  * we run out of kvm.
3052  *
3053  * This is a bit of a hack.
3054  */
3055 void
3056 hammer_inode_waithard(hammer_mount_t hmp)
3057 {
3058         /*
3059          * Hysteresis: stay in recovery until load falls below the entry thresholds.
3060          */
3061         if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
3062                 if (hmp->inode_reclaims < hammer_limit_reclaim / 2 &&
3063                     hmp->count_iqueued < hmp->count_inodes / 20) {
3064                         hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
3065                         return;
3066                 }
3067         } else {
3068                 if (hmp->inode_reclaims < hammer_limit_reclaim ||
3069                     hmp->count_iqueued < hmp->count_inodes / 10) {
3070                         return;
3071                 }
3072                 hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
3073         }
3074
3075         /*
3076          * Block for one flush cycle.
3077          */
3078         hammer_flusher_wait_next(hmp);
3079 }
3080
3081 #endif