HAMMER 53B/Many: Complete overhaul of strategy code, reservations, etc
sys/vfs/hammer/hammer_inode.c
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  * 
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  * 
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  * 
34  * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.66 2008/06/08 18:16:26 dillon Exp $
35  */
36
37 #include "hammer.h"
38 #include <vm/vm_extern.h>
39 #include <sys/buf.h>
40 #include <sys/buf2.h>
41
42 static int hammer_unload_inode(struct hammer_inode *ip);
43 static void hammer_flush_inode_core(hammer_inode_t ip, int flags);
44 static int hammer_setup_child_callback(hammer_record_t rec, void *data);
45 static int hammer_setup_parent_inodes(hammer_record_t record);
46
47 #ifdef DEBUG_TRUNCATE
48 extern struct hammer_inode *HammerTruncIp;
49 #endif
50
51 /*
52  * The kernel is not actively referencing this vnode but is still holding
53  * it cached.
54  *
55  * This is called from the frontend.
56  */
57 int
58 hammer_vop_inactive(struct vop_inactive_args *ap)
59 {
60         struct hammer_inode *ip = VTOI(ap->a_vp);
61
62         /*
63          * Degenerate case
64          */
65         if (ip == NULL) {
66                 vrecycle(ap->a_vp);
67                 return(0);
68         }
69
70         /*
71          * If the inode no longer has visibility in the filesystem and is
72          * fairly clean, try to recycle it immediately.  This can deadlock
73          * in vfsync() if we aren't careful.
74          * 
75          * Do not queue the inode to the flusher if we still have visibility,
76          * otherwise namespace calls such as chmod will unnecessarily generate
77          * multiple inode updates.
78          */
79         hammer_inode_unloadable_check(ip, 0);
80         if (ip->ino_data.nlinks == 0) {
81                 if (ip->flags & HAMMER_INODE_MODMASK)
82                         hammer_flush_inode(ip, 0);
83                 else
84                         vrecycle(ap->a_vp);
85         }
86         return(0);
87 }
88
89 /*
90  * Release the vnode association.  This is typically (but not always)
91  * the last reference on the inode.
92  *
93  * Once the association is lost we are on our own with regards to
94  * flushing the inode.
95  */
96 int
97 hammer_vop_reclaim(struct vop_reclaim_args *ap)
98 {
99         struct hammer_inode *ip;
100         struct vnode *vp;
101
102         vp = ap->a_vp;
103
104         if ((ip = vp->v_data) != NULL) {
105                 vp->v_data = NULL;
106                 ip->vp = NULL;
107                 hammer_rel_inode(ip, 1);
108         }
109         return(0);
110 }
111
112 /*
113  * Return a locked vnode for the specified inode.  The inode must be
114  * referenced but NOT LOCKED on entry and will remain referenced on
115  * return.
116  *
117  * Called from the frontend.
118  */
119 int
120 hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
121 {
122         struct vnode *vp;
123         int error = 0;
124
125         for (;;) {
126                 if ((vp = ip->vp) == NULL) {
127                         error = getnewvnode(VT_HAMMER, ip->hmp->mp, vpp, 0, 0);
128                         if (error)
129                                 break;
130                         hammer_lock_ex(&ip->lock);
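                            /*
                             * If another thread raced us and attached a
                             * vnode while we were allocating one, throw
                             * ours away and retry.
                             */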
131                         if (ip->vp != NULL) {
132                                 hammer_unlock(&ip->lock);
133                                 vp->v_type = VBAD;
134                                 vx_put(vp);
135                                 continue;
136                         }
137                         hammer_ref(&ip->lock);
138                         vp = *vpp;
139                         ip->vp = vp;
140                         vp->v_type =
141                                 hammer_get_vnode_type(ip->ino_data.obj_type);
142
143                         switch(ip->ino_data.obj_type) {
144                         case HAMMER_OBJTYPE_CDEV:
145                         case HAMMER_OBJTYPE_BDEV:
146                                 vp->v_ops = &ip->hmp->mp->mnt_vn_spec_ops;
147                                 addaliasu(vp, ip->ino_data.rmajor,
148                                           ip->ino_data.rminor);
149                                 break;
150                         case HAMMER_OBJTYPE_FIFO:
151                                 vp->v_ops = &ip->hmp->mp->mnt_vn_fifo_ops;
152                                 break;
153                         default:
154                                 break;
155                         }
156
157                         /*
158                          * Only mark as the root vnode if the ip is not
159                          * historical, otherwise the VFS cache will get
160                          * confused.  The other half of the special handling
161                          * is in hammer_vop_nlookupdotdot().
162                          */
163                         if (ip->obj_id == HAMMER_OBJID_ROOT &&
164                             ip->obj_asof == ip->hmp->asof) {
165                                 vp->v_flag |= VROOT;
166                         }
167
168                         vp->v_data = (void *)ip;
169                         /* vnode locked by getnewvnode() */
170                         /* make related vnode dirty if inode dirty? */
171                         hammer_unlock(&ip->lock);
172                         if (vp->v_type == VREG)
173                                 vinitvmio(vp, ip->ino_data.size);
174                         break;
175                 }
176
177                 /*
178                  * loop if the vget fails (aka races), or if the vp
179                  * no longer matches ip->vp.
180                  */
181                 if (vget(vp, LK_EXCLUSIVE) == 0) {
182                         if (vp == ip->vp)
183                                 break;
184                         vput(vp);
185                 }
186         }
187         *vpp = vp;
188         return(error);
189 }
190
191 /*
192  * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
193  * do not attach or detach the related vnode (use hammer_get_vnode() for
194  * that).
195  *
196  * The flags argument is only applied for newly created inodes, and only
197  * certain flags are inherited.
198  *
199  * Called from the frontend.
200  */
201 struct hammer_inode *
202 hammer_get_inode(hammer_transaction_t trans, struct hammer_node **cache,
203                  u_int64_t obj_id, hammer_tid_t asof, int flags, int *errorp)
204 {
205         hammer_mount_t hmp = trans->hmp;
206         struct hammer_inode_info iinfo;
207         struct hammer_cursor cursor;
208         struct hammer_inode *ip;
209
210         /*
211          * Determine if we already have an inode cached.  If we do then
212          * we are golden.
213          */
214         iinfo.obj_id = obj_id;
215         iinfo.obj_asof = asof;
216 loop:
217         ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
218         if (ip) {
219                 hammer_ref(&ip->lock);
220                 *errorp = 0;
221                 return(ip);
222         }
223
224         ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
225         ++hammer_count_inodes;
226         ip->obj_id = obj_id;
227         ip->obj_asof = iinfo.obj_asof;
228         ip->hmp = hmp;
229         ip->flags = flags & HAMMER_INODE_RO;
230         if (hmp->ronly)
231                 ip->flags |= HAMMER_INODE_RO;
232         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
233         RB_INIT(&ip->rec_tree);
234         TAILQ_INIT(&ip->target_list);
235
236         /*
237          * Locate the on-disk inode.
238          */
239 retry:
240         hammer_init_cursor(trans, &cursor, cache, NULL);
241         cursor.key_beg.localization = HAMMER_LOCALIZE_INODE;
242         cursor.key_beg.obj_id = ip->obj_id;
243         cursor.key_beg.key = 0;
244         cursor.key_beg.create_tid = 0;
245         cursor.key_beg.delete_tid = 0;
246         cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
247         cursor.key_beg.obj_type = 0;
248         cursor.asof = iinfo.obj_asof;
249         cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
250                        HAMMER_CURSOR_ASOF;
251
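            /*
             * If the B-Tree lookup below deadlocks (EDEADLK) the cursor
             * is torn down and the lookup is retried from scratch.
             */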
252         *errorp = hammer_btree_lookup(&cursor);
253         if (*errorp == EDEADLK) {
254                 hammer_done_cursor(&cursor);
255                 goto retry;
256         }
257
258         /*
259          * On success the B-Tree lookup will hold the appropriate
260          * buffer cache buffers and provide a pointer to the requested
261          * information.  Copy the information to the in-memory inode
262          * and cache the B-Tree node to improve future operations.
263          */
264         if (*errorp == 0) {
265                 ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
266                 ip->ino_data = cursor.data->inode;
267                 hammer_cache_node(cursor.node, &ip->cache[0]);
268                 if (cache)
269                         hammer_cache_node(cursor.node, cache);
270         }
271
272         /*
273          * On success load the inode's record and data and insert the
274          * inode into the B-Tree.  It is possible to race another lookup
275          * insertion of the same inode so deal with that condition too.
276          *
277          * The cursor's locked node interlocks against others creating and
278          * destroying ip while we were blocked.
279          */
280         if (*errorp == 0) {
281                 hammer_ref(&ip->lock);
282                 if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
283                         hammer_uncache_node(&ip->cache[0]);
284                         hammer_uncache_node(&ip->cache[1]);
285                         KKASSERT(ip->lock.refs == 1);
286                         --hammer_count_inodes;
287                         kfree(ip, M_HAMMER);
288                         hammer_done_cursor(&cursor);
289                         goto loop;
290                 }
291                 ip->flags |= HAMMER_INODE_ONDISK;
292         } else {
293                 /*
294                  * Do not panic on read-only accesses which fail, particularly
295                  * historical accesses where the snapshot might not have
296                  * complete connectivity.
297                  */
298                 if ((flags & HAMMER_INODE_RO) == 0) {
299                         kprintf("hammer_get_inode: failed ip %p obj_id %016llx cursor %p error %d\n",
300                                 ip, ip->obj_id, &cursor, *errorp);
301                         Debugger("x");
302                 }
303                 if (ip->flags & HAMMER_INODE_RSV_INODES) {
304                         ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
305                         --ip->hmp->rsv_inodes;
306                 }
307                 ip->hmp->rsv_databufs -= ip->rsv_databufs;
308                 ip->rsv_databufs = 0;                          /* sanity */
309
310                 --hammer_count_inodes;
311                 kfree(ip, M_HAMMER);
312                 ip = NULL;
313         }
314         hammer_done_cursor(&cursor);
315         return (ip);
316 }
317
318 /*
319  * Create a new filesystem object, returning the inode in *ipp.  The
320  * returned inode will be referenced.
321  *
322  * The inode is created in-memory.
323  */
324 int
325 hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
326                     struct ucred *cred, hammer_inode_t dip,
327                     struct hammer_inode **ipp)
328 {
329         hammer_mount_t hmp;
330         hammer_inode_t ip;
331         uid_t xuid;
332
333         hmp = trans->hmp;
334         ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
335         ++hammer_count_inodes;
336         ip->obj_id = hammer_alloc_objid(trans, dip);
337         KKASSERT(ip->obj_id != 0);
338         ip->obj_asof = hmp->asof;
339         ip->hmp = hmp;
340         ip->flush_state = HAMMER_FST_IDLE;
341         ip->flags = HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES;
342
343         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
344         RB_INIT(&ip->rec_tree);
345         TAILQ_INIT(&ip->target_list);
346
347         ip->ino_leaf.atime = trans->time;
348         ip->ino_data.mtime = trans->time;
349         ip->ino_data.size = 0;
350         ip->ino_data.nlinks = 0;
351
352         /*
353          * A nohistory designator on the parent directory is inherited by
354          * the child.
355          */
356         ip->ino_data.uflags = dip->ino_data.uflags &
357                               (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
358
359         ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
360         ip->ino_leaf.base.localization = HAMMER_LOCALIZE_INODE;
361         ip->ino_leaf.base.obj_id = ip->obj_id;
362         ip->ino_leaf.base.key = 0;
363         ip->ino_leaf.base.create_tid = 0;
364         ip->ino_leaf.base.delete_tid = 0;
365         ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
366         ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);
367
368         ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
369         ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
370         ip->ino_data.mode = vap->va_mode;
371         ip->ino_data.ctime = trans->time;
372         ip->ino_data.parent_obj_id = (dip) ? dip->ino_leaf.base.obj_id : 0;
373
374         switch(ip->ino_leaf.base.obj_type) {
375         case HAMMER_OBJTYPE_CDEV:
376         case HAMMER_OBJTYPE_BDEV:
377                 ip->ino_data.rmajor = vap->va_rmajor;
378                 ip->ino_data.rminor = vap->va_rminor;
379                 break;
380         default:
381                 break;
382         }
383
384         /*
385          * Calculate default uid/gid and overwrite with information from
386          * the vap.
387          */
388         xuid = hammer_to_unix_xid(&dip->ino_data.uid);
389         xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
390                                      &vap->va_mode);
391         ip->ino_data.mode = vap->va_mode;
392
393         if (vap->va_vaflags & VA_UID_UUID_VALID)
394                 ip->ino_data.uid = vap->va_uid_uuid;
395         else if (vap->va_uid != (uid_t)VNOVAL)
396                 hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
397         else
398                 hammer_guid_to_uuid(&ip->ino_data.uid, xuid);
399
400         if (vap->va_vaflags & VA_GID_UUID_VALID)
401                 ip->ino_data.gid = vap->va_gid_uuid;
402         else if (vap->va_gid != (gid_t)VNOVAL)
403                 hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
404         else
405                 ip->ino_data.gid = dip->ino_data.gid;
406
407         hammer_ref(&ip->lock);
408         if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
409                 hammer_unref(&ip->lock);
410                 panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
411         }
412         *ipp = ip;
413         return(0);
414 }
415
416 /*
417  * Called by hammer_sync_inode().
418  */
419 static int
420 hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
421 {
422         hammer_transaction_t trans = cursor->trans;
423         hammer_record_t record;
424         int error;
425
426 retry:
427         error = 0;
428
429         /*
430          * If the inode has a presence on-disk then locate it and mark
431          * it deleted, setting DELONDISK.
432          *
433          * The record may or may not be physically deleted, depending on
434          * the retention policy.
435          */
436         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
437             HAMMER_INODE_ONDISK) {
438                 hammer_normalize_cursor(cursor);
439                 cursor->key_beg.localization = HAMMER_LOCALIZE_INODE;
440                 cursor->key_beg.obj_id = ip->obj_id;
441                 cursor->key_beg.key = 0;
442                 cursor->key_beg.create_tid = 0;
443                 cursor->key_beg.delete_tid = 0;
444                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
445                 cursor->key_beg.obj_type = 0;
446                 cursor->asof = ip->obj_asof;
447                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
448                 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
449                 cursor->flags |= HAMMER_CURSOR_BACKEND;
450
451                 error = hammer_btree_lookup(cursor);
452                 if (hammer_debug_inode)
453                         kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
454                 if (error) {
455                         kprintf("error %d\n", error);
456                         Debugger("hammer_update_inode");
457                 }
458
459                 if (error == 0) {
460                         error = hammer_ip_delete_record(cursor, ip, trans->tid);
461                         if (hammer_debug_inode)
462                                 kprintf(" error %d\n", error);
463                         if (error && error != EDEADLK) {
464                                 kprintf("error %d\n", error);
465                                 Debugger("hammer_update_inode2");
466                         }
467                         if (error == 0) {
468                                 ip->flags |= HAMMER_INODE_DELONDISK;
469                         }
470                         if (cursor->node)
471                                 hammer_cache_node(cursor->node, &ip->cache[0]);
472                 }
473                 if (error == EDEADLK) {
474                         hammer_done_cursor(cursor);
475                         error = hammer_init_cursor(trans, cursor,
476                                                    &ip->cache[0], ip);
477                         if (hammer_debug_inode)
478                                 kprintf("IPDED %p %d\n", ip, error);
479                         if (error == 0)
480                                 goto retry;
481                 }
482         }
483
484         /*
485          * Ok, write out the initial record or a new record (after deleting
486          * the old one), unless the DELETED flag is set.  This routine will
487          * clear DELONDISK if it writes out a record.
488          *
489          * Update our inode statistics if this is the first application of
490          * the inode on-disk.
491          */
492         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
493                 /*
494                  * Generate a record and write it to the media
495                  */
496                 record = hammer_alloc_mem_record(ip, 0);
497                 record->type = HAMMER_MEM_RECORD_INODE;
498                 record->flush_state = HAMMER_FST_FLUSH;
499                 record->leaf = ip->sync_ino_leaf;
500                 record->leaf.base.create_tid = trans->tid;
501                 record->leaf.data_len = sizeof(ip->sync_ino_data);
502                 record->data = (void *)&ip->sync_ino_data;
503                 record->flags |= HAMMER_RECF_INTERLOCK_BE;
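                    /*
                     * Write the record to the media, reinitializing the
                     * cursor and retrying if the B-Tree operation
                     * deadlocks (EDEADLK).
                     */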
504                 for (;;) {
505                         error = hammer_ip_sync_record_cursor(cursor, record);
506                         if (hammer_debug_inode)
507                                 kprintf("GENREC %p rec %08x %d\n",      
508                                         ip, record->flags, error);
509                         if (error != EDEADLK)
510                                 break;
511                         hammer_done_cursor(cursor);
512                         error = hammer_init_cursor(trans, cursor,
513                                                    &ip->cache[0], ip);
514                         if (hammer_debug_inode)
515                                 kprintf("GENREC reinit %d\n", error);
516                         if (error)
517                                 break;
518                 }
519                 if (error) {
520                         kprintf("error %d\n", error);
521                         Debugger("hammer_update_inode3");
522                 }
523
524                 /*
525                  * The record isn't managed by the inode's record tree,
526                  * destroy it whether we succeed or fail.
527                  */
528                 record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
529                 record->flags |= HAMMER_RECF_DELETED_FE;
530                 record->flush_state = HAMMER_FST_IDLE;
531                 hammer_rel_mem_record(record);
532
533                 /*
534                  * Finish up.
535                  */
536                 if (error == 0) {
537                         if (hammer_debug_inode)
538                                 kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
539                         ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
540                                             HAMMER_INODE_ITIMES);
541                         ip->flags &= ~HAMMER_INODE_DELONDISK;
542
543                         /*
544                          * Root volume count of inodes
545                          */
546                         if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
547                                 hammer_modify_volume_field(trans,
548                                                            trans->rootvol,
549                                                            vol0_stat_inodes);
550                                 ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
551                                 hammer_modify_volume_done(trans->rootvol);
552                                 ip->flags |= HAMMER_INODE_ONDISK;
553                                 if (hammer_debug_inode)
554                                         kprintf("NOWONDISK %p\n", ip);
555                         }
556                 }
557         }
558
559         /*
560          * If the inode has been destroyed, clean out any left-over flags
561          * that may have been set by the frontend.
562          */
563         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) { 
564                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
565                                     HAMMER_INODE_ITIMES);
566         }
567         return(error);
568 }
569
570 /*
571  * Update only the itimes fields.  This is done non-historically.  The
572  * record is updated in-place on the disk.
573  */
574 static int
575 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
576 {
577         hammer_transaction_t trans = cursor->trans;
578         struct hammer_btree_leaf_elm *leaf;
579         int error;
580
581 retry:
582         error = 0;
583         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
584             HAMMER_INODE_ONDISK) {
585                 hammer_normalize_cursor(cursor);
586                 cursor->key_beg.localization = HAMMER_LOCALIZE_INODE;
587                 cursor->key_beg.obj_id = ip->obj_id;
588                 cursor->key_beg.key = 0;
589                 cursor->key_beg.create_tid = 0;
590                 cursor->key_beg.delete_tid = 0;
591                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
592                 cursor->key_beg.obj_type = 0;
593                 cursor->asof = ip->obj_asof;
594                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
595                 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
596                 cursor->flags |= HAMMER_CURSOR_BACKEND;
597
598                 error = hammer_btree_lookup(cursor);
599                 if (error) {
600                         kprintf("error %d\n", error);
601                         Debugger("hammer_update_itimes1");
602                 }
603                 if (error == 0) {
604                         /*
605                          * Do not generate UNDO records for atime updates.
606                          */
607                         leaf = cursor->leaf;
608                         hammer_modify_node(trans, cursor->node, 
609                                            &leaf->atime, sizeof(leaf->atime));
610                         leaf->atime = ip->sync_ino_leaf.atime;
611                         hammer_modify_node_done(cursor->node);
612                         /*rec->ino_mtime = ip->sync_ino_rec.ino_mtime;*/
613                         ip->sync_flags &= ~HAMMER_INODE_ITIMES;
614                         /* XXX recalculate crc */
615                         hammer_cache_node(cursor->node, &ip->cache[0]);
616                 }
617                 if (error == EDEADLK) {
618                         hammer_done_cursor(cursor);
619                         error = hammer_init_cursor(trans, cursor,
620                                                    &ip->cache[0], ip);
621                         if (error == 0)
622                                 goto retry;
623                 }
624         }
625         return(error);
626 }
627
628 /*
629  * Release a reference on an inode, flush as requested.
630  *
631  * On the last reference we queue the inode to the flusher for its final
632  * disposition.
633  */
634 void
635 hammer_rel_inode(struct hammer_inode *ip, int flush)
636 {
637         hammer_mount_t hmp = ip->hmp;
638
639         /*
640          * Handle disposition when dropping the last ref.
641          */
642         for (;;) {
643                 if (ip->lock.refs == 1) {
644                         /*
645                          * Determine whether on-disk action is needed for
646                          * the inode's final disposition.
647                          */
648                         KKASSERT(ip->vp == NULL);
649                         hammer_inode_unloadable_check(ip, 0);
650                         if (ip->flags & HAMMER_INODE_MODMASK) {
651                                 if (hmp->rsv_inodes > desiredvnodes) {
652                                         hammer_flush_inode(ip,
653                                                            HAMMER_FLUSH_SIGNAL);
654                                 } else {
655                                         hammer_flush_inode(ip, 0);
656                                 }
657                         } else if (ip->lock.refs == 1) {
658                                 hammer_unload_inode(ip);
659                                 break;
660                         }
661                 } else {
662                         if (flush)
663                                 hammer_flush_inode(ip, 0);
664
665                         /*
666                          * The inode still has multiple refs, try to drop
667                          * one ref.
668                          */
669                         KKASSERT(ip->lock.refs >= 1);
670                         if (ip->lock.refs > 1) {
671                                 hammer_unref(&ip->lock);
672                                 break;
673                         }
674                 }
675         }
676 }
677
678 /*
679  * Unload and destroy the specified inode.  Must be called with one remaining
680  * reference.  The reference is disposed of.
681  *
682  * This can only be called in the context of the flusher.
683  */
684 static int
685 hammer_unload_inode(struct hammer_inode *ip)
686 {
687         KASSERT(ip->lock.refs == 1,
688                 ("hammer_unload_inode: %d refs\n", ip->lock.refs));
689         KKASSERT(ip->vp == NULL);
690         KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
691         KKASSERT(ip->cursor_ip_refs == 0);
692         KKASSERT(ip->lock.lockcount == 0);
693         KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);
694
695         KKASSERT(RB_EMPTY(&ip->rec_tree));
696         KKASSERT(TAILQ_EMPTY(&ip->target_list));
697
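            /*
             * Remove the inode from the in-memory inode tree, drop any
             * cached B-Tree node references, and free the structure.
             */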
698         RB_REMOVE(hammer_ino_rb_tree, &ip->hmp->rb_inos_root, ip);
699
700         hammer_uncache_node(&ip->cache[0]);
701         hammer_uncache_node(&ip->cache[1]);
702         if (ip->objid_cache)
703                 hammer_clear_objid(ip);
704         --hammer_count_inodes;
705         kfree(ip, M_HAMMER);
706
707         return(0);
708 }
709
710 /*
711  * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
712  * the read-only flag for cached inodes.
713  *
714  * This routine is called from a RB_SCAN().
715  */
716 int
717 hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
718 {
719         hammer_mount_t hmp = ip->hmp;
720
721         if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
722                 ip->flags |= HAMMER_INODE_RO;
723         else
724                 ip->flags &= ~HAMMER_INODE_RO;
725         return(0);
726 }
727
728 /*
729  * A transaction has modified an inode, requiring updates as specified by
730  * the passed flags.
731  *
732  * HAMMER_INODE_DDIRTY: Inode data has been updated
733  * HAMMER_INODE_XDIRTY: Dirty in-memory records
734  * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
735  * HAMMER_INODE_DELETED: Inode record/data must be deleted
736  * HAMMER_INODE_ITIMES: mtime/atime has been updated
737  */
738 void
739 hammer_modify_inode(hammer_inode_t ip, int flags)
740 {
741         KKASSERT ((ip->flags & HAMMER_INODE_RO) == 0 ||
742                   (flags & (HAMMER_INODE_DDIRTY |
743                             HAMMER_INODE_XDIRTY | HAMMER_INODE_BUFS |
744                             HAMMER_INODE_DELETED | HAMMER_INODE_ITIMES)) == 0);
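            /*
             * Take out an inode reservation the first time a clean inode
             * is dirtied.  The reservation is dropped again in
             * hammer_flush_inode_done() once the inode goes clean.
             */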
745         if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
746                 ip->flags |= HAMMER_INODE_RSV_INODES;
747                 ++ip->hmp->rsv_inodes;
748         }
749
750         ip->flags |= flags;
751 }
752
753 /*
754  * Request that an inode be flushed.  This whole mess cannot block and may
755  * recurse.  Once requested HAMMER will attempt to actively flush it until
756  * the flush can be done.
757  *
758  * The inode may already be flushing, or may be in a setup state.  We can
759  * place the inode in a flushing state if it is currently idle and flag it
760  * to reflush if it is currently flushing.
761  */
762 void
763 hammer_flush_inode(hammer_inode_t ip, int flags)
764 {
765         hammer_record_t depend;
766         int r, good;
767
768         /*
769          * Trivial 'nothing to flush' case.  If the inode is in a SETUP
770          * state we have to put it back into an IDLE state so we can
771          * drop the extra ref.
772          */
773         if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
774                 if (ip->flush_state == HAMMER_FST_SETUP) {
775                         ip->flush_state = HAMMER_FST_IDLE;
776                         hammer_rel_inode(ip, 0);
777                 }
778                 return;
779         }
780
781         /*
782          * Our flush action will depend on the current state.
783          */
784         switch(ip->flush_state) {
785         case HAMMER_FST_IDLE:
786                 /*
787                  * We have no dependencies and can flush immediately.  Some of
788                  * our children may not be flushable, so we have to re-test
789                  * with that additional knowledge.
790                  */
791                 hammer_flush_inode_core(ip, flags);
792                 break;
793         case HAMMER_FST_SETUP:
794                 /*
795                  * Recurse upwards through dependencies via target_list
796                  * and start their flusher actions going if possible.
797                  *
798                  * 'good' is our connectivity.  -1 means we have none and
799                  * can't flush, 0 means there weren't any dependencies, and
800                  * 1 means we have good connectivity.
801                  */
802                 good = 0;
803                 TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
804                         r = hammer_setup_parent_inodes(depend);
805                         if (r < 0 && good == 0)
806                                 good = -1;
807                         if (r > 0)
808                                 good = 1;
809                 }
810
811                 /*
812                  * We can continue if good >= 0.  Determine how many records
813                  * under our inode can be flushed (and mark them).
814                  */
815                 if (good >= 0) {
816                         hammer_flush_inode_core(ip, flags);
817                 } else {
818                         ip->flags |= HAMMER_INODE_REFLUSH;
819                         if (flags & HAMMER_FLUSH_SIGNAL) {
820                                 ip->flags |= HAMMER_INODE_RESIGNAL;
821                                 hammer_flusher_async(ip->hmp);
822                         }
823                 }
824                 break;
825         default:
826                 /*
827                  * We are already flushing, flag the inode to reflush
828                  * if needed after it completes its current flush.
829                  */
830                 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
831                         ip->flags |= HAMMER_INODE_REFLUSH;
832                 if (flags & HAMMER_FLUSH_SIGNAL) {
833                         ip->flags |= HAMMER_INODE_RESIGNAL;
834                         hammer_flusher_async(ip->hmp);
835                 }
836                 break;
837         }
838 }
839
840 /*
841  * We are asked to recurse upwards and convert the record from SETUP
842  * to FLUSH if possible.  record->ip is a parent of the caller's inode,
843  * and record->target_ip is the caller's inode.
844  *
845  * Return 1 if the record gives us connectivity
846  *
847  * Return 0 if the record is not relevant 
848  *
849  * Return -1 if we can't resolve the dependency and there is no connectivity.
850  */
851 static int
852 hammer_setup_parent_inodes(hammer_record_t record)
853 {
854         hammer_mount_t hmp = record->ip->hmp;
855         hammer_record_t depend;
856         hammer_inode_t ip;
857         int r, good;
858
859         KKASSERT(record->flush_state != HAMMER_FST_IDLE);
860         ip = record->ip;
861
862         /*
863          * If the record is already flushing, is it in our flush group?
864          *
865          * If it is in our flush group but it is a general record or a 
866          * delete-on-disk, it does not improve our connectivity (return 0),
867          * and if the target inode is not trying to destroy itself we can't
868          * allow the operation yet anyway (the second return -1).
869          */
870         if (record->flush_state == HAMMER_FST_FLUSH) {
871                 if (record->flush_group != hmp->flusher_next) {
872                         ip->flags |= HAMMER_INODE_REFLUSH;
873                         return(-1);
874                 }
875                 if (record->type == HAMMER_MEM_RECORD_ADD)
876                         return(1);
877                 /* GENERAL or DEL */
878                 return(0);
879         }
880
881         /*
882          * It must be a setup record.  Try to resolve the setup dependencies
883          * by recursing upwards so we can place ip on the flush list.
884          */
885         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
886
887         good = 0;
888         TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
889                 r = hammer_setup_parent_inodes(depend);
890                 if (r < 0 && good == 0)
891                         good = -1;
892                 if (r > 0)
893                         good = 1;
894         }
895
896         /*
897          * We can't flush ip because it has no connectivity (XXX also check
898          * nlinks for pre-existing connectivity!).  Flag it so any resolution
899          * recurses back down.
900          */
901         if (good < 0) {
902                 ip->flags |= HAMMER_INODE_REFLUSH;
903                 return(good);
904         }
905
906         /*
907          * We are go, place the parent inode in a flushing state so we can
908          * place its record in a flushing state.  Note that the parent
909          * may already be flushing.  The record must be in the same flush
910          * group as the parent.
911          */
912         if (ip->flush_state != HAMMER_FST_FLUSH)
913                 hammer_flush_inode_core(ip, HAMMER_FLUSH_RECURSION);
914         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
915         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
916
917 #if 0
918         if (record->type == HAMMER_MEM_RECORD_DEL &&
919             (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
920                 /*
921                  * Regardless of flushing state we cannot sync this path if the
922                  * record represents a delete-on-disk but the target inode
923                  * is not ready to sync its own deletion.
924                  *
925                  * XXX need to count effective nlinks to determine whether
926                  * the flush is ok, otherwise removing a hardlink will
927                  * just leave the DEL record to rot.
928                  */
929                 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
930                 return(-1);
931         } else
932 #endif
933         if (ip->flush_group == ip->hmp->flusher_next) {
934                 /*
935                  * This is the record we wanted to synchronize.
936                  */
937                 record->flush_state = HAMMER_FST_FLUSH;
938                 record->flush_group = ip->flush_group;
939                 hammer_ref(&record->lock);
940                 if (record->type == HAMMER_MEM_RECORD_ADD)
941                         return(1);
942
943                 /*
944                  * A general or delete-on-disk record does not contribute
945                  * to our visibility.  We can still flush it, however.
946                  */
947                 return(0);
948         } else {
949                 /*
950                  * We couldn't resolve the dependencies, request that the
951                  * inode be flushed when the dependencies can be resolved.
952                  */
953                 ip->flags |= HAMMER_INODE_REFLUSH;
954                 return(-1);
955         }
956 }
957
958 /*
959  * This is the core routine placing an inode into the FST_FLUSH state.
960  */
961 static void
962 hammer_flush_inode_core(hammer_inode_t ip, int flags)
963 {
964         int go_count;
965
966         /*
967          * Set flush state and prevent the flusher from cycling into
968          * the next flush group.  Do not place the ip on the list yet.
969          * Inodes not in the idle state get an extra reference.
970          */
971         KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
972         if (ip->flush_state == HAMMER_FST_IDLE)
973                 hammer_ref(&ip->lock);
974         ip->flush_state = HAMMER_FST_FLUSH;
975         ip->flush_group = ip->hmp->flusher_next;
976         ++ip->hmp->flusher_lock;
977
978         /*
979          * We need to be able to vfsync/truncate from the backend.
980          */
981         KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
982         if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
983                 ip->flags |= HAMMER_INODE_VHELD;
984                 vref(ip->vp);
985         }
986
987         /*
988          * Figure out how many in-memory records we can actually flush
989          * (not including inode meta-data, buffers, etc).
990          */
991         if (flags & HAMMER_FLUSH_RECURSION) {
992                 go_count = 1;
993         } else {
994                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
995                                    hammer_setup_child_callback, NULL);
996         }
997
998         /*
999          * This is a more involved test that includes go_count.  If we
1000          * can't flush, flag the inode and return.  If go_count is 0 we
1001          * were unable to flush any records in our rec_tree and
1002          * must ignore the XDIRTY flag.
1003          */
1004         if (go_count == 0) {
1005                 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
1006                         ip->flags |= HAMMER_INODE_REFLUSH;
1007                         ip->flush_state = HAMMER_FST_SETUP;
1008                         if (ip->flags & HAMMER_INODE_VHELD) {
1009                                 ip->flags &= ~HAMMER_INODE_VHELD;
1010                                 vrele(ip->vp);
1011                         }
1012                         if (flags & HAMMER_FLUSH_SIGNAL) {
1013                                 ip->flags |= HAMMER_INODE_RESIGNAL;
1014                                 hammer_flusher_async(ip->hmp);
1015                         }
1016                         if (--ip->hmp->flusher_lock == 0)
1017                                 wakeup(&ip->hmp->flusher_lock);
1018                         return;
1019                 }
1020         }
1021
1022         /*
1023          * Snapshot the state of the inode for the backend flusher.
1024          *
1025          * The truncation must be retained in the frontend until after
1026          * we've actually performed the record deletion.
1027          *
1028          * NOTE: The DELETING flag is a mod flag, but it is also sticky,
1029          * and stays in ip->flags.  Once set, it stays set until the
1030          * inode is destroyed.
1031          */
1032         ip->sync_flags = (ip->flags & HAMMER_INODE_MODMASK);
1033         ip->sync_trunc_off = ip->trunc_off;
1034         ip->sync_ino_leaf = ip->ino_leaf;
1035         ip->sync_ino_data = ip->ino_data;
1036         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
1037         ip->flags &= ~HAMMER_INODE_MODMASK;
1038 #ifdef DEBUG_TRUNCATE
1039         if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
1040                 kprintf("truncateS %016llx\n", ip->sync_trunc_off);
1041 #endif
1042
1043         /*
1044          * The flusher list inherits our inode and reference.
1045          */
1046         TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
1047         if (--ip->hmp->flusher_lock == 0)
1048                 wakeup(&ip->hmp->flusher_lock);
1049
1050         if (flags & HAMMER_FLUSH_SIGNAL) {
1051                 hammer_flusher_async(ip->hmp);
1052         }
1053 }
1054
1055 /*
1056  * Callback for scan of ip->rec_tree.  Try to include each record in our
1057  * flush.  ip->flush_group has been set but the inode has not yet been
1058  * moved into a flushing state.
1059  *
1060  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
1061  * both inodes.
1062  *
1063  * We return 1 for any record placed or found in FST_FLUSH, which prevents
1064  * the caller from shortcutting the flush.
1065  */
1066 static int
1067 hammer_setup_child_callback(hammer_record_t rec, void *data)
1068 {
1069         hammer_inode_t target_ip;
1070         hammer_inode_t ip;
1071         int r;
1072
1073         /*
1074          * If the record has been deleted by the backend (it's being held
1075          * by the frontend in a race), just ignore it.
1076          */
1077         if (rec->flags & HAMMER_RECF_DELETED_BE)
1078                 return(0);
1079
1080         /*
1081          * If the record is in an idle state it has no dependencies and
1082          * can be flushed.
1083          */
1084         ip = rec->ip;
1085         r = 0;
1086
1087         switch(rec->flush_state) {
1088         case HAMMER_FST_IDLE:
1089                 /*
1090                  * Record has no setup dependency, we can flush it.
1091                  */
1092                 KKASSERT(rec->target_ip == NULL);
1093                 rec->flush_state = HAMMER_FST_FLUSH;
1094                 rec->flush_group = ip->flush_group;
1095                 hammer_ref(&rec->lock);
1096                 r = 1;
1097                 break;
1098         case HAMMER_FST_SETUP:
1099                 /*
1100                  * Record has a setup dependency.  Try to include the
1101                  * target ip in the flush. 
1102                  *
1103                  * We have to be careful here, if we do not do the right
1104                  * thing we can lose track of dirty inodes and the system
1105                  * will lockup trying to allocate buffers.
1106                  */
1107                 target_ip = rec->target_ip;
1108                 KKASSERT(target_ip != NULL);
1109                 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
1110                 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
1111                         /*
1112                          * If the target IP is already flushing in our group
1113                          * we are golden, otherwise make sure the target
1114                          * reflushes.
1115                          */
1116                         if (target_ip->flush_group == ip->flush_group) {
1117                                 rec->flush_state = HAMMER_FST_FLUSH;
1118                                 rec->flush_group = ip->flush_group;
1119                                 hammer_ref(&rec->lock);
1120                                 r = 1;
1121                         } else {
1122                                 target_ip->flags |= HAMMER_INODE_REFLUSH;
1123                         }
1124                 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
1125                         /*
1126                          * If the target IP is not flushing we can force
1127                          * it to flush; even if it is unable to write out
1128                          * any of its own records, we have at least one in
1129                          * hand that we CAN deal with.
1130                          */
1131                         rec->flush_state = HAMMER_FST_FLUSH;
1132                         rec->flush_group = ip->flush_group;
1133                         hammer_ref(&rec->lock);
1134                         hammer_flush_inode_core(target_ip,
1135                                                 HAMMER_FLUSH_RECURSION);
1136                         r = 1;
1137                 } else {
1138                         /*
1139                          * General or delete-on-disk record.
1140                          *
1141                          * XXX this needs help.  If a delete-on-disk we could
1142                          * disconnect the target.  If the target has its own
1143                          * dependencies they really need to be flushed.
1144                          *
1145                          * XXX
1146                          */
1147                         rec->flush_state = HAMMER_FST_FLUSH;
1148                         rec->flush_group = ip->flush_group;
1149                         hammer_ref(&rec->lock);
1150                         hammer_flush_inode_core(target_ip,
1151                                                 HAMMER_FLUSH_RECURSION);
1152                         r = 1;
1153                 }
1154                 break;
1155         case HAMMER_FST_FLUSH:
1156                 /* 
1157                  * Record already associated with a flush group.  It had
1158                  * better be ours.
1159                  */
1160                 KKASSERT(rec->flush_group == ip->flush_group);
1161                 r = 1;
1162                 break;
1163         }
1164         return(r);
1165 }
1166
1167 /*
1168  * Wait for a previously queued flush to complete
1169  */
1170 void
1171 hammer_wait_inode(hammer_inode_t ip)
1172 {
1173         while (ip->flush_state != HAMMER_FST_IDLE) {
1174                 if (ip->flush_state == HAMMER_FST_SETUP) {
1175                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1176                 } else {
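                             /*
                              * Already flushing: set FLUSHW and sleep.
                              * The backend wakes us via wakeup(&ip->flags)
                              * in hammer_flush_inode_done().
                              */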
1177                         ip->flags |= HAMMER_INODE_FLUSHW;
1178                         tsleep(&ip->flags, 0, "hmrwin", 0);
1179                 }
1180         }
1181 }
1182
1183 /*
1184  * Called by the backend code when a flush has been completed.
1185  * The inode has already been removed from the flush list.
1186  *
1187  * A pipelined flush can occur, in which case we must re-enter the
1188  * inode on the list and re-copy its fields.
1189  */
1190 void
1191 hammer_flush_inode_done(hammer_inode_t ip)
1192 {
1193         int dorel = 0;
1194
1195         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
1196
1197         /*
1198          * Merge left-over flags back into the frontend and fix the state.
1199          */
1200         ip->flags |= ip->sync_flags;
1201
1202         /*
1203          * The backend may have adjusted nlinks, so if the adjusted nlinks
1204          * does not match the frontend, set the frontend's DDIRTY flag again.
1205          */
1206         if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
1207                 ip->flags |= HAMMER_INODE_DDIRTY;
1208
1209         /*
1210          * Fix up the dirty buffer status.  IO completions will also
1211          * try to clean up rsv_databufs.
1212          */
1213         if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
1214                 ip->flags |= HAMMER_INODE_BUFS;
1215         } else {
1216                 ip->hmp->rsv_databufs -= ip->rsv_databufs;
1217                 ip->rsv_databufs = 0;
1218         }
1219
1220         /*
1221          * Re-set the XDIRTY flag if some of the inode's in-memory records
1222          * could not be flushed.
1223          */
1224         KKASSERT((RB_EMPTY(&ip->rec_tree) &&
1225                   (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
1226                  (!RB_EMPTY(&ip->rec_tree) &&
1227                   (ip->flags & HAMMER_INODE_XDIRTY) != 0));
1228
1229         /*
1230          * Do not lose track of inodes which no longer have vnode
1231          * associations, otherwise they may never get flushed again.
1232          */
1233         if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
1234                 ip->flags |= HAMMER_INODE_REFLUSH;
1235
1236         /*
1237          * Adjust flush_state.  The target state (idle or setup) shouldn't
1238          * be terribly important since we will reflush if we really need
1239          * to do anything. XXX
1240          */
1241         if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
1242                 ip->flush_state = HAMMER_FST_IDLE;
1243                 dorel = 1;
1244         } else {
1245                 ip->flush_state = HAMMER_FST_SETUP;
1246         }
1247
1248         /*
1249          * Clean up the vnode ref
1250          */
1251         if (ip->flags & HAMMER_INODE_VHELD) {
1252                 ip->flags &= ~HAMMER_INODE_VHELD;
1253                 vrele(ip->vp);
1254         }
1255
1256         /*
1257          * If the frontend made more changes and requested another flush,
1258          * then try to get it running.
1259          */
1260         if (ip->flags & HAMMER_INODE_REFLUSH) {
1261                 ip->flags &= ~HAMMER_INODE_REFLUSH;
1262                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
1263                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
1264                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1265                 } else {
1266                         hammer_flush_inode(ip, 0);
1267                 }
1268         }
1269
1270         /*
1271          * If the inode is now clean drop the space reservation.
1272          */
1273         if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1274             (ip->flags & HAMMER_INODE_RSV_INODES)) {
1275                 ip->flags &= ~HAMMER_INODE_RSV_INODES;
1276                 --ip->hmp->rsv_inodes;
1277         }
1278
1279         /*
1280          * Finally, if the frontend is waiting for a flush to complete,
1281          * wake it up.
1282          */
1283         if (ip->flush_state != HAMMER_FST_FLUSH) {
1284                 if (ip->flags & HAMMER_INODE_FLUSHW) {
1285                         ip->flags &= ~HAMMER_INODE_FLUSHW;
1286                         wakeup(&ip->flags);
1287                 }
1288         }
1289         if (dorel)
1290                 hammer_rel_inode(ip, 0);
1291 }
1292
1293 /*
1294  * Called from hammer_sync_inode() to synchronize in-memory records
1295  * to the media.
1296  */
1297 static int
1298 hammer_sync_record_callback(hammer_record_t record, void *data)
1299 {
1300         hammer_cursor_t cursor = data;
1301         hammer_transaction_t trans = cursor->trans;
1302         int error;
1303
1304         /*
1305          * Skip records that do not belong to the current flush.
1306          */
1307         ++hammer_stats_record_iterations;
1308         if (record->flush_state != HAMMER_FST_FLUSH)
1309                 return(0);
1310
1311 #if 1
1312         if (record->flush_group != record->ip->flush_group) {
1313                 kprintf("sync_record %p ip %p bad flush group %d %d\n", record, record->ip, record->flush_group ,record->ip->flush_group);
1314                 Debugger("blah2");
1315                 return(0);
1316         }
1317 #endif
1318         KKASSERT(record->flush_group == record->ip->flush_group);
1319
1320         /*
1321          * Interlock the record using the BE flag.  Once BE is set the
1322          * frontend cannot change the state of FE.
1323          *
1324          * NOTE: If FE is set prior to us setting BE we still sync the
1325          * record out, but the flush completion code converts it to 
1326          * a delete-on-disk record instead of destroying it.
1327          */
1328         KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
1329         record->flags |= HAMMER_RECF_INTERLOCK_BE;
1330
1331         /*
1332          * The backend may have already disposed of the record.
1333          */
1334         if (record->flags & HAMMER_RECF_DELETED_BE) {
1335                 error = 0;
1336                 goto done;
1337         }
1338
1339         /*
1340          * If the whole inode is being deleted, all on-disk records will
1341          * be deleted very soon, we can't sync any new records to disk
1342          * because they will be deleted in the same transaction they were
1343          * created in (delete_tid == create_tid), which will assert.
1344          *
1345          * XXX There may be a case with RECORD_ADD with DELETED_FE set
1346          * that we currently panic on.
1347          */
1348         if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
1349                 switch(record->type) {
1350                 case HAMMER_MEM_RECORD_DATA:
1351                         /*
1352                          * We don't have to do anything, if the record was
1353                          * committed the space will have been accounted for
1354                          * in the blockmap.
1355                          */
1356                         /* fall through */
1357                 case HAMMER_MEM_RECORD_GENERAL:
1358                         record->flags |= HAMMER_RECF_DELETED_FE;
1359                         record->flags |= HAMMER_RECF_DELETED_BE;
1360                         error = 0;
1361                         goto done;
1362                 case HAMMER_MEM_RECORD_ADD:
1363                         panic("hammer_sync_record_callback: illegal add "
1364                               "during inode deletion record %p", record);
1365                         break; /* NOT REACHED */
1366                 case HAMMER_MEM_RECORD_INODE:
1367                         panic("hammer_sync_record_callback: attempt to "
1368                               "sync inode record %p?", record);
1369                         break; /* NOT REACHED */
1370                 case HAMMER_MEM_RECORD_DEL:
1371                         /* 
1372                          * Follow through and issue the on-disk deletion
1373                          */
1374                         break;
1375                 }
1376         }
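
        /*
         * Hedged illustration of the constraint noted above (not original
         * code): a record both created and deleted in this flush would end
         * up on-media with delete_tid == create_tid, which the on-disk
         * deletion path asserts against.  As a hypothetical check:
         *
         *      KKASSERT(record->leaf.base.delete_tid !=
         *               record->leaf.base.create_tid);
         */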
1377
1378         /*
1379          * If DELETED_FE is set we may have already sent dependent pieces
1380          * to the disk and we must flush the record as if it hadn't been
1381          * deleted.  This creates a bit of a mess because we have to
1382          * have ip_sync_record convert the record to MEM_RECORD_DEL before
1383          * it inserts the B-Tree record.  Otherwise the media sync might
1384          * be visible to the frontend.
1385          */
1386         if (record->flags & HAMMER_RECF_DELETED_FE) {
1387                 if (record->type == HAMMER_MEM_RECORD_ADD) {
1388                         record->flags |= HAMMER_RECF_CONVERT_DELETE;
1389                 } else {
1390                         KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
1391                         return(0);
1392                 }
1393         }
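
        /*
         * Hedged sketch (exposition only, not original code) of how the
         * flush completion side could act on CONVERT_DELETE: instead of
         * destroying the interlocked ADD it is re-typed as a delete-on-disk
         * so the media copy is removed by a later flush.  The exact field
         * manipulation below is an assumption:
         *
         *      if (record->flags & HAMMER_RECF_CONVERT_DELETE) {
         *              record->type = HAMMER_MEM_RECORD_DEL;
         *              record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
         *      }
         */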
1394
1395         /*
1396          * Assign the create_tid for new records.  Deletions already
1397          * have the record's entire key properly set up.
1398          */
1399         if (record->type != HAMMER_MEM_RECORD_DEL)
1400                 record->leaf.base.create_tid = trans->tid;
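
        /*
         * Sync the record to media, retrying on EDEADLK: the cursor is torn
         * down and re-initialized from the inode's cursor cache before each
         * retry.  Any other error, or a failure to re-initialize the cursor,
         * terminates the loop.
         */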
1401         for (;;) {
1402                 error = hammer_ip_sync_record_cursor(cursor, record);
1403                 if (error != EDEADLK)
1404                         break;
1405                 hammer_done_cursor(cursor);
1406                 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
1407                                            record->ip);
1408                 if (error)
1409                         break;
1410         }
1411         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
1412
1413         if (error) {
1414                 error = -error;
1415                 if (error != -ENOSPC) {
1416                         kprintf("hammer_sync_record_callback: sync failed rec "
1417                                 "%p, error %d\n", record, error);
1418                         Debugger("sync failed rec");
1419                 }
1420         }
1421 done:
1422         hammer_flush_record_done(record, error);
1423         return(error);
1424 }
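
/*
 * Note on the sign convention, with a hedged usage sketch (not part of the
 * original file): the callback above hands a negated errno back so that a
 * failure aborts the RB_SCAN traversal, and the caller is expected to flip
 * the sign again before treating it as a normal errno, roughly:
 *
 *      tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
 *                          hammer_sync_record_callback, &cursor);
 *      if (tmp_error < 0)
 *              tmp_error = -tmp_error;
 */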
1425
1426 /*
1427  * XXX error handling
1428  */
1429 int
1430 hammer_sync_inode(hammer_inode_t ip)
1431 {
1432         struct hammer_transaction trans;
1433         struct hammer_cursor cursor;
1434         hammer_record_t depend;
1435         hammer_record_t next;
1436         int error, tmp_error;
1437         u_int64_t nlinks;
1438
1439         if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
1440                 return(0);
1441
1442         hammer_start_transaction_fls(&trans, ip->hmp);
1443         error = hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
1444         if (error)
1445                 goto done;
1446
1447         /*
1448          * Any directory records referencing this inode which are not in
1449          * our current flush group must adjust our nlink count for the
1450          * purposes of synchronization to disk.
1451          *
1452          * Records which are in our flush group can be unlinked from our
1453          * inode now, potentially allowing the inode to be physically
1454          * deleted.
1455          */
1456         nlinks = ip->ino_data.nlinks;
1457         next = TAILQ_FIRST(&ip->target_list);
1458         while ((depend = next) != NULL) {
1459                 next = TAILQ_NEXT(depend, target_entry);
1460                 if (depend->flush_state == HAMMER_FST_FLUSH &&
1461                     depend->flush_group == ip->hmp->flusher_act) {
1462                         /*
1463                          * If this is an ADD that was deleted by the frontend
1464                          * the frontend nlinks count will have already been
1465                          * decremented, but the backend is going to sync its
1466                          * directory entry and must account for it.  The
1467                          * record will be converted to a delete-on-disk when
1468                          * it gets synced.
1469                          *
1470                          * If the ADD was not deleted by the frontend we
1471                          * can remove the dependancy from our target_list.
1472                          * can remove the dependency from our target_list.
1473                         if (depend->flags & HAMMER_RECF_DELETED_FE) {
1474                                 ++nlinks;
1475                         } else {
1476                                 TAILQ_REMOVE(&ip->target_list, depend,
1477                                              target_entry);
1478                                 depend->target_ip = NULL;
1479                         }
1480                 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
1481                         /*
1482                          * Not part of our flush group
1483                          */
1484                         KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
1485                         switch(depend->type) {
1486                         case HAMMER_MEM_RECORD_ADD:
1487                                 --nlinks;
1488                                 break;
1489                         case HAMMER_MEM_RECORD_DEL:
1490                                 ++nlinks;
1491                                 break;
1492                         default:
1493                                 break;
1494                         }
1495                 }
1496         }
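
        /*
         * Worked example of the adjustment above (exposition only): if the
         * frontend just created a hard link, ip->ino_data.nlinks is already
         * 2, but the directory-entry ADD for the new link may sit in a later
         * flush group.  The ADD case subtracts one, so the inode record we
         * sync now carries nlinks == 1, matching what is actually on the
         * media.  A queued DEL in a later flush group works the other way.
         */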
1497
1498         /*
1499          * Set dirty if we had to modify the link count.
1500          */
1501         if (ip->sync_ino_data.nlinks != nlinks) {
1502                 KKASSERT((int64_t)nlinks >= 0);
1503                 ip->sync_ino_data.nlinks = nlinks;
1504                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1505         }
1506
1507 #if 0
1508         /*
1509          * XXX DISABLED FOR NOW.  With the new reservation support
1510          * we cannot resync pending data without confusing the hell
1511          * out of the in-memory record tree.
1512          */
1513         /*
1514          * Queue up as many dirty buffers as we can then set a flag to
1515          * cause any further BIOs to go to the alternative queue.
1516          */
1517         if (ip->flags & HAMMER_INODE_VHELD)
1518                 error = vfsync(ip->vp, MNT_NOWAIT, 1, NULL, NULL);
1519         ip->flags |= HAMMER_INODE_WRITE_ALT;
1520
1521         /*
1522          * The buffer cache may contain dirty buffers beyond the inode
1523          * state we copied from the frontend to the backend.  Because
1524          * we are syncing our buffer cache on the backend, resync
1525          * the truncation point and the file size so we don't wipe out
1526          * any data.
1527          *
1528          * Syncing the buffer cache on the frontend has serious problems
1529          * because it prevents us from passively queueing dirty inodes
1530          * to the backend (the BIO's could stall indefinitely).
1531          */
1532         if (ip->flags & HAMMER_INODE_TRUNCATED) {
1533                 ip->sync_trunc_off = ip->trunc_off;
1534                 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
1535         }
1536         if (ip->sync_ino_data.size != ip->ino_data.size) {
1537                 ip->sync_ino_data.size = ip->ino_data.size;
1538                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1539         }
1540 #endif
1541
1542         /*
1543          * If there is a truncation queued, destroy any data past the (aligned)
1544          * truncation point.  Userland will have dealt with the buffer
1545          * containing the truncation point for us.
1546          *
1547          * We don't flush pending frontend data buffers until after we've
1548          * dealt with the truncation.
1549          *
1550          * Don't bother if the inode is or has been deleted.
1551          */
1552         if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
1553                 /*
1554                  * Interlock trunc_off.  The VOP front-end may continue to
1555                  * make adjustments to it while we are blocked.
1556                  */
1557                 off_t trunc_off;
1558                 off_t aligned_trunc_off;
1559
1560                 trunc_off = ip->sync_trunc_off;
1561                 aligned_trunc_off = (trunc_off + HAMMER_BUFMASK) &
1562                                     ~HAMMER_BUFMASK64;
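
                /*
                 * Worked example (assuming the usual 16KB HAMMER buffers):
                 * a trunc_off of 0x5000 yields an aligned_trunc_off of
                 * 0x8000, so only whole buffers at and beyond 0x8000 are
                 * deleted on-media below; the partially truncated buffer
                 * was already handled by the frontend.
                 */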
1563
1564                 /*
1565                  * Delete any whole blocks on-media.  The front-end has
1566                  * already cleaned out any partial block and made it
1567                  * pending.  The front-end may have updated trunc_off
1568                  * while we were blocked so we only use sync_trunc_off.
1569                  */
1570                 error = hammer_ip_delete_range(&cursor, ip,
1571                                                 aligned_trunc_off,
1572                                                 0x7FFFFFFFFFFFFFFFLL, 1);
1573                 if (error)
1574                         Debugger("hammer_ip_delete_range errored");
1575
1576                 /*
1577                  * Clear the truncation flag on the backend after we have
1578                  * completed the deletions.  Backend data is now good again
1579                  * (including new records we are about to sync, below).
1580                  */
1581                 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
1582                 ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL;
1583         } else {
1584                 error = 0;
1585         }
1586
1587         /*
1588          * Now sync related records.  These will typically be directory
1589          * entries or delete-on-disk records.
1590          *
1591          * Not all records will be flushed, but clear XDIRTY anyway.  We
1592          * will set it again in the frontend hammer_flush_inode_done() 
1593          * if records remain.
1594          */
1595         if (error == 0) {
1596                 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1597                                     hammer_sync_record_callback, &cursor);
1598                 if (tmp_error < 0)
1599                         tmp_error = -tmp_error;
1600                 if (tmp_error)
1601                         error = tmp_error;
1602         }
1603
1604         /*
1605          * If we are deleting the inode the frontend had better not have
1606          * any active references on elements making up the inode.
1607          */
1608         if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
1609             RB_EMPTY(&ip->rec_tree) &&
1610             (ip->sync_flags & HAMMER_INODE_DELETING) &&
1611             (ip->flags & HAMMER_INODE_DELETED) == 0) {
1612                 int count1 = 0;
1613
1614                 ip->flags |= HAMMER_INODE_DELETED;
1615                 error = hammer_ip_delete_range_all(&cursor, ip, &count1);
1616                 if (error == 0) {
1617                         ip->sync_flags &= ~HAMMER_INODE_DELETING;
1618                         ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
1619                         KKASSERT(RB_EMPTY(&ip->rec_tree));
1620
1621                         /*
1622                          * Set delete_tid in both the frontend and backend
1623                          * copy of the inode record.  The DELETED flag handles
1624                          * this, do not set RDIRTY.
1625                          */
1626                         ip->ino_leaf.base.delete_tid = trans.tid;
1627                         ip->sync_ino_leaf.base.delete_tid = trans.tid;
1628
1629                         /*
1630                          * Adjust the inode count in the volume header
1631                          */
1632                         if (ip->flags & HAMMER_INODE_ONDISK) {
1633                                 hammer_modify_volume_field(&trans,
1634                                                            trans.rootvol,
1635                                                            vol0_stat_inodes);
1636                                 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
1637                                 hammer_modify_volume_done(trans.rootvol);
1638                         }
1639                 } else {
1640                         ip->flags &= ~HAMMER_INODE_DELETED;
1641                         Debugger("hammer_ip_delete_range_all errored");
1642                 }
1643         }
1644
1645         ip->sync_flags &= ~HAMMER_INODE_BUFS;
1646
1647         if (error)
1648                 Debugger("RB_SCAN errored");
1649
1650         /*
1651          * Now update the inode's on-disk inode-data and/or on-disk record.
1652          * DELETED and ONDISK are managed only in ip->flags.
1653          */
1654         switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
1655         case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
1656                 /*
1657                  * If deleted and on-disk, don't set any additional flags.
1658                  * The delete flag takes care of things.
1659                  *
1660                  * Clear flags which may have been set by the frontend.
1661                  */
1662                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY|
1663                                     HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
1664                                     HAMMER_INODE_DELETING);
1665                 break;
1666         case HAMMER_INODE_DELETED:
1667                 /*
1668                  * Take care of the case where a deleted inode was never
1669                  * flushed to the disk in the first place.
1670                  *
1671                  * Clear flags which may have been set by the frontend.
1672                  */
1673                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY|
1674                                     HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
1675                                     HAMMER_INODE_DELETING);
1676                 while (RB_ROOT(&ip->rec_tree)) {
1677                         hammer_record_t record = RB_ROOT(&ip->rec_tree);
1678                         hammer_ref(&record->lock);
1679                         KKASSERT(record->lock.refs == 1);
1680                         record->flags |= HAMMER_RECF_DELETED_FE;
1681                         record->flags |= HAMMER_RECF_DELETED_BE;
1682                         hammer_rel_mem_record(record);
1683                 }
1684                 break;
1685         case HAMMER_INODE_ONDISK:
1686                 /*
1687                  * If already on-disk, do not set any additional flags.
1688                  */
1689                 break;
1690         default:
1691                 /*
1692                  * If not on-disk and not deleted, set both dirty flags
1693                  * to force an initial record to be written.  Also set
1694                  * the create_tid for the inode.
1695                  *
1696                  * Set create_tid in both the frontend and backend
1697                  * copy of the inode record.
1698                  */
1699                 ip->ino_leaf.base.create_tid = trans.tid;
1700                 ip->sync_ino_leaf.base.create_tid = trans.tid;
1701                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1702                 break;
1703         }
1704
1705         /*
1706          * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
1707          * is already on-disk the old record is marked as deleted.
1708          *
1709          * If DELETED is set hammer_update_inode() will delete the existing
1710          * record without writing out a new one.
1711          *
1712          * If *ONLY* the ITIMES flag is set we can update the record in-place.
1713          */
1714         if (ip->flags & HAMMER_INODE_DELETED) {
1715                 error = hammer_update_inode(&cursor, ip);
1716         } else 
1717         if ((ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) ==
1718             HAMMER_INODE_ITIMES) {
1719                 error = hammer_update_itimes(&cursor, ip);
1720         } else
1721         if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) {
1722                 error = hammer_update_inode(&cursor, ip);
1723         }
1724         if (error)
1725                 Debugger("hammer_update_itimes/inode errored");
1726 done:
1727         /*
1728          * Save the TID we used to sync the inode with to make sure we
1729          * do not improperly reuse it.
1730          */
1731         hammer_done_cursor(&cursor);
1732         hammer_done_transaction(&trans);
1733         return(error);
1734 }
1735
1736 /*
1737  * This routine is called when the OS is no longer actively referencing
1738  * the inode (but might still be keeping it cached), or when releasing
1739  * the last reference to an inode.
1740  *
1741  * At this point if the inode's nlinks count is zero we want to destroy
1742  * it, which may mean destroying it on-media too.
1743  */
1744 void
1745 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
1746 {
1747         struct vnode *vp;
1748
1749         /*
1750          * Set the DELETING flag when the link count drops to 0 and the
1751          * OS no longer has any opens on the inode.
1752          *
1753          * The backend will clear DELETING (a mod flag) and set DELETED
1754          * (a state flag) when it is actually able to perform the
1755          * operation.
1756          */
1757         if (ip->ino_data.nlinks == 0 &&
1758             (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
1759                 ip->flags |= HAMMER_INODE_DELETING;
1760                 ip->flags |= HAMMER_INODE_TRUNCATED;
1761                 ip->trunc_off = 0;
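
                /*
                 * With trunc_off forced to 0 the backend truncation pass in
                 * hammer_sync_inode() will delete every on-media data record
                 * for the file before the inode record itself is disposed of.
                 */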
1762                 vp = NULL;
1763                 if (getvp) {
1764                         if (hammer_get_vnode(ip, &vp) != 0)
1765                                 return;
1766                 }
1767
1768                 /*
1769                  * Final cleanup
1770                  */
1771                 if (ip->vp) {
1772                         vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
1773                         vnode_pager_setsize(ip->vp, 0);
1774                 }
1775                 if (getvp) {
1776                         vput(vp);
1777                 }
1778         }
1779 }
1780
1781 /*
1782  * Re-test an inode when a dependency has gone away to see if we
1783  * can chain flush it.
1784  */
1785 void
1786 hammer_test_inode(hammer_inode_t ip)
1787 {
1788         if (ip->flags & HAMMER_INODE_REFLUSH) {
1789                 ip->flags &= ~HAMMER_INODE_REFLUSH;
1790                 hammer_ref(&ip->lock);
1791                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
1792                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
1793                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1794                 } else {
1795                         hammer_flush_inode(ip, 0);
1796                 }
1797                 hammer_rel_inode(ip, 0);
1798         }
1799 }
1800