1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  * 
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  * 
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  * 
34  * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.64 2008/06/03 18:47:25 dillon Exp $
35  */
36
37 #include "hammer.h"
38 #include <vm/vm_extern.h>
39 #include <sys/buf.h>
40 #include <sys/buf2.h>
41
42 static int hammer_unload_inode(struct hammer_inode *ip);
43 static void hammer_flush_inode_core(hammer_inode_t ip, int flags);
44 static int hammer_setup_child_callback(hammer_record_t rec, void *data);
45 static int hammer_setup_parent_inodes(hammer_record_t record);
46
47 /*
48  * The kernel is not actively referencing this vnode but is still holding
49  * it cached.
50  *
51  * This is called from the frontend.
52  */
53 int
54 hammer_vop_inactive(struct vop_inactive_args *ap)
55 {
56         struct hammer_inode *ip = VTOI(ap->a_vp);
57
58         /*
59          * Degenerate case
60          */
61         if (ip == NULL) {
62                 vrecycle(ap->a_vp);
63                 return(0);
64         }
65
66         /*
67          * If the inode no longer has visibility in the filesystem and is
68          * fairly clean, try to recycle it immediately.  This can deadlock
69          * in vfsync() if we aren't careful.
70          * 
71          * Do not queue the inode to the flusher if we still have visibility,
72          * otherwise namespace calls such as chmod will unnecessarily generate
73          * multiple inode updates.
74          */
75         hammer_inode_unloadable_check(ip, 0);
76         if (ip->ino_data.nlinks == 0) {
77                 if (ip->flags & HAMMER_INODE_MODMASK)
78                         hammer_flush_inode(ip, 0);
79                 else
80                         vrecycle(ap->a_vp);
81         }
82         return(0);
83 }
84
85 /*
86  * Release the vnode association.  This is typically (but not always)
87  * the last reference on the inode.
88  *
89  * Once the association is lost we are on our own with regards to
90  * flushing the inode.
91  */
92 int
93 hammer_vop_reclaim(struct vop_reclaim_args *ap)
94 {
95         struct hammer_inode *ip;
96         struct vnode *vp;
97
98         vp = ap->a_vp;
99
100         if ((ip = vp->v_data) != NULL) {
101                 vp->v_data = NULL;
102                 ip->vp = NULL;
103                 hammer_rel_inode(ip, 1);
104         }
105         return(0);
106 }
107
108 /*
109  * Return a locked vnode for the specified inode.  The inode must be
110  * referenced but NOT LOCKED on entry and will remain referenced on
111  * return.
112  *
113  * Called from the frontend.
114  */
115 int
116 hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
117 {
118         struct vnode *vp;
119         int error = 0;
120
121         for (;;) {
122                 if ((vp = ip->vp) == NULL) {
123                         error = getnewvnode(VT_HAMMER, ip->hmp->mp, vpp, 0, 0);
124                         if (error)
125                                 break;
126                         hammer_lock_ex(&ip->lock);
127                         if (ip->vp != NULL) {
128                                 hammer_unlock(&ip->lock);
129                                 vp->v_type = VBAD;
130                                 vx_put(vp);
131                                 continue;
132                         }
133                         hammer_ref(&ip->lock);
134                         vp = *vpp;
135                         ip->vp = vp;
136                         vp->v_type =
137                                 hammer_get_vnode_type(ip->ino_data.obj_type);
138
139                         switch(ip->ino_data.obj_type) {
140                         case HAMMER_OBJTYPE_CDEV:
141                         case HAMMER_OBJTYPE_BDEV:
142                                 vp->v_ops = &ip->hmp->mp->mnt_vn_spec_ops;
143                                 addaliasu(vp, ip->ino_data.rmajor,
144                                           ip->ino_data.rminor);
145                                 break;
146                         case HAMMER_OBJTYPE_FIFO:
147                                 vp->v_ops = &ip->hmp->mp->mnt_vn_fifo_ops;
148                                 break;
149                         default:
150                                 break;
151                         }
152
153                         /*
154                          * Only mark as the root vnode if the ip is not
155                          * historical, otherwise the VFS cache will get
156                          * confused.  The other half of the special handling
157                          * is in hammer_vop_nlookupdotdot().
158                          */
159                         if (ip->obj_id == HAMMER_OBJID_ROOT &&
160                             ip->obj_asof == ip->hmp->asof) {
161                                 vp->v_flag |= VROOT;
162                         }
163
164                         vp->v_data = (void *)ip;
165                         /* vnode locked by getnewvnode() */
166                         /* make related vnode dirty if inode dirty? */
167                         hammer_unlock(&ip->lock);
168                         if (vp->v_type == VREG)
169                                 vinitvmio(vp, ip->ino_data.size);
170                         break;
171                 }
172
173                 /*
174                  * loop if the vget fails (aka races), or if the vp
175                  * no longer matches ip->vp.
176                  */
177                 if (vget(vp, LK_EXCLUSIVE) == 0) {
178                         if (vp == ip->vp)
179                                 break;
180                         vput(vp);
181                 }
182         }
183         *vpp = vp;
184         return(error);
185 }
186
187 /*
188  * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
189  * do not attach or detach the related vnode (use hammer_get_vnode() for
190  * that).
191  *
192  * The flags argument is only applied for newly created inodes, and only
193  * certain flags are inherited.
194  *
195  * Called from the frontend.
196  */
197 struct hammer_inode *
198 hammer_get_inode(hammer_transaction_t trans, struct hammer_node **cache,
199                  u_int64_t obj_id, hammer_tid_t asof, int flags, int *errorp)
200 {
201         hammer_mount_t hmp = trans->hmp;
202         struct hammer_inode_info iinfo;
203         struct hammer_cursor cursor;
204         struct hammer_inode *ip;
205
206         /*
207          * Determine if we already have an inode cached.  If we do then
208          * we are golden.
209          */
210         iinfo.obj_id = obj_id;
211         iinfo.obj_asof = asof;
212 loop:
213         ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
214         if (ip) {
215                 hammer_ref(&ip->lock);
216                 *errorp = 0;
217                 return(ip);
218         }
219
220         ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
221         ++hammer_count_inodes;
222         ip->obj_id = obj_id;
223         ip->obj_asof = iinfo.obj_asof;
224         ip->hmp = hmp;
225         ip->flags = flags & HAMMER_INODE_RO;
226         if (hmp->ronly)
227                 ip->flags |= HAMMER_INODE_RO;
228         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
229         RB_INIT(&ip->rec_tree);
230         TAILQ_INIT(&ip->bio_list);
231         TAILQ_INIT(&ip->bio_alt_list);
232         TAILQ_INIT(&ip->target_list);
233
234         /*
235          * Locate the on-disk inode.
236          */
237 retry:
238         hammer_init_cursor(trans, &cursor, cache, NULL);
239         cursor.key_beg.localization = HAMMER_LOCALIZE_INODE;
240         cursor.key_beg.obj_id = ip->obj_id;
241         cursor.key_beg.key = 0;
242         cursor.key_beg.create_tid = 0;
243         cursor.key_beg.delete_tid = 0;
244         cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
245         cursor.key_beg.obj_type = 0;
246         cursor.asof = iinfo.obj_asof;
247         cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
248                        HAMMER_CURSOR_ASOF;
249
250         *errorp = hammer_btree_lookup(&cursor);
251         if (*errorp == EDEADLK) {
252                 hammer_done_cursor(&cursor);
253                 goto retry;
254         }
255
256         /*
257          * On success the B-Tree lookup will hold the appropriate
258          * buffer cache buffers and provide a pointer to the requested
259          * information.  Copy the information to the in-memory inode
260          * and cache the B-Tree node to improve future operations.
261          */
262         if (*errorp == 0) {
263                 ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
264                 ip->ino_data = cursor.data->inode;
265                 hammer_cache_node(cursor.node, &ip->cache[0]);
266                 if (cache)
267                         hammer_cache_node(cursor.node, cache);
268         }
269
270         /*
271          * On success load the inode's record and data and insert the
272          * inode into the in-memory inode tree.  It is possible to race
273          * another lookup inserting the same inode, so deal with that too.
274          *
275          * The cursor's locked node interlocks against others creating and
276          * destroying ip while we were blocked.
277          */
278         if (*errorp == 0) {
279                 hammer_ref(&ip->lock);
280                 if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
281                         hammer_uncache_node(&ip->cache[0]);
282                         hammer_uncache_node(&ip->cache[1]);
283                         KKASSERT(ip->lock.refs == 1);
284                         --hammer_count_inodes;
285                         kfree(ip, M_HAMMER);
286                         hammer_done_cursor(&cursor);
287                         goto loop;
288                 }
289                 ip->flags |= HAMMER_INODE_ONDISK;
290         } else {
291                 /*
292                  * Do not panic on read-only accesses which fail, particularly
293                  * historical accesses where the snapshot might not have
294                  * complete connectivity.
295                  */
296                 if ((flags & HAMMER_INODE_RO) == 0) {
297                         kprintf("hammer_get_inode: failed ip %p obj_id %016llx cursor %p error %d\n",
298                                 ip, ip->obj_id, &cursor, *errorp);
299                         Debugger("x");
300                 }
301                 if (ip->flags & HAMMER_INODE_RSV_INODES) {
302                         ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
303                         --ip->hmp->rsv_inodes;
304                 }
305                 ip->hmp->rsv_databufs -= ip->rsv_databufs;
306                 ip->rsv_databufs = 0;                          /* sanity */
307
308                 --hammer_count_inodes;
309                 kfree(ip, M_HAMMER);
310                 ip = NULL;
311         }
312         hammer_done_cursor(&cursor);
313         return (ip);
314 }
315
316 /*
317  * Create a new filesystem object, returning the inode in *ipp.  The
318  * returned inode will be referenced.
319  *
320  * The inode is created in-memory.
321  */
322 int
323 hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
324                     struct ucred *cred, hammer_inode_t dip,
325                     struct hammer_inode **ipp)
326 {
327         hammer_mount_t hmp;
328         hammer_inode_t ip;
329         uid_t xuid;
330
331         hmp = trans->hmp;
332         ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
333         ++hammer_count_inodes;
334         ip->obj_id = hammer_alloc_objid(trans, dip);
335         KKASSERT(ip->obj_id != 0);
336         ip->obj_asof = hmp->asof;
337         ip->hmp = hmp;
338         ip->flush_state = HAMMER_FST_IDLE;
339         ip->flags = HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES;
340
341         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
342         RB_INIT(&ip->rec_tree);
343         TAILQ_INIT(&ip->bio_list);
344         TAILQ_INIT(&ip->bio_alt_list);
345         TAILQ_INIT(&ip->target_list);
346
347         ip->ino_leaf.atime = trans->time;
348         ip->ino_data.mtime = trans->time;
349         ip->ino_data.size = 0;
350         ip->ino_data.nlinks = 0;
351
352         /*
353          * A nohistory designator on the parent directory is inherited by
354          * the child.
355          */
356         ip->ino_data.uflags = dip->ino_data.uflags &
357                               (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
358
359         ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
360         ip->ino_leaf.base.localization = HAMMER_LOCALIZE_INODE;
361         ip->ino_leaf.base.obj_id = ip->obj_id;
362         ip->ino_leaf.base.key = 0;
363         ip->ino_leaf.base.create_tid = 0;
364         ip->ino_leaf.base.delete_tid = 0;
365         ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
366         ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);
367
368         ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
369         ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
370         ip->ino_data.mode = vap->va_mode;
371         ip->ino_data.ctime = trans->time;
372         ip->ino_data.parent_obj_id = (dip) ? dip->ino_leaf.base.obj_id : 0;
373
374         switch(ip->ino_leaf.base.obj_type) {
375         case HAMMER_OBJTYPE_CDEV:
376         case HAMMER_OBJTYPE_BDEV:
377                 ip->ino_data.rmajor = vap->va_rmajor;
378                 ip->ino_data.rminor = vap->va_rminor;
379                 break;
380         default:
381                 break;
382         }
383
384         /*
385          * Calculate default uid/gid and overwrite with information from
386          * the vap.
387          */
388         xuid = hammer_to_unix_xid(&dip->ino_data.uid);
389         xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
390                                      &vap->va_mode);
391         ip->ino_data.mode = vap->va_mode;
392
393         if (vap->va_vaflags & VA_UID_UUID_VALID)
394                 ip->ino_data.uid = vap->va_uid_uuid;
395         else if (vap->va_uid != (uid_t)VNOVAL)
396                 hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
397         else
398                 hammer_guid_to_uuid(&ip->ino_data.uid, xuid);
399
400         if (vap->va_vaflags & VA_GID_UUID_VALID)
401                 ip->ino_data.gid = vap->va_gid_uuid;
402         else if (vap->va_gid != (gid_t)VNOVAL)
403                 hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
404         else
405                 ip->ino_data.gid = dip->ino_data.gid;
406
407         hammer_ref(&ip->lock);
408         if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
409                 hammer_unref(&ip->lock);
410                 panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
411         }
412         *ipp = ip;
413         return(0);
414 }
415
416 /*
417  * Called by hammer_sync_inode().
418  */
419 static int
420 hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
421 {
422         hammer_transaction_t trans = cursor->trans;
423         hammer_record_t record;
424         int error;
425
426 retry:
427         error = 0;
428
429         /*
430          * If the inode has a presence on-disk then locate it and mark
431          * it deleted, setting DELONDISK.
432          *
433          * The record may or may not be physically deleted, depending on
434          * the retention policy.
435          */
436         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
437             HAMMER_INODE_ONDISK) {
438                 hammer_normalize_cursor(cursor);
439                 cursor->key_beg.localization = HAMMER_LOCALIZE_INODE;
440                 cursor->key_beg.obj_id = ip->obj_id;
441                 cursor->key_beg.key = 0;
442                 cursor->key_beg.create_tid = 0;
443                 cursor->key_beg.delete_tid = 0;
444                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
445                 cursor->key_beg.obj_type = 0;
446                 cursor->asof = ip->obj_asof;
447                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
448                 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
449                 cursor->flags |= HAMMER_CURSOR_BACKEND;
450
451                 error = hammer_btree_lookup(cursor);
452                 if (hammer_debug_inode)
453                         kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
454                 if (error) {
455                         kprintf("error %d\n", error);
456                         Debugger("hammer_update_inode");
457                 }
458
459                 if (error == 0) {
460                         error = hammer_ip_delete_record(cursor, ip, trans->tid);
461                         if (hammer_debug_inode)
462                                 kprintf(" error %d\n", error);
463                         if (error && error != EDEADLK) {
464                                 kprintf("error %d\n", error);
465                                 Debugger("hammer_update_inode2");
466                         }
467                         if (error == 0) {
468                                 ip->flags |= HAMMER_INODE_DELONDISK;
469                         }
470                         if (cursor->node)
471                                 hammer_cache_node(cursor->node, &ip->cache[0]);
472                 }
473                 if (error == EDEADLK) {
474                         hammer_done_cursor(cursor);
475                         error = hammer_init_cursor(trans, cursor,
476                                                    &ip->cache[0], ip);
477                         if (hammer_debug_inode)
478                                 kprintf("IPDED %p %d\n", ip, error);
479                         if (error == 0)
480                                 goto retry;
481                 }
482         }
483
484         /*
485          * Ok, write out the initial record or a new record (after deleting
486          * the old one), unless the DELETED flag is set.  This routine will
487          * clear DELONDISK if it writes out a record.
488          *
489          * Update our inode statistics if this is the first application of
490          * the inode on-disk.
491          */
492         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
493                 /*
494                  * Generate a record and write it to the media
495                  */
496                 record = hammer_alloc_mem_record(ip, 0);
497                 record->type = HAMMER_MEM_RECORD_INODE;
498                 record->flush_state = HAMMER_FST_FLUSH;
499                 record->leaf = ip->sync_ino_leaf;
500                 record->leaf.base.create_tid = trans->tid;
501                 record->leaf.data_len = sizeof(ip->sync_ino_data);
502                 record->data = (void *)&ip->sync_ino_data;
503                 record->flags |= HAMMER_RECF_INTERLOCK_BE;
504                 for (;;) {
505                         error = hammer_ip_sync_record_cursor(cursor, record);
506                         if (hammer_debug_inode)
507                                 kprintf("GENREC %p rec %08x %d\n",      
508                                         ip, record->flags, error);
509                         if (error != EDEADLK)
510                                 break;
511                         hammer_done_cursor(cursor);
512                         error = hammer_init_cursor(trans, cursor,
513                                                    &ip->cache[0], ip);
514                         if (hammer_debug_inode)
515                                 kprintf("GENREC reinit %d\n", error);
516                         if (error)
517                                 break;
518                 }
519                 if (error) {
520                         kprintf("error %d\n", error);
521                         Debugger("hammer_update_inode3");
522                 }
523
524                 /*
525                  * The record isn't managed by the inode's record tree;
526                  * destroy it whether we succeed or fail.
527                  */
528                 record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
529                 record->flags |= HAMMER_RECF_DELETED_FE;
530                 record->flush_state = HAMMER_FST_IDLE;
531                 hammer_rel_mem_record(record);
532
533                 /*
534                  * Finish up.
535                  */
536                 if (error == 0) {
537                         if (hammer_debug_inode)
538                                 kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
539                         ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
540                                             HAMMER_INODE_ITIMES);
541                         ip->flags &= ~HAMMER_INODE_DELONDISK;
542
543                         /*
544                          * Root volume count of inodes
545                          */
546                         if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
547                                 hammer_modify_volume_field(trans,
548                                                            trans->rootvol,
549                                                            vol0_stat_inodes);
550                                 ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
551                                 hammer_modify_volume_done(trans->rootvol);
552                                 ip->flags |= HAMMER_INODE_ONDISK;
553                                 if (hammer_debug_inode)
554                                         kprintf("NOWONDISK %p\n", ip);
555                         }
556                 }
557         }
558
559         /*
560          * If the inode has been destroyed, clean out any left-over flags
561          * that may have been set by the frontend.
562          */
563         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) { 
564                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
565                                     HAMMER_INODE_ITIMES);
566         }
567         return(error);
568 }
569
570 /*
571  * Update only the itimes fields.  This is done non-historically.  The
572  * record is updated in-place on the disk.
573  */
574 static int
575 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
576 {
577         hammer_transaction_t trans = cursor->trans;
578         struct hammer_btree_leaf_elm *leaf;
579         int error;
580
581 retry:
582         error = 0;
583         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
584             HAMMER_INODE_ONDISK) {
585                 hammer_normalize_cursor(cursor);
586                 cursor->key_beg.localization = HAMMER_LOCALIZE_INODE;
587                 cursor->key_beg.obj_id = ip->obj_id;
588                 cursor->key_beg.key = 0;
589                 cursor->key_beg.create_tid = 0;
590                 cursor->key_beg.delete_tid = 0;
591                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
592                 cursor->key_beg.obj_type = 0;
593                 cursor->asof = ip->obj_asof;
594                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
595                 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
596                 cursor->flags |= HAMMER_CURSOR_BACKEND;
597
598                 error = hammer_btree_lookup(cursor);
599                 if (error) {
600                         kprintf("error %d\n", error);
601                         Debugger("hammer_update_itimes1");
602                 }
603                 if (error == 0) {
604                         /*
605                          * Do not generate UNDO records for atime updates.
606                          */
607                         leaf = cursor->leaf;
608                         hammer_modify_node(trans, cursor->node, 
609                                            &leaf->atime, sizeof(leaf->atime));
610                         leaf->atime = ip->sync_ino_leaf.atime;
611                         hammer_modify_node_done(cursor->node);
612                         /*rec->ino_mtime = ip->sync_ino_rec.ino_mtime;*/
613                         ip->sync_flags &= ~HAMMER_INODE_ITIMES;
614                         /* XXX recalculate crc */
615                         hammer_cache_node(cursor->node, &ip->cache[0]);
616                 }
617                 if (error == EDEADLK) {
618                         hammer_done_cursor(cursor);
619                         error = hammer_init_cursor(trans, cursor,
620                                                    &ip->cache[0], ip);
621                         if (error == 0)
622                                 goto retry;
623                 }
624         }
625         return(error);
626 }
627
628 /*
629  * Release a reference on an inode, flush as requested.
630  *
631  * On the last reference we queue the inode to the flusher for its final
632  * disposition.
633  */
634 void
635 hammer_rel_inode(struct hammer_inode *ip, int flush)
636 {
637         hammer_mount_t hmp = ip->hmp;
638
639         /*
640          * Handle disposition when dropping the last ref.
641          */
642         for (;;) {
643                 if (ip->lock.refs == 1) {
644                         /*
645                          * Determine whether on-disk action is needed for
646                          * the inode's final disposition.
647                          */
648                         KKASSERT(ip->vp == NULL);
649                         hammer_inode_unloadable_check(ip, 0);
650                         if (ip->flags & HAMMER_INODE_MODMASK) {
651                                 hammer_flush_inode(ip, 0);
652                         } else if (ip->lock.refs == 1) {
653                                 hammer_unload_inode(ip);
654                                 break;
655                         }
656                 } else {
657                         if (flush)
658                                 hammer_flush_inode(ip, 0);
659
660                         /*
661                          * The inode still has multiple refs; try to drop
662                          * one ref.
663                          */
664                         KKASSERT(ip->lock.refs >= 1);
665                         if (ip->lock.refs > 1) {
666                                 hammer_unref(&ip->lock);
667                                 break;
668                         }
669                 }
670         }
671
672         /*
673          * XXX bad hack until I add code to track inodes in SETUP.  We
674          * can queue a lot of inodes to the syncer but if we don't wake
675          * it up the undo sets will be too large or too many unflushed
676          * records will build up and blow our malloc limit.
677          */
678         if (++hmp->reclaim_count > 256) {
679                 hmp->reclaim_count = 0;
680                 hammer_flusher_async(hmp);
681         }
682 }
683
684 /*
685  * Unload and destroy the specified inode.  Must be called with one remaining
686  * reference.  The reference is disposed of.
687  *
688  * This can only be called in the context of the flusher.
689  */
690 static int
691 hammer_unload_inode(struct hammer_inode *ip)
692 {
693         KASSERT(ip->lock.refs == 1,
694                 ("hammer_unload_inode: %d refs\n", ip->lock.refs));
695         KKASSERT(ip->vp == NULL);
696         KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
697         KKASSERT(ip->cursor_ip_refs == 0);
698         KKASSERT(ip->lock.lockcount == 0);
699         KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);
700
701         KKASSERT(RB_EMPTY(&ip->rec_tree));
702         KKASSERT(TAILQ_EMPTY(&ip->target_list));
703         KKASSERT(TAILQ_EMPTY(&ip->bio_list));
704         KKASSERT(TAILQ_EMPTY(&ip->bio_alt_list));
705
706         RB_REMOVE(hammer_ino_rb_tree, &ip->hmp->rb_inos_root, ip);
707
708         hammer_uncache_node(&ip->cache[0]);
709         hammer_uncache_node(&ip->cache[1]);
710         if (ip->objid_cache)
711                 hammer_clear_objid(ip);
712         --hammer_count_inodes;
713         kfree(ip, M_HAMMER);
714
715         return(0);
716 }
717
718 /*
719  * Called on mount -u when switching from RW to RO or vice versa.  Adjust
720  * the read-only flag for cached inodes.
721  *
722  * This routine is called from a RB_SCAN().
723  */
724 int
725 hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
726 {
727         hammer_mount_t hmp = ip->hmp;
728
729         if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
730                 ip->flags |= HAMMER_INODE_RO;
731         else
732                 ip->flags &= ~HAMMER_INODE_RO;
733         return(0);
734 }
735
736 /*
737  * A transaction has modified an inode, requiring updates as specified by
738  * the passed flags.
739  *
740  * HAMMER_INODE_DDIRTY: Inode data has been updated
741  * HAMMER_INODE_XDIRTY: Dirty in-memory records
742  * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
743  * HAMMER_INODE_DELETED: Inode record/data must be deleted
744  * HAMMER_INODE_ITIMES: mtime/atime has been updated
745  */
746 void
747 hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
748 {
749         KKASSERT ((ip->flags & HAMMER_INODE_RO) == 0 ||
750                   (flags & (HAMMER_INODE_DDIRTY |
751                             HAMMER_INODE_XDIRTY | HAMMER_INODE_BUFS |
752                             HAMMER_INODE_DELETED | HAMMER_INODE_ITIMES)) == 0);
753         if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
754                 ip->flags |= HAMMER_INODE_RSV_INODES;
755                 ++ip->hmp->rsv_inodes;
756         }
757
758         ip->flags |= flags;
759 }
760
761 /*
762  * Request that an inode be flushed.  This whole mess cannot block and may
763  * recurse.  Once requested HAMMER will attempt to actively flush it until
764  * the flush can be done.
765  *
766  * The inode may already be flushing, or may be in a setup state.  We can
767  * place the inode in a flushing state if it is currently idle and flag it
768  * to reflush if it is currently flushing.
769  */
770 void
771 hammer_flush_inode(hammer_inode_t ip, int flags)
772 {
773         hammer_record_t depend;
774         int r, good;
775
776         /*
777          * Trivial 'nothing to flush' case.  If the inode is in a SETUP
778          * state we have to put it back into an IDLE state so we can
779          * drop the extra ref.
780          */
781         if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
782                 if (ip->flush_state == HAMMER_FST_SETUP) {
783                         ip->flush_state = HAMMER_FST_IDLE;
784                         hammer_rel_inode(ip, 0);
785                 }
786                 return;
787         }
788
789         /*
790          * Our flush action will depend on the current state.
791          */
792         switch(ip->flush_state) {
793         case HAMMER_FST_IDLE:
794                 /*
795                  * We have no dependencies and can flush immediately.  Some of
796                  * our children may not be flushable so we have to re-test
797                  * with that additional knowledge.
798                  */
799                 hammer_flush_inode_core(ip, flags);
800                 break;
801         case HAMMER_FST_SETUP:
802                 /*
803                  * Recurse upwards through dependencies via target_list
804                  * and start their flusher actions going if possible.
805                  *
806                  * 'good' is our connectivity.  -1 means we have none and
807                  * can't flush, 0 means there weren't any dependencies, and
808                  * 1 means we have good connectivity.
809                  */
810                 good = 0;
811                 TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
812                         r = hammer_setup_parent_inodes(depend);
813                         if (r < 0 && good == 0)
814                                 good = -1;
815                         if (r > 0)
816                                 good = 1;
817                 }
818
819                 /*
820                  * We can continue if good >= 0.  Determine how many records
821                  * under our inode can be flushed (and mark them).
822                  */
823                 if (good >= 0) {
824                         hammer_flush_inode_core(ip, flags);
825                 } else {
826                         ip->flags |= HAMMER_INODE_REFLUSH;
827                         if (flags & HAMMER_FLUSH_SIGNAL) {
828                                 ip->flags |= HAMMER_INODE_RESIGNAL;
829                                 hammer_flusher_async(ip->hmp);
830                         }
831                 }
832                 break;
833         default:
834                 /*
835                  * We are already flushing, flag the inode to reflush
836                  * if needed after it completes its current flush.
837                  */
838                 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
839                         ip->flags |= HAMMER_INODE_REFLUSH;
840                 if (flags & HAMMER_FLUSH_SIGNAL) {
841                         ip->flags |= HAMMER_INODE_RESIGNAL;
842                         hammer_flusher_async(ip->hmp);
843                 }
844                 break;
845         }
846 }
847
848 /*
849  * We are asked to recurse upwards and convert the record from SETUP
850  * to FLUSH if possible.  record->ip is a parent of the caller's inode,
851  * and record->target_ip is the caller's inode.
852  *
853  * Return 1 if the record gives us connectivity
854  *
855  * Return 0 if the record is not relevant 
856  *
857  * Return -1 if we can't resolve the dependency and there is no connectivity.
858  */
859 static int
860 hammer_setup_parent_inodes(hammer_record_t record)
861 {
862         hammer_mount_t hmp = record->ip->hmp;
863         hammer_record_t depend;
864         hammer_inode_t ip;
865         int r, good;
866
867         KKASSERT(record->flush_state != HAMMER_FST_IDLE);
868         ip = record->ip;
869
870         /*
871          * If the record is already flushing, is it in our flush group?
872          *
873          * If it is in our flush group but it is a general record or a 
874          * delete-on-disk, it does not improve our connectivity (return 0),
875          * and if the target inode is not trying to destroy itself we can't
876          * allow the operation yet anyway (the second return -1).
877          */
878         if (record->flush_state == HAMMER_FST_FLUSH) {
879                 if (record->flush_group != hmp->flusher_next) {
880                         ip->flags |= HAMMER_INODE_REFLUSH;
881                         return(-1);
882                 }
883                 if (record->type == HAMMER_MEM_RECORD_ADD)
884                         return(1);
885                 /* GENERAL or DEL */
886                 return(0);
887         }
888
889         /*
890          * It must be a setup record.  Try to resolve the setup dependencies
891          * by recursing upwards so we can place ip on the flush list.
892          */
893         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
894
895         good = 0;
896         TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
897                 r = hammer_setup_parent_inodes(depend);
898                 if (r < 0 && good == 0)
899                         good = -1;
900                 if (r > 0)
901                         good = 1;
902         }
903
904         /*
905          * We can't flush ip because it has no connectivity (XXX also check
906          * nlinks for pre-existing connectivity!).  Flag it so any resolution
907          * recurses back down.
908          */
909         if (good < 0) {
910                 ip->flags |= HAMMER_INODE_REFLUSH;
911                 return(good);
912         }
913
914         /*
915          * We are go, place the parent inode in a flushing state so we can
916          * place its record in a flushing state.  Note that the parent
917          * may already be flushing.  The record must be in the same flush
918          * group as the parent.
919          */
920         if (ip->flush_state != HAMMER_FST_FLUSH)
921                 hammer_flush_inode_core(ip, HAMMER_FLUSH_RECURSION);
922         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
923         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
924
925 #if 0
926         if (record->type == HAMMER_MEM_RECORD_DEL &&
927             (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
928                 /*
929                  * Regardless of flushing state we cannot sync this path if the
930                  * record represents a delete-on-disk but the target inode
931                  * is not ready to sync its own deletion.
932                  *
933                  * XXX need to count effective nlinks to determine whether
934                  * the flush is ok, otherwise removing a hardlink will
935                  * just leave the DEL record to rot.
936                  */
937                 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
938                 return(-1);
939         } else
940 #endif
941         if (ip->flush_group == ip->hmp->flusher_next) {
942                 /*
943                  * This is the record we wanted to synchronize.
944                  */
945                 record->flush_state = HAMMER_FST_FLUSH;
946                 record->flush_group = ip->flush_group;
947                 hammer_ref(&record->lock);
948                 if (record->type == HAMMER_MEM_RECORD_ADD)
949                         return(1);
950
951                 /*
952                  * A general or delete-on-disk record does not contribute
953                  * to our visibility.  We can still flush it, however.
954                  */
955                 return(0);
956         } else {
957                 /*
958                  * We couldn't resolve the dependencies, so request that the
959                  * inode be flushed when the dependencies can be resolved.
960                  */
961                 ip->flags |= HAMMER_INODE_REFLUSH;
962                 return(-1);
963         }
964 }
965
966 /*
967  * This is the core routine placing an inode into the FST_FLUSH state.
968  */
969 static void
970 hammer_flush_inode_core(hammer_inode_t ip, int flags)
971 {
972         int go_count;
973
974         /*
975          * Set flush state and prevent the flusher from cycling into
976          * the next flush group.  Do not place the ip on the list yet.
977          * An inode in a non-idle flush state holds an extra reference.
978          */
979         KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
980         if (ip->flush_state == HAMMER_FST_IDLE)
981                 hammer_ref(&ip->lock);
982         ip->flush_state = HAMMER_FST_FLUSH;
983         ip->flush_group = ip->hmp->flusher_next;
984         ++ip->hmp->flusher_lock;
985
986         /*
987          * We need to be able to vfsync/truncate from the backend.
988          */
989         KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
990         if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
991                 ip->flags |= HAMMER_INODE_VHELD;
992                 vref(ip->vp);
993         }
994
995         /*
996          * Figure out how many in-memory records we can actually flush
997          * (not including inode meta-data, buffers, etc).
998          */
999         if (flags & HAMMER_FLUSH_RECURSION) {
1000                 go_count = 1;
1001         } else {
1002                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1003                                    hammer_setup_child_callback, NULL);
1004         }
1005
1006         /*
1007          * This is a more involved test that includes go_count.  If we
1008          * can't flush, flag the inode and return.  If go_count is 0 we
1009          * are unable to flush any records in our rec_tree and
1010          * must ignore the XDIRTY flag.
1011          */
1012         if (go_count == 0) {
1013                 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
1014                         ip->flags |= HAMMER_INODE_REFLUSH;
1015                         ip->flush_state = HAMMER_FST_SETUP;
1016                         if (ip->flags & HAMMER_INODE_VHELD) {
1017                                 ip->flags &= ~HAMMER_INODE_VHELD;
1018                                 vrele(ip->vp);
1019                         }
1020                         if (flags & HAMMER_FLUSH_SIGNAL) {
1021                                 ip->flags |= HAMMER_INODE_RESIGNAL;
1022                                 hammer_flusher_async(ip->hmp);
1023                         }
1024                         if (--ip->hmp->flusher_lock == 0)
1025                                 wakeup(&ip->hmp->flusher_lock);
1026                         return;
1027                 }
1028         }
1029
1030         /*
1031          * Snapshot the state of the inode for the backend flusher.
1032          *
1033          * The truncation must be retained in the frontend until after
1034          * we've actually performed the record deletion.
1035          *
1036          * NOTE: The DELETING flag is a mod flag, but it is also sticky,
1037          * and stays in ip->flags.  Once set, it stays set until the
1038          * inode is destroyed.
1039          */
1040         ip->sync_flags = (ip->flags & HAMMER_INODE_MODMASK);
1041         ip->sync_trunc_off = ip->trunc_off;
1042         ip->sync_ino_leaf = ip->ino_leaf;
1043         ip->sync_ino_data = ip->ino_data;
1044         ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
1045
1046         /*
1047          * The flusher list inherits our inode and reference.
1048          */
1049         TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
1050         if (--ip->hmp->flusher_lock == 0)
1051                 wakeup(&ip->hmp->flusher_lock);
1052
1053         if (flags & HAMMER_FLUSH_SIGNAL)
1054                 hammer_flusher_async(ip->hmp);
1055 }
1056
1057 /*
1058  * Callback for scan of ip->rec_tree.  Try to include each record in our
1059  * flush.  ip->flush_group has been set but the inode has not yet been
1060  * moved into a flushing state.
1061  *
1062  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
1063  * both inodes.
1064  *
1065  * We return 1 for any record placed or found in FST_FLUSH, which prevents
1066  * the caller from shortcutting the flush.
1067  */
1068 static int
1069 hammer_setup_child_callback(hammer_record_t rec, void *data)
1070 {
1071         hammer_inode_t target_ip;
1072         hammer_inode_t ip;
1073         int r;
1074
1075         /*
1076          * If the record has been deleted by the backend (it's being held
1077          * by the frontend in a race), just ignore it.
1078          */
1079         if (rec->flags & HAMMER_RECF_DELETED_BE)
1080                 return(0);
1081
1082         /*
1083          * If the record is in an idle state it has no dependencies and
1084          * can be flushed.
1085          */
1086         ip = rec->ip;
1087         r = 0;
1088
1089         switch(rec->flush_state) {
1090         case HAMMER_FST_IDLE:
1091                 /*
1092                  * Record has no setup dependency; we can flush it.
1093                  */
1094                 KKASSERT(rec->target_ip == NULL);
1095                 rec->flush_state = HAMMER_FST_FLUSH;
1096                 rec->flush_group = ip->flush_group;
1097                 hammer_ref(&rec->lock);
1098                 r = 1;
1099                 break;
1100         case HAMMER_FST_SETUP:
1101                 /*
1102                  * Record has a setup dependency.  Try to include the
1103                  * target ip in the flush. 
1104                  *
1105                  * We have to be careful here: if we do not do the right
1106                  * thing we can lose track of dirty inodes and the system
1107                  * will lock up trying to allocate buffers.
1108                  */
1109                 target_ip = rec->target_ip;
1110                 KKASSERT(target_ip != NULL);
1111                 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
1112                 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
1113                         /*
1114                          * If the target IP is already flushing in our group
1115                          * we are golden, otherwise make sure the target
1116                          * reflushes.
1117                          */
1118                         if (target_ip->flush_group == ip->flush_group) {
1119                                 rec->flush_state = HAMMER_FST_FLUSH;
1120                                 rec->flush_group = ip->flush_group;
1121                                 hammer_ref(&rec->lock);
1122                                 r = 1;
1123                         } else {
1124                                 target_ip->flags |= HAMMER_INODE_REFLUSH;
1125                         }
1126                 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
1127                         /*
1128                          * If the target IP is not flushing we can force
1129                          * it to flush.  Even if it is unable to write out
1130                          * any of its own records, we have at least one in
1131                          * hand that we CAN deal with.
1132                          */
1133                         rec->flush_state = HAMMER_FST_FLUSH;
1134                         rec->flush_group = ip->flush_group;
1135                         hammer_ref(&rec->lock);
1136                         hammer_flush_inode_core(target_ip,
1137                                                 HAMMER_FLUSH_RECURSION);
1138                         r = 1;
1139                 } else {
1140                         /*
1141                          * General or delete-on-disk record.
1142                          *
1143                          * XXX this needs help.  If this is a delete-on-disk
1144                          * record we could disconnect the target.  If the target
1145                          * has its own dependencies they really need to be flushed.
1146                          *
1147                          * XXX
1148                          */
1149                         rec->flush_state = HAMMER_FST_FLUSH;
1150                         rec->flush_group = ip->flush_group;
1151                         hammer_ref(&rec->lock);
1152                         hammer_flush_inode_core(target_ip,
1153                                                 HAMMER_FLUSH_RECURSION);
1154                         r = 1;
1155                 }
1156                 break;
1157         case HAMMER_FST_FLUSH:
1158                 /* 
1159                  * Record already associated with a flush group.  It had
1160                  * better be ours.
1161                  */
1162                 KKASSERT(rec->flush_group == ip->flush_group);
1163                 r = 1;
1164                 break;
1165         }
1166         return(r);
1167 }
1168
1169 /*
1170  * Wait for a previously queued flush to complete
1171  */
1172 void
1173 hammer_wait_inode(hammer_inode_t ip)
1174 {
1175         while (ip->flush_state != HAMMER_FST_IDLE) {
1176                 ip->flags |= HAMMER_INODE_FLUSHW;
1177                 tsleep(&ip->flags, 0, "hmrwin", 0);
1178         }
1179 }
1180
1181 /*
1182  * Called by the backend code when a flush has been completed.
1183  * The inode has already been removed from the flush list.
1184  *
1185  * A pipelined flush can occur, in which case we must re-enter the
1186  * inode on the list and re-copy its fields.
1187  */
1188 void
1189 hammer_flush_inode_done(hammer_inode_t ip)
1190 {
1191         struct bio *bio;
1192         int dorel = 0;
1193
1194         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
1195
1196         /*
1197          * Allow BIOs to queue to the inode's primary bioq again.
1198          */
1199         ip->flags &= ~HAMMER_INODE_WRITE_ALT;
1200
1201         /*
1202          * Merge left-over flags back into the frontend and fix the state.
1203          */
1204         ip->flags |= ip->sync_flags;
1205
1206         /*
1207          * The backend may have adjusted nlinks, so if the adjusted nlinks
1208          * does not match the frontend's, set the frontend's DDIRTY flag again.
1209          */
1210         if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
1211                 ip->flags |= HAMMER_INODE_DDIRTY;
1212
1213         /*
1214          * Reflush any BIOs that wound up in the alt list.  Our inode will
1215          * also wind up at the end of the flusher's list.
1216          */
1217         while ((bio = TAILQ_FIRST(&ip->bio_alt_list)) != NULL) {
1218                 TAILQ_REMOVE(&ip->bio_alt_list, bio, bio_act);
1219                 TAILQ_INSERT_TAIL(&ip->bio_list, bio, bio_act);
1220         }
1221         /*
1222          * Fix up the dirty buffer status.  IO completions will also
1223          * try to clean up rsv_databufs.
1224          */
1225         if (TAILQ_FIRST(&ip->bio_list) ||
1226             (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree))) {
1227                 ip->flags |= HAMMER_INODE_BUFS;
1228         } else {
1229                 ip->hmp->rsv_databufs -= ip->rsv_databufs;
1230                 ip->rsv_databufs = 0;
1231         }
1232
1233         /*
1234          * Re-set the XDIRTY flag if some of the inode's in-memory records
1235          * could not be flushed.
1236          */
1237         if (RB_ROOT(&ip->rec_tree))
1238                 ip->flags |= HAMMER_INODE_XDIRTY;
1239
1240         /*
1241          * Do not lose track of inodes which no longer have vnode
1242          * associations, otherwise they may never get flushed again.
1243          */
1244         if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
1245                 ip->flags |= HAMMER_INODE_REFLUSH;
1246
1247         /*
1248          * Adjust flush_state.  The target state (idle or setup) shouldn't
1249          * be terribly important since we will reflush if we really need
1250          * to do anything. XXX
1251          */
1252         if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
1253                 ip->flush_state = HAMMER_FST_IDLE;
1254                 dorel = 1;
1255         } else {
1256                 ip->flush_state = HAMMER_FST_SETUP;
1257         }
1258
1259         /*
1260          * Clean up the vnode ref
1261          */
1262         if (ip->flags & HAMMER_INODE_VHELD) {
1263                 ip->flags &= ~HAMMER_INODE_VHELD;
1264                 vrele(ip->vp);
1265         }
1266
1267         /*
1268          * If the frontend made more changes and requested another flush,
1269          * then try to get it running.
1270          */
1271         if (ip->flags & HAMMER_INODE_REFLUSH) {
1272                 ip->flags &= ~HAMMER_INODE_REFLUSH;
1273                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
1274                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
1275                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1276                 } else {
1277                         hammer_flush_inode(ip, 0);
1278                 }
1279         }
1280
1281         /*
1282          * If the inode is now clean drop the space reservation.
1283          */
1284         if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1285             (ip->flags & HAMMER_INODE_RSV_INODES)) {
1286                 ip->flags &= ~HAMMER_INODE_RSV_INODES;
1287                 --ip->hmp->rsv_inodes;
1288         }
1289
1290         /*
1291          * Finally, if the frontend is waiting for a flush to complete,
1292          * wake it up.
1293          */
1294         if (ip->flush_state != HAMMER_FST_FLUSH) {
1295                 if (ip->flags & HAMMER_INODE_FLUSHW) {
1296                         ip->flags &= ~HAMMER_INODE_FLUSHW;
1297                         wakeup(&ip->flags);
1298                 }
1299         }
1300         if (dorel)
1301                 hammer_rel_inode(ip, 0);
1302 }
1303
1304 /*
1305  * Called from hammer_sync_inode() to synchronize in-memory records
1306  * to the media.
1307  */
1308 static int
1309 hammer_sync_record_callback(hammer_record_t record, void *data)
1310 {
1311         hammer_cursor_t cursor = data;
1312         hammer_transaction_t trans = cursor->trans;
1313         int error;
1314
1315         /*
1316          * Skip records that do not belong to the current flush.
1317          */
1318         if (record->flush_state != HAMMER_FST_FLUSH)
1319                 return(0);
1320         KKASSERT((record->flags & HAMMER_RECF_DELETED_BE) == 0);
1321 #if 1
1322         if (record->flush_group != record->ip->flush_group) {
1323                 kprintf("sync_record %p ip %p bad flush group %d %d\n", record, record->ip, record->flush_group, record->ip->flush_group);
1324                 Debugger("blah2");
1325                 return(0);
1326         }
1327 #endif
1328         KKASSERT(record->flush_group == record->ip->flush_group);
1329
1330         /*
1331          * Interlock the record using the BE flag.  Once BE is set the
1332          * frontend cannot change the state of FE.
1333          *
1334          * NOTE: If FE is set prior to us setting BE we still sync the
1335          * record out, but the flush completion code converts it to 
1336          * a delete-on-disk record instead of destroying it.
1337          */
1338         KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
1339         record->flags |= HAMMER_RECF_INTERLOCK_BE;
1340
1341         /*
1342          * If the whole inode is being deleted, all on-disk records will
1343          * be deleted very soon and we can't sync any new records to disk
1344          * because they will be deleted in the same transaction they were
1345          * created in (delete_tid == create_tid), which will assert.
1346          *
1347          * XXX There may be a case with RECORD_ADD with DELETED_FE set
1348          * that we currently panic on.
1349          */
1350         if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
1351                 switch(record->type) {
1352                 case HAMMER_MEM_RECORD_GENERAL:
1353                         record->flags |= HAMMER_RECF_DELETED_FE;
1354                         record->flags |= HAMMER_RECF_DELETED_BE;
1355                         error = 0;
1356                         goto done;
1357                 case HAMMER_MEM_RECORD_ADD:
1358                         panic("hammer_sync_record_callback: illegal add "
1359                               "during inode deletion record %p", record);
1360                         break; /* NOT REACHED */
1361                 case HAMMER_MEM_RECORD_INODE:
1362                         panic("hammer_sync_record_callback: attempt to "
1363                               "sync inode record %p?", record);
1364                         break; /* NOT REACHED */
1365                 case HAMMER_MEM_RECORD_DEL:
1366                         /* 
1367                          * Follow through and issue the on-disk deletion
1368                          */
1369                         break;
1370                 }
1371         }
1372
1373         /*
1374          * If DELETED_FE is set we may have already sent dependent pieces
1375          * to the disk and we must flush the record as if it hadn't been
1376          * deleted.  This creates a bit of a mess because we have to
1377          * have ip_sync_record convert the record to MEM_RECORD_DEL before
1378          * it inserts the B-Tree record.  Otherwise the media sync might
1379          * be visible to the frontend.
1380          */
1381         if (record->flags & HAMMER_RECF_DELETED_FE) {
1382                 if (record->type == HAMMER_MEM_RECORD_ADD) {
1383                         record->flags |= HAMMER_RECF_CONVERT_DELETE;
1384                 } else {
1385                         KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
1386                         return(0);
1387                 }
1388         }
1389
1390         /*
1391          * Assign the create_tid for new records.  Deletions already
1392          * have the record's entire key properly set up.
1393          */
1394         if (record->type != HAMMER_MEM_RECORD_DEL)
1395                 record->leaf.base.create_tid = trans->tid;
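             /*
              * Sync the record to the media.  If the cursor deadlocks
              * against another cursor, tear it down, re-initialize it
              * and retry.
              */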
1396         for (;;) {
1397                 error = hammer_ip_sync_record_cursor(cursor, record);
1398                 if (error != EDEADLK)
1399                         break;
1400                 hammer_done_cursor(cursor);
1401                 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
1402                                            record->ip);
1403                 if (error)
1404                         break;
1405         }
1406         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
1407
1408         if (error) {
1409                 error = -error;
1410                 if (error != -ENOSPC) {
1411                         kprintf("hammer_sync_record_callback: sync failed rec "
1412                                 "%p, error %d\n", record, error);
1413                         Debugger("sync failed rec");
1414                 }
1415         }
1416 done:
1417         hammer_flush_record_done(record, error);
1418         return(error);
1419 }
1420
1421 /*
1422  * XXX error handling
1423  */
1424 int
1425 hammer_sync_inode(hammer_inode_t ip)
1426 {
1427         struct hammer_transaction trans;
1428         struct hammer_cursor cursor;
1429         struct bio *bio;
1430         hammer_record_t depend;
1431         hammer_record_t next;
1432         int error, tmp_error;
1433         u_int64_t nlinks;
1434
1435         if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
1436                 return(0);
1437
1438         hammer_start_transaction_fls(&trans, ip->hmp);
1439         error = hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
1440         if (error)
1441                 goto done;
1442
1443         /*
1444          * Any directory records referencing this inode which are not in
1445          * our current flush group require an adjustment to our nlink
1446          * count for the purposes of synchronizing to disk.
1447          *
1448          * Records which are in our flush group can be unlinked from our
1449          * inode now, potentially allowing the inode to be physically
1450          * deleted.
1451          */
1452         nlinks = ip->ino_data.nlinks;
1453         next = TAILQ_FIRST(&ip->target_list);
1454         while ((depend = next) != NULL) {
1455                 next = TAILQ_NEXT(depend, target_entry);
1456                 if (depend->flush_state == HAMMER_FST_FLUSH &&
1457                     depend->flush_group == ip->hmp->flusher_act) {
1458                         /*
1459                          * If this is an ADD that was deleted by the frontend
1460                          * the frontend nlinks count will have already been
1461                          * decremented, but the backend is going to sync its
1462                          * directory entry and must account for it.  The
1463                          * record will be converted to a delete-on-disk when
1464                          * it gets synced.
1465                          *
1466                          * If the ADD was not deleted by the frontend we
1467                          * can remove the dependency from our target_list.
1468                          */
1469                         if (depend->flags & HAMMER_RECF_DELETED_FE) {
1470                                 ++nlinks;
1471                         } else {
1472                                 TAILQ_REMOVE(&ip->target_list, depend,
1473                                              target_entry);
1474                                 depend->target_ip = NULL;
1475                         }
1476                 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
1477                         /*
1478                          * Not part of our flush group
1479                          */
1480                         KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
1481                         switch(depend->type) {
1482                         case HAMMER_MEM_RECORD_ADD:
1483                                 --nlinks;
1484                                 break;
1485                         case HAMMER_MEM_RECORD_DEL:
1486                                 ++nlinks;
1487                                 break;
1488                         default:
1489                                 break;
1490                         }
1491                 }
1492         }
1493
1494         /*
1495          * Set dirty if we had to modify the link count.
1496          */
1497         if (ip->sync_ino_data.nlinks != nlinks) {
1498                 KKASSERT((int64_t)nlinks >= 0);
1499                 ip->sync_ino_data.nlinks = nlinks;
1500                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1501         }
1502
1503         /*
1504          * Queue up as many dirty buffers as we can then set a flag to
1505          * Queue up as many dirty buffers as we can, then set a flag to
1506          */
1507         if (ip->flags & HAMMER_INODE_VHELD)
1508                 error = vfsync(ip->vp, MNT_NOWAIT, 1, NULL, NULL);
1509         ip->flags |= HAMMER_INODE_WRITE_ALT;
1510
1511         /*
1512          * The buffer cache may contain dirty buffers beyond the inode
1513          * state we copied from the frontend to the backend.  Because
1514          * we are syncing our buffer cache on the backend, resync
1515          * the truncation point and the file size so we don't wipe out
1516          * any data.
1517          *
1518          * Syncing the buffer cache on the frontend has serious problems
1519          * because it prevents us from passively queueing dirty inodes
1520          * to the backend (the BIOs could stall indefinitely).
1521          */
1522         if (ip->flags & HAMMER_INODE_TRUNCATED) {
1523                 ip->sync_trunc_off = ip->trunc_off;
1524                 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
1525         }
1526         if (ip->sync_ino_data.size != ip->ino_data.size) {
1527                 ip->sync_ino_data.size = ip->ino_data.size;
1528                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1529         }
1530
1531         /*
1532          * If there is a truncation queued, destroy any data past the (aligned)
1533          * truncation point.  Userland will have dealt with the buffer
1534          * containing the truncation point for us.
1535          *
1536          * We don't flush pending frontend data buffers until after we've
1537          * dealt with the truncation.
1538          *
1539          * Don't bother if the inode is or has been deleted.
1540          */
1541         if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
1542                 /*
1543                  * Interlock trunc_off.  The VOP front-end may continue to
1544                  * make adjustments to it while we are blocked.
1545                  */
1546                 off_t trunc_off;
1547                 off_t aligned_trunc_off;
1548
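                     /*
                      * Round the truncation offset up to a buffer boundary.
                      */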
1549                 trunc_off = ip->sync_trunc_off;
1550                 aligned_trunc_off = (trunc_off + HAMMER_BUFMASK) &
1551                                     ~HAMMER_BUFMASK64;
1552
1553                 /*
1554                  * Delete any whole blocks on-media.  The front-end has
1555                  * already cleaned out any partial block and made it
1556                  * pending.  The front-end may have updated trunc_off
1557                  * while we were blocked so do not just unconditionally
1558                  * set it to the maximum offset.
1559                  */
1560                 error = hammer_ip_delete_range(&cursor, ip,
1561                                                 aligned_trunc_off,
1562                                                 0x7FFFFFFFFFFFFFFFLL);
1563                 if (error)
1564                         Debugger("hammer_ip_delete_range errored");
1565                 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
1566                 if (ip->trunc_off >= trunc_off) {
1567                         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
1568                         ip->flags &= ~HAMMER_INODE_TRUNCATED;
1569                 }
1570         } else {
1571                 error = 0;
1572         }
1573
1574         /*
1575          * Now sync related records.  These will typically be directory
1576          * entries or delete-on-disk records.
1577          *
1578          * Not all records will be flushed, but clear XDIRTY anyway.  We
1579          * will set it again in the frontend hammer_flush_inode_done() 
1580          * if records remain.
1581          */
1582         if (error == 0) {
1583                 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1584                                     hammer_sync_record_callback, &cursor);
1585                 if (tmp_error < 0)
1586                         tmp_error = -tmp_error;
1587                 if (tmp_error)
1588                         error = tmp_error;
1589                 if (RB_EMPTY(&ip->rec_tree))
1590                         ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
1591         }
1592
1593         /*
1594          * If we are deleting the inode the frontend had better not have
1595          * any active references on elements making up the inode.
1596          */
1597         if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
1598             RB_EMPTY(&ip->rec_tree) &&
1599             (ip->sync_flags & HAMMER_INODE_DELETING) &&
1600             (ip->flags & HAMMER_INODE_DELETED) == 0) {
1601                 int count1 = 0;
1602
1603                 hkprintf("Y");
1604                 ip->flags |= HAMMER_INODE_DELETED;
1605                 error = hammer_ip_delete_range_all(&cursor, ip, &count1);
1606                 if (error == 0) {
1607                         ip->sync_flags &= ~HAMMER_INODE_DELETING;
1608                         ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
1609                         KKASSERT(RB_EMPTY(&ip->rec_tree));
1610
1611                         /*
1612                          * Set delete_tid in both the frontend and backend
1613                          * copy of the inode record.  The DELETED flag handles
1614                          * this, do not set RDIRTY.
1615                          */
1616                         ip->ino_leaf.base.delete_tid = trans.tid;
1617                         ip->sync_ino_leaf.base.delete_tid = trans.tid;
1618
1619                         /*
1620                          * Adjust the inode count in the volume header
1621                          */
1622                         if (ip->flags & HAMMER_INODE_ONDISK) {
1623                                 hammer_modify_volume_field(&trans,
1624                                                            trans.rootvol,
1625                                                            vol0_stat_inodes);
1626                                 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
1627                                 hammer_modify_volume_done(trans.rootvol);
1628                         }
1629                 } else {
1630                         ip->flags &= ~HAMMER_INODE_DELETED;
1631                         Debugger("hammer_ip_delete_range_all errored");
1632                 }
1633         }
1634
1635         /*
1636          * Flush any queued BIOs.  These will just biodone() the IOs if
1637          * the inode has been deleted.
1638          */
1639         while ((bio = TAILQ_FIRST(&ip->bio_list)) != NULL) {
1640                 TAILQ_REMOVE(&ip->bio_list, bio, bio_act);
1641                 tmp_error = hammer_dowrite(&cursor, ip, bio);
1642                 if (tmp_error)
1643                         error = tmp_error;
1644         }
1645         ip->sync_flags &= ~HAMMER_INODE_BUFS;
1646
1647         if (error)
1648                 Debugger("RB_SCAN errored");
1649
1650         /*
1651          * Now update the inode's on-disk inode-data and/or on-disk record.
1652          * DELETED and ONDISK are managed only in ip->flags.
1653          */
1654         switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
1655         case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
1656                 /*
1657                  * If deleted and on-disk, don't set any additional flags.
1658                  * If deleted and on-disk, don't set any additional flags;
1659                  * the delete flag takes care of things.
1660                  * Clear flags which may have been set by the frontend.
1661                  */
1662                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY|
1663                                     HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
1664                                     HAMMER_INODE_DELETING);
1665                 break;
1666         case HAMMER_INODE_DELETED:
1667                 /*
1668                  * Take care of the case where a deleted inode was never
1669                  * flushed to the disk in the first place.
1670                  *
1671                  * Clear flags which may have been set by the frontend.
1672                  */
1673                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY|
1674                                     HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
1675                                     HAMMER_INODE_DELETING);
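                     /*
                      * Destroy any in-memory records still associated
                      * with the inode; they will never make it to the
                      * media.
                      */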
1676                 while (RB_ROOT(&ip->rec_tree)) {
1677                         hammer_record_t record = RB_ROOT(&ip->rec_tree);
1678                         hammer_ref(&record->lock);
1679                         KKASSERT(record->lock.refs == 1);
1680                         record->flags |= HAMMER_RECF_DELETED_FE;
1681                         record->flags |= HAMMER_RECF_DELETED_BE;
1682                         hammer_rel_mem_record(record);
1683                 }
1684                 break;
1685         case HAMMER_INODE_ONDISK:
1686                 /*
1687                  * If already on-disk, do not set any additional flags.
1688                  */
1689                 break;
1690         default:
1691                 /*
1692                  * If not on-disk and not deleted, set both dirty flags
1693                  * to force an initial record to be written.  Also set
1694                  * the create_tid for the inode.
1695                  *
1696                  * Set create_tid in both the frontend and backend
1697                  * copy of the inode record.
1698                  */
1699                 ip->ino_leaf.base.create_tid = trans.tid;
1700                 ip->sync_ino_leaf.base.create_tid = trans.tid;
1701                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1702                 break;
1703         }
1704
1705         /*
1706          * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
1707          * is already on-disk the old record is marked as deleted.
1708          *
1709          * If DELETED is set hammer_update_inode() will delete the existing
1710          * record without writing out a new one.
1711          *
1712          * If *ONLY* the ITIMES flag is set we can update the record in-place.
1713          */
1714         if (ip->flags & HAMMER_INODE_DELETED) {
1715                 error = hammer_update_inode(&cursor, ip);
1716         } else 
1717         if ((ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) ==
1718             HAMMER_INODE_ITIMES) {
1719                 error = hammer_update_itimes(&cursor, ip);
1720         } else
1721         if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) {
1722                 error = hammer_update_inode(&cursor, ip);
1723         }
1724         if (error)
1725                 Debugger("hammer_update_itimes/inode errored");
1726 done:
1727         /*
1728          * Save the TID we used to sync the inode with to make sure we
1729          * do not improperly reuse it.
1730          */
1731         hammer_done_cursor(&cursor);
1732         hammer_done_transaction(&trans);
1733         return(error);
1734 }
1735
1736 /*
1737  * This routine is called when the OS is no longer actively referencing
1738  * the inode (but might still be keeping it cached), or when releasing
1739  * the last reference to an inode.
1740  *
1741  * At this point if the inode's nlinks count is zero we want to destroy
1742  * it, which may mean destroying it on-media too.
1743  */
1744 void
1745 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
1746 {
1747         struct vnode *vp;
1748         struct bio *bio;
1749
1750         /*
1751          * Set the DELETING flag when the link count drops to 0 and the
1752          * OS no longer has any opens on the inode.
1753          *
1754          * The backend will clear DELETING (a mod flag) and set DELETED
1755          * (a state flag) when it is actually able to perform the
1756          * operation.
1757          */
1758         if (ip->ino_data.nlinks == 0 &&
1759             (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
1760                 ip->flags |= HAMMER_INODE_DELETING;
1761                 ip->flags |= HAMMER_INODE_TRUNCATED;
1762                 ip->trunc_off = 0;
1763                 vp = NULL;
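                     /*
                      * If the caller does not already hold a vnode
                      * reference, acquire one so we can clean out the
                      * buffer cache and VM pages below.
                      */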
1764                 if (getvp) {
1765                         if (hammer_get_vnode(ip, &vp) != 0)
1766                                 return;
1767                 }
1768
1769                 /*
1770                  * biodone any buffers with pending IO.  These buffers are
1771                  * holding a BUF_KERNPROC() exclusive lock and our
1772                  * vtruncbuf() call will deadlock if any remain.
1773                  *
1774                  * (interlocked against hammer_vop_strategy_write via
1775                  *  HAMMER_INODE_DELETING|HAMMER_INODE_DELETED).
1776                  */
1777                 while ((bio = TAILQ_FIRST(&ip->bio_list)) != NULL) {
1778                         TAILQ_REMOVE(&ip->bio_list, bio, bio_act);
1779                         bio->bio_buf->b_resid = 0;
1780                         biodone(bio);
1781                         if (ip->rsv_databufs) {
1782                                 --ip->rsv_databufs;
1783                                 --ip->hmp->rsv_databufs;
1784                         }
1785                 }
1786                 while ((bio = TAILQ_FIRST(&ip->bio_alt_list)) != NULL) {
1787                         TAILQ_REMOVE(&ip->bio_alt_list, bio, bio_act);
1788                         bio->bio_buf->b_resid = 0;
1789                         biodone(bio);
1790                         if (ip->rsv_databufs) {
1791                                 --ip->rsv_databufs;
1792                                 --ip->hmp->rsv_databufs;
1793                         }
1794                 }
1795
1796                 /*
1797                  * Final cleanup: truncate the buffer cache and reset the VM object size.
1798                  */
1799                 if (ip->vp) {
1800                         vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
1801                         vnode_pager_setsize(ip->vp, 0);
1802                 }
1803                 if (getvp) {
1804                         vput(vp);
1805                 }
1806         }
1807 }
1808
1809 /*
1810  * Re-test an inode when a dependency has gone away to see if we
1811  * can chain flush it.
1812  */
1813 void
1814 hammer_test_inode(hammer_inode_t ip)
1815 {
1816         if (ip->flags & HAMMER_INODE_REFLUSH) {
1817                 ip->flags &= ~HAMMER_INODE_REFLUSH;
1818                 hammer_ref(&ip->lock);
1819                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
1820                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
1821                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1822                 } else {
1823                         hammer_flush_inode(ip, 0);
1824                 }
1825                 hammer_rel_inode(ip, 0);
1826         }
1827 }
1828