HAMMER 48/Many: finish vop_setattr support, ncreate/nmknod/etc, minor bug fixes.
[dragonfly.git] / sys / vfs / hammer / hammer_inode.c
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  * 
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  * 
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  * 
34  * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.62 2008/05/25 18:41:33 dillon Exp $
35  */
36
37 #include "hammer.h"
38 #include <vm/vm_extern.h>
39 #include <sys/buf.h>
40 #include <sys/buf2.h>
41
42 static int hammer_unload_inode(struct hammer_inode *ip);
43 static void hammer_flush_inode_core(hammer_inode_t ip, int flags);
44 static int hammer_setup_child_callback(hammer_record_t rec, void *data);
45 static int hammer_setup_parent_inodes(hammer_record_t record);
46
47 /*
48  * The kernel is not actively referencing this vnode but is still holding
49  * it cached.
50  *
51  * This is called from the frontend.
52  */
53 int
54 hammer_vop_inactive(struct vop_inactive_args *ap)
55 {
56         struct hammer_inode *ip = VTOI(ap->a_vp);
57
58         /*
59          * Degenerate case
60          */
61         if (ip == NULL) {
62                 vrecycle(ap->a_vp);
63                 return(0);
64         }
65
66         /*
67          * If the inode no longer has visibility in the filesystem and is
68          * fairly clean, try to recycle it immediately.  This can deadlock
69          * in vfsync() if we aren't careful.
70          * 
71          * Do not queue the inode to the flusher if we still have visibility,
72          * otherwise namespace calls such as chmod will unnecessarily generate
73          * multiple inode updates.
74          */
75         hammer_inode_unloadable_check(ip, 0);
76         if (ip->ino_data.nlinks == 0) {
77                 if (ip->flags & HAMMER_INODE_MODMASK)
78                         hammer_flush_inode(ip, 0);
79                 else
80                         vrecycle(ap->a_vp);
81         }
82         return(0);
83 }
84
85 /*
86  * Release the vnode association.  This is typically (but not always)
87  * the last reference on the inode.
88  *
89  * Once the association is lost we are on our own with regards to
90  * flushing the inode.
91  */
92 int
93 hammer_vop_reclaim(struct vop_reclaim_args *ap)
94 {
95         struct hammer_inode *ip;
96         struct vnode *vp;
97
98         vp = ap->a_vp;
99
100         if ((ip = vp->v_data) != NULL) {
101                 vp->v_data = NULL;
102                 ip->vp = NULL;
103                 hammer_rel_inode(ip, 1);
104         }
105         return(0);
106 }
107
108 /*
109  * Return a locked vnode for the specified inode.  The inode must be
110  * referenced but NOT LOCKED on entry and will remain referenced on
111  * return.
112  *
113  * Called from the frontend.
114  */
115 int
116 hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
117 {
118         struct vnode *vp;
119         int error = 0;
120
121         for (;;) {
122                 if ((vp = ip->vp) == NULL) {
123                         error = getnewvnode(VT_HAMMER, ip->hmp->mp, vpp, 0, 0);
124                         if (error)
125                                 break;
126                         hammer_lock_ex(&ip->lock);
127                         if (ip->vp != NULL) {
128                                 hammer_unlock(&ip->lock);
129                                 vp->v_type = VBAD;
130                                 vx_put(vp);
131                                 continue;
132                         }
133                         hammer_ref(&ip->lock);
134                         vp = *vpp;
135                         ip->vp = vp;
136                         vp->v_type =
137                                 hammer_get_vnode_type(ip->ino_data.obj_type);
138
139                         switch(ip->ino_data.obj_type) {
140                         case HAMMER_OBJTYPE_CDEV:
141                         case HAMMER_OBJTYPE_BDEV:
142                                 vp->v_ops = &ip->hmp->mp->mnt_vn_spec_ops;
143                                 addaliasu(vp, ip->ino_data.rmajor,
144                                           ip->ino_data.rminor);
145                                 break;
146                         case HAMMER_OBJTYPE_FIFO:
147                                 vp->v_ops = &ip->hmp->mp->mnt_vn_fifo_ops;
148                                 break;
149                         default:
150                                 break;
151                         }
152
153                         /*
154                          * Only mark as the root vnode if the ip is not
155                          * historical, otherwise the VFS cache will get
156                          * confused.  The other half of the special handling
157                          * is in hammer_vop_nlookupdotdot().
158                          */
159                         if (ip->obj_id == HAMMER_OBJID_ROOT &&
160                             ip->obj_asof == ip->hmp->asof) {
161                                 vp->v_flag |= VROOT;
162                         }
163
164                         vp->v_data = (void *)ip;
165                         /* vnode locked by getnewvnode() */
166                         /* make related vnode dirty if inode dirty? */
167                         hammer_unlock(&ip->lock);
168                         if (vp->v_type == VREG)
169                                 vinitvmio(vp, ip->ino_data.size);
170                         break;
171                 }
172
173                 /*
174                  * loop if the vget fails (aka races), or if the vp
175                  * no longer matches ip->vp.
176                  */
177                 if (vget(vp, LK_EXCLUSIVE) == 0) {
178                         if (vp == ip->vp)
179                                 break;
180                         vput(vp);
181                 }
182         }
183         *vpp = vp;
184         return(error);
185 }
186
187 /*
188  * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
189  * do not attach or detach the related vnode (use hammer_get_vnode() for
190  * that).
191  *
192  * The flags argument is only applied for newly created inodes, and only
193  * certain flags are inherited.
194  *
195  * Called from the frontend.
196  */
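/*
 * Rough usage sketch (illustrative only, not copied from a real caller):
 * a frontend lookup would typically resolve the inode, attach a vnode,
 * and then drop its temporary inode reference:
 *
 *	ip = hammer_get_inode(&trans, NULL, obj_id, asof, 0, &error);
 *	if (ip) {
 *		error = hammer_get_vnode(ip, &vp);
 *		hammer_rel_inode(ip, 0);
 *	}
 */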
197 struct hammer_inode *
198 hammer_get_inode(hammer_transaction_t trans, struct hammer_node **cache,
199                  u_int64_t obj_id, hammer_tid_t asof, int flags, int *errorp)
200 {
201         hammer_mount_t hmp = trans->hmp;
202         struct hammer_inode_info iinfo;
203         struct hammer_cursor cursor;
204         struct hammer_inode *ip;
205
206         /*
207          * Determine if we already have an inode cached.  If we do then
208          * we are golden.
209          */
210         iinfo.obj_id = obj_id;
211         iinfo.obj_asof = asof;
212 loop:
213         ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
214         if (ip) {
215                 hammer_ref(&ip->lock);
216                 *errorp = 0;
217                 return(ip);
218         }
219
220         ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
221         ++hammer_count_inodes;
222         ip->obj_id = obj_id;
223         ip->obj_asof = iinfo.obj_asof;
224         ip->hmp = hmp;
225         ip->flags = flags & HAMMER_INODE_RO;
226         if (hmp->ronly)
227                 ip->flags |= HAMMER_INODE_RO;
228         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
229         RB_INIT(&ip->rec_tree);
230         TAILQ_INIT(&ip->bio_list);
231         TAILQ_INIT(&ip->bio_alt_list);
232         TAILQ_INIT(&ip->target_list);
233
234         /*
235          * Locate the on-disk inode.
236          */
237 retry:
238         hammer_init_cursor(trans, &cursor, cache, NULL);
239         cursor.key_beg.localization = HAMMER_LOCALIZE_INODE;
240         cursor.key_beg.obj_id = ip->obj_id;
241         cursor.key_beg.key = 0;
242         cursor.key_beg.create_tid = 0;
243         cursor.key_beg.delete_tid = 0;
244         cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
245         cursor.key_beg.obj_type = 0;
246         cursor.asof = iinfo.obj_asof;
247         cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
248                        HAMMER_CURSOR_ASOF;
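        /*
         * Note: create_tid is left 0 and HAMMER_CURSOR_ASOF is set, so the
         * lookup returns the inode record version that was visible as-of
         * cursor.asof (roughly: create_tid <= asof and not yet deleted as
         * of asof).  This is what makes historical/snapshot accesses work.
         */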
249
250         *errorp = hammer_btree_lookup(&cursor);
251         if (*errorp == EDEADLK) {
252                 hammer_done_cursor(&cursor);
253                 goto retry;
254         }
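        /*
         * (EDEADLK from a B-Tree operation means the lookup backed out to
         *  avoid a node-lock deadlock; the convention used throughout this
         *  file is to tear the cursor down, re-initialize it, and retry.)
         */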
255
256         /*
257          * On success the B-Tree lookup will hold the appropriate
258          * buffer cache buffers and provide a pointer to the requested
259          * information.  Copy the information to the in-memory inode
260          * and cache the B-Tree node to improve future operations.
261          */
262         if (*errorp == 0) {
263                 ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
264                 ip->ino_data = cursor.data->inode;
265                 hammer_cache_node(cursor.node, &ip->cache[0]);
266                 if (cache)
267                         hammer_cache_node(cursor.node, cache);
268         }
269
270         /*
271          * On success load the inode's record and data and insert the
272          * inode into the B-Tree.  It is possible to race another lookup
273          * insertion of the same inode so deal with that condition too.
274          *
275          * The cursor's locked node interlocks against others creating and
276          * destroying ip while we were blocked.
277          */
278         if (*errorp == 0) {
279                 hammer_ref(&ip->lock);
280                 if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
281                         hammer_uncache_node(&ip->cache[0]);
282                         hammer_uncache_node(&ip->cache[1]);
283                         KKASSERT(ip->lock.refs == 1);
284                         --hammer_count_inodes;
285                         kfree(ip, M_HAMMER);
286                         hammer_done_cursor(&cursor);
287                         goto loop;
288                 }
289                 ip->flags |= HAMMER_INODE_ONDISK;
290         } else {
291                 /*
292                  * Do not panic on read-only accesses which fail, particularly
293                  * historical accesses where the snapshot might not have
294                  * complete connectivity.
295                  */
296                 if ((flags & HAMMER_INODE_RO) == 0) {
297                         kprintf("hammer_get_inode: failed ip %p obj_id %016llx cursor %p error %d\n",
298                                 ip, ip->obj_id, &cursor, *errorp);
299                         Debugger("x");
300                 }
301                 --hammer_count_inodes;
302                 kfree(ip, M_HAMMER);
303                 ip = NULL;
304         }
305         hammer_done_cursor(&cursor);
306         return (ip);
307 }
308
309 /*
310  * Create a new filesystem object, returning the inode in *ipp.  The
311  * returned inode will be referenced.
312  *
313  * The inode is created in-memory.
314  */
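/*
 * Rough sketch of the expected calling sequence (illustrative only; the
 * real callers are the namespace VOPs - ncreate, nmknod, nmkdir, etc):
 *
 *	error = hammer_create_inode(&trans, vap, cred, dip, &nip);
 *	if (error == 0) {
 *		... add a directory entry record referencing nip ...
 *		error = hammer_get_vnode(nip, &vp);
 *		hammer_rel_inode(nip, 0);
 *	}
 *
 * Nothing is written to the media here; the flusher synchronizes the new
 * inode record later.
 */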
315 int
316 hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
317                     struct ucred *cred, hammer_inode_t dip,
318                     struct hammer_inode **ipp)
319 {
320         hammer_mount_t hmp;
321         hammer_inode_t ip;
322         uid_t xuid;
323
324         hmp = trans->hmp;
325         ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
326         ++hammer_count_inodes;
327         ip->obj_id = hammer_alloc_objid(trans, dip);
328         KKASSERT(ip->obj_id != 0);
329         ip->obj_asof = hmp->asof;
330         ip->hmp = hmp;
331         ip->flush_state = HAMMER_FST_IDLE;
332         ip->flags = HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES;
333
334         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
335         RB_INIT(&ip->rec_tree);
336         TAILQ_INIT(&ip->bio_list);
337         TAILQ_INIT(&ip->bio_alt_list);
338         TAILQ_INIT(&ip->target_list);
339
340         ip->ino_leaf.atime = trans->time;
341         ip->ino_data.mtime = trans->time;
342         ip->ino_data.size = 0;
343         ip->ino_data.nlinks = 0;
344         /* XXX */
345         ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
346         ip->ino_leaf.base.localization = HAMMER_LOCALIZE_INODE;
347         ip->ino_leaf.base.obj_id = ip->obj_id;
348         ip->ino_leaf.base.key = 0;
349         ip->ino_leaf.base.create_tid = 0;
350         ip->ino_leaf.base.delete_tid = 0;
351         ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
352         ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);
353
354         ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
355         ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
356         ip->ino_data.mode = vap->va_mode;
357         ip->ino_data.ctime = trans->time;
358         ip->ino_data.parent_obj_id = (dip) ? dip->ino_leaf.base.obj_id : 0;
359
360         switch(ip->ino_leaf.base.obj_type) {
361         case HAMMER_OBJTYPE_CDEV:
362         case HAMMER_OBJTYPE_BDEV:
363                 ip->ino_data.rmajor = vap->va_rmajor;
364                 ip->ino_data.rminor = vap->va_rminor;
365                 break;
366         default:
367                 break;
368         }
369
370         /*
371          * Calculate default uid/gid and overwrite with information from
372          * the vap.
373          */
374         xuid = hammer_to_unix_xid(&dip->ino_data.uid);
375         xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
376                                      &vap->va_mode);
377         ip->ino_data.mode = vap->va_mode;
378
379         if (vap->va_vaflags & VA_UID_UUID_VALID)
380                 ip->ino_data.uid = vap->va_uid_uuid;
381         else if (vap->va_uid != (uid_t)VNOVAL)
382                 hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
383         else
384                 hammer_guid_to_uuid(&ip->ino_data.uid, xuid);
385
386         if (vap->va_vaflags & VA_GID_UUID_VALID)
387                 ip->ino_data.gid = vap->va_gid_uuid;
388         else if (vap->va_gid != (gid_t)VNOVAL)
389                 hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
390         else
391                 ip->ino_data.gid = dip->ino_data.gid;
392
393         hammer_ref(&ip->lock);
394         if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
395                 hammer_unref(&ip->lock);
396                 panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
397         }
398         *ipp = ip;
399         return(0);
400 }
401
402 /*
403  * Called by hammer_sync_inode().
404  */
405 static int
406 hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
407 {
408         hammer_transaction_t trans = cursor->trans;
409         hammer_record_t record;
410         int error;
411
412 retry:
413         error = 0;
414
415         /*
416          * If the inode has a presence on-disk then locate it and mark
417          * it deleted, setting DELONDISK.
418          *
419          * The record may or may not be physically deleted, depending on
420          * the retention policy.
421          */
422         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
423             HAMMER_INODE_ONDISK) {
424                 hammer_normalize_cursor(cursor);
425                 cursor->key_beg.localization = HAMMER_LOCALIZE_INODE;
426                 cursor->key_beg.obj_id = ip->obj_id;
427                 cursor->key_beg.key = 0;
428                 cursor->key_beg.create_tid = 0;
429                 cursor->key_beg.delete_tid = 0;
430                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
431                 cursor->key_beg.obj_type = 0;
432                 cursor->asof = ip->obj_asof;
433                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
434                 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
435                 cursor->flags |= HAMMER_CURSOR_BACKEND;
436
437                 error = hammer_btree_lookup(cursor);
438                 if (hammer_debug_inode)
439                         kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
440                 if (error) {
441                         kprintf("error %d\n", error);
442                         Debugger("hammer_update_inode");
443                 }
444
445                 if (error == 0) {
446                         error = hammer_ip_delete_record(cursor, trans->tid);
447                         if (hammer_debug_inode)
448                                 kprintf(" error %d\n", error);
449                         if (error && error != EDEADLK) {
450                                 kprintf("error %d\n", error);
451                                 Debugger("hammer_update_inode2");
452                         }
453                         if (error == 0) {
454                                 ip->flags |= HAMMER_INODE_DELONDISK;
455                         }
456                         if (cursor->node)
457                                 hammer_cache_node(cursor->node, &ip->cache[0]);
458                 }
459                 if (error == EDEADLK) {
460                         hammer_done_cursor(cursor);
461                         error = hammer_init_cursor(trans, cursor,
462                                                    &ip->cache[0], ip);
463                         if (hammer_debug_inode)
464                                 kprintf("IPDED %p %d\n", ip, error);
465                         if (error == 0)
466                                 goto retry;
467                 }
468         }
469
470         /*
471          * Ok, write out the initial record or a new record (after deleting
472          * the old one), unless the DELETED flag is set.  This routine will
473          * clear DELONDISK if it writes out a record.
474          *
475          * Update our inode statistics if this is the first application of
476          * the inode on-disk.
477          */
478         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
479                 /*
480                  * Generate a record and write it to the media
481                  */
482                 record = hammer_alloc_mem_record(ip, 0);
483                 record->type = HAMMER_MEM_RECORD_INODE;
484                 record->flush_state = HAMMER_FST_FLUSH;
485                 record->leaf = ip->sync_ino_leaf;
486                 record->leaf.base.create_tid = trans->tid;
487                 record->leaf.data_len = sizeof(ip->sync_ino_data);
488                 record->data = (void *)&ip->sync_ino_data;
489                 record->flags |= HAMMER_RECF_INTERLOCK_BE;
490                 for (;;) {
491                         error = hammer_ip_sync_record_cursor(cursor, record);
492                         if (hammer_debug_inode)
493                                 kprintf("GENREC %p rec %08x %d\n",      
494                                         ip, record->flags, error);
495                         if (error != EDEADLK)
496                                 break;
497                         hammer_done_cursor(cursor);
498                         error = hammer_init_cursor(trans, cursor,
499                                                    &ip->cache[0], ip);
500                         if (hammer_debug_inode)
501                                 kprintf("GENREC reinit %d\n", error);
502                         if (error)
503                                 break;
504                 }
505                 if (error) {
506                         kprintf("error %d\n", error);
507                         Debugger("hammer_update_inode3");
508                 }
509
510                 /*
511                  * The record isn't managed by the inode's record tree,
512                  * destroy it whether we succeed or fail.
513                  */
514                 record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
515                 record->flags |= HAMMER_RECF_DELETED_FE;
516                 record->flush_state = HAMMER_FST_IDLE;
517                 hammer_rel_mem_record(record);
518
519                 /*
520                  * Finish up.
521                  */
522                 if (error == 0) {
523                         if (hammer_debug_inode)
524                                 kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
525                         ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
526                                             HAMMER_INODE_ITIMES);
527                         ip->flags &= ~HAMMER_INODE_DELONDISK;
528
529                         /*
530                          * Root volume count of inodes
531                          */
532                         if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
533                                 hammer_modify_volume_field(trans,
534                                                            trans->rootvol,
535                                                            vol0_stat_inodes);
536                                 ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
537                                 hammer_modify_volume_done(trans->rootvol);
538                                 ip->flags |= HAMMER_INODE_ONDISK;
539                                 if (hammer_debug_inode)
540                                         kprintf("NOWONDISK %p\n", ip);
541                         }
542                 }
543         }
544
545         /*
546          * If the inode has been destroyed, clean out any left-over flags
547          * that may have been set by the frontend.
548          */
549         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) { 
550                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
551                                     HAMMER_INODE_ITIMES);
552         }
553         return(error);
554 }
555
556 /*
557  * Update only the itimes fields.  This is done non-historically.  The
558  * record is updated in-place on the disk.
559  */
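/*
 * ("non-historically" means the existing on-disk inode record is modified
 *  in place rather than being deleted and re-created under a new
 *  create_tid, so no historical version is generated just for an atime
 *  update.)
 */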
560 static int
561 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
562 {
563         hammer_transaction_t trans = cursor->trans;
564         struct hammer_btree_leaf_elm *leaf;
565         int error;
566
567 retry:
568         error = 0;
569         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
570             HAMMER_INODE_ONDISK) {
571                 hammer_normalize_cursor(cursor);
572                 cursor->key_beg.localization = HAMMER_LOCALIZE_INODE;
573                 cursor->key_beg.obj_id = ip->obj_id;
574                 cursor->key_beg.key = 0;
575                 cursor->key_beg.create_tid = 0;
576                 cursor->key_beg.delete_tid = 0;
577                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
578                 cursor->key_beg.obj_type = 0;
579                 cursor->asof = ip->obj_asof;
580                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
581                 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
582                 cursor->flags |= HAMMER_CURSOR_BACKEND;
583
584                 error = hammer_btree_lookup(cursor);
585                 if (error) {
586                         kprintf("error %d\n", error);
587                         Debugger("hammer_update_itimes1");
588                 }
589                 if (error == 0) {
590                         /*
591                          * Do not generate UNDO records for atime updates.
592                          */
593                         leaf = cursor->leaf;
594                         hammer_modify_node(trans, cursor->node, 
595                                            &leaf->atime, sizeof(leaf->atime));
596                         leaf->atime = ip->sync_ino_leaf.atime;
597                         hammer_modify_node_done(cursor->node);
598                         /*rec->ino_mtime = ip->sync_ino_rec.ino_mtime;*/
599                         ip->sync_flags &= ~HAMMER_INODE_ITIMES;
600                         /* XXX recalculate crc */
601                         hammer_cache_node(cursor->node, &ip->cache[0]);
602                 }
603                 if (error == EDEADLK) {
604                         hammer_done_cursor(cursor);
605                         error = hammer_init_cursor(trans, cursor,
606                                                    &ip->cache[0], ip);
607                         if (error == 0)
608                                 goto retry;
609                 }
610         }
611         return(error);
612 }
613
614 /*
615  * Release a reference on an inode, flush as requested.
616  *
617  * On the last reference we queue the inode to the flusher for its final
618  * disposition.
619  */
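/*
 * The loop below exists because the actions taken here (queueing the inode
 * to the flusher, which takes its own reference, or dropping one of
 * several references) can change ip->lock.refs, so the "last reference"
 * test is simply re-run until one of the exit cases applies.
 */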
620 void
621 hammer_rel_inode(struct hammer_inode *ip, int flush)
622 {
623         hammer_mount_t hmp = ip->hmp;
624
625         /*
626          * Handle disposition when dropping the last ref.
627          */
628         for (;;) {
629                 if (ip->lock.refs == 1) {
630                         /*
631                          * Determine whether on-disk action is needed for
632                          * the inode's final disposition.
633                          */
634                         KKASSERT(ip->vp == NULL);
635                         hammer_inode_unloadable_check(ip, 0);
636                         if (ip->flags & HAMMER_INODE_MODMASK) {
637                                 hammer_flush_inode(ip, 0);
638                         } else if (ip->lock.refs == 1) {
639                                 hammer_unload_inode(ip);
640                                 break;
641                         }
642                 } else {
643                         if (flush)
644                                 hammer_flush_inode(ip, 0);
645
646                         /*
647                          * The inode still has multiple refs, try to drop
648                          * one ref.
649                          */
650                         KKASSERT(ip->lock.refs >= 1);
651                         if (ip->lock.refs > 1) {
652                                 hammer_unref(&ip->lock);
653                                 break;
654                         }
655                 }
656         }
657
658         /*
659          * XXX bad hack until I add code to track inodes in SETUP.  We
660          * can queue a lot of inodes to the syncer but if we don't wake
661          * it up the undo sets will be too large or too many unflushed
662          * records will build up and blow our malloc limit.
663          */
664         if (++hmp->reclaim_count > 256) {
665                 hmp->reclaim_count = 0;
666                 hammer_flusher_async(hmp);
667         }
668 }
669
670 /*
671  * Unload and destroy the specified inode.  Must be called with one remaining
672  * reference.  The reference is disposed of.
673  *
674  * This can only be called in the context of the flusher.
675  */
676 static int
677 hammer_unload_inode(struct hammer_inode *ip)
678 {
679         KASSERT(ip->lock.refs == 1,
680                 ("hammer_unload_inode: %d refs\n", ip->lock.refs));
681         KKASSERT(ip->vp == NULL);
682         KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
683         KKASSERT(ip->cursor_ip_refs == 0);
684         KKASSERT(ip->lock.lockcount == 0);
685         KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);
686
687         KKASSERT(RB_EMPTY(&ip->rec_tree));
688         KKASSERT(TAILQ_EMPTY(&ip->target_list));
689         KKASSERT(TAILQ_EMPTY(&ip->bio_list));
690         KKASSERT(TAILQ_EMPTY(&ip->bio_alt_list));
691
692         RB_REMOVE(hammer_ino_rb_tree, &ip->hmp->rb_inos_root, ip);
693
694         hammer_uncache_node(&ip->cache[0]);
695         hammer_uncache_node(&ip->cache[1]);
696         if (ip->objid_cache)
697                 hammer_clear_objid(ip);
698         --hammer_count_inodes;
699         kfree(ip, M_HAMMER);
700
701         return(0);
702 }
703
704 /*
705  * A transaction has modified an inode, requiring updates as specified by
706  * the passed flags.
707  *
708  * HAMMER_INODE_DDIRTY: Inode data has been updated
709  * HAMMER_INODE_XDIRTY: Dirty in-memory records
710  * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
711  * HAMMER_INODE_DELETED: Inode record/data must be deleted
712  * HAMMER_INODE_ITIMES: mtime/atime has been updated
713  */
714 void
715 hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
716 {
717         KKASSERT ((ip->flags & HAMMER_INODE_RO) == 0 ||
718                   (flags & (HAMMER_INODE_DDIRTY |
719                             HAMMER_INODE_XDIRTY | HAMMER_INODE_BUFS |
720                             HAMMER_INODE_DELETED | HAMMER_INODE_ITIMES)) == 0);
721
722         ip->flags |= flags;
723 }
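/*
 * (For example, a chmod coming through the setattr path would be expected
 *  to update ip->ino_data.mode and then call hammer_modify_inode() with
 *  HAMMER_INODE_DDIRTY, while write() paths set HAMMER_INODE_BUFS for the
 *  dirty buffer cache buffers.  Illustrative only.)
 */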
724
725 /*
726  * Request that an inode be flushed.  This whole mess cannot block and may
727  * recurse.  Once requested HAMMER will attempt to actively flush it until
728  * the flush can be done.
729  *
730  * The inode may already be flushing, or may be in a setup state.  We can
731  * place the inode in a flushing state if it is currently idle and flag it
732  * to reflush if it is currently flushing.
733  */
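/*
 * Summary of the state handling below:
 *
 *	IDLE  -> FLUSH	no dependancies, flush immediately via
 *			hammer_flush_inode_core().
 *	SETUP -> FLUSH	only if the parent dependancies on target_list can
 *			be resolved (hammer_setup_parent_inodes() >= 0),
 *			otherwise flag HAMMER_INODE_REFLUSH and wait.
 *	FLUSH -> FLUSH	already flushing; flag REFLUSH (and RESIGNAL if
 *			requested) so the inode is flushed again afterwards.
 */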
734 void
735 hammer_flush_inode(hammer_inode_t ip, int flags)
736 {
737         hammer_record_t depend;
738         int r, good;
739
740         /*
741          * Trivial 'nothing to flush' case.  If the inode is in a SETUP
742          * state we have to put it back into an IDLE state so we can
743          * drop the extra ref.
744          */
745         if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
746                 if (ip->flush_state == HAMMER_FST_SETUP) {
747                         ip->flush_state = HAMMER_FST_IDLE;
748                         hammer_rel_inode(ip, 0);
749                 }
750                 return;
751         }
752
753         /*
754          * Our flush action will depend on the current state.
755          */
756         switch(ip->flush_state) {
757         case HAMMER_FST_IDLE:
758                 /*
759                  * We have no dependancies and can flush immediately.  Some of
760                  * our children may not be flushable so we have to re-test
761                  * with that additional knowledge.
762                  */
763                 hammer_flush_inode_core(ip, flags);
764                 break;
765         case HAMMER_FST_SETUP:
766                 /*
767                  * Recurse upwards through dependancies via target_list
768                  * and start their flusher actions going if possible.
769                  *
770                  * 'good' is our connectivity.  -1 means we have none and
771                  * can't flush, 0 means there weren't any dependancies, and
772                  * 1 means we have good connectivity.
773                  */
774                 good = 0;
775                 TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
776                         r = hammer_setup_parent_inodes(depend);
777                         if (r < 0 && good == 0)
778                                 good = -1;
779                         if (r > 0)
780                                 good = 1;
781                 }
782
783                 /*
784                  * We can continue if good >= 0.  Determine how many records
785                  * under our inode can be flushed (and mark them).
786                  */
787                 if (good >= 0) {
788                         hammer_flush_inode_core(ip, flags);
789                 } else {
790                         ip->flags |= HAMMER_INODE_REFLUSH;
791                         if (flags & HAMMER_FLUSH_SIGNAL) {
792                                 ip->flags |= HAMMER_INODE_RESIGNAL;
793                                 hammer_flusher_async(ip->hmp);
794                         }
795                 }
796                 break;
797         default:
798                 /*
799                  * We are already flushing, flag the inode to reflush
800                  * if needed after it completes its current flush.
801                  */
802                 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
803                         ip->flags |= HAMMER_INODE_REFLUSH;
804                 if (flags & HAMMER_FLUSH_SIGNAL) {
805                         ip->flags |= HAMMER_INODE_RESIGNAL;
806                         hammer_flusher_async(ip->hmp);
807                 }
808                 break;
809         }
810 }
811
812 /*
813  * We are asked to recurse upwards and convert the record from SETUP
814  * to FLUSH if possible.  record->ip is a parent of the caller's inode,
815  * and record->target_ip is the caller's inode.
816  *
817  * Return 1 if the record gives us connectivity
818  *
819  * Return 0 if the record is not relevant 
820  *
821  * Return -1 if we can't resolve the dependancy and there is no connectivity.
822  */
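/*
 * Example: when a new file is created in a directory the directory-entry
 * record's ip is the directory and its target_ip is the new file.  Before
 * the file's inode can join the flush group this routine pulls the
 * directory (and recursively its own parents) into the same flush group,
 * so the entry making the file visible is synchronized along with it.
 */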
823 static int
824 hammer_setup_parent_inodes(hammer_record_t record)
825 {
826         hammer_mount_t hmp = record->ip->hmp;
827         hammer_record_t depend;
828         hammer_inode_t ip;
829         int r, good;
830
831         KKASSERT(record->flush_state != HAMMER_FST_IDLE);
832         ip = record->ip;
833
834         /*
835          * If the record is already flushing, is it in our flush group?
836          *
837          * If it is in our flush group but it is a general record or a 
838          * delete-on-disk, it does not improve our connectivity (return 0),
839          * and if the target inode is not trying to destroy itself we can't
840          * allow the operation yet anyway (the second return -1).
841          */
842         if (record->flush_state == HAMMER_FST_FLUSH) {
843                 if (record->flush_group != hmp->flusher_next) {
844                         ip->flags |= HAMMER_INODE_REFLUSH;
845                         return(-1);
846                 }
847                 if (record->type == HAMMER_MEM_RECORD_ADD)
848                         return(1);
849                 /* GENERAL or DEL */
850                 return(0);
851         }
852
853         /*
854          * It must be a setup record.  Try to resolve the setup dependancies
855          * by recursing upwards so we can place ip on the flush list.
856          */
857         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
858
859         good = 0;
860         TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
861                 r = hammer_setup_parent_inodes(depend);
862                 if (r < 0 && good == 0)
863                         good = -1;
864                 if (r > 0)
865                         good = 1;
866         }
867
868         /*
869          * We can't flush ip because it has no connectivity (XXX also check
870          * nlinks for pre-existing connectivity!).  Flag it so any resolution
871          * recurses back down.
872          */
873         if (good < 0) {
874                 ip->flags |= HAMMER_INODE_REFLUSH;
875                 return(good);
876         }
877
878         /*
879          * We are go, place the parent inode in a flushing state so we can
880          * place its record in a flushing state.  Note that the parent
881          * may already be flushing.  The record must be in the same flush
882          * group as the parent.
883          */
884         if (ip->flush_state != HAMMER_FST_FLUSH)
885                 hammer_flush_inode_core(ip, HAMMER_FLUSH_RECURSION);
886         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
887         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
888
889 #if 0
890         if (record->type == HAMMER_MEM_RECORD_DEL &&
891             (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
892                 /*
893                  * Regardless of flushing state we cannot sync this path if the
894                  * record represents a delete-on-disk but the target inode
895                  * is not ready to sync its own deletion.
896                  *
897                  * XXX need to count effective nlinks to determine whether
898                  * the flush is ok, otherwise removing a hardlink will
899                  * just leave the DEL record to rot.
900                  */
901                 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
902                 return(-1);
903         } else
904 #endif
905         if (ip->flush_group == ip->hmp->flusher_next) {
906                 /*
907                  * This is the record we wanted to synchronize.
908                  */
909                 record->flush_state = HAMMER_FST_FLUSH;
910                 record->flush_group = ip->flush_group;
911                 hammer_ref(&record->lock);
912                 if (record->type == HAMMER_MEM_RECORD_ADD)
913                         return(1);
914
915                 /*
916                  * A general or delete-on-disk record does not contribute
917                  * to our visibility.  We can still flush it, however.
918                  */
919                 return(0);
920         } else {
921                 /*
922                  * We couldn't resolve the dependancies, request that the
923                  * inode be flushed when the dependancies can be resolved.
924                  */
925                 ip->flags |= HAMMER_INODE_REFLUSH;
926                 return(-1);
927         }
928 }
929
930 /*
931  * This is the core routine placing an inode into the FST_FLUSH state.
932  */
933 static void
934 hammer_flush_inode_core(hammer_inode_t ip, int flags)
935 {
936         int go_count;
937
938         /*
939          * Set flush state and prevent the flusher from cycling into
940          * the next flush group.  Do not place the ip on the list yet.
941          * Inodes not in the idle state get an extra reference.
942          */
943         KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
944         if (ip->flush_state == HAMMER_FST_IDLE)
945                 hammer_ref(&ip->lock);
946         ip->flush_state = HAMMER_FST_FLUSH;
947         ip->flush_group = ip->hmp->flusher_next;
948         ++ip->hmp->flusher_lock;
949
950         /*
951          * We need to be able to vfsync/truncate from the backend.
952          */
953         KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
954         if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
955                 ip->flags |= HAMMER_INODE_VHELD;
956                 vref(ip->vp);
957         }
958
959         /*
960          * Figure out how many in-memory records we can actually flush
961          * (not including inode meta-data, buffers, etc).
962          */
963         if (flags & HAMMER_FLUSH_RECURSION) {
964                 go_count = 1;
965         } else {
966                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
967                                    hammer_setup_child_callback, NULL);
968         }
969
970         /*
971          * This is a more involved test that includes go_count.  If we
972          * can't flush, flag the inode and return.  If go_count is 0 we
973          * were unable to flush any records in our rec_tree and
974          * must ignore the XDIRTY flag.
975          */
976         if (go_count == 0) {
977                 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
978                         ip->flags |= HAMMER_INODE_REFLUSH;
979                         ip->flush_state = HAMMER_FST_SETUP;
980                         if (ip->flags & HAMMER_INODE_VHELD) {
981                                 ip->flags &= ~HAMMER_INODE_VHELD;
982                                 vrele(ip->vp);
983                         }
984                         if (flags & HAMMER_FLUSH_SIGNAL) {
985                                 ip->flags |= HAMMER_INODE_RESIGNAL;
986                                 hammer_flusher_async(ip->hmp);
987                         }
988                         if (--ip->hmp->flusher_lock == 0)
989                                 wakeup(&ip->hmp->flusher_lock);
990                         return;
991                 }
992         }
993
994         /*
995          * Snapshot the state of the inode for the backend flusher.
996          *
997          * The truncation must be retained in the frontend until after
998          * we've actually performed the record deletion.
999          *
1000          * NOTE: The DELETING flag is a mod flag, but it is also sticky,
1001          * and stays in ip->flags.  Once set, it stays set until the
1002          * inode is destroyed.
1003          */
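        /*
         * (Note that '~' binds tighter than '|' in the mask below: all of
         *  the MODMASK bits are cleared from ip->flags except TRUNCATED,
         *  which is retained per the comment above.)
         */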
1004         ip->sync_flags = (ip->flags & HAMMER_INODE_MODMASK);
1005         ip->sync_trunc_off = ip->trunc_off;
1006         ip->sync_ino_leaf = ip->ino_leaf;
1007         ip->sync_ino_data = ip->ino_data;
1008         ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
1009
1010         /*
1011          * The flusher list inherits our inode and reference.
1012          */
1013         TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
1014         if (--ip->hmp->flusher_lock == 0)
1015                 wakeup(&ip->hmp->flusher_lock);
1016
1017         if (flags & HAMMER_FLUSH_SIGNAL)
1018                 hammer_flusher_async(ip->hmp);
1019 }
1020
1021 /*
1022  * Callback for scan of ip->rec_tree.  Try to include each record in our
1023  * flush.  ip->flush_group has been set but the inode has not yet been
1024  * moved into a flushing state.
1025  *
1026  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
1027  * both inodes.
1028  *
1029  * We return 1 for any record placed or found in FST_FLUSH, which prevents
1030  * the caller from shortcutting the flush.
1031  */
1032 static int
1033 hammer_setup_child_callback(hammer_record_t rec, void *data)
1034 {
1035         hammer_inode_t target_ip;
1036         hammer_inode_t ip;
1037         int r;
1038
1039         /*
1040          * If the record has been deleted by the backend (it's being held
1041          * by the frontend in a race), just ignore it.
1042          */
1043         if (rec->flags & HAMMER_RECF_DELETED_BE)
1044                 return(0);
1045
1046         /*
1047          * If the record is in an idle state it has no dependancies and
1048          * can be flushed.
1049          */
1050         ip = rec->ip;
1051         r = 0;
1052
1053         switch(rec->flush_state) {
1054         case HAMMER_FST_IDLE:
1055                 /*
1056                  * Record has no setup dependancy, we can flush it.
1057                  */
1058                 KKASSERT(rec->target_ip == NULL);
1059                 rec->flush_state = HAMMER_FST_FLUSH;
1060                 rec->flush_group = ip->flush_group;
1061                 hammer_ref(&rec->lock);
1062                 r = 1;
1063                 break;
1064         case HAMMER_FST_SETUP:
1065                 /*
1066                  * Record has a setup dependancy.  Try to include the
1067                  * target ip in the flush. 
1068                  *
1069                  * We have to be careful here, if we do not do the right
1070                  * thing we can lose track of dirty inodes and the system
1071                  * will lockup trying to allocate buffers.
1072                  */
1073                 target_ip = rec->target_ip;
1074                 KKASSERT(target_ip != NULL);
1075                 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
1076                 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
1077                         /*
1078                          * If the target IP is already flushing in our group
1079                          * we are golden, otherwise make sure the target
1080                          * reflushes.
1081                          */
1082                         if (target_ip->flush_group == ip->flush_group) {
1083                                 rec->flush_state = HAMMER_FST_FLUSH;
1084                                 rec->flush_group = ip->flush_group;
1085                                 hammer_ref(&rec->lock);
1086                                 r = 1;
1087                         } else {
1088                                 target_ip->flags |= HAMMER_INODE_REFLUSH;
1089                         }
1090                 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
1091                         /*
1092                          * If the target IP is not flushing we can force
1093                          * it to flush.  Even if it is unable to write out
1094                          * any of its own records, we have at least one in
1095                          * hand that we CAN deal with.
1096                          */
1097                         rec->flush_state = HAMMER_FST_FLUSH;
1098                         rec->flush_group = ip->flush_group;
1099                         hammer_ref(&rec->lock);
1100                         hammer_flush_inode_core(target_ip,
1101                                                 HAMMER_FLUSH_RECURSION);
1102                         r = 1;
1103                 } else {
1104                         /*
1105                          * General or delete-on-disk record.
1106                          *
1107                          * XXX this needs help.  If this is a delete-on-disk we could
1108                          * disconnect the target.  If the target has its own
1109                          * dependancies they really need to be flushed.
1110                          *
1111                          * XXX
1112                          */
1113                         rec->flush_state = HAMMER_FST_FLUSH;
1114                         rec->flush_group = ip->flush_group;
1115                         hammer_ref(&rec->lock);
1116                         hammer_flush_inode_core(target_ip,
1117                                                 HAMMER_FLUSH_RECURSION);
1118                         r = 1;
1119                 }
1120                 break;
1121         case HAMMER_FST_FLUSH:
1122                 /* 
1123                  * Record already associated with a flush group.  It had
1124                  * better be ours.
1125                  */
1126                 KKASSERT(rec->flush_group == ip->flush_group);
1127                 r = 1;
1128                 break;
1129         }
1130         return(r);
1131 }
1132
1133 /*
1134  * Wait for a previously queued flush to complete
1135  */
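/*
 * (The wakeup comes from hammer_flush_inode_done() when it clears
 *  HAMMER_INODE_FLUSHW; the loop re-tests flush_state because a pipelined
 *  reflush can put the inode right back into a flushing state.)
 */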
1136 void
1137 hammer_wait_inode(hammer_inode_t ip)
1138 {
1139         while (ip->flush_state != HAMMER_FST_IDLE) {
1140                 ip->flags |= HAMMER_INODE_FLUSHW;
1141                 tsleep(&ip->flags, 0, "hmrwin", 0);
1142         }
1143 }
1144
1145 /*
1146  * Called by the backend code when a flush has been completed.
1147  * The inode has already been removed from the flush list.
1148  *
1149  * A pipelined flush can occur, in which case we must re-enter the
1150  * inode on the list and re-copy its fields.
1151  */
1152 void
1153 hammer_flush_inode_done(hammer_inode_t ip)
1154 {
1155         struct bio *bio;
1156         int dorel = 0;
1157
1158         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
1159
1160         /*
1161          * Allow BIOs to queue to the inode's primary bioq again.
1162          */
1163         ip->flags &= ~HAMMER_INODE_WRITE_ALT;
1164
1165         /*
1166          * Merge left-over flags back into the frontend and fix the state.
1167          */
1168         ip->flags |= ip->sync_flags;
1169
1170         /*
1171          * The backend may have adjusted nlinks, so if the adjusted nlinks
1172          * does not match the frontend, set the frontend's DDIRTY flag again.
1173          */
1174         if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
1175                 ip->flags |= HAMMER_INODE_DDIRTY;
1176
1177         /*
1178          * Reflush any BIOs that wound up in the alt list.  Our inode will
1179          * also wind up at the end of the flusher's list.
1180          */
1181         while ((bio = TAILQ_FIRST(&ip->bio_alt_list)) != NULL) {
1182                 TAILQ_REMOVE(&ip->bio_alt_list, bio, bio_act);
1183                 TAILQ_INSERT_TAIL(&ip->bio_list, bio, bio_act);
1184         }
1185         /*
1186          * Fix up the dirty buffer status.
1187          */
1188         if (TAILQ_FIRST(&ip->bio_list) ||
1189             (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree))) {
1190                 ip->flags |= HAMMER_INODE_BUFS;
1191         }
1192
1193         /*
1194          * Re-set the XDIRTY flag if some of the inode's in-memory records
1195          * could not be flushed.
1196          */
1197         if (RB_ROOT(&ip->rec_tree))
1198                 ip->flags |= HAMMER_INODE_XDIRTY;
1199
1200         /*
1201          * Do not lose track of inodes which no longer have vnode
1202          * associations, otherwise they may never get flushed again.
1203          */
1204         if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
1205                 ip->flags |= HAMMER_INODE_REFLUSH;
1206
1207         /*
1208          * Adjust flush_state.  The target state (idle or setup) shouldn't
1209          * be terribly important since we will reflush if we really need
1210          * to do anything. XXX
1211          */
1212         if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
1213                 ip->flush_state = HAMMER_FST_IDLE;
1214                 dorel = 1;
1215         } else {
1216                 ip->flush_state = HAMMER_FST_SETUP;
1217         }
1218
1219         /*
1220          * Clean up the vnode ref
1221          */
1222         if (ip->flags & HAMMER_INODE_VHELD) {
1223                 ip->flags &= ~HAMMER_INODE_VHELD;
1224                 vrele(ip->vp);
1225         }
1226
1227         /*
1228          * If the frontend made more changes and requested another flush,
1229          * then try to get it running.
1230          */
1231         if (ip->flags & HAMMER_INODE_REFLUSH) {
1232                 ip->flags &= ~HAMMER_INODE_REFLUSH;
1233                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
1234                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
1235                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1236                 } else {
1237                         hammer_flush_inode(ip, 0);
1238                 }
1239         }
1240
1241         /*
1242          * Finally, if the frontend is waiting for a flush to complete,
1243          * wake it up.
1244          */
1245         if (ip->flush_state != HAMMER_FST_FLUSH) {
1246                 if (ip->flags & HAMMER_INODE_FLUSHW) {
1247                         ip->flags &= ~HAMMER_INODE_FLUSHW;
1248                         wakeup(&ip->flags);
1249                 }
1250         }
1251         if (dorel)
1252                 hammer_rel_inode(ip, 0);
1253 }
1254
1255 /*
1256  * Called from hammer_sync_inode() to synchronize in-memory records
1257  * to the media.
1258  */
1259 static int
1260 hammer_sync_record_callback(hammer_record_t record, void *data)
1261 {
1262         hammer_cursor_t cursor = data;
1263         hammer_transaction_t trans = cursor->trans;
1264         int error;
1265
1266         /*
1267          * Skip records that do not belong to the current flush.
1268          */
1269         if (record->flush_state != HAMMER_FST_FLUSH)
1270                 return(0);
1271         KKASSERT((record->flags & HAMMER_RECF_DELETED_BE) == 0);
1272 #if 1
1273         if (record->flush_group != record->ip->flush_group) {
1274                 kprintf("sync_record %p ip %p bad flush group %d %d\n", record, record->ip, record->flush_group, record->ip->flush_group);
1275                 Debugger("blah2");
1276                 return(0);
1277         }
1278 #endif
1279         KKASSERT(record->flush_group == record->ip->flush_group);
1280
1281         /*
1282          * Interlock the record using the BE flag.  Once BE is set the
1283          * frontend cannot change the state of FE.
1284          *
1285          * NOTE: If FE is set prior to us setting BE we still sync the
1286          * record out, but the flush completion code converts it to 
1287          * a delete-on-disk record instead of destroying it.
1288          */
1289         KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
1290         record->flags |= HAMMER_RECF_INTERLOCK_BE;
1291
1292         /*
1293          * If the whole inode is being deleted, all on-disk records will
1294          * be deleted very soon, we can't sync any new records to disk
1295          * because they will be deleted in the same transaction they were
1296          * created in (delete_tid == create_tid), which will assert.
1297          *
1298          * XXX There may be a case with RECORD_ADD with DELETED_FE set
1299          * that we currently panic on.
1300          */
1301         if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
1302                 switch(record->type) {
1303                 case HAMMER_MEM_RECORD_GENERAL:
1304                         record->flags |= HAMMER_RECF_DELETED_FE;
1305                         record->flags |= HAMMER_RECF_DELETED_BE;
1306                         error = 0;
1307                         goto done;
1308                 case HAMMER_MEM_RECORD_ADD:
1309                         panic("hammer_sync_record_callback: illegal add "
1310                               "during inode deletion record %p", record);
1311                         break; /* NOT REACHED */
1312                 case HAMMER_MEM_RECORD_INODE:
1313                         panic("hammer_sync_record_callback: attempt to "
1314                               "sync inode record %p?", record);
1315                         break; /* NOT REACHED */
1316                 case HAMMER_MEM_RECORD_DEL:
1317                         /* 
1318                          * Follow through and issue the on-disk deletion
1319                          */
1320                         break;
1321                 }
1322         }
1323
1324         /*
1325          * If DELETED_FE is set we may have already sent dependant pieces
1326          * to the disk and we must flush the record as if it hadn't been
1327          * deleted.  This creates a bit of a mess because we have to
1328          * have ip_sync_record convert the record to MEM_RECORD_DEL before
1329          * it inserts the B-Tree record.  Otherwise the media sync might
1330          * be visible to the frontend.
1331          */
1332         if (record->flags & HAMMER_RECF_DELETED_FE) {
1333                 if (record->type == HAMMER_MEM_RECORD_ADD) {
1334                         record->flags |= HAMMER_RECF_CONVERT_DELETE;
1335                 } else {
1336                         KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
1337                         return(0);
1338                 }
1339         }
1340
1341         /*
1342          * Assign the create_tid for new records.  Deletions already
1343          * have the record's entire key properly set up.
1344          */
1345         if (record->type != HAMMER_MEM_RECORD_DEL)
1346                 record->leaf.base.create_tid = trans->tid;
1347         for (;;) {
1348                 error = hammer_ip_sync_record_cursor(cursor, record);
1349                 if (error != EDEADLK)
1350                         break;
1351                 hammer_done_cursor(cursor);
1352                 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
1353                                            record->ip);
1354                 if (error)
1355                         break;
1356         }
1357         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
1358
1359         if (error) {
1360                 error = -error;
1361                 if (error != -ENOSPC) {
1362                         kprintf("hammer_sync_record_callback: sync failed rec "
1363                                 "%p, error %d\n", record, error);
1364                         Debugger("sync failed rec");
1365                 }
1366         }
1367 done:
1368         hammer_flush_record_done(record, error);
1369         return(error);
1370 }
1371
1372 /*
1373  * XXX error handling
1374  */
1375 int
1376 hammer_sync_inode(hammer_inode_t ip)
1377 {
1378         struct hammer_transaction trans;
1379         struct hammer_cursor cursor;
1380         struct bio *bio;
1381         hammer_record_t depend;
1382         hammer_record_t next;
1383         int error, tmp_error;
1384         u_int64_t nlinks;
1385
1386         if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
1387                 return(0);
1388
1389         hammer_start_transaction_fls(&trans, ip->hmp);
1390         error = hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
1391         if (error)
1392                 goto done;
1393
1394         /*
1395          * Any directory records referencing this inode which are not in
1396          * our current flush group must adjust our nlink count for the
1397          * purposes of synchronization to disk.
1398          *
1399          * Records which are in our flush group can be unlinked from our
1400          * inode now, potentially allowing the inode to be physically
1401          * deleted.
1402          */
1403         nlinks = ip->ino_data.nlinks;
1404         next = TAILQ_FIRST(&ip->target_list);
1405         while ((depend = next) != NULL) {
1406                 next = TAILQ_NEXT(depend, target_entry);
1407                 if (depend->flush_state == HAMMER_FST_FLUSH &&
1408                     depend->flush_group == ip->hmp->flusher_act) {
1409                         /*
1410                          * If this is an ADD that was deleted by the frontend,
1411                          * the frontend nlinks count will have already been
1412                          * decremented, but the backend is going to sync its
1413                          * directory entry and must account for it.  The
1414                          * record will be converted to a delete-on-disk when
1415                          * it gets synced.
1416                          *
1417                          * If the ADD was not deleted by the frontend we
1418                          * can remove the dependency from our target_list.
1419                          */
1420                         if (depend->flags & HAMMER_RECF_DELETED_FE) {
1421                                 ++nlinks;
1422                         } else {
1423                                 TAILQ_REMOVE(&ip->target_list, depend,
1424                                              target_entry);
1425                                 depend->target_ip = NULL;
1426                         }
1427                 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
1428                         /*
1429                          * Not part of our flush group
1430                          */
1431                         KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
1432                         switch(depend->type) {
1433                         case HAMMER_MEM_RECORD_ADD:
1434                                 --nlinks;
1435                                 break;
1436                         case HAMMER_MEM_RECORD_DEL:
1437                                 ++nlinks;
1438                                 break;
1439                         default:
1440                                 break;
1441                         }
1442                 }
1443         }
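
             /*
              * Worked example of the adjustment above: a directory-entry
              * ADD targeting this inode that is queued but not part of the
              * current flush group is a link the media image must not count
              * yet, so nlinks is decremented for this sync pass; a pending
              * DEL that is not being flushed works the other way around.
              */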
1444
1445         /*
1446          * Set dirty if we had to modify the link count.
1447          */
1448         if (ip->sync_ino_data.nlinks != nlinks) {
1449                 KKASSERT((int64_t)nlinks >= 0);
1450                 ip->sync_ino_data.nlinks = nlinks;
1451                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1452         }
1453
1454         /*
1455          * Queue up as many dirty buffers as we can, then set a flag to
1456          * cause any further BIOs to go to the alternative queue.
1457          */
1458         if (ip->flags & HAMMER_INODE_VHELD)
1459                 error = vfsync(ip->vp, MNT_NOWAIT, 1, NULL, NULL);
1460         ip->flags |= HAMMER_INODE_WRITE_ALT;
1461
1462         /*
1463          * The buffer cache may contain dirty buffers beyond the inode
1464          * state we copied from the frontend to the backend.  Because
1465          * we are syncing our buffer cache on the backend, resync
1466          * the truncation point and the file size so we don't wipe out
1467          * any data.
1468          *
1469          * Syncing the buffer cache on the frontend has serious problems
1470          * because it prevents us from passively queueing dirty inodes
1471          * to the backend (the BIO's could stall indefinitely).
1472          */
1473         if (ip->flags & HAMMER_INODE_TRUNCATED) {
1474                 ip->sync_trunc_off = ip->trunc_off;
1475                 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
1476         }
1477         if (ip->sync_ino_data.size != ip->ino_data.size) {
1478                 ip->sync_ino_data.size = ip->ino_data.size;
1479                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1480         }
1481
1482         /*
1483          * If there is a truncation queued, destroy any data past the (aligned)
1484          * truncation point.  Userland will have dealt with the buffer
1485          * containing the truncation point for us.
1486          *
1487          * We don't flush pending frontend data buffers until after we've
1488          * dealt with the truncation.
1489          *
1490          * Don't bother if the inode is or has been deleted.
1491          */
1492         if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
1493                 /*
1494                  * Interlock trunc_off.  The VOP front-end may continue to
1495                  * make adjustments to it while we are blocked.
1496                  */
1497                 off_t trunc_off;
1498                 off_t aligned_trunc_off;
1499
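                     /*
                      * Round the truncation offset up to the next HAMMER
                      * buffer boundary.  A worked example, assuming 16KB
                      * buffers (HAMMER_BUFMASK == 0x3FFF): a trunc_off of
                      * 0x5000 gives an aligned_trunc_off of 0x8000, while
                      * an offset already on a boundary, e.g. 0x4000, is
                      * left unchanged.
                      */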
1500                 trunc_off = ip->sync_trunc_off;
1501                 aligned_trunc_off = (trunc_off + HAMMER_BUFMASK) &
1502                                     ~HAMMER_BUFMASK64;
1503
1504                 /*
1505                  * Delete any whole blocks on-media.  The front-end has
1506                  * already cleaned out any partial block and made it
1507                  * pending.  The front-end may have updated trunc_off
1508                  * while we were blocked so do not just unconditionally
1509                  * set it to the maximum offset.
1510                  */
1511                 error = hammer_ip_delete_range(&cursor, ip,
1512                                                 aligned_trunc_off,
1513                                                 0x7FFFFFFFFFFFFFFFLL);
1514                 if (error)
1515                         Debugger("hammer_ip_delete_range errored");
1516                 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
1517                 if (ip->trunc_off >= trunc_off) {
1518                         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
1519                         ip->flags &= ~HAMMER_INODE_TRUNCATED;
1520                 }
1521         } else {
1522                 error = 0;
1523         }
1524
1525         /*
1526          * Now sync related records.  These will typically be directory
1527          * entries or delete-on-disk records.
1528          *
1529          * Not all records will be flushed, but clear XDIRTY anyway.  We
1530          * will set it again in the frontend hammer_flush_inode_done() 
1531          * if records remain.
1532          */
1533         if (error == 0) {
1534                 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1535                                     hammer_sync_record_callback, &cursor);
1536                 if (tmp_error < 0)
1537                         tmp_error = -tmp_error;
1538                 if (tmp_error)
1539                         error = tmp_error;
1540                 if (RB_EMPTY(&ip->rec_tree))
1541                         ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
1542         }
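
             /*
              * Note on the sign convention above: hammer_sync_record_callback()
              * negates any error before returning it so that the RB_SCAN
              * terminates early, which is why a negative scan result is
              * flipped back into a positive errno before being folded into
              * the caller's error.
              */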
1543
1544         /*
1545          * If we are deleting the inode, the frontend had better not have
1546          * any active references on elements making up the inode.
1547          */
1548         if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
1549             RB_EMPTY(&ip->rec_tree) &&
1550             (ip->sync_flags & HAMMER_INODE_DELETING) &&
1551             (ip->flags & HAMMER_INODE_DELETED) == 0) {
1552                 int count1 = 0;
1553
1554                 hkprintf("Y");
1555                 ip->flags |= HAMMER_INODE_DELETED;
1556                 error = hammer_ip_delete_range_all(&cursor, ip, &count1);
1557                 if (error == 0) {
1558                         ip->sync_flags &= ~HAMMER_INODE_DELETING;
1559                         ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
1560                         KKASSERT(RB_EMPTY(&ip->rec_tree));
1561
1562                         /*
1563                          * Set delete_tid in both the frontend and backend
1564                          * copy of the inode record.  The DELETED flag handles
1565                          * this, do not set RDIRTY.
1566                          */
1567                         ip->ino_leaf.base.delete_tid = trans.tid;
1568                         ip->sync_ino_leaf.base.delete_tid = trans.tid;
1569
1570                         /*
1571                          * Adjust the inode count in the volume header
1572                          */
1573                         if (ip->flags & HAMMER_INODE_ONDISK) {
1574                                 hammer_modify_volume_field(&trans,
1575                                                            trans.rootvol,
1576                                                            vol0_stat_inodes);
1577                                 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
1578                                 hammer_modify_volume_done(trans.rootvol);
1579                         }
1580                 } else {
1581                         ip->flags &= ~HAMMER_INODE_DELETED;
1582                         Debugger("hammer_ip_delete_range_all errored");
1583                 }
1584         }
1585
1586         /*
1587          * Flush any queued BIOs.  These will just biodone() the IO's if
1588          * the inode has been deleted.
1589          */
1590         while ((bio = TAILQ_FIRST(&ip->bio_list)) != NULL) {
1591                 TAILQ_REMOVE(&ip->bio_list, bio, bio_act);
1592                 tmp_error = hammer_dowrite(&cursor, ip, bio);
1593                 if (tmp_error)
1594                         error = tmp_error;
1595         }
1596         ip->sync_flags &= ~HAMMER_INODE_BUFS;
1597
1598         if (error)
1599                 Debugger("RB_SCAN errored");
1600
1601         /*
1602          * Now update the inode's on-disk inode-data and/or on-disk record.
1603          * DELETED and ONDISK are managed only in ip->flags.
1604          */
1605         switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
1606         case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
1607                 /*
1608                  * If deleted and on-disk, don't set any additional flags.
1609                  * The delete flag takes care of things.
1610                  *
1611                  * Clear flags which may have been set by the frontend.
1612                  */
1613                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY|
1614                                     HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
1615                                     HAMMER_INODE_DELETING);
1616                 break;
1617         case HAMMER_INODE_DELETED:
1618                 /*
1619                  * Take care of the case where a deleted inode was never
1620                  * flushed to the disk in the first place.
1621                  *
1622                  * Clear flags which may have been set by the frontend.
1623                  */
1624                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY|
1625                                     HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
1626                                     HAMMER_INODE_DELETING);
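                     /*
                      * Destroy any in-memory records still queued on the
                      * inode.  With both DELETED flags set and only our
                      * temporary reference held (asserted below via
                      * lock.refs == 1), hammer_rel_mem_record() can dispose
                      * of each record without it ever reaching the media.
                      */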
1627                 while (RB_ROOT(&ip->rec_tree)) {
1628                         hammer_record_t record = RB_ROOT(&ip->rec_tree);
1629                         hammer_ref(&record->lock);
1630                         KKASSERT(record->lock.refs == 1);
1631                         record->flags |= HAMMER_RECF_DELETED_FE;
1632                         record->flags |= HAMMER_RECF_DELETED_BE;
1633                         hammer_rel_mem_record(record);
1634                 }
1635                 break;
1636         case HAMMER_INODE_ONDISK:
1637                 /*
1638                  * If already on-disk, do not set any additional flags.
1639                  */
1640                 break;
1641         default:
1642                 /*
1643                  * If not on-disk and not deleted, set both dirty flags
1644                  * to force an initial record to be written.  Also set
1645                  * the create_tid for the inode.
1646                  *
1647                  * Set create_tid in both the frontend and backend
1648                  * copy of the inode record.
1649                  */
1650                 ip->ino_leaf.base.create_tid = trans.tid;
1651                 ip->sync_ino_leaf.base.create_tid = trans.tid;
1652                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1653                 break;
1654         }
1655
1656         /*
1657          * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
1658          * is already on-disk the old record is marked as deleted.
1659          *
1660          * If DELETED is set hammer_update_inode() will delete the existing
1661          * record without writing out a new one.
1662          *
1663          * If *ONLY* the ITIMES flag is set we can update the record in-place.
1664          */
1665         if (ip->flags & HAMMER_INODE_DELETED) {
1666                 error = hammer_update_inode(&cursor, ip);
1667         } else 
1668         if ((ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) ==
1669             HAMMER_INODE_ITIMES) {
1670                 error = hammer_update_itimes(&cursor, ip);
1671         } else
1672         if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) {
1673                 error = hammer_update_inode(&cursor, ip);
1674         }
1675         if (error)
1676                 Debugger("hammer_update_itimes/inode errored");
1677 done:
1678         /*
1679          * Save the TID we used to sync the inode with to make sure we
1680          * do not improperly reuse it.
1681          */
1682         hammer_done_cursor(&cursor);
1683         hammer_done_transaction(&trans);
1684         return(error);
1685 }
1686
1687 /*
1688  * This routine is called when the OS is no longer actively referencing
1689  * the inode (but might still be keeping it cached), or when releasing
1690  * the last reference to an inode.
1691  *
1692  * At this point if the inode's nlinks count is zero we want to destroy
1693  * it, which may mean destroying it on-media too.
1694  */
1695 void
1696 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
1697 {
1698         struct vnode *vp;
1699         struct bio *bio;
1700
1701         /*
1702          * Set the DELETING flag when the link count drops to 0 and the
1703          * OS no longer has any opens on the inode.
1704          *
1705          * The backend will clear DELETING (a mod flag) and set DELETED
1706          * (a state flag) when it is actually able to perform the
1707          * operation.
1708          */
1709         if (ip->ino_data.nlinks == 0 &&
1710             (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
1711                 ip->flags |= HAMMER_INODE_DELETING;
1712                 ip->flags |= HAMMER_INODE_TRUNCATED;
1713                 ip->trunc_off = 0;
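                     /*
                      * The truncation to offset 0 queued above presumably
                      * lets the backend discard all of the inode's data
                      * before it removes the inode record itself.
                      */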
1714                 vp = NULL;
1715                 if (getvp) {
1716                         if (hammer_get_vnode(ip, &vp) != 0)
1717                                 return;
1718                 }
1719
1720                 /*
1721                  * biodone any buffers with pending IO.  These buffers are
1722                  * holding a BUF_KERNPROC() exclusive lock and our
1723                  * vtruncbuf() call will deadlock if any remain.
1724                  *
1725                  * (interlocked against hammer_vop_strategy_write via
1726                  *  HAMMER_INODE_DELETING|HAMMER_INODE_DELETED).
1727                  */
1728                 while ((bio = TAILQ_FIRST(&ip->bio_list)) != NULL) {
1729                         TAILQ_REMOVE(&ip->bio_list, bio, bio_act);
1730                         bio->bio_buf->b_resid = 0;
1731                         biodone(bio);
1732                 }
1733                 while ((bio = TAILQ_FIRST(&ip->bio_alt_list)) != NULL) {
1734                         TAILQ_REMOVE(&ip->bio_alt_list, bio, bio_act);
1735                         bio->bio_buf->b_resid = 0;
1736                         biodone(bio);
1737                 }
1738
1739                 /*
1740                  * Final cleanup
1741                  */
1742                 if (ip->vp) {
1743                         vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
1744                         vnode_pager_setsize(ip->vp, 0);
1745                 }
1746                 if (getvp) {
1747                         vput(vp);
1748                 }
1749         }
1750 }
1751
1752 /*
1753  * Re-test an inode when a dependency has gone away to see if we
1754  * can chain flush it.
1755  */
1756 void
1757 hammer_test_inode(hammer_inode_t ip)
1758 {
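             /*
              * REFLUSH appears to mark an inode whose flush could not be
              * carried out when last requested; RESIGNAL remembers that the
              * deferred request asked for the flusher to be signaled, so
              * the re-issued flush preserves that behaviour.
              */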
1759         if (ip->flags & HAMMER_INODE_REFLUSH) {
1760                 ip->flags &= ~HAMMER_INODE_REFLUSH;
1761                 hammer_ref(&ip->lock);
1762                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
1763                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
1764                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1765                 } else {
1766                         hammer_flush_inode(ip, 0);
1767                 }
1768                 hammer_rel_inode(ip, 0);
1769         }
1770 }
1771