1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  * 
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  * 
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  * 
34  * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.59 2008/05/13 20:46:55 dillon Exp $
35  */
36
37 #include "hammer.h"
38 #include <vm/vm_extern.h>
39 #include <sys/buf.h>
40 #include <sys/buf2.h>
41
42 static int hammer_unload_inode(struct hammer_inode *ip);
43 static void hammer_flush_inode_core(hammer_inode_t ip, int flags);
44 static int hammer_setup_child_callback(hammer_record_t rec, void *data);
45 static int hammer_setup_parent_inodes(hammer_record_t record);
46
47 /*
48  * The kernel is not actively referencing this vnode but is still holding
49  * it cached.
50  *
51  * This is called from the frontend.
52  */
53 int
54 hammer_vop_inactive(struct vop_inactive_args *ap)
55 {
56         struct hammer_inode *ip = VTOI(ap->a_vp);
57
58         /*
59          * Degenerate case
60          */
61         if (ip == NULL) {
62                 vrecycle(ap->a_vp);
63                 return(0);
64         }
65
66         /*
67          * If the inode no longer has visibility in the filesystem and is
68          * fairly clean, try to recycle it immediately.  This can deadlock
69          * in vfsync() if we aren't careful.
70          * 
71          * Do not queue the inode to the flusher if we still have visibility,
72          * otherwise namespace calls such as chmod will unnecessarily generate
73          * multiple inode updates.
74          */
75         hammer_inode_unloadable_check(ip, 0);
76         if (ip->ino_data.nlinks == 0) {
77                 if (ip->flags & HAMMER_INODE_MODMASK)
78                         hammer_flush_inode(ip, 0);
79                 else
80                         vrecycle(ap->a_vp);
81         }
82         return(0);
83 }
84
85 /*
86  * Release the vnode association.  This is typically (but not always)
87  * the last reference on the inode.
88  *
89  * Once the association is lost we are on our own with regards to
90  * flushing the inode.
91  */
92 int
93 hammer_vop_reclaim(struct vop_reclaim_args *ap)
94 {
95         struct hammer_inode *ip;
96         struct vnode *vp;
97
98         vp = ap->a_vp;
99
100         if ((ip = vp->v_data) != NULL) {
101                 vp->v_data = NULL;
102                 ip->vp = NULL;
103                 hammer_rel_inode(ip, 1);
104         }
105         return(0);
106 }
107
108 /*
109  * Return a locked vnode for the specified inode.  The inode must be
110  * referenced but NOT LOCKED on entry and will remain referenced on
111  * return.
112  *
113  * Called from the frontend.
114  */
115 int
116 hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
117 {
118         struct vnode *vp;
119         int error = 0;
120
121         for (;;) {
122                 if ((vp = ip->vp) == NULL) {
123                         error = getnewvnode(VT_HAMMER, ip->hmp->mp, vpp, 0, 0);
124                         if (error)
125                                 break;
126                         hammer_lock_ex(&ip->lock);
127                         if (ip->vp != NULL) {
128                                 hammer_unlock(&ip->lock);
129                                 vp->v_type = VBAD;
130                                 vx_put(vp);
131                                 continue;
132                         }
133                         hammer_ref(&ip->lock);
134                         vp = *vpp;
135                         ip->vp = vp;
136                         vp->v_type =
137                                 hammer_get_vnode_type(ip->ino_data.obj_type);
138
139                         switch(ip->ino_data.obj_type) {
140                         case HAMMER_OBJTYPE_CDEV:
141                         case HAMMER_OBJTYPE_BDEV:
142                                 vp->v_ops = &ip->hmp->mp->mnt_vn_spec_ops;
143                                 addaliasu(vp, ip->ino_data.rmajor,
144                                           ip->ino_data.rminor);
145                                 break;
146                         case HAMMER_OBJTYPE_FIFO:
147                                 vp->v_ops = &ip->hmp->mp->mnt_vn_fifo_ops;
148                                 break;
149                         default:
150                                 break;
151                         }
152
153                         /*
154                          * Only mark as the root vnode if the ip is not
155                          * historical, otherwise the VFS cache will get
156                          * confused.  The other half of the special handling
157                          * is in hammer_vop_nlookupdotdot().
158                          */
159                         if (ip->obj_id == HAMMER_OBJID_ROOT &&
160                             ip->obj_asof == ip->hmp->asof) {
161                                 vp->v_flag |= VROOT;
162                         }
163
164                         vp->v_data = (void *)ip;
165                         /* vnode locked by getnewvnode() */
166                         /* make related vnode dirty if inode dirty? */
167                         hammer_unlock(&ip->lock);
168                         if (vp->v_type == VREG)
169                                 vinitvmio(vp, ip->ino_data.size);
170                         break;
171                 }
172
173                 /*
174                  * loop if the vget fails (aka races), or if the vp
175                  * no longer matches ip->vp.
176                  */
177                 if (vget(vp, LK_EXCLUSIVE) == 0) {
178                         if (vp == ip->vp)
179                                 break;
180                         vput(vp);
181                 }
182         }
183         *vpp = vp;
184         return(error);
185 }
186
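/*
 * Illustrative sketch only (not part of the driver): a frontend lookup is
 * assumed to pair hammer_get_inode() with hammer_get_vnode() and then drop
 * its own inode reference, since the vnode association holds one of its
 * own (see hammer_vop_reclaim()).  Transaction setup and error unwinding
 * are simplified.
 */
#if 0
static int
example_vget(hammer_transaction_t trans, u_int64_t obj_id, struct vnode **vpp)
{
	struct hammer_inode *ip;
	int error;

	ip = hammer_get_inode(trans, NULL, obj_id, trans->hmp->asof,
			      0, &error);
	if (ip == NULL)
		return (error);
	error = hammer_get_vnode(ip, vpp);	/* *vpp returned locked */
	hammer_rel_inode(ip, 0);		/* vnode keeps its own ref */
	return (error);
}
#endif
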
187 /*
188  * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
189  * do not attach or detach the related vnode (use hammer_get_vnode() for
190  * that).
191  *
192  * The flags argument is only applied for newly created inodes, and only
193  * certain flags are inherited.
194  *
195  * Called from the frontend.
196  */
197 struct hammer_inode *
198 hammer_get_inode(hammer_transaction_t trans, struct hammer_node **cache,
199                  u_int64_t obj_id, hammer_tid_t asof, int flags, int *errorp)
200 {
201         hammer_mount_t hmp = trans->hmp;
202         struct hammer_inode_info iinfo;
203         struct hammer_cursor cursor;
204         struct hammer_inode *ip;
205
206         /*
207          * Determine if we already have an inode cached.  If we do then
208          * we are golden.
209          */
210         iinfo.obj_id = obj_id;
211         iinfo.obj_asof = asof;
212 loop:
213         ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
214         if (ip) {
215                 hammer_ref(&ip->lock);
216                 *errorp = 0;
217                 return(ip);
218         }
219
220         ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
221         ++hammer_count_inodes;
222         ip->obj_id = obj_id;
223         ip->obj_asof = iinfo.obj_asof;
224         ip->hmp = hmp;
225         ip->flags = flags & HAMMER_INODE_RO;
226         if (hmp->ronly)
227                 ip->flags |= HAMMER_INODE_RO;
228         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
229         RB_INIT(&ip->rec_tree);
230         TAILQ_INIT(&ip->bio_list);
231         TAILQ_INIT(&ip->bio_alt_list);
232         TAILQ_INIT(&ip->target_list);
233
234         /*
235          * Locate the on-disk inode.
236          */
237 retry:
238         hammer_init_cursor(trans, &cursor, cache, NULL);
239         cursor.key_beg.obj_id = ip->obj_id;
240         cursor.key_beg.key = 0;
241         cursor.key_beg.create_tid = 0;
242         cursor.key_beg.delete_tid = 0;
243         cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
244         cursor.key_beg.obj_type = 0;
245         cursor.asof = iinfo.obj_asof;
246         cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
247                        HAMMER_CURSOR_ASOF;
248
249         *errorp = hammer_btree_lookup(&cursor);
250         if (*errorp == EDEADLK) {
251                 hammer_done_cursor(&cursor);
252                 goto retry;
253         }
254
255         /*
256          * On success the B-Tree lookup will hold the appropriate
257          * buffer cache buffers and provide a pointer to the requested
258          * information.  Copy the information to the in-memory inode
259          * and cache the B-Tree node to improve future operations.
260          */
261         if (*errorp == 0) {
262                 ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
263                 ip->ino_data = cursor.data->inode;
264                 hammer_cache_node(cursor.node, &ip->cache[0]);
265                 if (cache)
266                         hammer_cache_node(cursor.node, cache);
267         }
268
269         /*
270          * On success load the inode's record and data and insert the
271          * inode into the in-memory inode RB tree.  It is possible to race
272          * another lookup inserting the same inode, so deal with that too.
273          *
274          * The cursor's locked node interlocks against others creating and
275          * destroying ip while we were blocked.
276          */
277         if (*errorp == 0) {
278                 hammer_ref(&ip->lock);
279                 if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
280                         hammer_uncache_node(&ip->cache[0]);
281                         hammer_uncache_node(&ip->cache[1]);
282                         KKASSERT(ip->lock.refs == 1);
283                         --hammer_count_inodes;
284                         kfree(ip, M_HAMMER);
285                         hammer_done_cursor(&cursor);
286                         goto loop;
287                 }
288                 ip->flags |= HAMMER_INODE_ONDISK;
289         } else {
290                 /*
291                  * Do not panic on read-only accesses which fail, particularly
292                  * historical accesses where the snapshot might not have
293                  * complete connectivity.
294                  */
295                 if ((flags & HAMMER_INODE_RO) == 0) {
296                         kprintf("hammer_get_inode: failed ip %p obj_id %016llx cursor %p error %d\n",
297                                 ip, ip->obj_id, &cursor, *errorp);
298                         Debugger("x");
299                 }
300                 --hammer_count_inodes;
301                 kfree(ip, M_HAMMER);
302                 ip = NULL;
303         }
304         hammer_done_cursor(&cursor);
305         return (ip);
306 }
307
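/*
 * Illustrative sketch only: a historical (as-of) access is assumed to pass
 * the snapshot TID instead of hmp->asof and to set HAMMER_INODE_RO, which
 * keeps the inode read-only and suppresses the failure report above when
 * the snapshot has incomplete connectivity.  snapshot_tid is a placeholder
 * for whatever TID the caller resolved.
 */
#if 0
	ip = hammer_get_inode(trans, NULL, obj_id, snapshot_tid,
			      HAMMER_INODE_RO, &error);
#endif
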
308 /*
309  * Create a new filesystem object, returning the inode in *ipp.  The
310  * returned inode will be referenced.
311  *
312  * The inode is created in-memory.
313  */
314 int
315 hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
316                     struct ucred *cred, hammer_inode_t dip,
317                     struct hammer_inode **ipp)
318 {
319         hammer_mount_t hmp;
320         hammer_inode_t ip;
321         uid_t xuid;
322
323         hmp = trans->hmp;
324         ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
325         ++hammer_count_inodes;
326         ip->obj_id = hammer_alloc_objid(trans, dip);
327         KKASSERT(ip->obj_id != 0);
328         ip->obj_asof = hmp->asof;
329         ip->hmp = hmp;
330         ip->flush_state = HAMMER_FST_IDLE;
331         ip->flags = HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES;
332
333         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
334         RB_INIT(&ip->rec_tree);
335         TAILQ_INIT(&ip->bio_list);
336         TAILQ_INIT(&ip->bio_alt_list);
337         TAILQ_INIT(&ip->target_list);
338
339         ip->ino_leaf.atime = trans->time;
340         ip->ino_data.mtime = trans->time;
341         ip->ino_data.size = 0;
342         ip->ino_data.nlinks = 0;
343         /* XXX */
344         ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
345         ip->ino_leaf.base.obj_id = ip->obj_id;
346         ip->ino_leaf.base.key = 0;
347         ip->ino_leaf.base.create_tid = 0;
348         ip->ino_leaf.base.delete_tid = 0;
349         ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
350         ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);
351
352         ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
353         ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
354         ip->ino_data.mode = vap->va_mode;
355         ip->ino_data.ctime = trans->time;
356         ip->ino_data.parent_obj_id = (dip) ? dip->ino_leaf.base.obj_id : 0;
357
358         switch(ip->ino_leaf.base.obj_type) {
359         case HAMMER_OBJTYPE_CDEV:
360         case HAMMER_OBJTYPE_BDEV:
361                 ip->ino_data.rmajor = vap->va_rmajor;
362                 ip->ino_data.rminor = vap->va_rminor;
363                 break;
364         default:
365                 break;
366         }
367
368         /*
369          * Calculate default uid/gid and overwrite with information from
370          * the vap.
371          */
372         xuid = hammer_to_unix_xid(&dip->ino_data.uid);
373         ip->ino_data.gid = dip->ino_data.gid;
374         xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
375                                      &vap->va_mode);
376         ip->ino_data.mode = vap->va_mode;
377
378         if (vap->va_vaflags & VA_UID_UUID_VALID)
379                 ip->ino_data.uid = vap->va_uid_uuid;
380         else if (vap->va_uid != (uid_t)VNOVAL)
381                 hammer_guid_to_uuid(&ip->ino_data.uid, xuid);
382         if (vap->va_vaflags & VA_GID_UUID_VALID)
383                 ip->ino_data.gid = vap->va_gid_uuid;
384         else if (vap->va_gid != (gid_t)VNOVAL)
385                 hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
386
387         hammer_ref(&ip->lock);
388         if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
389                 hammer_unref(&ip->lock);
390                 panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
391         }
392         *ipp = ip;
393         return(0);
394 }
395
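/*
 * Illustrative sketch only: a VOP-level create is assumed to build the
 * in-memory inode here, attach a vnode, and add the directory entry in a
 * separate step (not shown).  dip, vap, cred, nip and vp are the caller's
 * locals; error unwinding is omitted.
 */
#if 0
	error = hammer_create_inode(&trans, vap, cred, dip, &nip);
	if (error == 0) {
		error = hammer_get_vnode(nip, &vp);
		hammer_rel_inode(nip, 0);
	}
#endif
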
396 /*
397  * Called by hammer_sync_inode().
398  */
399 static int
400 hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
401 {
402         hammer_transaction_t trans = cursor->trans;
403         hammer_record_t record;
404         int error;
405
406 retry:
407         error = 0;
408
409         /*
410          * If the inode has a presence on-disk then locate it and mark
411          * it deleted, setting DELONDISK.
412          *
413          * The record may or may not be physically deleted, depending on
414          * the retention policy.
415          */
416         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
417             HAMMER_INODE_ONDISK) {
418                 hammer_normalize_cursor(cursor);
419                 cursor->key_beg.obj_id = ip->obj_id;
420                 cursor->key_beg.key = 0;
421                 cursor->key_beg.create_tid = 0;
422                 cursor->key_beg.delete_tid = 0;
423                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
424                 cursor->key_beg.obj_type = 0;
425                 cursor->asof = ip->obj_asof;
426                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
427                 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
428                 cursor->flags |= HAMMER_CURSOR_BACKEND;
429
430                 error = hammer_btree_lookup(cursor);
431                 if (hammer_debug_inode)
432                         kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
433                 if (error) {
434                         kprintf("error %d\n", error);
435                         Debugger("hammer_update_inode");
436                 }
437
438                 if (error == 0) {
439                         error = hammer_ip_delete_record(cursor, trans->tid);
440                         if (hammer_debug_inode)
441                                 kprintf(" error %d\n", error);
442                         if (error && error != EDEADLK) {
443                                 kprintf("error %d\n", error);
444                                 Debugger("hammer_update_inode2");
445                         }
446                         if (error == 0) {
447                                 ip->flags |= HAMMER_INODE_DELONDISK;
448                         }
449                         if (cursor->node)
450                                 hammer_cache_node(cursor->node, &ip->cache[0]);
451                 }
452                 if (error == EDEADLK) {
453                         hammer_done_cursor(cursor);
454                         error = hammer_init_cursor(trans, cursor,
455                                                    &ip->cache[0], ip);
456                         if (hammer_debug_inode)
457                                 kprintf("IPDED %p %d\n", ip, error);
458                         if (error == 0)
459                                 goto retry;
460                 }
461         }
462
463         /*
464          * Ok, write out the initial record or a new record (after deleting
465          * the old one), unless the DELETED flag is set.  This routine will
466          * clear DELONDISK if it writes out a record.
467          *
468          * Update our inode statistics if this is the first application of
469          * the inode on-disk.
470          */
471         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
472                 /*
473                  * Generate a record and write it to the media
474                  */
475                 record = hammer_alloc_mem_record(ip, 0);
476                 record->type = HAMMER_MEM_RECORD_INODE;
477                 record->flush_state = HAMMER_FST_FLUSH;
478                 record->leaf = ip->sync_ino_leaf;
479                 record->leaf.base.create_tid = trans->tid;
480                 record->leaf.data_len = sizeof(ip->sync_ino_data);
481                 record->data = (void *)&ip->sync_ino_data;
482                 record->flags |= HAMMER_RECF_INTERLOCK_BE;
483                 for (;;) {
484                         error = hammer_ip_sync_record_cursor(cursor, record);
485                         if (hammer_debug_inode)
486                                 kprintf("GENREC %p rec %08x %d\n",      
487                                         ip, record->flags, error);
488                         if (error != EDEADLK)
489                                 break;
490                         hammer_done_cursor(cursor);
491                         error = hammer_init_cursor(trans, cursor,
492                                                    &ip->cache[0], ip);
493                         if (hammer_debug_inode)
494                                 kprintf("GENREC reinit %d\n", error);
495                         if (error)
496                                 break;
497                 }
498                 if (error) {
499                         kprintf("error %d\n", error);
500                         Debugger("hammer_update_inode3");
501                 }
502
503                 /*
504                  * The record isn't managed by the inode's record tree,
505                  * destroy it whether we succeed or fail.
506                  */
507                 record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
508                 record->flags |= HAMMER_RECF_DELETED_FE;
509                 record->flush_state = HAMMER_FST_IDLE;
510                 hammer_rel_mem_record(record);
511
512                 /*
513                  * Finish up.
514                  */
515                 if (error == 0) {
516                         if (hammer_debug_inode)
517                                 kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
518                         ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
519                                             HAMMER_INODE_ITIMES);
520                         ip->flags &= ~HAMMER_INODE_DELONDISK;
521
522                         /*
523                          * Root volume count of inodes
524                          */
525                         if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
526                                 hammer_modify_volume_field(trans,
527                                                            trans->rootvol,
528                                                            vol0_stat_inodes);
529                                 ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
530                                 hammer_modify_volume_done(trans->rootvol);
531                                 ip->flags |= HAMMER_INODE_ONDISK;
532                                 if (hammer_debug_inode)
533                                         kprintf("NOWONDISK %p\n", ip);
534                         }
535                 }
536         }
537
538         /*
539          * If the inode has been destroyed, clean out any left-over flags
540          * that may have been set by the frontend.
541          */
542         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) { 
543                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
544                                     HAMMER_INODE_ITIMES);
545         }
546         return(error);
547 }
548
549 /*
550  * Update only the itimes fields.  This is done non-historically.  The
551  * record is updated in-place on the disk.
552  */
553 static int
554 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
555 {
556         hammer_transaction_t trans = cursor->trans;
557         struct hammer_btree_leaf_elm *leaf;
558         int error;
559
560 retry:
561         error = 0;
562         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
563             HAMMER_INODE_ONDISK) {
564                 hammer_normalize_cursor(cursor);
565                 cursor->key_beg.obj_id = ip->obj_id;
566                 cursor->key_beg.key = 0;
567                 cursor->key_beg.create_tid = 0;
568                 cursor->key_beg.delete_tid = 0;
569                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
570                 cursor->key_beg.obj_type = 0;
571                 cursor->asof = ip->obj_asof;
572                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
573                 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
574                 cursor->flags |= HAMMER_CURSOR_BACKEND;
575
576                 error = hammer_btree_lookup(cursor);
577                 if (error) {
578                         kprintf("error %d\n", error);
579                         Debugger("hammer_update_itimes1");
580                 }
581                 if (error == 0) {
582                         /*
583                          * Do not generate UNDO records for atime updates.
584                          */
585                         leaf = cursor->leaf;
586                         hammer_modify_node(trans, cursor->node, 
587                                            &leaf->atime, sizeof(leaf->atime));
588                         leaf->atime = ip->sync_ino_leaf.atime;
589                         hammer_modify_node_done(cursor->node);
590                         /*rec->ino_mtime = ip->sync_ino_rec.ino_mtime;*/
591                         ip->sync_flags &= ~HAMMER_INODE_ITIMES;
592                         /* XXX recalculate crc */
593                         hammer_cache_node(cursor->node, &ip->cache[0]);
594                 }
595                 if (error == EDEADLK) {
596                         hammer_done_cursor(cursor);
597                         error = hammer_init_cursor(trans, cursor,
598                                                    &ip->cache[0], ip);
599                         if (error == 0)
600                                 goto retry;
601                 }
602         }
603         return(error);
604 }
605
606 /*
607  * Release a reference on an inode, flush as requested.
608  *
609  * On the last reference we queue the inode to the flusher for its final
610  * disposition.
611  */
612 void
613 hammer_rel_inode(struct hammer_inode *ip, int flush)
614 {
615         hammer_mount_t hmp = ip->hmp;
616
617         /*
618          * Handle disposition when dropping the last ref.
619          */
620         for (;;) {
621                 if (ip->lock.refs == 1) {
622                         /*
623                          * Determine whether on-disk action is needed for
624                          * the inode's final disposition.
625                          */
626                         KKASSERT(ip->vp == NULL);
627                         hammer_inode_unloadable_check(ip, 0);
628                         if (ip->flags & HAMMER_INODE_MODMASK) {
629                                 hammer_flush_inode(ip, 0);
630                         } else if (ip->lock.refs == 1) {
631                                 hammer_unload_inode(ip);
632                                 break;
633                         }
634                 } else {
635                         if (flush)
636                                 hammer_flush_inode(ip, 0);
637
638                         /*
639                          * The inode still has multiple refs, try to drop
640                          * one ref.
641                          */
642                         KKASSERT(ip->lock.refs >= 1);
643                         if (ip->lock.refs > 1) {
644                                 hammer_unref(&ip->lock);
645                                 break;
646                         }
647                 }
648         }
649
650         /*
651          * XXX bad hack until I add code to track inodes in SETUP.  We
652          * can queue a lot of inodes to the syncer but if we don't wake
653          * it up the undo sets will be too large or too many unflushed
654          * records will build up and blow our malloc limit.
655          */
656         if (++hmp->reclaim_count > 256) {
657                 hmp->reclaim_count = 0;
658                 hammer_flusher_async(hmp);
659         }
660 }
661
662 /*
663  * Unload and destroy the specified inode.  Must be called with one remaining
664  * reference.  The reference is disposed of.
665  *
666  * This can only be called in the context of the flusher.
667  */
668 static int
669 hammer_unload_inode(struct hammer_inode *ip)
670 {
671         KASSERT(ip->lock.refs == 1,
672                 ("hammer_unload_inode: %d refs\n", ip->lock.refs));
673         KKASSERT(ip->vp == NULL);
674         KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
675         KKASSERT(ip->cursor_ip_refs == 0);
676         KKASSERT(ip->lock.lockcount == 0);
677         KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);
678
679         KKASSERT(RB_EMPTY(&ip->rec_tree));
680         KKASSERT(TAILQ_EMPTY(&ip->target_list));
681         KKASSERT(TAILQ_EMPTY(&ip->bio_list));
682         KKASSERT(TAILQ_EMPTY(&ip->bio_alt_list));
683
684         RB_REMOVE(hammer_ino_rb_tree, &ip->hmp->rb_inos_root, ip);
685
686         hammer_uncache_node(&ip->cache[0]);
687         hammer_uncache_node(&ip->cache[1]);
688         if (ip->objid_cache)
689                 hammer_clear_objid(ip);
690         --hammer_count_inodes;
691         kfree(ip, M_HAMMER);
692
693         return(0);
694 }
695
696 /*
697  * A transaction has modified an inode, requiring updates as specified by
698  * the passed flags.
699  *
700  * HAMMER_INODE_DDIRTY: Inode data has been updated
701  * HAMMER_INODE_XDIRTY: Dirty in-memory records
702  * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
703  * HAMMER_INODE_DELETED: Inode record/data must be deleted
704  * HAMMER_INODE_ITIMES: mtime/atime has been updated
705  */
706 void
707 hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
708 {
709         KKASSERT ((ip->flags & HAMMER_INODE_RO) == 0 ||
710                   (flags & (HAMMER_INODE_DDIRTY |
711                             HAMMER_INODE_XDIRTY | HAMMER_INODE_BUFS |
712                             HAMMER_INODE_DELETED | HAMMER_INODE_ITIMES)) == 0);
713
714         ip->flags |= flags;
715 }
716
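/*
 * Illustrative sketch only: frontend code that changes inode meta-data is
 * assumed to update ip->ino_data and then record the change with the
 * matching flag, e.g. for a chmod-style operation (trans and new_mode are
 * the caller's):
 */
#if 0
	ip->ino_data.mode = new_mode;
	hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
#endif
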
717 /*
718  * Request that an inode be flushed.  This whole mess cannot block and may
719  * recurse.  Once requested HAMMER will attempt to actively flush it until
720  * the flush can be done.
721  *
722  * The inode may already be flushing, or may be in a setup state.  We can
723  * place the inode in a flushing state if it is currently idle and flag it
724  * to reflush if it is currently flushing.
725  */
726 void
727 hammer_flush_inode(hammer_inode_t ip, int flags)
728 {
729         hammer_record_t depend;
730         int r, good;
731
732         /*
733          * Trivial 'nothing to flush' case.  If the inode is in a SETUP
734          * state we have to put it back into an IDLE state so we can
735          * drop the extra ref.
736          */
737         if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
738                 if (ip->flush_state == HAMMER_FST_SETUP) {
739                         ip->flush_state = HAMMER_FST_IDLE;
740                         hammer_rel_inode(ip, 0);
741                 }
742                 return;
743         }
744
745         /*
746          * Our flush action will depend on the current state.
747          */
748         switch(ip->flush_state) {
749         case HAMMER_FST_IDLE:
750                 /*
751                  * We have no dependancies and can flush immediately.  Some
752                  * our children may not be flushable so we have to re-test
753                  * with that additional knowledge.
754                  */
755                 hammer_flush_inode_core(ip, flags);
756                 break;
757         case HAMMER_FST_SETUP:
758                 /*
759                  * Recurse upwards through dependancies via target_list
760                  * and start their flusher actions going if possible.
761                  *
762                  * 'good' is our connectivity.  -1 means we have none and
763                  * can't flush, 0 means there weren't any dependancies, and
764                  * 1 means we have good connectivity.
765                  */
766                 good = 0;
767                 TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
768                         r = hammer_setup_parent_inodes(depend);
769                         if (r < 0 && good == 0)
770                                 good = -1;
771                         if (r > 0)
772                                 good = 1;
773                 }
774
775                 /*
776                  * We can continue if good >= 0.  Determine how many records
777                  * under our inode can be flushed (and mark them).
778                  */
779                 if (good >= 0) {
780                         hammer_flush_inode_core(ip, flags);
781                 } else {
782                         ip->flags |= HAMMER_INODE_REFLUSH;
783                         if (flags & HAMMER_FLUSH_SIGNAL) {
784                                 ip->flags |= HAMMER_INODE_RESIGNAL;
785                                 hammer_flusher_async(ip->hmp);
786                         }
787                 }
788                 break;
789         default:
790                 /*
791                  * We are already flushing, flag the inode to reflush
792                  * if needed after it completes its current flush.
793                  */
794                 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
795                         ip->flags |= HAMMER_INODE_REFLUSH;
796                 if (flags & HAMMER_FLUSH_SIGNAL) {
797                         ip->flags |= HAMMER_INODE_RESIGNAL;
798                         hammer_flusher_async(ip->hmp);
799                 }
800                 break;
801         }
802 }
803
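/*
 * Summary of the flush states driven above (see also
 * hammer_flush_inode_core() and hammer_flush_inode_done()):
 *
 *	IDLE  -> FLUSH	flushed immediately via hammer_flush_inode_core().
 *	SETUP -> FLUSH	once the upward recursion through target_list
 *			reports usable connectivity (good >= 0).
 *	SETUP -> IDLE	when MODMASK is clear and there is nothing to flush.
 *	FLUSH -> SETUP or IDLE
 *			in hammer_flush_inode_done(), possibly with REFLUSH
 *			and RESIGNAL forcing another pass, or via the
 *			go_count == 0 revert in hammer_flush_inode_core().
 */
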
804 /*
805  * We are asked to recurse upwards and convert the record from SETUP
806  * to FLUSH if possible.  record->ip is a parent of the caller's inode,
807  * and record->target_ip is the caller's inode.
808  *
809  * Return 1 if the record gives us connectivity
810  *
811  * Return 0 if the record is not relevant 
812  *
813  * Return -1 if we can't resolve the dependancy and there is no connectivity.
814  */
815 static int
816 hammer_setup_parent_inodes(hammer_record_t record)
817 {
818         hammer_mount_t hmp = record->ip->hmp;
819         hammer_record_t depend;
820         hammer_inode_t ip;
821         int r, good;
822
823         KKASSERT(record->flush_state != HAMMER_FST_IDLE);
824         ip = record->ip;
825
826         /*
827          * If the record is already flushing, is it in our flush group?
828          *
829          * If it is in our flush group but it is a general record or a 
830          * delete-on-disk, it does not improve our connectivity (return 0),
831          * and if the target inode is not trying to destroy itself we can't
832          * allow the operation yet anyway (the second return -1).
833          */
834         if (record->flush_state == HAMMER_FST_FLUSH) {
835                 if (record->flush_group != hmp->flusher_next) {
836                         ip->flags |= HAMMER_INODE_REFLUSH;
837                         return(-1);
838                 }
839                 if (record->type == HAMMER_MEM_RECORD_ADD)
840                         return(1);
841                 /* GENERAL or DEL */
842                 return(0);
843         }
844
845         /*
846          * It must be a setup record.  Try to resolve the setup dependancies
847          * by recursing upwards so we can place ip on the flush list.
848          */
849         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
850
851         good = 0;
852         TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
853                 r = hammer_setup_parent_inodes(depend);
854                 if (r < 0 && good == 0)
855                         good = -1;
856                 if (r > 0)
857                         good = 1;
858         }
859
860         /*
861          * We can't flush ip because it has no connectivity (XXX also check
862          * nlinks for pre-existing connectivity!).  Flag it so any resolution
863          * recurses back down.
864          */
865         if (good < 0) {
866                 ip->flags |= HAMMER_INODE_REFLUSH;
867                 return(good);
868         }
869
870         /*
871          * We are go, place the parent inode in a flushing state so we can
872          * place its record in a flushing state.  Note that the parent
873          * may already be flushing.  The record must be in the same flush
874          * group as the parent.
875          */
876         if (ip->flush_state != HAMMER_FST_FLUSH)
877                 hammer_flush_inode_core(ip, HAMMER_FLUSH_RECURSION);
878         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
879         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
880
881 #if 0
882         if (record->type == HAMMER_MEM_RECORD_DEL &&
883             (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
884                 /*
885                  * Regardless of flushing state we cannot sync this path if the
886                  * record represents a delete-on-disk but the target inode
887                  * is not ready to sync its own deletion.
888                  *
889                  * XXX need to count effective nlinks to determine whether
890                  * the flush is ok, otherwise removing a hardlink will
891                  * just leave the DEL record to rot.
892                  */
893                 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
894                 return(-1);
895         } else
896 #endif
897         if (ip->flush_group == ip->hmp->flusher_next) {
898                 /*
899                  * This is the record we wanted to synchronize.
900                  */
901                 record->flush_state = HAMMER_FST_FLUSH;
902                 record->flush_group = ip->flush_group;
903                 hammer_ref(&record->lock);
904                 if (record->type == HAMMER_MEM_RECORD_ADD)
905                         return(1);
906
907                 /*
908                  * A general or delete-on-disk record does not contribute
909                  * to our visibility.  We can still flush it, however.
910                  */
911                 return(0);
912         } else {
913                 /*
914                  * We couldn't resolve the dependancies, request that the
915                  * inode be flushed when the dependancies can be resolved.
916                  */
917                 ip->flags |= HAMMER_INODE_REFLUSH;
918                 return(-1);
919         }
920 }
921
922 /*
923  * This is the core routine placing an inode into the FST_FLUSH state.
924  */
925 static void
926 hammer_flush_inode_core(hammer_inode_t ip, int flags)
927 {
928         int go_count;
929
930         /*
931          * Set flush state and prevent the flusher from cycling into
932          * the next flush group.  Do not place the ip on the list yet.
933          * Inodes in the idle state get an extra reference here.
934          */
935         KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
936         if (ip->flush_state == HAMMER_FST_IDLE)
937                 hammer_ref(&ip->lock);
938         ip->flush_state = HAMMER_FST_FLUSH;
939         ip->flush_group = ip->hmp->flusher_next;
940         ++ip->hmp->flusher_lock;
941
942         /*
943          * We need to be able to vfsync/truncate from the backend.
944          */
945         KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
946         if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
947                 ip->flags |= HAMMER_INODE_VHELD;
948                 vref(ip->vp);
949         }
950
951         /*
952          * Figure out how many in-memory records we can actually flush
953          * (not including inode meta-data, buffers, etc).
954          */
955         if (flags & HAMMER_FLUSH_RECURSION) {
956                 go_count = 1;
957         } else {
958                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
959                                    hammer_setup_child_callback, NULL);
960         }
961
962         /*
963          * This is a more involved test that includes go_count.  If we
964          * can't flush, flag the inode and return.  If go_count is 0 we
965          * were unable to flush any records in our rec_tree and
966          * must ignore the XDIRTY flag.
967          */
968         if (go_count == 0) {
969                 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
970                         ip->flags |= HAMMER_INODE_REFLUSH;
971                         ip->flush_state = HAMMER_FST_SETUP;
972                         if (ip->flags & HAMMER_INODE_VHELD) {
973                                 ip->flags &= ~HAMMER_INODE_VHELD;
974                                 vrele(ip->vp);
975                         }
976                         if (flags & HAMMER_FLUSH_SIGNAL) {
977                                 ip->flags |= HAMMER_INODE_RESIGNAL;
978                                 hammer_flusher_async(ip->hmp);
979                         }
980                         if (--ip->hmp->flusher_lock == 0)
981                                 wakeup(&ip->hmp->flusher_lock);
982                         return;
983                 }
984         }
985
986         /*
987          * Snapshot the state of the inode for the backend flusher.
988          *
989          * The truncation must be retained in the frontend until after
990          * we've actually performed the record deletion.
991          *
992          * NOTE: The DELETING flag is a mod flag, but it is also sticky,
993          * and stays in ip->flags.  Once set, it stays set until the
994          * inode is destroyed.
995          */
996         ip->sync_flags = (ip->flags & HAMMER_INODE_MODMASK);
997         ip->sync_trunc_off = ip->trunc_off;
998         ip->sync_ino_leaf = ip->ino_leaf;
999         ip->sync_ino_data = ip->ino_data;
1000         ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
1001
1002         /*
1003          * The flusher list inherits our inode and reference.
1004          */
1005         TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
1006         if (--ip->hmp->flusher_lock == 0)
1007                 wakeup(&ip->hmp->flusher_lock);
1008
1009         if (flags & HAMMER_FLUSH_SIGNAL)
1010                 hammer_flusher_async(ip->hmp);
1011 }
1012
1013 /*
1014  * Callback for scan of ip->rec_tree.  Try to include each record in our
1015  * flush.  ip->flush_group has been set but the inode has not yet been
1016  * moved into a flushing state.
1017  *
1018  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
1019  * both inodes.
1020  *
1021  * We return 1 for any record placed or found in FST_FLUSH, which prevents
1022  * the caller from shortcutting the flush.
1023  */
1024 static int
1025 hammer_setup_child_callback(hammer_record_t rec, void *data)
1026 {
1027         hammer_inode_t target_ip;
1028         hammer_inode_t ip;
1029         int r;
1030
1031         /*
1032          * If the record has been deleted by the backend (it's being held
1033          * by the frontend in a race), just ignore it.
1034          */
1035         if (rec->flags & HAMMER_RECF_DELETED_BE)
1036                 return(0);
1037
1038         /*
1039          * If the record is in an idle state it has no dependancies and
1040          * can be flushed.
1041          */
1042         ip = rec->ip;
1043         r = 0;
1044
1045         switch(rec->flush_state) {
1046         case HAMMER_FST_IDLE:
1047                 /*
1048                  * Record has no setup dependancy, we can flush it.
1049                  */
1050                 KKASSERT(rec->target_ip == NULL);
1051                 rec->flush_state = HAMMER_FST_FLUSH;
1052                 rec->flush_group = ip->flush_group;
1053                 hammer_ref(&rec->lock);
1054                 r = 1;
1055                 break;
1056         case HAMMER_FST_SETUP:
1057                 /*
1058                  * Record has a setup dependancy.  Try to include the
1059                  * target ip in the flush. 
1060                  *
1061                  * We have to be careful here, if we do not do the right
1062                  * thing we can lose track of dirty inodes and the system
1063                  * will lockup trying to allocate buffers.
1064                  */
1065                 target_ip = rec->target_ip;
1066                 KKASSERT(target_ip != NULL);
1067                 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
1068                 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
1069                         /*
1070                          * If the target IP is already flushing in our group
1071                          * we are golden, otherwise make sure the target
1072                          * reflushes.
1073                          */
1074                         if (target_ip->flush_group == ip->flush_group) {
1075                                 rec->flush_state = HAMMER_FST_FLUSH;
1076                                 rec->flush_group = ip->flush_group;
1077                                 hammer_ref(&rec->lock);
1078                                 r = 1;
1079                         } else {
1080                                 target_ip->flags |= HAMMER_INODE_REFLUSH;
1081                         }
1082                 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
1083                         /*
1084                          * If the target IP is not flushing we can force
1085                          * it to flush.  Even if it is unable to write out
1086                          * any of its own records, we have at least one in
1087                          * hand that we CAN deal with.
1088                          */
1089                         rec->flush_state = HAMMER_FST_FLUSH;
1090                         rec->flush_group = ip->flush_group;
1091                         hammer_ref(&rec->lock);
1092                         hammer_flush_inode_core(target_ip,
1093                                                 HAMMER_FLUSH_RECURSION);
1094                         r = 1;
1095                 } else {
1096                         /*
1097                          * General or delete-on-disk record.
1098                          *
1099                          * XXX this needs help.  If a delete-on-disk we could
1100                          * disconnect the target.  If the target has its own
1101                          * dependancies they really need to be flushed.
1102                          *
1103                          * XXX
1104                          */
1105                         rec->flush_state = HAMMER_FST_FLUSH;
1106                         rec->flush_group = ip->flush_group;
1107                         hammer_ref(&rec->lock);
1108                         hammer_flush_inode_core(target_ip,
1109                                                 HAMMER_FLUSH_RECURSION);
1110                         r = 1;
1111                 }
1112                 break;
1113         case HAMMER_FST_FLUSH:
1114                 /* 
1115                  * Record already associated with a flush group.  It had
1116                  * better be ours.
1117                  */
1118                 KKASSERT(rec->flush_group == ip->flush_group);
1119                 r = 1;
1120                 break;
1121         }
1122         return(r);
1123 }
1124
1125 /*
1126  * Wait for a previously queued flush to complete
1127  */
1128 void
1129 hammer_wait_inode(hammer_inode_t ip)
1130 {
1131         while (ip->flush_state != HAMMER_FST_IDLE) {
1132                 ip->flags |= HAMMER_INODE_FLUSHW;
1133                 tsleep(&ip->flags, 0, "hmrwin", 0);
1134         }
1135 }
1136
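/*
 * Illustrative sketch only: a caller that needs a synchronous flush (an
 * fsync-style path is assumed) kicks the flusher and then blocks here
 * until the inode drops back to the idle state.
 */
#if 0
	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
	hammer_wait_inode(ip);
#endif
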
1137 /*
1138  * Called by the backend code when a flush has been completed.
1139  * The inode has already been removed from the flush list.
1140  *
1141  * A pipelined flush can occur, in which case we must re-enter the
1142  * inode on the list and re-copy its fields.
1143  */
1144 void
1145 hammer_flush_inode_done(hammer_inode_t ip)
1146 {
1147         struct bio *bio;
1148         int dorel = 0;
1149
1150         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
1151
1152         /*
1153          * Allow BIOs to queue to the inode's primary bioq again.
1154          */
1155         ip->flags &= ~HAMMER_INODE_WRITE_ALT;
1156
1157         /*
1158          * Merge left-over flags back into the frontend and fix the state.
1159          */
1160         ip->flags |= ip->sync_flags;
1161
1162         /*
1163          * The backend may have adjusted nlinks, so if the adjusted nlinks
1164          * does not match the frontend, set the frontend's DDIRTY flag again.
1165          */
1166         if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
1167                 ip->flags |= HAMMER_INODE_DDIRTY;
1168
1169         /*
1170          * Reflush any BIOs that wound up in the alt list.  Our inode will
1171          * also wind up at the end of the flusher's list.
1172          */
1173         while ((bio = TAILQ_FIRST(&ip->bio_alt_list)) != NULL) {
1174                 TAILQ_REMOVE(&ip->bio_alt_list, bio, bio_act);
1175                 TAILQ_INSERT_TAIL(&ip->bio_list, bio, bio_act);
1176         }
1177         /*
1178          * Fix up the dirty buffer status.
1179          */
1180         if (TAILQ_FIRST(&ip->bio_list) ||
1181             (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree))) {
1182                 ip->flags |= HAMMER_INODE_BUFS;
1183         }
1184
1185         /*
1186          * Re-set the XDIRTY flag if some of the inode's in-memory records
1187          * could not be flushed.
1188          */
1189         if (RB_ROOT(&ip->rec_tree))
1190                 ip->flags |= HAMMER_INODE_XDIRTY;
1191
1192         /*
1193          * Do not lose track of inodes which no longer have vnode
1194          * associations, otherwise they may never get flushed again.
1195          */
1196         if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
1197                 ip->flags |= HAMMER_INODE_REFLUSH;
1198
1199         /*
1200          * Adjust flush_state.  The target state (idle or setup) shouldn't
1201          * be terribly important since we will reflush if we really need
1202          * to do anything. XXX
1203          */
1204         if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
1205                 ip->flush_state = HAMMER_FST_IDLE;
1206                 dorel = 1;
1207         } else {
1208                 ip->flush_state = HAMMER_FST_SETUP;
1209         }
1210
1211         /*
1212          * Clean up the vnode ref
1213          */
1214         if (ip->flags & HAMMER_INODE_VHELD) {
1215                 ip->flags &= ~HAMMER_INODE_VHELD;
1216                 vrele(ip->vp);
1217         }
1218
1219         /*
1220          * If the frontend made more changes and requested another flush,
1221          * then try to get it running.
1222          */
1223         if (ip->flags & HAMMER_INODE_REFLUSH) {
1224                 ip->flags &= ~HAMMER_INODE_REFLUSH;
1225                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
1226                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
1227                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1228                 } else {
1229                         hammer_flush_inode(ip, 0);
1230                 }
1231         }
1232
1233         /*
1234          * Finally, if the frontend is waiting for a flush to complete,
1235          * wake it up.
1236          */
1237         if (ip->flush_state != HAMMER_FST_FLUSH) {
1238                 if (ip->flags & HAMMER_INODE_FLUSHW) {
1239                         ip->flags &= ~HAMMER_INODE_FLUSHW;
1240                         wakeup(&ip->flags);
1241                 }
1242         }
1243         if (dorel)
1244                 hammer_rel_inode(ip, 0);
1245 }
1246
1247 /*
1248  * Called from hammer_sync_inode() to synchronize in-memory records
1249  * to the media.
1250  */
1251 static int
1252 hammer_sync_record_callback(hammer_record_t record, void *data)
1253 {
1254         hammer_cursor_t cursor = data;
1255         hammer_transaction_t trans = cursor->trans;
1256         int error;
1257
1258         /*
1259          * Skip records that do not belong to the current flush.
1260          */
1261         if (record->flush_state != HAMMER_FST_FLUSH)
1262                 return(0);
1263         KKASSERT((record->flags & HAMMER_RECF_DELETED_BE) == 0);
1264 #if 1
1265         if (record->flush_group != record->ip->flush_group) {
1266                 kprintf("sync_record %p ip %p bad flush group %d %d\n", record, record->ip, record->flush_group, record->ip->flush_group);
1267                 Debugger("blah2");
1268                 return(0);
1269         }
1270 #endif
1271         KKASSERT(record->flush_group == record->ip->flush_group);
1272
1273         /*
1274          * Interlock the record using the BE flag.  Once BE is set the
1275          * frontend cannot change the state of FE.
1276          *
1277          * NOTE: If FE is set prior to us setting BE we still sync the
1278          * record out, but the flush completion code converts it to 
1279          * a delete-on-disk record instead of destroying it.
1280          */
1281         KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
1282         record->flags |= HAMMER_RECF_INTERLOCK_BE;
1283
1284         /*
1285          * If the whole inode is being deleted, all on-disk records will
1286          * be deleted very soon, we can't sync any new records to disk
1287          * because they will be deleted in the same transaction they were
1288          * created in (delete_tid == create_tid), which will assert.
1289          *
1290          * XXX There may be a case with RECORD_ADD with DELETED_FE set
1291          * that we currently panic on.
1292          */
1293         if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
1294                 switch(record->type) {
1295                 case HAMMER_MEM_RECORD_GENERAL:
1296                         record->flags |= HAMMER_RECF_DELETED_FE;
1297                         record->flags |= HAMMER_RECF_DELETED_BE;
1298                         error = 0;
1299                         goto done;
1300                 case HAMMER_MEM_RECORD_ADD:
1301                         panic("hammer_sync_record_callback: illegal add "
1302                               "during inode deletion record %p", record);
1303                         break; /* NOT REACHED */
1304                 case HAMMER_MEM_RECORD_INODE:
1305                         panic("hammer_sync_record_callback: attempt to "
1306                               "sync inode record %p?", record);
1307                         break; /* NOT REACHED */
1308                 case HAMMER_MEM_RECORD_DEL:
1309                         /* 
1310                          * Follow through and issue the on-disk deletion
1311                          */
1312                         break;
1313                 }
1314         }
1315
1316         /*
1317          * If DELETED_FE is set we may have already sent dependant pieces
1318          * to the disk and we must flush the record as if it hadn't been
1319          * deleted.  This creates a bit of a mess because we have to
1320          * have ip_sync_record convert the record to MEM_RECORD_DEL before
1321          * it inserts the B-Tree record.  Otherwise the media sync might
1322          * be visible to the frontend.
1323          */
1324         if (record->flags & HAMMER_RECF_DELETED_FE) {
1325                 if (record->type == HAMMER_MEM_RECORD_ADD) {
1326                         record->flags |= HAMMER_RECF_CONVERT_DELETE;
1327                 } else {
1328                         KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
1329                         return(0);
1330                 }
1331         }
1332
1333         /*
1334          * Assign the create_tid for new records.  Deletions already
1335          * have the record's entire key properly set up.
1336          */
1337         if (record->type != HAMMER_MEM_RECORD_DEL)
1338                 record->leaf.base.create_tid = trans->tid;
1339         for (;;) {
1340                 error = hammer_ip_sync_record_cursor(cursor, record);
1341                 if (error != EDEADLK)
1342                         break;
1343                 hammer_done_cursor(cursor);
1344                 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
1345                                            record->ip);
1346                 if (error)
1347                         break;
1348         }
1349         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
1350
1351         if (error) {
1352                 error = -error;
1353                 if (error != -ENOSPC) {
1354                         kprintf("hammer_sync_record_callback: sync failed rec "
1355                                 "%p, error %d\n", record, error);
1356                         Debugger("sync failed rec");
1357                 }
1358         }
1359 done:
1360         hammer_flush_record_done(record, error);
1361         return(error);
1362 }
1363
1364 /*
1365  * XXX error handling
1366  */
1367 int
1368 hammer_sync_inode(hammer_inode_t ip)
1369 {
1370         struct hammer_transaction trans;
1371         struct hammer_cursor cursor;
1372         struct bio *bio;
1373         hammer_record_t depend;
1374         hammer_record_t next;
1375         int error, tmp_error;
1376         u_int64_t nlinks;
1377
1378         if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
1379                 return(0);
1380
1381         hammer_start_transaction_fls(&trans, ip->hmp);
1382         error = hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
1383         if (error)
1384                 goto done;
1385
1386         /*
1387          * Any directory records referencing this inode which are not in
1388          * our current flush group must adjust our nlink count for the
1389          * purposes of synchronization to disk.
1390          *
1391          * Records which are in our flush group can be unlinked from our
1392          * inode now, potentially allowing the inode to be physically
1393          * deleted.
1394          */
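             /*
              * For example, an ADD record (a new directory entry) that is
              * not part of this flush group has not reached the media yet,
              * so the on-disk nlinks must be one less than the frontend
              * count; a not-yet-flushed DEL means the on-media directory
              * entry still exists, so the on-disk count is one greater.
              */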
1395         nlinks = ip->ino_data.nlinks;
1396         next = TAILQ_FIRST(&ip->target_list);
1397         while ((depend = next) != NULL) {
1398                 next = TAILQ_NEXT(depend, target_entry);
1399                 if (depend->flush_state == HAMMER_FST_FLUSH &&
1400                     depend->flush_group == ip->hmp->flusher_act) {
1401                         /*
1402                          * If this is an ADD that was deleted by the frontend,
1403                          * the frontend nlinks count will have already been
1404                          * decremented, but the backend is going to sync its
1405                          * directory entry and must account for it.  The
1406                          * record will be converted to a delete-on-disk when
1407                          * it gets synced.
1408                          *
1409                          * If the ADD was not deleted by the frontend we
1410                          * can remove the dependency from our target_list.
1411                          */
1412                         if (depend->flags & HAMMER_RECF_DELETED_FE) {
1413                                 ++nlinks;
1414                         } else {
1415                                 TAILQ_REMOVE(&ip->target_list, depend,
1416                                              target_entry);
1417                                 depend->target_ip = NULL;
1418                         }
1419                 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
1420                         /*
1421                          * Not part of our flush group
1422                          */
1423                         KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
1424                         switch(depend->type) {
1425                         case HAMMER_MEM_RECORD_ADD:
1426                                 --nlinks;
1427                                 break;
1428                         case HAMMER_MEM_RECORD_DEL:
1429                                 ++nlinks;
1430                                 break;
1431                         default:
1432                                 break;
1433                         }
1434                 }
1435         }
1436
1437         /*
1438          * Set dirty if we had to modify the link count.
1439          */
1440         if (ip->sync_ino_data.nlinks != nlinks) {
1441                 KKASSERT((int64_t)nlinks >= 0);
1442                 ip->sync_ino_data.nlinks = nlinks;
1443                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1444         }
1445
1446         /*
1447          * Queue up as many dirty buffers as we can then set a flag to
1448          * cause any further BIOs to go to the alternative queue.
1449          */
1450         if (ip->flags & HAMMER_INODE_VHELD)
1451                 error = vfsync(ip->vp, MNT_NOWAIT, 1, NULL, NULL);
1452         ip->flags |= HAMMER_INODE_WRITE_ALT;
1453
1454         /*
1455          * The buffer cache may contain dirty buffers beyond the inode
1456          * state we copied from the frontend to the backend.  Because
1457          * we are syncing our buffer cache on the backend, resync
1458          * the truncation point and the file size so we don't wipe out
1459          * any data.
1460          *
1461          * Syncing the buffer cache on the frontend has serious problems
1462          * because it prevents us from passively queueing dirty inodes
1463          * to the backend (the BIO's could stall indefinitely).
1464          */
1465         if (ip->flags & HAMMER_INODE_TRUNCATED) {
1466                 ip->sync_trunc_off = ip->trunc_off;
1467                 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
1468         }
1469         if (ip->sync_ino_data.size != ip->ino_data.size) {
1470                 ip->sync_ino_data.size = ip->ino_data.size;
1471                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1472         }
1473
1474         /*
1475          * If there is a truncation queued, destroy any data past the (aligned)
1476          * truncation point.  Userland will have dealt with the buffer
1477          * containing the truncation point for us.
1478          *
1479          * We don't flush pending frontend data buffers until after we've
1480          * dealt with the truncation.
1481          *
1482          * Don't bother if the inode is or has been deleted.
1483          */
1484         if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
1485                 /*
1486                  * Interlock trunc_off.  The VOP front-end may continue to
1487                  * make adjustments to it while we are blocked.
1488                  */
1489                 off_t trunc_off;
1490                 off_t aligned_trunc_off;
1491
1492                 trunc_off = ip->sync_trunc_off;
1493                 aligned_trunc_off = (trunc_off + HAMMER_BUFMASK) &
1494                                     ~HAMMER_BUFMASK64;
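                     /*
                      * The rounding aligns the truncation offset up to the
                      * next HAMMER buffer boundary.  E.g. assuming 16KB
                      * buffers, a trunc_off of 0x5000 yields an
                      * aligned_trunc_off of 0x8000, while an already aligned
                      * offset is left unchanged.
                      */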
1495
1496                 /*
1497                  * Delete any whole blocks on-media.  The front-end has
1498                  * already cleaned out any partial block and made it
1499                  * pending.  The front-end may have updated trunc_off
1500                  * while we were blocked so do not just unconditionally
1501                  * set it to the maximum offset.
1502                  */
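                     /*
                      * 0x7FFFFFFFFFFFFFFFLL is the maximum signed 64 bit
                      * offset, so the delete range runs from the aligned
                      * truncation point through the end of the object.
                      */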
1503                 error = hammer_ip_delete_range(&cursor, ip,
1504                                                 aligned_trunc_off,
1505                                                 0x7FFFFFFFFFFFFFFFLL);
1506                 if (error)
1507                         Debugger("hammer_ip_delete_range errored");
1508                 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
1509                 if (ip->trunc_off >= trunc_off) {
1510                         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
1511                         ip->flags &= ~HAMMER_INODE_TRUNCATED;
1512                 }
1513         } else {
1514                 error = 0;
1515         }
1516
1517         /*
1518          * Now sync related records.  These will typically be directory
1519          * entries or delete-on-disk records.
1520          *
1521          * Not all records will be flushed, but clear XDIRTY anyway.  We
1522          * will set it again in the frontend hammer_flush_inode_done() 
1523          * if records remain.
1524          */
1525         if (error == 0) {
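                     /*
                      * hammer_sync_record_callback() returns a negated errno
                      * (a negative callback return aborts the scan), so
                      * convert it back to a positive error code here.
                      */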
1526                 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1527                                     hammer_sync_record_callback, &cursor);
1528                 if (tmp_error < 0)
1529                         tmp_error = -tmp_error;
1530                 if (tmp_error)
1531                         error = tmp_error;
1532                 if (RB_EMPTY(&ip->rec_tree))
1533                         ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
1534         }
1535
1536         /*
1537          * If we are deleting the inode the frontend had better not have
1538          * any active references on elements making up the inode.
1539          */
1540         if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
1541             RB_EMPTY(&ip->rec_tree) &&
1542             (ip->sync_flags & HAMMER_INODE_DELETING) &&
1543             (ip->flags & HAMMER_INODE_DELETED) == 0) {
1544                 int count1 = 0;
1545
1546                 hkprintf("Y");
1547                 ip->flags |= HAMMER_INODE_DELETED;
1548                 error = hammer_ip_delete_range_all(&cursor, ip, &count1);
1549                 if (error == 0) {
1550                         ip->sync_flags &= ~HAMMER_INODE_DELETING;
1551                         ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
1552                         KKASSERT(RB_EMPTY(&ip->rec_tree));
1553
1554                         /*
1555                          * Set delete_tid in both the frontend and backend
1556                          * copy of the inode record.  The DELETED flag handles
1557                          * this, do not set RDIRTY.
1558                          */
1559                         ip->ino_leaf.base.delete_tid = trans.tid;
1560                         ip->sync_ino_leaf.base.delete_tid = trans.tid;
1561
1562                         /*
1563                          * Adjust the inode count in the volume header
1564                          */
1565                         if (ip->flags & HAMMER_INODE_ONDISK) {
1566                                 hammer_modify_volume_field(&trans,
1567                                                            trans.rootvol,
1568                                                            vol0_stat_inodes);
1569                                 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
1570                                 hammer_modify_volume_done(trans.rootvol);
1571                         }
1572                 } else {
1573                         ip->flags &= ~HAMMER_INODE_DELETED;
1574                         Debugger("hammer_ip_delete_range_all errored");
1575                 }
1576         }
1577
1578         /*
1579          * Flush any queued BIOs.  These will just biodone() the IO's if
1580          * the inode has been deleted.
1581          */
1582         while ((bio = TAILQ_FIRST(&ip->bio_list)) != NULL) {
1583                 TAILQ_REMOVE(&ip->bio_list, bio, bio_act);
1584                 tmp_error = hammer_dowrite(&cursor, ip, bio);
1585                 if (tmp_error)
1586                         error = tmp_error;
1587         }
1588         ip->sync_flags &= ~HAMMER_INODE_BUFS;
1589
1590         if (error)
1591                 Debugger("RB_SCAN errored");
1592
1593         /*
1594          * Now update the inode's on-disk inode-data and/or on-disk record.
1595          * DELETED and ONDISK are managed only in ip->flags.
1596          */
1597         switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
1598         case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
1599                 /*
1600                  * If deleted and on-disk, don't set any additional flags.
1601                  * The delete flag takes care of things.
1602                  *
1603                  * Clear flags which may have been set by the frontend.
1604                  */
1605                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY|
1606                                     HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
1607                                     HAMMER_INODE_DELETING);
1608                 break;
1609         case HAMMER_INODE_DELETED:
1610                 /*
1611                  * Take care of the case where a deleted inode was never
1612                  * flushed to the disk in the first place.
1613                  *
1614                  * Clear flags which may have been set by the frontend.
1615                  */
1616                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY|
1617                                     HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
1618                                     HAMMER_INODE_DELETING);
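                     /*
                      * Destroy any remaining in-memory records.  Each record
                      * is referenced, marked deleted on both the frontend
                      * and backend, and released; since our reference is the
                      * only one the release frees the record.
                      */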
1619                 while (RB_ROOT(&ip->rec_tree)) {
1620                         hammer_record_t record = RB_ROOT(&ip->rec_tree);
1621                         hammer_ref(&record->lock);
1622                         KKASSERT(record->lock.refs == 1);
1623                         record->flags |= HAMMER_RECF_DELETED_FE;
1624                         record->flags |= HAMMER_RECF_DELETED_BE;
1625                         hammer_rel_mem_record(record);
1626                 }
1627                 break;
1628         case HAMMER_INODE_ONDISK:
1629                 /*
1630                  * If already on-disk, do not set any additional flags.
1631                  */
1632                 break;
1633         default:
1634                 /*
1635                  * If not on-disk and not deleted, set both dirty flags
1636                  * to force an initial record to be written.  Also set
1637                  * the create_tid for the inode.
1638                  *
1639                  * Set create_tid in both the frontend and backend
1640                  * copy of the inode record.
1641                  */
1642                 ip->ino_leaf.base.create_tid = trans.tid;
1643                 ip->sync_ino_leaf.base.create_tid = trans.tid;
1644                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1645                 break;
1646         }
1647
1648         /*
1649          * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
1650          * is already on-disk the old record is marked as deleted.
1651          *
1652          * If DELETED is set hammer_update_inode() will delete the existing
1653          * record without writing out a new one.
1654          *
1655          * If *ONLY* the ITIMES flag is set we can update the record in-place.
1656          */
1657         if (ip->flags & HAMMER_INODE_DELETED) {
1658                 error = hammer_update_inode(&cursor, ip);
1659         } else 
1660         if ((ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) ==
1661             HAMMER_INODE_ITIMES) {
1662                 error = hammer_update_itimes(&cursor, ip);
1663         } else
1664         if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) {
1665                 error = hammer_update_inode(&cursor, ip);
1666         }
1667         if (error)
1668                 Debugger("hammer_update_itimes/inode errored");
1669 done:
1670         /*
1671          * Save the TID we used to sync the inode with to make sure we
1672          * do not improperly reuse it.
1673          */
1674         hammer_done_cursor(&cursor);
1675         hammer_done_transaction(&trans);
1676         return(error);
1677 }
1678
1679 /*
1680  * This routine is called when the OS is no longer actively referencing
1681  * the inode (but might still be keeping it cached), or when releasing
1682  * the last reference to an inode.
1683  *
1684  * At this point if the inode's nlinks count is zero we want to destroy
1685  * it, which may mean destroying it on-media too.
1686  */
1687 void
1688 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
1689 {
1690         struct vnode *vp;
1691
1692         /*
1693          * Set the DELETING flag when the link count drops to 0 and the
1694          * OS no longer has any opens on the inode.
1695          *
1696          * The backend will clear DELETING (a mod flag) and set DELETED
1697          * (a state flag) when it is actually able to perform the
1698          * operation.
1699          */
1700         if (ip->ino_data.nlinks == 0 &&
1701             (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
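                     /*
                      * Flag the inode for deletion and queue a truncation to
                      * offset 0 so the backend destroys all of the inode's
                      * data records when it syncs the inode.
                      */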
1702                 ip->flags |= HAMMER_INODE_DELETING;
1703                 ip->flags |= HAMMER_INODE_TRUNCATED;
1704                 ip->trunc_off = 0;
1705                 vp = NULL;
1706                 if (getvp) {
1707                         if (hammer_get_vnode(ip, &vp) != 0)
1708                                 return;
1709                 }
1710                 if (ip->vp) {
1711                         vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
1712                         vnode_pager_setsize(ip->vp, 0);
1713                 }
1714                 if (getvp) {
1715                         vput(vp);
1716                 }
1717         }
1718 }
1719
1720 /*
1721  * Re-test an inode when a dependency has gone away to see if we
1722  * can chain-flush it.
1723  */
1724 void
1725 hammer_test_inode(hammer_inode_t ip)
1726 {
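             /*
              * REFLUSH indicates that another flush was requested while the
              * inode was already flushing; RESIGNAL records that the request
              * asked for HAMMER_FLUSH_SIGNAL, so propagate that flag to the
              * new flush.
              */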
1727         if (ip->flags & HAMMER_INODE_REFLUSH) {
1728                 ip->flags &= ~HAMMER_INODE_REFLUSH;
1729                 hammer_ref(&ip->lock);
1730                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
1731                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
1732                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1733                 } else {
1734                         hammer_flush_inode(ip, 0);
1735                 }
1736                 hammer_rel_inode(ip, 0);
1737         }
1738 }
1739