HAMMER 43B/Many: Correct delete-on-disk record bug.
sys/vfs/hammer/hammer_inode.c
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  * 
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  * 
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  * 
34  * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.57 2008/05/13 00:15:28 dillon Exp $
35  */
36
37 #include "hammer.h"
38 #include <vm/vm_extern.h>
39 #include <sys/buf.h>
40 #include <sys/buf2.h>
41
42 static int hammer_unload_inode(struct hammer_inode *ip);
43 static void hammer_flush_inode_core(hammer_inode_t ip, int flags);
44 static int hammer_setup_child_callback(hammer_record_t rec, void *data);
45 static int hammer_setup_parent_inodes(hammer_record_t record);
46
47 /*
48  * The kernel is not actively referencing this vnode but is still holding
49  * it cached.
50  *
51  * This is called from the frontend.
52  */
53 int
54 hammer_vop_inactive(struct vop_inactive_args *ap)
55 {
56         struct hammer_inode *ip = VTOI(ap->a_vp);
57
58         /*
59          * Degenerate case
60          */
61         if (ip == NULL) {
62                 vrecycle(ap->a_vp);
63                 return(0);
64         }
65
66         /*
67          * If the inode no longer has visibility in the filesystem and is
68          * fairly clean, try to recycle it immediately.  This can deadlock
69          * in vfsync() if we aren't careful.
70          */
71         hammer_inode_unloadable_check(ip, 0);
72         if (ip->flags & HAMMER_INODE_MODMASK)
73                 hammer_flush_inode(ip, 0);
74         else if (ip->ino_data.nlinks == 0)
75                 vrecycle(ap->a_vp);
76         return(0);
77 }
78
79 /*
80  * Release the vnode association.  This is typically (but not always)
81  * the last reference on the inode.
82  *
83  * Once the association is lost we are on our own with regards to
84  * flushing the inode.
85  */
86 int
87 hammer_vop_reclaim(struct vop_reclaim_args *ap)
88 {
89         struct hammer_inode *ip;
90         struct vnode *vp;
91
92         vp = ap->a_vp;
93
94         if ((ip = vp->v_data) != NULL) {
95                 vp->v_data = NULL;
96                 ip->vp = NULL;
97                 hammer_rel_inode(ip, 1);
98         }
99         return(0);
100 }
101
102 /*
103  * Return a locked vnode for the specified inode.  The inode must be
104  * referenced but NOT LOCKED on entry and will remain referenced on
105  * return.
106  *
107  * Called from the frontend.
108  */
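/*
 * A minimal usage sketch (illustrative only, assuming a frontend VOP
 * that already has a transaction 'trans' and the desired obj_id):
 *
 *	ip = hammer_get_inode(&trans, NULL, obj_id, trans.hmp->asof,
 *			      0, &error);
 *	if (error == 0) {
 *		error = hammer_get_vnode(ip, &vp);
 *		hammer_rel_inode(ip, 0);
 *	}
 *
 * The locked vnode returned in *vpp keeps the inode referenced via
 * ip->vp until the vnode is reclaimed.
 */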
109 int
110 hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
111 {
112         struct vnode *vp;
113         int error = 0;
114
115         for (;;) {
116                 if ((vp = ip->vp) == NULL) {
117                         error = getnewvnode(VT_HAMMER, ip->hmp->mp, vpp, 0, 0);
118                         if (error)
119                                 break;
120                         hammer_lock_ex(&ip->lock);
121                         if (ip->vp != NULL) {
122                                 hammer_unlock(&ip->lock);
123                                 vp->v_type = VBAD;
124                                 vx_put(vp);
125                                 continue;
126                         }
127                         hammer_ref(&ip->lock);
128                         vp = *vpp;
129                         ip->vp = vp;
130                         vp->v_type =
131                                 hammer_get_vnode_type(ip->ino_data.obj_type);
132
133                         switch(ip->ino_data.obj_type) {
134                         case HAMMER_OBJTYPE_CDEV:
135                         case HAMMER_OBJTYPE_BDEV:
136                                 vp->v_ops = &ip->hmp->mp->mnt_vn_spec_ops;
137                                 addaliasu(vp, ip->ino_data.rmajor,
138                                           ip->ino_data.rminor);
139                                 break;
140                         case HAMMER_OBJTYPE_FIFO:
141                                 vp->v_ops = &ip->hmp->mp->mnt_vn_fifo_ops;
142                                 break;
143                         default:
144                                 break;
145                         }
146
147                         /*
148                          * Only mark as the root vnode if the ip is not
149                          * historical, otherwise the VFS cache will get
150                          * confused.  The other half of the special handling
151                          * is in hammer_vop_nlookupdotdot().
152                          */
153                         if (ip->obj_id == HAMMER_OBJID_ROOT &&
154                             ip->obj_asof == ip->hmp->asof) {
155                                 vp->v_flag |= VROOT;
156                         }
157
158                         vp->v_data = (void *)ip;
159                         /* vnode locked by getnewvnode() */
160                         /* make related vnode dirty if inode dirty? */
161                         hammer_unlock(&ip->lock);
162                         if (vp->v_type == VREG)
163                                 vinitvmio(vp, ip->ino_data.size);
164                         break;
165                 }
166
167                 /*
168                  * loop if the vget fails (aka races), or if the vp
169                  * no longer matches ip->vp.
170                  */
171                 if (vget(vp, LK_EXCLUSIVE) == 0) {
172                         if (vp == ip->vp)
173                                 break;
174                         vput(vp);
175                 }
176         }
177         *vpp = vp;
178         return(error);
179 }
180
181 /*
182  * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
183  * do not attach or detach the related vnode (use hammer_get_vnode() for
184  * that).
185  *
186  * The flags argument is only applied for newly created inodes, and only
187  * certain flags are inherited.
188  *
189  * Called from the frontend.
190  */
191 struct hammer_inode *
192 hammer_get_inode(hammer_transaction_t trans, struct hammer_node **cache,
193                  u_int64_t obj_id, hammer_tid_t asof, int flags, int *errorp)
194 {
195         hammer_mount_t hmp = trans->hmp;
196         struct hammer_inode_info iinfo;
197         struct hammer_cursor cursor;
198         struct hammer_inode *ip;
199
200         /*
201          * Determine if we already have an inode cached.  If we do then
202          * we are golden.
203          */
204         iinfo.obj_id = obj_id;
205         iinfo.obj_asof = asof;
206 loop:
207         ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
208         if (ip) {
209                 hammer_ref(&ip->lock);
210                 *errorp = 0;
211                 return(ip);
212         }
213
214         ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
215         ++hammer_count_inodes;
216         ip->obj_id = obj_id;
217         ip->obj_asof = iinfo.obj_asof;
218         ip->hmp = hmp;
219         ip->flags = flags & HAMMER_INODE_RO;
220         if (hmp->ronly)
221                 ip->flags |= HAMMER_INODE_RO;
222         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
223         RB_INIT(&ip->rec_tree);
224         TAILQ_INIT(&ip->bio_list);
225         TAILQ_INIT(&ip->bio_alt_list);
226         TAILQ_INIT(&ip->target_list);
227
228         /*
229          * Locate the on-disk inode.
230          */
231 retry:
232         hammer_init_cursor(trans, &cursor, cache, NULL);
233         cursor.key_beg.obj_id = ip->obj_id;
234         cursor.key_beg.key = 0;
235         cursor.key_beg.create_tid = 0;
236         cursor.key_beg.delete_tid = 0;
237         cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
238         cursor.key_beg.obj_type = 0;
239         cursor.asof = iinfo.obj_asof;
240         cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
241                        HAMMER_CURSOR_ASOF;
242
243         *errorp = hammer_btree_lookup(&cursor);
244         if (*errorp == EDEADLK) {
245                 hammer_done_cursor(&cursor);
246                 goto retry;
247         }
248
249         /*
250          * On success the B-Tree lookup will hold the appropriate
251          * buffer cache buffers and provide a pointer to the requested
252          * information.  Copy the information to the in-memory inode
253          * and cache the B-Tree node to improve future operations.
254          */
255         if (*errorp == 0) {
256                 ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
257                 ip->ino_data = cursor.data->inode;
258                 hammer_cache_node(cursor.node, &ip->cache[0]);
259                 if (cache)
260                         hammer_cache_node(cursor.node, cache);
261         }
262
263         /*
264          * On success load the inode's record and data and insert the
265  * inode into the in-memory inode RB tree.  It is possible to race another lookup
266          * insertion of the same inode so deal with that condition too.
267          *
268          * The cursor's locked node interlocks against others creating and
269          * destroying ip while we were blocked.
270          */
271         if (*errorp == 0) {
272                 hammer_ref(&ip->lock);
273                 if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
274                         hammer_uncache_node(&ip->cache[0]);
275                         hammer_uncache_node(&ip->cache[1]);
276                         KKASSERT(ip->lock.refs == 1);
277                         --hammer_count_inodes;
278                         kfree(ip, M_HAMMER);
279                         hammer_done_cursor(&cursor);
280                         goto loop;
281                 }
282                 ip->flags |= HAMMER_INODE_ONDISK;
283         } else {
284                 /*
285                  * Do not panic on read-only accesses which fail, particularly
286                  * historical accesses where the snapshot might not have
287                  * complete connectivity.
288                  */
289                 if ((flags & HAMMER_INODE_RO) == 0) {
290                         kprintf("hammer_get_inode: failed ip %p obj_id %016llx cursor %p error %d\n",
291                                 ip, ip->obj_id, &cursor, *errorp);
292                         Debugger("x");
293                 }
294                 --hammer_count_inodes;
295                 kfree(ip, M_HAMMER);
296                 ip = NULL;
297         }
298         hammer_done_cursor(&cursor);
299         return (ip);
300 }
301
302 /*
303  * Create a new filesystem object, returning the inode in *ipp.  The
304  * returned inode will be referenced.
305  *
306  * The inode is created in-memory.
307  */
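/*
 * Note that the new inode exists only in memory at this point: the
 * caller is expected to link it into the namespace with a directory
 * entry record, and the inode record itself reaches the media later
 * when the flusher runs (see hammer_update_inode()).
 */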
308 int
309 hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
310                     struct ucred *cred, hammer_inode_t dip,
311                     struct hammer_inode **ipp)
312 {
313         hammer_mount_t hmp;
314         hammer_inode_t ip;
315         uid_t xuid;
316
317         hmp = trans->hmp;
318         ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
319         ++hammer_count_inodes;
320         ip->obj_id = hammer_alloc_objid(trans, dip);
321         KKASSERT(ip->obj_id != 0);
322         ip->obj_asof = hmp->asof;
323         ip->hmp = hmp;
324         ip->flush_state = HAMMER_FST_IDLE;
325         ip->flags = HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES;
326
327         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
328         RB_INIT(&ip->rec_tree);
329         TAILQ_INIT(&ip->bio_list);
330         TAILQ_INIT(&ip->bio_alt_list);
331         TAILQ_INIT(&ip->target_list);
332
333         ip->ino_leaf.atime = trans->time;
334         ip->ino_data.mtime = trans->time;
335         ip->ino_data.size = 0;
336         ip->ino_data.nlinks = 0;
337         /* XXX */
338         ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
339         ip->ino_leaf.base.obj_id = ip->obj_id;
340         ip->ino_leaf.base.key = 0;
341         ip->ino_leaf.base.create_tid = 0;
342         ip->ino_leaf.base.delete_tid = 0;
343         ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
344         ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);
345
346         ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
347         ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
348         ip->ino_data.mode = vap->va_mode;
349         ip->ino_data.ctime = trans->time;
350         ip->ino_data.parent_obj_id = (dip) ? dip->ino_leaf.base.obj_id : 0;
351
352         switch(ip->ino_leaf.base.obj_type) {
353         case HAMMER_OBJTYPE_CDEV:
354         case HAMMER_OBJTYPE_BDEV:
355                 ip->ino_data.rmajor = vap->va_rmajor;
356                 ip->ino_data.rminor = vap->va_rminor;
357                 break;
358         default:
359                 break;
360         }
361
362         /*
363          * Calculate default uid/gid and overwrite with information from
364          * the vap.
365          */
366         xuid = hammer_to_unix_xid(&dip->ino_data.uid);
367         ip->ino_data.gid = dip->ino_data.gid;
368         xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
369                                      &vap->va_mode);
370         ip->ino_data.mode = vap->va_mode;
371
372         if (vap->va_vaflags & VA_UID_UUID_VALID)
373                 ip->ino_data.uid = vap->va_uid_uuid;
374         else if (vap->va_uid != (uid_t)VNOVAL)
375                 hammer_guid_to_uuid(&ip->ino_data.uid, xuid);
376         if (vap->va_vaflags & VA_GID_UUID_VALID)
377                 ip->ino_data.gid = vap->va_gid_uuid;
378         else if (vap->va_gid != (gid_t)VNOVAL)
379                 hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
380
381         hammer_ref(&ip->lock);
382         if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
383                 hammer_unref(&ip->lock);
384                 panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
385         }
386         *ipp = ip;
387         return(0);
388 }
389
390 /*
391  * Called by hammer_sync_inode().
392  */
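/*
 * Summary of the update: if an older copy of the inode record is
 * already on the media it is deleted first (setting DELONDISK), then a
 * fresh record reflecting sync_ino_leaf/sync_ino_data is written out,
 * unless the inode itself has been deleted.
 */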
393 static int
394 hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
395 {
396         hammer_transaction_t trans = cursor->trans;
397         hammer_record_t record;
398         int error;
399
400 retry:
401         error = 0;
402
403         /*
404          * If the inode has a presence on-disk then locate it and mark
405          * it deleted, setting DELONDISK.
406          *
407          * The record may or may not be physically deleted, depending on
408          * the retention policy.
409          */
410         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
411             HAMMER_INODE_ONDISK) {
412                 hammer_normalize_cursor(cursor);
413                 cursor->key_beg.obj_id = ip->obj_id;
414                 cursor->key_beg.key = 0;
415                 cursor->key_beg.create_tid = 0;
416                 cursor->key_beg.delete_tid = 0;
417                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
418                 cursor->key_beg.obj_type = 0;
419                 cursor->asof = ip->obj_asof;
420                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
421                 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
422                 cursor->flags |= HAMMER_CURSOR_BACKEND;
423
424                 error = hammer_btree_lookup(cursor);
425                 if (hammer_debug_inode)
426                         kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
427                 if (error) {
428                         kprintf("error %d\n", error);
429                         Debugger("hammer_update_inode");
430                 }
431
432                 if (error == 0) {
433                         error = hammer_ip_delete_record(cursor, trans->tid);
434                         if (hammer_debug_inode)
435                                 kprintf(" error %d\n", error);
436                         if (error && error != EDEADLK) {
437                                 kprintf("error %d\n", error);
438                                 Debugger("hammer_update_inode2");
439                         }
440                         if (error == 0) {
441                                 ip->flags |= HAMMER_INODE_DELONDISK;
442                         }
443                         if (cursor->node)
444                                 hammer_cache_node(cursor->node, &ip->cache[0]);
445                 }
446                 if (error == EDEADLK) {
447                         hammer_done_cursor(cursor);
448                         error = hammer_init_cursor(trans, cursor,
449                                                    &ip->cache[0], ip);
450                         if (hammer_debug_inode)
451                                 kprintf("IPDED %p %d\n", ip, error);
452                         if (error == 0)
453                                 goto retry;
454                 }
455         }
456
457         /*
458          * Ok, write out the initial record or a new record (after deleting
459          * the old one), unless the DELETED flag is set.  This routine will
460          * clear DELONDISK if it writes out a record.
461          *
462          * Update our inode statistics if this is the first application of
463          * the inode on-disk.
464          */
465         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
466                 /*
467                  * Generate a record and write it to the media
468                  */
469                 record = hammer_alloc_mem_record(ip, 0);
470                 record->type = HAMMER_MEM_RECORD_INODE;
471                 record->flush_state = HAMMER_FST_FLUSH;
472                 record->leaf = ip->sync_ino_leaf;
473                 record->leaf.base.create_tid = trans->tid;
474                 record->leaf.data_len = sizeof(ip->sync_ino_data);
475                 record->data = (void *)&ip->sync_ino_data;
476                 record->flags |= HAMMER_RECF_INTERLOCK_BE;
477                 for (;;) {
478                         error = hammer_ip_sync_record_cursor(cursor, record);
479                         if (hammer_debug_inode)
480                                 kprintf("GENREC %p rec %08x %d\n",      
481                                         ip, record->flags, error);
482                         if (error != EDEADLK)
483                                 break;
484                         hammer_done_cursor(cursor);
485                         error = hammer_init_cursor(trans, cursor,
486                                                    &ip->cache[0], ip);
487                         if (hammer_debug_inode)
488                                 kprintf("GENREC reinit %d\n", error);
489                         if (error)
490                                 break;
491                 }
492                 if (error) {
493                         kprintf("error %d\n", error);
494                         Debugger("hammer_update_inode3");
495                 }
496
497                 /*
498                  * The record isn't managed by the inode's record tree,
499                  * destroy it whether we succeed or fail.
500                  */
501                 record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
502                 record->flags |= HAMMER_RECF_DELETED_FE;
503                 record->flush_state = HAMMER_FST_IDLE;
504                 hammer_rel_mem_record(record);
505
506                 /*
507                  * Finish up.
508                  */
509                 if (error == 0) {
510                         if (hammer_debug_inode)
511                                 kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
512                         ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
513                                             HAMMER_INODE_ITIMES);
514                         ip->flags &= ~HAMMER_INODE_DELONDISK;
515
516                         /*
517                          * Root volume count of inodes
518                          */
519                         if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
520                                 hammer_modify_volume_field(trans,
521                                                            trans->rootvol,
522                                                            vol0_stat_inodes);
523                                 ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
524                                 hammer_modify_volume_done(trans->rootvol);
525                                 ip->flags |= HAMMER_INODE_ONDISK;
526                                 if (hammer_debug_inode)
527                                         kprintf("NOWONDISK %p\n", ip);
528                         }
529                 }
530         }
531
532         /*
533          * If the inode has been destroyed, clean out any left-over flags
534          * that may have been set by the frontend.
535          */
536         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) { 
537                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
538                                     HAMMER_INODE_ITIMES);
539         }
540         return(error);
541 }
542
543 /*
544  * Update only the itimes fields.  This is done non-historically.  The
545  * record is updated in-place on the disk.
546  */
547 static int
548 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
549 {
550         hammer_transaction_t trans = cursor->trans;
551         struct hammer_btree_leaf_elm *leaf;
552         int error;
553
554 retry:
555         error = 0;
556         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
557             HAMMER_INODE_ONDISK) {
558                 hammer_normalize_cursor(cursor);
559                 cursor->key_beg.obj_id = ip->obj_id;
560                 cursor->key_beg.key = 0;
561                 cursor->key_beg.create_tid = 0;
562                 cursor->key_beg.delete_tid = 0;
563                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
564                 cursor->key_beg.obj_type = 0;
565                 cursor->asof = ip->obj_asof;
566                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
567                 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
568                 cursor->flags |= HAMMER_CURSOR_BACKEND;
569
570                 error = hammer_btree_lookup(cursor);
571                 if (error) {
572                         kprintf("error %d\n", error);
573                         Debugger("hammer_update_itimes1");
574                 }
575                 if (error == 0) {
576                         /*
577                          * Do not generate UNDO records for atime updates.
578                          */
579                         leaf = cursor->leaf;
580                         hammer_modify_node(trans, cursor->node, 
581                                            &leaf->atime, sizeof(leaf->atime));
582                         leaf->atime = ip->sync_ino_leaf.atime;
583                         hammer_modify_node_done(cursor->node);
584                         /*rec->ino_mtime = ip->sync_ino_rec.ino_mtime;*/
585                         ip->sync_flags &= ~HAMMER_INODE_ITIMES;
586                         /* XXX recalculate crc */
587                         hammer_cache_node(cursor->node, &ip->cache[0]);
588                 }
589                 if (error == EDEADLK) {
590                         hammer_done_cursor(cursor);
591                         error = hammer_init_cursor(trans, cursor,
592                                                    &ip->cache[0], ip);
593                         if (error == 0)
594                                 goto retry;
595                 }
596         }
597         return(error);
598 }
599
600 /*
601  * Release a reference on an inode, flush as requested.
602  *
603  * On the last reference we queue the inode to the flusher for its final
604  * disposition.
605  */
606 void
607 hammer_rel_inode(struct hammer_inode *ip, int flush)
608 {
609         hammer_mount_t hmp = ip->hmp;
610
611         /*
612          * Handle disposition when dropping the last ref.
613          */
614         for (;;) {
615                 if (ip->lock.refs == 1) {
616                         /*
617                          * Determine whether on-disk action is needed for
618                          * the inode's final disposition.
619                          */
620                         KKASSERT(ip->vp == NULL);
621                         hammer_inode_unloadable_check(ip, 0);
622                         if (ip->flags & HAMMER_INODE_MODMASK) {
623                                 hammer_flush_inode(ip, 0);
624                         } else if (ip->lock.refs == 1) {
625                                 hammer_unload_inode(ip);
626                                 break;
627                         }
628                 } else {
629                         if (flush)
630                                 hammer_flush_inode(ip, 0);
631
632                         /*
633                          * The inode still has multiple refs, try to drop
634                          * one ref.
635                          */
636                         KKASSERT(ip->lock.refs >= 1);
637                         if (ip->lock.refs > 1) {
638                                 hammer_unref(&ip->lock);
639                                 break;
640                         }
641                 }
642         }
643
644         /*
645          * XXX bad hack until I add code to track inodes in SETUP.  We
646          * can queue a lot of inodes to the syncer but if we don't wake
647          * it up the undo sets will be too large or too many unflushed
648          * records will build up and blow our malloc limit.
649          */
650         if (++hmp->reclaim_count > 256) {
651                 hmp->reclaim_count = 0;
652                 hammer_flusher_async(hmp);
653         }
654 }
655
656 /*
657  * Unload and destroy the specified inode.  Must be called with one remaining
658  * reference.  The reference is disposed of.
659  *
660  * This can only be called in the context of the flusher.
661  */
662 static int
663 hammer_unload_inode(struct hammer_inode *ip)
664 {
665         KASSERT(ip->lock.refs == 1,
666                 ("hammer_unload_inode: %d refs\n", ip->lock.refs));
667         KKASSERT(ip->vp == NULL);
668         KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
669         KKASSERT(ip->cursor_ip_refs == 0);
670         KKASSERT(ip->lock.lockcount == 0);
671         KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);
672
673         KKASSERT(RB_EMPTY(&ip->rec_tree));
674         KKASSERT(TAILQ_EMPTY(&ip->target_list));
675         KKASSERT(TAILQ_EMPTY(&ip->bio_list));
676         KKASSERT(TAILQ_EMPTY(&ip->bio_alt_list));
677
678         RB_REMOVE(hammer_ino_rb_tree, &ip->hmp->rb_inos_root, ip);
679
680         hammer_uncache_node(&ip->cache[0]);
681         hammer_uncache_node(&ip->cache[1]);
682         if (ip->objid_cache)
683                 hammer_clear_objid(ip);
684         --hammer_count_inodes;
685         kfree(ip, M_HAMMER);
686
687         return(0);
688 }
689
690 /*
691  * A transaction has modified an inode, requiring updates as specified by
692  * the passed flags.
693  *
694  * HAMMER_INODE_DDIRTY: Inode data has been updated
695  * HAMMER_INODE_XDIRTY: Dirty in-memory records
696  * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
697  * HAMMER_INODE_DELETED: Inode record/data must be deleted
698  * HAMMER_INODE_ITIMES: mtime/atime has been updated
699  */
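/*
 * Illustrative use (sketch): after the frontend updates fields in
 * ip->ino_data, e.g. in a setattr path, it would call
 *
 *	hammer_modify_inode(&trans, ip, HAMMER_INODE_DDIRTY);
 *
 * so the change is picked up by the next flush of the inode.
 */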
700 void
701 hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
702 {
703         KKASSERT ((ip->flags & HAMMER_INODE_RO) == 0 ||
704                   (flags & (HAMMER_INODE_DDIRTY |
705                             HAMMER_INODE_XDIRTY | HAMMER_INODE_BUFS |
706                             HAMMER_INODE_DELETED | HAMMER_INODE_ITIMES)) == 0);
707
708         ip->flags |= flags;
709 }
710
711 /*
712  * Request that an inode be flushed.  This whole mess cannot block and may
713  * recurse.  Once requested HAMMER will attempt to actively flush it until
714  * the flush can be done.
715  *
716  * The inode may already be flushing, or may be in a setup state.  We can
717  * place the inode in a flushing state if it is currently idle and flag it
718  * to reflush if it is currently flushing.
719  */
720 void
721 hammer_flush_inode(hammer_inode_t ip, int flags)
722 {
723         hammer_record_t depend;
724         int r, good;
725
726         /*
727          * Trivial 'nothing to flush' case.  If the inode is in a SETUP
728          * state we have to put it back into an IDLE state so we can
729          * drop the extra ref.
730          */
731         if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
732                 if (ip->flush_state == HAMMER_FST_SETUP) {
733                         ip->flush_state = HAMMER_FST_IDLE;
734                         hammer_rel_inode(ip, 0);
735                 }
736                 return;
737         }
738
739         /*
740          * Our flush action will depend on the current state.
741          */
742         switch(ip->flush_state) {
743         case HAMMER_FST_IDLE:
744                 /*
745                  * We have no dependancies and can flush immediately.  Some of
746                  * our children may not be flushable so we have to re-test
747                  * with that additional knowledge.
748                  */
749                 hammer_flush_inode_core(ip, flags);
750                 break;
751         case HAMMER_FST_SETUP:
752                 /*
753                  * Recurse upwards through dependancies via target_list
754                  * and start their flusher actions going if possible.
755                  *
756                  * 'good' is our connectivity.  -1 means we have none and
757                  * can't flush, 0 means there weren't any dependancies, and
758                  * 1 means we have good connectivity.
759                  */
760                 good = 0;
761                 TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
762                         r = hammer_setup_parent_inodes(depend);
763                         if (r < 0 && good == 0)
764                                 good = -1;
765                         if (r > 0)
766                                 good = 1;
767                 }
768
769                 /*
770                  * We can continue if good >= 0.  Determine how many records
771                  * under our inode can be flushed (and mark them).
772                  */
773                 if (good >= 0) {
774                         hammer_flush_inode_core(ip, flags);
775                 } else {
776                         ip->flags |= HAMMER_INODE_REFLUSH;
777                         if (flags & HAMMER_FLUSH_SIGNAL) {
778                                 ip->flags |= HAMMER_INODE_RESIGNAL;
779                                 hammer_flusher_async(ip->hmp);
780                         }
781                 }
782                 break;
783         default:
784                 /*
785                  * We are already flushing, flag the inode to reflush
786                  * if needed after it completes its current flush.
787                  */
788                 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
789                         ip->flags |= HAMMER_INODE_REFLUSH;
790                 if (flags & HAMMER_FLUSH_SIGNAL) {
791                         ip->flags |= HAMMER_INODE_RESIGNAL;
792                         hammer_flusher_async(ip->hmp);
793                 }
794                 break;
795         }
796 }
797
798 /*
799  * We are asked to recurse upwards and convert the record from SETUP
800  * to FLUSH if possible.  record->ip is a parent of the caller's inode,
801  * and record->target_ip is the caller's inode.
802  *
803  * Return 1 if the record gives us connectivity
804  *
805  * Return 0 if the record is not relevant 
806  *
807  * Return -1 if we can't resolve the dependancy and there is no connectivity.
808  */
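/*
 * The recursion walks parent dependancies upward via each inode's
 * target_list and terminates at records which are already in a flush
 * state or at inodes with no dependancies of their own.
 */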
809 static int
810 hammer_setup_parent_inodes(hammer_record_t record)
811 {
812         hammer_mount_t hmp = record->ip->hmp;
813         hammer_record_t depend;
814         hammer_inode_t ip;
815         int r, good;
816
817         KKASSERT(record->flush_state != HAMMER_FST_IDLE);
818         ip = record->ip;
819
820         /*
821          * If the record is already flushing, is it in our flush group?
822          *
823          * If it is in our flush group but it is a general record or a 
824          * delete-on-disk, it does not improve our connectivity (return 0),
825          * and if the target inode is not trying to destroy itself we can't
826          * allow the operation yet anyway (the second return -1).
827          */
828         if (record->flush_state == HAMMER_FST_FLUSH) {
829                 if (record->flush_group != hmp->flusher_next) {
830                         ip->flags |= HAMMER_INODE_REFLUSH;
831                         return(-1);
832                 }
833                 if (record->type == HAMMER_MEM_RECORD_ADD)
834                         return(1);
835                 /* GENERAL or DEL */
836                 return(0);
837         }
838
839         /*
840          * It must be a setup record.  Try to resolve the setup dependancies
841          * by recursing upwards so we can place ip on the flush list.
842          */
843         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
844
845         good = 0;
846         TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
847                 r = hammer_setup_parent_inodes(depend);
848                 if (r < 0 && good == 0)
849                         good = -1;
850                 if (r > 0)
851                         good = 1;
852         }
853
854         /*
855          * We can't flush ip because it has no connectivity (XXX also check
856          * nlinks for pre-existing connectivity!).  Flag it so any resolution
857          * recurses back down.
858          */
859         if (good < 0) {
860                 ip->flags |= HAMMER_INODE_REFLUSH;
861                 return(good);
862         }
863
864         /*
865          * We are go, place the parent inode in a flushing state so we can
866          * place its record in a flushing state.  Note that the parent
867          * may already be flushing.  The record must be in the same flush
868          * group as the parent.
869          */
870         if (ip->flush_state != HAMMER_FST_FLUSH)
871                 hammer_flush_inode_core(ip, HAMMER_FLUSH_RECURSION);
872         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
873         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
874
875 #if 0
876         if (record->type == HAMMER_MEM_RECORD_DEL &&
877             (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
878                 /*
879                  * Regardless of flushing state we cannot sync this path if the
880                  * record represents a delete-on-disk but the target inode
881                  * is not ready to sync its own deletion.
882                  *
883                  * XXX need to count effective nlinks to determine whether
884                  * the flush is ok, otherwise removing a hardlink will
885                  * just leave the DEL record to rot.
886                  */
887                 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
888                 return(-1);
889         } else
890 #endif
891         if (ip->flush_group == ip->hmp->flusher_next) {
892                 /*
893                  * This is the record we wanted to synchronize.
894                  */
895                 record->flush_state = HAMMER_FST_FLUSH;
896                 record->flush_group = ip->flush_group;
897                 hammer_ref(&record->lock);
898                 if (record->type == HAMMER_MEM_RECORD_ADD)
899                         return(1);
900
901                 /*
902                  * A general or delete-on-disk record does not contribute
903                  * to our visibility.  We can still flush it, however.
904                  */
905                 return(0);
906         } else {
907                 /*
908                  * We couldn't resolve the dependancies, request that the
909                  * inode be flushed when the dependancies can be resolved.
910                  */
911                 ip->flags |= HAMMER_INODE_REFLUSH;
912                 return(-1);
913         }
914 }
915
916 /*
917  * This is the core routine placing an inode into the FST_FLUSH state.
918  */
919 static void
920 hammer_flush_inode_core(hammer_inode_t ip, int flags)
921 {
922         int go_count;
923
924         /*
925          * Set flush state and prevent the flusher from cycling into
926          * the next flush group.  Do not place the ip on the list yet.
927          * Inodes not in the idle state get an extra reference.
928          */
929         KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
930         if (ip->flush_state == HAMMER_FST_IDLE)
931                 hammer_ref(&ip->lock);
932         ip->flush_state = HAMMER_FST_FLUSH;
933         ip->flush_group = ip->hmp->flusher_next;
934         ++ip->hmp->flusher_lock;
935
936         /*
937          * We need to be able to vfsync/truncate from the backend.
938          */
939         KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
940         if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
941                 ip->flags |= HAMMER_INODE_VHELD;
942                 vref(ip->vp);
943         }
944
945         /*
946          * Figure out how many in-memory records we can actually flush
947          * (not including inode meta-data, buffers, etc).
948          */
949         if (flags & HAMMER_FLUSH_RECURSION) {
950                 go_count = 1;
951         } else {
952                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
953                                    hammer_setup_child_callback, NULL);
954         }
955
956         /*
957          * This is a more involved test that includes go_count.  If we
958          * can't flush, flag the inode and return.  If go_count is 0 we
959          * were unable to flush any records in our rec_tree and
960          * must ignore the XDIRTY flag.
961          */
962         if (go_count == 0) {
963                 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
964                         ip->flags |= HAMMER_INODE_REFLUSH;
965                         ip->flush_state = HAMMER_FST_SETUP;
966                         if (ip->flags & HAMMER_INODE_VHELD) {
967                                 ip->flags &= ~HAMMER_INODE_VHELD;
968                                 vrele(ip->vp);
969                         }
970                         if (flags & HAMMER_FLUSH_SIGNAL) {
971                                 ip->flags |= HAMMER_INODE_RESIGNAL;
972                                 hammer_flusher_async(ip->hmp);
973                         }
974                         if (--ip->hmp->flusher_lock == 0)
975                                 wakeup(&ip->hmp->flusher_lock);
976                         return;
977                 }
978         }
979
980         /*
981          * Snapshot the state of the inode for the backend flusher.
982          *
983          * The truncation must be retained in the frontend until after
984          * we've actually performed the record deletion.
985          *
986          * NOTE: The DELETING flag is a mod flag, but it is also sticky,
987          * and stays in ip->flags.  Once set, it stays set until the
988          * inode is destroyed.
989          */
990         ip->sync_flags = (ip->flags & HAMMER_INODE_MODMASK);
991         ip->sync_trunc_off = ip->trunc_off;
992         ip->sync_ino_leaf = ip->ino_leaf;
993         ip->sync_ino_data = ip->ino_data;
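        /*
         * NOTE: ~ binds tighter than | in the expression below, so this
         * clears the MODMASK bits but retains TRUNCATED, as described
         * above.
         */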
994         ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
995
996         /*
997          * The flusher list inherits our inode and reference.
998          */
999         TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
1000         if (--ip->hmp->flusher_lock == 0)
1001                 wakeup(&ip->hmp->flusher_lock);
1002
1003         if (flags & HAMMER_FLUSH_SIGNAL)
1004                 hammer_flusher_async(ip->hmp);
1005 }
1006
1007 /*
1008  * Callback for scan of ip->rec_tree.  Try to include each record in our
1009  * flush.  ip->flush_group has been set but the inode has not yet been
1010  * moved into a flushing state.
1011  *
1012  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
1013  * both inodes.
1014  *
1015  * We return 1 for any record placed or found in FST_FLUSH, which prevents
1016  * the caller from shortcutting the flush.
1017  */
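/*
 * The results feed go_count in hammer_flush_inode_core(); if every
 * record returns 0 the flush cannot include any in-memory records and
 * the XDIRTY flag is ignored for this pass.
 */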
1018 static int
1019 hammer_setup_child_callback(hammer_record_t rec, void *data)
1020 {
1021         hammer_inode_t target_ip;
1022         hammer_inode_t ip;
1023         int r;
1024
1025         /*
1026          * If the record has been deleted by the backend (it's being held
1027          * by the frontend in a race), just ignore it.
1028          */
1029         if (rec->flags & HAMMER_RECF_DELETED_BE)
1030                 return(0);
1031
1032         /*
1033          * If the record is in an idle state it has no dependancies and
1034          * can be flushed.
1035          */
1036         ip = rec->ip;
1037         r = 0;
1038
1039         switch(rec->flush_state) {
1040         case HAMMER_FST_IDLE:
1041                 /*
1042                  * Record has no setup dependancy, we can flush it.
1043                  */
1044                 KKASSERT(rec->target_ip == NULL);
1045                 rec->flush_state = HAMMER_FST_FLUSH;
1046                 rec->flush_group = ip->flush_group;
1047                 hammer_ref(&rec->lock);
1048                 r = 1;
1049                 break;
1050         case HAMMER_FST_SETUP:
1051                 /*
1052                  * Record has a setup dependancy.  Try to include the
1053                  * target ip in the flush. 
1054                  *
1055                  * We have to be careful here, if we do not do the right
1056                  * thing we can lose track of dirty inodes and the system
1057                  * will lockup trying to allocate buffers.
1058                  */
1059                 target_ip = rec->target_ip;
1060                 KKASSERT(target_ip != NULL);
1061                 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
1062                 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
1063                         /*
1064                          * If the target IP is already flushing in our group
1065                          * we are golden, otherwise make sure the target
1066                          * reflushes.
1067                          */
1068                         if (target_ip->flush_group == ip->flush_group) {
1069                                 rec->flush_state = HAMMER_FST_FLUSH;
1070                                 rec->flush_group = ip->flush_group;
1071                                 hammer_ref(&rec->lock);
1072                                 r = 1;
1073                         } else {
1074                                 target_ip->flags |= HAMMER_INODE_REFLUSH;
1075                         }
1076                 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
1077                         /*
1078                          * If the target IP is not flushing we can force
1079                          * it to flush, even if it is unable to write out
1080                          * any of its own records we have at least one in
1081                          * hand that we CAN deal with.
1082                          */
1083                         rec->flush_state = HAMMER_FST_FLUSH;
1084                         rec->flush_group = ip->flush_group;
1085                         hammer_ref(&rec->lock);
1086                         hammer_flush_inode_core(target_ip,
1087                                                 HAMMER_FLUSH_RECURSION);
1088                         r = 1;
1089                 } else {
1090                         /*
1091                          * General or delete-on-disk record.
1092                          *
1093                          * XXX this needs help.  If a delete-on-disk we could
1094                          * disconnect the target.  If the target has its own
1095                          * dependancies they really need to be flushed.
1096                          *
1097                          * XXX
1098                          */
1099                         rec->flush_state = HAMMER_FST_FLUSH;
1100                         rec->flush_group = ip->flush_group;
1101                         hammer_ref(&rec->lock);
1102                         hammer_flush_inode_core(target_ip,
1103                                                 HAMMER_FLUSH_RECURSION);
1104                         r = 1;
1105                 }
1106                 break;
1107         case HAMMER_FST_FLUSH:
1108                 /* 
1109                  * Record already associated with a flush group.  It had
1110                  * better be ours.
1111                  */
1112                 KKASSERT(rec->flush_group == ip->flush_group);
1113                 r = 1;
1114                 break;
1115         }
1116         return(r);
1117 }
1118
1119 /*
1120  * Wait for a previously queued flush to complete
1121  */
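/*
 * Callers needing synchronous semantics (fsync-style paths) typically
 * signal a flush with HAMMER_FLUSH_SIGNAL and then block here until
 * the flusher returns the inode to an idle state.
 */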
1122 void
1123 hammer_wait_inode(hammer_inode_t ip)
1124 {
1125         while (ip->flush_state != HAMMER_FST_IDLE) {
1126                 ip->flags |= HAMMER_INODE_FLUSHW;
1127                 tsleep(&ip->flags, 0, "hmrwin", 0);
1128         }
1129 }
1130
1131 /*
1132  * Called by the backend code when a flush has been completed.
1133  * The inode has already been removed from the flush list.
1134  *
1135  * A pipelined flush can occur, in which case we must re-enter the
1136  * inode on the list and re-copy its fields.
1137  */
1138 void
1139 hammer_flush_inode_done(hammer_inode_t ip)
1140 {
1141         struct bio *bio;
1142         int dorel = 0;
1143
1144         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
1145
1146         /*
1147          * Allow BIOs to queue to the inode's primary bioq again.
1148          */
1149         ip->flags &= ~HAMMER_INODE_WRITE_ALT;
1150
1151         /*
1152          * Merge left-over flags back into the frontend and fix the state.
1153          */
1154         ip->flags |= ip->sync_flags;
1155
1156         /*
1157          * The backend may have adjusted nlinks, so if the adjusted nlinks
1158          * does not match the frontend's, set the frontend's DDIRTY flag again.
1159          */
1160         if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
1161                 ip->flags |= HAMMER_INODE_DDIRTY;
1162
1163         /*
1164          * Reflush any BIOs that wound up in the alt list.  Our inode will
1165          * also wind up at the end of the flusher's list.
1166          */
1167         while ((bio = TAILQ_FIRST(&ip->bio_alt_list)) != NULL) {
1168                 TAILQ_REMOVE(&ip->bio_alt_list, bio, bio_act);
1169                 TAILQ_INSERT_TAIL(&ip->bio_list, bio, bio_act);
1170         }
1171         /*
1172          * Fix up the dirty buffer status.
1173          */
1174         if (TAILQ_FIRST(&ip->bio_list) ||
1175             (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree))) {
1176                 ip->flags |= HAMMER_INODE_BUFS;
1177         }
1178
1179         /*
1180          * Re-set the XDIRTY flag if some of the inode's in-memory records
1181          * could not be flushed.
1182          */
1183         if (RB_ROOT(&ip->rec_tree))
1184                 ip->flags |= HAMMER_INODE_XDIRTY;
1185
1186         /*
1187          * Do not lose track of inodes which no longer have vnode
1188          * associations, otherwise they may never get flushed again.
1189          */
1190         if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
1191                 ip->flags |= HAMMER_INODE_REFLUSH;
1192
1193         /*
1194          * Adjust flush_state.  The target state (idle or setup) shouldn't
1195          * be terribly important since we will reflush if we really need
1196          * to do anything. XXX
1197          */
1198         if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
1199                 ip->flush_state = HAMMER_FST_IDLE;
1200                 dorel = 1;
1201         } else {
1202                 ip->flush_state = HAMMER_FST_SETUP;
1203         }
1204
1205         /*
1206          * Clean up the vnode ref
1207          */
1208         if (ip->flags & HAMMER_INODE_VHELD) {
1209                 ip->flags &= ~HAMMER_INODE_VHELD;
1210                 vrele(ip->vp);
1211         }
1212
1213         /*
1214          * If the frontend made more changes and requested another flush,
1215          * then try to get it running.
1216          */
1217         if (ip->flags & HAMMER_INODE_REFLUSH) {
1218                 ip->flags &= ~HAMMER_INODE_REFLUSH;
1219                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
1220                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
1221                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1222                 } else {
1223                         hammer_flush_inode(ip, 0);
1224                 }
1225         }
1226
1227         /*
1228          * Finally, if the frontend is waiting for a flush to complete,
1229          * wake it up.
1230          */
1231         if (ip->flush_state != HAMMER_FST_FLUSH) {
1232                 if (ip->flags & HAMMER_INODE_FLUSHW) {
1233                         ip->flags &= ~HAMMER_INODE_FLUSHW;
1234                         wakeup(&ip->flags);
1235                 }
1236         }
1237         if (dorel)
1238                 hammer_rel_inode(ip, 0);
1239 }
1240
1241 /*
1242  * Called from hammer_sync_inode() to synchronize in-memory records
1243  * to the media.
1244  */
1245 static int
1246 hammer_sync_record_callback(hammer_record_t record, void *data)
1247 {
1248         hammer_cursor_t cursor = data;
1249         hammer_transaction_t trans = cursor->trans;
1250         int error;
1251
1252         /*
1253          * Skip records that do not belong to the current flush.
1254          */
1255         if (record->flush_state != HAMMER_FST_FLUSH)
1256                 return(0);
1257         KKASSERT((record->flags & HAMMER_RECF_DELETED_BE) == 0);
1258 #if 1
1259         if (record->flush_group != record->ip->flush_group) {
1260                 kprintf("sync_record %p ip %p bad flush group %d %d\n", record, record->ip, record->flush_group ,record->ip->flush_group);
1261                 Debugger("blah2");
1262                 return(0);
1263         }
1264 #endif
1265         KKASSERT(record->flush_group == record->ip->flush_group);
1266
1267         /*
1268          * Interlock the record using the BE flag.  Once BE is set the
1269          * frontend cannot change the state of FE.
1270          *
1271          * NOTE: If FE is set prior to us setting BE we still sync the
1272          * record out, but the flush completion code converts it to 
1273          * a delete-on-disk record instead of destroying it.
1274          */
1275         KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
1276         record->flags |= HAMMER_RECF_INTERLOCK_BE;
1277
1278         /*
1279          * If the whole inode is being deleted, all on-disk records will
1280          * be deleted very soon, we can't sync any new records to disk
1281          * because they will be deleted in the same transaction they were
1282          * created in (delete_tid == create_tid), which will assert.
1283          *
1284          * XXX There may be a case with RECORD_ADD with DELETED_FE set
1285          * that we currently panic on.
1286          */
1287         if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
1288                 switch(record->type) {
1289                 case HAMMER_MEM_RECORD_GENERAL:
1290                         record->flags |= HAMMER_RECF_DELETED_FE;
1291                         record->flags |= HAMMER_RECF_DELETED_BE;
1292                         error = 0;
1293                         goto done;
1294                 case HAMMER_MEM_RECORD_ADD:
1295                         panic("hammer_sync_record_callback: illegal add "
1296                               "during inode deletion record %p", record);
1297                         break; /* NOT REACHED */
1298                 case HAMMER_MEM_RECORD_INODE:
1299                         panic("hammer_sync_record_callback: attempt to "
1300                               "sync inode record %p?", record);
1301                         break; /* NOT REACHED */
1302                 case HAMMER_MEM_RECORD_DEL:
1303                         /* 
1304                          * Follow through and issue the on-disk deletion
1305                          */
1306                         break;
1307                 }
1308         }
1309
1310         /*
1311          * If DELETED_FE is set we may have already sent dependant pieces
1312          * to the disk and we must flush the record as if it hadn't been
1313          * deleted.  This creates a bit of a mess because we have to
1314          * have ip_sync_record convert the record to MEM_RECORD_DEL before
1315          * it inserts the B-Tree record.  Otherwise the media sync might
1316          * be visible to the frontend.
1317          */
1318         if (record->flags & HAMMER_RECF_DELETED_FE) {
1319                 if (record->type == HAMMER_MEM_RECORD_ADD) {
1320                         record->flags |= HAMMER_RECF_CONVERT_DELETE;
1321                 } else {
1322                         KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
1323                         return(0);
1324                 }
1325         }
1326
1327         /*
1328          * Assign the create_tid for new records.  Deletions already
1329          * have the record's entire key properly set up.
1330          */
1331         if (record->type != HAMMER_MEM_RECORD_DEL)
1332                 record->leaf.base.create_tid = trans->tid;
1333         for (;;) {
1334                 error = hammer_ip_sync_record_cursor(cursor, record);
1335                 if (error != EDEADLK)
1336                         break;
1337                 hammer_done_cursor(cursor);
1338                 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
1339                                            record->ip);
1340                 if (error)
1341                         break;
1342         }
1343         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
1344
1345         if (error) {
1346                 error = -error;
1347                 if (error != -ENOSPC) {
1348                         kprintf("hammer_sync_record_callback: sync failed rec "
1349                                 "%p, error %d\n", record, error);
1350                         Debugger("sync failed rec");
1351                 }
1352         }
1353 done:
1354         hammer_flush_record_done(record, error);
1355         return(error);
1356 }
1357
1358 /*
1359  * XXX error handling
1360  */
1361 int
1362 hammer_sync_inode(hammer_inode_t ip)
1363 {
1364         struct hammer_transaction trans;
1365         struct hammer_cursor cursor;
1366         struct bio *bio;
1367         hammer_record_t depend;
1368         hammer_record_t next;
1369         int error, tmp_error;
1370         u_int64_t nlinks;
1371
1372         if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
1373                 return(0);
1374
1375         hammer_start_transaction_fls(&trans, ip->hmp);
1376         error = hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
1377         if (error)
1378                 goto done;
1379
1380         /*
1381          * Any directory records referencing this inode which are not in
1382          * our current flush group must adjust our nlink count for the
1383          * purposes of synchronization to disk.
1384          *
1385          * Records which are in our flush group can be unlinked from our
1386          * inode now, potentially allowing the inode to be physically
1387          * deleted.
1388          */
1389         nlinks = ip->ino_data.nlinks;
1390         next = TAILQ_FIRST(&ip->target_list);
1391         while ((depend = next) != NULL) {
1392                 next = TAILQ_NEXT(depend, target_entry);
1393                 if (depend->flush_state == HAMMER_FST_FLUSH &&
1394                     depend->flush_group == ip->hmp->flusher_act) {
1395                         /*
1396                          * If this is an ADD that was deleted by the frontend
1397                          * the frontend nlinks count will have already been
1398                          * decremented, but the backend is going to sync its
1399                          * directory entry and must account for it.  The
1400                          * record will be converted to a delete-on-disk when
1401                          * it gets synced.
1402                          *
1403                          * If the ADD was not deleted by the frontend we
1404                          * can remove the dependency from our target_list.
1405                          */
1406                         if (depend->flags & HAMMER_RECF_DELETED_FE) {
1407                                 ++nlinks;
1408                         } else {
1409                                 TAILQ_REMOVE(&ip->target_list, depend,
1410                                              target_entry);
1411                                 depend->target_ip = NULL;
1412                         }
1413                 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
1414                         /*
1415                          * Not part of our flush group
1416                          */
1417                         KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
1418                         switch(depend->type) {
1419                         case HAMMER_MEM_RECORD_ADD:
1420                                 --nlinks;
1421                                 break;
1422                         case HAMMER_MEM_RECORD_DEL:
1423                                 ++nlinks;
1424                                 break;
1425                         default:
1426                                 break;
1427                         }
1428                 }
1429         }
1430
1431         /*
1432          * Set dirty if we had to modify the link count.
1433          */
1434         if (ip->sync_ino_data.nlinks != nlinks) {
1435                 KKASSERT((int64_t)nlinks >= 0);
1436                 ip->sync_ino_data.nlinks = nlinks;
1437                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1438         }
1439
1440         /*
1441          * Queue up as many dirty buffers as we can, then set a flag to
1442          * cause any further BIOs to go to the alternative queue.
1443          */
1444         if (ip->flags & HAMMER_INODE_VHELD)
1445                 error = vfsync(ip->vp, MNT_NOWAIT, 1, NULL, NULL);
1446         ip->flags |= HAMMER_INODE_WRITE_ALT;
1447
1448         /*
1449          * The buffer cache may contain dirty buffers beyond the inode
1450          * state we copied from the frontend to the backend.  Because
1451          * we are syncing our buffer cache on the backend, resync
1452          * the truncation point and the file size so we don't wipe out
1453          * any data.
1454          *
1455          * Syncing the buffer cache on the frontend has serious problems
1456          * because it prevents us from passively queueing dirty inodes
1457          * to the backend (the BIOs could stall indefinitely).
1458          */
1459         if (ip->flags & HAMMER_INODE_TRUNCATED) {
1460                 ip->sync_trunc_off = ip->trunc_off;
1461                 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
1462         }
1463         if (ip->sync_ino_data.size != ip->ino_data.size) {
1464                 ip->sync_ino_data.size = ip->ino_data.size;
1465                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1466         }
1467
1468         /*
1469          * If there is a truncation queued, destroy any data past the (aligned)
1470          * truncation point.  Userland will have dealt with the buffer
1471          * containing the truncation point for us.
1472          *
1473          * We don't flush pending frontend data buffers until after we've
1474          * dealt with the truncation.
1475          *
1476          * Don't bother if the inode is or has been deleted.
1477          */
1478         if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
1479                 /*
1480                  * Interlock trunc_off.  The VOP front-end may continue to
1481                  * make adjustments to it while we are blocked.
1482                  */
1483                 off_t trunc_off;
1484                 off_t aligned_trunc_off;
1485
1486                 trunc_off = ip->sync_trunc_off;
1487                 aligned_trunc_off = (trunc_off + HAMMER_BUFMASK) &
1488                                     ~HAMMER_BUFMASK64;
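                /*
                 * This rounds the truncation offset up to a HAMMER buffer
                 * boundary.  For example, with the usual 16KB buffers
                 * (HAMMER_BUFMASK == 0x3fff) a trunc_off of 20000 gives an
                 * aligned_trunc_off of 32768: the partially truncated
                 * buffer covering 16384-32767 is the frontend's problem,
                 * and only whole buffers from 32768 on are deleted below.
                 */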
1489
1490                 /*
1491                  * Delete any whole blocks on-media.  The front-end has
1492                  * already cleaned out any partial block and made it
1493                  * pending.  The front-end may have updated trunc_off
1494                  * while we were blocked so do not just unconditionally
1495                  * set it to the maximum offset.
1496                  */
1497                 error = hammer_ip_delete_range(&cursor, ip,
1498                                                 aligned_trunc_off,
1499                                                 0x7FFFFFFFFFFFFFFFLL);
1500                 if (error)
1501                         Debugger("hammer_ip_delete_range errored");
1502                 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
1503                 if (ip->trunc_off >= trunc_off) {
1504                         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
1505                         ip->flags &= ~HAMMER_INODE_TRUNCATED;
1506                 }
1507         } else {
1508                 error = 0;
1509         }
1510
1511         /*
1512          * Now sync related records.  These will typically be directory
1513          * entries or delete-on-disk records.
1514          *
1515          * Not all records will be flushed, but clear XDIRTY anyway.  We
1516          * will set it again in the frontend hammer_flush_inode_done() 
1517          * if records remain.
1518          */
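        /*
         * hammer_sync_record_callback() hands errors back as negative
         * values, which also aborts the RB_SCAN; convert them back to
         * positive errnos before folding them into the local error.
         */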
1519         if (error == 0) {
1520                 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1521                                     hammer_sync_record_callback, &cursor);
1522                 if (tmp_error < 0)
1523                         tmp_error = -tmp_error;
1524                 if (tmp_error)
1525                         error = tmp_error;
1526                 if (RB_EMPTY(&ip->rec_tree))
1527                         ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
1528         }
1529
1530         /*
1531          * If we are deleting the inode, the frontend had better not have
1532          * any active references on elements making up the inode.
1533          */
1534         if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
1535             RB_EMPTY(&ip->rec_tree) &&
1536             (ip->sync_flags & HAMMER_INODE_DELETING) &&
1537             (ip->flags & HAMMER_INODE_DELETED) == 0) {
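                /*
                 * This is the physical destruction of the inode: mark it
                 * DELETED, wipe any remaining on-media records belonging
                 * to it, stamp delete_tid on both copies of the inode
                 * record, and drop the volume header's inode count.
                 */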
1538                 int count1 = 0;
1539
1540                 hkprintf("Y");
1541                 ip->flags |= HAMMER_INODE_DELETED;
1542                 error = hammer_ip_delete_range_all(&cursor, ip, &count1);
1543                 if (error == 0) {
1544                         ip->sync_flags &= ~HAMMER_INODE_DELETING;
1545                         ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
1546                         KKASSERT(RB_EMPTY(&ip->rec_tree));
1547
1548                         /*
1549                          * Set delete_tid in both the frontend and backend
1550                          * copy of the inode record.  The DELETED flag handles
1551                          * this, do not set RDIRTY.
1552                          */
1553                         ip->ino_leaf.base.delete_tid = trans.tid;
1554                         ip->sync_ino_leaf.base.delete_tid = trans.tid;
1555
1556                         /*
1557                          * Adjust the inode count in the volume header
1558                          */
1559                         hammer_modify_volume_field(&trans, trans.rootvol,
1560                                                    vol0_stat_inodes);
1561                         --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
1562                         hammer_modify_volume_done(trans.rootvol);
1563                 } else {
1564                         ip->flags &= ~HAMMER_INODE_DELETED;
1565                         Debugger("hammer_ip_delete_range_all errored");
1566                 }
1567         }
1568
1569         /*
1570          * Flush any queued BIOs.  These will just biodone() the IO's if
1571          * the inode has been deleted.
1572          */
1573         while ((bio = TAILQ_FIRST(&ip->bio_list)) != NULL) {
1574                 TAILQ_REMOVE(&ip->bio_list, bio, bio_act);
1575                 tmp_error = hammer_dowrite(&cursor, ip, bio);
1576                 if (tmp_error)
1577                         error = tmp_error;
1578         }
1579         ip->sync_flags &= ~HAMMER_INODE_BUFS;
1580
1581         if (error)
1582                 Debugger("RB_SCAN errored");
1583
1584         /*
1585          * Now update the inode's on-disk inode-data and/or on-disk record.
1586          * DELETED and ONDISK are managed only in ip->flags.
1587          */
1588         switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
1589         case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
1590                 /*
1591                  * If deleted and on-disk, don't set any additional flags;
1592                  * the delete flag takes care of things.
1593                  *
1594                  * Clear flags which may have been set by the frontend.
1595                  */
1596                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY|
1597                                     HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
1598                                     HAMMER_INODE_DELETING);
1599                 break;
1600         case HAMMER_INODE_DELETED:
1601                 /*
1602                  * Take care of the case where a deleted inode was never
1603                  * flushed to the disk in the first place.
1604                  *
1605                  * Clear flags which may have been set by the frontend.
1606                  */
1607                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY|
1608                                     HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
1609                                     HAMMER_INODE_DELETING);
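                /*
                 * Throw away any in-memory records that are still queued.
                 * Referencing a record and then marking it deleted for both
                 * the frontend and the backend lets hammer_rel_mem_record()
                 * free it; nothing was ever written to the media for this
                 * inode, so there is nothing to remove on-disk.
                 */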
1610                 while (RB_ROOT(&ip->rec_tree)) {
1611                         hammer_record_t record = RB_ROOT(&ip->rec_tree);
1612                         hammer_ref(&record->lock);
1613                         KKASSERT(record->lock.refs == 1);
1614                         record->flags |= HAMMER_RECF_DELETED_FE;
1615                         record->flags |= HAMMER_RECF_DELETED_BE;
1616                         hammer_rel_mem_record(record);
1617                 }
1618                 break;
1619         case HAMMER_INODE_ONDISK:
1620                 /*
1621                  * If already on-disk, do not set any additional flags.
1622                  */
1623                 break;
1624         default:
1625                 /*
1626                  * If not on-disk and not deleted, set both dirty flags
1627                  * to force an initial record to be written.  Also set
1628                  * the create_tid for the inode.
1629                  *
1630                  * Set create_tid in both the frontend and backend
1631                  * copy of the inode record.
1632                  */
1633                 ip->ino_leaf.base.create_tid = trans.tid;
1634                 ip->sync_ino_leaf.base.create_tid = trans.tid;
1635                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1636                 break;
1637         }
1638
1639         /*
1640          * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
1641          * is already on-disk the old record is marked as deleted.
1642          *
1643          * If DELETED is set hammer_update_inode() will delete the existing
1644          * record without writing out a new one.
1645          *
1646          * If *ONLY* the ITIMES flag is set we can update the record in-place.
1647          */
1648         if (ip->flags & HAMMER_INODE_DELETED) {
1649                 error = hammer_update_inode(&cursor, ip);
1650         } else 
1651         if ((ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) ==
1652             HAMMER_INODE_ITIMES) {
1653                 error = hammer_update_itimes(&cursor, ip);
1654         } else
1655         if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) {
1656                 error = hammer_update_inode(&cursor, ip);
1657         }
1658         if (error)
1659                 Debugger("hammer_update_itimes/inode errored");
1660 done:
1661         /*
1662          * Save the TID we used to sync the inode with to make sure we
1663          * do not improperly reuse it.
1664          */
1665         hammer_done_cursor(&cursor);
1666         hammer_done_transaction(&trans);
1667         return(error);
1668 }
1669
1670 /*
1671  * This routine is called when the OS is no longer actively referencing
1672  * the inode (but might still be keeping it cached), or when releasing
1673  * the last reference to an inode.
1674  *
1675  * At this point if the inode's nlinks count is zero we want to destroy
1676  * it, which may mean destroying it on-media too.
1677  */
1678 void
1679 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
1680 {
1681         struct vnode *vp;
1682
1683         /*
1684          * Set the DELETING flag when the link count drops to 0 and the
1685          * OS no longer has any opens on the inode.
1686          *
1687          * The backend will clear DELETING (a mod flag) and set DELETED
1688          * (a state flag) when it is actually able to perform the
1689          * operation.
1690          */
1691         if (ip->ino_data.nlinks == 0 &&
1692             (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
1693                 ip->flags |= HAMMER_INODE_DELETING;
1694                 ip->flags |= HAMMER_INODE_TRUNCATED;
1695                 ip->trunc_off = 0;
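                /*
                 * Queuing a truncation to offset 0 causes the backend to
                 * destroy all of the file's on-media data records when the
                 * inode is synced (see the TRUNCATED handling in
                 * hammer_sync_inode() above).
                 */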
1696                 vp = NULL;
1697                 if (getvp) {
1698                         if (hammer_get_vnode(ip, &vp) != 0)
1699                                 return;
1700                 }
1701                 if (ip->vp) {
1702                         vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
1703                         vnode_pager_setsize(ip->vp, 0);
1704                 }
1705                 if (getvp) {
1706                         vput(vp);
1707                 }
1708         }
1709 }
1710
1711 /*
1712  * Re-test an inode when a dependency has gone away to see if we
1713  * can chain flush it.
1714  */
1715 void
1716 hammer_test_inode(hammer_inode_t ip)
1717 {
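        /*
         * REFLUSH indicates another flush is still wanted for this inode.
         * RESIGNAL preserves the HAMMER_FLUSH_SIGNAL semantics of the
         * original request so the flusher is signalled immediately instead
         * of waiting for the next flush cycle.
         */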
1718         if (ip->flags & HAMMER_INODE_REFLUSH) {
1719                 ip->flags &= ~HAMMER_INODE_REFLUSH;
1720                 hammer_ref(&ip->lock);
1721                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
1722                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
1723                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1724                 } else {
1725                         hammer_flush_inode(ip, 0);
1726                 }
1727                 hammer_rel_inode(ip, 0);
1728         }
1729 }
1730