1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  * 
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  * 
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  * 
34  * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.49 2008/05/03 20:21:20 dillon Exp $
35  */
36
37 #include "hammer.h"
38 #include <vm/vm_extern.h>
39 #include <sys/buf.h>
40 #include <sys/buf2.h>
41
42 static int hammer_unload_inode(struct hammer_inode *ip);
43 static void hammer_flush_inode_core(hammer_inode_t ip, int flags);
44 static int hammer_setup_child_callback(hammer_record_t rec, void *data);
45 static int hammer_setup_parent_inodes(hammer_record_t record);
46
47 /*
48  * The kernel is not actively referencing this vnode but is still holding
49  * it cached.
50  *
51  * This is called from the frontend.
52  */
53 int
54 hammer_vop_inactive(struct vop_inactive_args *ap)
55 {
56         struct hammer_inode *ip = VTOI(ap->a_vp);
57
58         /*
59          * Degenerate case
60          */
61         if (ip == NULL) {
62                 vrecycle(ap->a_vp);
63                 return(0);
64         }
65
66         /*
67          * If the inode no longer has visibility in the filesystem and is
68          * fairly clean, try to recycle it immediately.  This can deadlock
69          * in vfsync() if we aren't careful.
70          */
71         hammer_inode_unloadable_check(ip, 0);
72         if (ip->flags & HAMMER_INODE_MODMASK)
73                 hammer_flush_inode(ip, 0);
74         else if (ip->ino_rec.ino_nlinks == 0)
75                 vrecycle(ap->a_vp);
76         return(0);
77 }
78
79 /*
80  * Release the vnode association.  This is typically (but not always)
81  * the last reference on the inode.
82  *
83  * Once the association is lost we are on our own with regard to
84  * flushing the inode.
85  */
86 int
87 hammer_vop_reclaim(struct vop_reclaim_args *ap)
88 {
89         struct hammer_inode *ip;
90         struct vnode *vp;
91
92         vp = ap->a_vp;
93
94         if ((ip = vp->v_data) != NULL) {
95                 vp->v_data = NULL;
96                 ip->vp = NULL;
97                 hammer_rel_inode(ip, 1);
98         }
99         return(0);
100 }
101
102 /*
103  * Return a locked vnode for the specified inode.  The inode must be
104  * referenced but NOT LOCKED on entry and will remain referenced on
105  * return.
106  *
107  * Called from the frontend.
108  */
109 int
110 hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
111 {
112         struct vnode *vp;
113         int error = 0;
114
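            /*
             * Attach a vnode, looping to handle races against other
             * threads doing the same thing and against the vnode being
             * reclaimed out from under us.
             */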
115         for (;;) {
116                 if ((vp = ip->vp) == NULL) {
117                         error = getnewvnode(VT_HAMMER, ip->hmp->mp, vpp, 0, 0);
118                         if (error)
119                                 break;
120                         hammer_lock_ex(&ip->lock);
121                         if (ip->vp != NULL) {
122                                 hammer_unlock(&ip->lock);
123                                 vp->v_type = VBAD;
124                                 vx_put(vp);
125                                 continue;
126                         }
127                         hammer_ref(&ip->lock);
128                         vp = *vpp;
129                         ip->vp = vp;
130                         vp->v_type = hammer_get_vnode_type(
131                                             ip->ino_rec.base.base.obj_type);
132
133                         switch(ip->ino_rec.base.base.obj_type) {
134                         case HAMMER_OBJTYPE_CDEV:
135                         case HAMMER_OBJTYPE_BDEV:
136                                 vp->v_ops = &ip->hmp->mp->mnt_vn_spec_ops;
137                                 addaliasu(vp, ip->ino_data.rmajor,
138                                           ip->ino_data.rminor);
139                                 break;
140                         case HAMMER_OBJTYPE_FIFO:
141                                 vp->v_ops = &ip->hmp->mp->mnt_vn_fifo_ops;
142                                 break;
143                         default:
144                                 break;
145                         }
146
147                         /*
148                          * Only mark as the root vnode if the ip is not
149                          * historical, otherwise the VFS cache will get
150                          * confused.  The other half of the special handling
151                          * is in hammer_vop_nlookupdotdot().
152                          */
153                         if (ip->obj_id == HAMMER_OBJID_ROOT &&
154                             ip->obj_asof == ip->hmp->asof) {
155                                 vp->v_flag |= VROOT;
156                         }
157
158                         vp->v_data = (void *)ip;
159                         /* vnode locked by getnewvnode() */
160                         /* make related vnode dirty if inode dirty? */
161                         hammer_unlock(&ip->lock);
162                         if (vp->v_type == VREG)
163                                 vinitvmio(vp, ip->ino_rec.ino_size);
164                         break;
165                 }
166
167                 /*
168                  * loop if the vget fails (aka races), or if the vp
169                  * no longer matches ip->vp.
170                  */
171                 if (vget(vp, LK_EXCLUSIVE) == 0) {
172                         if (vp == ip->vp)
173                                 break;
174                         vput(vp);
175                 }
176         }
177         *vpp = vp;
178         return(error);
179 }
180
181 /*
182  * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
183  * do not attach or detach the related vnode (use hammer_get_vnode() for
184  * that).
185  *
186  * The flags argument is only applied for newly created inodes, and only
187  * certain flags are inherited.
188  *
189  * Called from the frontend.
190  */
191 struct hammer_inode *
192 hammer_get_inode(hammer_transaction_t trans, struct hammer_node **cache,
193                  u_int64_t obj_id, hammer_tid_t asof, int flags, int *errorp)
194 {
195         hammer_mount_t hmp = trans->hmp;
196         struct hammer_inode_info iinfo;
197         struct hammer_cursor cursor;
198         struct hammer_inode *ip;
199
200         /*
201          * Determine if we already have an inode cached.  If we do then
202          * we are golden.
203          */
204         iinfo.obj_id = obj_id;
205         iinfo.obj_asof = asof;
206 loop:
207         ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
208         if (ip) {
209                 hammer_ref(&ip->lock);
210                 *errorp = 0;
211                 return(ip);
212         }
213
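            /*
             * Not cached.  Allocate a new in-memory inode.  If we race
             * another instantiation of the same inode the RB_INSERT
             * below will fail and we retry the lookup.
             */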
214         ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
215         ++hammer_count_inodes;
216         ip->obj_id = obj_id;
217         ip->obj_asof = iinfo.obj_asof;
218         ip->hmp = hmp;
219         ip->flags = flags & HAMMER_INODE_RO;
220         if (hmp->ronly)
221                 ip->flags |= HAMMER_INODE_RO;
222         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
223         RB_INIT(&ip->rec_tree);
224         TAILQ_INIT(&ip->bio_list);
225         TAILQ_INIT(&ip->bio_alt_list);
226         TAILQ_INIT(&ip->target_list);
227
228         /*
229          * Locate the on-disk inode.
230          */
231 retry:
232         hammer_init_cursor(trans, &cursor, cache, NULL);
233         cursor.key_beg.obj_id = ip->obj_id;
234         cursor.key_beg.key = 0;
235         cursor.key_beg.create_tid = 0;
236         cursor.key_beg.delete_tid = 0;
237         cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
238         cursor.key_beg.obj_type = 0;
239         cursor.asof = iinfo.obj_asof;
240         cursor.flags = HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_GET_DATA |
241                        HAMMER_CURSOR_ASOF;
242
243         *errorp = hammer_btree_lookup(&cursor);
244         if (*errorp == EDEADLK) {
245                 hammer_done_cursor(&cursor);
246                 goto retry;
247         }
248
249         /*
250          * On success the B-Tree lookup will hold the appropriate
251          * buffer cache buffers and provide a pointer to the requested
252          * information.  Copy the information to the in-memory inode
253          * and cache the B-Tree node to improve future operations.
254          */
255         if (*errorp == 0) {
256                 ip->ino_rec = cursor.record->inode;
257                 ip->ino_data = cursor.data->inode;
258                 hammer_cache_node(cursor.node, &ip->cache[0]);
259                 if (cache)
260                         hammer_cache_node(cursor.node, cache);
261         }
262
263         /*
264          * On success insert the inode into the in-memory inode RB tree.
265          * It is possible to race another lookup instantiating the same
266          * inode, so deal with that condition too.
267          *
268          * The cursor's locked node interlocks against others creating and
269          * destroying ip while we were blocked.
270          */
271         if (*errorp == 0) {
272                 hammer_ref(&ip->lock);
273                 if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
274                         hammer_uncache_node(&ip->cache[0]);
275                         hammer_uncache_node(&ip->cache[1]);
276                         KKASSERT(ip->lock.refs == 1);
277                         --hammer_count_inodes;
278                         kfree(ip, M_HAMMER);
279                         hammer_done_cursor(&cursor);
280                         goto loop;
281                 }
282                 ip->flags |= HAMMER_INODE_ONDISK;
283         } else {
284                 --hammer_count_inodes;
285                 kfree(ip, M_HAMMER);
286                 ip = NULL;
287         }
288         hammer_done_cursor(&cursor);
289         return (ip);
290 }
291
292 /*
293  * Create a new filesystem object, returning the inode in *ipp.  The
294  * returned inode will be referenced.
295  *
296  * The inode is created in-memory.
297  */
298 int
299 hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
300                     struct ucred *cred, hammer_inode_t dip,
301                     struct hammer_inode **ipp)
302 {
303         hammer_mount_t hmp;
304         hammer_inode_t ip;
305         uid_t xuid;
306
307         hmp = trans->hmp;
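
            /*
             * Allocate the in-memory inode and assign it a newly
             * allocated object id.  The inode starts out dirty and has
             * no on-media presence until it is flushed.
             */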
308         ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
309         ++hammer_count_inodes;
310         ip->obj_id = hammer_alloc_objid(trans, dip);
311         KKASSERT(ip->obj_id != 0);
312         ip->obj_asof = hmp->asof;
313         ip->hmp = hmp;
314         ip->flush_state = HAMMER_FST_IDLE;
315         ip->flags = HAMMER_INODE_DDIRTY | HAMMER_INODE_RDIRTY |
316                     HAMMER_INODE_ITIMES;
317
318         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
319         RB_INIT(&ip->rec_tree);
320         TAILQ_INIT(&ip->bio_list);
321         TAILQ_INIT(&ip->bio_alt_list);
322         TAILQ_INIT(&ip->target_list);
323
324         ip->ino_rec.ino_atime = trans->time;
325         ip->ino_rec.ino_mtime = trans->time;
326         ip->ino_rec.ino_size = 0;
327         ip->ino_rec.ino_nlinks = 0;
328         /* XXX */
329         ip->ino_rec.base.base.btype = HAMMER_BTREE_TYPE_RECORD;
330         ip->ino_rec.base.base.obj_id = ip->obj_id;
331         ip->ino_rec.base.base.key = 0;
332         ip->ino_rec.base.base.create_tid = 0;
333         ip->ino_rec.base.base.delete_tid = 0;
334         ip->ino_rec.base.base.rec_type = HAMMER_RECTYPE_INODE;
335         ip->ino_rec.base.base.obj_type = hammer_get_obj_type(vap->va_type);
336
337         ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
338         ip->ino_data.mode = vap->va_mode;
339         ip->ino_data.ctime = trans->time;
340         ip->ino_data.parent_obj_id = (dip) ? dip->ino_rec.base.base.obj_id : 0;
341
342         switch(ip->ino_rec.base.base.obj_type) {
343         case HAMMER_OBJTYPE_CDEV:
344         case HAMMER_OBJTYPE_BDEV:
345                 ip->ino_data.rmajor = vap->va_rmajor;
346                 ip->ino_data.rminor = vap->va_rminor;
347                 break;
348         default:
349                 break;
350         }
351
352         /*
353          * Calculate default uid/gid and overwrite with information from
354          * the vap.
355          */
356         xuid = hammer_to_unix_xid(&dip->ino_data.uid);
357         ip->ino_data.gid = dip->ino_data.gid;
358         xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
359                                      &vap->va_mode);
360         ip->ino_data.mode = vap->va_mode;
361
362         if (vap->va_vaflags & VA_UID_UUID_VALID)
363                 ip->ino_data.uid = vap->va_uid_uuid;
364         else if (vap->va_uid != (uid_t)VNOVAL)
365                 hammer_guid_to_uuid(&ip->ino_data.uid, xuid);
366         if (vap->va_vaflags & VA_GID_UUID_VALID)
367                 ip->ino_data.gid = vap->va_gid_uuid;
368         else if (vap->va_gid != (gid_t)VNOVAL)
369                 hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
370
371         hammer_ref(&ip->lock);
372         if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
373                 hammer_unref(&ip->lock);
374                 panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
375         }
376         *ipp = ip;
377         return(0);
378 }
379
380 /*
381  * Called by hammer_sync_inode().
382  */
383 static int
384 hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
385 {
386         hammer_transaction_t trans = cursor->trans;
387         hammer_record_t record;
388         int error;
389
390 retry:
391         error = 0;
392
393         /*
394          * If the inode has a presence on-disk then locate it and mark
395          * it deleted, setting DELONDISK.
396          *
397          * The record may or may not be physically deleted, depending on
398          * the retention policy.
399          */
400         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
401             HAMMER_INODE_ONDISK) {
402                 hammer_normalize_cursor(cursor);
403                 cursor->key_beg.obj_id = ip->obj_id;
404                 cursor->key_beg.key = 0;
405                 cursor->key_beg.create_tid = 0;
406                 cursor->key_beg.delete_tid = 0;
407                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
408                 cursor->key_beg.obj_type = 0;
409                 cursor->asof = ip->obj_asof;
410                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
411                 cursor->flags |= HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_ASOF;
412                 cursor->flags |= HAMMER_CURSOR_BACKEND;
413
414                 error = hammer_btree_lookup(cursor);
415                 if (hammer_debug_inode)
416                         kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
417                 if (error) {
418                         kprintf("error %d\n", error);
419                         Debugger("hammer_update_inode");
420                 }
421
422                 if (error == 0) {
423                         error = hammer_ip_delete_record(cursor, trans->tid);
424                         if (hammer_debug_inode)
425                                 kprintf(" error %d\n", error);
426                         if (error && error != EDEADLK) {
427                                 kprintf("error %d\n", error);
428                                 Debugger("hammer_update_inode2");
429                         }
430                         if (error == 0) {
431                                 ip->flags |= HAMMER_INODE_DELONDISK;
432                         }
433                         if (cursor->node)
434                                 hammer_cache_node(cursor->node, &ip->cache[0]);
435                 }
436                 if (error == EDEADLK) {
437                         hammer_done_cursor(cursor);
438                         error = hammer_init_cursor(trans, cursor,
439                                                    &ip->cache[0], ip);
440                         if (hammer_debug_inode)
441                                 kprintf("IPDED %p %d\n", ip, error);
442                         if (error == 0)
443                                 goto retry;
444                 }
445         }
446
447         /*
448          * Ok, write out the initial record or a new record (after deleting
449          * the old one), unless the DELETED flag is set.  This routine will
450          * clear DELONDISK if it writes out a record.
451          *
452          * Update our inode statistics if this is the first application of
453          * the inode on-disk.
454          */
455         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
456                 /*
457                  * Generate a record and write it to the media
458                  */
459                 record = hammer_alloc_mem_record(ip);
460                 record->type = HAMMER_MEM_RECORD_GENERAL;
461                 record->flush_state = HAMMER_FST_FLUSH;
462                 record->rec.inode = ip->sync_ino_rec;
463                 record->rec.inode.base.base.create_tid = trans->tid;
464                 record->rec.inode.base.data_len = sizeof(ip->sync_ino_data);
465                 record->data = (void *)&ip->sync_ino_data;
466                 record->flags |= HAMMER_RECF_INTERLOCK_BE;
467                 for (;;) {
468                         error = hammer_ip_sync_record_cursor(cursor, record);
469                         if (hammer_debug_inode)
470                                 kprintf("GENREC %p rec %08x %d\n",      
471                                         ip, record->flags, error);
472                         if (error != EDEADLK)
473                                 break;
474                         hammer_done_cursor(cursor);
475                         error = hammer_init_cursor(trans, cursor,
476                                                    &ip->cache[0], ip);
477                         if (hammer_debug_inode)
478                                 kprintf("GENREC reinit %d\n", error);
479                         if (error)
480                                 break;
481                 }
482                 if (error) {
483                         kprintf("error %d\n", error);
484                         Debugger("hammer_update_inode3");
485                 }
486
487                 /*
488                  * The record isn't managed by the inode's record tree,
489                  * destroy it whether we succeed or fail.
490                  */
491                 record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
492                 record->flags |= HAMMER_RECF_DELETED_FE;
493                 record->flush_state = HAMMER_FST_IDLE;
494                 hammer_rel_mem_record(record);
495
496                 /*
497                  * Finish up.
498                  */
499                 if (error == 0) {
500                         if (hammer_debug_inode)
501                                 kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
502                         ip->sync_flags &= ~(HAMMER_INODE_RDIRTY |
503                                             HAMMER_INODE_DDIRTY |
504                                             HAMMER_INODE_ITIMES);
505                         ip->flags &= ~HAMMER_INODE_DELONDISK;
506
507                         /*
508                          * Root volume count of inodes
509                          */
510                         if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
511                                 hammer_modify_volume_field(trans,
512                                                            trans->rootvol,
513                                                            vol0_stat_inodes);
514                                 ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
515                                 hammer_modify_volume_done(trans->rootvol);
516                                 ip->flags |= HAMMER_INODE_ONDISK;
517                                 if (hammer_debug_inode)
518                                         kprintf("NOWONDISK %p\n", ip);
519                         }
520                 }
521         }
522
523         /*
524          * If the inode has been destroyed, clean out any left-over flags
525          * that may have been set by the frontend.
526          */
527         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) { 
528                 ip->sync_flags &= ~(HAMMER_INODE_RDIRTY |
529                                     HAMMER_INODE_DDIRTY |
530                                     HAMMER_INODE_ITIMES);
531         }
532         return(error);
533 }
534
535 /*
536  * Update only the itimes fields.  This is done non-historically.  The
537  * record is updated in-place on the disk.
538  */
539 static int
540 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
541 {
542         hammer_transaction_t trans = cursor->trans;
543         struct hammer_inode_record *rec;
544         int error;
545
546 retry:
547         error = 0;
548         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
549             HAMMER_INODE_ONDISK) {
550                 hammer_normalize_cursor(cursor);
551                 cursor->key_beg.obj_id = ip->obj_id;
552                 cursor->key_beg.key = 0;
553                 cursor->key_beg.create_tid = 0;
554                 cursor->key_beg.delete_tid = 0;
555                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
556                 cursor->key_beg.obj_type = 0;
557                 cursor->asof = ip->obj_asof;
558                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
559                 cursor->flags |= HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_ASOF;
560                 cursor->flags |= HAMMER_CURSOR_BACKEND;
561
562                 error = hammer_btree_lookup(cursor);
563                 if (error) {
564                         kprintf("error %d\n", error);
565                         Debugger("hammer_update_itimes1");
566                 }
567                 if (error == 0) {
568                         /*
569                          * Do not generate UNDO records for atime/mtime
570                          * updates.
571                          */
572                         rec = &cursor->record->inode;
573                         hammer_modify_buffer(trans, cursor->record_buffer,
574                                              NULL, 0);
575                         rec->ino_atime = ip->sync_ino_rec.ino_atime;
576                         rec->ino_mtime = ip->sync_ino_rec.ino_mtime;
577                         hammer_modify_buffer_done(cursor->record_buffer);
578                         ip->sync_flags &= ~HAMMER_INODE_ITIMES;
579                         /* XXX recalculate crc */
580                         hammer_cache_node(cursor->node, &ip->cache[0]);
581                 }
582                 if (error == EDEADLK) {
583                         hammer_done_cursor(cursor);
584                         error = hammer_init_cursor(trans, cursor,
585                                                    &ip->cache[0], ip);
586                         if (error == 0)
587                                 goto retry;
588                 }
589         }
590         return(error);
591 }
592
593 /*
594  * Release a reference on an inode, flush as requested.
595  *
596  * On the last reference we queue the inode to the flusher for its final
597  * disposition.
598  */
599 void
600 hammer_rel_inode(struct hammer_inode *ip, int flush)
601 {
602         hammer_mount_t hmp = ip->hmp;
603
604         /*
605          * Handle disposition when dropping the last ref.
606          */
607         for (;;) {
608                 if (ip->lock.refs == 1) {
609                         /*
610                          * Determine whether on-disk action is needed for
611                          * the inode's final disposition.
612                          */
613                         KKASSERT(ip->vp == NULL);
614                         hammer_inode_unloadable_check(ip, 0);
615                         if (ip->flags & HAMMER_INODE_MODMASK) {
616                                 hammer_flush_inode(ip, 0);
617                         } else if (ip->lock.refs == 1) {
618                                 hammer_unload_inode(ip);
619                                 break;
620                         }
621                 } else {
622                         if (flush)
623                                 hammer_flush_inode(ip, 0);
624
625                         /*
626                          * The inode still has multiple refs, try to drop
627                          * one ref.
628                          */
629                         KKASSERT(ip->lock.refs >= 1);
630                         if (ip->lock.refs > 1) {
631                                 hammer_unref(&ip->lock);
632                                 break;
633                         }
634                 }
635         }
636
637         /*
638          * XXX bad hack until I add code to track inodes in SETUP.  We
639          * can queue a lot of inodes to the syncer but if we don't wake
640          * it up the undo sets will be too large or too many unflushed
641          * records will build up and blow our malloc limit.
642          */
643         if (++hmp->reclaim_count > 256) {
644                 hmp->reclaim_count = 0;
645                 hammer_flusher_async(hmp);
646         }
647 }
648
649 /*
650  * Unload and destroy the specified inode.  Must be called with one remaining
651  * reference.  The reference is disposed of.
652  *
653  * This can only be called in the context of the flusher.
654  */
655 static int
656 hammer_unload_inode(struct hammer_inode *ip)
657 {
658         KASSERT(ip->lock.refs == 1,
659                 ("hammer_unload_inode: %d refs\n", ip->lock.refs));
660         KKASSERT(ip->vp == NULL);
661         KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
662         KKASSERT(ip->cursor_ip_refs == 0);
663         KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);
664
665         KKASSERT(RB_EMPTY(&ip->rec_tree));
666         KKASSERT(TAILQ_EMPTY(&ip->target_list));
667         KKASSERT(TAILQ_EMPTY(&ip->bio_list));
668         KKASSERT(TAILQ_EMPTY(&ip->bio_alt_list));
669
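            /*
             * Remove the inode from the in-memory inode RB tree, drop
             * any cached B-Tree node references, and free the structure.
             */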
670         RB_REMOVE(hammer_ino_rb_tree, &ip->hmp->rb_inos_root, ip);
671
672         hammer_uncache_node(&ip->cache[0]);
673         hammer_uncache_node(&ip->cache[1]);
674         if (ip->objid_cache)
675                 hammer_clear_objid(ip);
676         --hammer_count_inodes;
677         kfree(ip, M_HAMMER);
678
679         return(0);
680 }
681
682 /*
683  * A transaction has modified an inode, requiring updates as specified by
684  * the passed flags.
685  *
686  * HAMMER_INODE_RDIRTY: Inode record has been updated
687  * HAMMER_INODE_DDIRTY: Inode data has been updated
688  * HAMMER_INODE_XDIRTY: Dirty in-memory records
689  * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
690  * HAMMER_INODE_DELETED: Inode record/data must be deleted
691  * HAMMER_INODE_ITIMES: mtime/atime has been updated
692  */
693 void
694 hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
695 {
696         KKASSERT ((ip->flags & HAMMER_INODE_RO) == 0 ||
697                   (flags & (HAMMER_INODE_RDIRTY|HAMMER_INODE_DDIRTY|
698                    HAMMER_INODE_XDIRTY|HAMMER_INODE_BUFS|
699                    HAMMER_INODE_DELETED|HAMMER_INODE_ITIMES)) == 0);
700
701         ip->flags |= flags;
702 }
703
704 /*
705  * Request that an inode be flushed.  This whole mess cannot block and may
706  * recurse.  Once requested HAMMER will attempt to actively flush it until
707  * the flush can be done.
708  *
709  * The inode may already be flushing, or may be in a setup state.  We can
710  * place the inode in a flushing state if it is currently idle and flag it
711  * to reflush if it is currently flushing.
712  */
713 void
714 hammer_flush_inode(hammer_inode_t ip, int flags)
715 {
716         hammer_record_t depend;
717         int r, good;
718
719         /*
720          * Trivial 'nothing to flush' case.  If the inode is in a SETUP
721          * state we have to put it back into an IDLE state so we can
722          * drop the extra ref.
723          */
724         if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
725                 if (ip->flush_state == HAMMER_FST_SETUP) {
726                         ip->flush_state = HAMMER_FST_IDLE;
727                         hammer_rel_inode(ip, 0);
728                 }
729                 return;
730         }
731
732         /*
733          * Our flush action will depend on the current state.
734          */
735         switch(ip->flush_state) {
736         case HAMMER_FST_IDLE:
737                 /*
738                  * We have no dependencies and can flush immediately.  Some of
739                  * our children may not be flushable so we have to re-test
740                  * with that additional knowledge.
741                  */
742                 hammer_flush_inode_core(ip, flags);
743                 break;
744         case HAMMER_FST_SETUP:
745                 /*
746                  * Recurse upwards through dependencies via target_list
747                  * and start their flusher actions going if possible.
748                  *
749                  * 'good' is our connectivity.  -1 means we have none and
750                  * can't flush, 0 means there weren't any dependencies, and
751                  * 1 means we have good connectivity.
752                  */
753                 good = 0;
754                 TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
755                         r = hammer_setup_parent_inodes(depend);
756                         if (r < 0 && good == 0)
757                                 good = -1;
758                         if (r > 0)
759                                 good = 1;
760                 }
761
762                 /*
763                  * We can continue if good >= 0.  Determine how many records
764                  * under our inode can be flushed (and mark them).
765                  */
766                 if (good >= 0) {
767                         hammer_flush_inode_core(ip, flags);
768                 } else {
769                         ip->flags |= HAMMER_INODE_REFLUSH;
770                         if (flags & HAMMER_FLUSH_SIGNAL) {
771                                 ip->flags |= HAMMER_INODE_RESIGNAL;
772                                 hammer_flusher_async(ip->hmp);
773                         }
774                 }
775                 break;
776         default:
777                 /*
778                  * We are already flushing, flag the inode to reflush
779                  * if needed after it completes its current flush.
780                  */
781                 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
782                         ip->flags |= HAMMER_INODE_REFLUSH;
783                 if (flags & HAMMER_FLUSH_SIGNAL) {
784                         ip->flags |= HAMMER_INODE_RESIGNAL;
785                         hammer_flusher_async(ip->hmp);
786                 }
787                 break;
788         }
789 }
790
791 /*
792  * We are asked to recurse upwards and convert the record from SETUP
793  * to FLUSH if possible.  record->ip is a parent of the caller's inode,
794  * and record->target_ip is the caller's inode.
795  *
796  * Return 1 if the record gives us connectivity
797  *
798  * Return 0 if the record is not relevant 
799  *
800  * Return -1 if we can't resolve the dependency and there is no connectivity.
801  */
802 static int
803 hammer_setup_parent_inodes(hammer_record_t record)
804 {
805         hammer_mount_t hmp = record->ip->hmp;
806         hammer_record_t depend;
807         hammer_inode_t ip;
808         int r, good;
809
810         KKASSERT(record->flush_state != HAMMER_FST_IDLE);
811         ip = record->ip;
812
813         /*
814          * If the record is already flushing, is it in our flush group?
815          *
816          * If it is in our flush group but it is a general record or a 
817          * delete-on-disk, it does not improve our connectivity (return 0),
818          * and if the target inode is not trying to destroy itself we can't
819          * allow the operation yet anyway (the second return -1).
820          */
821         if (record->flush_state == HAMMER_FST_FLUSH) {
822                 if (record->flush_group != hmp->flusher_next) {
823                         ip->flags |= HAMMER_INODE_REFLUSH;
824                         return(-1);
825                 }
826                 if (record->type == HAMMER_MEM_RECORD_ADD)
827                         return(1);
828                 /* GENERAL or DEL */
829                 return(0);
830         }
831
832         /*
833          * It must be a setup record.  Try to resolve the setup dependencies
834          * by recursing upwards so we can place ip on the flush list.
835          */
836         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
837
838         good = 0;
839         TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
840                 r = hammer_setup_parent_inodes(depend);
841                 if (r < 0 && good == 0)
842                         good = -1;
843                 if (r > 0)
844                         good = 1;
845         }
846
847         /*
848          * We can't flush ip because it has no connectivity (XXX also check
849          * nlinks for pre-existing connectivity!).  Flag it so any resolution
850          * recurses back down.
851          */
852         if (good < 0) {
853                 ip->flags |= HAMMER_INODE_REFLUSH;
854                 return(good);
855         }
856
857         /*
858          * We are go, place the parent inode in a flushing state so we can
859          * place its record in a flushing state.  Note that the parent
860          * may already be flushing.  The record must be in the same flush
861          * group as the parent.
862          */
863         if (ip->flush_state != HAMMER_FST_FLUSH)
864                 hammer_flush_inode_core(ip, HAMMER_FLUSH_RECURSION);
865         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
866         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
867
868 #if 0
869         if (record->type == HAMMER_MEM_RECORD_DEL &&
870             (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
871                 /*
872                  * Regardless of flushing state we cannot sync this path if the
873                  * record represents a delete-on-disk but the target inode
874                  * is not ready to sync its own deletion.
875                  *
876                  * XXX need to count effective nlinks to determine whether
877                  * the flush is ok, otherwise removing a hardlink will
878                  * just leave the DEL record to rot.
879                  */
880                 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
881                 return(-1);
882         } else
883 #endif
884         if (ip->flush_group == ip->hmp->flusher_next) {
885                 /*
886                  * This is the record we wanted to synchronize.
887                  */
888                 record->flush_state = HAMMER_FST_FLUSH;
889                 record->flush_group = ip->flush_group;
890                 hammer_ref(&record->lock);
891                 if (record->type == HAMMER_MEM_RECORD_ADD)
892                         return(1);
893
894                 /*
895                  * A general or delete-on-disk record does not contribute
896                  * to our visibility.  We can still flush it, however.
897                  */
898                 return(0);
899         } else {
900                 /*
901                  * We couldn't resolve the dependencies, request that the
902                  * inode be flushed when the dependencies can be resolved.
903                  */
904                 ip->flags |= HAMMER_INODE_REFLUSH;
905                 return(-1);
906         }
907 }
908
909 /*
910  * This is the core routine placing an inode into the FST_FLUSH state.
911  */
912 static void
913 hammer_flush_inode_core(hammer_inode_t ip, int flags)
914 {
915         int go_count;
916
917         /*
918          * Set flush state and prevent the flusher from cycling into
919          * the next flush group.  Do not place the ip on the list yet.
920          * An inode moving out of the idle state gets an extra reference.
921          */
922         KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
923         if (ip->flush_state == HAMMER_FST_IDLE)
924                 hammer_ref(&ip->lock);
925         ip->flush_state = HAMMER_FST_FLUSH;
926         ip->flush_group = ip->hmp->flusher_next;
927         ++ip->hmp->flusher_lock;
928
929         /*
930          * We need to be able to vfsync/truncate from the backend.
931          */
932         KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
933         if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
934                 ip->flags |= HAMMER_INODE_VHELD;
935                 vref(ip->vp);
936         }
937
938         /*
939          * Figure out how many in-memory records we can actually flush
940          * (not including inode meta-data, buffers, etc).
941          */
942         if (flags & HAMMER_FLUSH_RECURSION) {
943                 go_count = 1;
944         } else {
945                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
946                                    hammer_setup_child_callback, NULL);
947         }
948
949         /*
950          * This is a more involved test that includes go_count.  If we
951          * can't flush, flag the inode and return.  If go_count is 0 we
952          * were unable to flush any records in our rec_tree and
953          * must ignore the XDIRTY flag.
954          */
955         if (go_count == 0) {
956                 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
957                         ip->flags |= HAMMER_INODE_REFLUSH;
958                         ip->flush_state = HAMMER_FST_SETUP;
959                         if (ip->flags & HAMMER_INODE_VHELD) {
960                                 ip->flags &= ~HAMMER_INODE_VHELD;
961                                 vrele(ip->vp);
962                         }
963                         if (flags & HAMMER_FLUSH_SIGNAL) {
964                                 ip->flags |= HAMMER_INODE_RESIGNAL;
965                                 hammer_flusher_async(ip->hmp);
966                         }
967                         if (--ip->hmp->flusher_lock == 0)
968                                 wakeup(&ip->hmp->flusher_lock);
969                         return;
970                 }
971         }
972
973         /*
974          * Snapshot the state of the inode for the backend flusher.
975          *
976          * The truncation must be retained in the frontend until after
977          * we've actually performed the record deletion.
978          *
979          * NOTE: The DELETING flag is a mod flag, but it is also sticky,
980          * and stays in ip->flags.  Once set, it stays set until the
981          * inode is destroyed.
982          */
983         ip->sync_flags = (ip->flags & HAMMER_INODE_MODMASK);
984         ip->sync_trunc_off = ip->trunc_off;
985         ip->sync_ino_rec = ip->ino_rec;
986         ip->sync_ino_data = ip->ino_data;
987         ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
988
989         /*
990          * The flusher list inherits our inode and reference.
991          */
992         TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
993         if (--ip->hmp->flusher_lock == 0)
994                 wakeup(&ip->hmp->flusher_lock);
995
996         if (flags & HAMMER_FLUSH_SIGNAL)
997                 hammer_flusher_async(ip->hmp);
998 }
999
1000 /*
1001  * Callback for scan of ip->rec_tree.  Try to include each record in our
1002  * flush.  ip->flush_group has been set but the inode has not yet been
1003  * moved into a flushing state.
1004  *
1005  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
1006  * both inodes.
1007  *
1008  * We return 1 for any record placed or found in FST_FLUSH, which prevents
1009  * the caller from shortcutting the flush.
1010  */
1011 static int
1012 hammer_setup_child_callback(hammer_record_t rec, void *data)
1013 {
1014         hammer_inode_t target_ip;
1015         hammer_inode_t ip;
1016         int r;
1017
1018         /*
1019          * If the record has been deleted by the backend (it's being held
1020          * by the frontend in a race), just ignore it.
1021          */
1022         if (rec->flags & HAMMER_RECF_DELETED_BE)
1023                 return(0);
1024
1025         /*
1026          * If the record is in an idle state it has no dependencies and
1027          * can be flushed.
1028          */
1029         ip = rec->ip;
1030         r = 0;
1031
1032         switch(rec->flush_state) {
1033         case HAMMER_FST_IDLE:
1034                 /*
1035                  * Record has no setup dependency, we can flush it.
1036                  */
1037                 KKASSERT(rec->target_ip == NULL);
1038                 rec->flush_state = HAMMER_FST_FLUSH;
1039                 rec->flush_group = ip->flush_group;
1040                 hammer_ref(&rec->lock);
1041                 r = 1;
1042                 break;
1043         case HAMMER_FST_SETUP:
1044                 /*
1045                  * Record has a setup dependency.  Try to include the
1046                  * target ip in the flush. 
1047                  *
1048                  * We have to be careful here, if we do not do the right
1049                  * thing we can lose track of dirty inodes and the system
1050                  * will lock up trying to allocate buffers.
1051                  */
1052                 target_ip = rec->target_ip;
1053                 KKASSERT(target_ip != NULL);
1054                 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
1055                 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
1056                         /*
1057                          * If the target IP is already flushing in our group
1058                          * we are golden, otherwise make sure the target
1059                          * reflushes.
1060                          */
1061                         if (target_ip->flush_group == ip->flush_group) {
1062                                 rec->flush_state = HAMMER_FST_FLUSH;
1063                                 rec->flush_group = ip->flush_group;
1064                                 hammer_ref(&rec->lock);
1065                                 r = 1;
1066                         } else {
1067                                 target_ip->flags |= HAMMER_INODE_REFLUSH;
1068                         }
1069                 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
1070                         /*
1071                          * If the target IP is not flushing we can force
1072                  * it to flush.  Even if it is unable to write out
1073                  * any of its own records, we have at least one in
1074                          * hand that we CAN deal with.
1075                          */
1076                         rec->flush_state = HAMMER_FST_FLUSH;
1077                         rec->flush_group = ip->flush_group;
1078                         hammer_ref(&rec->lock);
1079                         hammer_flush_inode_core(target_ip,
1080                                                 HAMMER_FLUSH_RECURSION);
1081                         r = 1;
1082                 } else {
1083                         /*
1084                          * General or delete-on-disk record.
1085                          *
1086                  * XXX this needs help.  If this is a delete-on-disk
1087                  * record we could disconnect the target.  If the target
1088                  * has its own dependencies they really need to be flushed.
1089                          *
1090                          * XXX
1091                          */
1092                         rec->flush_state = HAMMER_FST_FLUSH;
1093                         rec->flush_group = ip->flush_group;
1094                         hammer_ref(&rec->lock);
1095                         hammer_flush_inode_core(target_ip,
1096                                                 HAMMER_FLUSH_RECURSION);
1097                         r = 1;
1098                 }
1099                 break;
1100         case HAMMER_FST_FLUSH:
1101                 /* 
1102                  * Record already associated with a flush group.  It had
1103                  * better be ours.
1104                  */
1105                 KKASSERT(rec->flush_group == ip->flush_group);
1106                 r = 1;
1107                 break;
1108         }
1109         return(r);
1110 }
1111
1112 /*
1113  * Wait for a previously queued flush to complete
1114  */
1115 void
1116 hammer_wait_inode(hammer_inode_t ip)
1117 {
1118         while (ip->flush_state != HAMMER_FST_IDLE) {
1119                 ip->flags |= HAMMER_INODE_FLUSHW;
1120                 tsleep(&ip->flags, 0, "hmrwin", 0);
1121         }
1122 }
1123
1124 /*
1125  * Called by the backend code when a flush has been completed.
1126  * The inode has already been removed from the flush list.
1127  *
1128  * A pipelined flush can occur, in which case we must re-enter the
1129  * inode on the list and re-copy its fields.
1130  */
1131 void
1132 hammer_flush_inode_done(hammer_inode_t ip)
1133 {
1134         struct bio *bio;
1135         int dorel = 0;
1136
1137         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
1138
1139         /*
1140          * Allow BIOs to queue to the inode's primary bioq again.
1141          */
1142         ip->flags &= ~HAMMER_INODE_WRITE_ALT;
1143
1144         /*
1145          * Merge left-over flags back into the frontend and fix the state.
1146          */
1147         ip->flags |= ip->sync_flags;
1148
1149         /*
1150          * The backend may have adjusted nlinks, so if the adjusted nlinks
1151          * does not match the frontend, set the frontend's RDIRTY flag again.
1152          */
1153         if (ip->ino_rec.ino_nlinks != ip->sync_ino_rec.ino_nlinks)
1154                 ip->flags |= HAMMER_INODE_RDIRTY;
1155
1156         /*
1157          * Reflush any BIOs that wound up in the alt list.  Our inode will
1158          * also wind up at the end of the flusher's list.
1159          */
1160         while ((bio = TAILQ_FIRST(&ip->bio_alt_list)) != NULL) {
1161                 TAILQ_REMOVE(&ip->bio_alt_list, bio, bio_act);
1162                 TAILQ_INSERT_TAIL(&ip->bio_list, bio, bio_act);
1163         }
1164         /*
1165          * Fix up the dirty buffer status.
1166          */
1167         if (TAILQ_FIRST(&ip->bio_list) ||
1168             (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree))) {
1169                 ip->flags |= HAMMER_INODE_BUFS;
1170         }
1171
1172         /*
1173          * Re-set the XDIRTY flag if some of the inode's in-memory records
1174          * could not be flushed.
1175          */
1176         if (RB_ROOT(&ip->rec_tree))
1177                 ip->flags |= HAMMER_INODE_XDIRTY;
1178
1179         /*
1180          * Do not lose track of inodes which no longer have vnode
1181          * associations, otherwise they may never get flushed again.
1182          */
1183         if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
1184                 ip->flags |= HAMMER_INODE_REFLUSH;
1185
1186         /*
1187          * Adjust flush_state.  The target state (idle or setup) shouldn't
1188          * be terribly important since we will reflush if we really need
1189          * to do anything. XXX
1190          */
1191         if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
1192                 ip->flush_state = HAMMER_FST_IDLE;
1193                 dorel = 1;
1194         } else {
1195                 ip->flush_state = HAMMER_FST_SETUP;
1196         }
1197
1198         /*
1199          * Clean up the vnode ref
1200          */
1201         if (ip->flags & HAMMER_INODE_VHELD) {
1202                 ip->flags &= ~HAMMER_INODE_VHELD;
1203                 vrele(ip->vp);
1204         }
1205
1206         /*
1207          * If the frontend made more changes and requested another flush,
1208          * then try to get it running.
1209          */
1210         if (ip->flags & HAMMER_INODE_REFLUSH) {
1211                 ip->flags &= ~HAMMER_INODE_REFLUSH;
1212                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
1213                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
1214                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1215                 } else {
1216                         hammer_flush_inode(ip, 0);
1217                 }
1218         }
1219
1220         /*
1221          * Finally, if the frontend is waiting for a flush to complete,
1222          * wake it up.
1223          */
1224         if (ip->flush_state != HAMMER_FST_FLUSH) {
1225                 if (ip->flags & HAMMER_INODE_FLUSHW) {
1226                         ip->flags &= ~HAMMER_INODE_FLUSHW;
1227                         wakeup(&ip->flags);
1228                 }
1229         }
1230         if (dorel)
1231                 hammer_rel_inode(ip, 0);
1232 }
1233
1234 /*
1235  * Called from hammer_sync_inode() to synchronize in-memory records
1236  * to the media.
1237  */
1238 static int
1239 hammer_sync_record_callback(hammer_record_t record, void *data)
1240 {
1241         hammer_cursor_t cursor = data;
1242         hammer_transaction_t trans = cursor->trans;
1243         int error;
1244
1245         /*
1246          * Skip records that do not belong to the current flush.
1247          */
1248         if (record->flush_state != HAMMER_FST_FLUSH)
1249                 return(0);
1250         KKASSERT((record->flags & HAMMER_RECF_DELETED_BE) == 0);
1251 #if 1
1252         if (record->flush_group != record->ip->flush_group) {
1253                 kprintf("sync_record %p ip %p bad flush group %d %d\n", record, record->ip, record->flush_group, record->ip->flush_group);
1254                 Debugger("blah2");
1255                 return(0);
1256         }
1257 #endif
1258         KKASSERT(record->flush_group == record->ip->flush_group);
1259
1260         /*
1261          * Interlock the record using the BE flag.  Once BE is set the
1262          * frontend cannot change the state of FE.
1263          *
1264          * NOTE: If FE is set prior to us setting BE we still sync the
1265          * record out, but the flush completion code converts it to 
1266          * a delete-on-disk record instead of destroying it.
1267          */
1268         KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
1269         record->flags |= HAMMER_RECF_INTERLOCK_BE;
1270
1271         /*
1272          * If DELETED_FE is set we may have already sent dependent pieces
1273          * to the disk and we must flush the record as if it hadn't been
1274          * deleted.  This creates a bit of a mess because we have to
1275          * have ip_sync_record convert the record to MEM_RECORD_DEL before
1276          * it inserts the B-Tree record.  Otherwise the media sync might
1277          * be visible to the frontend.
1278          */
1279         if (record->flags & HAMMER_RECF_DELETED_FE) {
1280                 if (record->type == HAMMER_MEM_RECORD_ADD) {
1281                         record->flags |= HAMMER_RECF_CONVERT_DELETE;
1282                 } else {
1283                         KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
1284                         return(0);
1285                 }
1286         }
1287
1288         /*
1289          * Assign the create_tid for new records.  Deletions already
1290          * have the record's entire key properly set up.
1291          */
1292         if (record->type != HAMMER_MEM_RECORD_DEL)
1293                 record->rec.inode.base.base.create_tid = trans->tid;
1294         for (;;) {
1295                 error = hammer_ip_sync_record_cursor(cursor, record);
1296                 if (error != EDEADLK)
1297                         break;
1298                 hammer_done_cursor(cursor);
1299                 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
1300                                            record->ip);
1301                 if (error)
1302                         break;
1303         }
1304         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
1305
1306         if (error) {
1307                 error = -error;
1308                 if (error != -ENOSPC) {
1309                         kprintf("hammer_sync_record_callback: sync failed rec "
1310                                 "%p, error %d\n", record, error);
1311                         Debugger("sync failed rec");
1312                 }
1313         }
1314         hammer_flush_record_done(record, error);
1315         return(error);
1316 }
1317
1318 /*
1319  * XXX error handling
1320  */
1321 int
1322 hammer_sync_inode(hammer_inode_t ip)
1323 {
1324         struct hammer_transaction trans;
1325         struct hammer_cursor cursor;
1326         struct bio *bio;
1327         hammer_record_t depend;
1328         hammer_record_t next;
1329         int error, tmp_error;
1330         u_int64_t nlinks;
1331
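             /*
              * The backend only synchronizes the snapshotted (sync_*)
              * state.  If nothing was flagged for synchronization there
              * is nothing to do.
              */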
1332         if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
1333                 return(0);
1334
1335         hammer_start_transaction_fls(&trans, ip->hmp);
1336         error = hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
1337         if (error)
1338                 goto done;
1339
1340         /*
1341          * Any directory records referencing this inode which are not in
1342          * our current flush group must adjust our nlink count for the
1343          * purposes of synchronization to disk.
1344          *
1345          * Records which are in our flush group can be unlinked from our
1346          * inode now, allowing the inode to be physically deleted.
1347          */
1348         nlinks = ip->ino_rec.ino_nlinks;
1349         next = TAILQ_FIRST(&ip->target_list);
1350         while ((depend = next) != NULL) {
1351                 next = TAILQ_NEXT(depend, target_entry);
1352                 if (depend->flush_state == HAMMER_FST_FLUSH &&
1353                     depend->flush_group == ip->hmp->flusher_act) {
1354                         TAILQ_REMOVE(&ip->target_list, depend, target_entry);
1355                         depend->target_ip = NULL;
1356                         /* no need to signal target_ip, it is us */
1357                 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
1358                         switch(depend->type) {
1359                         case HAMMER_MEM_RECORD_ADD:
1360                                 --nlinks;
1361                                 break;
1362                         case HAMMER_MEM_RECORD_DEL:
1363                                 ++nlinks;
1364                                 break;
1365                         default:
1366                                 break;
1367                         }
1368                 }
1369         }
1370
1371         /*
1372          * Set dirty if we had to modify the link count.
1373          */
1374         if (ip->sync_ino_rec.ino_nlinks != nlinks) {
1375                 KKASSERT((int64_t)nlinks >= 0);
1376                 ip->sync_ino_rec.ino_nlinks = nlinks;
1377                 ip->sync_flags |= HAMMER_INODE_RDIRTY;
1378         }
1379
1380         /*
1381          * Queue up as many dirty buffers as we can, then set a flag to
1382          * cause any further BIOs to go to the alternative queue.
1383          */
1384         if (ip->flags & HAMMER_INODE_VHELD)
1385                 error = vfsync(ip->vp, MNT_NOWAIT, 1, NULL, NULL);
1386         ip->flags |= HAMMER_INODE_WRITE_ALT;
1387
1388         /*
1389          * The buffer cache may contain dirty buffers beyond the inode
1390          * state we copied from the frontend to the backend.  Because
1391          * we are syncing our buffer cache on the backend, resync
1392          * the truncation point and the file size so we don't wipe out
1393          * any data.
1394          *
1395          * Syncing the buffer cache on the frontend has serious problems
1396          * because it prevents us from passively queueing dirty inodes
1397          * to the backend (the BIO's could stall indefinitely).
1398          */
1399         if (ip->flags & HAMMER_INODE_TRUNCATED) {
1400                 ip->sync_trunc_off = ip->trunc_off;
1401                 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
1402         }
1403         if (ip->sync_ino_rec.ino_size != ip->ino_rec.ino_size) {
1404                 ip->sync_ino_rec.ino_size = ip->ino_rec.ino_size;
1405                 ip->sync_flags |= HAMMER_INODE_RDIRTY;
1406         }
1407
1408         /*
1409          * If there is a truncation queued, destroy any data past the (aligned)
1410          * truncation point.  Userland will have dealt with the buffer
1411          * containing the truncation point for us.
1412          *
1413          * We don't flush pending frontend data buffers until after we've
1414          * dealt with the truncation.
1415          *
1416          * Don't bother if the inode is or has been deleted.
1417          */
1418         if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
1419                 /*
1420                  * Interlock trunc_off.  The VOP front-end may continue to
1421                  * make adjustments to it while we are blocked.
1422                  */
1423                 off_t trunc_off;
1424                 off_t aligned_trunc_off;
1425
1426                 trunc_off = ip->sync_trunc_off;
1427                 aligned_trunc_off = (trunc_off + HAMMER_BUFMASK) &
1428                                     ~HAMMER_BUFMASK64;
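                     /*
                      * For example, with 16KB HAMMER buffers a trunc_off of
                      * 20000 yields an aligned_trunc_off of 32768; the
                      * partially covered buffer below trunc_off is handled
                      * by the frontend.
                      */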
1429
1430                 /*
1431                  * Delete any whole blocks on-media.  The front-end has
1432                  * already cleaned out any partial block and made it
1433                  * pending.  The front-end may have updated trunc_off
1434                  * while we were blocked so do not just unconditionally
1435                  * set it to the maximum offset.
1436                  */
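                     /* delete to the maximum possible offset, i.e. end of file */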
1437                 error = hammer_ip_delete_range(&cursor, ip,
1438                                                 aligned_trunc_off,
1439                                                 0x7FFFFFFFFFFFFFFFLL);
1440                 if (error)
1441                         Debugger("hammer_ip_delete_range errored");
1442                 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
1443                 if (ip->trunc_off >= trunc_off) {
1444                         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
1445                         ip->flags &= ~HAMMER_INODE_TRUNCATED;
1446                 }
1447         } else {
1448                 error = 0;
1449         }
1450
1451         /*
1452          * Now sync related records.  These will typically be directory
1453          * entries or delete-on-disk records.
1454          *
1455          * Not all records will be flushed, but clear XDIRTY anyway.  We
1456          * will set it again in the frontend's hammer_flush_inode_done()
1457          * if records remain.
1458          */
1459         if (error == 0) {
1460                 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1461                                     hammer_sync_record_callback, &cursor);
1462                 if (tmp_error < 0)      /* callback returns -errno */
1463                         tmp_error = -tmp_error;
1464                 if (tmp_error)
1465                         error = tmp_error;
1466                 if (error == 0)
1467                         ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
1468         }
1469
1470         /*
1471          * If we are deleting the inode, the frontend had better not have
1472          * any active references on elements making up the inode.
1473          */
1474         if (error == 0 && ip->sync_ino_rec.ino_nlinks == 0 &&
1475             RB_EMPTY(&ip->rec_tree) &&
1476             (ip->sync_flags & HAMMER_INODE_DELETING) &&
1477             (ip->flags & HAMMER_INODE_DELETED) == 0) {
1478                 int count1 = 0;
1479
1480                 kprintf("Y");
1481                 ip->flags |= HAMMER_INODE_DELETED;
1482                 error = hammer_ip_delete_range_all(&cursor, ip, &count1);
1483                 if (error == 0) {
1484                         ip->sync_flags &= ~HAMMER_INODE_DELETING;
1485                         ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
1486                         KKASSERT(RB_EMPTY(&ip->rec_tree));
1487
1488                         /*
1489                          * Set delete_tid in both the frontend and backend
1490                          * copy of the inode record.  The DELETED flag handles
1491                          * this, do not set RDIRTY.
1492                          */
1493                         ip->ino_rec.base.base.delete_tid = trans.tid;
1494                         ip->sync_ino_rec.base.base.delete_tid = trans.tid;
1495
1496                         /*
1497                          * Adjust the inode count in the volume header
1498                          */
1499                         hammer_modify_volume_field(&trans, trans.rootvol,
1500                                                    vol0_stat_inodes);
1501                         --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
1502                         hammer_modify_volume_done(trans.rootvol);
1503                 } else {
1504                         ip->flags &= ~HAMMER_INODE_DELETED;
1505                         Debugger("hammer_ip_delete_range_all errored");
1506                 }
1507         }
1508
1509         /*
1510          * Flush any queued BIOs.  These will just biodone() the IO's if
1511          * the inode has been deleted.
1512          */
1513         while ((bio = TAILQ_FIRST(&ip->bio_list)) != NULL) {
1514                 TAILQ_REMOVE(&ip->bio_list, bio, bio_act);
1515                 tmp_error = hammer_dowrite(&cursor, ip, bio);
1516                 if (tmp_error)
1517                         error = tmp_error;
1518         }
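             /* all queued BIOs have been dispatched, clear the dirty-buffer flag */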
1519         ip->sync_flags &= ~HAMMER_INODE_BUFS;
1520
1521         if (error)
1522                 Debugger("RB_SCAN errored");
1523
1524         /*
1525          * Now update the inode's on-disk inode-data and/or on-disk record.
1526          * DELETED and ONDISK are managed only in ip->flags.
1527          */
1528         switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
1529         case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
1530                 /*
1531                  * If deleted and on-disk, don't set any additional flags;
1532                  * the delete flag takes care of things.
1533                  *
1534                  * Clear flags which may have been set by the frontend.
1535                  */
1536                 ip->sync_flags &= ~(HAMMER_INODE_RDIRTY|HAMMER_INODE_DDIRTY|
1537                                     HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
1538                                     HAMMER_INODE_DELETING);
1539                 break;
1540         case HAMMER_INODE_DELETED:
1541                 /*
1542                  * Take care of the case where a deleted inode was never
1543                  * flushed to the disk in the first place.
1544                  *
1545                  * Clear flags which may have been set by the frontend.
1546                  */
1547                 ip->sync_flags &= ~(HAMMER_INODE_RDIRTY|HAMMER_INODE_DDIRTY|
1548                                     HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
1549                                     HAMMER_INODE_DELETING);
1550                 while (RB_ROOT(&ip->rec_tree)) {
1551                         hammer_record_t record = RB_ROOT(&ip->rec_tree);
1552                         hammer_ref(&record->lock);
1553                         KKASSERT(record->lock.refs == 1);
1554                         record->flags |= HAMMER_RECF_DELETED_FE;
1555                         record->flags |= HAMMER_RECF_DELETED_BE;
1556                         hammer_rel_mem_record(record);
1557                 }
1558                 break;
1559         case HAMMER_INODE_ONDISK:
1560                 /*
1561                  * If already on-disk, do not set any additional flags.
1562                  */
1563                 break;
1564         default:
1565                 /*
1566                  * If not on-disk and not deleted, set both dirty flags
1567                  * to force an initial record to be written.  Also set
1568                  * the create_tid for the inode.
1569                  *
1570                  * Set create_tid in both the frontend and backend
1571                  * copy of the inode record.
1572                  */
1573                 ip->ino_rec.base.base.create_tid = trans.tid;
1574                 ip->sync_ino_rec.base.base.create_tid = trans.tid;
1575                 ip->sync_flags |= HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY;
1576                 break;
1577         }
1578
1579         /*
1580          * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
1581          * is already on-disk the old record is marked as deleted.
1582          *
1583          * If DELETED is set hammer_update_inode() will delete the existing
1584          * record without writing out a new one.
1585          *
1586          * If *ONLY* the ITIMES flag is set we can update the record in-place.
1587          */
1588         if (ip->flags & HAMMER_INODE_DELETED) {
1589                 error = hammer_update_inode(&cursor, ip);
1590         } else 
1591         if ((ip->sync_flags & (HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY |
1592                                HAMMER_INODE_ITIMES)) == HAMMER_INODE_ITIMES) {
1593                 error = hammer_update_itimes(&cursor, ip);
1594         } else
1595         if (ip->sync_flags & (HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY |
1596                               HAMMER_INODE_ITIMES)) {
1597                 error = hammer_update_inode(&cursor, ip);
1598         }
1599         if (error)
1600                 Debugger("hammer_update_itimes/inode errored");
1601 done:
1602         /*
1603          * Save the TID we used to sync the inode so that we do not
1604          * improperly reuse it.
1605          */
1606         hammer_done_cursor(&cursor);
1607         hammer_done_transaction(&trans);
1608         return(error);
1609 }
1610
1611 /*
1612  * This routine is called when the OS is no longer actively referencing
1613  * the inode (but might still be keeping it cached), or when releasing
1614  * the last reference to an inode.
1615  *
1616  * At this point if the inode's nlinks count is zero we want to destroy
1617  * it, which may mean destroying it on-media too.
1618  */
1619 void
1620 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
1621 {
1622         struct vnode *vp;
1623
1624         /*
1625          * If the inode is on-media and the link count is 0 we MUST delete
1626          * it on-media.  DELETING is a mod flag, DELETED is a state flag.
1627          */
1628         if (ip->ino_rec.ino_nlinks == 0 &&
1629             (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
1630                 ip->flags |= HAMMER_INODE_DELETING;
1631                 ip->flags |= HAMMER_INODE_TRUNCATED;
1632                 ip->trunc_off = 0;
1633                 vp = NULL;
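                     /*
                      * Acquire a vnode if the caller does not already hold
                      * one so the buffer cache and VM object can be
                      * truncated below.
                      */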
1634                 if (getvp) {
1635                         if (hammer_get_vnode(ip, &vp) != 0)
1636                                 return;
1637                 }
1638                 if (ip->vp) {
1639                         vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
1640                         vnode_pager_setsize(ip->vp, 0);
1641                 }
1642                 if (getvp) {
1643                         vput(vp);
1644                 }
1645         }
1646 }
1647
1648 /*
1649  * Re-test an inode when a dependency has gone away to see if we
1650  * can chain flush it.
1651  */
1652 void
1653 hammer_test_inode(hammer_inode_t ip)
1654 {
1655         if (ip->flags & HAMMER_INODE_REFLUSH) {
1656                 ip->flags &= ~HAMMER_INODE_REFLUSH;
1657                 hammer_ref(&ip->lock);
1658                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
1659                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
1660                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1661                 } else {
1662                         hammer_flush_inode(ip, 0);
1663                 }
1664                 hammer_rel_inode(ip, 0);
1665         }
1666 }
1667