HAMMER 42/Many: Cleanup.
1 /*
2  * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
3  * 
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  * 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  * 
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  * 
34  * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.54 2008/05/09 07:26:51 dillon Exp $
35  */
36
37 #include "hammer.h"
38 #include <vm/vm_extern.h>
39 #include <sys/buf.h>
40 #include <sys/buf2.h>
41
42 static int hammer_unload_inode(struct hammer_inode *ip);
43 static void hammer_flush_inode_core(hammer_inode_t ip, int flags);
44 static int hammer_setup_child_callback(hammer_record_t rec, void *data);
45 static int hammer_setup_parent_inodes(hammer_record_t record);
46
47 /*
48  * The kernel is not actively referencing this vnode but is still holding
49  * it cached.
50  *
51  * This is called from the frontend.
52  */
53 int
54 hammer_vop_inactive(struct vop_inactive_args *ap)
55 {
56         struct hammer_inode *ip = VTOI(ap->a_vp);
57
58         /*
59          * Degenerate case
60          */
61         if (ip == NULL) {
62                 vrecycle(ap->a_vp);
63                 return(0);
64         }
65
66         /*
67          * If the inode no longer has visibility in the filesystem and is
68          * fairly clean, try to recycle it immediately.  This can deadlock
69          * in vfsync() if we aren't careful.
70          */
71         hammer_inode_unloadable_check(ip, 0);
72         if (ip->flags & HAMMER_INODE_MODMASK)
73                 hammer_flush_inode(ip, 0);
74         else if (ip->ino_rec.ino_nlinks == 0)
75                 vrecycle(ap->a_vp);
76         return(0);
77 }
78
79 /*
80  * Release the vnode association.  This is typically (but not always)
81  * the last reference on the inode.
82  *
83  * Once the association is lost we are on our own with regards to
84  * flushing the inode.
85  */
86 int
87 hammer_vop_reclaim(struct vop_reclaim_args *ap)
88 {
89         struct hammer_inode *ip;
90         struct vnode *vp;
91
92         vp = ap->a_vp;
93
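        /*
         * Detach the inode from the vnode and drop the reference that was
         * acquired when the association was established; this may queue
         * the inode for its final disposition.
         */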
94         if ((ip = vp->v_data) != NULL) {
95                 vp->v_data = NULL;
96                 ip->vp = NULL;
97                 hammer_rel_inode(ip, 1);
98         }
99         return(0);
100 }
101
102 /*
103  * Return a locked vnode for the specified inode.  The inode must be
104  * referenced but NOT LOCKED on entry and will remain referenced on
105  * return.
106  *
107  * Called from the frontend.
108  */
109 int
110 hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
111 {
112         struct vnode *vp;
113         int error = 0;
114
115         for (;;) {
116                 if ((vp = ip->vp) == NULL) {
117                         error = getnewvnode(VT_HAMMER, ip->hmp->mp, vpp, 0, 0);
118                         if (error)
119                                 break;
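                        /*
                         * Interlock against a race where another thread
                         * attached a vnode to this inode while we were
                         * allocating ours.  If we lost the race, discard
                         * the new vnode and retry.
                         */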
120                         hammer_lock_ex(&ip->lock);
121                         if (ip->vp != NULL) {
122                                 hammer_unlock(&ip->lock);
123                                 vp->v_type = VBAD;
124                                 vx_put(vp);
125                                 continue;
126                         }
127                         hammer_ref(&ip->lock);
128                         vp = *vpp;
129                         ip->vp = vp;
130                         vp->v_type = hammer_get_vnode_type(
131                                             ip->ino_rec.base.base.obj_type);
132
133                         switch(ip->ino_rec.base.base.obj_type) {
134                         case HAMMER_OBJTYPE_CDEV:
135                         case HAMMER_OBJTYPE_BDEV:
136                                 vp->v_ops = &ip->hmp->mp->mnt_vn_spec_ops;
137                                 addaliasu(vp, ip->ino_data.rmajor,
138                                           ip->ino_data.rminor);
139                                 break;
140                         case HAMMER_OBJTYPE_FIFO:
141                                 vp->v_ops = &ip->hmp->mp->mnt_vn_fifo_ops;
142                                 break;
143                         default:
144                                 break;
145                         }
146
147                         /*
148                          * Only mark as the root vnode if the ip is not
149                          * historical, otherwise the VFS cache will get
150                          * confused.  The other half of the special handling
151                          * is in hammer_vop_nlookupdotdot().
152                          */
153                         if (ip->obj_id == HAMMER_OBJID_ROOT &&
154                             ip->obj_asof == ip->hmp->asof) {
155                                 vp->v_flag |= VROOT;
156                         }
157
158                         vp->v_data = (void *)ip;
159                         /* vnode locked by getnewvnode() */
160                         /* make related vnode dirty if inode dirty? */
161                         hammer_unlock(&ip->lock);
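                        /*
                         * Regular files get VM-object-backed I/O initialized
                         * here, sized to the inode's current size.
                         */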
162                         if (vp->v_type == VREG)
163                                 vinitvmio(vp, ip->ino_rec.ino_size);
164                         break;
165                 }
166
167                 /*
168                  * loop if the vget fails (aka races), or if the vp
169                  * no longer matches ip->vp.
170                  */
171                 if (vget(vp, LK_EXCLUSIVE) == 0) {
172                         if (vp == ip->vp)
173                                 break;
174                         vput(vp);
175                 }
176         }
177         *vpp = vp;
178         return(error);
179 }
180
181 /*
182  * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
183  * do not attach or detach the related vnode (use hammer_get_vnode() for
184  * that).
185  *
186  * The flags argument is only applied for newly created inodes, and only
187  * certain flags are inherited.
188  *
189  * Called from the frontend.
190  */
191 struct hammer_inode *
192 hammer_get_inode(hammer_transaction_t trans, struct hammer_node **cache,
193                  u_int64_t obj_id, hammer_tid_t asof, int flags, int *errorp)
194 {
195         hammer_mount_t hmp = trans->hmp;
196         struct hammer_inode_info iinfo;
197         struct hammer_cursor cursor;
198         struct hammer_inode *ip;
199
200         /*
201          * Determine if we already have an inode cached.  If we do then
202          * we are golden.
203          */
204         iinfo.obj_id = obj_id;
205         iinfo.obj_asof = asof;
206 loop:
207         ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
208         if (ip) {
209                 hammer_ref(&ip->lock);
210                 *errorp = 0;
211                 return(ip);
212         }
213
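        /*
         * Not cached; allocate and initialize a new in-memory inode
         * structure.  It is not inserted into the RB tree until the
         * on-disk inode has been located below.
         */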
214         ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
215         ++hammer_count_inodes;
216         ip->obj_id = obj_id;
217         ip->obj_asof = iinfo.obj_asof;
218         ip->hmp = hmp;
219         ip->flags = flags & HAMMER_INODE_RO;
220         if (hmp->ronly)
221                 ip->flags |= HAMMER_INODE_RO;
222         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
223         RB_INIT(&ip->rec_tree);
224         TAILQ_INIT(&ip->bio_list);
225         TAILQ_INIT(&ip->bio_alt_list);
226         TAILQ_INIT(&ip->target_list);
227
228         /*
229          * Locate the on-disk inode.
230          */
231 retry:
232         hammer_init_cursor(trans, &cursor, cache, NULL);
233         cursor.key_beg.obj_id = ip->obj_id;
234         cursor.key_beg.key = 0;
235         cursor.key_beg.create_tid = 0;
236         cursor.key_beg.delete_tid = 0;
237         cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
238         cursor.key_beg.obj_type = 0;
239         cursor.asof = iinfo.obj_asof;
240         cursor.flags = HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_GET_DATA |
241                        HAMMER_CURSOR_ASOF;
242
243         *errorp = hammer_btree_lookup(&cursor);
244         if (*errorp == EDEADLK) {
245                 hammer_done_cursor(&cursor);
246                 goto retry;
247         }
248
249         /*
250          * On success the B-Tree lookup will hold the appropriate
251          * buffer cache buffers and provide a pointer to the requested
252          * information.  Copy the information to the in-memory inode
253          * and cache the B-Tree node to improve future operations.
254          */
255         if (*errorp == 0) {
256                 ip->ino_rec = cursor.record->inode;
257                 ip->ino_data = cursor.data->inode;
258                 hammer_cache_node(cursor.node, &ip->cache[0]);
259                 if (cache)
260                         hammer_cache_node(cursor.node, cache);
261         }
262
263         /*
264          * On success load the inode's record and data and insert the
265          * inode into the in-memory RB tree.  It is possible to race another
266          * lookup's insertion of the same inode so deal with that condition too.
267          *
268          * The cursor's locked node interlocks against others creating and
269          * destroying ip while we were blocked.
270          */
271         if (*errorp == 0) {
272                 hammer_ref(&ip->lock);
273                 if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
274                         hammer_uncache_node(&ip->cache[0]);
275                         hammer_uncache_node(&ip->cache[1]);
276                         KKASSERT(ip->lock.refs == 1);
277                         --hammer_count_inodes;
278                         kfree(ip, M_HAMMER);
279                         hammer_done_cursor(&cursor);
280                         goto loop;
281                 }
282                 ip->flags |= HAMMER_INODE_ONDISK;
283         } else {
284                 /*
285                  * Do not panic on read-only accesses which fail, particularly
286                  * historical accesses where the snapshot might not have
287                  * complete connectivity.
288                  */
289                 if ((flags & HAMMER_INODE_RO) == 0) {
290                         kprintf("hammer_get_inode: failed ip %p obj_id %016llx cursor %p error %d\n",
291                                 ip, ip->obj_id, &cursor, *errorp);
292                         Debugger("x");
293                 }
294                 --hammer_count_inodes;
295                 kfree(ip, M_HAMMER);
296                 ip = NULL;
297         }
298         hammer_done_cursor(&cursor);
299         return (ip);
300 }
301
302 /*
303  * Create a new filesystem object, returning the inode in *ipp.  The
304  * returned inode will be referenced.
305  *
306  * The inode is created in-memory.
307  */
308 int
309 hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
310                     struct ucred *cred, hammer_inode_t dip,
311                     struct hammer_inode **ipp)
312 {
313         hammer_mount_t hmp;
314         hammer_inode_t ip;
315         uid_t xuid;
316
317         hmp = trans->hmp;
318         ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
319         ++hammer_count_inodes;
320         ip->obj_id = hammer_alloc_objid(trans, dip);
321         KKASSERT(ip->obj_id != 0);
322         ip->obj_asof = hmp->asof;
323         ip->hmp = hmp;
324         ip->flush_state = HAMMER_FST_IDLE;
325         ip->flags = HAMMER_INODE_DDIRTY | HAMMER_INODE_RDIRTY |
326                     HAMMER_INODE_ITIMES;
327
328         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
329         RB_INIT(&ip->rec_tree);
330         TAILQ_INIT(&ip->bio_list);
331         TAILQ_INIT(&ip->bio_alt_list);
332         TAILQ_INIT(&ip->target_list);
333
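        /*
         * Initialize the in-memory copy of the inode record.  create_tid
         * and delete_tid are left zero here; the create_tid is assigned
         * when the backend synchronizes the record to the media.
         */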
334         ip->ino_rec.ino_atime = trans->time;
335         ip->ino_rec.ino_mtime = trans->time;
336         ip->ino_rec.ino_size = 0;
337         ip->ino_rec.ino_nlinks = 0;
338         /* XXX */
339         ip->ino_rec.base.base.btype = HAMMER_BTREE_TYPE_RECORD;
340         ip->ino_rec.base.base.obj_id = ip->obj_id;
341         ip->ino_rec.base.base.key = 0;
342         ip->ino_rec.base.base.create_tid = 0;
343         ip->ino_rec.base.base.delete_tid = 0;
344         ip->ino_rec.base.base.rec_type = HAMMER_RECTYPE_INODE;
345         ip->ino_rec.base.base.obj_type = hammer_get_obj_type(vap->va_type);
346
347         ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
348         ip->ino_data.mode = vap->va_mode;
349         ip->ino_data.ctime = trans->time;
350         ip->ino_data.parent_obj_id = (dip) ? dip->ino_rec.base.base.obj_id : 0;
351
352         switch(ip->ino_rec.base.base.obj_type) {
353         case HAMMER_OBJTYPE_CDEV:
354         case HAMMER_OBJTYPE_BDEV:
355                 ip->ino_data.rmajor = vap->va_rmajor;
356                 ip->ino_data.rminor = vap->va_rminor;
357                 break;
358         default:
359                 break;
360         }
361
362         /*
363          * Calculate default uid/gid and overwrite with information from
364          * the vap.
365          */
366         xuid = hammer_to_unix_xid(&dip->ino_data.uid);
367         ip->ino_data.gid = dip->ino_data.gid;
368         xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
369                                      &vap->va_mode);
370         ip->ino_data.mode = vap->va_mode;
371
372         if (vap->va_vaflags & VA_UID_UUID_VALID)
373                 ip->ino_data.uid = vap->va_uid_uuid;
374         else if (vap->va_uid != (uid_t)VNOVAL)
375                 hammer_guid_to_uuid(&ip->ino_data.uid, xuid);
376         if (vap->va_vaflags & VA_GID_UUID_VALID)
377                 ip->ino_data.gid = vap->va_gid_uuid;
378         else if (vap->va_gid != (gid_t)VNOVAL)
379                 hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
380
381         hammer_ref(&ip->lock);
382         if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
383                 hammer_unref(&ip->lock);
384                 panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
385         }
386         *ipp = ip;
387         return(0);
388 }
389
390 /*
391  * Called by hammer_sync_inode().
392  */
393 static int
394 hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
395 {
396         hammer_transaction_t trans = cursor->trans;
397         hammer_record_t record;
398         int error;
399
400 retry:
401         error = 0;
402
403         /*
404          * If the inode has a presence on-disk then locate it and mark
405          * it deleted, setting DELONDISK.
406          *
407          * The record may or may not be physically deleted, depending on
408          * the retention policy.
409          */
410         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
411             HAMMER_INODE_ONDISK) {
412                 hammer_normalize_cursor(cursor);
413                 cursor->key_beg.obj_id = ip->obj_id;
414                 cursor->key_beg.key = 0;
415                 cursor->key_beg.create_tid = 0;
416                 cursor->key_beg.delete_tid = 0;
417                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
418                 cursor->key_beg.obj_type = 0;
419                 cursor->asof = ip->obj_asof;
420                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
421                 cursor->flags |= HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_ASOF;
422                 cursor->flags |= HAMMER_CURSOR_BACKEND;
423
424                 error = hammer_btree_lookup(cursor);
425                 if (hammer_debug_inode)
426                         kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
427                 if (error) {
428                         kprintf("error %d\n", error);
429                         Debugger("hammer_update_inode");
430                 }
431
432                 if (error == 0) {
433                         error = hammer_ip_delete_record(cursor, trans->tid);
434                         if (hammer_debug_inode)
435                                 kprintf(" error %d\n", error);
436                         if (error && error != EDEADLK) {
437                                 kprintf("error %d\n", error);
438                                 Debugger("hammer_update_inode2");
439                         }
440                         if (error == 0) {
441                                 ip->flags |= HAMMER_INODE_DELONDISK;
442                         }
443                         if (cursor->node)
444                                 hammer_cache_node(cursor->node, &ip->cache[0]);
445                 }
446                 if (error == EDEADLK) {
447                         hammer_done_cursor(cursor);
448                         error = hammer_init_cursor(trans, cursor,
449                                                    &ip->cache[0], ip);
450                         if (hammer_debug_inode)
451                                 kprintf("IPDED %p %d\n", ip, error);
452                         if (error == 0)
453                                 goto retry;
454                 }
455         }
456
457         /*
458          * Ok, write out the initial record or a new record (after deleting
459          * the old one), unless the DELETED flag is set.  This routine will
460          * clear DELONDISK if it writes out a record.
461          *
462          * Update our inode statistics if this is the first application of
463          * the inode on-disk.
464          */
465         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
466                 /*
467                  * Generate a record and write it to the media
468                  */
469                 record = hammer_alloc_mem_record(ip);
470                 record->type = HAMMER_MEM_RECORD_GENERAL;
471                 record->flush_state = HAMMER_FST_FLUSH;
472                 record->rec.inode = ip->sync_ino_rec;
473                 record->rec.inode.base.base.create_tid = trans->tid;
474                 record->rec.inode.base.data_len = sizeof(ip->sync_ino_data);
475                 record->rec.base.signature = HAMMER_RECORD_SIGNATURE_GOOD;
476                 record->data = (void *)&ip->sync_ino_data;
477                 record->flags |= HAMMER_RECF_INTERLOCK_BE;
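                /*
                 * Sync the record to the media, retrying with a freshly
                 * initialized cursor if the B-Tree operation deadlocks.
                 */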
478                 for (;;) {
479                         error = hammer_ip_sync_record_cursor(cursor, record);
480                         if (hammer_debug_inode)
481                                 kprintf("GENREC %p rec %08x %d\n",      
482                                         ip, record->flags, error);
483                         if (error != EDEADLK)
484                                 break;
485                         hammer_done_cursor(cursor);
486                         error = hammer_init_cursor(trans, cursor,
487                                                    &ip->cache[0], ip);
488                         if (hammer_debug_inode)
489                                 kprintf("GENREC reinit %d\n", error);
490                         if (error)
491                                 break;
492                 }
493                 if (error) {
494                         kprintf("error %d\n", error);
495                         Debugger("hammer_update_inode3");
496                 }
497
498                 /*
499                  * The record isn't managed by the inode's record tree,
500                  * destroy it whether we succeed or fail.
501                  */
502                 record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
503                 record->flags |= HAMMER_RECF_DELETED_FE;
504                 record->flush_state = HAMMER_FST_IDLE;
505                 hammer_rel_mem_record(record);
506
507                 /*
508                  * Finish up.
509                  */
510                 if (error == 0) {
511                         if (hammer_debug_inode)
512                                 kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
513                         ip->sync_flags &= ~(HAMMER_INODE_RDIRTY |
514                                             HAMMER_INODE_DDIRTY |
515                                             HAMMER_INODE_ITIMES);
516                         ip->flags &= ~HAMMER_INODE_DELONDISK;
517
518                         /*
519                          * Root volume count of inodes
520                          */
521                         if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
522                                 hammer_modify_volume_field(trans,
523                                                            trans->rootvol,
524                                                            vol0_stat_inodes);
525                                 ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
526                                 hammer_modify_volume_done(trans->rootvol);
527                                 ip->flags |= HAMMER_INODE_ONDISK;
528                                 if (hammer_debug_inode)
529                                         kprintf("NOWONDISK %p\n", ip);
530                         }
531                 }
532         }
533
534         /*
535          * If the inode has been destroyed, clean out any left-over flags
536          * that may have been set by the frontend.
537          */
538         if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) { 
539                 ip->sync_flags &= ~(HAMMER_INODE_RDIRTY |
540                                     HAMMER_INODE_DDIRTY |
541                                     HAMMER_INODE_ITIMES);
542         }
543         return(error);
544 }
545
546 /*
547  * Update only the itimes fields.  This is done non-historically.  The
548  * record is updated in-place on the disk.
549  */
550 static int
551 hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
552 {
553         hammer_transaction_t trans = cursor->trans;
554         struct hammer_inode_record *rec;
555         int error;
556
557 retry:
558         error = 0;
559         if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
560             HAMMER_INODE_ONDISK) {
561                 hammer_normalize_cursor(cursor);
562                 cursor->key_beg.obj_id = ip->obj_id;
563                 cursor->key_beg.key = 0;
564                 cursor->key_beg.create_tid = 0;
565                 cursor->key_beg.delete_tid = 0;
566                 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
567                 cursor->key_beg.obj_type = 0;
568                 cursor->asof = ip->obj_asof;
569                 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
570                 cursor->flags |= HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_ASOF;
571                 cursor->flags |= HAMMER_CURSOR_BACKEND;
572
573                 error = hammer_btree_lookup(cursor);
574                 if (error) {
575                         kprintf("error %d\n", error);
576                         Debugger("hammer_update_itimes1");
577                 }
578                 if (error == 0) {
579                         /*
580                          * Do not generate UNDO records for atime/mtime
581                          * updates.
582                          */
583                         rec = &cursor->record->inode;
584                         hammer_modify_record_noundo(trans,
585                                                     cursor->record_buffer,
586                                                     cursor->record);
587                         rec->ino_atime = ip->sync_ino_rec.ino_atime;
588                         rec->ino_mtime = ip->sync_ino_rec.ino_mtime;
589                         hammer_modify_record_done(cursor->record_buffer,
590                                                   cursor->record);
591                         ip->sync_flags &= ~HAMMER_INODE_ITIMES;
592                         /* XXX recalculate crc */
593                         hammer_cache_node(cursor->node, &ip->cache[0]);
594                 }
595                 if (error == EDEADLK) {
596                         hammer_done_cursor(cursor);
597                         error = hammer_init_cursor(trans, cursor,
598                                                    &ip->cache[0], ip);
599                         if (error == 0)
600                                 goto retry;
601                 }
602         }
603         return(error);
604 }
605
606 /*
607  * Release a reference on an inode, flush as requested.
608  *
609  * On the last reference we queue the inode to the flusher for its final
610  * disposition.
611  */
612 void
613 hammer_rel_inode(struct hammer_inode *ip, int flush)
614 {
615         hammer_mount_t hmp = ip->hmp;
616
617         /*
618          * Handle disposition when dropping the last ref.
619          */
620         for (;;) {
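                /*
                 * The reference count can change while we operate on the
                 * inode (flushing may itself gain and release references),
                 * so re-test on each pass and only unload or drop our ref
                 * when it is safe to do so.
                 */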
621                 if (ip->lock.refs == 1) {
622                         /*
623                          * Determine whether on-disk action is needed for
624                          * the inode's final disposition.
625                          */
626                         KKASSERT(ip->vp == NULL);
627                         hammer_inode_unloadable_check(ip, 0);
628                         if (ip->flags & HAMMER_INODE_MODMASK) {
629                                 hammer_flush_inode(ip, 0);
630                         } else if (ip->lock.refs == 1) {
631                                 hammer_unload_inode(ip);
632                                 break;
633                         }
634                 } else {
635                         if (flush)
636                                 hammer_flush_inode(ip, 0);
637
638                         /*
639                          * The inode still has multiple refs, try to drop
640                          * one ref.
641                          */
642                         KKASSERT(ip->lock.refs >= 1);
643                         if (ip->lock.refs > 1) {
644                                 hammer_unref(&ip->lock);
645                                 break;
646                         }
647                 }
648         }
649
650         /*
651          * XXX bad hack until I add code to track inodes in SETUP.  We
652          * can queue a lot of inodes to the syncer but if we don't wake
653          * it up the undo sets will be too large or too many unflushed
654          * records will build up and blow our malloc limit.
655          */
656         if (++hmp->reclaim_count > 256) {
657                 hmp->reclaim_count = 0;
658                 hammer_flusher_async(hmp);
659         }
660 }
661
662 /*
663  * Unload and destroy the specified inode.  Must be called with one remaining
664  * reference.  The reference is disposed of.
665  *
666  * This can only be called in the context of the flusher.
667  */
668 static int
669 hammer_unload_inode(struct hammer_inode *ip)
670 {
671         KASSERT(ip->lock.refs == 1,
672                 ("hammer_unload_inode: %d refs\n", ip->lock.refs));
673         KKASSERT(ip->vp == NULL);
674         KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
675         KKASSERT(ip->cursor_ip_refs == 0);
676         KKASSERT(ip->lock.lockcount == 0);
677         KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);
678
679         KKASSERT(RB_EMPTY(&ip->rec_tree));
680         KKASSERT(TAILQ_EMPTY(&ip->target_list));
681         KKASSERT(TAILQ_EMPTY(&ip->bio_list));
682         KKASSERT(TAILQ_EMPTY(&ip->bio_alt_list));
683
684         RB_REMOVE(hammer_ino_rb_tree, &ip->hmp->rb_inos_root, ip);
685
686         hammer_uncache_node(&ip->cache[0]);
687         hammer_uncache_node(&ip->cache[1]);
688         if (ip->objid_cache)
689                 hammer_clear_objid(ip);
690         --hammer_count_inodes;
691         kfree(ip, M_HAMMER);
692
693         return(0);
694 }
695
696 /*
697  * A transaction has modified an inode, requiring updates as specified by
698  * the passed flags.
699  *
700  * HAMMER_INODE_RDIRTY: Inode record has been updated
701  * HAMMER_INODE_DDIRTY: Inode data has been updated
702  * HAMMER_INODE_XDIRTY: Dirty in-memory records
703  * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
704  * HAMMER_INODE_DELETED: Inode record/data must be deleted
705  * HAMMER_INODE_ITIMES: mtime/atime has been updated
706  */
707 void
708 hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
709 {
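        /*
         * A read-only inode must never be flagged with modifications that
         * would require writing to the media.
         */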
710         KKASSERT ((ip->flags & HAMMER_INODE_RO) == 0 ||
711                   (flags & (HAMMER_INODE_RDIRTY|HAMMER_INODE_DDIRTY|
712                    HAMMER_INODE_XDIRTY|HAMMER_INODE_BUFS|
713                    HAMMER_INODE_DELETED|HAMMER_INODE_ITIMES)) == 0);
714
715         ip->flags |= flags;
716 }
717
718 /*
719  * Request that an inode be flushed.  This whole mess cannot block and may
720  * recurse.  Once requested HAMMER will attempt to actively flush it until
721  * the flush can be done.
722  *
723  * The inode may already be flushing, or may be in a setup state.  We can
724  * place the inode in a flushing state if it is currently idle and flag it
725  * to reflush if it is currently flushing.
726  */
727 void
728 hammer_flush_inode(hammer_inode_t ip, int flags)
729 {
730         hammer_record_t depend;
731         int r, good;
732
733         /*
734          * Trivial 'nothing to flush' case.  If the inode is in a SETUP
735          * state we have to put it back into an IDLE state so we can
736          * drop the extra ref.
737          */
738         if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
739                 if (ip->flush_state == HAMMER_FST_SETUP) {
740                         ip->flush_state = HAMMER_FST_IDLE;
741                         hammer_rel_inode(ip, 0);
742                 }
743                 return;
744         }
745
746         /*
747          * Our flush action will depend on the current state.
748          */
749         switch(ip->flush_state) {
750         case HAMMER_FST_IDLE:
751                 /*
752                  * We have no dependencies and can flush immediately.  Some of
753                  * our children may not be flushable, so we have to re-test
754                  * with that additional knowledge.
755                  */
756                 hammer_flush_inode_core(ip, flags);
757                 break;
758         case HAMMER_FST_SETUP:
759                 /*
760                  * Recurse upwards through dependencies via target_list
761                  * and start their flusher actions going if possible.
762                  *
763                  * 'good' is our connectivity.  -1 means we have none and
764                  * can't flush, 0 means there weren't any dependencies, and
765                  * 1 means we have good connectivity.
766                  */
767                 good = 0;
768                 TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
769                         r = hammer_setup_parent_inodes(depend);
770                         if (r < 0 && good == 0)
771                                 good = -1;
772                         if (r > 0)
773                                 good = 1;
774                 }
775
776                 /*
777                  * We can continue if good >= 0.  Determine how many records
778                  * under our inode can be flushed (and mark them).
779                  */
780                 if (good >= 0) {
781                         hammer_flush_inode_core(ip, flags);
782                 } else {
783                         ip->flags |= HAMMER_INODE_REFLUSH;
784                         if (flags & HAMMER_FLUSH_SIGNAL) {
785                                 ip->flags |= HAMMER_INODE_RESIGNAL;
786                                 hammer_flusher_async(ip->hmp);
787                         }
788                 }
789                 break;
790         default:
791                 /*
792                  * We are already flushing, flag the inode to reflush
793                  * if needed after it completes its current flush.
794                  */
795                 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
796                         ip->flags |= HAMMER_INODE_REFLUSH;
797                 if (flags & HAMMER_FLUSH_SIGNAL) {
798                         ip->flags |= HAMMER_INODE_RESIGNAL;
799                         hammer_flusher_async(ip->hmp);
800                 }
801                 break;
802         }
803 }
804
805 /*
806  * We are asked to recurse upwards and convert the record from SETUP
807  * to FLUSH if possible.  record->ip is a parent of the caller's inode,
808  * and record->target_ip is the caller's inode.
809  *
810  * Return 1 if the record gives us connectivity
811  *
812  * Return 0 if the record is not relevant 
813  *
814  * Return -1 if we can't resolve the dependency and there is no connectivity.
815  */
816 static int
817 hammer_setup_parent_inodes(hammer_record_t record)
818 {
819         hammer_mount_t hmp = record->ip->hmp;
820         hammer_record_t depend;
821         hammer_inode_t ip;
822         int r, good;
823
824         KKASSERT(record->flush_state != HAMMER_FST_IDLE);
825         ip = record->ip;
826
827         /*
828          * If the record is already flushing, is it in our flush group?
829          *
830          * If it is in our flush group but it is a general record or a 
831          * delete-on-disk, it does not improve our connectivity (return 0),
832          * and if the target inode is not trying to destroy itself we can't
833          * allow the operation yet anyway (the second return -1).
834          */
835         if (record->flush_state == HAMMER_FST_FLUSH) {
836                 if (record->flush_group != hmp->flusher_next) {
837                         ip->flags |= HAMMER_INODE_REFLUSH;
838                         return(-1);
839                 }
840                 if (record->type == HAMMER_MEM_RECORD_ADD)
841                         return(1);
842                 /* GENERAL or DEL */
843                 return(0);
844         }
845
846         /*
847  * It must be a setup record.  Try to resolve the setup dependencies
848          * by recursing upwards so we can place ip on the flush list.
849          */
850         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
851
852         good = 0;
853         TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
854                 r = hammer_setup_parent_inodes(depend);
855                 if (r < 0 && good == 0)
856                         good = -1;
857                 if (r > 0)
858                         good = 1;
859         }
860
861         /*
862          * We can't flush ip because it has no connectivity (XXX also check
863          * nlinks for pre-existing connectivity!).  Flag it so any resolution
864          * recurses back down.
865          */
866         if (good < 0) {
867                 ip->flags |= HAMMER_INODE_REFLUSH;
868                 return(good);
869         }
870
871         /*
872          * We are go, place the parent inode in a flushing state so we can
873          * place its record in a flushing state.  Note that the parent
874          * may already be flushing.  The record must be in the same flush
875          * group as the parent.
876          */
877         if (ip->flush_state != HAMMER_FST_FLUSH)
878                 hammer_flush_inode_core(ip, HAMMER_FLUSH_RECURSION);
879         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
880         KKASSERT(record->flush_state == HAMMER_FST_SETUP);
881
882 #if 0
883         if (record->type == HAMMER_MEM_RECORD_DEL &&
884             (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
885                 /*
886                  * Regardless of flushing state we cannot sync this path if the
887                  * record represents a delete-on-disk but the target inode
888                  * is not ready to sync its own deletion.
889                  *
890                  * XXX need to count effective nlinks to determine whether
891                  * the flush is ok, otherwise removing a hardlink will
892                  * just leave the DEL record to rot.
893                  */
894                 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
895                 return(-1);
896         } else
897 #endif
898         if (ip->flush_group == ip->hmp->flusher_next) {
899                 /*
900                  * This is the record we wanted to synchronize.
901                  */
902                 record->flush_state = HAMMER_FST_FLUSH;
903                 record->flush_group = ip->flush_group;
904                 hammer_ref(&record->lock);
905                 if (record->type == HAMMER_MEM_RECORD_ADD)
906                         return(1);
907
908                 /*
909                  * A general or delete-on-disk record does not contribute
910                  * to our visibility.  We can still flush it, however.
911                  */
912                 return(0);
913         } else {
914                 /*
915                  * We couldn't resolve the dependencies, so request that the
916                  * inode be flushed when the dependencies can be resolved.
917                  */
918                 ip->flags |= HAMMER_INODE_REFLUSH;
919                 return(-1);
920         }
921 }
922
923 /*
924  * This is the core routine placing an inode into the FST_FLUSH state.
925  */
926 static void
927 hammer_flush_inode_core(hammer_inode_t ip, int flags)
928 {
929         int go_count;
930
931         /*
932          * Set flush state and prevent the flusher from cycling into
933          * the next flush group.  Do not place the ip on the list yet.
934          * Inodes in the idle state get an extra reference here; inodes in SETUP already hold one.
935          */
936         KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
937         if (ip->flush_state == HAMMER_FST_IDLE)
938                 hammer_ref(&ip->lock);
939         ip->flush_state = HAMMER_FST_FLUSH;
940         ip->flush_group = ip->hmp->flusher_next;
941         ++ip->hmp->flusher_lock;
942
943         /*
944          * We need to be able to vfsync/truncate from the backend.
945          */
946         KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
947         if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
948                 ip->flags |= HAMMER_INODE_VHELD;
949                 vref(ip->vp);
950         }
951
952         /*
953          * Figure out how many in-memory records we can actually flush
954          * (not including inode meta-data, buffers, etc).
955          */
956         if (flags & HAMMER_FLUSH_RECURSION) {
957                 go_count = 1;
958         } else {
959                 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
960                                    hammer_setup_child_callback, NULL);
961         }
962
963         /*
964          * This is a more involved test that includes go_count.  If we
965          * can't flush, flag the inode and return.  If go_count is 0 we
966          * were unable to flush any records in our rec_tree and
967          * must ignore the XDIRTY flag.
968          */
969         if (go_count == 0) {
970                 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
971                         ip->flags |= HAMMER_INODE_REFLUSH;
972                         ip->flush_state = HAMMER_FST_SETUP;
973                         if (ip->flags & HAMMER_INODE_VHELD) {
974                                 ip->flags &= ~HAMMER_INODE_VHELD;
975                                 vrele(ip->vp);
976                         }
977                         if (flags & HAMMER_FLUSH_SIGNAL) {
978                                 ip->flags |= HAMMER_INODE_RESIGNAL;
979                                 hammer_flusher_async(ip->hmp);
980                         }
981                         if (--ip->hmp->flusher_lock == 0)
982                                 wakeup(&ip->hmp->flusher_lock);
983                         return;
984                 }
985         }
986
987         /*
988          * Snapshot the state of the inode for the backend flusher.
989          *
990          * The truncation must be retained in the frontend until after
991          * we've actually performed the record deletion.
992          *
993          * NOTE: The DELETING flag is a mod flag, but it is also sticky,
994          * and stays in ip->flags.  Once set, it stays set until the
995          * inode is destroyed.
996          */
997         ip->sync_flags = (ip->flags & HAMMER_INODE_MODMASK);
998         ip->sync_trunc_off = ip->trunc_off;
999         ip->sync_ino_rec = ip->ino_rec;
1000         ip->sync_ino_data = ip->ino_data;
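        /*
         * Clear the frontend's mod flags but retain TRUNCATED: the mask
         * evaluates as (~HAMMER_INODE_MODMASK) | HAMMER_INODE_TRUNCATED,
         * so only the TRUNCATED bit survives among the mod flags.
         */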
1001         ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
1002
1003         /*
1004          * The flusher list inherits our inode and reference.
1005          */
1006         TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
1007         if (--ip->hmp->flusher_lock == 0)
1008                 wakeup(&ip->hmp->flusher_lock);
1009
1010         if (flags & HAMMER_FLUSH_SIGNAL)
1011                 hammer_flusher_async(ip->hmp);
1012 }
1013
1014 /*
1015  * Callback for scan of ip->rec_tree.  Try to include each record in our
1016  * flush.  ip->flush_group has been set but the inode has not yet been
1017  * moved into a flushing state.
1018  *
1019  * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
1020  * both inodes.
1021  *
1022  * We return 1 for any record placed or found in FST_FLUSH, which prevents
1023  * the caller from shortcutting the flush.
1024  */
1025 static int
1026 hammer_setup_child_callback(hammer_record_t rec, void *data)
1027 {
1028         hammer_inode_t target_ip;
1029         hammer_inode_t ip;
1030         int r;
1031
1032         /*
1033          * If the record has been deleted by the backend (it's being held
1034          * by the frontend in a race), just ignore it.
1035          */
1036         if (rec->flags & HAMMER_RECF_DELETED_BE)
1037                 return(0);
1038
1039         /*
1040          * If the record is in an idle state it has no dependencies and
1041          * can be flushed.
1042          */
1043         ip = rec->ip;
1044         r = 0;
1045
1046         switch(rec->flush_state) {
1047         case HAMMER_FST_IDLE:
1048                 /*
1049                  * Record has no setup dependency; we can flush it.
1050                  */
1051                 KKASSERT(rec->target_ip == NULL);
1052                 rec->flush_state = HAMMER_FST_FLUSH;
1053                 rec->flush_group = ip->flush_group;
1054                 hammer_ref(&rec->lock);
1055                 r = 1;
1056                 break;
1057         case HAMMER_FST_SETUP:
1058                 /*
1059                  * Record has a setup dependency.  Try to include the
1060                  * target ip in the flush.
1061                  *
1062                  * We have to be careful here: if we do not do the right
1063                  * thing we can lose track of dirty inodes and the system
1064                  * will lock up trying to allocate buffers.
1065                  */
1066                 target_ip = rec->target_ip;
1067                 KKASSERT(target_ip != NULL);
1068                 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
1069                 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
1070                         /*
1071                          * If the target IP is already flushing in our group
1072                          * we are golden, otherwise make sure the target
1073                          * reflushes.
1074                          */
1075                         if (target_ip->flush_group == ip->flush_group) {
1076                                 rec->flush_state = HAMMER_FST_FLUSH;
1077                                 rec->flush_group = ip->flush_group;
1078                                 hammer_ref(&rec->lock);
1079                                 r = 1;
1080                         } else {
1081                                 target_ip->flags |= HAMMER_INODE_REFLUSH;
1082                         }
1083                 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
1084                         /*
1085                          * If the target IP is not flushing we can force
1086                          * it to flush.  Even if it is unable to write out
1087                          * any of its own records, we have at least one in
1088                          * hand that we CAN deal with.
1089                          */
1090                         rec->flush_state = HAMMER_FST_FLUSH;
1091                         rec->flush_group = ip->flush_group;
1092                         hammer_ref(&rec->lock);
1093                         hammer_flush_inode_core(target_ip,
1094                                                 HAMMER_FLUSH_RECURSION);
1095                         r = 1;
1096                 } else {
1097                         /*
1098                          * General or delete-on-disk record.
1099                          *
1100                          * XXX this needs help.  If a delete-on-disk we could
1101                          * XXX this needs help.  If this is a delete-on-disk
1102                          * record we could disconnect the target.  If the target
1103                          * has its own dependencies they really need to be flushed.
1104                          * XXX
1105                          */
1106                         rec->flush_state = HAMMER_FST_FLUSH;
1107                         rec->flush_group = ip->flush_group;
1108                         hammer_ref(&rec->lock);
1109                         hammer_flush_inode_core(target_ip,
1110                                                 HAMMER_FLUSH_RECURSION);
1111                         r = 1;
1112                 }
1113                 break;
1114         case HAMMER_FST_FLUSH:
1115                 /* 
1116                  * Record already associated with a flush group.  It had
1117                  * better be ours.
1118                  */
1119                 KKASSERT(rec->flush_group == ip->flush_group);
1120                 r = 1;
1121                 break;
1122         }
1123         return(r);
1124 }
1125
1126 /*
1127  * Wait for a previously queued flush to complete
1128  */
1129 void
1130 hammer_wait_inode(hammer_inode_t ip)
1131 {
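        /*
         * Set FLUSHW so hammer_flush_inode_done() knows to wake us, then
         * sleep on ip->flags.  Re-test the flush state in case of a
         * pipelined or re-queued flush.
         */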
1132         while (ip->flush_state != HAMMER_FST_IDLE) {
1133                 ip->flags |= HAMMER_INODE_FLUSHW;
1134                 tsleep(&ip->flags, 0, "hmrwin", 0);
1135         }
1136 }
1137
1138 /*
1139  * Called by the backend code when a flush has been completed.
1140  * The inode has already been removed from the flush list.
1141  *
1142  * A pipelined flush can occur, in which case we must re-enter the
1143  * inode on the list and re-copy its fields.
1144  */
1145 void
1146 hammer_flush_inode_done(hammer_inode_t ip)
1147 {
1148         struct bio *bio;
1149         int dorel = 0;
1150
1151         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
1152
1153         /*
1154          * Allow BIOs to queue to the inode's primary bioq again.
1155          */
1156         ip->flags &= ~HAMMER_INODE_WRITE_ALT;
1157
1158         /*
1159          * Merge left-over flags back into the frontend and fix the state.
1160          */
1161         ip->flags |= ip->sync_flags;
1162
1163         /*
1164          * The backend may have adjusted nlinks, so if the adjusted nlinks
1165          * does not match the frontend's, set the frontend's RDIRTY flag again.
1166          */
1167         if (ip->ino_rec.ino_nlinks != ip->sync_ino_rec.ino_nlinks)
1168                 ip->flags |= HAMMER_INODE_RDIRTY;
1169
1170         /*
1171          * Reflush any BIOs that wound up in the alt list.  Our inode will
1172          * also wind up at the end of the flusher's list.
1173          */
1174         while ((bio = TAILQ_FIRST(&ip->bio_alt_list)) != NULL) {
1175                 TAILQ_REMOVE(&ip->bio_alt_list, bio, bio_act);
1176                 TAILQ_INSERT_TAIL(&ip->bio_list, bio, bio_act);
1177         }
1178         /*
1179          * Fix up the dirty buffer status.
1180          */
1181         if (TAILQ_FIRST(&ip->bio_list) ||
1182             (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree))) {
1183                 ip->flags |= HAMMER_INODE_BUFS;
1184         }
1185
1186         /*
1187          * Re-set the XDIRTY flag if some of the inode's in-memory records
1188          * could not be flushed.
1189          */
1190         if (RB_ROOT(&ip->rec_tree))
1191                 ip->flags |= HAMMER_INODE_XDIRTY;
1192
1193         /*
1194          * Do not lose track of inodes which no longer have vnode
1195          * associations, otherwise they may never get flushed again.
1196          */
1197         if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
1198                 ip->flags |= HAMMER_INODE_REFLUSH;
1199
1200         /*
1201          * Adjust flush_state.  The target state (idle or setup) shouldn't
1202          * be terribly important since we will reflush if we really need
1203          * to do anything. XXX
1204          */
1205         if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
1206                 ip->flush_state = HAMMER_FST_IDLE;
1207                 dorel = 1;
1208         } else {
1209                 ip->flush_state = HAMMER_FST_SETUP;
1210         }
1211
1212         /*
1213          * Clean up the vnode ref
1214          */
1215         if (ip->flags & HAMMER_INODE_VHELD) {
1216                 ip->flags &= ~HAMMER_INODE_VHELD;
1217                 vrele(ip->vp);
1218         }
1219
1220         /*
1221          * If the frontend made more changes and requested another flush,
1222          * then try to get it running.
1223          */
1224         if (ip->flags & HAMMER_INODE_REFLUSH) {
1225                 ip->flags &= ~HAMMER_INODE_REFLUSH;
1226                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
1227                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
1228                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1229                 } else {
1230                         hammer_flush_inode(ip, 0);
1231                 }
1232         }
1233
1234         /*
1235          * Finally, if the frontend is waiting for a flush to complete,
1236          * wake it up.
1237          */
1238         if (ip->flush_state != HAMMER_FST_FLUSH) {
1239                 if (ip->flags & HAMMER_INODE_FLUSHW) {
1240                         ip->flags &= ~HAMMER_INODE_FLUSHW;
1241                         wakeup(&ip->flags);
1242                 }
1243         }
1244         if (dorel)
1245                 hammer_rel_inode(ip, 0);
1246 }
1247
1248 /*
1249  * Called from hammer_sync_inode() to synchronize in-memory records
1250  * to the media.
1251  */
1252 static int
1253 hammer_sync_record_callback(hammer_record_t record, void *data)
1254 {
1255         hammer_cursor_t cursor = data;
1256         hammer_transaction_t trans = cursor->trans;
1257         int error;
1258
1259         /*
1260          * Skip records that do not belong to the current flush.
1261          */
1262         if (record->flush_state != HAMMER_FST_FLUSH)
1263                 return(0);
1264         KKASSERT((record->flags & HAMMER_RECF_DELETED_BE) == 0);
1265 #if 1
1266         if (record->flush_group != record->ip->flush_group) {
1267                 kprintf("sync_record %p ip %p bad flush group %d %d\n", record, record->ip, record->flush_group ,record->ip->flush_group);
1268                 Debugger("blah2");
1269                 return(0);
1270         }
1271 #endif
1272         KKASSERT(record->flush_group == record->ip->flush_group);
1273
1274         /*
1275          * Interlock the record using the BE flag.  Once BE is set the
1276          * frontend cannot change the state of FE.
1277          *
1278          * NOTE: If FE is set prior to us setting BE we still sync the
1279          * record out, but the flush completion code converts it to 
1280          * a delete-on-disk record instead of destroying it.
1281          */
1282         KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
1283         record->flags |= HAMMER_RECF_INTERLOCK_BE;
1284
1285         /*
1286          * If DELETED_FE is set we may have already sent dependent pieces
1287          * to the disk and we must flush the record as if it hadn't been
1288          * deleted.  This creates a bit of a mess because we have to
1289          * have ip_sync_record convert the record to MEM_RECORD_DEL before
1290          * it inserts the B-Tree record.  Otherwise the media sync might
1291          * be visible to the frontend.
1292          */
1293         if (record->flags & HAMMER_RECF_DELETED_FE) {
1294                 if (record->type == HAMMER_MEM_RECORD_ADD) {
1295                         record->flags |= HAMMER_RECF_CONVERT_DELETE;
1296                 } else {
1297                         KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
1298                         return(0);
1299                 }
1300         }
1301
1302         /*
1303          * Assign the create_tid for new records.  Deletions already
1304          * have the record's entire key properly set up.
1305          */
1306         if (record->type != HAMMER_MEM_RECORD_DEL)
1307                 record->rec.inode.base.base.create_tid = trans->tid;
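        /*
         * Write the record out; on EDEADLK tear down the cursor,
         * reinitialize it, and retry.
         */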
1308         for (;;) {
1309                 error = hammer_ip_sync_record_cursor(cursor, record);
1310                 if (error != EDEADLK)
1311                         break;
1312                 hammer_done_cursor(cursor);
1313                 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
1314                                            record->ip);
1315                 if (error)
1316                         break;
1317         }
1318         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
1319
1320         if (error) {
1321                 error = -error;
1322                 if (error != -ENOSPC) {
1323                         kprintf("hammer_sync_record_callback: sync failed rec "
1324                                 "%p, error %d\n", record, error);
1325                         Debugger("sync failed rec");
1326                 }
1327         }
1328         hammer_flush_record_done(record, error);
1329         return(error);
1330 }
1331
1332 /*
1333  * XXX error handling
1334  */
1335 int
1336 hammer_sync_inode(hammer_inode_t ip)
1337 {
1338         struct hammer_transaction trans;
1339         struct hammer_cursor cursor;
1340         struct bio *bio;
1341         hammer_record_t depend;
1342         hammer_record_t next;
1343         int error, tmp_error;
1344         u_int64_t nlinks;
1345
1346         if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
1347                 return(0);
1348
1349         hammer_start_transaction_fls(&trans, ip->hmp);
1350         error = hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
1351         if (error)
1352                 goto done;
1353
1354         /*
1355          * Any directory records referencing this inode which are not in
1356          * our current flush group must adjust our nlink count for the
1357          * purposes of synchronization to disk.
1358          *
1359          * Records which are in our flush group can be unlinked from our
1360          * inode now, potentially allowing the inode to be physically
1361          * deleted.
1362          */
1363         nlinks = ip->ino_rec.ino_nlinks;
1364         next = TAILQ_FIRST(&ip->target_list);
1365         while ((depend = next) != NULL) {
1366                 next = TAILQ_NEXT(depend, target_entry);
1367                 if (depend->flush_state == HAMMER_FST_FLUSH &&
1368                     depend->flush_group == ip->hmp->flusher_act) {
1369                         /*
1370                          * If this is an ADD that was deleted by the frontend
1371                          * the frontend nlinks count will have already been
1372                          * decremented, but the backend is going to sync its
1373                          * directory entry and must account for it.  The
1374                          * record will be converted to a delete-on-disk when
1375                          * it gets synced.
1376                          *
1377                          * If the ADD was not deleted by the frontend we
1378                          * can remove the dependency from our target_list.
1379                          */
1380                         if (depend->flags & HAMMER_RECF_DELETED_FE) {
1381                                 ++nlinks;
1382                         } else {
1383                                 TAILQ_REMOVE(&ip->target_list, depend,
1384                                              target_entry);
1385                                 depend->target_ip = NULL;
1386                         }
1387                 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
1388                         /*
1389                          * Not part of our flush group
1390                          */
1391                         KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
1392                         switch(depend->type) {
1393                         case HAMMER_MEM_RECORD_ADD:
1394                                 --nlinks;
1395                                 break;
1396                         case HAMMER_MEM_RECORD_DEL:
1397                                 ++nlinks;
1398                                 break;
1399                         default:
1400                                 break;
1401                         }
1402                 }
1403         }
1404
1405         /*
1406          * Set dirty if we had to modify the link count.
1407          */
1408         if (ip->sync_ino_rec.ino_nlinks != nlinks) {
1409                 KKASSERT((int64_t)nlinks >= 0);
1410                 ip->sync_ino_rec.ino_nlinks = nlinks;
1411                 ip->sync_flags |= HAMMER_INODE_RDIRTY;
1412         }
1413
1414         /*
1415          * Queue up as many dirty buffers as we can then set a flag to
1416          * cause any further BIOs to go to the alternative queue.
1417          */
1418         if (ip->flags & HAMMER_INODE_VHELD)
1419                 error = vfsync(ip->vp, MNT_NOWAIT, 1, NULL, NULL);
1420         ip->flags |= HAMMER_INODE_WRITE_ALT;
1421
1422         /*
1423          * The buffer cache may contain dirty buffers beyond the inode
1424          * state we copied from the frontend to the backend.  Because
1425          * we are syncing our buffer cache on the backend, resync
1426          * the truncation point and the file size so we don't wipe out
1427          * any data.
1428          *
1429          * Syncing the buffer cache on the frontend has serious problems
1430          * because it prevents us from passively queueing dirty inodes
1431          * to the backend (the BIO's could stall indefinitely).
1432          */
1433         if (ip->flags & HAMMER_INODE_TRUNCATED) {
1434                 ip->sync_trunc_off = ip->trunc_off;
1435                 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
1436         }
1437         if (ip->sync_ino_rec.ino_size != ip->ino_rec.ino_size) {
1438                 ip->sync_ino_rec.ino_size = ip->ino_rec.ino_size;
1439                 ip->sync_flags |= HAMMER_INODE_RDIRTY;
1440         }
1441
1442         /*
1443          * If there is a truncation queued, destroy any data past the (aligned)
1444          * truncation point.  Userland will have dealt with the buffer
1445          * containing the truncation point for us.
1446          *
1447          * We don't flush pending frontend data buffers until after we've
1448          * dealt with the truncation.
1449          *
1450          * Don't bother if the inode is or has been deleted.
1451          */
1452         if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
1453                 /*
1454                  * Interlock trunc_off.  The VOP front-end may continue to
1455                  * make adjustments to it while we are blocked.
1456                  */
1457                 off_t trunc_off;
1458                 off_t aligned_trunc_off;
1459
1460                 trunc_off = ip->sync_trunc_off;
1461                 aligned_trunc_off = (trunc_off + HAMMER_BUFMASK) &
1462                                     ~HAMMER_BUFMASK64;
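                /*
                 * aligned_trunc_off rounds the truncation point up to the
                 * next buffer boundary.  For example, assuming 16KB HAMMER
                 * buffers (HAMMER_BUFMASK == 0x3fff), a trunc_off of 0x5001
                 * becomes 0x8000, while an already-aligned 0x4000 is left
                 * unchanged.
                 */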
1463
1464                 /*
1465                  * Delete any whole blocks on-media.  The front-end has
1466                  * already cleaned out any partial block and made it
1467                  * pending.  The front-end may have updated trunc_off
1468                  * while we were blocked so do not just unconditionally
1469                  * set it to the maximum offset.
1470                  */
1471                 error = hammer_ip_delete_range(&cursor, ip,
1472                                                 aligned_trunc_off,
1473                                                 0x7FFFFFFFFFFFFFFFLL);
1474                 if (error)
1475                         Debugger("hammer_ip_delete_range errored");
1476                 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
1477                 if (ip->trunc_off >= trunc_off) {
1478                         ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
1479                         ip->flags &= ~HAMMER_INODE_TRUNCATED;
1480                 }
1481         } else {
1482                 error = 0;
1483         }
1484
1485         /*
1486          * Now sync related records.  These will typically be directory
1487          * entries or delete-on-disk records.
1488          *
1489          * Not all records will be flushed, but clear XDIRTY anyway.  We
1490          * will set it again in the frontend hammer_flush_inode_done() 
1491          * if records remain.
1492          */
1493         if (error == 0) {
1494                 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1495                                     hammer_sync_record_callback, &cursor);
1496                 if (tmp_error < 0)
1497                         tmp_error = -tmp_error;
1498                 if (tmp_error)
1499                         error = tmp_error;
1500                 if (error == 0)
1501                         ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
1502         }
1503
1504         /*
1505          * If we are deleting the inode the frontend had better not have
1506          * any active references on elements making up the inode.
1507          */
1508         if (error == 0 && ip->sync_ino_rec.ino_nlinks == 0 &&
1509             RB_EMPTY(&ip->rec_tree) &&
1510             (ip->sync_flags & HAMMER_INODE_DELETING) &&
1511             (ip->flags & HAMMER_INODE_DELETED) == 0) {
1512                 int count1 = 0;
1513
1514                 hkprintf("Y");
1515                 ip->flags |= HAMMER_INODE_DELETED;
1516                 error = hammer_ip_delete_range_all(&cursor, ip, &count1);
1517                 if (error == 0) {
1518                         ip->sync_flags &= ~HAMMER_INODE_DELETING;
1519                         ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
1520                         KKASSERT(RB_EMPTY(&ip->rec_tree));
1521
1522                         /*
1523                          * Set delete_tid in both the frontend and backend
1524                          * copy of the inode record.  The DELETED flag handles
1525                          * this, do not set RDIRTY.
1526                          */
1527                         ip->ino_rec.base.base.delete_tid = trans.tid;
1528                         ip->sync_ino_rec.base.base.delete_tid = trans.tid;
1529
1530                         /*
1531                          * Adjust the inode count in the volume header
1532                          */
1533                         hammer_modify_volume_field(&trans, trans.rootvol,
1534                                                    vol0_stat_inodes);
1535                         --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
1536                         hammer_modify_volume_done(trans.rootvol);
1537                 } else {
1538                         ip->flags &= ~HAMMER_INODE_DELETED;
1539                         Debugger("hammer_ip_delete_range_all errored");
1540                 }
1541         }
1542
1543         /*
1544          * Flush any queued BIOs.  These will just biodone() the IO's if
1545          * the inode has been deleted.
1546          */
1547         while ((bio = TAILQ_FIRST(&ip->bio_list)) != NULL) {
1548                 TAILQ_REMOVE(&ip->bio_list, bio, bio_act);
1549                 tmp_error = hammer_dowrite(&cursor, ip, bio);
1550                 if (tmp_error)
1551                         error = tmp_error;
1552         }
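        /*
         * All frontend buffers queued to us have now been written out
         * (or thrown away if the inode was deleted), so the BUFS state
         * can be cleared.
         */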
1553         ip->sync_flags &= ~HAMMER_INODE_BUFS;
1554
1555         if (error)
1556                 Debugger("RB_SCAN errored");
1557
1558         /*
1559          * Now update the inode's on-disk inode-data and/or on-disk record.
1560          * DELETED and ONDISK are managed only in ip->flags.
1561          */
1562         switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
1563         case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
1564                 /*
1565                  * If deleted and on-disk, don't set any additional flags;
1566                  * the delete flag takes care of things.
1567                  *
1568                  * Clear flags which may have been set by the frontend.
1569                  */
1570                 ip->sync_flags &= ~(HAMMER_INODE_RDIRTY|HAMMER_INODE_DDIRTY|
1571                                     HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
1572                                     HAMMER_INODE_DELETING);
1573                 break;
1574         case HAMMER_INODE_DELETED:
1575                 /*
1576                  * Take care of the case where a deleted inode was never
1577                  * flushed to the disk in the first place.
1578                  *
1579                  * Clear flags which may have been set by the frontend.
1580                  */
1581                 ip->sync_flags &= ~(HAMMER_INODE_RDIRTY|HAMMER_INODE_DDIRTY|
1582                                     HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
1583                                     HAMMER_INODE_DELETING);
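                /*
                 * Destroy any in-memory records still associated with the
                 * inode.  They never made it to the media, so mark them
                 * deleted on both the frontend and backend and drop the
                 * reference, which frees them.
                 */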
1584                 while (RB_ROOT(&ip->rec_tree)) {
1585                         hammer_record_t record = RB_ROOT(&ip->rec_tree);
1586                         hammer_ref(&record->lock);
1587                         KKASSERT(record->lock.refs == 1);
1588                         record->flags |= HAMMER_RECF_DELETED_FE;
1589                         record->flags |= HAMMER_RECF_DELETED_BE;
1590                         hammer_rel_mem_record(record);
1591                 }
1592                 break;
1593         case HAMMER_INODE_ONDISK:
1594                 /*
1595                  * If already on-disk, do not set any additional flags.
1596                  */
1597                 break;
1598         default:
1599                 /*
1600                  * If not on-disk and not deleted, set both dirty flags
1601                  * to force an initial record to be written.  Also set
1602                  * the create_tid for the inode.
1603                  *
1604                  * Set create_tid in both the frontend and backend
1605                  * copy of the inode record.
1606                  */
1607                 ip->ino_rec.base.base.create_tid = trans.tid;
1608                 ip->sync_ino_rec.base.base.create_tid = trans.tid;
1609                 ip->sync_flags |= HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY;
1610                 break;
1611         }
1612
1613         /*
1614          * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
1615          * is already on-disk the old record is marked as deleted.
1616          *
1617          * If DELETED is set hammer_update_inode() will delete the existing
1618          * record without writing out a new one.
1619          *
1620          * If *ONLY* the ITIMES flag is set we can update the record in-place.
1621          */
1622         if (ip->flags & HAMMER_INODE_DELETED) {
1623                 error = hammer_update_inode(&cursor, ip);
1624         } else 
1625         if ((ip->sync_flags & (HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY |
1626                                HAMMER_INODE_ITIMES)) == HAMMER_INODE_ITIMES) {
1627                 error = hammer_update_itimes(&cursor, ip);
1628         } else
1629         if (ip->sync_flags & (HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY |
1630                               HAMMER_INODE_ITIMES)) {
1631                 error = hammer_update_inode(&cursor, ip);
1632         }
1633         if (error)
1634                 Debugger("hammer_update_itimes/inode errored");
1635 done:
1636         /*
1637          * Save the TID we used to sync the inode with to make sure we
1638          * do not improperly reuse it.
1639          */
1640         hammer_done_cursor(&cursor);
1641         hammer_done_transaction(&trans);
1642         return(error);
1643 }
1644
1645 /*
1646  * This routine is called when the OS is no longer actively referencing
1647  * the inode (but might still be keeping it cached), or when releasing
1648  * the last reference to an inode.
1649  *
1650  * At this point if the inode's nlinks count is zero we want to destroy
1651  * it, which may mean destroying it on-media too.
1652  */
1653 void
1654 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
1655 {
1656         struct vnode *vp;
1657
1658         /*
1659          * Set the DELETING flag when the link count drops to 0 and the
1660          * OS no longer has any opens on the inode.
1661          *
1662          * The backend will clear DELETING (a mod flag) and set DELETED
1663          * (a state flag) when it is actually able to perform the
1664          * operation.
1665          */
1666         if (ip->ino_rec.ino_nlinks == 0 &&
1667             (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
1668                 ip->flags |= HAMMER_INODE_DELETING;
1669                 ip->flags |= HAMMER_INODE_TRUNCATED;
1670                 ip->trunc_off = 0;
1671                 vp = NULL;
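                /*
                 * Throw away any buffer cache data and reset the pager
                 * size so no further data can be instantiated for the
                 * doomed inode.  Acquire a vnode reference first if the
                 * caller did not pass one in.
                 */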
1672                 if (getvp) {
1673                         if (hammer_get_vnode(ip, &vp) != 0)
1674                                 return;
1675                 }
1676                 if (ip->vp) {
1677                         vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
1678                         vnode_pager_setsize(ip->vp, 0);
1679                 }
1680                 if (getvp) {
1681                         vput(vp);
1682                 }
1683         }
1684 }
1685
1686 /*
1687  * Re-test an inode when a dependency has gone away to see if we
1688  * can chain flush it.
1689  */
1690 void
1691 hammer_test_inode(hammer_inode_t ip)
1692 {
1693         if (ip->flags & HAMMER_INODE_REFLUSH) {
1694                 ip->flags &= ~HAMMER_INODE_REFLUSH;
1695                 hammer_ref(&ip->lock);
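                /*
                 * If the deferred flush was requested with a signal
                 * (RESIGNAL), re-issue it with HAMMER_FLUSH_SIGNAL;
                 * otherwise queue a normal flush.
                 */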
1696                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
1697                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
1698                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1699                 } else {
1700                         hammer_flush_inode(ip, 0);
1701                 }
1702                 hammer_rel_inode(ip, 0);
1703         }
1704 }
1705