/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.75 2008/06/14 01:42:13 dillon Exp $
 */

#include "hammer.h"
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/buf2.h>

static int      hammer_unload_inode(struct hammer_inode *ip);
static void     hammer_flush_inode_core(hammer_inode_t ip, int flags);
static int      hammer_setup_child_callback(hammer_record_t rec, void *data);
static int      hammer_setup_parent_inodes(hammer_inode_t ip);
static int      hammer_setup_parent_inodes_helper(hammer_record_t record);
static void     hammer_inode_wakereclaims(hammer_inode_t ip);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(ap->a_vp);
                return(0);
        }

        /*
         * If the inode no longer has visibility in the filesystem and is
         * fairly clean, try to recycle it immediately.  This can deadlock
         * in vfsync() if we aren't careful.
         *
         * Do not queue the inode to the flusher if we still have visibility,
         * otherwise namespace calls such as chmod will unnecessarily generate
         * multiple inode updates.
         */
        hammer_inode_unloadable_check(ip, 0);
        if (ip->ino_data.nlinks == 0) {
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
                else
                        vrecycle(ap->a_vp);
        }
        return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_reclaim reclaim;
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct vnode *vp;
        int delay;

        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
                vp->v_data = NULL;
                ip->vp = NULL;

                /*
                 * Setup our reclaim pipeline.  We only let so many detached
                 * (and dirty) inodes build up before we start blocking.  Do
                 * not bother tracking the immediate increment/decrement if
                 * the inode is not actually dirty.
                 *
                 * When we block we don't care *which* inode has finished
                 * reclaiming, as long as one does.
                 */
                if ((ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
                    ((ip->flags|ip->sync_flags) & HAMMER_INODE_MODMASK)) {
                        ++hammer_count_reclaiming;
                        ++hmp->inode_reclaims;
                        ip->flags |= HAMMER_INODE_RECLAIM;
                        if (hmp->inode_reclaims > HAMMER_RECLAIM_PIPESIZE) {
                                reclaim.okydoky = 0;
                                TAILQ_INSERT_TAIL(&hmp->reclaim_list,
                                                  &reclaim, entry);
                        } else {
                                reclaim.okydoky = 1;
                        }
                } else {
                        reclaim.okydoky = 1;
                }
                hammer_rel_inode(ip, 1);

                /*
                 * Reclaim pipeline.  We can't let too many reclaimed inodes
                 * build up in the flusher or the flusher loses its locality
                 * of reference, or worse blows out our memory.  Once we have
                 * exceeded the reclaim pipe size, start slowing down.  Our
                 * imposed delay can be cut short if the flusher catches up
                 * to us.
                 */
                if (reclaim.okydoky == 0) {
                        delay = (hmp->inode_reclaims -
                                 HAMMER_RECLAIM_PIPESIZE) * hz /
                                HAMMER_RECLAIM_PIPESIZE;
                        if (delay <= 0)
                                delay = 1;
                        hammer_flusher_async(hmp);
                        if (reclaim.okydoky == 0) {
                                tsleep(&reclaim, 0, "hmrrcm", delay);
                        }
                        if (reclaim.okydoky == 0) {
                                TAILQ_REMOVE(&hmp->reclaim_list, &reclaim,
                                             entry);
                        }
                }
        }
        return(0);
}
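
/*
 * Worked example of the delay computation above (illustrative): the
 * sleep scales linearly with the reclaim backlog beyond the pipe size.
 * With inode_reclaims at twice HAMMER_RECLAIM_PIPESIZE the sleep works
 * out to a full second:
 *
 *      delay = (2*PIPESIZE - PIPESIZE) * hz / PIPESIZE = hz ticks
 */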

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
        hammer_mount_t hmp;
        struct vnode *vp;
        int error = 0;

        hmp = ip->hmp;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;
                        vp->v_type =
                                hammer_get_vnode_type(ip->ino_data.obj_type);

                        hammer_inode_wakereclaims(ip);

                        switch(ip->ino_data.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT &&
                            ip->obj_asof == hmp->asof) {
                                vp->v_flag |= VROOT;
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
                        if (vp->v_type == VREG)
                                vinitvmio(vp, ip->ino_data.size);
                        break;
                }

                /*
                 * Loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp)
                                break;
                        vput(vp);
                }
        }
        *vpp = vp;
        return(error);
}
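
/*
 * Usage sketch (illustrative, local names are hypothetical): a typical
 * frontend path resolves the inode first and then attaches a vnode to
 * it, dropping the inode reference once the vnode association holds
 * its own.  Error handling is abbreviated.
 *
 *      ip = hammer_get_inode(trans, NULL, obj_id, asof, 0, &error);
 *      if (error == 0) {
 *              error = hammer_get_vnode(ip, &vp);
 *              hammer_rel_inode(ip, 0);
 *      }
 */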

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, struct hammer_node **cache,
                 u_int64_t obj_id, hammer_tid_t asof, int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        if (hmp->ronly)
                ip->flags |= HAMMER_INODE_RO;
        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        /*
         * Locate the on-disk inode.
         */
retry:
        hammer_init_cursor(trans, &cursor, cache, NULL);
        cursor.key_beg.localization = HAMMER_LOCALIZE_INODE;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;
        cursor.asof = iinfo.obj_asof;
        cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
                       HAMMER_CURSOR_ASOF;

        *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
                ip->ino_data = cursor.data->inode;
                hammer_cache_node(cursor.node, &ip->cache[0]);
                if (cache)
                        hammer_cache_node(cursor.node, cache);
        }

        /*
         * On success insert the inode into the in-memory inode RB tree.
         * It is possible to race another lookup's insertion of the same
         * inode, so deal with that condition too.
         *
         * The cursor's locked node interlocks against others creating and
         * destroying ip while we were blocked.
         */
        if (*errorp == 0) {
                hammer_ref(&ip->lock);
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_uncache_node(&ip->cache[0]);
                        hammer_uncache_node(&ip->cache[1]);
                        KKASSERT(ip->lock.refs == 1);
                        --hammer_count_inodes;
                        --hmp->count_inodes;
                        kfree(ip, M_HAMMER);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
                /*
                 * Do not panic on read-only accesses which fail, particularly
                 * historical accesses where the snapshot might not have
                 * complete connectivity.
                 */
                if ((flags & HAMMER_INODE_RO) == 0) {
                        kprintf("hammer_get_inode: failed ip %p obj_id %016llx cursor %p error %d\n",
                                ip, ip->obj_id, &cursor, *errorp);
                        Debugger("x");
                }
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }
                hmp->rsv_databufs -= ip->rsv_databufs;
                ip->rsv_databufs = 0;                          /* sanity */

                --hammer_count_inodes;
                --hmp->count_inodes;
                kfree(ip, M_HAMMER);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);
        return (ip);
}
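
/*
 * The allocate-then-arbitrate race pattern used above, reduced to a
 * sketch (illustrative): the in-memory inode RB tree decides the
 * winner; the loser frees its copy and retries the cached lookup.
 *
 *      ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
 *      ... load the on-disk inode into ip ...
 *      if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
 *              kfree(ip, M_HAMMER);    (lost the race)
 *              goto loop;              (winner is now in the tree)
 *      }
 */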

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.
 *
 * The inode is created in-memory.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred, hammer_inode_t dip,
                    struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;

        hmp = trans->hmp;
        ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = hammer_alloc_objid(trans, dip);
        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES;

        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        ip->ino_leaf.atime = trans->time;
        ip->ino_data.mtime = trans->time;
        ip->ino_data.size = 0;
        ip->ino_data.nlinks = 0;

        /*
         * A nohistory designator on the parent directory is inherited by
         * the child.
         */
        ip->ino_data.uflags = dip->ino_data.uflags &
                              (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);

        ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_leaf.base.localization = HAMMER_LOCALIZE_INODE;
        ip->ino_leaf.base.obj_id = ip->obj_id;
        ip->ino_leaf.base.key = 0;
        ip->ino_leaf.base.create_tid = 0;
        ip->ino_leaf.base.delete_tid = 0;
        ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;
        ip->ino_data.parent_obj_id = (dip) ? dip->ino_leaf.base.obj_id : 0;

        switch(ip->ino_leaf.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        xuid = hammer_to_unix_xid(&dip->ino_data.uid);
        xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
                                     &vap->va_mode);
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
        else
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
        else
                ip->ino_data.gid = dip->ino_data.gid;

        hammer_ref(&ip->lock);
        if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                hammer_unref(&ip->lock);
                panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
        }
        *ipp = ip;
        return(0);
}
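
/*
 * Illustrative summary of the credential selection above: an explicit
 * uuid in the vattr wins, then a numeric va_uid/va_gid, and finally
 * the fallback, which is the uid derived from the parent directory via
 * vop_helper_create_uid() for the uid and the parent directory's gid
 * for the gid.
 */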

/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        hammer_record_t record;
        int error;

retry:
        error = 0;

        /*
         * If the inode has a presence on-disk then locate it and mark
         * it deleted, setting DELONDISK.
         *
         * The record may or may not be physically deleted, depending on
         * the retention policy.
         */
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
            HAMMER_INODE_ONDISK) {
                hammer_normalize_cursor(cursor);
                cursor->key_beg.localization = HAMMER_LOCALIZE_INODE;
                cursor->key_beg.obj_id = ip->obj_id;
                cursor->key_beg.key = 0;
                cursor->key_beg.create_tid = 0;
                cursor->key_beg.delete_tid = 0;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
                cursor->key_beg.obj_type = 0;
                cursor->asof = ip->obj_asof;
                cursor->flags &= ~HAMMER_CURSOR_INITMASK;
                cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
                cursor->flags |= HAMMER_CURSOR_BACKEND;

                error = hammer_btree_lookup(cursor);
                if (hammer_debug_inode)
                        kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
                if (error) {
                        kprintf("error %d\n", error);
                        Debugger("hammer_update_inode");
                }

                if (error == 0) {
                        error = hammer_ip_delete_record(cursor, ip, trans->tid);
                        if (hammer_debug_inode)
                                kprintf(" error %d\n", error);
                        if (error && error != EDEADLK) {
                                kprintf("error %d\n", error);
                                Debugger("hammer_update_inode2");
                        }
                        if (error == 0) {
                                ip->flags |= HAMMER_INODE_DELONDISK;
                        }
                        if (cursor->node)
                                hammer_cache_node(cursor->node, &ip->cache[0]);
                }
                if (error == EDEADLK) {
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("IPDED %p %d\n", ip, error);
                        if (error == 0)
                                goto retry;
                }
        }

        /*
         * Ok, write out the initial record or a new record (after deleting
         * the old one), unless the DELETED flag is set.  This routine will
         * clear DELONDISK if it writes out a record.
         *
         * Update our inode statistics if this is the first application of
         * the inode on-disk.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
                /*
                 * Generate a record and write it to the media
                 */
                record = hammer_alloc_mem_record(ip, 0);
                record->type = HAMMER_MEM_RECORD_INODE;
                record->flush_state = HAMMER_FST_FLUSH;
                record->leaf = ip->sync_ino_leaf;
                record->leaf.base.create_tid = trans->tid;
                record->leaf.data_len = sizeof(ip->sync_ino_data);
                record->data = (void *)&ip->sync_ino_data;
                record->flags |= HAMMER_RECF_INTERLOCK_BE;
                for (;;) {
                        error = hammer_ip_sync_record_cursor(cursor, record);
                        if (hammer_debug_inode)
                                kprintf("GENREC %p rec %08x %d\n",
                                        ip, record->flags, error);
                        if (error != EDEADLK)
                                break;
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("GENREC reinit %d\n", error);
                        if (error)
                                break;
                }
                if (error) {
                        kprintf("error %d\n", error);
                        Debugger("hammer_update_inode3");
                }

                /*
                 * The record isn't managed by the inode's record tree,
                 * destroy it whether we succeed or fail.
                 */
                record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
                record->flags |= HAMMER_RECF_DELETED_FE;
                record->flush_state = HAMMER_FST_IDLE;
                hammer_rel_mem_record(record);

                /*
                 * Finish up.
                 */
                if (error == 0) {
                        if (hammer_debug_inode)
                                kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
                        ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                            HAMMER_INODE_ITIMES);
                        ip->flags &= ~HAMMER_INODE_DELONDISK;

                        /*
                         * Root volume count of inodes
                         */
                        if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
                                hammer_modify_volume_field(trans,
                                                           trans->rootvol,
                                                           vol0_stat_inodes);
                                ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans->rootvol);
                                ip->flags |= HAMMER_INODE_ONDISK;
                                if (hammer_debug_inode)
                                        kprintf("NOWONDISK %p\n", ip);
                        }
                }
        }

        /*
         * If the inode has been destroyed, clean out any left-over flags
         * that may have been set by the frontend.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                    HAMMER_INODE_ITIMES);
        }
        return(error);
}
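
/*
 * Illustrative summary of the on-disk flag transitions performed by
 * hammer_update_inode() above:
 *
 *      ONDISK                  -> ONDISK|DELONDISK  (old record deleted)
 *      ONDISK|DELONDISK        -> ONDISK            (new record written)
 *      (neither)               -> ONDISK            (first record written,
 *                                                    vol0_stat_inodes bumped)
 */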

/*
 * Update only the itimes fields.  This is done non-historically.  The
 * record is updated in-place on the disk.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        struct hammer_btree_leaf_elm *leaf;
        int error;

retry:
        error = 0;
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
            HAMMER_INODE_ONDISK) {
                hammer_normalize_cursor(cursor);
                cursor->key_beg.localization = HAMMER_LOCALIZE_INODE;
                cursor->key_beg.obj_id = ip->obj_id;
                cursor->key_beg.key = 0;
                cursor->key_beg.create_tid = 0;
                cursor->key_beg.delete_tid = 0;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
                cursor->key_beg.obj_type = 0;
                cursor->asof = ip->obj_asof;
                cursor->flags &= ~HAMMER_CURSOR_INITMASK;
                cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
                cursor->flags |= HAMMER_CURSOR_BACKEND;

                error = hammer_btree_lookup(cursor);
                if (error) {
                        kprintf("error %d\n", error);
                        Debugger("hammer_update_itimes1");
                }
                if (error == 0) {
                        /*
                         * Do not generate UNDO records for atime updates.
                         */
                        leaf = cursor->leaf;
                        hammer_modify_node(trans, cursor->node,
                                           &leaf->atime, sizeof(leaf->atime));
                        leaf->atime = ip->sync_ino_leaf.atime;
                        hammer_modify_node_done(cursor->node);
                        /*rec->ino_mtime = ip->sync_ino_rec.ino_mtime;*/
                        ip->sync_flags &= ~HAMMER_INODE_ITIMES;
                        /* XXX recalculate crc */
                        hammer_cache_node(cursor->node, &ip->cache[0]);
                }
                if (error == EDEADLK) {
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (error == 0)
                                goto retry;
                }
        }
        return(error);
}

/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
        hammer_mount_t hmp = ip->hmp;

        /*
         * Handle disposition when dropping the last ref.
         */
        for (;;) {
                if (ip->lock.refs == 1) {
                        /*
                         * Determine whether on-disk action is needed for
                         * the inode's final disposition.
                         */
                        KKASSERT(ip->vp == NULL);
                        hammer_inode_unloadable_check(ip, 0);
                        if (ip->flags & HAMMER_INODE_MODMASK) {
                                if (hmp->rsv_inodes > desiredvnodes) {
                                        hammer_flush_inode(ip,
                                                           HAMMER_FLUSH_SIGNAL);
                                } else {
                                        hammer_flush_inode(ip, 0);
                                }
                        } else if (ip->lock.refs == 1) {
                                hammer_unload_inode(ip);
                                break;
                        }
                } else {
                        if (flush)
                                hammer_flush_inode(ip, 0);

                        /*
                         * The inode still has multiple refs, try to drop
                         * one ref.
                         */
                        KKASSERT(ip->lock.refs >= 1);
                        if (ip->lock.refs > 1) {
                                hammer_unref(&ip->lock);
                                break;
                        }
                }
        }
}

/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * This can only be called in the context of the flusher.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
        hammer_mount_t hmp = ip->hmp;

        KASSERT(ip->lock.refs == 1,
                ("hammer_unload_inode: %d refs\n", ip->lock.refs));
        KKASSERT(ip->vp == NULL);
        KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
        KKASSERT(ip->cursor_ip_refs == 0);
        KKASSERT(ip->lock.lockcount == 0);
        KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

        KKASSERT(RB_EMPTY(&ip->rec_tree));
        KKASSERT(TAILQ_EMPTY(&ip->target_list));

        RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        if (ip->objid_cache)
                hammer_clear_objid(ip);
        --hammer_count_inodes;
        --hmp->count_inodes;

        hammer_inode_wakereclaims(ip);
        kfree(ip, M_HAMMER);

        return(0);
}

/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
        hammer_mount_t hmp = ip->hmp;

        if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
                ip->flags |= HAMMER_INODE_RO;
        else
                ip->flags &= ~HAMMER_INODE_RO;
        return(0);
}

/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ITIMES: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
        KKASSERT((ip->flags & HAMMER_INODE_RO) == 0 ||
                 (flags & (HAMMER_INODE_DDIRTY |
                           HAMMER_INODE_XDIRTY | HAMMER_INODE_BUFS |
                           HAMMER_INODE_DELETED | HAMMER_INODE_ITIMES)) == 0);
        if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
                ip->flags |= HAMMER_INODE_RSV_INODES;
                ++ip->hmp->rsv_inodes;
        }

        ip->flags |= flags;
}
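
/*
 * Usage sketch (illustrative, new_mode is hypothetical): a chmod-style
 * namespace operation modifies the in-memory inode data and marks it
 * dirty; the flusher synchronizes the change to the media later.
 *
 *      ip->ino_data.mode = new_mode;
 *      hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
 */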

/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * If the HAMMER_FLUSH_SYNCHRONOUS flag is specified we will attempt to
 * flush the inode synchronously using the caller's context.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
        int good;

        /*
         * Trivial 'nothing to flush' case.  If the inode is in a SETUP
         * state we have to put it back into an IDLE state so we can
         * drop the extra ref.
         */
        if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
                if (ip->flush_state == HAMMER_FST_SETUP) {
                        ip->flush_state = HAMMER_FST_IDLE;
                        hammer_rel_inode(ip, 0);
                }
                return;
        }

        /*
         * Our flush action will depend on the current state.
         */
        switch(ip->flush_state) {
        case HAMMER_FST_IDLE:
                /*
                 * We have no dependencies and can flush immediately.  Some
                 * of our children may not be flushable, so we have to
                 * re-test with that additional knowledge.
                 */
                hammer_flush_inode_core(ip, flags);
                break;
        case HAMMER_FST_SETUP:
                /*
                 * Recurse upwards through dependencies via target_list
                 * and start their flusher actions going if possible.
                 *
                 * 'good' is our connectivity.  -1 means we have none and
                 * can't flush, 0 means there weren't any dependencies, and
                 * 1 means we have good connectivity.
                 */
                good = hammer_setup_parent_inodes(ip);

                /*
                 * We can continue if good >= 0.  Determine how many records
                 * under our inode can be flushed (and mark them).
                 */
                if (good >= 0) {
                        hammer_flush_inode_core(ip, flags);
                } else {
                        ip->flags |= HAMMER_INODE_REFLUSH;
                        if (flags & HAMMER_FLUSH_SIGNAL) {
                                ip->flags |= HAMMER_INODE_RESIGNAL;
                                hammer_flusher_async(ip->hmp);
                        }
                }
                break;
        default:
                /*
                 * We are already flushing, flag the inode to reflush
                 * if needed after it completes its current flush.
                 */
                if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
                        ip->flags |= HAMMER_INODE_REFLUSH;
                if (flags & HAMMER_FLUSH_SIGNAL) {
                        ip->flags |= HAMMER_INODE_RESIGNAL;
                        hammer_flusher_async(ip->hmp);
                }
                break;
        }
}
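
/*
 * Illustrative summary of the flush-state transitions driven above:
 *
 *      IDLE  -> FLUSH  immediately, via hammer_flush_inode_core()
 *      SETUP -> FLUSH  once parent dependencies resolve (good >= 0)
 *      FLUSH -> FLUSH  REFLUSH is flagged and the flush is re-run
 *                      when the current one completes
 */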

/*
 * Scan ip->target_list, which is a list of records owned by PARENTS of
 * our ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 *     so for now do not ref/deref the structures.  Note that if we use the
 *     ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip)
{
        hammer_record_t depend;
#if 0
        hammer_record_t next;
        hammer_inode_t  pip;
#endif
        int good;
        int r;

        good = 0;
        TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
                r = hammer_setup_parent_inodes_helper(depend);
                KKASSERT(depend->target_ip == ip);
                if (r < 0 && good == 0)
                        good = -1;
                if (r > 0)
                        good = 1;
        }
        return(good);

#if 0
retry:
        good = 0;
        next = TAILQ_FIRST(&ip->target_list);
        if (next) {
                hammer_ref(&next->lock);
                hammer_ref(&next->ip->lock);
        }
        while ((depend = next) != NULL) {
                if (depend->target_ip == NULL) {
                        pip = depend->ip;
                        hammer_rel_mem_record(depend);
                        hammer_rel_inode(pip, 0);
                        goto retry;
                }
                KKASSERT(depend->target_ip == ip);
                next = TAILQ_NEXT(depend, target_entry);
                if (next) {
                        hammer_ref(&next->lock);
                        hammer_ref(&next->ip->lock);
                }
                r = hammer_setup_parent_inodes_helper(depend);
                if (r < 0 && good == 0)
                        good = -1;
                if (r > 0)
                        good = 1;
                pip = depend->ip;
                hammer_rel_mem_record(depend);
                hammer_rel_inode(pip, 0);
        }
        return(good);
#endif
}

/*
 * This helper function takes a record representing the dependency between
 * the parent inode and child inode.
 *
 * record->ip           = parent inode
 * record->target_ip    = child inode
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependency and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record)
{
        hammer_mount_t hmp;
        hammer_inode_t pip;
        int good;

        KKASSERT(record->flush_state != HAMMER_FST_IDLE);
        pip = record->ip;
        hmp = pip->hmp;

        /*
         * If the record is already flushing, is it in our flush group?
         *
         * If it is in our flush group but it is a general record or a
         * delete-on-disk, it does not improve our connectivity (return 0),
         * and if the target inode is not trying to destroy itself we can't
         * allow the operation yet anyway (the second return -1).
         */
        if (record->flush_state == HAMMER_FST_FLUSH) {
                if (record->flush_group != hmp->flusher.next) {
                        pip->flags |= HAMMER_INODE_REFLUSH;
                        return(-1);
                }
                if (record->type == HAMMER_MEM_RECORD_ADD)
                        return(1);
                /* GENERAL or DEL */
                return(0);
        }

        /*
         * It must be a setup record.  Try to resolve the setup dependencies
         * by recursing upwards so we can place ip on the flush list.
         */
        KKASSERT(record->flush_state == HAMMER_FST_SETUP);

        good = hammer_setup_parent_inodes(pip);

        /*
         * We can't flush ip because it has no connectivity (XXX also check
         * nlinks for pre-existing connectivity!).  Flag it so any resolution
         * recurses back down.
         */
        if (good < 0) {
                pip->flags |= HAMMER_INODE_REFLUSH;
                return(good);
        }

        /*
         * We are go, place the parent inode in a flushing state so we can
         * place its record in a flushing state.  Note that the parent
         * may already be flushing.  The record must be in the same flush
         * group as the parent.
         */
        if (pip->flush_state != HAMMER_FST_FLUSH)
                hammer_flush_inode_core(pip, HAMMER_FLUSH_RECURSION);
        KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
        KKASSERT(record->flush_state == HAMMER_FST_SETUP);

#if 0
        if (record->type == HAMMER_MEM_RECORD_DEL &&
            (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
                /*
                 * Regardless of flushing state we cannot sync this path if the
                 * record represents a delete-on-disk but the target inode
                 * is not ready to sync its own deletion.
                 *
                 * XXX need to count effective nlinks to determine whether
                 * the flush is ok, otherwise removing a hardlink will
                 * just leave the DEL record to rot.
                 */
                record->target_ip->flags |= HAMMER_INODE_REFLUSH;
                return(-1);
        } else
#endif
        if (pip->flush_group == pip->hmp->flusher.next) {
                /*
                 * This is the record we wanted to synchronize.  If the
                 * record went into a flush state while we blocked, it
                 * had better be in the correct flush group.
                 */
                if (record->flush_state != HAMMER_FST_FLUSH) {
                        record->flush_state = HAMMER_FST_FLUSH;
                        record->flush_group = pip->flush_group;
                        hammer_ref(&record->lock);
                } else {
                        KKASSERT(record->flush_group == pip->flush_group);
                }
                if (record->type == HAMMER_MEM_RECORD_ADD)
                        return(1);

                /*
                 * A general or delete-on-disk record does not contribute
                 * to our visibility.  We can still flush it, however.
                 */
                return(0);
        } else {
                /*
                 * We couldn't resolve the dependencies, request that the
                 * inode be flushed when the dependencies can be resolved.
                 */
                pip->flags |= HAMMER_INODE_REFLUSH;
                return(-1);
        }
}

/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, int flags)
{
        int go_count;

        /*
         * Set flush state and prevent the flusher from cycling into
         * the next flush group.  Do not place the ip on the list yet.
         * Inodes not in the idle state get an extra reference.
         */
        KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
        if (ip->flush_state == HAMMER_FST_IDLE)
                hammer_ref(&ip->lock);
        ip->flush_state = HAMMER_FST_FLUSH;
        ip->flush_group = ip->hmp->flusher.next;
        ++ip->hmp->flusher.group_lock;
        ++ip->hmp->count_iqueued;
        ++hammer_count_iqueued;

        /*
         * We need to be able to vfsync/truncate from the backend.
         */
        KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
        if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
                ip->flags |= HAMMER_INODE_VHELD;
                vref(ip->vp);
        }

        /*
         * Figure out how many in-memory records we can actually flush
         * (not including inode meta-data, buffers, etc).
         */
        if (flags & HAMMER_FLUSH_RECURSION) {
                go_count = 1;
        } else {
                go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
                                   hammer_setup_child_callback, NULL);
        }

        /*
         * This is a more involved test that includes go_count.  If we
         * can't flush, flag the inode and return.  If go_count is 0 we
         * were unable to flush any records in our rec_tree and must
         * ignore the XDIRTY flag.
         */
        if (go_count == 0) {
                if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
                        ip->flags |= HAMMER_INODE_REFLUSH;

                        --ip->hmp->count_iqueued;
                        --hammer_count_iqueued;

                        ip->flush_state = HAMMER_FST_SETUP;
                        if (ip->flags & HAMMER_INODE_VHELD) {
                                ip->flags &= ~HAMMER_INODE_VHELD;
                                vrele(ip->vp);
                        }
                        if (flags & HAMMER_FLUSH_SIGNAL) {
                                ip->flags |= HAMMER_INODE_RESIGNAL;
                                hammer_flusher_async(ip->hmp);
                        }
                        if (--ip->hmp->flusher.group_lock == 0)
                                wakeup(&ip->hmp->flusher.group_lock);
                        return;
                }
        }

        /*
         * Snapshot the state of the inode for the backend flusher.
         *
         * The truncation must be retained in the frontend until after
         * we've actually performed the record deletion.
         *
         * NOTE: The DELETING flag is a mod flag, but it is also sticky,
         * and stays in ip->flags.  Once set, it stays set until the
         * inode is destroyed.
         */
        ip->sync_flags = (ip->flags & HAMMER_INODE_MODMASK);
        ip->sync_trunc_off = ip->trunc_off;
        ip->sync_ino_leaf = ip->ino_leaf;
        ip->sync_ino_data = ip->ino_data;
        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        ip->flags &= ~HAMMER_INODE_MODMASK;
#ifdef DEBUG_TRUNCATE
        if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
                kprintf("truncateS %016llx\n", ip->sync_trunc_off);
#endif

        /*
         * The flusher list inherits our inode and reference.
         */
        TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
        if (--ip->hmp->flusher.group_lock == 0)
                wakeup(&ip->hmp->flusher.group_lock);

        if (flags & HAMMER_FLUSH_SIGNAL) {
                hammer_flusher_async(ip->hmp);
        }
}
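
/*
 * Illustrative note on the snapshot above: the frontend keeps trunc_off
 * live so new truncations can accumulate while the backend works from
 * sync_trunc_off.  Resetting trunc_off to 0x7FFFFFFFFFFFFFFFLL restores
 * the "no pending truncation" sentinel used throughout this file.
 */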

/*
 * Callback for scan of ip->rec_tree.  Try to include each record in our
 * flush.  ip->flush_group has been set but the inode has not yet been
 * moved into a flushing state.
 *
 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
 * both inodes.
 *
 * We return 1 for any record placed or found in FST_FLUSH, which prevents
 * the caller from shortcutting the flush.
 */
static int
hammer_setup_child_callback(hammer_record_t rec, void *data)
{
        hammer_inode_t target_ip;
        hammer_inode_t ip;
        int r;

        /*
         * Deleted records are ignored.  Note that the flush detects deleted
         * front-end records at multiple points to deal with races.  This is
         * just the first line of defense.  The only time DELETED_FE cannot
         * be set is when HAMMER_RECF_INTERLOCK_BE is set.
         *
         * Don't get confused between record deletion and, say, directory
         * entry deletion.  The deletion of a directory entry that is on
         * the media has nothing to do with the record deletion flags.
         */
        if (rec->flags & (HAMMER_RECF_DELETED_FE|HAMMER_RECF_DELETED_BE))
                return(0);

        /*
         * If the record is in an idle state it has no dependencies and
         * can be flushed.
         */
        ip = rec->ip;
        r = 0;

        switch(rec->flush_state) {
        case HAMMER_FST_IDLE:
                /*
                 * Record has no setup dependency, we can flush it.
                 */
                KKASSERT(rec->target_ip == NULL);
                rec->flush_state = HAMMER_FST_FLUSH;
                rec->flush_group = ip->flush_group;
                hammer_ref(&rec->lock);
                r = 1;
                break;
        case HAMMER_FST_SETUP:
                /*
                 * Record has a setup dependency.  Try to include the
                 * target ip in the flush.
                 *
                 * We have to be careful here, if we do not do the right
                 * thing we can lose track of dirty inodes and the system
                 * will lockup trying to allocate buffers.
                 */
                target_ip = rec->target_ip;
                KKASSERT(target_ip != NULL);
                KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
                if (target_ip->flush_state == HAMMER_FST_FLUSH) {
                        /*
                         * If the target IP is already flushing in our group
                         * we are golden, otherwise make sure the target
                         * reflushes.
                         */
                        if (target_ip->flush_group == ip->flush_group) {
                                rec->flush_state = HAMMER_FST_FLUSH;
                                rec->flush_group = ip->flush_group;
                                hammer_ref(&rec->lock);
                                r = 1;
                        } else {
                                target_ip->flags |= HAMMER_INODE_REFLUSH;
                        }
                } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
                        /*
                         * If the target IP is not flushing we can force
                         * it to flush, even if it is unable to write out
                         * any of its own records we have at least one in
                         * hand that we CAN deal with.
                         */
                        rec->flush_state = HAMMER_FST_FLUSH;
                        rec->flush_group = ip->flush_group;
                        hammer_ref(&rec->lock);
                        hammer_flush_inode_core(target_ip,
                                                HAMMER_FLUSH_RECURSION);
                        r = 1;
                } else {
                        /*
                         * General or delete-on-disk record.
                         *
                         * XXX this needs help.  If a delete-on-disk we could
                         * disconnect the target.  If the target has its own
                         * dependencies they really need to be flushed.
                         *
                         * XXX
                         */
                        rec->flush_state = HAMMER_FST_FLUSH;
                        rec->flush_group = ip->flush_group;
                        hammer_ref(&rec->lock);
                        hammer_flush_inode_core(target_ip,
                                                HAMMER_FLUSH_RECURSION);
                        r = 1;
                }
                break;
        case HAMMER_FST_FLUSH:
                /*
                 * Record already associated with a flush group.  It had
                 * better be ours.
                 */
                KKASSERT(rec->flush_group == ip->flush_group);
                r = 1;
                break;
        }
        return(r);
}
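
/*
 * Illustrative summary of the callback's return values: 1 for any
 * record placed in (or already found in) FST_FLUSH, 0 for deleted
 * records and for SETUP records whose target inode is flushing in a
 * different group.  hammer_flush_inode_core() uses the scan result
 * as go_count.
 */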

/*
 * Wait for a previously queued flush to complete.
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
        while (ip->flush_state != HAMMER_FST_IDLE) {
                if (ip->flush_state == HAMMER_FST_SETUP) {
                        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                } else {
                        ip->flags |= HAMMER_INODE_FLUSHW;
                        tsleep(&ip->flags, 0, "hmrwin", 0);
                }
        }
}
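
/*
 * Usage sketch (illustrative): fsync-style callers typically signal a
 * flush and then wait for the inode to cycle back to the idle state.
 *
 *      hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
 *      hammer_wait_inode(ip);
 */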

/*
 * Wait for the inode's reserved record count to drain.
 */
void
hammer_wait_inode_recs(hammer_inode_t ip)
{
        while (ip->rsv_recs > hammer_limit_irecs) {
                hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                if (ip->rsv_recs > hammer_limit_irecs) {
                        ip->flags |= HAMMER_INODE_PARTIALW;
                        tsleep(&ip->flags, 0, "hmrwpp", 0);
                }
        }
}
1344
1345 /*
1346  * Called by the backend code when a flush has been completed.
1347  * The inode has already been removed from the flush list.
1348  *
1349  * A pipelined flush can occur, in which case we must re-enter the
1350  * inode on the list and re-copy its fields.
1351  */
1352 void
1353 hammer_flush_inode_done(hammer_inode_t ip)
1354 {
1355         hammer_mount_t hmp;
1356         int dorel;
1357
1358         KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
1359
1360         hmp = ip->hmp;
1361
1362         /*
1363          * Merge left-over flags back into the frontend and fix the state.
1364          */
1365         ip->flags |= ip->sync_flags;
1366
1367         /*
1368          * The backend may have adjusted nlinks, so if the adjusted nlinks
1369          * does not match the frontend, set the frontend's DDIRTY flag again.
1370          */
1371         if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
1372                 ip->flags |= HAMMER_INODE_DDIRTY;
1373
1374         /*
1375          * Fix up the dirty buffer status.  IO completions will also
1376          * try to clean up rsv_databufs.
1377          */
1378         if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
1379                 ip->flags |= HAMMER_INODE_BUFS;
1380         } else {
1381                 hmp->rsv_databufs -= ip->rsv_databufs;
1382                 ip->rsv_databufs = 0;
1383         }
1384
1385         /*
1386          * Re-set the XDIRTY flag if some of the inode's in-memory records
1387          * could not be flushed.
1388          */
1389         KKASSERT((RB_EMPTY(&ip->rec_tree) &&
1390                   (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
1391                  (!RB_EMPTY(&ip->rec_tree) &&
1392                   (ip->flags & HAMMER_INODE_XDIRTY) != 0));
1393
1394         /*
1395          * Do not lose track of inodes which no longer have vnode
1396          * associations, otherwise they may never get flushed again.
1397          */
1398         if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
1399                 ip->flags |= HAMMER_INODE_REFLUSH;
1400
1401         /*
1402          * Adjust flush_state.  The target state (idle or setup) shouldn't
1403          * be terribly important since we will reflush if we really need
1404          * to do anything. XXX
1405          */
1406         if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
1407                 ip->flush_state = HAMMER_FST_IDLE;
1408                 dorel = 1;
1409         } else {
1410                 ip->flush_state = HAMMER_FST_SETUP;
1411                 dorel = 0;
1412         }
1413
1414         --hmp->count_iqueued;
1415         --hammer_count_iqueued;
1416
1417         /*
1418          * Clean up the vnode ref
1419          */
1420         if (ip->flags & HAMMER_INODE_VHELD) {
1421                 ip->flags &= ~HAMMER_INODE_VHELD;
1422                 vrele(ip->vp);
1423         }
1424
1425         /*
1426          * If the frontend made more changes and requested another flush,
1427          * then try to get it running.
1428          */
1429         if (ip->flags & HAMMER_INODE_REFLUSH) {
1430                 ip->flags &= ~HAMMER_INODE_REFLUSH;
1431                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
1432                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
1433                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1434                 } else {
1435                         hammer_flush_inode(ip, 0);
1436                 }
1437         }
1438
1439         /*
1440          * If the inode is now clean drop the space reservation.
1441          */
1442         if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1443             (ip->flags & HAMMER_INODE_RSV_INODES)) {
1444                 ip->flags &= ~HAMMER_INODE_RSV_INODES;
1445                 --hmp->rsv_inodes;
1446         }
1447
1448         /*
1449          * Finally, if the frontend is waiting for a flush to complete,
1450          * wake it up.
1451          */
1452         if (ip->flush_state != HAMMER_FST_FLUSH) {
1453                 if (ip->flags & HAMMER_INODE_FLUSHW) {
1454                         ip->flags &= ~HAMMER_INODE_FLUSHW;
1455                         wakeup(&ip->flags);
1456                 }
1457         }
1458         if (dorel)
1459                 hammer_rel_inode(ip, 0);
1460 }
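
/*
 * Completion summary: FLUSH -> IDLE when no dependencies or in-memory
 * records remain (dropping the flush reference via hammer_rel_inode),
 * otherwise FLUSH -> SETUP so a queued REFLUSH can re-enter the inode.
 * RESIGNAL additionally upgrades that re-flush to a signaled (immediate)
 * flush on behalf of the frontend.
 */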
1461
1462 /*
1463  * Called from hammer_sync_inode() to synchronize in-memory records
1464  * to the media.
1465  */
1466 static int
1467 hammer_sync_record_callback(hammer_record_t record, void *data)
1468 {
1469         hammer_cursor_t cursor = data;
1470         hammer_transaction_t trans = cursor->trans;
1471         int error;
1472
1473         /*
1474          * Skip records that do not belong to the current flush.
1475          */
1476         ++hammer_stats_record_iterations;
1477         if (record->flush_state != HAMMER_FST_FLUSH)
1478                 return(0);
1479
1480 #if 1
1481         if (record->flush_group != record->ip->flush_group) {
1482                 kprintf("sync_record %p ip %p bad flush group %d %d\n", record, record->ip, record->flush_group, record->ip->flush_group);
1483                 Debugger("blah2");
1484                 return(0);
1485         }
1486 #endif
1487         KKASSERT(record->flush_group == record->ip->flush_group);
1488
1489         /*
1490          * Interlock the record using the BE flag.  Once BE is set the
1491          * frontend cannot change the state of FE.
1492          *
1493          * NOTE: If FE is set prior to us setting BE we still sync the
1494          * record out, but the flush completion code converts it to 
1495          * a delete-on-disk record instead of destroying it.
1496          */
1497         KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
1498         record->flags |= HAMMER_RECF_INTERLOCK_BE;
1499
1500         /*
1501          * The backend may have already disposed of the record.
1502          */
1503         if (record->flags & HAMMER_RECF_DELETED_BE) {
1504                 error = 0;
1505                 goto done;
1506         }
1507
1508         /*
1509          * If the whole inode is being deleted all on-disk records will
1510          * be deleted very soon.  We can't sync any new records to disk
1511          * because they will be deleted in the same transaction they were
1512          * created in (delete_tid == create_tid), which will assert.
1513          *
1514          * XXX There may be a case with RECORD_ADD with DELETED_FE set
1515          * that we currently panic on.
1516          */
1517         if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
1518                 switch(record->type) {
1519                 case HAMMER_MEM_RECORD_DATA:
1520                         /*
1521                          * We don't have to do anything, if the record was
1522                          * committed the space will have been accounted for
1523                          * in the blockmap.
1524                          */
1525                         /* fall through */
1526                 case HAMMER_MEM_RECORD_GENERAL:
1527                         record->flags |= HAMMER_RECF_DELETED_FE;
1528                         record->flags |= HAMMER_RECF_DELETED_BE;
1529                         error = 0;
1530                         goto done;
1531                 case HAMMER_MEM_RECORD_ADD:
1532                         panic("hammer_sync_record_callback: illegal add "
1533                               "during inode deletion record %p", record);
1534                         break; /* NOT REACHED */
1535                 case HAMMER_MEM_RECORD_INODE:
1536                         panic("hammer_sync_record_callback: attempt to "
1537                               "sync inode record %p?", record);
1538                         break; /* NOT REACHED */
1539                 case HAMMER_MEM_RECORD_DEL:
1540                         /* 
1541                          * Follow through and issue the on-disk deletion
1542                          */
1543                         break;
1544                 }
1545         }
1546
1547         /*
1548          * If DELETED_FE is set special handling is needed for directory
1549          * entries.  Dependent pieces related to the directory entry may
1550          * have already been synced to disk.  If this occurs we have to
1551          * sync the directory entry and then change the in-memory record
1552          * from an ADD to a DELETE to cover the fact that it's been
1553          * deleted by the frontend.
1554          *
1555          * A directory delete covering record (MEM_RECORD_DEL) can never
1556          * be deleted by the frontend.
1557          *
1558          * Any other record type (aka DATA) can be deleted by the frontend.
1559          * XXX At the moment the flusher must skip it because there may
1560          * be another data record in the flush group for the same block,
1561          * meaning that some frontend data changes can leak into the backend's
1562          * synchronization point.
1563          */
1564         if (record->flags & HAMMER_RECF_DELETED_FE) {
1565                 if (record->type == HAMMER_MEM_RECORD_ADD) {
1566                         record->flags |= HAMMER_RECF_CONVERT_DELETE;
1567                 } else {
1568                         KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
1569                         record->flags |= HAMMER_RECF_DELETED_BE;
1570                         error = 0;
1571                         goto done;
1572                 }
1573         }
1574
1575         /*
1576          * Assign the create_tid for new records.  Deletions already
1577          * have the record's entire key properly set up.
1578          */
1579         if (record->type != HAMMER_MEM_RECORD_DEL)
1580                 record->leaf.base.create_tid = trans->tid;
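        /*
         * Sync the record, retrying on EDEADLK.  A cursor collision
         * requires the cursor to be torn down and re-initialized before
         * the operation can be retried.  Other errors are returned
         * negated, which aborts the RB_SCAN driving this callback.
         */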
1581         for (;;) {
1582                 error = hammer_ip_sync_record_cursor(cursor, record);
1583                 if (error != EDEADLK)
1584                         break;
1585                 hammer_done_cursor(cursor);
1586                 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
1587                                            record->ip);
1588                 if (error)
1589                         break;
1590         }
1591         record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
1592
1593         if (error) {
1594                 error = -error;
1595                 if (error != -ENOSPC) {
1596                         kprintf("hammer_sync_record_callback: sync failed rec "
1597                                 "%p, error %d\n", record, error);
1598                         Debugger("sync failed rec");
1599                 }
1600         }
1601 done:
1602         hammer_flush_record_done(record, error);
1603         return(error);
1604 }
1605
1606 /*
1607  * XXX error handling
1608  */
1609 int
1610 hammer_sync_inode(hammer_inode_t ip)
1611 {
1612         struct hammer_transaction trans;
1613         struct hammer_cursor cursor;
1614         hammer_record_t depend;
1615         hammer_record_t next;
1616         int error, tmp_error;
1617         u_int64_t nlinks;
1618
1619         if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
1620                 return(0);
1621
1622         hammer_start_transaction_fls(&trans, ip->hmp);
1623         error = hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
1624         if (error)
1625                 goto done;
1626
1627         /*
1628          * Any directory records referencing this inode which are not in
1629          * our current flush group must adjust our nlink count for the
1630          * purposes of synchronization to disk.
1631          *
1632          * Records which are in our flush group can be unlinked from our
1633          * inode now, potentially allowing the inode to be physically
1634          * deleted.
1635          *
1636          * This cannot block.
1637          */
1638         nlinks = ip->ino_data.nlinks;
1639         next = TAILQ_FIRST(&ip->target_list);
1640         while ((depend = next) != NULL) {
1641                 next = TAILQ_NEXT(depend, target_entry);
1642                 if (depend->flush_state == HAMMER_FST_FLUSH &&
1643                     depend->flush_group == ip->hmp->flusher.act) {
1644                         /*
1645                          * If this is an ADD that was deleted by the frontend
1646                          * the frontend nlinks count will have already been
1647                          * decremented, but the backend is going to sync its
1648                          * directory entry and must account for it.  The
1649                          * record will be converted to a delete-on-disk when
1650                          * it gets synced.
1651                          *
1652                          * If the ADD was not deleted by the frontend we
1653                          * can remove the dependency from our target_list.
1654                          */
1655                         if (depend->flags & HAMMER_RECF_DELETED_FE) {
1656                                 ++nlinks;
1657                         } else {
1658                                 TAILQ_REMOVE(&ip->target_list, depend,
1659                                              target_entry);
1660                                 depend->target_ip = NULL;
1661                         }
1662                 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
1663                         /*
1664                          * Not part of our flush group
1665                          */
1666                         KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
1667                         switch(depend->type) {
1668                         case HAMMER_MEM_RECORD_ADD:
1669                                 --nlinks;
1670                                 break;
1671                         case HAMMER_MEM_RECORD_DEL:
1672                                 ++nlinks;
1673                                 break;
1674                         default:
1675                                 break;
1676                         }
1677                 }
1678         }
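
        /*
         * For example: an ADD (directory entry / hard link) that is not
         * part of this flush group has not reached the media, so the
         * on-media nlinks must be one less than the frontend's count.
         * A DEL that is not part of this flush group means the on-media
         * directory entry still exists, so the on-media nlinks must be
         * one more.
         */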
1679
1680         /*
1681          * Set dirty if we had to modify the link count.
1682          */
1683         if (ip->sync_ino_data.nlinks != nlinks) {
1684                 KKASSERT((int64_t)nlinks >= 0);
1685                 ip->sync_ino_data.nlinks = nlinks;
1686                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1687         }
1688
1689         /*
1690          * If there is a truncation queued, destroy any data past the (aligned)
1691          * truncation point.  Userland will have dealt with the buffer
1692          * containing the truncation point for us.
1693          *
1694          * We don't flush pending frontend data buffers until after we've
1695          * dealt with the truncation.
1696          *
1697          * Don't bother if the inode is or has been deleted.
1698          */
1699         if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
1700                 /*
1701                  * Interlock trunc_off.  The VOP front-end may continue to
1702                  * make adjustments to it while we are blocked.
1703                  */
1704                 off_t trunc_off;
1705                 off_t aligned_trunc_off;
1706
1707                 trunc_off = ip->sync_trunc_off;
1708                 aligned_trunc_off = (trunc_off + HAMMER_BUFMASK) &
1709                                     ~HAMMER_BUFMASK64;
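
                /*
                 * Example (HAMMER_BUFSIZE is 16384, 0x4000): trunc_off
                 * 0x6001 yields aligned_trunc_off 0x8000.  The partial
                 * buffer at 0x4000-0x7fff was handled by the frontend;
                 * only whole buffers from 0x8000 up are destroyed below.
                 */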
1710
1711                 /*
1712                  * Delete any whole blocks on-media.  The front-end has
1713                  * already cleaned out any partial block and made it
1714                  * pending.  The front-end may have updated trunc_off
1715                  * while we were blocked so we only use sync_trunc_off.
1716                  */
1717                 error = hammer_ip_delete_range(&cursor, ip,
1718                                                 aligned_trunc_off,
1719                                                 0x7FFFFFFFFFFFFFFFLL, 1);
1720                 if (error)
1721                         Debugger("hammer_ip_delete_range errored");
1722
1723                 /*
1724                  * Clear the truncation flag on the backend after we have
1725                  * completed the deletions.  Backend data is now good again
1726                  * (including new records we are about to sync, below).
1727                  */
1728                 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
1729                 ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL;
1730         } else {
1731                 error = 0;
1732         }
1733
1734         /*
1735          * Now sync related records.  These will typically be directory
1736          * entries or delete-on-disk records.
1737          *
1738          * Not all records will be flushed, but clear XDIRTY anyway.  We
1739          * will set it again in the frontend hammer_flush_inode_done() 
1740          * if records remain.
1741          */
1742         if (error == 0) {
1743                 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1744                                     hammer_sync_record_callback, &cursor);
1745                 if (tmp_error < 0)
1746                         tmp_error = -tmp_error;
1747                 if (tmp_error)
1748                         error = tmp_error;
1749         }
1750
1751         /*
1752          * If we are deleting the inode the frontend had better not have
1753          * any active references on elements making up the inode.
1754          */
1755         if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
1756             RB_EMPTY(&ip->rec_tree) &&
1757             (ip->sync_flags & HAMMER_INODE_DELETING) &&
1758             (ip->flags & HAMMER_INODE_DELETED) == 0) {
1759                 int count1 = 0;
1760
1761                 ip->flags |= HAMMER_INODE_DELETED;
1762                 error = hammer_ip_delete_range_all(&cursor, ip, &count1);
1763                 if (error == 0) {
1764                         ip->sync_flags &= ~HAMMER_INODE_DELETING;
1765                         ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
1766                         KKASSERT(RB_EMPTY(&ip->rec_tree));
1767
1768                         /*
1769                          * Set delete_tid in both the frontend and backend
1770                          * copy of the inode record.  The DELETED flag handles
1771                          * this, do not set DDIRTY.
1772                          */
1773                         ip->ino_leaf.base.delete_tid = trans.tid;
1774                         ip->sync_ino_leaf.base.delete_tid = trans.tid;
1775
1776                         /*
1777                          * Adjust the inode count in the volume header
1778                          */
1779                         if (ip->flags & HAMMER_INODE_ONDISK) {
1780                                 hammer_modify_volume_field(&trans,
1781                                                            trans.rootvol,
1782                                                            vol0_stat_inodes);
1783                                 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
1784                                 hammer_modify_volume_done(trans.rootvol);
1785                         }
1786                 } else {
1787                         ip->flags &= ~HAMMER_INODE_DELETED;
1788                         Debugger("hammer_ip_delete_range_all errored");
1789                 }
1790         }
1791
1792         ip->sync_flags &= ~HAMMER_INODE_BUFS;
1793
1794         if (error)
1795                 Debugger("RB_SCAN errored");
1796
1797         /*
1798          * Now update the inode's on-disk inode-data and/or on-disk record.
1799          * DELETED and ONDISK are managed only in ip->flags.
1800          */
1801         switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
1802         case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
1803                 /*
1804                  * If deleted and on-disk, don't set any additional flags.
1805                  * the delete flag takes care of things.
1806                  *
1807                  * Clear flags which may have been set by the frontend.
1808                  */
1809                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY|
1810                                     HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
1811                                     HAMMER_INODE_DELETING);
1812                 break;
1813         case HAMMER_INODE_DELETED:
1814                 /*
1815                  * Take care of the case where a deleted inode was never
1816                  * flushed to the disk in the first place.
1817                  *
1818                  * Clear flags which may have been set by the frontend.
1819                  */
1820                 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY|
1821                                     HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
1822                                     HAMMER_INODE_DELETING);
1823                 while (RB_ROOT(&ip->rec_tree)) {
1824                         hammer_record_t record = RB_ROOT(&ip->rec_tree);
1825                         hammer_ref(&record->lock);
1826                         KKASSERT(record->lock.refs == 1);
1827                         record->flags |= HAMMER_RECF_DELETED_FE;
1828                         record->flags |= HAMMER_RECF_DELETED_BE;
1829                         hammer_rel_mem_record(record);
1830                 }
1831                 break;
1832         case HAMMER_INODE_ONDISK:
1833                 /*
1834                  * If already on-disk, do not set any additional flags.
1835                  */
1836                 break;
1837         default:
1838                 /*
1839                  * If not on-disk and not deleted, set both dirty flags
1840                  * to force an initial record to be written.  Also set
1841                  * the create_tid for the inode.
1842                  *
1843                  * Set create_tid in both the frontend and backend
1844                  * copy of the inode record.
1845                  */
1846                 ip->ino_leaf.base.create_tid = trans.tid;
1847                 ip->sync_ino_leaf.base.create_tid = trans.tid;
1848                 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1849                 break;
1850         }
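
        /*
         * Recap of the cases above: DELETED+ONDISK relies entirely on the
         * delete performed below; DELETED alone means the inode never
         * reached the media and its in-memory records are simply tossed;
         * ONDISK alone needs no extra flags; neither set is a freshly
         * instantiated inode, which gets its create_tid and an initial
         * DDIRTY record.
         */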
1851
1852         /*
1853          * If DDIRTY is set, write out a new record.  If the inode
1854          * is already on-disk the old record is marked as deleted.
1855          *
1856          * If DELETED is set hammer_update_inode() will delete the existing
1857          * record without writing out a new one.
1858          *
1859          * If *ONLY* the ITIMES flag is set we can update the record in-place.
1860          */
1861         if (ip->flags & HAMMER_INODE_DELETED) {
1862                 error = hammer_update_inode(&cursor, ip);
1863         } else 
1864         if ((ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) ==
1865             HAMMER_INODE_ITIMES) {
1866                 error = hammer_update_itimes(&cursor, ip);
1867         } else
1868         if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) {
1869                 error = hammer_update_inode(&cursor, ip);
1870         }
1871         if (error)
1872                 Debugger("hammer_update_itimes/inode errored");
1873 done:
1874         /*
1875          * Save the TID we used to sync the inode with to make sure we
1876          * do not improperly reuse it.
1877          */
1878         hammer_done_cursor(&cursor);
1879         hammer_done_transaction(&trans);
1880         return(error);
1881 }
1882
1883 /*
1884  * This routine is called when the OS is no longer actively referencing
1885  * the inode (but might still be keeping it cached), or when releasing
1886  * the last reference to an inode.
1887  *
1888  * At this point if the inode's nlinks count is zero we want to destroy
1889  * it, which may mean destroying it on-media too.
1890  */
1891 void
1892 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
1893 {
1894         struct vnode *vp;
1895
1896         /*
1897          * Set the DELETING flag when the link count drops to 0 and the
1898          * OS no longer has any opens on the inode.
1899          *
1900          * The backend will clear DELETING (a mod flag) and set DELETED
1901          * (a state flag) when it is actually able to perform the
1902          * operation.
1903          */
1904         if (ip->ino_data.nlinks == 0 &&
1905             (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
1906                 ip->flags |= HAMMER_INODE_DELETING;
1907                 ip->flags |= HAMMER_INODE_TRUNCATED;
1908                 ip->trunc_off = 0;
1909                 vp = NULL;
1910                 if (getvp) {
1911                         if (hammer_get_vnode(ip, &vp) != 0)
1912                                 return;
1913                 }
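
                /*
                 * NOTE: When getvp is set the caller did not supply a
                 * vnode; hammer_get_vnode() acquires one so the cleanup
                 * below has a vp to operate on, and the reference is
                 * released with vput() afterwards.
                 */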
1914
1915                 /*
1916                  * Final cleanup
1917                  */
1918                 if (ip->vp) {
1919                         vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
1920                         vnode_pager_setsize(ip->vp, 0);
1921                 }
1922                 if (getvp) {
1923                         vput(vp);
1924                 }
1925         }
1926 }
1927
1928 /*
1929  * Re-test an inode when a dependency has gone away to see if we
1930  * can chain flush it.
1931  */
1932 void
1933 hammer_test_inode(hammer_inode_t ip)
1934 {
1935         if (ip->flags & HAMMER_INODE_REFLUSH) {
1936                 ip->flags &= ~HAMMER_INODE_REFLUSH;
1937                 hammer_ref(&ip->lock);
1938                 if (ip->flags & HAMMER_INODE_RESIGNAL) {
1939                         ip->flags &= ~HAMMER_INODE_RESIGNAL;
1940                         hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1941                 } else {
1942                         hammer_flush_inode(ip, 0);
1943                 }
1944                 hammer_rel_inode(ip, 0);
1945         }
1946 }
1947
1948 /*
1949  * Clear the RECLAIM flag on an inode.  This occurs when the inode is
1950  * reassociated with a vp or just before it gets freed.
1951  *
1952  * Wake up one thread blocked waiting on reclaims to complete.  Note that
1953  * the inode the thread is waiting on behalf of is a different inode than
1954  * the inode we are called with.  This is to create a pipeline.
1955  */
1956 static void
1957 hammer_inode_wakereclaims(hammer_inode_t ip)
1958 {
1959         struct hammer_reclaim *reclaim;
1960         hammer_mount_t hmp = ip->hmp;
1961
1962         if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
1963                 return;
1964
1965         --hammer_count_reclaiming;
1966         --hmp->inode_reclaims;
1967         ip->flags &= ~HAMMER_INODE_RECLAIM;
1968
1969         if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
1970                 TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
1971                 reclaim->okydoky = 1;
1972                 wakeup(reclaim);
1973         }
1974 }
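
/*
 * The waiting side of the pipeline queues a struct hammer_reclaim on
 * hmp->reclaim_list and sleeps until okydoky is set, roughly as follows
 * (sketch only; the actual enqueue lives with the inode-allocation path):
 *
 *        struct hammer_reclaim reclaim;
 *
 *        reclaim.okydoky = 0;
 *        TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
 *        while (reclaim.okydoky == 0)
 *                tsleep(&reclaim, 0, "hmrrcm", hz);
 */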
1975