/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.62 2008/05/25 18:41:33 dillon Exp $
 */
#include "hammer.h"
#include <vm/vm_extern.h>

static int hammer_unload_inode(struct hammer_inode *ip);
static void hammer_flush_inode_core(hammer_inode_t ip, int flags);
static int hammer_setup_child_callback(hammer_record_t rec, void *data);
static int hammer_setup_parent_inodes(hammer_record_t record);

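/*
 * Flush pipeline in brief (the states and entry points below are the
 * ones used throughout this file):
 *
 *	frontend: hammer_modify_inode() marks an in-memory inode dirty,
 *		  hammer_flush_inode() moves it from IDLE/SETUP to FLUSH
 *		  and queues it to the flusher.
 *	backend:  hammer_sync_inode() writes the records, buffers, and
 *		  the inode record itself; hammer_flush_inode_done()
 *		  returns the inode to SETUP or IDLE, re-flushing it if
 *		  the frontend dirtied it again in the meantime.
 */
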
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);
	/*
	 * If the inode no longer has visibility in the filesystem and is
	 * fairly clean, try to recycle it immediately.  This can deadlock
	 * in vfsync() if we aren't careful.
	 *
	 * Do not queue the inode to the flusher if we still have visibility,
	 * otherwise namespace calls such as chmod will unnecessarily generate
	 * multiple inode updates.
	 */
	hammer_inode_unloadable_check(ip, 0);
	if (ip->ino_data.nlinks == 0) {
		if (ip->flags & HAMMER_INODE_MODMASK)
			hammer_flush_inode(ip, 0);

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;
	struct vnode *vp = ap->a_vp;

	if ((ip = vp->v_data) != NULL) {
		vp->v_data = NULL;
		ip->vp = NULL;
		hammer_rel_inode(ip, 1);
	}
	return(0);
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
	struct vnode *vp;
	int error = 0;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, ip->hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				/* lost a race, retry with the new vnode */
				hammer_unlock(&ip->lock);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;
			vp->v_type =
				hammer_get_vnode_type(ip->ino_data.obj_type);

			switch(ip->ino_data.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &ip->hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &ip->hmp->mp->mnt_vn_fifo_ops;
				break;
			default:
				break;
			}

			/*
			 * Only mark as the root vnode if the ip is not
			 * historical, otherwise the VFS cache will get
			 * confused.  The other half of the special handling
			 * is in hammer_vop_nlookupdotdot().
			 */
			if (ip->obj_id == HAMMER_OBJID_ROOT &&
			    ip->obj_asof == ip->hmp->asof) {
				vp->v_flag |= VROOT;
			}

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG)
				vinitvmio(vp, ip->ino_data.size);
			break;
		}

		/*
		 * Loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp)
				break;
			vput(vp);
		}
	}
	*vpp = vp;
	return(error);
}

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These
 * functions do not attach or detach the related vnode (use
 * hammer_get_vnode() for that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, struct hammer_node **cache,
		 u_int64_t obj_id, hammer_tid_t asof, int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->bio_list);
	TAILQ_INIT(&ip->bio_alt_list);
	TAILQ_INIT(&ip->target_list);

	/*
	 * Locate the on-disk inode.
	 */
retry:
	hammer_init_cursor(trans, &cursor, cache, NULL);
	cursor.key_beg.localization = HAMMER_LOCALIZE_INODE;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;
	cursor.asof = iinfo.obj_asof;
	cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
		       HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
		ip->ino_data = cursor.data->inode;
		hammer_cache_node(cursor.node, &ip->cache[0]);
		if (cache)
			hammer_cache_node(cursor.node, cache);
	}

	/*
	 * On success load the inode's record and data and insert the
	 * inode into the B-Tree.  It is possible to race another lookup
	 * insertion of the same inode so deal with that condition too.
	 *
	 * The cursor's locked node interlocks against others creating and
	 * destroying ip while we were blocked.
	 */
	if (*errorp == 0) {
		hammer_ref(&ip->lock);
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_uncache_node(&ip->cache[0]);
			hammer_uncache_node(&ip->cache[1]);
			KKASSERT(ip->lock.refs == 1);
			--hammer_count_inodes;
			kfree(ip, M_HAMMER);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		/*
		 * Do not panic on read-only accesses which fail, particularly
		 * historical accesses where the snapshot might not have
		 * complete connectivity.
		 */
		if ((flags & HAMMER_INODE_RO) == 0) {
			kprintf("hammer_get_inode: failed ip %p obj_id %016llx cursor %p error %d\n",
				ip, ip->obj_id, &cursor, *errorp);
		}
		--hammer_count_inodes;
		kfree(ip, M_HAMMER);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);
	return(ip);
}

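/*
 * A typical frontend lookup pairs hammer_get_inode() with
 * hammer_get_vnode(), e.g. (sketch, error unwinding trimmed; the
 * attached vnode holds its own reference on the inode):
 *
 *	ip = hammer_get_inode(trans, NULL, obj_id, hmp->asof, 0, &error);
 *	if (error == 0) {
 *		error = hammer_get_vnode(ip, &vp);
 *		hammer_rel_inode(ip, 0);
 *	}
 */
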
/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.
 *
 * The inode is created in-memory.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred, hammer_inode_t dip,
		    struct hammer_inode **ipp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_inode_t ip;
	uid_t xuid;

	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	ip->obj_id = hammer_alloc_objid(trans, dip);
	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flush_state = HAMMER_FST_IDLE;
	ip->flags = HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES;
	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->bio_list);
	TAILQ_INIT(&ip->bio_alt_list);
	TAILQ_INIT(&ip->target_list);

	ip->ino_leaf.atime = trans->time;
	ip->ino_data.mtime = trans->time;
	ip->ino_data.size = 0;
	ip->ino_data.nlinks = 0;

	ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_leaf.base.localization = HAMMER_LOCALIZE_INODE;
	ip->ino_leaf.base.obj_id = ip->obj_id;
	ip->ino_leaf.base.key = 0;
	ip->ino_leaf.base.create_tid = 0;
	ip->ino_leaf.base.delete_tid = 0;
	ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->time;
	ip->ino_data.parent_obj_id = (dip) ? dip->ino_leaf.base.obj_id : 0;

	switch(ip->ino_leaf.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	xuid = hammer_to_unix_xid(&dip->ino_data.uid);
	xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
				     &vap->va_mode);
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
	else
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
	else
		ip->ino_data.gid = dip->ino_data.gid;

	hammer_ref(&ip->lock);
	if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		hammer_unref(&ip->lock);
		panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
	}
	*ipp = ip;
	return(0);
}

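/*
 * Sketch of a typical caller (a VOP create path; the directory-entry
 * addition shown lives elsewhere and is illustrative here):
 *
 *	error = hammer_create_inode(&trans, vap, cred, dip, &nip);
 *	if (error == 0)
 *		error = hammer_ip_add_directory(&trans, dip, ncp, nip);
 */
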
/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_record_t record;
	int error;

	/*
	 * If the inode has a presence on-disk then locate it and mark
	 * it deleted, setting DELONDISK.
	 *
	 * The record may or may not be physically deleted, depending on
	 * the retention policy.
	 */
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (hammer_debug_inode)
			kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
		if (error) {
			kprintf("error %d\n", error);
			Debugger("hammer_update_inode");
		}

		if (error == 0) {
			error = hammer_ip_delete_record(cursor, trans->tid);
			if (hammer_debug_inode)
				kprintf(" error %d\n", error);
			if (error && error != EDEADLK) {
				kprintf("error %d\n", error);
				Debugger("hammer_update_inode2");
			}
			if (error == 0)
				ip->flags |= HAMMER_INODE_DELONDISK;
			hammer_cache_node(cursor->node, &ip->cache[0]);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("IPDED %p %d\n", ip, error);
		}
	} else {
		error = 0;
	}

	/*
	 * Ok, write out the initial record or a new record (after deleting
	 * the old one), unless the DELETED flag is set.  This routine will
	 * clear DELONDISK if it writes out a record.
	 *
	 * Update our inode statistics if this is the first application of
	 * the inode on-disk.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		/*
		 * Generate a record and write it to the media
		 */
		record = hammer_alloc_mem_record(ip, 0);
		record->type = HAMMER_MEM_RECORD_INODE;
		record->flush_state = HAMMER_FST_FLUSH;
		record->leaf = ip->sync_ino_leaf;
		record->leaf.base.create_tid = trans->tid;
		record->leaf.data_len = sizeof(ip->sync_ino_data);
		record->data = (void *)&ip->sync_ino_data;
		record->flags |= HAMMER_RECF_INTERLOCK_BE;
		for (;;) {
			error = hammer_ip_sync_record_cursor(cursor, record);
			if (hammer_debug_inode)
				kprintf("GENREC %p rec %08x %d\n",
					ip, record->flags, error);
			if (error != EDEADLK)
				break;
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("GENREC reinit %d\n", error);
			if (error)
				break;
		}
		if (error) {
			kprintf("error %d\n", error);
			Debugger("hammer_update_inode3");
		}

		/*
		 * The record isn't managed by the inode's record tree,
		 * destroy it whether we succeed or fail.
		 */
		record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
		record->flags |= HAMMER_RECF_DELETED_FE;
		record->flush_state = HAMMER_FST_IDLE;
		hammer_rel_mem_record(record);

		if (error == 0) {
			if (hammer_debug_inode)
				kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
			ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
					    HAMMER_INODE_ITIMES);
			ip->flags &= ~HAMMER_INODE_DELONDISK;

			/*
			 * Root volume count of inodes
			 */
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
				if (hammer_debug_inode)
					kprintf("NOWONDISK %p\n", ip);
			}
		}
	}

	/*
	 * If the inode has been destroyed, clean out any left-over flags
	 * that may have been set by the frontend.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
				    HAMMER_INODE_ITIMES);
	}
	return(error);
}

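/*
 * The on-disk transitions driven by hammer_update_inode(), in brief:
 *
 *	(not ONDISK)	  write record		-> ONDISK
 *	ONDISK		  delete old record	-> ONDISK|DELONDISK
 *	ONDISK|DELONDISK  write new record	-> ONDISK
 *	DELETED		  old record deleted, no new record written
 */
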
/*
 * Update only the itimes fields.  This is done non-historically.  The
 * record is updated in-place on the disk.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	struct hammer_btree_leaf_elm *leaf;
	int error;

	error = 0;
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (error) {
			kprintf("error %d\n", error);
			Debugger("hammer_update_itimes1");
		}
		if (error == 0) {
			/*
			 * Do not generate UNDO records for atime updates.
			 */
			leaf = cursor->leaf;
			hammer_modify_node(trans, cursor->node,
					   &leaf->atime, sizeof(leaf->atime));
			leaf->atime = ip->sync_ino_leaf.atime;
			hammer_modify_node_done(cursor->node);
			/*rec->ino_mtime = ip->sync_ino_rec.ino_mtime;*/
			ip->sync_flags &= ~HAMMER_INODE_ITIMES;
			/* XXX recalculate crc */
			hammer_cache_node(cursor->node, &ip->cache[0]);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
		}
	}
	return(error);
}

/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	hammer_mount_t hmp = ip->hmp;

	/*
	 * Handle disposition when dropping the last ref.
	 */
	for (;;) {
		if (ip->lock.refs == 1) {
			/*
			 * Determine whether on-disk action is needed for
			 * the inode's final disposition.
			 */
			KKASSERT(ip->vp == NULL);
			hammer_inode_unloadable_check(ip, 0);
			if (ip->flags & HAMMER_INODE_MODMASK) {
				hammer_flush_inode(ip, 0);
			} else if (ip->lock.refs == 1) {
				hammer_unload_inode(ip);
				break;
			}
		} else {
			if (flush)
				hammer_flush_inode(ip, 0);

			/*
			 * The inode still has multiple refs, try to drop
			 * one ref.
			 */
			KKASSERT(ip->lock.refs >= 1);
			if (ip->lock.refs > 1) {
				hammer_unref(&ip->lock);
				break;
			}
		}
	}

	/*
	 * XXX bad hack until I add code to track inodes in SETUP.  We
	 * can queue a lot of inodes to the syncer but if we don't wake
	 * it up the undo sets will be too large or too many unflushed
	 * records will build up and blow our malloc limit.
	 */
	if (++hmp->reclaim_count > 256) {
		hmp->reclaim_count = 0;
		hammer_flusher_async(hmp);
	}
}

/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * This can only be called in the context of the flusher.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
	KASSERT(ip->lock.refs == 1,
		("hammer_unload_inode: %d refs\n", ip->lock.refs));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT(ip->lock.lockcount == 0);
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->target_list));
	KKASSERT(TAILQ_EMPTY(&ip->bio_list));
	KKASSERT(TAILQ_EMPTY(&ip->bio_alt_list));

	RB_REMOVE(hammer_ino_rb_tree, &ip->hmp->rb_inos_root, ip);

	hammer_uncache_node(&ip->cache[0]);
	hammer_uncache_node(&ip->cache[1]);
	hammer_clear_objid(ip);
	--hammer_count_inodes;
	kfree(ip, M_HAMMER);

	return(0);
}

/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY:	Inode data has been updated
 * HAMMER_INODE_XDIRTY:	Dirty in-memory records
 * HAMMER_INODE_BUFS:	Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ITIMES:	mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
{
	KKASSERT((ip->flags & HAMMER_INODE_RO) == 0 ||
		 (flags & (HAMMER_INODE_DDIRTY |
			   HAMMER_INODE_XDIRTY | HAMMER_INODE_BUFS |
			   HAMMER_INODE_DELETED | HAMMER_INODE_ITIMES)) == 0);

	ip->flags |= flags;
}

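/*
 * For example, a frontend attribute change would typically be recorded
 * as (sketch; new_mode is illustrative):
 *
 *	ip->ino_data.mode = new_mode;
 *	hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
 */
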
/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse.  Once requested HAMMER will attempt to actively flush the inode
 * until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
	hammer_record_t depend;
	int r, good;

	/*
	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
	 * state we have to put it back into an IDLE state so we can
	 * drop the extra ref.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		if (ip->flush_state == HAMMER_FST_SETUP) {
			ip->flush_state = HAMMER_FST_IDLE;
			hammer_rel_inode(ip, 0);
		}
		return;
	}

	/*
	 * Our flush action will depend on the current state.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * We have no dependencies and can flush immediately.  Some
		 * of our children may not be flushable so we have to re-test
		 * with that additional knowledge.
		 */
		hammer_flush_inode_core(ip, flags);
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Recurse upwards through dependencies via target_list
		 * and start their flusher actions going if possible.
		 *
		 * 'good' is our connectivity.  -1 means we have none and
		 * can't flush, 0 means there weren't any dependencies, and
		 * 1 means we have good connectivity.
		 */
		good = 0;
		TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
			r = hammer_setup_parent_inodes(depend);
			if (r < 0 && good == 0)
				good = -1;
			if (r > 0)
				good = 1;
		}

		/*
		 * We can continue if good >= 0.  Determine how many records
		 * under our inode can be flushed (and mark them).
		 */
		if (good >= 0) {
			hammer_flush_inode_core(ip, flags);
		} else {
			ip->flags |= HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp);
			}
		}
		break;
	default:
		/*
		 * We are already flushing, flag the inode to reflush
		 * if needed after it completes its current flush.
		 */
		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
			ip->flags |= HAMMER_INODE_REFLUSH;
		if (flags & HAMMER_FLUSH_SIGNAL) {
			ip->flags |= HAMMER_INODE_RESIGNAL;
			hammer_flusher_async(ip->hmp);
		}
		break;
	}
}

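/*
 * The dependency recursion, pictorially: a directory record on a parent
 * directory links the two inodes, with record->ip being the parent and
 * record->target_ip the child:
 *
 *	record->ip (parent)  <--  record  -->  record->target_ip (child)
 *
 * hammer_flush_inode() walks the child's target_list upwards through
 * such records (hammer_setup_parent_inodes()) before committing the
 * child to a flush group.
 */
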
/*
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.  record->ip is a parent of the caller's inode,
 * and record->target_ip is the caller's inode.
 *
 * Return 1 if the record gives us connectivity.
 *
 * Return 0 if the record is not relevant.
 *
 * Return -1 if we can't resolve the dependency and there is no connectivity.
 */
static int
hammer_setup_parent_inodes(hammer_record_t record)
{
	hammer_mount_t hmp = record->ip->hmp;
	hammer_record_t depend;
	hammer_inode_t ip = record->ip;
	int r, good;

	KKASSERT(record->flush_state != HAMMER_FST_IDLE);

	/*
	 * If the record is already flushing, is it in our flush group?
	 *
	 * If it is in our flush group but it is a general record or a
	 * delete-on-disk, it does not improve our connectivity (return 0),
	 * and if the target inode is not trying to destroy itself we can't
	 * allow the operation yet anyway (the second return -1).
	 */
	if (record->flush_state == HAMMER_FST_FLUSH) {
		if (record->flush_group != hmp->flusher_next) {
			ip->flags |= HAMMER_INODE_REFLUSH;
			return(-1);
		}
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		return(0);
	}

	/*
	 * It must be a setup record.  Try to resolve the setup dependencies
	 * by recursing upwards so we can place ip on the flush list.
	 */
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	good = 0;
	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
		r = hammer_setup_parent_inodes(depend);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;
	}

	/*
	 * We can't flush ip because it has no connectivity (XXX also check
	 * nlinks for pre-existing connectivity!).  Flag it so any resolution
	 * recurses back down.
	 */
	if (good < 0) {
		ip->flags |= HAMMER_INODE_REFLUSH;
		return(good);
	}

	/*
	 * We are go, place the parent inode in a flushing state so we can
	 * place its record in a flushing state.  Note that the parent
	 * may already be flushing.  The record must be in the same flush
	 * group as the parent.
	 */
	if (ip->flush_state != HAMMER_FST_FLUSH)
		hammer_flush_inode_core(ip, HAMMER_FLUSH_RECURSION);
	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	if (record->type == HAMMER_MEM_RECORD_DEL &&
	    (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
		/*
		 * Regardless of flushing state we cannot sync this path if the
		 * record represents a delete-on-disk but the target inode
		 * is not ready to sync its own deletion.
		 *
		 * XXX need to count effective nlinks to determine whether
		 * the flush is ok, otherwise removing a hardlink will
		 * just leave the DEL record to rot.
		 */
		record->target_ip->flags |= HAMMER_INODE_REFLUSH;
		return(-1);
	} else if (ip->flush_group == ip->hmp->flusher_next) {
		/*
		 * This is the record we wanted to synchronize.
		 */
		record->flush_state = HAMMER_FST_FLUSH;
		record->flush_group = ip->flush_group;
		hammer_ref(&record->lock);
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);

		/*
		 * A general or delete-on-disk record does not contribute
		 * to our visibility.  We can still flush it, however.
		 */
		return(0);
	} else {
		/*
		 * We couldn't resolve the dependencies, request that the
		 * inode be flushed when the dependencies can be resolved.
		 */
		ip->flags |= HAMMER_INODE_REFLUSH;
		return(-1);
	}
}

/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, int flags)
{
	int go_count;

	/*
	 * Set flush state and prevent the flusher from cycling into
	 * the next flush group.  Do not place the ip on the list yet.
	 * Inodes not in the idle state get an extra reference.
	 */
	KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
	if (ip->flush_state == HAMMER_FST_IDLE)
		hammer_ref(&ip->lock);
	ip->flush_state = HAMMER_FST_FLUSH;
	ip->flush_group = ip->hmp->flusher_next;
	++ip->hmp->flusher_lock;

	/*
	 * We need to be able to vfsync/truncate from the backend.
	 */
	KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
	if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
		ip->flags |= HAMMER_INODE_VHELD;
		vref(ip->vp);
	}

	/*
	 * Figure out how many in-memory records we can actually flush
	 * (not including inode meta-data, buffers, etc).
	 */
	if (flags & HAMMER_FLUSH_RECURSION) {
		go_count = 1;
	} else {
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_setup_child_callback, NULL);
	}

	/*
	 * This is a more involved test that includes go_count.  If we
	 * can't flush, flag the inode and return.  If go_count is 0 we
	 * are unable to flush any records in our rec_tree and must
	 * ignore the XDIRTY flag.
	 */
	if (go_count == 0) {
		if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
			ip->flags |= HAMMER_INODE_REFLUSH;
			ip->flush_state = HAMMER_FST_SETUP;
			if (ip->flags & HAMMER_INODE_VHELD) {
				ip->flags &= ~HAMMER_INODE_VHELD;
				vrele(ip->vp);
			}
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp);
			}
			if (--ip->hmp->flusher_lock == 0)
				wakeup(&ip->hmp->flusher_lock);
			return;
		}
	}

	/*
	 * Snapshot the state of the inode for the backend flusher.
	 *
	 * The truncation must be retained in the frontend until after
	 * we've actually performed the record deletion.
	 *
	 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
	 * and stays in ip->flags.  Once set, it stays set until the
	 * inode is destroyed.
	 */
	ip->sync_flags = (ip->flags & HAMMER_INODE_MODMASK);
	ip->sync_trunc_off = ip->trunc_off;
	ip->sync_ino_leaf = ip->ino_leaf;
	ip->sync_ino_data = ip->ino_data;
	ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;

	/*
	 * The flusher list inherits our inode and reference.
	 */
	TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
	if (--ip->hmp->flusher_lock == 0)
		wakeup(&ip->hmp->flusher_lock);

	if (flags & HAMMER_FLUSH_SIGNAL)
		hammer_flusher_async(ip->hmp);
}

/*
 * Callback for scan of ip->rec_tree.  Try to include each record in our
 * flush.  ip->flush_group has been set but the inode has not yet been
 * moved into a flushing state.
 *
 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
 * both inodes.
 *
 * We return 1 for any record placed or found in FST_FLUSH, which prevents
 * the caller from shortcutting the flush.
 */
static int
hammer_setup_child_callback(hammer_record_t rec, void *data)
{
	hammer_inode_t target_ip;
	hammer_inode_t ip;
	int r;

	/*
	 * If the record has been deleted by the backend (it's being held
	 * by the frontend in a race), just ignore it.
	 */
	if (rec->flags & HAMMER_RECF_DELETED_BE)
		return(0);

	/*
	 * If the record is in an idle state it has no dependencies and
	 * can be flushed.
	 */
	ip = rec->ip;
	r = 0;

	switch(rec->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * Record has no setup dependency, we can flush it.
		 */
		KKASSERT(rec->target_ip == NULL);
		rec->flush_state = HAMMER_FST_FLUSH;
		rec->flush_group = ip->flush_group;
		hammer_ref(&rec->lock);
		r = 1;
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Record has a setup dependency.  Try to include the
		 * target ip in the flush.
		 *
		 * We have to be careful here, if we do not do the right
		 * thing we can lose track of dirty inodes and the system
		 * will lockup trying to allocate buffers.
		 */
		target_ip = rec->target_ip;
		KKASSERT(target_ip != NULL);
		KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
		if (target_ip->flush_state == HAMMER_FST_FLUSH) {
			/*
			 * If the target IP is already flushing in our group
			 * we are golden, otherwise make sure the target
			 * reflushes.
			 */
			if (target_ip->flush_group == ip->flush_group) {
				rec->flush_state = HAMMER_FST_FLUSH;
				rec->flush_group = ip->flush_group;
				hammer_ref(&rec->lock);
				r = 1;
			} else {
				target_ip->flags |= HAMMER_INODE_REFLUSH;
			}
		} else if (rec->type == HAMMER_MEM_RECORD_ADD) {
			/*
			 * If the target IP is not flushing we can force
			 * it to flush, even if it is unable to write out
			 * any of its own records we have at least one in
			 * hand that we CAN deal with.
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = ip->flush_group;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		} else {
			/*
			 * General or delete-on-disk record.
			 *
			 * XXX this needs help.  If a delete-on-disk we could
			 * disconnect the target.  If the target has its own
			 * dependencies they really need to be flushed.
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = ip->flush_group;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * Record already associated with a flush group.  It had
		 * better be ours.
		 */
		KKASSERT(rec->flush_group == ip->flush_group);
		r = 1;
		break;
	}
	return(r);
}

/*
 * Wait for a previously queued flush to complete.
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
	while (ip->flush_state != HAMMER_FST_IDLE) {
		ip->flags |= HAMMER_INODE_FLUSHW;
		tsleep(&ip->flags, 0, "hmrwin", 0);
	}
}

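/*
 * Example of the synchronous flush pattern used by fsync-style callers
 * (sketch):
 *
 *	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
 *	hammer_wait_inode(ip);
 *
 * The wakeup() side lives in hammer_flush_inode_done() below, keyed on
 * HAMMER_INODE_FLUSHW.
 */
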
/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip)
{
	struct bio *bio;
	int dorel = 0;

	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

	/*
	 * Allow BIOs to queue to the inode's primary bioq again.
	 */
	ip->flags &= ~HAMMER_INODE_WRITE_ALT;

	/*
	 * Merge left-over flags back into the frontend and fix the state.
	 */
	ip->flags |= ip->sync_flags;

	/*
	 * The backend may have adjusted nlinks, so if the adjusted nlinks
	 * does not match the frontend set the frontend's DDIRTY flag again.
	 */
	if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
		ip->flags |= HAMMER_INODE_DDIRTY;

	/*
	 * Reflush any BIOs that wound up in the alt list.  Our inode will
	 * also wind up at the end of the flusher's list.
	 */
	while ((bio = TAILQ_FIRST(&ip->bio_alt_list)) != NULL) {
		TAILQ_REMOVE(&ip->bio_alt_list, bio, bio_act);
		TAILQ_INSERT_TAIL(&ip->bio_list, bio, bio_act);
	}

	/*
	 * Fix up the dirty buffer status.
	 */
	if (TAILQ_FIRST(&ip->bio_list) ||
	    (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree))) {
		ip->flags |= HAMMER_INODE_BUFS;
	}

	/*
	 * Re-set the XDIRTY flag if some of the inode's in-memory records
	 * could not be flushed.
	 */
	if (RB_ROOT(&ip->rec_tree))
		ip->flags |= HAMMER_INODE_XDIRTY;

	/*
	 * Do not lose track of inodes which no longer have vnode
	 * associations, otherwise they may never get flushed again.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
		ip->flags |= HAMMER_INODE_REFLUSH;

	/*
	 * Adjust flush_state.  The target state (idle or setup) shouldn't
	 * be terribly important since we will reflush if we really need
	 * to do anything.  XXX
	 */
	if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
		ip->flush_state = HAMMER_FST_IDLE;
		dorel = 1;
	} else {
		ip->flush_state = HAMMER_FST_SETUP;
	}

	/*
	 * Clean up the vnode ref.
	 */
	if (ip->flags & HAMMER_INODE_VHELD) {
		ip->flags &= ~HAMMER_INODE_VHELD;
		vrele(ip->vp);
	}

	/*
	 * If the frontend made more changes and requested another flush,
	 * then try to get it running.
	 */
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
	}

	/*
	 * Finally, if the frontend is waiting for a flush to complete,
	 * wake it up.
	 */
	if (ip->flush_state != HAMMER_FST_FLUSH) {
		if (ip->flags & HAMMER_INODE_FLUSHW) {
			ip->flags &= ~HAMMER_INODE_FLUSHW;
			wakeup(&ip->flags);
		}
	}
	if (dorel)
		hammer_rel_inode(ip, 0);
}

/*
 * Called from hammer_sync_inode() to synchronize in-memory records
 * to the media.
 */
static int
hammer_sync_record_callback(hammer_record_t record, void *data)
{
	hammer_cursor_t cursor = data;
	hammer_transaction_t trans = cursor->trans;
	int error;

	/*
	 * Skip records that do not belong to the current flush.
	 */
	if (record->flush_state != HAMMER_FST_FLUSH)
		return(0);
	KKASSERT((record->flags & HAMMER_RECF_DELETED_BE) == 0);

	if (record->flush_group != record->ip->flush_group) {
		kprintf("sync_record %p ip %p bad flush group %d %d\n",
			record, record->ip, record->flush_group,
			record->ip->flush_group);
		return(0);
	}
	KKASSERT(record->flush_group == record->ip->flush_group);

	/*
	 * Interlock the record using the BE flag.  Once BE is set the
	 * frontend cannot change the state of FE.
	 *
	 * NOTE: If FE is set prior to us setting BE we still sync the
	 * record out, but the flush completion code converts it to
	 * a delete-on-disk record instead of destroying it.
	 */
	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
	record->flags |= HAMMER_RECF_INTERLOCK_BE;

	/*
	 * If the whole inode is being deleted all on-disk records will
	 * be deleted very soon, we can't sync any new records to disk
	 * because they will be deleted in the same transaction they were
	 * created in (delete_tid == create_tid), which will assert.
	 *
	 * XXX There may be a case with RECORD_ADD with DELETED_FE set
	 * that we currently panic on.
	 */
	if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
		switch(record->type) {
		case HAMMER_MEM_RECORD_GENERAL:
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
			error = 0;
			goto done;
		case HAMMER_MEM_RECORD_ADD:
			panic("hammer_sync_record_callback: illegal add "
			      "during inode deletion record %p", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_INODE:
			panic("hammer_sync_record_callback: attempt to "
			      "sync inode record %p?", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_DEL:
			/*
			 * Follow through and issue the on-disk deletion
			 */
			break;
		}
	}

	/*
	 * If DELETED_FE is set we may have already sent dependent pieces
	 * to the disk and we must flush the record as if it hadn't been
	 * deleted.  This creates a bit of a mess because we have to
	 * have ip_sync_record convert the record to MEM_RECORD_DEL before
	 * it inserts the B-Tree record.  Otherwise the media sync might
	 * be visible to the frontend.
	 */
	if (record->flags & HAMMER_RECF_DELETED_FE) {
		if (record->type == HAMMER_MEM_RECORD_ADD) {
			record->flags |= HAMMER_RECF_CONVERT_DELETE;
		} else {
			KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
			return(0);
		}
	}

	/*
	 * Assign the create_tid for new records.  Deletions already
	 * have the record's entire key properly set up.
	 */
	if (record->type != HAMMER_MEM_RECORD_DEL)
		record->leaf.base.create_tid = trans->tid;
	for (;;) {
		error = hammer_ip_sync_record_cursor(cursor, record);
		if (error != EDEADLK)
			break;
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor,
					   &record->ip->cache[0],
					   record->ip);
		if (error)
			break;
	}
	record->flags &= ~HAMMER_RECF_CONVERT_DELETE;

	if (error) {
		error = -error;
		if (error != -ENOSPC) {
			kprintf("hammer_sync_record_callback: sync failed rec "
				"%p, error %d\n", record, error);
			Debugger("sync failed rec");
		}
	}
done:
	hammer_flush_record_done(record, error);
	return(error);
}

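/*
 * Lifecycle of the BE interlock used above, in brief:
 *
 *	backend:  record->flags |= HAMMER_RECF_INTERLOCK_BE;
 *	backend:  hammer_ip_sync_record_cursor(cursor, record);
 *	frontend: may flag HAMMER_RECF_DELETED_FE but cannot otherwise
 *		  change the record while INTERLOCK_BE is set.
 *	backend:  hammer_flush_record_done(record, error) drops the
 *		  interlock and destroys or converts the record.
 */
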
/*
 * XXX error handling
 */
int
hammer_sync_inode(hammer_inode_t ip)
{
	struct hammer_transaction trans;
	struct hammer_cursor cursor;
	struct bio *bio;
	hammer_record_t depend;
	hammer_record_t next;
	int error, tmp_error;
	u_int64_t nlinks;

	if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
		return(0);

	hammer_start_transaction_fls(&trans, ip->hmp);
	error = hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
	if (error)
		goto done;

	/*
	 * Any directory records referencing this inode which are not in
	 * our current flush group must adjust our nlink count for the
	 * purposes of synchronization to disk.
	 *
	 * Records which are in our flush group can be unlinked from our
	 * inode now, potentially allowing the inode to be physically
	 * deleted.
	 */
	nlinks = ip->ino_data.nlinks;
	next = TAILQ_FIRST(&ip->target_list);
	while ((depend = next) != NULL) {
		next = TAILQ_NEXT(depend, target_entry);
		if (depend->flush_state == HAMMER_FST_FLUSH &&
		    depend->flush_group == ip->hmp->flusher_act) {
			/*
			 * If this is an ADD that was deleted by the frontend
			 * the frontend nlinks count will have already been
			 * decremented, but the backend is going to sync its
			 * directory entry and must account for it.  The
			 * record will be converted to a delete-on-disk when
			 * it gets synced.
			 *
			 * If the ADD was not deleted by the frontend we
			 * can remove the dependency from our target_list.
			 */
			if (depend->flags & HAMMER_RECF_DELETED_FE) {
				++nlinks;
			} else {
				TAILQ_REMOVE(&ip->target_list, depend,
					     target_entry);
				depend->target_ip = NULL;
			}
		} else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
			/*
			 * Not part of our flush group
			 */
			KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
			switch(depend->type) {
			case HAMMER_MEM_RECORD_ADD:
				--nlinks;
				break;
			case HAMMER_MEM_RECORD_DEL:
				++nlinks;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Set dirty if we had to modify the link count.
	 */
	if (ip->sync_ino_data.nlinks != nlinks) {
		KKASSERT((int64_t)nlinks >= 0);
		ip->sync_ino_data.nlinks = nlinks;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
	}

	/*
	 * Queue up as many dirty buffers as we can then set a flag to
	 * cause any further BIOs to go to the alternative queue.
	 */
	if (ip->flags & HAMMER_INODE_VHELD)
		error = vfsync(ip->vp, MNT_NOWAIT, 1, NULL, NULL);
	ip->flags |= HAMMER_INODE_WRITE_ALT;

	/*
	 * The buffer cache may contain dirty buffers beyond the inode
	 * state we copied from the frontend to the backend.  Because
	 * we are syncing our buffer cache on the backend, resync
	 * the truncation point and the file size so we don't wipe out
	 * any data.
	 *
	 * Syncing the buffer cache on the frontend has serious problems
	 * because it prevents us from passively queueing dirty inodes
	 * to the backend (the BIO's could stall indefinitely).
	 */
	if (ip->flags & HAMMER_INODE_TRUNCATED) {
		ip->sync_trunc_off = ip->trunc_off;
		ip->sync_flags |= HAMMER_INODE_TRUNCATED;
	}
	if (ip->sync_ino_data.size != ip->ino_data.size) {
		ip->sync_ino_data.size = ip->ino_data.size;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
	}

	/*
	 * If there is a truncation queued destroy any data past the
	 * (aligned) truncation point.  Userland will have dealt with the
	 * buffer containing the truncation point for us.
	 *
	 * We don't flush pending frontend data buffers until after we've
	 * dealt with the truncation.
	 *
	 * Don't bother if the inode is or has been deleted.
	 */
	if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
		/*
		 * Interlock trunc_off.  The VOP front-end may continue to
		 * make adjustments to it while we are blocked.
		 */
		off_t trunc_off;
		off_t aligned_trunc_off;

		trunc_off = ip->sync_trunc_off;
		aligned_trunc_off = (trunc_off + HAMMER_BUFMASK) &
				    ~HAMMER_BUFMASK64;

		/*
		 * Delete any whole blocks on-media.  The front-end has
		 * already cleaned out any partial block and made it
		 * pending.  The front-end may have updated trunc_off
		 * while we were blocked so do not just unconditionally
		 * set it to the maximum offset.
		 */
		error = hammer_ip_delete_range(&cursor, ip,
					       aligned_trunc_off,
					       0x7FFFFFFFFFFFFFFFLL);
		if (error)
			Debugger("hammer_ip_delete_range errored");
		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
		if (ip->trunc_off >= trunc_off) {
			ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
			ip->flags &= ~HAMMER_INODE_TRUNCATED;
		}
	}

	/*
	 * Now sync related records.  These will typically be directory
	 * entries or delete-on-disk records.
	 *
	 * Not all records will be flushed, but clear XDIRTY anyway.  We
	 * will set it again in the frontend hammer_flush_inode_done()
	 * if records remain.
	 */
	if (error == 0) {
		tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				    hammer_sync_record_callback, &cursor);
		if (tmp_error)
			error = tmp_error;
		if (RB_EMPTY(&ip->rec_tree))
			ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
	}

	/*
	 * If we are deleting the inode the frontend had better not have
	 * any active references on elements making up the inode.
	 */
	if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
	    RB_EMPTY(&ip->rec_tree) &&
	    (ip->sync_flags & HAMMER_INODE_DELETING) &&
	    (ip->flags & HAMMER_INODE_DELETED) == 0) {
		int count1 = 0;

		ip->flags |= HAMMER_INODE_DELETED;
		error = hammer_ip_delete_range_all(&cursor, ip, &count1);
		if (error == 0) {
			ip->sync_flags &= ~HAMMER_INODE_DELETING;
			ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
			KKASSERT(RB_EMPTY(&ip->rec_tree));

			/*
			 * Set delete_tid in both the frontend and backend
			 * copy of the inode record.  The DELETED flag handles
			 * this, do not set RDIRTY.
			 */
			ip->ino_leaf.base.delete_tid = trans.tid;
			ip->sync_ino_leaf.base.delete_tid = trans.tid;

			/*
			 * Adjust the inode count in the volume header
			 */
			if (ip->flags & HAMMER_INODE_ONDISK) {
				hammer_modify_volume_field(&trans,
							   trans.rootvol,
							   vol0_stat_inodes);
				--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans.rootvol);
			}
		} else {
			ip->flags &= ~HAMMER_INODE_DELETED;
			Debugger("hammer_ip_delete_range_all errored");
		}
	}

	/*
	 * Flush any queued BIOs.  These will just biodone() the IO's if
	 * the inode has been deleted.
	 */
	while ((bio = TAILQ_FIRST(&ip->bio_list)) != NULL) {
		TAILQ_REMOVE(&ip->bio_list, bio, bio_act);
		tmp_error = hammer_dowrite(&cursor, ip, bio);
		if (tmp_error)
			error = tmp_error;
	}
	ip->sync_flags &= ~HAMMER_INODE_BUFS;

	if (error)
		Debugger("RB_SCAN errored");

	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 * DELETED and ONDISK are managed only in ip->flags.
	 */
	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags.
		 * The delete flag takes care of things.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY|
				    HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
				    HAMMER_INODE_DELETING);
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY|
				    HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
				    HAMMER_INODE_DELETING);
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t record = RB_ROOT(&ip->rec_tree);
			hammer_ref(&record->lock);
			KKASSERT(record->lock.refs == 1);
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
			hammer_rel_mem_record(record);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set both dirty flags
		 * to force an initial record to be written.  Also set
		 * the create_tid for the inode.
		 *
		 * Set create_tid in both the frontend and backend
		 * copy of the inode record.
		 */
		ip->ino_leaf.base.create_tid = trans.tid;
		ip->sync_ino_leaf.base.create_tid = trans.tid;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
		break;
	}

	/*
	 * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
	 * is already on-disk the old record is marked as deleted.
	 *
	 * If DELETED is set hammer_update_inode() will delete the existing
	 * record without writing out a new one.
	 *
	 * If *ONLY* the ITIMES flag is set we can update the record in-place.
	 */
	if (ip->flags & HAMMER_INODE_DELETED) {
		error = hammer_update_inode(&cursor, ip);
	} else
	if ((ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) ==
	    HAMMER_INODE_ITIMES) {
		error = hammer_update_itimes(&cursor, ip);
	} else
	if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) {
		error = hammer_update_inode(&cursor, ip);
	}
	if (error)
		Debugger("hammer_update_itimes/inode errored");
done:
	/*
	 * Save the TID we used to sync the inode with to make sure we
	 * do not improperly reuse it.
	 */
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return(error);
}

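/*
 * For reference, the order of operations above: adjust nlinks against
 * in-flush directory records, vfsync() dirty buffers, apply any queued
 * truncation, sync the in-memory records, handle whole-inode deletion,
 * flush queued BIOs, and finally update the on-disk inode record (or
 * just its itimes).
 */
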
/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
	struct bio *bio;
	struct vnode *vp;

	/*
	 * Set the DELETING flag when the link count drops to 0 and the
	 * OS no longer has any opens on the inode.
	 *
	 * The backend will clear DELETING (a mod flag) and set DELETED
	 * (a state flag) when it is actually able to perform the
	 * operation.
	 */
	if (ip->ino_data.nlinks == 0 &&
	    (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
		ip->flags |= HAMMER_INODE_DELETING;
		ip->flags |= HAMMER_INODE_TRUNCATED;
		ip->trunc_off = 0;
		vp = NULL;
		if (getvp) {
			if (hammer_get_vnode(ip, &vp) != 0)
				return;
		}

		/*
		 * biodone any buffers with pending IO.  These buffers are
		 * holding a BUF_KERNPROC() exclusive lock and our
		 * vtruncbuf() call will deadlock if any remain.
		 *
		 * (interlocked against hammer_vop_strategy_write via
		 *  HAMMER_INODE_DELETING|HAMMER_INODE_DELETED).
		 */
		while ((bio = TAILQ_FIRST(&ip->bio_list)) != NULL) {
			TAILQ_REMOVE(&ip->bio_list, bio, bio_act);
			bio->bio_buf->b_resid = 0;
			biodone(bio);
		}
		while ((bio = TAILQ_FIRST(&ip->bio_alt_list)) != NULL) {
			TAILQ_REMOVE(&ip->bio_alt_list, bio, bio_act);
			bio->bio_buf->b_resid = 0;
			biodone(bio);
		}

		/*
		 * Final cleanup
		 */
		if (ip->vp) {
			vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
			vnode_pager_setsize(ip->vp, 0);
		}
		if (getvp)
			vput(vp);
	}
}

/*
 * Re-test an inode when a dependency had gone away to see if we
 * can chain flush it.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		hammer_ref(&ip->lock);
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
		hammer_rel_inode(ip, 0);
	}
}
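
/*
 * hammer_test_inode() is typically chained from dependency teardown,
 * e.g. (sketch) when a flushed record is released:
 *
 *	if (record->target_ip)
 *		hammer_test_inode(record->target_ip);
 */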