/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.67 2008/06/09 04:19:10 dillon Exp $
 */

#include "hammer.h"
#include <vm/vm_extern.h>

static int hammer_unload_inode(struct hammer_inode *ip);
static void hammer_flush_inode_core(hammer_inode_t ip, int flags);
static int hammer_setup_child_callback(hammer_record_t rec, void *data);
static int hammer_setup_parent_inodes(hammer_record_t record);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	/*
	 * If the inode no longer has visibility in the filesystem and is
	 * fairly clean, try to recycle it immediately.  This can deadlock
	 * in vfsync() if we aren't careful.
	 *
	 * Do not queue the inode to the flusher if we still have visibility,
	 * otherwise namespace calls such as chmod will unnecessarily generate
	 * multiple inode updates.
	 */
	hammer_inode_unloadable_check(ip, 0);
	if (ip->ino_data.nlinks == 0) {
		if (ip->flags & HAMMER_INODE_MODMASK)
			hammer_flush_inode(ip, 0);
		vrecycle(ap->a_vp);
	}
	return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;
	struct vnode *vp;

	vp = ap->a_vp;

	if ((ip = vp->v_data) != NULL) {
		vp->v_data = NULL;
		ip->vp = NULL;
		if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
			++hammer_count_reclaiming;
			++ip->hmp->inode_reclaims;
			ip->flags |= HAMMER_INODE_RECLAIM;
		}
		hammer_rel_inode(ip, 1);
	}
	return(0);
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
	hammer_mount_t hmp;
	struct vnode *vp;
	int error = 0;

	hmp = ip->hmp;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				hammer_unlock(&ip->lock);
				vp = *vpp;
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;
			vp->v_type =
				hammer_get_vnode_type(ip->ino_data.obj_type);

			if (ip->flags & HAMMER_INODE_RECLAIM) {
				--hammer_count_reclaiming;
				--hmp->inode_reclaims;
				ip->flags &= ~HAMMER_INODE_RECLAIM;
				if (hmp->flags & HAMMER_MOUNT_WAITIMAX)
					hammer_inode_wakereclaims(hmp);
			}

			switch(ip->ino_data.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
				break;
			default:
				break;
			}

			/*
			 * Only mark as the root vnode if the ip is not
			 * historical, otherwise the VFS cache will get
			 * confused.  The other half of the special handling
			 * is in hammer_vop_nlookupdotdot().
			 */
			if (ip->obj_id == HAMMER_OBJID_ROOT &&
			    ip->obj_asof == hmp->asof) {
				vp->v_flag |= VROOT;
			}

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG)
				vinitvmio(vp, ip->ino_data.size);
			break;
		}

		/*
		 * loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp)
				break;
			vput(vp);
		}
	}
	*vpp = vp;
	return(error);
}

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These
 * functions do not attach or detach the related vnode (use
 * hammer_get_vnode() for that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, struct hammer_node **cache,
		 u_int64_t obj_id, hammer_tid_t asof, int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	/*
	 * Locate the on-disk inode.
	 */
retry:
	hammer_init_cursor(trans, &cursor, cache, NULL);
	cursor.key_beg.localization = HAMMER_LOCALIZE_INODE;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;
	cursor.asof = iinfo.obj_asof;
	cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
		       HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
		ip->ino_data = cursor.data->inode;
		hammer_cache_node(cursor.node, &ip->cache[0]);
		if (cache)
			hammer_cache_node(cursor.node, cache);
	}

	/*
	 * On success load the inode's record and data and insert the
	 * inode into the B-Tree.  It is possible to race another lookup
	 * insertion of the same inode so deal with that condition too.
	 *
	 * The cursor's locked node interlocks against others creating and
	 * destroying ip while we were blocked.
	 */
	if (*errorp == 0) {
		hammer_ref(&ip->lock);
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_uncache_node(&ip->cache[0]);
			hammer_uncache_node(&ip->cache[1]);
			KKASSERT(ip->lock.refs == 1);
			--hammer_count_inodes;
			kfree(ip, M_HAMMER);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		/*
		 * Do not panic on read-only accesses which fail, particularly
		 * historical accesses where the snapshot might not have
		 * complete connectivity.
		 */
		if ((flags & HAMMER_INODE_RO) == 0) {
			kprintf("hammer_get_inode: failed ip %p obj_id %016llx cursor %p error %d\n",
				ip, ip->obj_id, &cursor, *errorp);
		}
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}
		hmp->rsv_databufs -= ip->rsv_databufs;
		ip->rsv_databufs = 0;			       /* sanity */

		--hammer_count_inodes;
		kfree(ip, M_HAMMER);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);
	return(ip);
}
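
/*
 * Illustrative sketch (not part of the original file): the typical
 * frontend pattern pairs hammer_get_inode() with hammer_get_vnode(),
 * since hammer_get_inode() returns a referenced but vnode-less inode.
 * The function name and error handling here are hypothetical, shown
 * only to clarify the reference protocol described above.
 */
#if 0
static int
example_lookup(hammer_transaction_t trans, u_int64_t obj_id,
	       hammer_tid_t asof, struct vnode **vpp)
{
	struct hammer_inode *ip;
	int error;

	/* returns a referenced, unlocked inode (NULL on error) */
	ip = hammer_get_inode(trans, NULL, obj_id, asof, 0, &error);
	if (ip == NULL)
		return (error);

	/* attach and lock the vnode; the vnode holds its own inode ref */
	error = hammer_get_vnode(ip, vpp);
	hammer_rel_inode(ip, 0);	/* drop the lookup reference */
	return (error);
}
#endif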

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.
 *
 * The inode is created in-memory.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred, hammer_inode_t dip,
		    struct hammer_inode **ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;

	hmp = trans->hmp;
	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	ip->obj_id = hammer_alloc_objid(trans, dip);
	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flush_state = HAMMER_FST_IDLE;
	ip->flags = HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES;
	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	ip->ino_leaf.atime = trans->time;
	ip->ino_data.mtime = trans->time;
	ip->ino_data.size = 0;
	ip->ino_data.nlinks = 0;

	/*
	 * A nohistory designator on the parent directory is inherited by
	 * the child.
	 */
	ip->ino_data.uflags = dip->ino_data.uflags &
			      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);

	ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_leaf.base.localization = HAMMER_LOCALIZE_INODE;
	ip->ino_leaf.base.obj_id = ip->obj_id;
	ip->ino_leaf.base.key = 0;
	ip->ino_leaf.base.create_tid = 0;
	ip->ino_leaf.base.delete_tid = 0;
	ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->time;
	ip->ino_data.parent_obj_id = (dip) ? dip->ino_leaf.base.obj_id : 0;

	switch(ip->ino_leaf.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	xuid = hammer_to_unix_xid(&dip->ino_data.uid);
	xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
				     &vap->va_mode);
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
	else
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
	else
		ip->ino_data.gid = dip->ino_data.gid;

	hammer_ref(&ip->lock);
	if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		hammer_unref(&ip->lock);
		panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
	}
	*ipp = ip;
	return(0);
}
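
/*
 * Illustrative sketch (hypothetical, not original code): a VOP create
 * path allocates the in-memory inode and then attaches a vnode to hand
 * back to the caller.  The directory-entry step a real create path would
 * perform between these two calls is elided here.
 */
#if 0
static int
example_create(hammer_transaction_t trans, hammer_inode_t dip,
	       struct vattr *vap, struct ucred *cred, struct vnode **vpp)
{
	struct hammer_inode *nip;
	int error;

	error = hammer_create_inode(trans, vap, cred, dip, &nip);
	if (error)
		return (error);
	error = hammer_get_vnode(nip, vpp);
	hammer_rel_inode(nip, 0);
	return (error);
}
#endif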

/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_record_t record;
	int error;

retry:
	error = 0;

	/*
	 * If the inode has a presence on-disk then locate it and mark
	 * it deleted, setting DELONDISK.
	 *
	 * The record may or may not be physically deleted, depending on
	 * the retention policy.
	 */
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (hammer_debug_inode)
			kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
		if (error) {
			kprintf("error %d\n", error);
			Debugger("hammer_update_inode");
		}

		if (error == 0) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			if (hammer_debug_inode)
				kprintf(" error %d\n", error);
			if (error && error != EDEADLK) {
				kprintf("error %d\n", error);
				Debugger("hammer_update_inode2");
			}
			if (error == 0)
				ip->flags |= HAMMER_INODE_DELONDISK;
			hammer_cache_node(cursor->node, &ip->cache[0]);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("IPDED %p %d\n", ip, error);
			if (error == 0)
				goto retry;
		}
	}

	/*
	 * Ok, write out the initial record or a new record (after deleting
	 * the old one), unless the DELETED flag is set.  This routine will
	 * clear DELONDISK if it writes out a record.
	 *
	 * Update our inode statistics if this is the first application of
	 * the inode on-disk.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		/*
		 * Generate a record and write it to the media
		 */
		record = hammer_alloc_mem_record(ip, 0);
		record->type = HAMMER_MEM_RECORD_INODE;
		record->flush_state = HAMMER_FST_FLUSH;
		record->leaf = ip->sync_ino_leaf;
		record->leaf.base.create_tid = trans->tid;
		record->leaf.data_len = sizeof(ip->sync_ino_data);
		record->data = (void *)&ip->sync_ino_data;
		record->flags |= HAMMER_RECF_INTERLOCK_BE;
		for (;;) {
			error = hammer_ip_sync_record_cursor(cursor, record);
			if (hammer_debug_inode)
				kprintf("GENREC %p rec %08x %d\n",
					ip, record->flags, error);
			if (error != EDEADLK)
				break;
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("GENREC reinit %d\n", error);
			if (error)
				break;
		}
		if (error) {
			kprintf("error %d\n", error);
			Debugger("hammer_update_inode3");
		}

		/*
		 * The record isn't managed by the inode's record tree,
		 * destroy it whether we succeed or fail.
		 */
		record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
		record->flags |= HAMMER_RECF_DELETED_FE;
		record->flush_state = HAMMER_FST_IDLE;
		hammer_rel_mem_record(record);

		if (error == 0) {
			if (hammer_debug_inode)
				kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
			ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
					    HAMMER_INODE_ITIMES);
			ip->flags &= ~HAMMER_INODE_DELONDISK;

			/*
			 * Root volume count of inodes
			 */
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
				if (hammer_debug_inode)
					kprintf("NOWONDISK %p\n", ip);
			}
		}
	}

	/*
	 * If the inode has been destroyed, clean out any left-over flags
	 * that may have been set by the frontend.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
				    HAMMER_INODE_ITIMES);
	}
	return(error);
}

/*
 * Update only the itimes fields.  This is done non-historically.  The
 * record is updated in-place on the disk.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	struct hammer_btree_leaf_elm *leaf;
	int error;

retry:
	error = 0;
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (error) {
			kprintf("error %d\n", error);
			Debugger("hammer_update_itimes1");
		}
		if (error == 0) {
			/*
			 * Do not generate UNDO records for atime updates.
			 */
			leaf = cursor->leaf;
			hammer_modify_node(trans, cursor->node,
					   &leaf->atime, sizeof(leaf->atime));
			leaf->atime = ip->sync_ino_leaf.atime;
			hammer_modify_node_done(cursor->node);
			/*rec->ino_mtime = ip->sync_ino_rec.ino_mtime;*/
			ip->sync_flags &= ~HAMMER_INODE_ITIMES;
			/* XXX recalculate crc */
			hammer_cache_node(cursor->node, &ip->cache[0]);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (error == 0)
				goto retry;
		}
	}
	return(error);
}

/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	hammer_mount_t hmp = ip->hmp;

	/*
	 * Handle disposition when dropping the last ref.
	 */
	for (;;) {
		if (ip->lock.refs == 1) {
			/*
			 * Determine whether on-disk action is needed for
			 * the inode's final disposition.
			 */
			KKASSERT(ip->vp == NULL);
			hammer_inode_unloadable_check(ip, 0);
			if (ip->flags & HAMMER_INODE_MODMASK) {
				if (hmp->rsv_inodes > desiredvnodes) {
					hammer_flush_inode(ip,
							   HAMMER_FLUSH_SIGNAL);
				} else {
					hammer_flush_inode(ip, 0);
				}
			} else if (ip->lock.refs == 1) {
				hammer_unload_inode(ip);
				break;
			}
		} else {
			if (flush)
				hammer_flush_inode(ip, 0);

			/*
			 * The inode still has multiple refs, try to drop
			 * one ref.
			 */
			KKASSERT(ip->lock.refs >= 1);
			if (ip->lock.refs > 1) {
				hammer_unref(&ip->lock);
				break;
			}
		}
	}
}

/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * This can only be called in the context of the flusher.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
	hammer_mount_t hmp = ip->hmp;

	KASSERT(ip->lock.refs == 1,
		("hammer_unload_inode: %d refs\n", ip->lock.refs));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT(ip->lock.lockcount == 0);
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->target_list));

	RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

	hammer_uncache_node(&ip->cache[0]);
	hammer_uncache_node(&ip->cache[1]);
	hammer_clear_objid(ip);
	--hammer_count_inodes;

	if (hmp->flags & HAMMER_MOUNT_WAITIMAX)
		hammer_inode_wakereclaims(hmp);

	if (ip->flags & HAMMER_INODE_RECLAIM) {
		--hammer_count_reclaiming;
		--hmp->inode_reclaims;
		ip->flags &= ~HAMMER_INODE_RECLAIM;
	}
	kfree(ip, M_HAMMER);

	return(0);
}

/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
	hammer_mount_t hmp = ip->hmp;

	if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
		ip->flags |= HAMMER_INODE_RO;
	else
		ip->flags &= ~HAMMER_INODE_RO;
	return(0);
}

/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY:	Inode data has been updated
 * HAMMER_INODE_XDIRTY:	Dirty in-memory records
 * HAMMER_INODE_BUFS:	Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ITIMES:	mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
	KKASSERT ((ip->flags & HAMMER_INODE_RO) == 0 ||
		  (flags & (HAMMER_INODE_DDIRTY |
			    HAMMER_INODE_XDIRTY | HAMMER_INODE_BUFS |
			    HAMMER_INODE_DELETED | HAMMER_INODE_ITIMES)) == 0);
	if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
		ip->flags |= HAMMER_INODE_RSV_INODES;
		++ip->hmp->rsv_inodes;
	}

	ip->flags |= flags;
}
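
/*
 * Illustrative sketch (not from the original file): a frontend
 * setattr-style path adjusts the in-memory inode data and then calls
 * hammer_modify_inode() with the matching mod flag so the flusher
 * knows a new inode record must be written.  The function name below
 * is hypothetical.
 */
#if 0
static void
example_chmod(hammer_inode_t ip, u_int16_t mode)
{
	ip->ino_data.mode = mode;
	hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
}
#endif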

/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse.  Once requested HAMMER will attempt to actively flush it until
 * the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
	hammer_record_t depend;
	int r, good;

	/*
	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
	 * state we have to put it back into an IDLE state so we can
	 * drop the extra ref.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		if (ip->flush_state == HAMMER_FST_SETUP) {
			ip->flush_state = HAMMER_FST_IDLE;
			hammer_rel_inode(ip, 0);
		}
		return;
	}

	/*
	 * Our flush action will depend on the current state.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * We have no dependencies and can flush immediately.  Some
		 * of our children may not be flushable so we have to re-test
		 * with that additional knowledge.
		 */
		hammer_flush_inode_core(ip, flags);
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Recurse upwards through dependencies via target_list
		 * and start their flusher actions going if possible.
		 *
		 * 'good' is our connectivity.  -1 means we have none and
		 * can't flush, 0 means there weren't any dependencies, and
		 * 1 means we have good connectivity.
		 */
		good = 0;
		TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
			r = hammer_setup_parent_inodes(depend);
			if (r < 0 && good == 0)
				good = -1;
			if (r > 0)
				good = 1;
		}

		/*
		 * We can continue if good >= 0.  Determine how many records
		 * under our inode can be flushed (and mark them).
		 */
		if (good >= 0) {
			hammer_flush_inode_core(ip, flags);
		} else {
			ip->flags |= HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp);
			}
		}
		break;
	default:
		/*
		 * We are already flushing, flag the inode to reflush
		 * if needed after it completes its current flush.
		 */
		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
			ip->flags |= HAMMER_INODE_REFLUSH;
		if (flags & HAMMER_FLUSH_SIGNAL) {
			ip->flags |= HAMMER_INODE_RESIGNAL;
			hammer_flusher_async(ip->hmp);
		}
		break;
	}
}
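
/*
 * Illustrative sketch (an assumption, not original code): the
 * connectivity accumulation in the SETUP case above reduces the
 * per-dependency results (-1, 0, 1) to a single verdict.  Any positive
 * result wins; a negative result only sticks while no positive result
 * has been seen.
 */
#if 0
static int
example_merge_connectivity(int good, int r)
{
	if (r < 0 && good == 0)		/* first failure, nothing good yet */
		good = -1;
	if (r > 0)			/* any success gives connectivity */
		good = 1;
	return (good);
}
#endif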

/*
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.  record->ip is a parent of the caller's inode,
 * and record->target_ip is the caller's inode.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependency and there is no connectivity.
 */
static int
hammer_setup_parent_inodes(hammer_record_t record)
{
	hammer_mount_t hmp = record->ip->hmp;
	hammer_record_t depend;
	hammer_inode_t ip;
	int r, good;

	KKASSERT(record->flush_state != HAMMER_FST_IDLE);
	ip = record->ip;

	/*
	 * If the record is already flushing, is it in our flush group?
	 *
	 * If it is in our flush group but it is a general record or a
	 * delete-on-disk, it does not improve our connectivity (return 0),
	 * and if the target inode is not trying to destroy itself we can't
	 * allow the operation yet anyway (the second return -1).
	 */
	if (record->flush_state == HAMMER_FST_FLUSH) {
		if (record->flush_group != hmp->flusher_next) {
			ip->flags |= HAMMER_INODE_REFLUSH;
			return(-1);
		}
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		return(0);
	}

	/*
	 * It must be a setup record.  Try to resolve the setup dependencies
	 * by recursing upwards so we can place ip on the flush list.
	 */
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	good = 0;
	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
		r = hammer_setup_parent_inodes(depend);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;
	}

	/*
	 * We can't flush ip because it has no connectivity (XXX also check
	 * nlinks for pre-existing connectivity!).  Flag it so any resolution
	 * recurses back down.
	 */
	if (good < 0) {
		ip->flags |= HAMMER_INODE_REFLUSH;
		return(good);
	}

	/*
	 * We are go, place the parent inode in a flushing state so we can
	 * place its record in a flushing state.  Note that the parent
	 * may already be flushing.  The record must be in the same flush
	 * group as the parent.
	 */
	if (ip->flush_state != HAMMER_FST_FLUSH)
		hammer_flush_inode_core(ip, HAMMER_FLUSH_RECURSION);
	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	if (record->type == HAMMER_MEM_RECORD_DEL &&
	    (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
		/*
		 * Regardless of flushing state we cannot sync this path if the
		 * record represents a delete-on-disk but the target inode
		 * is not ready to sync its own deletion.
		 *
		 * XXX need to count effective nlinks to determine whether
		 * the flush is ok, otherwise removing a hardlink will
		 * just leave the DEL record to rot.
		 */
		record->target_ip->flags |= HAMMER_INODE_REFLUSH;
		return(-1);
	}

	if (ip->flush_group == ip->hmp->flusher_next) {
		/*
		 * This is the record we wanted to synchronize.
		 */
		record->flush_state = HAMMER_FST_FLUSH;
		record->flush_group = ip->flush_group;
		hammer_ref(&record->lock);
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);

		/*
		 * A general or delete-on-disk record does not contribute
		 * to our visibility.  We can still flush it, however.
		 */
		return(0);
	} else {
		/*
		 * We couldn't resolve the dependencies, request that the
		 * inode be flushed when the dependencies can be resolved.
		 */
		ip->flags |= HAMMER_INODE_REFLUSH;
		return(-1);
	}
}

/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, int flags)
{
	int go_count;

	/*
	 * Set flush state and prevent the flusher from cycling into
	 * the next flush group.  Do not place the ip on the list yet.
	 * Inodes not in the idle state get an extra reference.
	 */
	KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
	if (ip->flush_state == HAMMER_FST_IDLE)
		hammer_ref(&ip->lock);
	ip->flush_state = HAMMER_FST_FLUSH;
	ip->flush_group = ip->hmp->flusher_next;
	++ip->hmp->flusher_lock;

	/*
	 * We need to be able to vfsync/truncate from the backend.
	 */
	KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
	if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
		ip->flags |= HAMMER_INODE_VHELD;
		vref(ip->vp);
	}

	/*
	 * Figure out how many in-memory records we can actually flush
	 * (not including inode meta-data, buffers, etc).
	 */
	if (flags & HAMMER_FLUSH_RECURSION) {
		go_count = 1;
	} else {
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_setup_child_callback, NULL);
	}

	/*
	 * This is a more involved test that includes go_count.  If we
	 * can't flush, flag the inode and return.  If go_count is 0 we
	 * are unable to flush any records in our rec_tree and
	 * must ignore the XDIRTY flag.
	 */
	if (go_count == 0) {
		if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
			ip->flags |= HAMMER_INODE_REFLUSH;
			ip->flush_state = HAMMER_FST_SETUP;
			if (ip->flags & HAMMER_INODE_VHELD) {
				ip->flags &= ~HAMMER_INODE_VHELD;
				vrele(ip->vp);
			}
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp);
			}
			if (--ip->hmp->flusher_lock == 0)
				wakeup(&ip->hmp->flusher_lock);
			return;
		}
	}

	/*
	 * Snapshot the state of the inode for the backend flusher.
	 *
	 * The truncation must be retained in the frontend until after
	 * we've actually performed the record deletion.
	 *
	 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
	 * and stays in ip->flags.  Once set, it stays set until the
	 * inode is destroyed.
	 */
	ip->sync_flags = (ip->flags & HAMMER_INODE_MODMASK);
	ip->sync_trunc_off = ip->trunc_off;
	ip->sync_ino_leaf = ip->ino_leaf;
	ip->sync_ino_data = ip->ino_data;
	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	ip->flags &= ~HAMMER_INODE_MODMASK;
#ifdef DEBUG_TRUNCATE
	if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
		kprintf("truncateS %016llx\n", ip->sync_trunc_off);
#endif

	/*
	 * The flusher list inherits our inode and reference.
	 */
	TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
	if (--ip->hmp->flusher_lock == 0)
		wakeup(&ip->hmp->flusher_lock);

	if (flags & HAMMER_FLUSH_SIGNAL) {
		hammer_flusher_async(ip->hmp);
	}
}

/*
 * Callback for scan of ip->rec_tree.  Try to include each record in our
 * flush.  ip->flush_group has been set but the inode has not yet been
 * moved into a flushing state.
 *
 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
 * both inodes.
 *
 * We return 1 for any record placed or found in FST_FLUSH, which prevents
 * the caller from shortcutting the flush.
 */
static int
hammer_setup_child_callback(hammer_record_t rec, void *data)
{
	hammer_inode_t target_ip;
	hammer_inode_t ip;
	int r;

	/*
	 * If the record has been deleted by the backend (it's being held
	 * by the frontend in a race), just ignore it.
	 */
	if (rec->flags & HAMMER_RECF_DELETED_BE)
		return(0);

	/*
	 * If the record is in an idle state it has no dependencies and
	 * can be flushed.
	 */
	ip = rec->ip;
	r = 0;

	switch(rec->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * Record has no setup dependency, we can flush it.
		 */
		KKASSERT(rec->target_ip == NULL);
		rec->flush_state = HAMMER_FST_FLUSH;
		rec->flush_group = ip->flush_group;
		hammer_ref(&rec->lock);
		r = 1;
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Record has a setup dependency.  Try to include the
		 * target ip in the flush.
		 *
		 * We have to be careful here, if we do not do the right
		 * thing we can lose track of dirty inodes and the system
		 * will lockup trying to allocate buffers.
		 */
		target_ip = rec->target_ip;
		KKASSERT(target_ip != NULL);
		KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
		if (target_ip->flush_state == HAMMER_FST_FLUSH) {
			/*
			 * If the target IP is already flushing in our group
			 * we are golden, otherwise make sure the target
			 * reflushes.
			 */
			if (target_ip->flush_group == ip->flush_group) {
				rec->flush_state = HAMMER_FST_FLUSH;
				rec->flush_group = ip->flush_group;
				hammer_ref(&rec->lock);
				r = 1;
			} else {
				target_ip->flags |= HAMMER_INODE_REFLUSH;
			}
		} else if (rec->type == HAMMER_MEM_RECORD_ADD) {
			/*
			 * If the target IP is not flushing we can force
			 * it to flush, even if it is unable to write out
			 * any of its own records we have at least one in
			 * hand that we CAN deal with.
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = ip->flush_group;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		} else {
			/*
			 * General or delete-on-disk record.
			 *
			 * XXX this needs help.  If a delete-on-disk we could
			 * disconnect the target.  If the target has its own
			 * dependencies they really need to be flushed.
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = ip->flush_group;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * Record already associated with a flush group.  It had
		 * better be in our flush group.
		 */
		KKASSERT(rec->flush_group == ip->flush_group);
		r = 1;
		break;
	}
	return(r);
}

/*
 * Wait for a previously queued flush to complete
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
	while (ip->flush_state != HAMMER_FST_IDLE) {
		if (ip->flush_state == HAMMER_FST_SETUP) {
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			ip->flags |= HAMMER_INODE_FLUSHW;
			tsleep(&ip->flags, 0, "hmrwin", 0);
		}
	}
}
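
/*
 * Illustrative sketch (not part of the original file): a synchronous
 * flush, e.g. for an fsync-like path, signals the flusher and then
 * waits for the inode to return to the idle state.  The function name
 * is hypothetical.
 */
#if 0
static void
example_fsync(hammer_inode_t ip)
{
	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
	hammer_wait_inode(ip);	/* sleeps in "hmrwin" until FST_IDLE */
}
#endif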

/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip)
{
	int dorel = 0;

	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

	/*
	 * Merge left-over flags back into the frontend and fix the state.
	 */
	ip->flags |= ip->sync_flags;

	/*
	 * The backend may have adjusted nlinks, so if the adjusted nlinks
	 * does not match the frontend set the frontend's DDIRTY flag again.
	 */
	if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
		ip->flags |= HAMMER_INODE_DDIRTY;

	/*
	 * Fix up the dirty buffer status.  IO completions will also
	 * try to clean up rsv_databufs.
	 */
	if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
		ip->flags |= HAMMER_INODE_BUFS;
	} else {
		ip->hmp->rsv_databufs -= ip->rsv_databufs;
		ip->rsv_databufs = 0;
	}

	/*
	 * Re-set the XDIRTY flag if some of the inode's in-memory records
	 * could not be flushed.
	 */
	KKASSERT((RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
		 (!RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) != 0));

	/*
	 * Do not lose track of inodes which no longer have vnode
	 * associations, otherwise they may never get flushed again.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
		ip->flags |= HAMMER_INODE_REFLUSH;

	/*
	 * Adjust flush_state.  The target state (idle or setup) shouldn't
	 * be terribly important since we will reflush if we really need
	 * to do anything. XXX
	 */
	if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
		ip->flush_state = HAMMER_FST_IDLE;
		dorel = 1;
	} else {
		ip->flush_state = HAMMER_FST_SETUP;
	}

	/*
	 * Clean up the vnode ref
	 */
	if (ip->flags & HAMMER_INODE_VHELD) {
		ip->flags &= ~HAMMER_INODE_VHELD;
		vrele(ip->vp);
	}

	/*
	 * If the frontend made more changes and requested another flush,
	 * then try to get it running.
	 */
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
	}

	/*
	 * If the inode is now clean drop the space reservation.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    (ip->flags & HAMMER_INODE_RSV_INODES)) {
		ip->flags &= ~HAMMER_INODE_RSV_INODES;
		--ip->hmp->rsv_inodes;
	}

	/*
	 * Finally, if the frontend is waiting for a flush to complete,
	 * wake it up.
	 */
	if (ip->flush_state != HAMMER_FST_FLUSH) {
		if (ip->flags & HAMMER_INODE_FLUSHW) {
			ip->flags &= ~HAMMER_INODE_FLUSHW;
			wakeup(&ip->flags);
		}
	}
	if (dorel)
		hammer_rel_inode(ip, 0);
}

/*
 * Called from hammer_sync_inode() to synchronize in-memory records
 * to the media.
 */
static int
hammer_sync_record_callback(hammer_record_t record, void *data)
{
	hammer_cursor_t cursor = data;
	hammer_transaction_t trans = cursor->trans;
	int error;

	/*
	 * Skip records that do not belong to the current flush.
	 */
	++hammer_stats_record_iterations;
	if (record->flush_state != HAMMER_FST_FLUSH)
		return(0);

#if 1
	if (record->flush_group != record->ip->flush_group) {
		kprintf("sync_record %p ip %p bad flush group %d %d\n",
			record, record->ip, record->flush_group,
			record->ip->flush_group);
		return(0);
	}
#endif
	KKASSERT(record->flush_group == record->ip->flush_group);

	/*
	 * Interlock the record using the BE flag.  Once BE is set the
	 * frontend cannot change the state of FE.
	 *
	 * NOTE: If FE is set prior to us setting BE we still sync the
	 * record out, but the flush completion code converts it to
	 * a delete-on-disk record instead of destroying it.
	 */
	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
	record->flags |= HAMMER_RECF_INTERLOCK_BE;

	/*
	 * The backend may have already disposed of the record.
	 */
	if (record->flags & HAMMER_RECF_DELETED_BE) {
		error = 0;
		goto done;
	}

	/*
	 * If the whole inode is being deleted all on-disk records will
	 * be deleted very soon, we can't sync any new records to disk
	 * because they will be deleted in the same transaction they were
	 * created in (delete_tid == create_tid), which will assert.
	 *
	 * XXX There may be a case with RECORD_ADD with DELETED_FE set
	 * that we currently panic on.
	 */
	if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
		switch(record->type) {
		case HAMMER_MEM_RECORD_DATA:
			/*
			 * We don't have to do anything, if the record was
			 * committed the space will have been accounted for
			 * anyway.
			 */
			/* fall through */
		case HAMMER_MEM_RECORD_GENERAL:
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
			error = 0;
			goto done;
		case HAMMER_MEM_RECORD_ADD:
			panic("hammer_sync_record_callback: illegal add "
			      "during inode deletion record %p", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_INODE:
			panic("hammer_sync_record_callback: attempt to "
			      "sync inode record %p?", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_DEL:
			/*
			 * Follow through and issue the on-disk deletion
			 */
			break;
		}
	}

	/*
	 * If DELETED_FE is set we may have already sent dependent pieces
	 * to the disk and we must flush the record as if it hadn't been
	 * deleted.  This creates a bit of a mess because we have to
	 * have ip_sync_record convert the record to MEM_RECORD_DEL before
	 * it inserts the B-Tree record.  Otherwise the media sync might
	 * be visible to the frontend.
	 */
	if (record->flags & HAMMER_RECF_DELETED_FE) {
		if (record->type == HAMMER_MEM_RECORD_ADD) {
			record->flags |= HAMMER_RECF_CONVERT_DELETE;
		} else {
			KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
			return(0);
		}
	}

	/*
	 * Assign the create_tid for new records.  Deletions already
	 * have the record's entire key properly set up.
	 */
	if (record->type != HAMMER_MEM_RECORD_DEL)
		record->leaf.base.create_tid = trans->tid;
	for (;;) {
		error = hammer_ip_sync_record_cursor(cursor, record);
		if (error != EDEADLK)
			break;
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
					   record->ip);
		if (error)
			break;
	}
	record->flags &= ~HAMMER_RECF_CONVERT_DELETE;

	if (error) {
		if (error != -ENOSPC) {
			kprintf("hammer_sync_record_callback: sync failed rec "
				"%p, error %d\n", record, error);
			Debugger("sync failed rec");
		}
	}
done:
	hammer_flush_record_done(record, error);
	return(error);
}

/*
 * XXX error handling
 */
int
hammer_sync_inode(hammer_inode_t ip)
{
	struct hammer_transaction trans;
	struct hammer_cursor cursor;
	hammer_record_t depend;
	hammer_record_t next;
	int error, tmp_error;
	u_int64_t nlinks;

	if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
		return(0);

	hammer_start_transaction_fls(&trans, ip->hmp);
	error = hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
	if (error)
		goto done;

	/*
	 * Any directory records referencing this inode which are not in
	 * our current flush group must adjust our nlink count for the
	 * purposes of synchronization to disk.
	 *
	 * Records which are in our flush group can be unlinked from our
	 * inode now, potentially allowing the inode to be physically
	 * deleted.
	 */
	nlinks = ip->ino_data.nlinks;
	next = TAILQ_FIRST(&ip->target_list);
	while ((depend = next) != NULL) {
		next = TAILQ_NEXT(depend, target_entry);
		if (depend->flush_state == HAMMER_FST_FLUSH &&
		    depend->flush_group == ip->hmp->flusher_act) {
			/*
			 * If this is an ADD that was deleted by the frontend
			 * the frontend nlinks count will have already been
			 * decremented, but the backend is going to sync its
			 * directory entry and must account for it.  The
			 * record will be converted to a delete-on-disk when
			 * it gets synced.
			 *
			 * If the ADD was not deleted by the frontend we
			 * can remove the dependency from our target_list.
			 */
			if (depend->flags & HAMMER_RECF_DELETED_FE) {
				++nlinks;
			} else {
				TAILQ_REMOVE(&ip->target_list, depend,
					     target_entry);
				depend->target_ip = NULL;
			}
		} else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
			/*
			 * Not part of our flush group
			 */
			KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
			switch(depend->type) {
			case HAMMER_MEM_RECORD_ADD:
				--nlinks;
				break;
			case HAMMER_MEM_RECORD_DEL:
				++nlinks;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Set dirty if we had to modify the link count.
	 */
	if (ip->sync_ino_data.nlinks != nlinks) {
		KKASSERT((int64_t)nlinks >= 0);
		ip->sync_ino_data.nlinks = nlinks;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
	}

#if 0
	/*
	 * XXX DISABLED FOR NOW.  With the new reservation support
	 * we cannot resync pending data without confusing the hell
	 * out of the in-memory record tree.
	 */
	/*
	 * Queue up as many dirty buffers as we can then set a flag to
	 * cause any further BIOs to go to the alternative queue.
	 */
	if (ip->flags & HAMMER_INODE_VHELD)
		error = vfsync(ip->vp, MNT_NOWAIT, 1, NULL, NULL);
	ip->flags |= HAMMER_INODE_WRITE_ALT;
#endif

	/*
	 * The buffer cache may contain dirty buffers beyond the inode
	 * state we copied from the frontend to the backend.  Because
	 * we are syncing our buffer cache on the backend, resync
	 * the truncation point and the file size so we don't wipe out
	 * any data.
	 *
	 * Syncing the buffer cache on the frontend has serious problems
	 * because it prevents us from passively queueing dirty inodes
	 * to the backend (the BIO's could stall indefinitely).
	 */
	if (ip->flags & HAMMER_INODE_TRUNCATED) {
		ip->sync_trunc_off = ip->trunc_off;
		ip->sync_flags |= HAMMER_INODE_TRUNCATED;
	}
	if (ip->sync_ino_data.size != ip->ino_data.size) {
		ip->sync_ino_data.size = ip->ino_data.size;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
	}

	/*
	 * If there is a truncation queued destroy any data past the (aligned)
	 * truncation point.  Userland will have dealt with the buffer
	 * containing the truncation point for us.
	 *
	 * We don't flush pending frontend data buffers until after we've
	 * dealt with the truncation.
	 *
	 * Don't bother if the inode is or has been deleted.
	 */
	if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
		/*
		 * Interlock trunc_off.  The VOP front-end may continue to
		 * make adjustments to it while we are blocked.
		 */
		off_t trunc_off;
		off_t aligned_trunc_off;

		trunc_off = ip->sync_trunc_off;
		aligned_trunc_off = (trunc_off + HAMMER_BUFMASK) &
				    ~HAMMER_BUFMASK64;

		/*
		 * Delete any whole blocks on-media.  The front-end has
		 * already cleaned out any partial block and made it
		 * pending.  The front-end may have updated trunc_off
		 * while we were blocked so we only use sync_trunc_off.
		 */
		error = hammer_ip_delete_range(&cursor, ip,
						aligned_trunc_off,
						0x7FFFFFFFFFFFFFFFLL, 1);
		if (error)
			Debugger("hammer_ip_delete_range errored");

		/*
		 * Clear the truncation flag on the backend after we have
		 * complete the deletions.  Backend data is now good again
		 * (including new records we are about to sync, below).
		 */
		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
		ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	} else {
		error = 0;
	}

	/*
	 * Now sync related records.  These will typically be directory
	 * entries or delete-on-disk records.
	 *
	 * Not all records will be flushed, but clear XDIRTY anyway.  We
	 * will set it again in the frontend hammer_flush_inode_done()
	 * if records remain.
	 */
	if (error == 0) {
		tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				    hammer_sync_record_callback, &cursor);
		if (tmp_error < 0)
			tmp_error = -tmp_error;
		if (tmp_error)
			error = tmp_error;
	}

	/*
	 * If we are deleting the inode the frontend had better not have
	 * any active references on elements making up the inode.
	 */
	if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
	    RB_EMPTY(&ip->rec_tree) &&
	    (ip->sync_flags & HAMMER_INODE_DELETING) &&
	    (ip->flags & HAMMER_INODE_DELETED) == 0) {
		int count1 = 0;

		ip->flags |= HAMMER_INODE_DELETED;
		error = hammer_ip_delete_range_all(&cursor, ip, &count1);
		if (error == 0) {
			ip->sync_flags &= ~HAMMER_INODE_DELETING;
			ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
			KKASSERT(RB_EMPTY(&ip->rec_tree));

			/*
			 * Set delete_tid in both the frontend and backend
			 * copy of the inode record.  The DELETED flag handles
			 * this, do not set RDIRTY.
			 */
			ip->ino_leaf.base.delete_tid = trans.tid;
			ip->sync_ino_leaf.base.delete_tid = trans.tid;

			/*
			 * Adjust the inode count in the volume header
			 */
			if (ip->flags & HAMMER_INODE_ONDISK) {
				hammer_modify_volume_field(&trans,
							   trans.rootvol,
							   vol0_stat_inodes);
				--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans.rootvol);
			}
		} else {
			ip->flags &= ~HAMMER_INODE_DELETED;
			Debugger("hammer_ip_delete_range_all errored");
		}
	}

	ip->sync_flags &= ~HAMMER_INODE_BUFS;

	if (error)
		Debugger("RB_SCAN errored");

	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 * DELETED and ONDISK are managed only in ip->flags.
	 */
	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags.
		 * the delete flag takes care of things.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY|
				    HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
				    HAMMER_INODE_DELETING);
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY|
				    HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
				    HAMMER_INODE_DELETING);
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t record = RB_ROOT(&ip->rec_tree);
			hammer_ref(&record->lock);
			KKASSERT(record->lock.refs == 1);
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
			hammer_rel_mem_record(record);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set both dirty flags
		 * to force an initial record to be written.  Also set
		 * the create_tid for the inode.
		 *
		 * Set create_tid in both the frontend and backend
		 * copy of the inode record.
		 */
		ip->ino_leaf.base.create_tid = trans.tid;
		ip->sync_ino_leaf.base.create_tid = trans.tid;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
		break;
	}

	/*
	 * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
	 * is already on-disk the old record is marked as deleted.
	 *
	 * If DELETED is set hammer_update_inode() will delete the existing
	 * record without writing out a new one.
	 *
	 * If *ONLY* the ITIMES flag is set we can update the record in-place.
	 */
	if (ip->flags & HAMMER_INODE_DELETED) {
		error = hammer_update_inode(&cursor, ip);
	} else
	if ((ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) ==
	    HAMMER_INODE_ITIMES) {
		error = hammer_update_itimes(&cursor, ip);
	} else
	if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) {
		error = hammer_update_inode(&cursor, ip);
	}
	if (error)
		Debugger("hammer_update_itimes/inode errored");
done:
	/*
	 * Save the TID we used to sync the inode with to make sure we
	 * do not improperly reuse it.
	 */
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return(error);
}

/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
	struct vnode *vp;

	/*
	 * Set the DELETING flag when the link count drops to 0 and the
	 * OS no longer has any opens on the inode.
	 *
	 * The backend will clear DELETING (a mod flag) and set DELETED
	 * (a state flag) when it is actually able to perform the
	 * operation.
	 */
	if (ip->ino_data.nlinks == 0 &&
	    (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
		ip->flags |= HAMMER_INODE_DELETING;
		ip->flags |= HAMMER_INODE_TRUNCATED;
		ip->trunc_off = 0;
		vp = NULL;
		if (getvp) {
			if (hammer_get_vnode(ip, &vp) != 0)
				return;
		}
		if (ip->vp) {
			vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
			vnode_pager_setsize(ip->vp, 0);
		}
		if (getvp) {
			vput(vp);
		}
	}
}

/*
 * Re-test an inode when a dependency had gone away to see if we
 * can chain flush it.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		hammer_ref(&ip->lock);
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
		hammer_rel_inode(ip, 0);
	}
}

/*
 * When a HAMMER inode is reclaimed it may have to be queued to the backend
 * for its final sync to disk.  Programs like blogbench can cause the backlog
 * to grow indefinitely.  Put a cap on the number of inodes we allow to be
 * in this state by giving the flusher time to drain.
 */
void
hammer_inode_waitreclaims(hammer_mount_t hmp)
{
	while (hmp->inode_reclaims > HAMMER_RECLAIM_MIN &&
	       hmp->inode_reclaims > hmp->count_inodes / HAMMER_RECLAIM_FACTOR) {
		hmp->flags |= HAMMER_MOUNT_WAITIMAX;
		hammer_flusher_async(hmp);
		tsleep(hmp, 0, "hmimax", hz / 10);
	}
}

void
hammer_inode_wakereclaims(hammer_mount_t hmp)
{
	if (hmp->inode_reclaims <= HAMMER_RECLAIM_MIN ||
	    hmp->inode_reclaims <= hmp->count_inodes / HAMMER_RECLAIM_FACTOR) {
		hmp->flags &= ~HAMMER_MOUNT_WAITIMAX;
		wakeup(hmp);
	}
}
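
/*
 * Illustrative sketch (an assumption, not original code): the
 * waitreclaims/wakereclaims pair behaves like a watermark throttle.
 * A frontend thread generating reclaim pressure blocks in
 * hammer_inode_waitreclaims() while the backlog exceeds both
 * HAMMER_RECLAIM_MIN and the count_inodes/HAMMER_RECLAIM_FACTOR
 * threshold; hammer_inode_wakereclaims(), called as inodes are
 * unloaded, clears HAMMER_MOUNT_WAITIMAX and wakes the sleepers once
 * the flusher has drained below the threshold.
 */
#if 0
static void
example_reclaim_pressure(hammer_mount_t hmp)
{
	hammer_inode_waitreclaims(hmp);	/* may sleep in "hmimax" */
	/* proceed once the flusher has drained the reclaim backlog */
}
#endif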