/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.114 2008/09/24 00:53:51 dillon Exp $
 */

#include "hammer.h"
#include <vm/vm_extern.h>
static int hammer_unload_inode(struct hammer_inode *ip);
static void hammer_free_inode(hammer_inode_t ip);
static void hammer_flush_inode_core(hammer_inode_t ip,
			hammer_flush_group_t flg, int flags);
static int hammer_setup_child_callback(hammer_record_t rec, void *data);
static int hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
static int hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
			hammer_flush_group_t flg);
static int hammer_setup_parent_inodes_helper(hammer_record_t record,
			int depth, hammer_flush_group_t flg);
static void hammer_inode_wakereclaims(hammer_inode_t ip, int dowake);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif
/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_localization < ip2->obj_localization)
		return(-1);
	if (ip1->obj_localization > ip2->obj_localization)
		return(1);
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}
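/*
 * NOTE: All of the comparators here impose the same key ordering:
 * localization first, then obj_id, then obj_asof.  asof is the
 * lowest-priority field, which is what allows the all_history
 * comparator below to simply not test it.
 */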
/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_localization < ip->obj_localization)
		return(-1);
	if (info->obj_localization > ip->obj_localization)
		return(1);
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}
/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t info = data;

	if (ip->obj_localization > info->obj_localization)
		return(1);
	if (ip->obj_localization < info->obj_localization)
		return(-1);
	if (ip->obj_id > info->obj_id)
		return(1);
	if (ip->obj_id < info->obj_id)
		return(-1);
	return(0);
}
/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
	u_int32_t localization = *(u_int32_t *)data;

	if (ip->obj_localization > localization)
		return(1);
	if (ip->obj_localization < localization)
		return(-1);
	return(0);
}
/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
	if (p1->localization < p2->localization)
		return(-1);
	if (p1->localization > p2->localization)
		return(1);
	return(0);
}
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
	     hammer_pfs_rb_compare, u_int32_t, localization);
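/*
 * NOTE: The RB_GENERATE* macros above emit the actual tree functions.
 * RB_GENERATE_XLOOKUP() adds a lookup keyed on a type other than the
 * node type itself; here it produces
 * hammer_ino_rb_tree_RB_LOOKUP_INFO(root, info), which is what the
 * hammer_get_inode() path uses to probe the inode cache by
 * (localization, obj_id, asof) without constructing a hammer_inode.
 * RB_GENERATE2() similarly produces a direct-key lookup for the PFS
 * tree, used via RB_LOOKUP(hammer_pfs_rb_tree, ...) below.
 */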
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	/*
	 * If the inode no longer has visibility in the filesystem try to
	 * recycle it immediately, even if the inode is dirty.  Recycling
	 * it quickly allows the system to reclaim buffer cache and VM
	 * resources which can matter a lot in a heavily loaded system.
	 *
	 * This can deadlock in vfsync() if we aren't careful.
	 *
	 * Do not queue the inode to the flusher if we still have visibility,
	 * otherwise namespace calls such as chmod will unnecessarily generate
	 * multiple inode updates.
	 */
	hammer_inode_unloadable_check(ip, 0);
	if (ip->ino_data.nlinks == 0) {
		if (ip->flags & HAMMER_INODE_MODMASK)
			hammer_flush_inode(ip, 0);
/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;

	if ((ip = vp->v_data) != NULL) {
		if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
			++hammer_count_reclaiming;
			++hmp->inode_reclaims;
			ip->flags |= HAMMER_INODE_RECLAIM;
		}
		hammer_rel_inode(ip, 1);
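		/*
		 * NOTE: The RECLAIM flag and the hammer_count_reclaiming /
		 * inode_reclaims counters track inodes which have lost
		 * their vnode but still need backend attention.
		 * hammer_inode_waitreclaims() and
		 * hammer_inode_wakereclaims() use these counts, presumably
		 * to throttle the frontend when too many such inodes
		 * accumulate ahead of the flusher.
		 */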
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
	if ((vp = ip->vp) == NULL) {
		error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
		if (error)
			break;
		hammer_lock_ex(&ip->lock);
		if (ip->vp != NULL) {
			hammer_unlock(&ip->lock);
		hammer_ref(&ip->lock);

		obj_type = ip->ino_data.obj_type;
		vp->v_type = hammer_get_vnode_type(obj_type);
		hammer_inode_wakereclaims(ip, 0);

		switch(ip->ino_data.obj_type) {
		case HAMMER_OBJTYPE_CDEV:
		case HAMMER_OBJTYPE_BDEV:
			vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
			addaliasu(vp, ip->ino_data.rmajor,
				  ip->ino_data.rminor);
			break;
		case HAMMER_OBJTYPE_FIFO:
			vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
			break;
		case HAMMER_OBJTYPE_REGFILE:
			/*
			 * MPSAFE read supported.
			 */
			vp->v_flag |= VMP_READ;

		vp->v_flag |= VMP_GETATTR;

		/*
		 * Only mark as the root vnode if the ip is not
		 * historical, otherwise the VFS cache will get
		 * confused.  The other half of the special handling
		 * is in hammer_vop_nlookupdotdot().
		 *
		 * Pseudo-filesystem roots can be accessed via
		 * non-root filesystem paths and setting VROOT may
		 * confuse the namecache.  Set VPFSROOT instead.
		 */
		if (ip->obj_id == HAMMER_OBJID_ROOT &&
		    ip->obj_asof == hmp->asof) {
			if (ip->obj_localization == 0)
				vp->v_flag |= VROOT;
			else
				vp->v_flag |= VPFSROOT;
		}

		vp->v_data = (void *)ip;
		/* vnode locked by getnewvnode() */
		/* make related vnode dirty if inode dirty? */
		hammer_unlock(&ip->lock);
		if (vp->v_type == VREG)
			vinitvmio(vp, ip->ino_data.size);
		break;
	} else {
		/*
		 * loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference them, and issue the related call-back.  This routine is
 * used for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
			    int (*callback)(hammer_inode_t ip, void *data),
			    void *data)
{
	hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
				   hammer_inode_info_cmp_all_history,
				   callback, iinfo);
}
/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These
 * functions do not attach or detach the related vnode (use
 * hammer_get_vnode() for that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_node_cache *cachep;
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we can return it immediately.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a dummy inode we return a failure so dounlink
	 * (which does another lookup) doesn't try to mess with the
	 * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
	 * to ref dummy inodes.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Locate the on-disk inode.  If this is a PFS root we always
	 * access the current version of the root inode and (if it is not
	 * a master) always access information under it with a snapshot
	 * TID.
	 *
	 * We cache recent inode lookups in this directory in dip->cache[2].
	 * If we can't find it we assume the inode we are looking for is
	 * close to the directory inode.
	 */
retry:
	if (dip->cache[2].node)
		cachep = &dip->cache[2];
	else
		cachep = &dip->cache[0];

	hammer_init_cursor(trans, &cursor, cachep, NULL);
	cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;
	cursor.asof = iinfo.obj_asof;
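	/*
	 * NOTE: The key constructed above addresses the inode record
	 * itself: localization selects the PFS and the INODE zone,
	 * obj_id/rec_type select the record, and key is 0 because an
	 * inode has exactly one inode record.  create_tid/delete_tid
	 * stay 0 because the lookup is performed as-of a transaction
	 * id (cursor.asof) via HAMMER_CURSOR_ASOF, selecting whichever
	 * version of the inode was visible at that TID.
	 */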
	cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
		       HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
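		/*
		 * NOTE: EDEADLK from the B-Tree is not a hard error.  The
		 * cursor is torn down and the lookup is simply retried
		 * from the top; the same unwind-and-retry pattern recurs
		 * throughout this file wherever a cursor is used.
		 */
		goto retry;
	}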
	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
		ip->ino_data = cursor.data->inode;

		/*
		 * cache[0] tries to cache the location of the object inode.
		 * The assumption is that it is near the directory inode.
		 *
		 * cache[1] tries to cache the location of the object data.
		 * We might have something in the governing directory from
		 * scan optimizations (see the strategy code in
		 * hammer_vnops.c).
		 *
		 * We update dip->cache[2], if possible, with the location
		 * of the object inode for future directory shortcuts.
		 */
		hammer_cache_node(&ip->cache[0], cursor.node);
		if (dip) {
			if (dip->cache[3].node) {
				hammer_cache_node(&ip->cache[1],
						  dip->cache[3].node);
			}
			hammer_cache_node(&dip->cache[2], cursor.node);
		}

		/*
		 * The file should not contain any data past the file size
		 * stored in the inode.  Setting save_trunc_off to the
		 * file size instead of max reduces B-Tree lookup overheads
		 * on append by allowing the flusher to avoid checking for
		 * records past the file EOF.
		 */
		ip->save_trunc_off = ip->ino_data.size;
	}

	/*
	 * Locate and assign the pseudofs management structure to
	 * the file.
	 */
	if (dip && dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
	} else {
		ip->pfsm = hammer_load_pseudofs(trans,
						ip->obj_localization,
						errorp);
		*errorp = 0;	/* ignore ENOENT */
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}
		hammer_free_inode(ip);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);
	trans->flags |= HAMMER_TRANSF_NEWINODE;
	return(ip);
}
/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
		       int64_t obj_id, hammer_tid_t asof,
		       u_int32_t localization, int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we can return it immediately.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a non-fake inode we return an error.  Only fake
	 * inodes can be returned by this routine.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Populate the dummy inode.  Leave everything zero'd out.
	 *
	 * (ip->ino_leaf and ip->ino_data)
	 *
	 * Make the dummy inode a FIFO object which most copy programs
	 * will properly ignore.
	 */
	ip->save_trunc_off = ip->ino_data.size;
	ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

	/*
	 * Locate and assign the pseudofs management structure to
	 * the file.
	 */
	if (dip && dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
	} else {
		ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
						errorp);
		*errorp = 0;	/* ignore ENOENT */
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 *
	 * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			goto loop;
		}
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}
		hammer_free_inode(ip);
		ip = NULL;
	}
	trans->flags |= HAMMER_TRANSF_NEWINODE;
	return(ip);
}
/*
 * Return a referenced inode only if it is in our inode cache.
 *
 * Dummy inodes do not count.
 */
struct hammer_inode *
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
		  hammer_tid_t asof, u_int32_t localization)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_inode *ip;

	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;

	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY)
			ip = NULL;
		else
			hammer_ref(&ip->lock);
	}
	return(ip);
}
/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a pseudo-fs.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred,
		    hammer_inode_t dip, const char *name, int namelen,
		    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;
	int error;
	int64_t namekey;
	u_int32_t dummy;

	hmp = trans->hmp;
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	trans->flags |= HAMMER_TRANSF_NEWINODE;

	if (pfsm) {
		KKASSERT(pfsm->localization != 0);
		ip->obj_id = HAMMER_OBJID_ROOT;
		ip->obj_localization = pfsm->localization;
	} else {
		KKASSERT(dip != NULL);
		namekey = hammer_directory_namekey(dip, name, namelen, &dummy);
		ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
		ip->obj_localization = dip->obj_localization;
	}

	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flush_state = HAMMER_FST_IDLE;
	ip->flags = HAMMER_INODE_DDIRTY |
		    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;

	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	/* ip->save_trunc_off = 0; (already zero) */
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	ip->ino_data.atime = trans->time;
	ip->ino_data.mtime = trans->time;
	ip->ino_data.size = 0;
	ip->ino_data.nlinks = 0;

	/*
	 * A nohistory designator on the parent directory is inherited by
	 * the child.  We will do this even for pseudo-fs creation... the
	 * sysad can turn it off.
	 */
	if (dip) {
		ip->ino_data.uflags = dip->ino_data.uflags &
				      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
	}

	ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_leaf.base.localization = ip->obj_localization +
					 HAMMER_LOCALIZE_INODE;
	ip->ino_leaf.base.obj_id = ip->obj_id;
	ip->ino_leaf.base.key = 0;
	ip->ino_leaf.base.create_tid = 0;
	ip->ino_leaf.base.delete_tid = 0;
	ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->time;

	/*
	 * If we are running version 2 or greater directory entries are
	 * inode-localized instead of data-localized.
	 */
	if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
		if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
			ip->ino_data.cap_flags |=
				HAMMER_INODE_CAP_DIR_LOCAL_INO;
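			/*
			 * NOTE: With DIR_LOCAL_INO set, this directory's
			 * entry records are keyed in the inode
			 * localization zone rather than the data zone,
			 * which appears to keep a directory's entries and
			 * the inodes they reference close together in
			 * the B-Tree.
			 */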
		}
	}

	/*
	 * Setup the ".." pointer.  This only needs to be done for directories
	 * but we do it for all objects as a recovery aid.
	 */
	if (dip)
		ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;

	/*
	 * The parent_obj_localization field only applies to pseudo-fs roots.
	 * XXX this is no longer applicable, PFSs are no longer directly
	 * tied into the parent's directory structure.
	 */
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
	    ip->obj_id == HAMMER_OBJID_ROOT) {
		ip->ino_data.ext.obj.parent_obj_localization =
			dip->obj_localization;
	}

	switch(ip->ino_leaf.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	xuid = hammer_to_unix_xid(&dip->ino_data.uid);
	xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
				     xuid, cred, &vap->va_mode);
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
	else
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
	else
		ip->ino_data.gid = dip->ino_data.gid;

	hammer_ref(&ip->lock);

	if (pfsm) {
		ip->pfsm = pfsm;
		hammer_ref(&pfsm->lock);
		error = 0;
	} else if (dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
		error = 0;
	} else {
		ip->pfsm = hammer_load_pseudofs(trans,
						ip->obj_localization,
						&error);
		error = 0;	/* ignore ENOENT */
	}

	if (error) {
		hammer_free_inode(ip);
		ip = NULL;
	} else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		panic("hammer_create_inode: duplicate obj_id %llx",
		      (long long)ip->obj_id);
		/* not reached */
		hammer_free_inode(ip);
	}
	*ipp = ip;
	return(error);
}
/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
	struct hammer_mount *hmp;

	hmp = ip->hmp;
	KKASSERT(ip->lock.refs == 1);
	hammer_uncache_node(&ip->cache[0]);
	hammer_uncache_node(&ip->cache[1]);
	hammer_uncache_node(&ip->cache[2]);
	hammer_uncache_node(&ip->cache[3]);
	hammer_inode_wakereclaims(ip, 1);
	if (ip->objid_cache)
		hammer_clear_objid(ip);
	--hammer_count_inodes;
	--hmp->count_inodes;
	if (ip->pfsm) {
		hammer_rel_pseudofs(hmp, ip->pfsm);
		ip->pfsm = NULL;
	}
	kfree(ip, hmp->m_inodes);
}
/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
		     u_int32_t localization, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_inode_t ip;
	hammer_pseudofs_inmem_t pfsm;
	struct hammer_cursor cursor;
	int bytes;

retry:
	pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
	if (pfsm) {
		hammer_ref(&pfsm->lock);
		*errorp = 0;
		return(pfsm);
	}

	/*
	 * PFS records are stored in the root inode (not the PFS root inode,
	 * but the real root).  Avoid an infinite recursion if loading
	 * the PFS for the real root.
	 */
	if (localization) {
		ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
				      HAMMER_MAX_TID,
				      HAMMER_DEF_LOCALIZATION, 0, errorp);
	} else {
		ip = NULL;
	}

	pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
	pfsm->localization = localization;
	pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
	pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

	hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
	cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;
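	/*
	 * NOTE: PFS records all live under the root inode of the
	 * filesystem proper, keyed by the PFS's localization value, and
	 * are always read as-of HAMMER_MAX_TID: PFS configuration is
	 * current data, never historical.
	 */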
	if (ip)
		*errorp = hammer_ip_lookup(&cursor);
	else
		*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == 0)
		*errorp = hammer_ip_resolve_data(&cursor);
	if (*errorp == 0) {
		if (cursor.data->pfsd.mirror_flags &
		    HAMMER_PFSD_DELETED) {
			*errorp = ENOENT;
		} else {
			bytes = cursor.leaf->data_len;
			if (bytes > sizeof(pfsm->pfsd))
				bytes = sizeof(pfsm->pfsd);
			bcopy(cursor.data, &pfsm->pfsd, bytes);
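			/*
			 * NOTE: The on-disk PFS record may be shorter or
			 * longer than the current structure, so only the
			 * overlapping prefix is copied; the rest of the
			 * in-memory template keeps its defaults.  This
			 * presumably lets older and newer PFS record
			 * layouts coexist on the same media.
			 */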
		}
	}
	hammer_done_cursor(&cursor);

	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_ref(&pfsm->lock);
	if (ip)
		hammer_rel_inode(ip, 0);
	if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
		kfree(pfsm, hmp->m_misc);
		goto retry;
	}
	return(pfsm);
}
/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	hammer_inode_t ip;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = pfsm->localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	/*
	 * Replace any in-memory version of the record.
	 */
	error = hammer_ip_lookup(&cursor);
	if (error == 0 && hammer_cursor_inmem(&cursor)) {
		record = cursor.iprec;
		if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
			KKASSERT(cursor.deadlk_rec == NULL);
			hammer_ref(&record->lock);
			cursor.deadlk_rec = record;
			error = EDEADLK;
		} else {
			record->flags |= HAMMER_RECF_DELETED_FE;
		}
	}

	/*
	 * Allocate replacement general record.  The backend flush will
	 * delete any on-disk version of the record.
	 */
	if (error == 0 || error == ENOENT) {
		record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
		record->type = HAMMER_MEM_RECORD_GENERAL;

		record->leaf.base.localization = ip->obj_localization +
						 HAMMER_LOCALIZE_MISC;
		record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
		record->leaf.base.key = pfsm->localization;
		record->leaf.data_len = sizeof(pfsm->pfsd);
		bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
		error = hammer_ip_add_record(trans, record);
	}
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	hammer_rel_inode(ip, 0);
	return(error);
}
/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
		       hammer_pseudofs_inmem_t pfsm)
{
	hammer_inode_t ip;
	struct vattr vap;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      pfsm->localization, 0, &error);
	if (ip == NULL) {
		vattr_null(&vap);
		vap.va_mode = 0755;
		vap.va_type = VDIR;
		error = hammer_create_inode(trans, &vap, cred,
					    NULL, NULL, 0,
					    pfsm, &ip);
		if (error == 0) {
			++ip->ino_data.nlinks;
			hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (ip)
		hammer_rel_inode(ip, 0);
	return(error);
}
/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
	int res;

	hammer_ref(&ip->lock);
	if (ip->lock.refs == 2 && ip->vp)
		vclean_unlocked(ip->vp);
	if (ip->lock.refs == 1 && ip->vp == NULL)
		res = 0;
	else
		res = -1;	/* stop, someone is using the inode */
	hammer_rel_inode(ip, 0);
	return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
	int res;
	int try;

	for (try = res = 0; try < 4; ++try) {
		res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
						 hammer_inode_pfs_cmp,
						 hammer_unload_pseudofs_callback,
						 &localization);
		if (res == 0 && try > 1)
			break;
		hammer_flusher_sync(trans->hmp);
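		/*
		 * NOTE: Inodes which still have refs on the early passes
		 * are usually just waiting on the flusher; syncing the
		 * flusher between scan passes gives dirty inodes a chance
		 * to drain so a later pass can disassociate them.
		 */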
/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
	hammer_unref(&pfsm->lock);
	if (pfsm->lock.refs == 0) {
		RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
		kfree(pfsm, hmp->m_misc);
	}
}
/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_record_t record;
	int error;
	int redirty;

retry:
	/*
	 * If the inode has a presence on-disk then locate it and mark
	 * it deleted, setting DELONDISK.
	 *
	 * The record may or may not be physically deleted, depending on
	 * the retention policy.
	 */
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = ip->obj_localization +
					       HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (hammer_debug_inode)
			kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

		if (error == 0) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			if (hammer_debug_inode)
				kprintf(" error %d\n", error);
			if (error == 0)
				ip->flags |= HAMMER_INODE_DELONDISK;
			if (cursor->node)
				hammer_cache_node(&ip->cache[0], cursor->node);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("IPDED %p %d\n", ip, error);
			if (error == 0)
				goto retry;
		}
	}
	/*
	 * Ok, write out the initial record or a new record (after deleting
	 * the old one), unless the DELETED flag is set.  This routine will
	 * clear DELONDISK if it writes out a record.
	 *
	 * Update our inode statistics if this is the first application of
	 * the inode on-disk.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		/*
		 * Generate a record and write it to the media.  We clean-up
		 * the state before releasing so we do not have to set-up
		 * a new flush state.
		 */
		record = hammer_alloc_mem_record(ip, 0);
		record->type = HAMMER_MEM_RECORD_INODE;
		record->flush_state = HAMMER_FST_FLUSH;
		record->leaf = ip->sync_ino_leaf;
		record->leaf.base.create_tid = trans->tid;
		record->leaf.data_len = sizeof(ip->sync_ino_data);
		record->leaf.create_ts = trans->time32;
		record->data = (void *)&ip->sync_ino_data;
		record->flags |= HAMMER_RECF_INTERLOCK_BE;

		/*
		 * If this flag is set we cannot sync the new file size
		 * because we haven't finished related truncations.  The
		 * inode will be flushed in another flush group to finish
		 * the job.
		 */
		if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
		    ip->sync_ino_data.size != ip->ino_data.size) {
			redirty = 1;
			ip->sync_ino_data.size = ip->ino_data.size;
		} else {
			redirty = 0;
		}

		for (;;) {
			error = hammer_ip_sync_record_cursor(cursor, record);
			if (hammer_debug_inode)
				kprintf("GENREC %p rec %08x %d\n",
					ip, record->flags, error);
			if (error != EDEADLK)
				break;
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("GENREC reinit %d\n", error);
			if (error)
				break;
		}

		/*
		 * Note:  The record was never on the inode's record tree
		 * so just wave our hands importantly and destroy it.
		 */
		record->flags |= HAMMER_RECF_COMMITTED;
		record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
		record->flush_state = HAMMER_FST_IDLE;
		++ip->rec_generation;
		hammer_rel_mem_record(record);

		if (error == 0) {
			if (hammer_debug_inode)
				kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
			ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
					    HAMMER_INODE_ATIME |
					    HAMMER_INODE_MTIME);
			ip->flags &= ~HAMMER_INODE_DELONDISK;
			if (redirty)
				ip->sync_flags |= HAMMER_INODE_DDIRTY;

			/*
			 * Root volume count of inodes
			 */
			hammer_sync_lock_sh(trans);
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
				if (hammer_debug_inode)
					kprintf("NOWONDISK %p\n", ip);
			}
			hammer_sync_unlock(trans);
		}
	}

	/*
	 * If the inode has been destroyed, clean out any left-over flags
	 * that may have been set by the frontend.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
				    HAMMER_INODE_ATIME |
				    HAMMER_INODE_MTIME);
	}
	return(error);
}
/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

retry:
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
	    HAMMER_INODE_ONDISK) {
		return(0);
	}

	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_INODE;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.key = 0;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor->key_beg.obj_type = 0;
	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_GET_LEAF;
	cursor->flags |= HAMMER_CURSOR_GET_DATA;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(cursor);
	if (error == 0) {
		hammer_cache_node(&ip->cache[0], cursor->node);
		if (ip->sync_flags & HAMMER_INODE_MTIME) {
			/*
			 * Updating MTIME requires an UNDO.  Just cover
			 * both atime and mtime.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
				     HAMMER_ITIMES_BASE(&cursor->data->inode),
				     HAMMER_ITIMES_BYTES);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			cursor->data->inode.mtime = ip->sync_ino_data.mtime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		} else if (ip->sync_flags & HAMMER_INODE_ATIME) {
			/*
			 * Updating atime only can be done in-place with
			 * no UNDO.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
					     NULL, 0);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
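			/*
			 * NOTE: Because the atime-only path generates no
			 * UNDO, a crash can silently revert atime to an
			 * older value.  That is an acceptable trade;
			 * mtime matters for correctness (e.g. make), so
			 * it always gets full UNDO coverage above.
			 */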
		}
		ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor,
					   &ip->cache[0], ip);
		if (error == 0)
			goto retry;
	}
	return(error);
}
/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	/*hammer_mount_t hmp = ip->hmp;*/

	/*
	 * Handle disposition when dropping the last ref.
	 */
	for (;;) {
		if (ip->lock.refs == 1) {
			/*
			 * Determine whether on-disk action is needed for
			 * the inode's final disposition.
			 */
			KKASSERT(ip->vp == NULL);
			hammer_inode_unloadable_check(ip, 0);
			if (ip->flags & HAMMER_INODE_MODMASK) {
				hammer_flush_inode(ip, 0);
			} else if (ip->lock.refs == 1) {
				hammer_unload_inode(ip);
				break;
			}
		} else {
			if (flush)
				hammer_flush_inode(ip, 0);

			/*
			 * The inode still has multiple refs, try to drop
			 * one ref.
			 */
			KKASSERT(ip->lock.refs >= 1);
			if (ip->lock.refs > 1) {
				hammer_unref(&ip->lock);
				break;
			}
		}
	}
}
/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
	hammer_mount_t hmp = ip->hmp;

	KASSERT(ip->lock.refs == 1,
		("hammer_unload_inode: %d refs\n", ip->lock.refs));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT(hammer_notlocked(&ip->lock));
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->target_list));

	RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

	hammer_free_inode(ip);
	return(0);
}
/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
{
	hammer_record_t rec;

	/*
	 * Get rid of the inode's in-memory records, regardless of their
	 * state, and clear the mod-mask.
	 */
	while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
		TAILQ_REMOVE(&ip->target_list, rec, target_entry);
		rec->target_ip = NULL;
		if (rec->flush_state == HAMMER_FST_SETUP)
			rec->flush_state = HAMMER_FST_IDLE;
	}
	while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
		if (rec->flush_state == HAMMER_FST_FLUSH)
			--rec->flush_group->refs;
		else
			hammer_ref(&rec->lock);
		KKASSERT(rec->lock.refs == 1);
		rec->flush_state = HAMMER_FST_IDLE;
		rec->flush_group = NULL;
		rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
		rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
		++ip->rec_generation;
		hammer_rel_mem_record(rec);
	}
	ip->flags &= ~HAMMER_INODE_MODMASK;
	ip->sync_flags &= ~HAMMER_INODE_MODMASK;
	KKASSERT(ip->vp == NULL);

	/*
	 * Remove the inode from any flush group, force it idle.  FLUSH
	 * and SETUP states have an inode ref.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_FLUSH:
		TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
		--ip->flush_group->refs;
		ip->flush_group = NULL;
		/* fall through */
	case HAMMER_FST_SETUP:
		hammer_unref(&ip->lock);
		ip->flush_state = HAMMER_FST_IDLE;
		/* fall through */
	case HAMMER_FST_IDLE:
		break;
	}

	/*
	 * There shouldn't be any associated vnode.  The unload needs at
	 * least one ref, if we do have a vp steal its ip ref.
	 */
	if (ip->vp) {
		kprintf("hammer_destroy_inode_callback: Unexpected "
			"vnode association ip %p vp %p\n", ip, ip->vp);
		ip->vp->v_data = NULL;
		ip->vp = NULL;
	} else {
		hammer_ref(&ip->lock);
	}
	hammer_unload_inode(ip);
	return(0);
}
/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
	hammer_mount_t hmp = ip->hmp;

	if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
		ip->flags |= HAMMER_INODE_RO;
	else
		ip->flags &= ~HAMMER_INODE_RO;
	return(0);
}
/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
	/*
	 * ronly of 0 or 2 does not trigger assertion.
	 * 2 is a special error state
	 */
	KKASSERT(ip->hmp->ronly != 1 ||
		 (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
			   HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
			   HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
	if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
		ip->flags |= HAMMER_INODE_RSV_INODES;
		++ip->hmp->rsv_inodes;
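		/*
		 * NOTE: rsv_inodes counts inodes carrying dirty state
		 * that the flusher has not yet synced; it appears to be
		 * used to reserve resources so the flusher cannot be
		 * overcommitted.  The flag guarantees each inode is
		 * counted only once; the count drops when the inode is
		 * freed or its reservation is released.
		 */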
	}
	ip->flags |= flags;
}

/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * Upon return if the inode could not be flushed due to a setup
 * dependency, then it will be automatically flushed when the dependency
 * is satisfied.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
	hammer_mount_t hmp;
	hammer_flush_group_t flg;
	int good;

	/*
	 * next_flush_group is the first flush group we can place the inode
	 * in.  It may be NULL.  If it becomes full we append a new flush
	 * group and make that the next_flush_group.
	 */
	hmp = ip->hmp;
	while ((flg = hmp->next_flush_group) != NULL) {
		KKASSERT(flg->running == 0);
		if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
			break;
		hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
		hammer_flusher_async(ip->hmp, flg);
	}
	if (flg == NULL) {
		flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
		hmp->next_flush_group = flg;
		TAILQ_INIT(&flg->flush_list);
		TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
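		/*
		 * NOTE: Flush groups batch inodes into UNDO-FIFO-sized
		 * units.  A group is only usable while (total_count +
		 * refs) stays within hmp->undo_rec_limit; past that the
		 * group is pushed to the flusher and a fresh group is
		 * appended, so no single flush can blow out the UNDO
		 * FIFO.
		 */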
	}

	/*
	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
	 * state we have to put it back into an IDLE state so we can
	 * drop the extra ref.
	 *
	 * If we have a parent dependency we must still fall through
	 * and process it below.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		if (ip->flush_state == HAMMER_FST_SETUP &&
		    TAILQ_EMPTY(&ip->target_list)) {
			ip->flush_state = HAMMER_FST_IDLE;
			hammer_rel_inode(ip, 0);
		}
		if (ip->flush_state == HAMMER_FST_IDLE)
			return;
	}

	/*
	 * Our flush action will depend on the current state.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * We have no dependencies and can flush immediately.  Some
		 * of our children may not be flushable so we have to re-test
		 * with that additional knowledge.
		 */
		hammer_flush_inode_core(ip, flg, flags);
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Recurse upwards through dependencies via target_list
		 * and start their flusher actions going if possible.
		 *
		 * 'good' is our connectivity.  -1 means we have none and
		 * can't flush, 0 means there weren't any dependencies, and
		 * 1 means we have good connectivity.
		 */
		good = hammer_setup_parent_inodes(ip, 0, flg);

		if (good >= 0) {
			/*
			 * We can continue if good >= 0.  Determine how
			 * many records under our inode can be flushed (and
			 * mark them).
			 */
			hammer_flush_inode_core(ip, flg, flags);
		} else {
			/*
			 * Parent has no connectivity, tell it to flush
			 * us as soon as it does.
			 *
			 * The REFLUSH flag is also needed to trigger
			 * dependency wakeups.
			 */
			ip->flags |= HAMMER_INODE_CONN_DOWN |
				     HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * We are already flushing, flag the inode to reflush
		 * if needed after it completes its current flush.
		 *
		 * The REFLUSH flag is also needed to trigger
		 * dependency wakeups.
		 */
		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
			ip->flags |= HAMMER_INODE_REFLUSH;
		if (flags & HAMMER_FLUSH_SIGNAL) {
			ip->flags |= HAMMER_INODE_RESIGNAL;
			hammer_flusher_async(ip->hmp, flg);
		}
		break;
	}
}
/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 * so for now do not ref/deref the structures.  Note that if we use the
 * ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
			   hammer_flush_group_t flg)
{
	hammer_record_t depend;
	int good;
	int r;

	/*
	 * If we hit our recursion limit and we have parent dependencies
	 * we cannot continue.  Returning < 0 will cause us to be flagged
	 * for reflush.  Returning -2 cuts off additional dependency checks
	 * because they are likely to also hit the depth limit.
	 *
	 * We cannot return < 0 if there are no dependencies or there might
	 * not be anything to wakeup (ip).
	 */
	if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
		kprintf("HAMMER Warning: depth limit reached on "
			"setup recursion, inode %p %016llx\n",
			ip, (long long)ip->obj_id);
		return(-2);
	}

	/*
	 * Scan dependencies
	 */
	good = 0;
	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
		r = hammer_setup_parent_inodes_helper(depend, depth, flg);
		KKASSERT(depend->target_ip == ip);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;

		/*
		 * If we failed due to the recursion depth limit then stop
		 * now.
		 */
		if (r == -2)
			break;
	}
	return(good);
}
/*
 * This helper function takes a record representing the dependency between
 * the parent inode and child inode.
 *
 * record->ip		= parent inode
 * record->target_ip	= child inode
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependency and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
				  hammer_flush_group_t flg)
{
	hammer_inode_t pip;
	int good;

	KKASSERT(record->flush_state != HAMMER_FST_IDLE);
	pip = record->ip;

	/*
	 * If the record is already flushing, is it in our flush group?
	 *
	 * If it is in our flush group but it is a general record or a
	 * delete-on-disk, it does not improve our connectivity (return 0),
	 * and if the target inode is not trying to destroy itself we can't
	 * allow the operation yet anyway (the second return -1).
	 */
	if (record->flush_state == HAMMER_FST_FLUSH) {
		/*
		 * If not in our flush group ask the parent to reflush
		 * us as soon as possible.
		 */
		if (record->flush_group != flg) {
			pip->flags |= HAMMER_INODE_REFLUSH;
			record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
			return(-1);
		}

		/*
		 * If in our flush group everything is already set up,
		 * just return whether the record will improve our
		 * visibility or not.
		 */
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		return(0);
	}

	/*
	 * It must be a setup record.  Try to resolve the setup dependencies
	 * by recursing upwards so we can place ip on the flush list.
	 *
	 * Limit ourselves to 20 levels of recursion to avoid blowing out
	 * the kernel stack.  If we hit the recursion limit we can't flush
	 * until the parent flushes.  The parent will flush independently
	 * on its own and ultimately a deep recursion will be resolved.
	 */
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	good = hammer_setup_parent_inodes(pip, depth + 1, flg);

	/*
	 * If good < 0 the parent has no connectivity and we cannot safely
	 * flush the directory entry, which also means we can't flush our
	 * ip.  Flag us for downward recursion once the parent's
	 * connectivity is resolved.  Flag the parent for [re]flush or it
	 * may not check for downward recursions.
	 */
	if (good < 0) {
		pip->flags |= HAMMER_INODE_REFLUSH;
		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
		return(good);
	}

	/*
	 * We are go, place the parent inode in a flushing state so we can
	 * place its record in a flushing state.  Note that the parent
	 * may already be flushing.  The record must be in the same flush
	 * group as the parent.
	 */
	if (pip->flush_state != HAMMER_FST_FLUSH)
		hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
	KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	if (record->type == HAMMER_MEM_RECORD_DEL &&
	    (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
		/*
		 * Regardless of flushing state we cannot sync this path if the
		 * record represents a delete-on-disk but the target inode
		 * is not ready to sync its own deletion.
		 *
		 * XXX need to count effective nlinks to determine whether
		 * the flush is ok, otherwise removing a hardlink will
		 * just leave the DEL record to rot.
		 */
		record->target_ip->flags |= HAMMER_INODE_REFLUSH;
		return(-1);
	}
	if (pip->flush_group == flg) {
		/*
		 * Because we have not calculated nlinks yet we can just
		 * set records to the flush state if the parent is in
		 * the same flush group as we are.
		 */
		record->flush_state = HAMMER_FST_FLUSH;
		record->flush_group = flg;
		++record->flush_group->refs;
		hammer_ref(&record->lock);

		/*
		 * A general directory-add contributes to our visibility.
		 *
		 * Otherwise it is probably a directory-delete or
		 * delete-on-disk record and does not contribute to our
		 * visibility (but we can still flush it).
		 */
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		return(0);
	} else {
		/*
		 * If the parent is not in our flush group we cannot
		 * flush this record yet, there is no visibility.
		 * We tell the parent to reflush and mark ourselves
		 * so the parent knows it should flush us too.
		 */
		pip->flags |= HAMMER_INODE_REFLUSH;
		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
		return(-1);
	}
}
/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
{
	int go_count;

	/*
	 * Set flush state and prevent the flusher from cycling into
	 * the next flush group.  Do not place the ip on the list yet.
	 * Inodes not in the idle state get an extra reference.
	 */
	KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
	if (ip->flush_state == HAMMER_FST_IDLE)
		hammer_ref(&ip->lock);
	ip->flush_state = HAMMER_FST_FLUSH;
	ip->flush_group = flg;
	++ip->hmp->flusher.group_lock;
	++ip->hmp->count_iqueued;
	++hammer_count_iqueued;
	++flg->total_count;

	/*
	 * If the flush group reaches the autoflush limit we want to signal
	 * the flusher.  This is particularly important for remove()s.
	 */
	if (flg->total_count == hammer_autoflush)
		flags |= HAMMER_FLUSH_SIGNAL;

	/*
	 * We need to be able to vfsync/truncate from the backend.
	 */
	KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
	if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
		ip->flags |= HAMMER_INODE_VHELD;
		vref(ip->vp);
	}

	/*
	 * Figure out how many in-memory records we can actually flush
	 * (not including inode meta-data, buffers, etc).
	 */
	KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
	if (flags & HAMMER_FLUSH_RECURSION) {
		/*
		 * If this is an upwards recursion we do not want to
		 * recurse down again!
		 */
		go_count = 1;
	} else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		/*
		 * No new records are added if we must complete a flush
		 * from a previous cycle, but we do have to move the records
		 * from the previous cycle to the current one.
		 */
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_syncgrp_child_callback, NULL);
	} else {
		/*
		 * Normal flush, scan records and bring them into the flush.
		 * Directory adds and deletes are usually skipped (they are
		 * grouped with the related inode rather than with the
		 * directory entry).
		 *
		 * go_count can be negative, which means the scan aborted
		 * due to the flush group being over-full and we should
		 * flush what we have.
		 */
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_setup_child_callback, NULL);
	}
	/*
	 * This is a more involved test that includes go_count.  If we
	 * can't flush, flag the inode and return.  If go_count is 0 we
	 * were unable to flush any records in our rec_tree and
	 * must ignore the XDIRTY flag.
	 */
	if (go_count == 0) {
		if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
			--ip->hmp->count_iqueued;
			--hammer_count_iqueued;

			ip->flush_state = HAMMER_FST_SETUP;
			ip->flush_group = NULL;
			if (ip->flags & HAMMER_INODE_VHELD) {
				ip->flags &= ~HAMMER_INODE_VHELD;
				vrele(ip->vp);
			}

			/*
			 * REFLUSH is needed to trigger dependency wakeups
			 * when an inode is in SETUP.
			 */
			ip->flags |= HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
			if (--ip->hmp->flusher.group_lock == 0)
				wakeup(&ip->hmp->flusher.group_lock);
			return;
		}
	}
	/*
	 * Snapshot the state of the inode for the backend flusher.
	 *
	 * We continue to retain save_trunc_off even when all truncations
	 * have been resolved as an optimization to determine if we can
	 * skip the B-Tree lookup for overwrite deletions.
	 *
	 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
	 * and stays in ip->flags.  Once set, it stays set until the
	 * inode is destroyed.
	 */
	if (ip->flags & HAMMER_INODE_TRUNCATED) {
		KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
		ip->sync_trunc_off = ip->trunc_off;
		ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
		ip->flags &= ~HAMMER_INODE_TRUNCATED;
		ip->sync_flags |= HAMMER_INODE_TRUNCATED;

		/*
		 * The save_trunc_off used to cache whether the B-Tree
		 * holds any records past that point is not used until
		 * after the truncation has succeeded, so we can safely
		 * set it now.
		 */
		if (ip->save_trunc_off > ip->sync_trunc_off)
			ip->save_trunc_off = ip->sync_trunc_off;
	}
	ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
			   ~HAMMER_INODE_TRUNCATED);
	ip->sync_ino_leaf = ip->ino_leaf;
	ip->sync_ino_data = ip->ino_data;
	ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
#ifdef DEBUG_TRUNCATE
	if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
		kprintf("truncateS %016llx\n", ip->sync_trunc_off);
#endif
	/*
	 * The flusher list inherits our inode and reference.
	 */
	KKASSERT(flg->running == 0);
	TAILQ_INSERT_TAIL(&flg->flush_list, ip, flush_entry);
	if (--ip->hmp->flusher.group_lock == 0)
		wakeup(&ip->hmp->flusher.group_lock);

	if (flags & HAMMER_FLUSH_SIGNAL) {
		hammer_flusher_async(ip->hmp, flg);
	}
}
/*
 * Callback for scan of ip->rec_tree.  Try to include each record in our
 * flush.  ip->flush_group has been set but the inode has not yet been
 * moved into a flushing state.
 *
 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
 * the inode.
 *
 * We return 1 for any record placed or found in FST_FLUSH, which prevents
 * the caller from shortcutting the flush.
 */
static int
hammer_setup_child_callback(hammer_record_t rec, void *data)
{
	hammer_flush_group_t flg;
	hammer_inode_t target_ip;
	hammer_inode_t ip;
	int r;

	/*
	 * Records deleted or committed by the backend are ignored.
	 * Note that the flush detects deleted frontend records at
	 * multiple points to deal with races.  This is just the first
	 * line of defense.  The only time HAMMER_RECF_DELETED_FE cannot
	 * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
	 * messes up link-count calculations.
	 *
	 * NOTE: Don't get confused between record deletion and, say,
	 * directory entry deletion.  The deletion of a directory entry
	 * which is on-media has nothing to do with the record deletion
	 * mechanism.
	 */
	if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
			  HAMMER_RECF_COMMITTED)) {
		if (rec->flush_state == HAMMER_FST_FLUSH) {
			KKASSERT(rec->flush_group == rec->ip->flush_group);
			r = 1;
		} else {
			r = 0;
		}
		return(r);
	}

	/*
	 * If the record is in an idle state it has no dependencies and
	 * can be flushed.
	 */
	ip = rec->ip;
	flg = ip->flush_group;
	r = 0;

	switch(rec->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * The record has no setup dependency, we can flush it.
		 */
		KKASSERT(rec->target_ip == NULL);
		rec->flush_state = HAMMER_FST_FLUSH;
		rec->flush_group = flg;
		++flg->refs;
		hammer_ref(&rec->lock);
		r = 1;
		break;
2111 case HAMMER_FST_SETUP:
2113 * The record has a setup dependancy. These are typically
2114 * directory entry adds and deletes. Such entries will be
2115 * flushed when their inodes are flushed so we do not
2116 * usually have to add them to the flush here. However,
2117 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
2118 * it is asking us to flush this record (and it).
2120 target_ip = rec->target_ip;
2121 KKASSERT(target_ip != NULL);
2122 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
2125 * If the target IP is already flushing in our group
2126 * we could associate the record, but target_ip has
2127 * already synced ino_data to sync_ino_data and we
2128 * would also have to adjust nlinks. Plus there are
2129 * ordering issues for adds and deletes.
2131 * Reflush downward if this is an ADD, and upward if
2134 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
2135 if (rec->flush_state == HAMMER_MEM_RECORD_ADD)
2136 ip->flags |= HAMMER_INODE_REFLUSH;
2138 target_ip->flags |= HAMMER_INODE_REFLUSH;
2143 * Target IP is not yet flushing. This can get complex
2144 * because we have to be careful about the recursion.
2146 * Directories create an issue for us in that if a flush
2147 * of a directory is requested the expectation is to flush
2148 * any pending directory entries, but this will cause the
2149 * related inodes to recursively flush as well. We can't
2150 * really defer the operation so just get as many as we
2154 if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
2155 (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
2157 * We aren't reclaiming and the target ip was not
2158 * previously prevented from flushing due to this
2159 * record dependancy. Do not flush this record.
2164 if (flg->total_count + flg->refs >
2165 ip->hmp->undo_rec_limit) {
2167 * Our flush group is over-full and we risk blowing
2168 * out the UNDO FIFO. Stop the scan, flush what we
2169 * have, then reflush the directory.
2171 * The directory may be forced through multiple
2172 * flush groups before it can be completely
2175 ip->flags |= HAMMER_INODE_RESIGNAL |
2176 HAMMER_INODE_REFLUSH;
2178 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
2180 * If the target IP is not flushing we can force
2181 * it to flush, even if it is unable to write out
2182 * any of its own records we have at least one in
2183 * hand that we CAN deal with.
2185 rec->flush_state = HAMMER_FST_FLUSH;
2186 rec->flush_group = flg;
2188 hammer_ref(&rec->lock);
2189 hammer_flush_inode_core(target_ip, flg,
2190 HAMMER_FLUSH_RECURSION);
		} else {
			/*
			 * General or delete-on-disk record.
			 *
			 * XXX this needs help.  If a delete-on-disk we could
			 * disconnect the target.  If the target has its own
			 * dependencies they really need to be flushed.
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = flg;
			++flg->refs;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip, flg,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * The flush_group should already match.
		 */
		KKASSERT(rec->flush_group == flg);
		r = 1;
		break;
	}
	return(r);
}
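/*
 * NOTE on the return value above (summary inferred from the caller):
 * the callback feeds the RB_SCAN in hammer_flush_inode_core(), which
 * accumulates positive returns into go_count (records pulled into the
 * flush group).  A negative return aborts the scan, and 0 skips the
 * record without aborting.
 */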
/*
 * This version just moves records already in a flush state to the new
 * flush group and that is it.
 */
static int
hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
{
	hammer_inode_t ip = rec->ip;

	switch(rec->flush_state) {
	case HAMMER_FST_FLUSH:
		KKASSERT(rec->flush_group == ip->flush_group);
		break;
	default:
		break;
	}
	return(0);
}
/*
 * Wait for a previously queued flush to complete.
 *
 * If a critical error occurred we don't try to wait.
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
	hammer_flush_group_t flg;

	flg = NULL;
	if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
		while (ip->flush_state != HAMMER_FST_IDLE &&
		       (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
			if (ip->flush_state == HAMMER_FST_SETUP)
				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
			if (ip->flush_state != HAMMER_FST_IDLE) {
				ip->flags |= HAMMER_INODE_FLUSHW;
				tsleep(&ip->flags, 0, "hmrwin", 0);
			}
		}
	}
}
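/*
 * Typical use (sketch, based on the synchronous fsync path): the
 * frontend queues a flush and then blocks here until the inode
 * returns to FST_IDLE or a critical filesystem error is flagged:
 *
 *	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
 *	hammer_wait_inode(ip);
 */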
/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip, int error)
{
	hammer_mount_t hmp;
	int dorel;

	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

	hmp = ip->hmp;
	/*
	 * Auto-reflush if the backend could not completely flush
	 * the inode.  This fixes a case where a deferred buffer flush
	 * could cause fsync to return early.
	 */
	if (ip->sync_flags & HAMMER_INODE_MODMASK)
		ip->flags |= HAMMER_INODE_REFLUSH;
	/*
	 * Merge left-over flags back into the frontend and fix the state.
	 * Incomplete truncations are retained by the backend.
	 */
	ip->error = error;
	ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
	ip->sync_flags &= HAMMER_INODE_TRUNCATED;
	/*
	 * The backend may have adjusted nlinks, so if the adjusted nlinks
	 * does not match the frontend set the frontend's DDIRTY flag again.
	 */
	if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
		ip->flags |= HAMMER_INODE_DDIRTY;
	/*
	 * Fix up the dirty buffer status.
	 */
	if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
		ip->flags |= HAMMER_INODE_BUFS;
	}
	/*
	 * Re-set the XDIRTY flag if some of the inode's in-memory records
	 * could not be flushed.
	 */
	KKASSERT((RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
		 (!RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) != 0));
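	/*
	 * (In other words: XDIRTY must be set if and only if in-memory
	 * records remain in rec_tree.)
	 */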
	/*
	 * Do not lose track of inodes which no longer have vnode
	 * associations, otherwise they may never get flushed again.
	 *
	 * The reflush flag can be set superfluously, causing extra pain
	 * for no reason.  If the inode is no longer modified it no longer
	 * needs to be flushed.
	 */
	if (ip->flags & HAMMER_INODE_MODMASK) {
		if (ip->vp == NULL)
			ip->flags |= HAMMER_INODE_REFLUSH;
	} else {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
	}
	/*
	 * Adjust the flush state.
	 */
	if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		/*
		 * We were unable to flush out all our records, leave the
		 * inode in a flush state and in the current flush group.
		 * The flush group will be re-run.
		 *
		 * This occurs if the UNDO block gets too full or there is
		 * too much dirty meta-data, and allows the flusher to
		 * finalize the UNDO block and then re-flush.
		 */
		ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
		dorel = 0;
	} else {
		/*
		 * Remove from the flush_group.
		 */
		TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
		ip->flush_group = NULL;
		/*
		 * Clean up the vnode ref and tracking counts.
		 */
		if (ip->flags & HAMMER_INODE_VHELD) {
			ip->flags &= ~HAMMER_INODE_VHELD;
			vrele(ip->vp);
		}
		--hmp->count_iqueued;
		--hammer_count_iqueued;
		/*
		 * And adjust the state.
		 */
		if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
			ip->flush_state = HAMMER_FST_IDLE;
			dorel = 1;
		} else {
			ip->flush_state = HAMMER_FST_SETUP;
			dorel = 0;
		}
		/*
		 * If the frontend is waiting for a flush to complete,
		 * wake it up.
		 */
		if (ip->flags & HAMMER_INODE_FLUSHW) {
			ip->flags &= ~HAMMER_INODE_FLUSHW;
			wakeup(&ip->flags);
		}
		/*
		 * If the frontend made more changes and requested another
		 * flush, then try to get it running.
		 *
		 * Reflushes are aborted when the inode is errored out.
		 */
		if (ip->flags & HAMMER_INODE_REFLUSH) {
			ip->flags &= ~HAMMER_INODE_REFLUSH;
			if (ip->flags & HAMMER_INODE_RESIGNAL) {
				ip->flags &= ~HAMMER_INODE_RESIGNAL;
				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
			} else {
				hammer_flush_inode(ip, 0);
			}
		}
	}
	/*
	 * If we have no parent dependencies we can clear CONN_DOWN.
	 */
	if (TAILQ_EMPTY(&ip->target_list))
		ip->flags &= ~HAMMER_INODE_CONN_DOWN;
	/*
	 * If the inode is now clean drop the space reservation.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    (ip->flags & HAMMER_INODE_RSV_INODES)) {
		ip->flags &= ~HAMMER_INODE_RSV_INODES;
		--hmp->rsv_inodes;
	}

	if (dorel)
		hammer_rel_inode(ip, 0);
}
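/*
 * Summary of the flush-state transitions performed above (derived from
 * the code):
 *
 *	FST_FLUSH -> FST_FLUSH	inode stays in its flush group when the
 *				backend flagged HAMMER_INODE_WOULDBLOCK
 *	FST_FLUSH -> FST_IDLE	no remaining dependencies or records
 *	FST_FLUSH -> FST_SETUP	dependencies or in-memory records remain
 */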
/*
 * Called from hammer_sync_inode() to synchronize in-memory records
 * to the media.
 */
static int
hammer_sync_record_callback(hammer_record_t record, void *data)
{
	hammer_cursor_t cursor = data;
	hammer_transaction_t trans = cursor->trans;
	hammer_mount_t hmp = trans->hmp;
	int error;
	/*
	 * Skip records that do not belong to the current flush.
	 */
	++hammer_stats_record_iterations;
	if (record->flush_state != HAMMER_FST_FLUSH)
		return(0);

#if 1
	if (record->flush_group != record->ip->flush_group) {
		kprintf("sync_record %p ip %p bad flush group %p %p\n",
			record, record->ip, record->flush_group,
			record->ip->flush_group);
		Debugger("blah2");
		return(0);
	}
#endif
	KKASSERT(record->flush_group == record->ip->flush_group);
	/*
	 * Interlock the record using the BE flag.  Once BE is set the
	 * frontend cannot change the state of FE.
	 *
	 * NOTE: If FE is set prior to us setting BE we still sync the
	 * record out, but the flush completion code converts it to
	 * a delete-on-disk record instead of destroying it.
	 */
	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
	record->flags |= HAMMER_RECF_INTERLOCK_BE;
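	/*
	 * Example of the race this interlock covers (illustrative): the
	 * frontend unlinks a file whose directory-entry ADD is in the
	 * middle of being synced.  If DELETED_FE was set before BE, the
	 * entry still reaches the media, and the flush completion code
	 * later converts the ADD into a delete-on-disk so the unlink is
	 * not lost.
	 */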
	/*
	 * The backend has already disposed of the record.
	 */
	if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
		error = 0;
		goto done;
	}
	/*
	 * If the whole inode is being deleted all on-disk records will
	 * be deleted very soon, and we can't sync any new records to disk
	 * because they will be deleted in the same transaction they were
	 * created in (delete_tid == create_tid), which will assert.
	 *
	 * XXX There may be a case with RECORD_ADD with DELETED_FE set
	 * that we currently panic on.
	 */
	if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
		switch(record->type) {
		case HAMMER_MEM_RECORD_DATA:
			/*
			 * We don't have to do anything, if the record was
			 * committed the space will have been accounted for
			 * anyway.
			 */
			/* fall through */
		case HAMMER_MEM_RECORD_GENERAL:
			/*
			 * Set the deleted-by-backend flag.  Do not set the
			 * backend committed flag, because we are throwing
			 * the record away.
			 */
			record->flags |= HAMMER_RECF_DELETED_BE;
			++record->ip->rec_generation;
			error = 0;
			goto done;
		case HAMMER_MEM_RECORD_ADD:
			panic("hammer_sync_record_callback: illegal add "
			      "during inode deletion record %p", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_INODE:
			panic("hammer_sync_record_callback: attempt to "
			      "sync inode record %p?", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_DEL:
			/*
			 * Follow through and issue the on-disk deletion.
			 */
			break;
		}
	}
	/*
	 * If DELETED_FE is set special handling is needed for directory
	 * entries.  Dependent pieces related to the directory entry may
	 * have already been synced to disk.  If this occurs we have to
	 * sync the directory entry and then change the in-memory record
	 * from an ADD to a DELETE to cover the fact that it's been
	 * deleted by the frontend.
	 *
	 * A directory delete covering record (MEM_RECORD_DEL) can never
	 * be deleted by the frontend.
	 *
	 * Any other record type (aka DATA) can be deleted by the frontend.
	 * XXX At the moment the flusher must skip it because there may
	 * be another data record in the flush group for the same block,
	 * meaning that some frontend data changes can leak into the
	 * backend's synchronization point.
	 */
	if (record->flags & HAMMER_RECF_DELETED_FE) {
		if (record->type == HAMMER_MEM_RECORD_ADD) {
			/*
			 * Convert a front-end deleted directory-add to
			 * a directory-delete entry later.
			 */
			record->flags |= HAMMER_RECF_CONVERT_DELETE;
		} else {
			/*
			 * Dispose of the record (race case).  Mark as
			 * deleted by backend (and not committed).
			 */
			KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
			record->flags |= HAMMER_RECF_DELETED_BE;
			++record->ip->rec_generation;
			error = 0;
			goto done;
		}
	}
	/*
	 * Assign the create_tid for new records.  Deletions already
	 * have the record's entire key properly set up.
	 */
	if (record->type != HAMMER_MEM_RECORD_DEL) {
		record->leaf.base.create_tid = trans->tid;
		record->leaf.create_ts = trans->time32;
	}
	for (;;) {
		error = hammer_ip_sync_record_cursor(cursor, record);
		if (error != EDEADLK)
			break;
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
					   record->ip);
		if (error)
			break;
	}
	record->flags &= ~HAMMER_RECF_CONVERT_DELETE;

	if (error)
		error = -error;
done:
	hammer_flush_record_done(record, error);
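	/*
	 * NOTE: a failing errno is negated before being returned so that
	 * the record scan in hammer_sync_inode() aborts instead of
	 * continuing with a damaged cursor; the caller converts the value
	 * back to a positive errno.
	 */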
	/*
	 * Do partial finalization if we have built up too many dirty
	 * buffers.  Otherwise a buffer cache deadlock can occur when
	 * doing things like creating tens of thousands of tiny files.
	 *
	 * We must release our cursor lock to avoid a 3-way deadlock
	 * due to the exclusive sync lock the finalizer must get.
	 */
	if (hammer_flusher_meta_limit(hmp)) {
		hammer_unlock_cursor(cursor);
		hammer_flusher_finalize(trans, 0);
		hammer_lock_cursor(cursor);
	}

	return(error);
}
/*
 * Backend function called by the flusher to sync an inode to media.
 */
int
hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	hammer_node_t tmp_node;
	hammer_record_t depend;
	hammer_record_t next;
	int error, tmp_error;
	u_int64_t nlinks;

	if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
		return(0);

	error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	if (error)
		return(error);
	/*
	 * Any directory records referencing this inode which are not in
	 * our current flush group must adjust our nlink count for the
	 * purposes of synchronization to disk.
	 *
	 * Records which are in our flush group can be unlinked from our
	 * inode now, potentially allowing the inode to be physically
	 * deleted.
	 *
	 * This cannot block.
	 */
	nlinks = ip->ino_data.nlinks;
	next = TAILQ_FIRST(&ip->target_list);
	while ((depend = next) != NULL) {
		next = TAILQ_NEXT(depend, target_entry);
		if (depend->flush_state == HAMMER_FST_FLUSH &&
		    depend->flush_group == ip->flush_group) {
			/*
			 * If this is an ADD that was deleted by the frontend
			 * the frontend nlinks count will have already been
			 * decremented, but the backend is going to sync its
			 * directory entry and must account for it.  The
			 * record will be converted to a delete-on-disk when
			 * it gets synced.
			 *
			 * If the ADD was not deleted by the frontend we
			 * can remove the dependency from our target_list.
			 */
			if (depend->flags & HAMMER_RECF_DELETED_FE) {
				++nlinks;
			} else {
				TAILQ_REMOVE(&ip->target_list, depend,
					     target_entry);
				depend->target_ip = NULL;
			}
		} else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
			/*
			 * Not part of our flush group and not deleted by
			 * the front-end, adjust the link count synced to
			 * the media (undo what the frontend did when it
			 * queued the record).
			 */
			KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
			switch(depend->type) {
			case HAMMER_MEM_RECORD_ADD:
				--nlinks;
				break;
			case HAMMER_MEM_RECORD_DEL:
				++nlinks;
				break;
			default:
				break;
			}
		}
	}
	/*
	 * Set dirty if we had to modify the link count.
	 */
	if (ip->sync_ino_data.nlinks != nlinks) {
		KKASSERT((int64_t)nlinks >= 0);
		ip->sync_ino_data.nlinks = nlinks;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
	}
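	/*
	 * Illustrative scenario (not from the original source): a file
	 * with on-media nlinks == 1 gains a hard link whose directory-
	 * entry ADD lands in a *later* flush group.  The frontend has
	 * already bumped ino_data.nlinks to 2, but this flush must write
	 * nlinks == 1 to the media, so the loop above subtracts the ADD
	 * back out (--nlinks) until its flush group actually syncs.
	 */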
	/*
	 * If there is a truncation queued destroy any data past the
	 * (aligned) truncation point.  Userland will have dealt with the
	 * buffer containing the truncation point for us.
	 *
	 * We don't flush pending frontend data buffers until after we've
	 * dealt with the truncation.
	 */
	if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
		/*
		 * Interlock trunc_off.  The VOP front-end may continue to
		 * make adjustments to it while we are blocked.
		 */
		off_t trunc_off;
		off_t aligned_trunc_off;
		int blkmask;

		trunc_off = ip->sync_trunc_off;
		blkmask = hammer_blocksize(trunc_off) - 1;
		aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
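		/*
		 * Worked example (assuming hammer_blocksize() returns a
		 * 16KB block, i.e. 16384): trunc_off = 100000 gives
		 * blkmask = 0x3fff, so aligned_trunc_off =
		 * (100000 + 16383) & ~0x3fff = 114688, the next block
		 * boundary at or above the truncation point.
		 */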
		/*
		 * Delete any whole blocks on-media.  The front-end has
		 * already cleaned out any partial block and made it
		 * pending.  The front-end may have updated trunc_off
		 * while we were blocked so we only use sync_trunc_off.
		 *
		 * This operation can blow out the buffer cache; EWOULDBLOCK
		 * means we were unable to complete the deletion.  The
		 * deletion will update sync_trunc_off in that case.
		 */
		error = hammer_ip_delete_range(&cursor, ip,
					       aligned_trunc_off,
					       0x7FFFFFFFFFFFFFFFLL, 2);
		if (error == EWOULDBLOCK) {
			ip->flags |= HAMMER_INODE_WOULDBLOCK;
			error = 0;
			goto defer_buffer_flush;
		}
		if (error)
			goto done;
		/*
		 * Clear the truncation flag on the backend after we have
		 * completed the deletions.  Backend data is now good again
		 * (including new records we are about to sync, below).
		 *
		 * Leave sync_trunc_off intact.  As we write additional
		 * records the backend will update sync_trunc_off.  This
		 * tells the backend whether it can skip the overwrite
		 * test.  This should work properly even when the backend
		 * writes full blocks where the truncation point straddles
		 * the block because the comparison is against the base
		 * offset of the record.
		 */
		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
		/* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
	} else {
		error = 0;
	}
	/*
	 * Now sync related records.  These will typically be directory
	 * entries, records tracking direct-writes, or delete-on-disk
	 * records.
	 */
	if (error == 0) {
		tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				    hammer_sync_record_callback, &cursor);
		if (tmp_error < 0)
			tmp_error = -tmp_error;
		if (tmp_error)
			error = tmp_error;
	}
	hammer_cache_node(&ip->cache[1], cursor.node);
	/*
	 * Re-seek for inode update, assuming our cache hasn't been ripped
	 * out from under us.
	 */
	if (error == 0) {
		tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
		if (tmp_node) {
			hammer_cursor_downgrade(&cursor);
			hammer_lock_sh(&tmp_node->lock);
			if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
				hammer_cursor_seek(&cursor, tmp_node, 0);
			hammer_unlock(&tmp_node->lock);
			hammer_rel_node(tmp_node);
		}
		error = 0;
	}
	/*
	 * If we are deleting the inode the frontend had better not have
	 * any active references on elements making up the inode.
	 *
	 * The call to hammer_ip_delete_clean() cleans up auxiliary records
	 * but not DB or DATA records.  Those must have already been deleted
	 * by the normal truncation mechanic.
	 */
	if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
	    RB_EMPTY(&ip->rec_tree) &&
	    (ip->sync_flags & HAMMER_INODE_DELETING) &&
	    (ip->flags & HAMMER_INODE_DELETED) == 0) {
		int count1 = 0;

		error = hammer_ip_delete_clean(&cursor, ip, &count1);
		if (error == 0) {
			ip->flags |= HAMMER_INODE_DELETED;
			ip->sync_flags &= ~HAMMER_INODE_DELETING;
			ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
			KKASSERT(RB_EMPTY(&ip->rec_tree));

			/*
			 * Set delete_tid in both the frontend and backend
			 * copy of the inode record.  The DELETED flag
			 * handles this, do not set DDIRTY.
			 */
			ip->ino_leaf.base.delete_tid = trans->tid;
			ip->sync_ino_leaf.base.delete_tid = trans->tid;
			ip->ino_leaf.delete_ts = trans->time32;
			ip->sync_ino_leaf.delete_ts = trans->time32;
			/*
			 * Adjust the inode count in the volume header.
			 */
			hammer_sync_lock_sh(trans);
			if (ip->flags & HAMMER_INODE_ONDISK) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
			}
			hammer_sync_unlock(trans);
		}
	}
	if (error)
		goto done;
	ip->sync_flags &= ~HAMMER_INODE_BUFS;

defer_buffer_flush:
	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 * DELETED and ONDISK are managed only in ip->flags.
	 *
	 * In the case of a deferred buffer flush we still update the
	 * on-disk inode to satisfy visibility requirements if there happen
	 * to be directory dependencies.
	 */
	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags.
		 * The delete flag takes care of things.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t record = RB_ROOT(&ip->rec_tree);
			hammer_ref(&record->lock);
			KKASSERT(record->lock.refs == 1);
			record->flags |= HAMMER_RECF_DELETED_BE;
			++record->ip->rec_generation;
			hammer_rel_mem_record(record);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set DDIRTY to force
		 * an initial record to be written.
		 *
		 * Also set the create_tid in both the frontend and backend
		 * copy of the inode record.
		 */
		ip->ino_leaf.base.create_tid = trans->tid;
		ip->ino_leaf.create_ts = trans->time32;
		ip->sync_ino_leaf.base.create_tid = trans->tid;
		ip->sync_ino_leaf.create_ts = trans->time32;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
		break;
	}
	/*
	 * If DDIRTY is set, write out a new record.  If the inode
	 * is already on-disk the old record is marked as deleted.
	 *
	 * If DELETED is set hammer_update_inode() will delete the existing
	 * record without writing out a new one.
	 *
	 * If *ONLY* the ATIME/MTIME flags are set we can update the record
	 * in-place.
	 */
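	/*
	 * Decision summary (derived from the tests below):
	 *
	 *	DELETED set			-> hammer_update_inode()
	 *	only ATIME/MTIME dirty		-> hammer_update_itimes()
	 *	DDIRTY (w/ or w/o timestamps)	-> hammer_update_inode()
	 */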
	if (ip->flags & HAMMER_INODE_DELETED) {
		error = hammer_update_inode(&cursor, ip);
	} else
	if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
	    (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
		error = hammer_update_itimes(&cursor, ip);
	} else
	if (ip->sync_flags & (HAMMER_INODE_DDIRTY |
			      HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
		error = hammer_update_inode(&cursor, ip);
	}
done:
	if (error) {
		hammer_critical_error(ip->hmp, ip, error,
				      "while syncing inode");
	}
	hammer_done_cursor(&cursor);
	return(error);
}
/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
	struct vnode *vp;

	/*
	 * Set the DELETING flag when the link count drops to 0 and the
	 * OS no longer has any opens on the inode.
	 *
	 * The backend will clear DELETING (a mod flag) and set DELETED
	 * (a state flag) when it is actually able to perform the
	 * operation.
	 *
	 * Don't reflag the deletion if the flusher is currently syncing
	 * one that was already flagged.  A previously set DELETING flag
	 * may bounce around flags and sync_flags until the operation is
	 * completely done.
	 */
	if (ip->ino_data.nlinks == 0 &&
	    ((ip->flags | ip->sync_flags) &
	     (HAMMER_INODE_DELETING | HAMMER_INODE_DELETED)) == 0) {
		ip->flags |= HAMMER_INODE_DELETING;
		ip->flags |= HAMMER_INODE_TRUNCATED;
		ip->trunc_off = 0;
		vp = NULL;
		if (getvp) {
			if (hammer_get_vnode(ip, &vp) != 0)
				return;
		}

		/*
		 * Final cleanup.
		 */
		if (ip->vp) {
			vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
			vnode_pager_setsize(ip->vp, 0);
		}
		if (getvp) {
			vput(vp);
		}
	}
}
/*
 * After potentially resolving a dependency the inode is tested
 * to determine whether it needs to be reflushed.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		hammer_ref(&ip->lock);
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
		hammer_rel_inode(ip, 0);
	}
}
/*
 * Clear the RECLAIM flag on an inode.  This occurs when the inode is
 * reassociated with a vp or just before it gets freed.
 *
 * Pipeline wakeups to threads blocked due to an excessive number of
 * detached inodes.  The reclaim count generates a bit of negative
 * feedback.
 */
static void
hammer_inode_wakereclaims(hammer_inode_t ip, int dowake)
{
	struct hammer_reclaim *reclaim;
	hammer_mount_t hmp = ip->hmp;

	if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
		return;

	--hammer_count_reclaiming;
	--hmp->inode_reclaims;
	ip->flags &= ~HAMMER_INODE_RECLAIM;

	if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT || dowake) {
		reclaim = TAILQ_FIRST(&hmp->reclaim_list);
		if (reclaim && reclaim->count > 0 && --reclaim->count == 0) {
			TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
			wakeup(reclaim);
		}
	}
}
/*
 * Setup our reclaim pipeline.  We only let so many detached (and dirty)
 * inodes build up before we start blocking.
 *
 * When we block we don't care *which* inode has finished reclaiming,
 * as long as one does.  This is somewhat heuristic... we also put a
 * cap on how long we are willing to wait.
 */
void
hammer_inode_waitreclaims(hammer_mount_t hmp)
{
	struct hammer_reclaim reclaim;
	int delay;

	if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT)
		return;
	delay = (hmp->inode_reclaims - HAMMER_RECLAIM_WAIT) * hz /
		(HAMMER_RECLAIM_WAIT * 3) + 1;
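	/*
	 * Worked example (illustrative): the delay scales linearly with
	 * the backlog above the HAMMER_RECLAIM_WAIT threshold.  Just past
	 * the threshold the sleep is a single tick; at four times the
	 * threshold the excess equals 3 * HAMMER_RECLAIM_WAIT and the
	 * sleep reaches roughly one second (hz ticks).
	 */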
	if (delay > 0) {
		reclaim.count = 2;
		TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
		tsleep(&reclaim, 0, "hmrrcm", delay);
		if (reclaim.count > 0)
			TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
	}
}
/*
 * A larger than normal backlog of inodes is sitting in the flusher;
 * enforce a general slowdown to let it catch up.  This routine is only
 * called on completion of a non-flusher-related transaction which
 * performed B-Tree node I/O.
 *
 * It is possible for the flusher to stall in a continuous load.
 * blogbench -i1000 -o seems to do a good job generating this sort of load.
 * If the flusher is unable to catch up the inode count can bloat until
 * we run out of kvm.
 *
 * This is a bit of a hack.
 */
void
hammer_inode_waithard(hammer_mount_t hmp)
{
	/*
	 * Hysteresis.
	 */
	if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
		if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT / 2 &&
		    hmp->count_iqueued < hmp->count_inodes / 20) {
			hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
			return;
		}
	} else {
		if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT ||
		    hmp->count_iqueued < hmp->count_inodes / 10) {
			return;
		}
		hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
	}
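	/*
	 * Hysteresis summary (from the tests above): recovery mode is
	 * entered when reclaims reach HAMMER_RECLAIM_WAIT and at least
	 * 10% of all inodes are queued to the flusher, and is left only
	 * once the backlog falls below half that threshold with under 5%
	 * of inodes queued, preventing rapid mode flapping.
	 */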
	/*
	 * Block for one flush cycle.
	 */
	hammer_flusher_wait_next(hmp);
}