/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.114 2008/09/24 00:53:51 dillon Exp $
 */
#include "hammer.h"
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/buf2.h>
static int	hammer_unload_inode(struct hammer_inode *ip);
static void	hammer_free_inode(hammer_inode_t ip);
static void	hammer_flush_inode_core(hammer_inode_t ip,
					hammer_flush_group_t flg, int flags);
static int	hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int	hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int	hammer_setup_parent_inodes(hammer_inode_t ip,
					hammer_flush_group_t flg);
static int	hammer_setup_parent_inodes_helper(hammer_record_t record,
					hammer_flush_group_t flg);
static void	hammer_inode_wakereclaims(hammer_inode_t ip, int dowake);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif
/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_localization < ip2->obj_localization)
		return(-1);
	if (ip1->obj_localization > ip2->obj_localization)
		return(1);
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}
/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_localization < ip->obj_localization)
		return(-1);
	if (info->obj_localization > ip->obj_localization)
		return(1);
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}
/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t info = data;

	if (ip->obj_localization > info->obj_localization)
		return(1);
	if (ip->obj_localization < info->obj_localization)
		return(-1);
	if (ip->obj_id > info->obj_id)
		return(1);
	if (ip->obj_id < info->obj_id)
		return(-1);
	return(0);
}
/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
	u_int32_t localization = *(u_int32_t *)data;
	if (ip->obj_localization > localization)
		return(1);
	if (ip->obj_localization < localization)
		return(-1);
	return(0);
}
/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
	if (p1->localization < p2->localization)
		return(-1);
	if (p1->localization > p2->localization)
		return(1);
	return(0);
}

RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
             hammer_pfs_rb_compare, u_int32_t, localization);
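
/*
 * Usage note (sketch): the RB_GENERATE* invocations above emit the
 * concrete tree operations used throughout this file, e.g. the keyed
 * lookup that hammer_load_pseudofs() performs against the PFS tree:
 *
 *	pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root,
 *			 localization);
 *	if (pfsm)
 *		hammer_ref(&pfsm->lock);
 */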
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(ap->a_vp);
		return(0);
	}

	/*
	 * If the inode no longer has visibility in the filesystem try to
	 * recycle it immediately, even if the inode is dirty.  Recycling
	 * it quickly allows the system to reclaim buffer cache and VM
	 * resources which can matter a lot in a heavily loaded system.
	 *
	 * This can deadlock in vfsync() if we aren't careful.
	 *
	 * Do not queue the inode to the flusher if we still have visibility,
	 * otherwise namespace calls such as chmod will unnecessarily generate
	 * multiple inode updates.
	 */
	hammer_inode_unloadable_check(ip, 0);
	if (ip->ino_data.nlinks == 0) {
		if (ip->flags & HAMMER_INODE_MODMASK)
			hammer_flush_inode(ip, 0);
		vrecycle(ap->a_vp);
	}
	return(0);
}
/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;
	hammer_mount_t hmp;
	struct vnode *vp;

	vp = ap->a_vp;

	if ((ip = vp->v_data) != NULL) {
		hmp = ip->hmp;
		vp->v_data = NULL;
		ip->vp = NULL;

		if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
			++hammer_count_reclaiming;
			++hmp->inode_reclaims;
			ip->flags |= HAMMER_INODE_RECLAIM;
		}
		hammer_rel_inode(ip, 1);
	}
	return(0);
}
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
	hammer_mount_t hmp;
	struct vnode *vp;
	int error = 0;
	u_int8_t obj_type;

	hmp = ip->hmp;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				hammer_unlock(&ip->lock);
				vp = *vpp;
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;

			obj_type = ip->ino_data.obj_type;
			vp->v_type = hammer_get_vnode_type(obj_type);

			hammer_inode_wakereclaims(ip, 0);

			switch(ip->ino_data.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
				break;
			default:
				break;
			}

			/*
			 * Only mark as the root vnode if the ip is not
			 * historical, otherwise the VFS cache will get
			 * confused.  The other half of the special handling
			 * is in hammer_vop_nlookupdotdot().
			 *
			 * Pseudo-filesystem roots can be accessed via
			 * non-root filesystem paths and setting VROOT may
			 * confuse the namecache.  Set VPFSROOT instead.
			 */
			if (ip->obj_id == HAMMER_OBJID_ROOT &&
			    ip->obj_asof == hmp->asof) {
				if (ip->obj_localization == 0)
					vp->v_flag |= VROOT;
				else
					vp->v_flag |= VPFSROOT;
			}

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG)
				vinitvmio(vp, ip->ino_data.size);
			break;
		}

		/*
		 * Loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp)
				break;
			vput(vp);
		}
	}
	*vpp = vp;
	return(error);
}
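
/*
 * Typical frontend call sequence (illustrative sketch only, error
 * handling elided): resolve the in-memory inode first, then attach
 * a vnode to hand back to the kernel:
 *
 *	ip = hammer_get_inode(trans, dip, obj_id, asof, localization,
 *			      flags, &error);
 *	if (ip) {
 *		error = hammer_get_vnode(ip, &vp);
 *		hammer_rel_inode(ip, 0);
 *	}
 */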
/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
			    int (*callback)(hammer_inode_t ip, void *data),
			    void *data)
{
	hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
				   hammer_inode_info_cmp_all_history,
				   callback, iinfo);
}
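
/*
 * Callback shape (sketch, hypothetical helper name): each cached
 * snapshot copy of the object is handed to the callback; returning
 * zero continues the RB_SCAN, non-zero aborts it:
 *
 *	static int
 *	invalidate_snapshot_cb(hammer_inode_t ip, void *data)
 *	{
 *		(per-snapshot invalidation work goes here)
 *		return(0);
 *	}
 */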
/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a dummy inode we return a failure so dounlink
	 * (which does another lookup) doesn't try to mess with the
	 * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
	 * to ref dummy inodes.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Locate the on-disk inode.  If this is a PFS root we always
	 * access the current version of the root inode and (if it is not
	 * a master) always access information under it with a snapshot
	 * TID.
	 */
retry:
	hammer_init_cursor(trans, &cursor, (dip ? &dip->cache[0] : NULL), NULL);
	cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;

	cursor.asof = iinfo.obj_asof;
	cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
		       HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
		ip->ino_data = cursor.data->inode;

		/*
		 * cache[0] tries to cache the location of the object inode.
		 * The assumption is that it is near the directory inode.
		 *
		 * cache[1] tries to cache the location of the object data.
		 * The assumption is that it is near the directory data.
		 */
		hammer_cache_node(&ip->cache[0], cursor.node);
		if (dip && dip->cache[1].node)
			hammer_cache_node(&ip->cache[1], dip->cache[1].node);

		/*
		 * The file should not contain any data past the file size
		 * stored in the inode.  Setting save_trunc_off to the
		 * file size instead of max reduces B-Tree lookup overheads
		 * on append by allowing the flusher to avoid checking for
		 * record overwrites.
		 */
		ip->save_trunc_off = ip->ino_data.size;

		/*
		 * Locate and assign the pseudofs management structure to
		 * the inode.
		 */
		if (dip && dip->obj_localization == ip->obj_localization) {
			ip->pfsm = dip->pfsm;
			hammer_ref(&ip->pfsm->lock);
		} else {
			ip->pfsm = hammer_load_pseudofs(trans,
							ip->obj_localization,
							errorp);
			*errorp = 0;	/* ignore ENOENT */
		}
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}
		hammer_free_inode(ip);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);
	trans->flags |= HAMMER_TRANSF_NEWINODE;
	return(ip);
}
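
/*
 * Example (sketch): a frontend lookup as-of the mount's snapshot TID,
 * mirroring how this function is normally invoked:
 *
 *	ip = hammer_get_inode(trans, dip, obj_id, hmp->asof,
 *			      localization, 0, &error);
 *	if (ip == NULL)
 *		(error holds ENOENT or another errno)
 */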
/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a non-fake inode we return an error.  Only fake
	 * inodes can be returned by this routine.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	*errorp = 0;
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Populate the dummy inode.  Leave everything zero'd out.
	 *
	 * (ip->ino_leaf and ip->ino_data)
	 *
	 * Make the dummy inode a FIFO object which most copy programs
	 * will properly ignore.
	 */
	ip->save_trunc_off = ip->ino_data.size;
	ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

	/*
	 * Locate and assign the pseudofs management structure to
	 * the inode.
	 */
	if (dip && dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
	} else {
		ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
						errorp);
		*errorp = 0;	/* ignore ENOENT */
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 *
	 * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			goto loop;
		}
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}
		hammer_free_inode(ip);
		ip = NULL;
	}
	trans->flags |= HAMMER_TRANSF_NEWINODE;
	return(ip);
}
/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred, hammer_inode_t dip,
		    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;
	int error;

	hmp = trans->hmp;

	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	trans->flags |= HAMMER_TRANSF_NEWINODE;

	if (pfsm) {
		KKASSERT(pfsm->localization != 0);
		ip->obj_id = HAMMER_OBJID_ROOT;
		ip->obj_localization = pfsm->localization;
	} else {
		KKASSERT(dip != NULL);
		ip->obj_id = hammer_alloc_objid(hmp, dip);
		ip->obj_localization = dip->obj_localization;
	}

	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flush_state = HAMMER_FST_IDLE;
	ip->flags = HAMMER_INODE_DDIRTY |
		    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;

	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	/* ip->save_trunc_off = 0; (already zero) */
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	ip->ino_data.atime = trans->time;
	ip->ino_data.mtime = trans->time;
	ip->ino_data.size = 0;
	ip->ino_data.nlinks = 0;

	/*
	 * A nohistory designator on the parent directory is inherited by
	 * the child.  We will do this even for pseudo-fs creation... the
	 * sysad can turn it off.
	 */
	if (dip) {
		ip->ino_data.uflags = dip->ino_data.uflags &
				      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
	}

	ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_leaf.base.localization = ip->obj_localization +
					 HAMMER_LOCALIZE_INODE;
	ip->ino_leaf.base.obj_id = ip->obj_id;
	ip->ino_leaf.base.key = 0;
	ip->ino_leaf.base.create_tid = 0;
	ip->ino_leaf.base.delete_tid = 0;
	ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->time;

	/*
	 * If we are running version 2 or greater we use dirhash algorithm #1
	 * which is semi-sorted.  Algorithm #0 was just a pure crc.
	 */
	if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
		if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
			ip->ino_data.cap_flags |= HAMMER_INODE_CAP_DIRHASH_ALG1;
		}
	}

	/*
	 * Setup the ".." pointer.  This only needs to be done for directories
	 * but we do it for all objects as a recovery aid.
	 */
	if (dip)
		ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;

	/*
	 * The parent_obj_localization field only applies to pseudo-fs roots.
	 * XXX this is no longer applicable, PFSs are no longer directly
	 * tied into the parent's directory structure.
	 */
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
	    ip->obj_id == HAMMER_OBJID_ROOT) {
		ip->ino_data.ext.obj.parent_obj_localization =
						dip->obj_localization;
	}

	switch(ip->ino_leaf.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	if (dip) {
		xuid = hammer_to_unix_xid(&dip->ino_data.uid);
		xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
					     xuid, cred, &vap->va_mode);
	} else {
		xuid = 0;
	}
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
	else
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
	else if (dip)
		ip->ino_data.gid = dip->ino_data.gid;

	hammer_ref(&ip->lock);

	if (pfsm) {
		ip->pfsm = pfsm;
		hammer_ref(&pfsm->lock);
		error = 0;
	} else if (dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
		error = 0;
	} else {
		ip->pfsm = hammer_load_pseudofs(trans,
						ip->obj_localization,
						&error);
		error = 0;	/* ignore ENOENT */
	}

	if (error) {
		hammer_free_inode(ip);
		ip = NULL;
	} else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
		/* not reached */
		hammer_free_inode(ip);
	}
	*ipp = ip;
	return(error);
}
/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
	struct hammer_mount *hmp;

	hmp = ip->hmp;
	KKASSERT(ip->lock.refs == 1);
	hammer_uncache_node(&ip->cache[0]);
	hammer_uncache_node(&ip->cache[1]);
	hammer_inode_wakereclaims(ip, 1);
	if (ip->objid_cache)
		hammer_clear_objid(ip);
	--hammer_count_inodes;
	--hmp->count_inodes;
	if (ip->pfsm) {
		hammer_rel_pseudofs(hmp, ip->pfsm);
		ip->pfsm = NULL;
	}
	kfree(ip, hmp->m_inodes);
}
/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
		     u_int32_t localization, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_inode_t ip;
	hammer_pseudofs_inmem_t pfsm;
	struct hammer_cursor cursor;
	int bytes;

retry:
	pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
	if (pfsm) {
		hammer_ref(&pfsm->lock);
		*errorp = 0;
		return(pfsm);
	}

	/*
	 * PFS records are stored in the root inode (not the PFS root inode,
	 * but the real root).  Avoid an infinite recursion if loading
	 * the PFS for the real root.
	 */
	if (localization) {
		ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
				      HAMMER_MAX_TID,
				      HAMMER_DEF_LOCALIZATION, 0, errorp);
	} else {
		ip = NULL;
	}

	pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
	pfsm->localization = localization;
	pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
	pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

	hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
	cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	if (ip)
		*errorp = hammer_ip_lookup(&cursor);
	else
		*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == 0) {
		*errorp = hammer_ip_resolve_data(&cursor);
		if (*errorp == 0) {
			if (cursor.data->pfsd.mirror_flags &
			    HAMMER_PFSD_DELETED) {
				*errorp = ENOENT;
			} else {
				bytes = cursor.leaf->data_len;
				if (bytes > sizeof(pfsm->pfsd))
					bytes = sizeof(pfsm->pfsd);
				bcopy(cursor.data, &pfsm->pfsd, bytes);
			}
		}
	}
	hammer_done_cursor(&cursor);

	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_ref(&pfsm->lock);
	if (ip)
		hammer_rel_inode(ip, 0);
	if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
		kfree(pfsm, hmp->m_misc);
		goto retry;
	}
	return(pfsm);
}
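
/*
 * Key layout note (derived from the cursor setup above): PFS records
 * all live under the real root inode at HAMMER_DEF_LOCALIZATION +
 * HAMMER_LOCALIZE_MISC with rec_type HAMMER_RECTYPE_PFS, and the PFS
 * id itself is carried in key_beg.key.  This is why loading a PFS never
 * recurses into the PFS being looked up.
 */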
/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	hammer_inode_t ip;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = pfsm->localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	error = hammer_ip_lookup(&cursor);
	if (error == 0 && hammer_cursor_inmem(&cursor)) {
		record = cursor.iprec;
		if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
			KKASSERT(cursor.deadlk_rec == NULL);
			hammer_ref(&record->lock);
			cursor.deadlk_rec = record;
			error = EDEADLK;
		} else {
			record->flags |= HAMMER_RECF_DELETED_FE;
			error = 0;
		}
	}
	if (error == 0 || error == ENOENT) {
		record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
		record->type = HAMMER_MEM_RECORD_GENERAL;

		record->leaf.base.localization = ip->obj_localization +
						 HAMMER_LOCALIZE_MISC;
		record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
		record->leaf.base.key = pfsm->localization;
		record->leaf.data_len = sizeof(pfsm->pfsd);
		bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
		error = hammer_ip_add_record(trans, record);
	}
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	hammer_rel_inode(ip, 0);
	return(error);
}
/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
		       hammer_pseudofs_inmem_t pfsm)
{
	hammer_inode_t ip;
	struct vattr vap;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      pfsm->localization, 0, &error);
	if (ip == NULL) {
		vattr_null(&vap);
		vap.va_mode = 0755;
		vap.va_type = VDIR;
		error = hammer_create_inode(trans, &vap, cred, NULL, pfsm, &ip);
		if (error == 0) {
			++ip->ino_data.nlinks;
			hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (ip)
		hammer_rel_inode(ip, 0);
	return(error);
}
/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
	int res;

	hammer_ref(&ip->lock);
	if (ip->lock.refs == 2 && ip->vp)
		vclean_unlocked(ip->vp);
	if (ip->lock.refs == 1 && ip->vp == NULL)
		res = 0;
	else
		res = -1;	/* stop, someone is using the inode */
	hammer_rel_inode(ip, 0);
	return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
	int res;
	int try;

	for (try = res = 0; try < 4; ++try) {
		res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
						 hammer_inode_pfs_cmp,
						 hammer_unload_pseudofs_callback,
						 &localization);
		if (res == 0 && try > 1)
			break;
		hammer_flusher_sync(trans->hmp);
	}
	if (res != 0)
		res = ENOTEMPTY;
	return(res);
}
/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
	hammer_unref(&pfsm->lock);
	if (pfsm->lock.refs == 0) {
		RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
		kfree(pfsm, hmp->m_misc);
	}
}
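
/*
 * Reference pairing (sketch): every successful hammer_load_pseudofs()
 * returns a referenced pfsm, and hammer_rel_pseudofs() both drops the
 * ref and frees the structure once the last ref goes away:
 *
 *	pfsm = hammer_load_pseudofs(trans, localization, &error);
 *	...
 *	hammer_rel_pseudofs(hmp, pfsm);
 */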
/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_record_t record;
	int error;
	int redirty;

retry:
	error = 0;

	/*
	 * If the inode has a presence on-disk then locate it and mark
	 * it deleted, setting DELONDISK.
	 *
	 * The record may or may not be physically deleted, depending on
	 * the retention policy.
	 */
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = ip->obj_localization +
					       HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (hammer_debug_inode)
			kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

		if (error == 0) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			if (hammer_debug_inode)
				kprintf(" error %d\n", error);
			if (error == 0)
				ip->flags |= HAMMER_INODE_DELONDISK;
			if (cursor->node)
				hammer_cache_node(&ip->cache[0], cursor->node);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("IPDED %p %d\n", ip, error);
			if (error == 0)
				goto retry;
		}
	}

	/*
	 * Ok, write out the initial record or a new record (after deleting
	 * the old one), unless the DELETED flag is set.  This routine will
	 * clear DELONDISK if it writes out a record.
	 *
	 * Update our inode statistics if this is the first application of
	 * the inode on-disk.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		/*
		 * Generate a record and write it to the media.  We clean-up
		 * the state before releasing so we do not have to set-up
		 * a flush_group.
		 */
		record = hammer_alloc_mem_record(ip, 0);
		record->type = HAMMER_MEM_RECORD_INODE;
		record->flush_state = HAMMER_FST_FLUSH;
		record->leaf = ip->sync_ino_leaf;
		record->leaf.base.create_tid = trans->tid;
		record->leaf.data_len = sizeof(ip->sync_ino_data);
		record->leaf.create_ts = trans->time32;
		record->data = (void *)&ip->sync_ino_data;
		record->flags |= HAMMER_RECF_INTERLOCK_BE;

		/*
		 * If this flag is set we cannot sync the new file size
		 * because we haven't finished related truncations.  The
		 * inode will be flushed in another flush group to finish
		 * the job.
		 */
		if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
		    ip->sync_ino_data.size != ip->ino_data.size) {
			redirty = 1;
			ip->sync_ino_data.size = ip->ino_data.size;
		} else {
			redirty = 0;
		}

		for (;;) {
			error = hammer_ip_sync_record_cursor(cursor, record);
			if (hammer_debug_inode)
				kprintf("GENREC %p rec %08x %d\n",
					ip, record->flags, error);
			if (error != EDEADLK)
				break;
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("GENREC reinit %d\n", error);
			if (error)
				break;
		}

		/*
		 * The record isn't managed by the inode's record tree,
		 * destroy it whether we succeed or fail.
		 */
		record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
		record->flags |= HAMMER_RECF_DELETED_FE | HAMMER_RECF_COMMITTED;
		record->flush_state = HAMMER_FST_IDLE;
		hammer_rel_mem_record(record);

		/*
		 * Finish up.
		 */
		if (error == 0) {
			if (hammer_debug_inode)
				kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
			ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
					    HAMMER_INODE_ATIME |
					    HAMMER_INODE_MTIME);
			ip->flags &= ~HAMMER_INODE_DELONDISK;
			if (redirty)
				ip->sync_flags |= HAMMER_INODE_DDIRTY;

			/*
			 * Root volume count of inodes
			 */
			hammer_sync_lock_sh(trans);
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
				if (hammer_debug_inode)
					kprintf("NOWONDISK %p\n", ip);
			}
			hammer_sync_unlock(trans);
		}
	}

	/*
	 * If the inode has been destroyed, clean out any left-over flags
	 * that may have been set by the frontend.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
				    HAMMER_INODE_ATIME |
				    HAMMER_INODE_MTIME);
	}
	return(error);
}
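
/*
 * Summary of the on-disk flag interplay handled above:
 *
 *	ONDISK && !DELONDISK	An on-disk record exists; it is deleted
 *				first and DELONDISK is set.
 *	DELONDISK		The old record is gone; writing the
 *				replacement record clears the flag.
 *	DELETED			The inode was destroyed; nothing is
 *				written and left-over sync flags are
 *				stripped.
 */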
/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

retry:
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
	    HAMMER_INODE_ONDISK) {
		return(0);
	}

	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_INODE;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.key = 0;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor->key_beg.obj_type = 0;
	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_GET_LEAF;
	cursor->flags |= HAMMER_CURSOR_GET_DATA;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(cursor);
	if (error == 0) {
		hammer_cache_node(&ip->cache[0], cursor->node);
		if (ip->sync_flags & HAMMER_INODE_MTIME) {
			/*
			 * Updating MTIME requires an UNDO.  Just cover
			 * both atime and mtime.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
				     HAMMER_ITIMES_BASE(&cursor->data->inode),
				     HAMMER_ITIMES_BYTES);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			cursor->data->inode.mtime = ip->sync_ino_data.mtime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		} else if (ip->sync_flags & HAMMER_INODE_ATIME) {
			/*
			 * Updating atime only can be done in-place with
			 * no UNDO.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
					     NULL, 0);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		}
		ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor,
					   &ip->cache[0], ip);
		if (error == 0)
			goto retry;
	}
	return(error);
}
/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	/*hammer_mount_t hmp = ip->hmp;*/

	/*
	 * Handle disposition when dropping the last ref.
	 */
	for (;;) {
		if (ip->lock.refs == 1) {
			/*
			 * Determine whether on-disk action is needed for
			 * the inode's final disposition.
			 */
			KKASSERT(ip->vp == NULL);
			hammer_inode_unloadable_check(ip, 0);
			if (ip->flags & HAMMER_INODE_MODMASK) {
				hammer_flush_inode(ip, 0);
			} else if (ip->lock.refs == 1) {
				hammer_unload_inode(ip);
				break;
			}
		} else {
			if (flush)
				hammer_flush_inode(ip, 0);

			/*
			 * The inode still has multiple refs, try to drop
			 * one ref.
			 */
			KKASSERT(ip->lock.refs >= 1);
			if (ip->lock.refs > 1) {
				hammer_unref(&ip->lock);
				break;
			}
		}
	}
}
/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
	hammer_mount_t hmp = ip->hmp;

	KASSERT(ip->lock.refs == 1,
		("hammer_unload_inode: %d refs\n", ip->lock.refs));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT(ip->lock.lockcount == 0);
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->target_list));

	RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

	hammer_free_inode(ip);
	return(0);
}
/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
{
	hammer_record_t rec;

	/*
	 * Get rid of the inode's in-memory records, regardless of their
	 * state, and clear the mod-mask.
	 */
	while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
		TAILQ_REMOVE(&ip->target_list, rec, target_entry);
		rec->target_ip = NULL;
		if (rec->flush_state == HAMMER_FST_SETUP)
			rec->flush_state = HAMMER_FST_IDLE;
	}
	while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
		if (rec->flush_state == HAMMER_FST_FLUSH)
			--rec->flush_group->refs;
		else
			hammer_ref(&rec->lock);
		KKASSERT(rec->lock.refs == 1);
		rec->flush_state = HAMMER_FST_IDLE;
		rec->flush_group = NULL;
		rec->flags |= HAMMER_RECF_DELETED_FE;
		rec->flags |= HAMMER_RECF_DELETED_BE;
		hammer_rel_mem_record(rec);
	}
	ip->flags &= ~HAMMER_INODE_MODMASK;
	ip->sync_flags &= ~HAMMER_INODE_MODMASK;
	KKASSERT(ip->vp == NULL);

	/*
	 * Remove the inode from any flush group, force it idle.  FLUSH
	 * and SETUP states have an inode ref.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_FLUSH:
		TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
		--ip->flush_group->refs;
		ip->flush_group = NULL;
		/* fall through */
	case HAMMER_FST_SETUP:
		hammer_unref(&ip->lock);
		ip->flush_state = HAMMER_FST_IDLE;
		/* fall through */
	case HAMMER_FST_IDLE:
		break;
	}

	/*
	 * There shouldn't be any associated vnode.  The unload needs at
	 * least one ref, if we do have a vp steal its ip ref.
	 */
	if (ip->vp) {
		kprintf("hammer_destroy_inode_callback: Unexpected "
			"vnode association ip %p vp %p\n", ip, ip->vp);
		ip->vp->v_data = NULL;
		ip->vp = NULL;
	} else {
		hammer_ref(&ip->lock);
	}
	hammer_unload_inode(ip);
	return(0);
}
/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
	hammer_mount_t hmp = ip->hmp;

	if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
		ip->flags |= HAMMER_INODE_RO;
	else
		ip->flags &= ~HAMMER_INODE_RO;
	return(0);
}
/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
	/*
	 * ronly of 0 or 2 does not trigger assertion.
	 * 2 is a special error state
	 */
	KKASSERT(ip->hmp->ronly != 1 ||
		  (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
			    HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
			    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
	if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
		ip->flags |= HAMMER_INODE_RSV_INODES;
		++ip->hmp->rsv_inodes;
	}

	ip->flags |= flags;
}
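
/*
 * Example from this file (hammer_mkroot_pseudofs()): after bumping
 * nlinks the inode data is marked dirty so the flusher writes it out:
 *
 *	++ip->ino_data.nlinks;
 *	hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
 */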
/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * Upon return if the inode could not be flushed due to a setup
 * dependency, then it will be automatically flushed when the dependency
 * is satisfied.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
	hammer_mount_t hmp;
	hammer_flush_group_t flg;
	int good;

	/*
	 * next_flush_group is the first flush group we can place the inode
	 * in.  It may be NULL.  If it becomes full we append a new flush
	 * group and make that the next_flush_group.
	 */
	hmp = ip->hmp;
	while ((flg = hmp->next_flush_group) != NULL) {
		KKASSERT(flg->running == 0);
		if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
			break;
		hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
		hammer_flusher_async(ip->hmp, flg);
	}
	if (flg == NULL) {
		flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
		hmp->next_flush_group = flg;
		TAILQ_INIT(&flg->flush_list);
		TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
	}

	/*
	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
	 * state we have to put it back into an IDLE state so we can
	 * drop the extra ref.
	 *
	 * If we have a parent dependency we must still fall through
	 * so we can run it.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		if (ip->flush_state == HAMMER_FST_SETUP &&
		    TAILQ_EMPTY(&ip->target_list)) {
			ip->flush_state = HAMMER_FST_IDLE;
			hammer_rel_inode(ip, 0);
		}
		if (ip->flush_state == HAMMER_FST_IDLE)
			return;
	}

	/*
	 * Our flush action will depend on the current state.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * We have no dependencies and can flush immediately.  Some
		 * of our children may not be flushable so we have to re-test
		 * with that additional knowledge.
		 */
		hammer_flush_inode_core(ip, flg, flags);
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Recurse upwards through dependencies via target_list
		 * and start their flusher actions going if possible.
		 *
		 * 'good' is our connectivity.  -1 means we have none and
		 * can't flush, 0 means there weren't any dependencies, and
		 * 1 means we have good connectivity.
		 */
		good = hammer_setup_parent_inodes(ip, flg);

		if (good >= 0) {
			/*
			 * We can continue if good >= 0.  Determine how
			 * many records under our inode can be flushed (and
			 * mark them).
			 */
			hammer_flush_inode_core(ip, flg, flags);
		} else {
			/*
			 * Parent has no connectivity, tell it to flush
			 * us as soon as it does.
			 *
			 * The REFLUSH flag is also needed to trigger
			 * dependency wakeups.
			 */
			ip->flags |= HAMMER_INODE_CONN_DOWN |
				     HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * We are already flushing, flag the inode to reflush
		 * if needed after it completes its current flush.
		 *
		 * The REFLUSH flag is also needed to trigger
		 * dependency wakeups.
		 */
		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
			ip->flags |= HAMMER_INODE_REFLUSH;
		if (flags & HAMMER_FLUSH_SIGNAL) {
			ip->flags |= HAMMER_INODE_RESIGNAL;
			hammer_flusher_async(ip->hmp, flg);
		}
		break;
	}
}
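
/*
 * Flush state transitions handled above, summarized:
 *
 *	FST_IDLE  -> FST_FLUSH	No dependencies, flush immediately.
 *	FST_SETUP -> FST_FLUSH	Only if parent connectivity resolves
 *				(good >= 0).
 *	FST_SETUP (unchanged)	No connectivity; CONN_DOWN and REFLUSH
 *				are armed instead.
 *	FST_FLUSH (unchanged)	Already flushing; REFLUSH is armed.
 */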
/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 *     so for now do not ref/deref the structures.  Note that if we use the
 *     ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip, hammer_flush_group_t flg)
{
	hammer_record_t depend;
	int good;
	int r;

	good = 0;
	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
		r = hammer_setup_parent_inodes_helper(depend, flg);
		KKASSERT(depend->target_ip == ip);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;
	}
	return(good);
}
/*
 * This helper function takes a record representing the dependency between
 * the parent inode and child inode.
 *
 * record->ip		= parent inode
 * record->target_ip	= child inode
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependency and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record,
				  hammer_flush_group_t flg)
{
	hammer_inode_t pip;
	int good;

	KKASSERT(record->flush_state != HAMMER_FST_IDLE);
	pip = record->ip;

	/*
	 * If the record is already flushing, is it in our flush group?
	 *
	 * If it is in our flush group but it is a general record or a
	 * delete-on-disk, it does not improve our connectivity (return 0),
	 * and if the target inode is not trying to destroy itself we can't
	 * allow the operation yet anyway (the second return -1).
	 */
	if (record->flush_state == HAMMER_FST_FLUSH) {
		/*
		 * If not in our flush group ask the parent to reflush
		 * us as soon as possible.
		 */
		if (record->flush_group != flg) {
			pip->flags |= HAMMER_INODE_REFLUSH;
			record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
			return(-1);
		}

		/*
		 * If in our flush group everything is already set up,
		 * just return whether the record will improve our
		 * visibility or not.
		 */
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		return(0);
	}

	/*
	 * It must be a setup record.  Try to resolve the setup dependencies
	 * by recursing upwards so we can place ip on the flush list.
	 */
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	good = hammer_setup_parent_inodes(pip, flg);

	/*
	 * If good < 0 the parent has no connectivity and we cannot safely
	 * flush the directory entry, which also means we can't flush our
	 * ip.  Flag the parent and us for downward recursion once the
	 * parent's connectivity is resolved.
	 */
	if (good < 0) {
		/* pip->flags |= HAMMER_INODE_CONN_DOWN; set by recursion */
		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
		return(good);
	}

	/*
	 * We are go, place the parent inode in a flushing state so we can
	 * place its record in a flushing state.  Note that the parent
	 * may already be flushing.  The record must be in the same flush
	 * group as the parent.
	 */
	if (pip->flush_state != HAMMER_FST_FLUSH)
		hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
	KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

#if 0
	if (record->type == HAMMER_MEM_RECORD_DEL &&
	    (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
		/*
		 * Regardless of flushing state we cannot sync this path if the
		 * record represents a delete-on-disk but the target inode
		 * is not ready to sync its own deletion.
		 *
		 * XXX need to count effective nlinks to determine whether
		 * the flush is ok, otherwise removing a hardlink will
		 * just leave the DEL record to rot.
		 */
		record->target_ip->flags |= HAMMER_INODE_REFLUSH;
		return(-1);
	} else
#endif
	if (pip->flush_group == flg) {
		/*
		 * Because we have not calculated nlinks yet we can just
		 * set records to the flush state if the parent is in
		 * the same flush group as we are.
		 */
		record->flush_state = HAMMER_FST_FLUSH;
		record->flush_group = flg;
		++record->flush_group->refs;
		hammer_ref(&record->lock);

		/*
		 * A general directory-add contributes to our visibility.
		 *
		 * Otherwise it is probably a directory-delete or
		 * delete-on-disk record and does not contribute to our
		 * visibility (but we can still flush it).
		 */
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		return(0);
	} else {
		/*
		 * If the parent is not in our flush group we cannot
		 * flush this record yet, there is no visibility.
		 * We tell the parent to reflush and mark ourselves
		 * so the parent knows it should flush us too.
		 */
		pip->flags |= HAMMER_INODE_REFLUSH;
		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
		return(0);
	}
}
/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
{
	int go_count;

	/*
	 * Set flush state and prevent the flusher from cycling into
	 * the next flush group.  Do not place the ip on the list yet.
	 * Inodes not in the idle state get an extra reference.
	 */
	KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
	if (ip->flush_state == HAMMER_FST_IDLE)
		hammer_ref(&ip->lock);
	ip->flush_state = HAMMER_FST_FLUSH;
	ip->flush_group = flg;
	++ip->hmp->flusher.group_lock;
	++ip->hmp->count_iqueued;
	++hammer_count_iqueued;
	++flg->total_count;

	/*
	 * If the flush group reaches the autoflush limit we want to signal
	 * the flusher.  This is particularly important for remove()s.
	 */
	if (flg->total_count == hammer_autoflush)
		flags |= HAMMER_FLUSH_SIGNAL;

	/*
	 * We need to be able to vfsync/truncate from the backend.
	 */
	KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
	if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
		ip->flags |= HAMMER_INODE_VHELD;
		vref(ip->vp);
	}

	/*
	 * Figure out how many in-memory records we can actually flush
	 * (not including inode meta-data, buffers, etc).
	 */
	KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
	if (flags & HAMMER_FLUSH_RECURSION) {
		/*
		 * If this is an upwards recursion we do not want to
		 * recurse down again!
		 */
		go_count = 1;
#if 0
	} else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		/*
		 * No new records are added if we must complete a flush
		 * from a previous cycle, but we do have to move the records
		 * from the previous cycle to the current one.
		 */
#if 0
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_syncgrp_child_callback, NULL);
#endif
		go_count = 1;
#endif
	} else {
		/*
		 * Normal flush, scan records and bring them into the flush.
		 * Directory adds and deletes are usually skipped (they are
		 * grouped with the related inode rather than with the
		 * directory).
		 *
		 * go_count can be negative, which means the scan aborted
		 * due to the flush group being over-full and we should
		 * flush what we have.
		 */
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_setup_child_callback, NULL);
	}

	/*
	 * This is a more involved test that includes go_count.  If we
	 * can't flush, flag the inode and return.  If go_count is 0 we
	 * are unable to flush any records in our rec_tree and
	 * must ignore the XDIRTY flag.
	 */
	if (go_count == 0) {
		if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
			--ip->hmp->count_iqueued;
			--hammer_count_iqueued;

			--flg->total_count;
			ip->flush_state = HAMMER_FST_SETUP;
			ip->flush_group = NULL;
			if (ip->flags & HAMMER_INODE_VHELD) {
				ip->flags &= ~HAMMER_INODE_VHELD;
				vrele(ip->vp);
			}

			/*
			 * REFLUSH is needed to trigger dependency wakeups
			 * when an inode is in SETUP.
			 */
			ip->flags |= HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
			if (--ip->hmp->flusher.group_lock == 0)
				wakeup(&ip->hmp->flusher.group_lock);
			return;
		}
	}

	/*
	 * Snapshot the state of the inode for the backend flusher.
	 *
	 * We continue to retain save_trunc_off even when all truncations
	 * have been resolved as an optimization to determine if we can
	 * skip the B-Tree lookup for overwrite deletions.
	 *
	 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
	 *	 and stays in ip->flags.  Once set, it stays set until the
	 *	 inode is destroyed.
	 */
	if (ip->flags & HAMMER_INODE_TRUNCATED) {
		KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
		ip->sync_trunc_off = ip->trunc_off;
		ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
		ip->flags &= ~HAMMER_INODE_TRUNCATED;
		ip->sync_flags |= HAMMER_INODE_TRUNCATED;

		/*
		 * The save_trunc_off used to cache whether the B-Tree
		 * holds any records past that point is not used until
		 * after the truncation has succeeded, so we can safely
		 * set it now.
		 */
		if (ip->save_trunc_off > ip->sync_trunc_off)
			ip->save_trunc_off = ip->sync_trunc_off;
	}
	ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
			   ~HAMMER_INODE_TRUNCATED);
	ip->sync_ino_leaf = ip->ino_leaf;
	ip->sync_ino_data = ip->ino_data;
	ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
#ifdef DEBUG_TRUNCATE
	if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
		kprintf("truncateS %016llx\n", ip->sync_trunc_off);
#endif

	/*
	 * The flusher list inherits our inode and reference.
	 */
	KKASSERT(flg->running == 0);
	TAILQ_INSERT_TAIL(&flg->flush_list, ip, flush_entry);
	if (--ip->hmp->flusher.group_lock == 0)
		wakeup(&ip->hmp->flusher.group_lock);

	if (flags & HAMMER_FLUSH_SIGNAL) {
		hammer_flusher_async(ip->hmp, flg);
	}
}
/*
 * Callback for scan of ip->rec_tree.  Try to include each record in our
 * flush.  ip->flush_group has been set but the inode has not yet been
 * moved into a flushing state.
 *
 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
 * both inodes.
 *
 * We return 1 for any record placed or found in FST_FLUSH, which prevents
 * the caller from shortcutting the flush.
 */
static int
hammer_setup_child_callback(hammer_record_t rec, void *data)
{
	hammer_flush_group_t flg;
	hammer_inode_t target_ip;
	hammer_inode_t ip;
	int r;

	/*
	 * Deleted records are ignored.  Note that the flush detects deleted
	 * front-end records at multiple points to deal with races.  This is
	 * just the first line of defense.  The only time DELETED_FE cannot
	 * be set is when HAMMER_RECF_INTERLOCK_BE is set.
	 *
	 * Don't get confused between record deletion and, say, directory
	 * entry deletion.  The deletion of a directory entry that is on
	 * the media has nothing to do with the record deletion flags.
	 */
	if (rec->flags & (HAMMER_RECF_DELETED_FE|HAMMER_RECF_DELETED_BE)) {
		if (rec->flush_state == HAMMER_FST_FLUSH) {
			KKASSERT(rec->flush_group == rec->ip->flush_group);
			r = 1;
		} else {
			r = 0;
		}
		return(r);
	}

	/*
	 * If the record is in an idle state it has no dependencies and
	 * can be flushed.
	 */
	ip = rec->ip;
	flg = ip->flush_group;
	r = 0;

	switch(rec->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * The record has no setup dependency, we can flush it.
		 */
		KKASSERT(rec->target_ip == NULL);
		rec->flush_state = HAMMER_FST_FLUSH;
		rec->flush_group = flg;
		++flg->refs;
		hammer_ref(&rec->lock);
		r = 1;
		break;
	case HAMMER_FST_SETUP:
		/*
		 * The record has a setup dependency.  These are typically
		 * directory entry adds and deletes.  Such entries will be
		 * flushed when their inodes are flushed so we do not
		 * usually have to add them to the flush here.  However,
		 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
		 * it is asking us to flush this record (and it).
		 */
		target_ip = rec->target_ip;
		KKASSERT(target_ip != NULL);
		KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);

		/*
		 * If the target IP is already flushing in our group
		 * we could associate the record, but target_ip has
		 * already synced ino_data to sync_ino_data and we
		 * would also have to adjust nlinks.  Plus there are
		 * ordering issues for adds and deletes.
		 *
		 * Reflush downward if this is an ADD, and upward if
		 * this is a DEL.
		 */
		if (target_ip->flush_state == HAMMER_FST_FLUSH) {
			if (rec->type == HAMMER_MEM_RECORD_ADD)
				ip->flags |= HAMMER_INODE_REFLUSH;
			else
				target_ip->flags |= HAMMER_INODE_REFLUSH;
			break;
		}

		/*
		 * Target IP is not yet flushing.  This can get complex
		 * because we have to be careful about the recursion.
		 *
		 * Directories create an issue for us in that if a flush
		 * of a directory is requested the expectation is to flush
		 * any pending directory entries, but this will cause the
		 * related inodes to recursively flush as well.  We can't
		 * really defer the operation so just get as many as we
		 * can.
		 */
#if 0
		if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
		    (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
			/*
			 * We aren't reclaiming and the target ip was not
			 * previously prevented from flushing due to this
			 * record dependency.  Do not flush this record.
			 */
			break;
		} else
#endif
		if (flg->total_count + flg->refs >
			   ip->hmp->undo_rec_limit) {
			/*
			 * Our flush group is over-full and we risk blowing
			 * out the UNDO FIFO.  Stop the scan, flush what we
			 * have, then reflush the directory.
			 *
			 * The directory may be forced through multiple
			 * flush groups before it can be completely
			 * flushed.
			 */
			ip->flags |= HAMMER_INODE_RESIGNAL |
				     HAMMER_INODE_REFLUSH;
			r = -1;
		} else if (rec->type == HAMMER_MEM_RECORD_ADD) {
			/*
			 * If the target IP is not flushing we can force
			 * it to flush, even if it is unable to write out
			 * any of its own records we have at least one in
			 * hand that we CAN deal with.
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = flg;
			++flg->refs;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip, flg,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		} else {
			/*
			 * General or delete-on-disk record.
			 *
			 * XXX this needs help.  If a delete-on-disk we could
			 * disconnect the target.  If the target has its own
			 * dependencies they really need to be flushed.
			 *
			 * XXX
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = flg;
			++flg->refs;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip, flg,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * The flush_group should already match.
		 */
		KKASSERT(rec->flush_group == flg);
		r = 1;
		break;
	}
	return(r);
}
#if 0
/*
 * This version just moves records already in a flush state to the new
 * flush group and that is it.
 */
static int
hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
{
	hammer_inode_t ip = rec->ip;

	switch(rec->flush_state) {
	case HAMMER_FST_FLUSH:
		KKASSERT(rec->flush_group == ip->flush_group);
		break;
	default:
		break;
	}
	return(0);
}
#endif
/*
 * Wait for a previously queued flush to complete.
 *
 * If a critical error occurred we don't try to wait.
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
	hammer_flush_group_t flg;

	flg = NULL;
	if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
		while (ip->flush_state != HAMMER_FST_IDLE &&
		       (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
			if (ip->flush_state == HAMMER_FST_SETUP)
				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
			if (ip->flush_state != HAMMER_FST_IDLE) {
				ip->flags |= HAMMER_INODE_FLUSHW;
				tsleep(&ip->flags, 0, "hmrwin", 0);
			}
		}
	}
}
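
/*
 * Example (sketch): a synchronous flush, as an fsync-style path would
 * issue it, signals the flusher and then blocks here until the inode
 * returns to FST_IDLE:
 *
 *	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
 *	hammer_wait_inode(ip);
 */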
2148 * Called by the backend code when a flush has been completed.
2149 * The inode has already been removed from the flush list.
2151 * A pipelined flush can occur, in which case we must re-enter the
2152 * inode on the list and re-copy its fields.
2155 hammer_flush_inode_done(hammer_inode_t ip, int error)
2160 KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
2165 * Auto-reflush if the backend could not completely flush
2166 * the inode. This fixes a case where a deferred buffer flush
2167 * could cause fsync to return early.
2169 if (ip->sync_flags & HAMMER_INODE_MODMASK)
2170 ip->flags |= HAMMER_INODE_REFLUSH;
2173 * Merge left-over flags back into the frontend and fix the state.
2174 * Incomplete truncations are retained by the backend.
2177 ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
2178 ip->sync_flags &= HAMMER_INODE_TRUNCATED;
2181 * The backend may have adjusted nlinks, so if the adjusted nlinks
2182 * does not match the fronttend set the frontend's RDIRTY flag again.
2184 if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
2185 ip->flags |= HAMMER_INODE_DDIRTY;
2188 * Fix up the dirty buffer status.
2190 if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
2191 ip->flags |= HAMMER_INODE_BUFS;
2195 * Re-set the XDIRTY flag if some of the inode's in-memory records
2196 * could not be flushed.
2198 KKASSERT((RB_EMPTY(&ip->rec_tree) &&
2199 (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
2200 (!RB_EMPTY(&ip->rec_tree) &&
2201 (ip->flags & HAMMER_INODE_XDIRTY) != 0));
2204 * Do not lose track of inodes which no longer have vnode
2205 * assocations, otherwise they may never get flushed again.
2207 * The reflush flag can be set superfluously, causing extra pain
2208 * for no reason. If the inode is no longer modified it no longer
2209 * needs to be flushed.
2211 if (ip->flags & HAMMER_INODE_MODMASK) {
2213 ip->flags |= HAMMER_INODE_REFLUSH;
2215 ip->flags &= ~HAMMER_INODE_REFLUSH;
2219 * Adjust the flush state.
2221 if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2223 * We were unable to flush out all our records, leave the
2224 * inode in a flush state and in the current flush group.
2225 * The flush group will be re-run.
2227 * This occurs if the UNDO block gets too full or there is
2228 * too much dirty meta-data; it allows the flusher to
2229 * finalize the UNDO block and then re-flush.
2231 ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
2235 * Remove from the flush_group
2237 TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
2238 ip->flush_group = NULL;
2241 * Clean up the vnode ref and tracking counts.
2243 if (ip->flags & HAMMER_INODE_VHELD) {
2244 ip->flags &= ~HAMMER_INODE_VHELD;
2245 vrele(ip->vp);
2247 --hmp->count_iqueued;
2248 --hammer_count_iqueued;
2251 * And adjust the state.
2253 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
2254 ip->flush_state = HAMMER_FST_IDLE;
2257 ip->flush_state = HAMMER_FST_SETUP;
2262 * If the frontend is waiting for a flush to complete,
2263 * wake it up.
2265 if (ip->flags & HAMMER_INODE_FLUSHW) {
2266 ip->flags &= ~HAMMER_INODE_FLUSHW;
2267 wakeup(&ip->flags);
2271 * If the frontend made more changes and requested another
2272 * flush, then try to get it running.
2274 * Reflushes are aborted when the inode is errored out.
2276 if (ip->flags & HAMMER_INODE_REFLUSH) {
2277 ip->flags &= ~HAMMER_INODE_REFLUSH;
2278 if (ip->flags & HAMMER_INODE_RESIGNAL) {
2279 ip->flags &= ~HAMMER_INODE_RESIGNAL;
2280 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2282 hammer_flush_inode(ip, 0);
2288 * If we have no parent dependencies we can clear CONN_DOWN
2290 if (TAILQ_EMPTY(&ip->target_list))
2291 ip->flags &= ~HAMMER_INODE_CONN_DOWN;
2294 * If the inode is now clean drop the space reservation.
2296 if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
2297 (ip->flags & HAMMER_INODE_RSV_INODES)) {
2298 ip->flags &= ~HAMMER_INODE_RSV_INODES;
2303 hammer_rel_inode(ip, 0);
2307 * Called from hammer_sync_inode() to synchronize in-memory records
2308 * to the media.
2311 hammer_sync_record_callback(hammer_record_t record, void *data)
2313 hammer_cursor_t cursor = data;
2314 hammer_transaction_t trans = cursor->trans;
2315 hammer_mount_t hmp = trans->hmp;
2319 * Skip records that do not belong to the current flush.
2321 ++hammer_stats_record_iterations;
2322 if (record->flush_state != HAMMER_FST_FLUSH)
2323 return(0);
2326 if (record->flush_group != record->ip->flush_group) {
2327 kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group, record->ip->flush_group);
2332 KKASSERT(record->flush_group == record->ip->flush_group);
2335 * Interlock the record using the BE flag. Once BE is set the
2336 * frontend cannot change the state of FE.
2338 * NOTE: If FE is set prior to us setting BE we still sync the
2339 * record out, but the flush completion code converts it to
2340 * a delete-on-disk record instead of destroying it.
2342 KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
2343 record->flags |= HAMMER_RECF_INTERLOCK_BE;
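/*
 * Editor's note: a compiled-out miniature of the interlock described in
 * the NOTE above, with invented EX_* flag values: once the backend owns
 * the record, a concurrent frontend deletion is honored at completion
 * time by converting the freshly synced record into a delete-on-disk
 * rather than discarding the work.
 */
#if 0
#define EX_INTERLOCK_BE	0x01	/* backend is syncing the record */
#define EX_DELETED_FE	0x02	/* frontend deleted it meanwhile */

static int
ex_backend_complete(int flags)
{
	if ((flags & EX_INTERLOCK_BE) && (flags & EX_DELETED_FE))
		return (1);	/* convert to delete-on-disk */
	return (0);		/* the record stands as written */
}
#endif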
2346 * The backend may have already disposed of the record.
2348 if (record->flags & HAMMER_RECF_DELETED_BE) {
2354 * If the whole inode is being deleted all on-disk records will
2355 * be deleted very soon.  We can't sync any new records to disk
2356 * because they would be deleted in the same transaction they were
2357 * created in (delete_tid == create_tid), which will assert.
2359 * XXX There may be a case with RECORD_ADD with DELETED_FE set
2360 * that we currently panic on.
2362 if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
2363 switch(record->type) {
2364 case HAMMER_MEM_RECORD_DATA:
2366 * We don't have to do anything; if the record was
2367 * committed the space will have been accounted for anyway.
2371 case HAMMER_MEM_RECORD_GENERAL:
2372 record->flags |= HAMMER_RECF_DELETED_FE;
2373 record->flags |= HAMMER_RECF_DELETED_BE;
2376 case HAMMER_MEM_RECORD_ADD:
2377 panic("hammer_sync_record_callback: illegal add "
2378 "during inode deletion record %p", record);
2379 break; /* NOT REACHED */
2380 case HAMMER_MEM_RECORD_INODE:
2381 panic("hammer_sync_record_callback: attempt to "
2382 "sync inode record %p?", record);
2383 break; /* NOT REACHED */
2384 case HAMMER_MEM_RECORD_DEL:
2386 * Follow through and issue the on-disk deletion
2393 * If DELETED_FE is set special handling is needed for directory
2394 * entries.  Dependent pieces related to the directory entry may
2395 * have already been synced to disk. If this occurs we have to
2396 * sync the directory entry and then change the in-memory record
2397 * from an ADD to a DELETE to cover the fact that it's been
2398 * deleted by the frontend.
2400 * A directory delete covering record (MEM_RECORD_DEL) can never
2401 * be deleted by the frontend.
2403 * Any other record type (aka DATA) can be deleted by the frontend.
2404 * XXX At the moment the flusher must skip it because there may
2405 * be another data record in the flush group for the same block,
2406 * meaning that some frontend data changes can leak into the backend's
2407 * synchronization point.
2409 if (record->flags & HAMMER_RECF_DELETED_FE) {
2410 if (record->type == HAMMER_MEM_RECORD_ADD) {
2411 record->flags |= HAMMER_RECF_CONVERT_DELETE;
2413 KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
2414 record->flags |= HAMMER_RECF_DELETED_BE;
2421 * Assign the create_tid for new records. Deletions already
2422 * have the record's entire key properly set up.
2424 if (record->type != HAMMER_MEM_RECORD_DEL)
2425 record->leaf.base.create_tid = trans->tid;
2426 record->leaf.create_ts = trans->time32;
2427 for (;;) {
2428 error = hammer_ip_sync_record_cursor(cursor, record);
2429 if (error != EDEADLK)
2430 break;
2431 hammer_done_cursor(cursor);
2432 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
2433 record->ip);
2434 if (error)
2435 break;
2436 }
2437 record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
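/*
 * Editor's note: the retry loop above is a standard drop-and-retry
 * response to EDEADLK: tear the cursor down (releasing its locks),
 * rebuild it, and retry the operation.  The generic shape, with
 * hypothetical callbacks (compiled out):
 */
#if 0
#include <errno.h>

static int
ex_retry_on_deadlock(int (*op)(void *), int (*reinit)(void *), void *arg)
{
	int error;

	for (;;) {
		error = op(arg);
		if (error != EDEADLK)
			break;		/* success or a hard error */
		error = reinit(arg);	/* drop locks, rebuild state */
		if (error)
			break;
	}
	return (error);
}
#endif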
2442 hammer_flush_record_done(record, error);
2445 * Do partial finalization if we have built up too many dirty
2446 * buffers. Otherwise a buffer cache deadlock can occur when
2447 * doing things like creating tens of thousands of tiny files.
2449 * We must release our cursor lock to avoid a 3-way deadlock
2450 * due to the exclusive sync lock the finalizer must get.
2452 if (hammer_flusher_meta_limit(hmp)) {
2453 hammer_unlock_cursor(cursor);
2454 hammer_flusher_finalize(trans, 0);
2455 hammer_lock_cursor(cursor);
2462 * Backend function called by the flusher to sync an inode to media.
2465 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
2467 struct hammer_cursor cursor;
2468 hammer_node_t tmp_node;
2469 hammer_record_t depend;
2470 hammer_record_t next;
2471 int error, tmp_error;
2474 if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
2475 return(0);
2477 error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2482 * Any directory records referencing this inode which are not in
2483 * our current flush group must adjust our nlink count for the
2484 * purposes of synchronization to disk.
2486 * Records which are in our flush group can be unlinked from our
2487 * inode now, potentially allowing the inode to be physically
2488 * deleted.
2490 * This cannot block.
2492 nlinks = ip->ino_data.nlinks;
2493 next = TAILQ_FIRST(&ip->target_list);
2494 while ((depend = next) != NULL) {
2495 next = TAILQ_NEXT(depend, target_entry);
2496 if (depend->flush_state == HAMMER_FST_FLUSH &&
2497 depend->flush_group == ip->flush_group) {
2499 * If this is an ADD that was deleted by the frontend
2500 * the frontend nlinks count will have already been
2501 * decremented, but the backend is going to sync its
2502 * directory entry and must account for it. The
2503 * record will be converted to a delete-on-disk when
2504 * it gets synced.
2506 * If the ADD was not deleted by the frontend we
2507 * can remove the dependency from our target_list.
2509 if (depend->flags & HAMMER_RECF_DELETED_FE) {
2512 TAILQ_REMOVE(&ip->target_list, depend,
2514 depend->target_ip = NULL;
2516 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
2518 * Not part of our flush group.  Undo the frontend's nlinks adjustment.
2520 KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
2521 switch(depend->type) {
2522 case HAMMER_MEM_RECORD_ADD:
2523 --nlinks;
2524 break;
2525 case HAMMER_MEM_RECORD_DEL:
2526 ++nlinks;
2527 break;
2535 * Set dirty if we had to modify the link count.
2537 if (ip->sync_ino_data.nlinks != nlinks) {
2538 KKASSERT((int64_t)nlinks >= 0);
2539 ip->sync_ino_data.nlinks = nlinks;
2540 ip->sync_flags |= HAMMER_INODE_DDIRTY;
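/*
 * Editor's note: a worked example (compiled out, invented ex_* types) of
 * the adjustment above.  If the frontend queued a directory-entry ADD
 * that is not part of this flush group, the link must not yet be
 * visible on media, so the synced count is decremented; a pending DEL
 * is undone the same way in the other direction.
 */
#if 0
#include <assert.h>
#include <stdint.h>

enum ex_rec_type { EX_REC_ADD, EX_REC_DEL };

struct ex_dep {
	enum ex_rec_type type;
	int		 in_flush_group;
};

static uint64_t
ex_synced_nlinks(uint64_t nlinks, const struct ex_dep *deps, int ndeps)
{
	int i;

	for (i = 0; i < ndeps; ++i) {
		if (deps[i].in_flush_group)
			continue;	/* synced along with this group */
		if (deps[i].type == EX_REC_ADD)
			--nlinks;	/* link not yet on media */
		else
			++nlinks;	/* removal not yet on media */
	}
	return (nlinks);
}

static void
ex_synced_nlinks_check(void)
{
	struct ex_dep deps[1] = { { EX_REC_ADD, 0 } };

	/* frontend says 1 link, but the ADD is deferred: sync 0 */
	assert(ex_synced_nlinks(1, deps, 1) == 0);
}
#endif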
2544 * If there is a truncation queued, destroy any data past the (aligned)
2545 * truncation point. Userland will have dealt with the buffer
2546 * containing the truncation point for us.
2548 * We don't flush pending frontend data buffers until after we've
2549 * dealt with the truncation.
2551 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2553 * Interlock trunc_off. The VOP front-end may continue to
2554 * make adjustments to it while we are blocked.
2556 off_t trunc_off;
2557 off_t aligned_trunc_off;
2558 int blkmask;
2560 trunc_off = ip->sync_trunc_off;
2561 blkmask = hammer_blocksize(trunc_off) - 1;
2562 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
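/*
 * Editor's note: the alignment above is the usual power-of-two round-up.
 * A compiled-out check using a 16KB block (the size of HAMMER_BUFSIZE);
 * the helper name is invented:
 */
#if 0
#include <assert.h>
#include <stdint.h>

static int64_t
ex_roundup_blk(int64_t off, int64_t blksize)
{
	int64_t blkmask = blksize - 1;	/* blksize must be a power of 2 */

	return ((off + blkmask) & ~blkmask);
}

static void
ex_roundup_check(void)
{
	assert(ex_roundup_blk(16385, 16384) == 32768);	/* rounds up */
	assert(ex_roundup_blk(16384, 16384) == 16384);	/* already aligned */
	assert(ex_roundup_blk(0, 16384) == 0);
}
#endif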
2565 * Delete any whole blocks on-media. The front-end has
2566 * already cleaned out any partial block and made it
2567 * pending. The front-end may have updated trunc_off
2568 * while we were blocked so we only use sync_trunc_off.
2570 * This operation can blow out the buffer cache; EWOULDBLOCK
2571 * means we were unable to complete the deletion.  The
2572 * deletion will update sync_trunc_off in that case.
2574 error = hammer_ip_delete_range(&cursor, ip,
2575 aligned_trunc_off,
2576 0x7FFFFFFFFFFFFFFFLL, 2);
2577 if (error == EWOULDBLOCK) {
2578 ip->flags |= HAMMER_INODE_WOULDBLOCK;
2580 goto defer_buffer_flush;
2587 * Clear the truncation flag on the backend after we have
2588 * completed the deletions.  Backend data is now good again
2589 * (including new records we are about to sync, below).
2591 * Leave sync_trunc_off intact. As we write additional
2592 * records the backend will update sync_trunc_off. This
2593 * tells the backend whether it can skip the overwrite
2594 * test. This should work properly even when the backend
2595 * writes full blocks where the truncation point straddles
2596 * the block because the comparison is against the base
2597 * offset of the record.
2599 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2600 /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
2606 * Now sync related records. These will typically be directory
2607 * entries, records tracking direct-writes, or delete-on-disk records.
2610 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2611 hammer_sync_record_callback, &cursor);
2617 hammer_cache_node(&ip->cache[1], cursor.node);
2620 * Re-seek for inode update, assuming our cache hasn't been ripped
2621 * out from under us.
2624 tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
2626 hammer_cursor_downgrade(&cursor);
2627 hammer_lock_sh(&tmp_node->lock);
2628 if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
2629 hammer_cursor_seek(&cursor, tmp_node, 0);
2630 hammer_unlock(&tmp_node->lock);
2631 hammer_rel_node(tmp_node);
2637 * If we are deleting the inode the frontend had better not have
2638 * any active references on elements making up the inode.
2640 * The call to hammer_ip_delete_clean() cleans up auxiliary records
2641 * but not DB or DATA records. Those must have already been deleted
2642 * by the normal truncation mechanic.
2644 if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
2645 RB_EMPTY(&ip->rec_tree) &&
2646 (ip->sync_flags & HAMMER_INODE_DELETING) &&
2647 (ip->flags & HAMMER_INODE_DELETED) == 0) {
2650 error = hammer_ip_delete_clean(&cursor, ip, &count1);
2652 ip->flags |= HAMMER_INODE_DELETED;
2653 ip->sync_flags &= ~HAMMER_INODE_DELETING;
2654 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2655 KKASSERT(RB_EMPTY(&ip->rec_tree));
2658 * Set delete_tid in both the frontend and backend
2659 * copy of the inode record. The DELETED flag handles
2660 * this, do not set RDIRTY.
2662 ip->ino_leaf.base.delete_tid = trans->tid;
2663 ip->sync_ino_leaf.base.delete_tid = trans->tid;
2664 ip->ino_leaf.delete_ts = trans->time32;
2665 ip->sync_ino_leaf.delete_ts = trans->time32;
2669 * Adjust the inode count in the volume header
2671 hammer_sync_lock_sh(trans);
2672 if (ip->flags & HAMMER_INODE_ONDISK) {
2673 hammer_modify_volume_field(trans,
2674 trans->rootvol,
2675 vol0_stat_inodes);
2676 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
2677 hammer_modify_volume_done(trans->rootvol);
2679 hammer_sync_unlock(trans);
2685 ip->sync_flags &= ~HAMMER_INODE_BUFS;
2689 * Now update the inode's on-disk inode-data and/or on-disk record.
2690 * DELETED and ONDISK are managed only in ip->flags.
2692 * In the case of a deferred buffer flush we still update the on-disk
2693 * inode to satisfy visibility requirements if there happen to be
2694 * directory dependencies.
2696 switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
2697 case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
2699 * If deleted and on-disk, don't set any additional flags.
2700 * The delete flag takes care of things.
2702 * Clear flags which may have been set by the frontend.
2704 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2705 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2706 HAMMER_INODE_DELETING);
2708 case HAMMER_INODE_DELETED:
2710 * Take care of the case where a deleted inode was never
2711 * flushed to the disk in the first place.
2713 * Clear flags which may have been set by the frontend.
2715 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2716 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2717 HAMMER_INODE_DELETING);
2718 while (RB_ROOT(&ip->rec_tree)) {
2719 hammer_record_t record = RB_ROOT(&ip->rec_tree);
2720 hammer_ref(&record->lock);
2721 KKASSERT(record->lock.refs == 1);
2722 record->flags |= HAMMER_RECF_DELETED_FE;
2723 record->flags |= HAMMER_RECF_DELETED_BE;
2724 hammer_rel_mem_record(record);
2727 case HAMMER_INODE_ONDISK:
2729 * If already on-disk, do not set any additional flags.
2731 break;
2732 default:
2734 * If not on-disk and not deleted, set DDIRTY to force
2735 * an initial record to be written.
2737 * Also set the create_tid in both the frontend and backend
2738 * copy of the inode record.
2740 ip->ino_leaf.base.create_tid = trans->tid;
2741 ip->ino_leaf.create_ts = trans->time32;
2742 ip->sync_ino_leaf.base.create_tid = trans->tid;
2743 ip->sync_ino_leaf.create_ts = trans->time32;
2744 ip->sync_flags |= HAMMER_INODE_DDIRTY;
2749 * If RDIRTY or DDIRTY is set, write out a new record. If the inode
2750 * is already on-disk the old record is marked as deleted.
2752 * If DELETED is set hammer_update_inode() will delete the existing
2753 * record without writing out a new one.
2755 * If *ONLY* the ITIMES flag is set we can update the record in-place.
2757 if (ip->flags & HAMMER_INODE_DELETED) {
2758 error = hammer_update_inode(&cursor, ip);
2760 if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
2761 (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
2762 error = hammer_update_itimes(&cursor, ip);
2764 if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
2765 error = hammer_update_inode(&cursor, ip);
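/*
 * Editor's note: the cascade above reduces to a small decision table;
 * a compiled-out predicate form with invented names (DELETED first,
 * then the in-place timestamp fast path, then the full record update):
 */
#if 0
enum ex_update { EX_UPD_NONE, EX_UPD_ITIMES, EX_UPD_FULL };

static enum ex_update
ex_pick_update(int deleted, int ddirty, int times_dirty)
{
	if (deleted)
		return (EX_UPD_FULL);	/* hammer_update_inode() deletes */
	if (!ddirty && times_dirty)
		return (EX_UPD_ITIMES);	/* in-place timestamp update */
	if (ddirty)
		return (EX_UPD_FULL);	/* write a new inode record */
	return (EX_UPD_NONE);
}
#endif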
2769 hammer_critical_error(ip->hmp, ip, error,
2770 "while syncing inode");
2772 hammer_done_cursor(&cursor);
2777 * This routine is called when the OS is no longer actively referencing
2778 * the inode (but might still be keeping it cached), or when releasing
2779 * the last reference to an inode.
2781 * At this point if the inode's nlinks count is zero we want to destroy
2782 * it, which may mean destroying it on-media too.
2785 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
2790 * Set the DELETING flag when the link count drops to 0 and the
2791 * OS no longer has any opens on the inode.
2793 * The backend will clear DELETING (a mod flag) and set DELETED
2794 * (a state flag) when it is actually able to perform the
2795 * operation.
2797 * Don't reflag the deletion if the flusher is currently syncing
2798 * one that was already flagged.  A previously set DELETING flag
2799 * may bounce around flags and sync_flags until the operation is
2800 * completely done.
2802 if (ip->ino_data.nlinks == 0 &&
2803 ((ip->flags | ip->sync_flags) & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
2804 ip->flags |= HAMMER_INODE_DELETING;
2805 ip->flags |= HAMMER_INODE_TRUNCATED;
2809 if (hammer_get_vnode(ip, &vp) != 0)
2810 return;
2817 vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
2818 vnode_pager_setsize(ip->vp, 0);
2827 * After potentially resolving a dependency the inode is tested
2828 * to determine whether it needs to be reflushed.
2831 hammer_test_inode(hammer_inode_t ip)
2833 if (ip->flags & HAMMER_INODE_REFLUSH) {
2834 ip->flags &= ~HAMMER_INODE_REFLUSH;
2835 hammer_ref(&ip->lock);
2836 if (ip->flags & HAMMER_INODE_RESIGNAL) {
2837 ip->flags &= ~HAMMER_INODE_RESIGNAL;
2838 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2840 hammer_flush_inode(ip, 0);
2842 hammer_rel_inode(ip, 0);
2847 * Clear the RECLAIM flag on an inode. This occurs when the inode is
2848 * reassociated with a vp or just before it gets freed.
2850 * Pipeline wakeups to threads blocked due to an excessive number of
2851 * detached inodes.  The reclaim count generates a bit of negative
2852 * feedback.
2855 hammer_inode_wakereclaims(hammer_inode_t ip, int dowake)
2857 struct hammer_reclaim *reclaim;
2858 hammer_mount_t hmp = ip->hmp;
2860 if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
2861 return;
2863 --hammer_count_reclaiming;
2864 --hmp->inode_reclaims;
2865 ip->flags &= ~HAMMER_INODE_RECLAIM;
2867 if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT || dowake) {
2868 reclaim = TAILQ_FIRST(&hmp->reclaim_list);
2869 if (reclaim && reclaim->count > 0 && --reclaim->count == 0) {
2870 TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
2871 wakeup(reclaim);
2877 * Set up our reclaim pipeline.  We only let so many detached (and dirty)
2878 * inodes build up before we start blocking.
2880 * When we block we don't care *which* inode has finished reclaiming,
2881 * as long as one does.  This is somewhat heuristic... we also put a
2882 * cap on how long we are willing to wait.
2885 hammer_inode_waitreclaims(hammer_mount_t hmp)
2887 struct hammer_reclaim reclaim;
2890 if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT)
2891 return;
2892 delay = (hmp->inode_reclaims - HAMMER_RECLAIM_WAIT) * hz /
2893 (HAMMER_RECLAIM_WAIT * 3) + 1;
2895 reclaim.count = 1;
2896 TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
2897 tsleep(&reclaim, 0, "hmrrcm", delay);
2898 if (reclaim.count > 0)
2899 TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
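/*
 * Editor's note: a compiled-out reproduction of the delay formula above.
 * The hz and HAMMER_RECLAIM_WAIT values used below (100 and 4000) are
 * assumptions made for illustration only.
 */
#if 0
#include <assert.h>

static int
ex_reclaim_delay(int inode_reclaims, int reclaim_wait, int hz)
{
	return ((inode_reclaims - reclaim_wait) * hz /
		(reclaim_wait * 3) + 1);
}

static void
ex_reclaim_delay_check(void)
{
	/* 6000 inodes over the limit at hz=100: ~half a second */
	assert(ex_reclaim_delay(10000, 4000, 100) == 51);

	/* right at the limit we still sleep for at least one tick */
	assert(ex_reclaim_delay(4000, 4000, 100) == 1);
}
#endif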
2904 * When a larger than normal backlog of inodes is sitting in the flusher,
2905 * enforce a general slowdown to let it catch up.  This routine is only
2906 * called on completion of a non-flusher-related transaction which
2907 * performed B-Tree node I/O.
2909 * It is possible for the flusher to stall in a continuous load.
2910 * blogbench -i1000 -o seems to do a good job generating this sort of load.
2911 * If the flusher is unable to catch up the inode count can bloat until
2912 * we run out of kvm.
2914 * This is a bit of a hack.
2917 hammer_inode_waithard(hammer_mount_t hmp)
2922 if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
2923 if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT / 2 &&
2924 hmp->count_iqueued < hmp->count_inodes / 20) {
2925 hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
2929 if (hmp->inode_reclaims < HAMMER_RECLAIM_WAIT ||
2930 hmp->count_iqueued < hmp->count_inodes / 10) {
2933 hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
2937 * Block for one flush cycle.
2939 hammer_flusher_wait_next(hmp);
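/*
 * Editor's note: hammer_inode_waithard() above is a two-threshold
 * hysteresis: the slowdown engages at a high-water mark (the full
 * HAMMER_RECLAIM_WAIT backlog, or 10% of inodes queued) but only
 * disengages well below it (half the backlog, 5%), which keeps the
 * FLUSH_RECOVERY flag from flapping under a steady load.  A generic,
 * compiled-out sketch with invented names:
 */
#if 0
struct ex_hysteresis {
	int engaged;
	int hi;		/* engage at or above this level */
	int lo;		/* disengage strictly below this level */
};

static int
ex_hysteresis_check(struct ex_hysteresis *h, int level)
{
	if (h->engaged) {
		if (level < h->lo)
			h->engaged = 0;
	} else if (level >= h->hi) {
		h->engaged = 1;
	}
	return (h->engaged);
}
#endif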