/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.114 2008/09/24 00:53:51 dillon Exp $
 */
#include "hammer.h"
#include <vm/vm_extern.h>
static int	hammer_unload_inode(struct hammer_inode *ip);
static void	hammer_free_inode(hammer_inode_t ip);
static void	hammer_flush_inode_core(hammer_inode_t ip,
					hammer_flush_group_t flg, int flags);
static int	hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int	hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int	hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
					hammer_flush_group_t flg);
static int	hammer_setup_parent_inodes_helper(hammer_record_t record,
					int depth, hammer_flush_group_t flg);
static void	hammer_inode_wakereclaims(hammer_inode_t ip);
static struct hammer_inostats *hammer_inode_inostats(hammer_mount_t hmp,
					pid_t pid);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif
/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_localization < ip2->obj_localization)
		return(-1);
	if (ip1->obj_localization > ip2->obj_localization)
		return(1);
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}
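/*
 * Note that the comparison above orders inodes by (obj_localization,
 * obj_id, obj_asof), highest priority field first.  Multiple snapshots
 * of the same object therefore sort adjacently, differing only in the
 * low-priority obj_asof field, which is what the LOOKUP_INFO variants
 * below rely on.
 */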
int
hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->redo_fifo_start < ip2->redo_fifo_start)
		return(-1);
	if (ip1->redo_fifo_start > ip2->redo_fifo_start)
		return(1);
	return(0);
}
/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_localization < ip->obj_localization)
		return(-1);
	if (info->obj_localization > ip->obj_localization)
		return(1);
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}
/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t info = data;

	if (ip->obj_localization > info->obj_localization)
		return(1);
	if (ip->obj_localization < info->obj_localization)
		return(-1);
	if (ip->obj_id > info->obj_id)
		return(1);
	if (ip->obj_id < info->obj_id)
		return(-1);
	return(0);
}
/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
	u_int32_t localization = *(u_int32_t *)data;
	if (ip->obj_localization > localization)
		return(1);
	if (ip->obj_localization < localization)
		return(-1);
	return(0);
}
/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
	if (p1->localization < p2->localization)
		return(-1);
	if (p1->localization > p2->localization)
		return(1);
	return(0);
}
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
	     hammer_pfs_rb_compare, u_int32_t, localization);
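/*
 * The macros above generate the tree operations used throughout this
 * file: RB_GENERATE_XLOOKUP() produces the _RB_LOOKUP_INFO() variant
 * keyed on a struct hammer_inode_info (see hammer_get_inode()), and
 * RB_GENERATE2() produces an RB_LOOKUP() keyed directly on the
 * localization field (see hammer_load_pseudofs()).
 */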
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 *
 * MPALMOSTSAFE
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);
	hammer_mount_t hmp;

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(ap->a_vp);
		return(0);
	}

	/*
	 * If the inode no longer has visibility in the filesystem try to
	 * recycle it immediately, even if the inode is dirty.  Recycling
	 * it quickly allows the system to reclaim buffer cache and VM
	 * resources which can matter a lot in a heavily loaded system.
	 *
	 * This can deadlock in vfsync() if we aren't careful.
	 *
	 * Do not queue the inode to the flusher if we still have visibility,
	 * otherwise namespace calls such as chmod will unnecessarily generate
	 * multiple inode updates.
	 */
	if (ip->ino_data.nlinks == 0) {
		hmp = ip->hmp;
		lwkt_gettoken(&hmp->fs_token);
		hammer_inode_unloadable_check(ip, 0);
		if (ip->flags & HAMMER_INODE_MODMASK)
			hammer_flush_inode(ip, 0);
		lwkt_reltoken(&hmp->fs_token);
		vrecycle(ap->a_vp);
	}
	return(0);
}
/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 *
 * We must interlock ip->vp so hammer_get_vnode() can avoid races.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;
	hammer_mount_t hmp;
	struct vnode *vp;

	vp = ap->a_vp;

	if ((ip = vp->v_data) != NULL) {
		hmp = ip->hmp;
		lwkt_gettoken(&hmp->fs_token);
		hammer_lock_ex(&ip->lock);
		vp->v_data = NULL;
		ip->vp = NULL;

		if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
			++hammer_count_reclaiming;
			++hmp->inode_reclaims;
			ip->flags |= HAMMER_INODE_RECLAIM;
		}
		hammer_unlock(&ip->lock);
		hammer_rel_inode(ip, 1);
		lwkt_reltoken(&hmp->fs_token);
	}
	return(0);
}
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
	hammer_mount_t hmp;
	struct vnode *vp;
	int error = 0;
	u_int8_t obj_type;

	hmp = ip->hmp;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				hammer_unlock(&ip->lock);
				vp = *vpp;
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;

			obj_type = ip->ino_data.obj_type;
			vp->v_type = hammer_get_vnode_type(obj_type);

			hammer_inode_wakereclaims(ip);

			switch(ip->ino_data.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
				break;
			case HAMMER_OBJTYPE_REGFILE:
				break;
			default:
				break;
			}

			/*
			 * Only mark as the root vnode if the ip is not
			 * historical, otherwise the VFS cache will get
			 * confused.  The other half of the special handling
			 * is in hammer_vop_nlookupdotdot().
			 *
			 * Pseudo-filesystem roots can be accessed via
			 * non-root filesystem paths and setting VROOT may
			 * confuse the namecache.  Set VPFSROOT instead.
			 */
			if (ip->obj_id == HAMMER_OBJID_ROOT &&
			    ip->obj_asof == hmp->asof) {
				if (ip->obj_localization == 0)
					vsetflags(vp, VROOT);
				else
					vsetflags(vp, VPFSROOT);
			}

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG) {
				vinitvmio(vp, ip->ino_data.size,
					  hammer_blocksize(ip->ino_data.size),
					  hammer_blockoff(ip->ino_data.size));
			}
			break;
		}

		/*
		 * Interlock vnode clearing.  This does not prevent the
		 * vnode from going into a reclaimed state but it does
		 * prevent it from being destroyed or reused so the vget()
		 * will properly fail.
		 */
		hammer_lock_ex(&ip->lock);
		if ((vp = ip->vp) == NULL) {
			hammer_unlock(&ip->lock);
			continue;
		}
		vhold_interlocked(vp);
		hammer_unlock(&ip->lock);

		/*
		 * loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp) {
				vdrop(vp);
				break;
			}
			vput(vp);
		}
		vdrop(vp);
	}
	*vpp = vp;
	return(error);
}
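/*
 * Illustrative sketch (not code from this file): the frontend resolves
 * an inode first and only then attaches the vnode, since the inode
 * acquisition functions below never touch the vnode association:
 *
 *	ip = hammer_get_inode(trans, dip, obj_id, asof, localization,
 *			      flags, &error);
 *	if (ip != NULL)
 *		error = hammer_get_vnode(ip, &vp);
 */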
/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
			    int (*callback)(hammer_inode_t ip, void *data),
			    void *data)
{
	hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
				   hammer_inode_info_cmp_all_history,
				   callback, iinfo);
}
/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_node_cache *cachep;
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a dummy inode we return a failure so dounlink
	 * (which does another lookup) doesn't try to mess with the
	 * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
	 * to ref dummy inodes.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Locate the on-disk inode.  If this is a PFS root we always
	 * access the current version of the root inode and (if it is not
	 * a master) always access information under it with a snapshot
	 * TID.
	 *
	 * We cache recent inode lookups in this directory in dip->cache[2].
	 * If we can't find it we assume the inode we are looking for is
	 * close to the directory inode.
	 */
retry:
	cachep = NULL;
	if (dip) {
		if (dip->cache[2].node)
			cachep = &dip->cache[2];
		else
			cachep = &dip->cache[0];
	}
	hammer_init_cursor(trans, &cursor, cachep, NULL);
	cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;

	cursor.asof = iinfo.obj_asof;
	cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
		       HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
		ip->ino_data = cursor.data->inode;

		/*
		 * cache[0] tries to cache the location of the object inode.
		 * The assumption is that it is near the directory inode.
		 *
		 * cache[1] tries to cache the location of the object data.
		 * We might have something in the governing directory from
		 * scan optimizations (see the strategy code in
		 * hammer_vnops.c).
		 *
		 * We update dip->cache[2], if possible, with the location
		 * of the object inode for future directory shortcuts.
		 */
		hammer_cache_node(&ip->cache[0], cursor.node);
		if (dip) {
			if (dip->cache[3].node) {
				hammer_cache_node(&ip->cache[1],
						  dip->cache[3].node);
			}
			hammer_cache_node(&dip->cache[2], cursor.node);
		}

		/*
		 * The file should not contain any data past the file size
		 * stored in the inode.  Setting save_trunc_off to the
		 * file size instead of max reduces B-Tree lookup overheads
		 * on append by allowing the flusher to avoid checking for
		 * record overwrites.
		 */
		ip->save_trunc_off = ip->ino_data.size;

		/*
		 * Locate and assign the pseudofs management structure to
		 * the inode.
		 */
		if (dip && dip->obj_localization == ip->obj_localization) {
			ip->pfsm = dip->pfsm;
			hammer_ref(&ip->pfsm->lock);
		} else {
			ip->pfsm = hammer_load_pseudofs(trans,
							ip->obj_localization,
							errorp);
			*errorp = 0;	/* ignore ENOENT */
		}
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}

		hammer_free_inode(ip);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);

	/*
	 * NEWINODE is only set if the inode becomes dirty later,
	 * setting it here just leads to unnecessary stalls.
	 *
	 * trans->flags |= HAMMER_TRANSF_NEWINODE;
	 */
	return(ip);
}
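/*
 * A note on the EDEADLK handling above: a B-Tree lookup may return
 * EDEADLK when the cursor has to be backed out to break a deadlock.
 * The convention in this file is to tear the cursor down with
 * hammer_done_cursor() and retry the operation from scratch;
 * hammer_update_inode() and hammer_update_itimes() below follow the
 * same pattern.
 */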
/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 *
	 * If we find an inode with no vnode we have to mark the
	 * transaction such that hammer_inode_waitreclaims() is
	 * called later on to avoid building up an infinite number
	 * of inodes.  Otherwise we can continue to add new inodes
	 * faster than they can be disposed of, even with the tsleep
	 * delay.
	 *
	 * If we find a non-fake inode we return an error.  Only fake
	 * inodes can be returned by this routine.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	*errorp = 0;
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
			*errorp = ENOENT;
			return(NULL);
		}
		hammer_ref(&ip->lock);
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Populate the dummy inode.  Leave everything zero'd out.
	 *
	 * (ip->ino_leaf and ip->ino_data)
	 *
	 * Make the dummy inode a FIFO object which most copy programs
	 * will properly ignore.
	 */
	ip->save_trunc_off = ip->ino_data.size;
	ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

	/*
	 * Locate and assign the pseudofs management structure to
	 * the inode.
	 */
	if (dip && dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
	} else {
		ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
						errorp);
		*errorp = 0;	/* ignore ENOENT */
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 *
	 * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			goto loop;
		}
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}
		hammer_free_inode(ip);
		ip = NULL;
	}
	trans->flags |= HAMMER_TRANSF_NEWINODE;
	return(ip);
}
/*
 * Return a referenced inode only if it is in our inode cache.
 *
 * Dummy inodes do not count.
 */
struct hammer_inode *
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
		  hammer_tid_t asof, u_int32_t localization)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_inode *ip;

	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;

	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		if (ip->flags & HAMMER_INODE_DUMMY)
			ip = NULL;
		else
			hammer_ref(&ip->lock);
	}
	return(ip);
}
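/*
 * Unlike hammer_get_inode(), hammer_find_inode() above only consults the
 * in-memory red-black tree and never instantiates an inode from the
 * media, so a NULL return simply means the inode is not cached.
 */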
/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred,
		    hammer_inode_t dip, const char *name, int namelen,
		    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;
	int error;
	int64_t namekey;
	u_int32_t dummy;

	hmp = trans->hmp;

	ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	trans->flags |= HAMMER_TRANSF_NEWINODE;

	if (pfsm) {
		KKASSERT(pfsm->localization != 0);
		ip->obj_id = HAMMER_OBJID_ROOT;
		ip->obj_localization = pfsm->localization;
	} else {
		KKASSERT(dip != NULL);
		namekey = hammer_directory_namekey(dip, name, namelen, &dummy);
		ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
		ip->obj_localization = dip->obj_localization;
	}

	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flush_state = HAMMER_FST_IDLE;
	ip->flags = HAMMER_INODE_DDIRTY |
		    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	ip->cache[2].ip = ip;
	ip->cache[3].ip = ip;

	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	/* ip->save_trunc_off = 0; (already zero) */
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	ip->ino_data.atime = trans->time;
	ip->ino_data.mtime = trans->time;
	ip->ino_data.size = 0;
	ip->ino_data.nlinks = 0;

	/*
	 * A nohistory designator on the parent directory is inherited by
	 * the child.  We will do this even for pseudo-fs creation... the
	 * sysad can turn it off.
	 */
	if (dip) {
		ip->ino_data.uflags = dip->ino_data.uflags &
				      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
	}

	ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_leaf.base.localization = ip->obj_localization +
					 HAMMER_LOCALIZE_INODE;
	ip->ino_leaf.base.obj_id = ip->obj_id;
	ip->ino_leaf.base.key = 0;
	ip->ino_leaf.base.create_tid = 0;
	ip->ino_leaf.base.delete_tid = 0;
	ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->time;

	/*
	 * If we are running version 2 or greater directory entries are
	 * inode-localized instead of data-localized.
	 */
	if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
		if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
			ip->ino_data.cap_flags |=
				HAMMER_INODE_CAP_DIR_LOCAL_INO;
		}
	}

	/*
	 * Setup the ".." pointer.  This only needs to be done for directories
	 * but we do it for all objects as a recovery aid.
	 */
	if (dip)
		ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;

	/*
	 * The parent_obj_localization field only applies to pseudo-fs roots.
	 * XXX this is no longer applicable, PFSs are no longer directly
	 * tied into the parent's directory structure.
	 */
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
	    ip->obj_id == HAMMER_OBJID_ROOT) {
		ip->ino_data.ext.obj.parent_obj_localization =
						dip->obj_localization;
	}

	switch(ip->ino_leaf.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	if (dip) {
		xuid = hammer_to_unix_xid(&dip->ino_data.uid);
		xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
					     xuid, cred, &vap->va_mode);
	} else {
		xuid = 0;
	}
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
	else
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
	else if (dip)
		ip->ino_data.gid = dip->ino_data.gid;

	hammer_ref(&ip->lock);

	if (pfsm) {
		ip->pfsm = pfsm;
		hammer_ref(&pfsm->lock);
		error = 0;
	} else if (dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
		error = 0;
	} else {
		ip->pfsm = hammer_load_pseudofs(trans,
						ip->obj_localization,
						&error);
		error = 0;	/* ignore ENOENT */
	}

	if (error) {
		hammer_free_inode(ip);
		ip = NULL;
	} else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		panic("hammer_create_inode: duplicate obj_id %llx",
		      (long long)ip->obj_id);
		/* not reached */
		hammer_free_inode(ip);
	}
	*ipp = ip;
	return(error);
}
/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
	struct hammer_mount *hmp;

	hmp = ip->hmp;
	KKASSERT(hammer_oneref(&ip->lock));
	hammer_uncache_node(&ip->cache[0]);
	hammer_uncache_node(&ip->cache[1]);
	hammer_uncache_node(&ip->cache[2]);
	hammer_uncache_node(&ip->cache[3]);
	hammer_inode_wakereclaims(ip);
	if (ip->objid_cache)
		hammer_clear_objid(ip);
	--hammer_count_inodes;
	--hmp->count_inodes;
	if (ip->pfsm) {
		hammer_rel_pseudofs(hmp, ip->pfsm);
		ip->pfsm = NULL;
	}
	kfree(ip, hmp->m_inodes);
}
/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
		     u_int32_t localization, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_inode_t ip;
	hammer_pseudofs_inmem_t pfsm;
	struct hammer_cursor cursor;
	int bytes;

retry:
	pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
	if (pfsm) {
		hammer_ref(&pfsm->lock);
		*errorp = 0;
		return(pfsm);
	}

	/*
	 * PFS records are stored in the root inode (not the PFS root inode,
	 * but the real root).  Avoid an infinite recursion if loading
	 * the PFS for the real root.
	 */
	if (localization) {
		ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
				      HAMMER_MAX_TID,
				      HAMMER_DEF_LOCALIZATION, 0, errorp);
	} else {
		ip = NULL;
	}

	pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
	pfsm->localization = localization;
	pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
	pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

	hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
	cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	if (ip)
		*errorp = hammer_ip_lookup(&cursor);
	else
		*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == 0) {
		*errorp = hammer_ip_resolve_data(&cursor);
		if (*errorp == 0) {
			if (cursor.data->pfsd.mirror_flags &
			    HAMMER_PFSD_DELETED) {
				*errorp = ENOENT;
			} else {
				bytes = cursor.leaf->data_len;
				if (bytes > sizeof(pfsm->pfsd))
					bytes = sizeof(pfsm->pfsd);
				bcopy(cursor.data, &pfsm->pfsd, bytes);
			}
		}
	}
	hammer_done_cursor(&cursor);

	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_ref(&pfsm->lock);
	if (ip)
		hammer_rel_inode(ip, 0);
	if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
		kfree(pfsm, hmp->m_misc);
		goto retry;
	}
	return(pfsm);
}
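/*
 * Because the PFS data loaded above is cached in hmp->rb_pfsm_root, only
 * the first hammer_load_pseudofs() call for a given localization pays
 * for a media lookup; subsequent calls return the referenced in-memory
 * structure immediately.  The RB_INSERT race check mirrors the one used
 * for inodes in hammer_get_inode().
 */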
/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	hammer_inode_t ip;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = pfsm->localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	/*
	 * Replace any in-memory version of the record.
	 */
	error = hammer_ip_lookup(&cursor);
	if (error == 0 && hammer_cursor_inmem(&cursor)) {
		record = cursor.iprec;
		if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
			KKASSERT(cursor.deadlk_rec == NULL);
			hammer_ref(&record->lock);
			cursor.deadlk_rec = record;
			error = EDEADLK;
		} else {
			record->flags |= HAMMER_RECF_DELETED_FE;
			error = 0;
		}
	}

	/*
	 * Allocate replacement general record.  The backend flush will
	 * delete any on-disk version of the record.
	 */
	if (error == 0 || error == ENOENT) {
		record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
		record->type = HAMMER_MEM_RECORD_GENERAL;

		record->leaf.base.localization = ip->obj_localization +
						 HAMMER_LOCALIZE_MISC;
		record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
		record->leaf.base.key = pfsm->localization;
		record->leaf.data_len = sizeof(pfsm->pfsd);
		bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
		error = hammer_ip_add_record(trans, record);
	}
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	hammer_rel_inode(ip, 0);
	return(error);
}
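/*
 * The store above is a delete-and-replace: any frontend-visible
 * in-memory PFS record is marked HAMMER_RECF_DELETED_FE and a fresh
 * HAMMER_MEM_RECORD_GENERAL record is queued in its place, relying on
 * the backend flush to dispose of the prior on-disk version.
 */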
/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
		       hammer_pseudofs_inmem_t pfsm)
{
	hammer_inode_t ip;
	struct vattr vap;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      pfsm->localization, 0, &error);
	if (ip == NULL) {
		vattr_null(&vap);
		vap.va_mode = 0755;
		vap.va_type = VDIR;
		error = hammer_create_inode(trans, &vap, cred,
					    NULL, NULL, 0,
					    pfsm, &ip);
		if (error == 0) {
			++ip->ino_data.nlinks;
			hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (ip)
		hammer_rel_inode(ip, 0);
	return(error);
}
/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
	int res;

	hammer_ref(&ip->lock);
	if (hammer_isactive(&ip->lock) == 2 && ip->vp)
		vclean_unlocked(ip->vp);
	if (hammer_isactive(&ip->lock) == 1 && ip->vp == NULL)
		res = 0;
	else
		res = -1;	/* stop, someone is using the inode */
	hammer_rel_inode(ip, 0);
	return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
	int res;
	int try;

	for (try = res = 0; try < 4; ++try) {
		res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
						 hammer_inode_pfs_cmp,
						 hammer_unload_pseudofs_callback,
						 &localization);
		if (res == 0 && try > 1)
			break;
		hammer_flusher_sync(trans->hmp);
	}
	if (res != 0)
		res = ENOTEMPTY;
	return(res);
}
/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
	hammer_rel(&pfsm->lock);
	if (hammer_norefs(&pfsm->lock)) {
		RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
		kfree(pfsm, hmp->m_misc);
	}
}
/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_record_t record;
	int error;
	int redirty;

retry:
	error = 0;

	/*
	 * If the inode has a presence on-disk then locate it and mark
	 * it deleted, setting DELONDISK.
	 *
	 * The record may or may not be physically deleted, depending on
	 * the retention policy.
	 */
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = ip->obj_localization +
					       HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (hammer_debug_inode)
			kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

		if (error == 0) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			if (hammer_debug_inode)
				kprintf(" error %d\n", error);
			if (error == 0) {
				ip->flags |= HAMMER_INODE_DELONDISK;
			}
			if (cursor->node)
				hammer_cache_node(&ip->cache[0], cursor->node);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("IPDED %p %d\n", ip, error);
			if (error == 0)
				goto retry;
		}
	}

	/*
	 * Ok, write out the initial record or a new record (after deleting
	 * the old one), unless the DELETED flag is set.  This routine will
	 * clear DELONDISK if it writes out a record.
	 *
	 * Update our inode statistics if this is the first application of
	 * the inode on-disk.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		/*
		 * Generate a record and write it to the media.  We clean-up
		 * the state before releasing so we do not have to set-up
		 * a flush_group.
		 */
		record = hammer_alloc_mem_record(ip, 0);
		record->type = HAMMER_MEM_RECORD_INODE;
		record->flush_state = HAMMER_FST_FLUSH;
		record->leaf = ip->sync_ino_leaf;
		record->leaf.base.create_tid = trans->tid;
		record->leaf.data_len = sizeof(ip->sync_ino_data);
		record->leaf.create_ts = trans->time32;
		record->data = (void *)&ip->sync_ino_data;
		record->flags |= HAMMER_RECF_INTERLOCK_BE;

		/*
		 * If this flag is set we cannot sync the new file size
		 * because we haven't finished related truncations.  The
		 * inode will be flushed in another flush group to finish
		 * the job.
		 */
		if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
		    ip->sync_ino_data.size != ip->ino_data.size) {
			redirty = 1;
			ip->sync_ino_data.size = ip->ino_data.size;
		} else {
			redirty = 0;
		}

		for (;;) {
			error = hammer_ip_sync_record_cursor(cursor, record);
			if (hammer_debug_inode)
				kprintf("GENREC %p rec %08x %d\n",
					ip, record->flags, error);
			if (error != EDEADLK)
				break;
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("GENREC reinit %d\n", error);
			if (error)
				break;
		}

		/*
		 * Note:  The record was never on the inode's record tree
		 * so just wave our hands importantly and destroy it.
		 */
		record->flags |= HAMMER_RECF_COMMITTED;
		record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
		record->flush_state = HAMMER_FST_IDLE;
		++ip->rec_generation;
		hammer_rel_mem_record(record);

		/*
		 * Finish up.
		 */
		if (error == 0) {
			if (hammer_debug_inode)
				kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
			ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
					    HAMMER_INODE_SDIRTY |
					    HAMMER_INODE_ATIME |
					    HAMMER_INODE_MTIME);
			ip->flags &= ~HAMMER_INODE_DELONDISK;
			if (redirty)
				ip->sync_flags |= HAMMER_INODE_DDIRTY;

			/*
			 * Root volume count of inodes
			 */
			hammer_sync_lock_sh(trans);
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
				if (hammer_debug_inode)
					kprintf("NOWONDISK %p\n", ip);
			}
			hammer_sync_unlock(trans);
		}
	}

	/*
	 * If the inode has been destroyed, clean out any left-over flags
	 * that may have been set by the frontend.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
				    HAMMER_INODE_SDIRTY |
				    HAMMER_INODE_ATIME |
				    HAMMER_INODE_MTIME);
	}
	return(error);
}
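/*
 * Note that an inode update is itself a delete-and-replace at the
 * B-Tree level: the old inode record is marked deleted (DELONDISK) and
 * a new HAMMER_MEM_RECORD_INODE record is written with
 * create_tid = trans->tid, preserving the historical, as-of views of
 * the inode's meta-data subject to the retention policy.
 */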
/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

retry:
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
	    HAMMER_INODE_ONDISK) {
		return(0);
	}

	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_INODE;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.key = 0;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor->key_beg.obj_type = 0;
	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_GET_LEAF;
	cursor->flags |= HAMMER_CURSOR_GET_DATA;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(cursor);
	if (error == 0) {
		hammer_cache_node(&ip->cache[0], cursor->node);
		if (ip->sync_flags & HAMMER_INODE_MTIME) {
			/*
			 * Updating MTIME requires an UNDO.  Just cover
			 * both atime and mtime.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
				     HAMMER_ITIMES_BASE(&cursor->data->inode),
				     HAMMER_ITIMES_BYTES);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			cursor->data->inode.mtime = ip->sync_ino_data.mtime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		} else if (ip->sync_flags & HAMMER_INODE_ATIME) {
			/*
			 * Updating atime only can be done in-place with
			 * no UNDO.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
					     NULL, 0);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		}
		ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor,
					   &ip->cache[0], ip);
		if (error == 0)
			goto retry;
	}
	return(error);
}
/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	/*hammer_mount_t hmp = ip->hmp;*/

	/*
	 * Handle disposition when dropping the last ref.
	 */
	for (;;) {
		if (hammer_oneref(&ip->lock)) {
			/*
			 * Determine whether on-disk action is needed for
			 * the inode's final disposition.
			 */
			KKASSERT(ip->vp == NULL);
			hammer_inode_unloadable_check(ip, 0);
			if (ip->flags & HAMMER_INODE_MODMASK) {
				hammer_flush_inode(ip, 0);
			} else if (hammer_oneref(&ip->lock)) {
				hammer_unload_inode(ip);
				break;
			}
		} else {
			if (flush)
				hammer_flush_inode(ip, 0);

			/*
			 * The inode still has multiple refs, try to drop
			 * one ref.
			 */
			KKASSERT(hammer_isactive(&ip->lock) >= 1);
			if (hammer_isactive(&ip->lock) > 1) {
				hammer_rel(&ip->lock);
				break;
			}
		}
	}
}
/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
	hammer_mount_t hmp = ip->hmp;

	KASSERT(hammer_oneref(&ip->lock),
		("hammer_unload_inode: %d refs\n", hammer_isactive(&ip->lock)));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT(hammer_notlocked(&ip->lock));
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->target_list));

	if (ip->flags & HAMMER_INODE_RDIRTY) {
		RB_REMOVE(hammer_redo_rb_tree, &hmp->rb_redo_root, ip);
		ip->flags &= ~HAMMER_INODE_RDIRTY;
	}
	RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

	hammer_free_inode(ip);
	return(0);
}
/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
{
	hammer_record_t rec;

	/*
	 * Get rid of the inodes in-memory records, regardless of their
	 * state, and clear the mod-mask.
	 */
	while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
		TAILQ_REMOVE(&ip->target_list, rec, target_entry);
		rec->target_ip = NULL;
		if (rec->flush_state == HAMMER_FST_SETUP)
			rec->flush_state = HAMMER_FST_IDLE;
	}
	while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
		if (rec->flush_state == HAMMER_FST_FLUSH)
			--rec->flush_group->refs;
		else
			hammer_ref(&rec->lock);
		KKASSERT(hammer_oneref(&rec->lock));
		rec->flush_state = HAMMER_FST_IDLE;
		rec->flush_group = NULL;
		rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
		rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
		++ip->rec_generation;
		hammer_rel_mem_record(rec);
	}
	ip->flags &= ~HAMMER_INODE_MODMASK;
	ip->sync_flags &= ~HAMMER_INODE_MODMASK;
	KKASSERT(ip->vp == NULL);

	/*
	 * Remove the inode from any flush group, force it idle.  FLUSH
	 * and SETUP states have an inode ref.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_FLUSH:
		RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
		--ip->flush_group->refs;
		ip->flush_group = NULL;
		/* fall through */
	case HAMMER_FST_SETUP:
		hammer_rel(&ip->lock);
		ip->flush_state = HAMMER_FST_IDLE;
		/* fall through */
	case HAMMER_FST_IDLE:
		break;
	}

	/*
	 * There shouldn't be any associated vnode.  The unload needs at
	 * least one ref, if we do have a vp steal its ip ref.
	 */
	if (ip->vp) {
		kprintf("hammer_destroy_inode_callback: Unexpected "
			"vnode association ip %p vp %p\n", ip, ip->vp);
		ip->vp->v_data = NULL;
		ip->vp = NULL;
	} else {
		hammer_ref(&ip->lock);
	}
	hammer_unload_inode(ip);
	return(0);
}
/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
	hammer_mount_t hmp = ip->hmp;

	if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
		ip->flags |= HAMMER_INODE_RO;
	else
		ip->flags &= ~HAMMER_INODE_RO;
	return(0);
}
/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated, not incl mtime/atime,
 *			and not including size changes due to write-append
 *			(but other size changes are included).
 * HAMMER_INODE_SDIRTY: Inode data has been updated, size changes due to
 *			write-append.
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
{
	/*
	 * ronly of 0 or 2 does not trigger assertion.
	 * 2 is a special error state
	 */
	KKASSERT(ip->hmp->ronly != 1 ||
		  (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
			    HAMMER_INODE_SDIRTY |
			    HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
			    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
	if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
		ip->flags |= HAMMER_INODE_RSV_INODES;
		++ip->hmp->rsv_inodes;
	}

	/*
	 * Set the NEWINODE flag in the transaction if the inode
	 * transitions to a dirty state.  This is used to track
	 * the load on the inode cache.
	 */
	if (trans &&
	    (ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    (flags & HAMMER_INODE_MODMASK)) {
		trans->flags |= HAMMER_TRANSF_NEWINODE;
	}

	ip->flags |= flags;
}
/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * Upon return if the inode could not be flushed due to a setup
 * dependency, then it will be automatically flushed when the dependency
 * is satisfied.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
	hammer_mount_t hmp;
	hammer_flush_group_t flg;
	int good;

	/*
	 * next_flush_group is the first flush group we can place the inode
	 * in.  It may be NULL.  If it becomes full we append a new flush
	 * group and make that the next_flush_group.
	 */
	hmp = ip->hmp;
	while ((flg = hmp->next_flush_group) != NULL) {
		KKASSERT(flg->running == 0);
		if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
			break;
		hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
		hammer_flusher_async(ip->hmp, flg);
	}
	if (flg == NULL) {
		flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
		hmp->next_flush_group = flg;
		RB_INIT(&flg->flush_tree);
		TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
	}

	/*
	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
	 * state we have to put it back into an IDLE state so we can
	 * drop the extra ref.
	 *
	 * If we have a parent dependency we must still fall through
	 * so we can run it.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		if (ip->flush_state == HAMMER_FST_SETUP &&
		    TAILQ_EMPTY(&ip->target_list)) {
			ip->flush_state = HAMMER_FST_IDLE;
			hammer_rel_inode(ip, 0);
		}
		if (ip->flush_state == HAMMER_FST_IDLE)
			return;
	}

	/*
	 * Our flush action will depend on the current state.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * We have no dependencies and can flush immediately.  Some
		 * of our children may not be flushable so we have to re-test
		 * with that additional knowledge.
		 */
		hammer_flush_inode_core(ip, flg, flags);
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Recurse upwards through dependencies via target_list
		 * and start their flusher actions going if possible.
		 *
		 * 'good' is our connectivity.  -1 means we have none and
		 * can't flush, 0 means there weren't any dependencies, and
		 * 1 means we have good connectivity.
		 */
		good = hammer_setup_parent_inodes(ip, 0, flg);

		if (good >= 0) {
			/*
			 * We can continue if good >= 0.  Determine how
			 * many records under our inode can be flushed (and
			 * mark them).
			 */
			hammer_flush_inode_core(ip, flg, flags);
		} else {
			/*
			 * Parent has no connectivity, tell it to flush
			 * us as soon as it does.
			 *
			 * The REFLUSH flag is also needed to trigger
			 * dependency wakeups.
			 */
			ip->flags |= HAMMER_INODE_CONN_DOWN |
				     HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * We are already flushing, flag the inode to reflush
		 * if needed after it completes its current flush.
		 *
		 * The REFLUSH flag is also needed to trigger
		 * dependency wakeups.
		 */
		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
			ip->flags |= HAMMER_INODE_REFLUSH;
		if (flags & HAMMER_FLUSH_SIGNAL) {
			ip->flags |= HAMMER_INODE_RESIGNAL;
			hammer_flusher_async(ip->hmp, flg);
		}
		break;
	}
}
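/*
 * To summarize the state machine above: an inode is always in one of
 * three flush states.  IDLE inodes can be placed directly into a flush
 * group, SETUP inodes must first resolve parent directory-entry
 * dependencies via hammer_setup_parent_inodes(), and already-FLUSHing
 * inodes are simply flagged REFLUSH so another pass occurs when the
 * current flush completes.
 */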
/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 *     so for now do not ref/deref the structures.  Note that if we use the
 *     ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
			   hammer_flush_group_t flg)
{
	hammer_record_t depend;
	int good;
	int r;

	/*
	 * If we hit our recursion limit and we have parent dependencies
	 * we cannot continue.  Returning < 0 will cause us to be flagged
	 * for reflush.  Returning -2 cuts off additional dependency checks
	 * because they are likely to also hit the depth limit.
	 *
	 * We cannot return < 0 if there are no dependencies or there might
	 * not be anything to wakeup (ip).
	 */
	if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
		kprintf("HAMMER Warning: depth limit reached on "
			"setup recursion, inode %p %016llx\n",
			ip, (long long)ip->obj_id);
		return(-2);
	}

	/*
	 * Scan dependencies
	 */
	good = 0;
	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
		r = hammer_setup_parent_inodes_helper(depend, depth, flg);
		KKASSERT(depend->target_ip == ip);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;

		/*
		 * If we failed due to the recursion depth limit then stop
		 * now.
		 */
		if (r == -2)
			break;
	}
	return(good);
}
/*
 * This helper function takes a record representing the dependency between
 * the parent inode and child inode.
 *
 * record->ip		= parent inode
 * record->target_ip	= child inode
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependency and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
				  hammer_flush_group_t flg)
{
	hammer_inode_t pip;
	int good;

	KKASSERT(record->flush_state != HAMMER_FST_IDLE);
	pip = record->ip;

	/*
	 * If the record is already flushing, is it in our flush group?
	 *
	 * If it is in our flush group but it is a general record or a
	 * delete-on-disk, it does not improve our connectivity (return 0),
	 * and if the target inode is not trying to destroy itself we can't
	 * allow the operation yet anyway (the second return -1).
	 */
	if (record->flush_state == HAMMER_FST_FLUSH) {
		/*
		 * If not in our flush group ask the parent to reflush
		 * us as soon as possible.
		 */
		if (record->flush_group != flg) {
			pip->flags |= HAMMER_INODE_REFLUSH;
			record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
			return(-1);
		}

		/*
		 * If in our flush group everything is already set up,
		 * just return whether the record will improve our
		 * visibility or not.
		 */
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		return(0);
	}

	/*
	 * It must be a setup record.  Try to resolve the setup dependencies
	 * by recursing upwards so we can place ip on the flush list.
	 *
	 * Limit ourselves to 20 levels of recursion to avoid blowing out
	 * the kernel stack.  If we hit the recursion limit we can't flush
	 * until the parent flushes.  The parent will flush independently
	 * on its own and ultimately a deep recursion will be resolved.
	 */
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	good = hammer_setup_parent_inodes(pip, depth + 1, flg);

	/*
	 * If good < 0 the parent has no connectivity and we cannot safely
	 * flush the directory entry, which also means we can't flush our
	 * ip.  Flag us for downward recursion once the parent's
	 * connectivity is resolved.  Flag the parent for [re]flush or it
	 * may not check for downward recursions.
	 */
	if (good < 0) {
		pip->flags |= HAMMER_INODE_REFLUSH;
		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
		return(good);
	}

	/*
	 * We are go, place the parent inode in a flushing state so we can
	 * place its record in a flushing state.  Note that the parent
	 * may already be flushing.  The record must be in the same flush
	 * group as the parent.
	 */
	if (pip->flush_state != HAMMER_FST_FLUSH)
		hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
	KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);

	/*
	 * It is possible for a rename to create a loop in the recursion
	 * and revisit a record.  This will result in the record being
	 * placed in a flush state unexpectedly.  This check deals with
	 * the case.
	 */
	if (record->flush_state == HAMMER_FST_FLUSH) {
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		return(0);
	}

	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

#if 0
	if (record->type == HAMMER_MEM_RECORD_DEL &&
	    (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
		/*
		 * Regardless of flushing state we cannot sync this path if the
		 * record represents a delete-on-disk but the target inode
		 * is not ready to sync its own deletion.
		 *
		 * XXX need to count effective nlinks to determine whether
		 * the flush is ok, otherwise removing a hardlink will
		 * just leave the DEL record to rot.
		 */
		record->target_ip->flags |= HAMMER_INODE_REFLUSH;
		return(-1);
	} else
#endif
	if (pip->flush_group == flg) {
		/*
		 * Because we have not calculated nlinks yet we can just
		 * set records to the flush state if the parent is in
		 * the same flush group as we are.
		 */
		record->flush_state = HAMMER_FST_FLUSH;
		record->flush_group = flg;
		++record->flush_group->refs;
		hammer_ref(&record->lock);

		/*
		 * A general directory-add contributes to our visibility.
		 *
		 * Otherwise it is probably a directory-delete or
		 * delete-on-disk record and does not contribute to our
		 * visibility (but we can still flush it).
		 */
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		return(0);
	} else {
		/*
		 * If the parent is not in our flush group we cannot
		 * flush this record yet, there is no visibility.
		 * We tell the parent to reflush and mark ourselves
		 * so the parent knows it should flush us too.
		 */
		pip->flags |= HAMMER_INODE_REFLUSH;
		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
		return(-1);
	}
}
/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
{
	int go_count;

	/*
	 * Set flush state and prevent the flusher from cycling into
	 * the next flush group.  Do not place the ip on the list yet.
	 * Inodes not in the idle state get an extra reference.
	 */
	KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
	if (ip->flush_state == HAMMER_FST_IDLE)
		hammer_ref(&ip->lock);
	ip->flush_state = HAMMER_FST_FLUSH;
	ip->flush_group = flg;
	++ip->hmp->flusher.group_lock;
	++ip->hmp->count_iqueued;
	++hammer_count_iqueued;
	++flg->total_count;
	hammer_redo_fifo_start_flush(ip);

	/*
	 * If the flush group reaches the autoflush limit we want to signal
	 * the flusher.  This is particularly important for remove()s.
	 *
	 * If the default hammer_limit_reclaim is changed via sysctl
	 * make sure we don't hit a degenerate case where we don't start
	 * a flush but blocked on further inode ops.
	 */
	if (flg->total_count == hammer_autoflush ||
	    flg->total_count >= hammer_limit_reclaim / 4)
		flags |= HAMMER_FLUSH_SIGNAL;

#if 0
	/*
	 * We need to be able to vfsync/truncate from the backend.
	 *
	 * XXX Any truncation from the backend will acquire the vnode
	 *     independently.
	 */
	KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
	if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
		ip->flags |= HAMMER_INODE_VHELD;
		vref(ip->vp);
	}
#endif

	/*
	 * Figure out how many in-memory records we can actually flush
	 * (not including inode meta-data, buffers, etc).
	 */
	KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
	if (flags & HAMMER_FLUSH_RECURSION) {
		/*
		 * If this is an upwards recursion we do not want to
		 * recurse down again!
		 */
		go_count = 1;
#if 0
	} else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		/*
		 * No new records are added if we must complete a flush
		 * from a previous cycle, but we do have to move the records
		 * from the previous cycle to the current one.
		 */
#if 0
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_syncgrp_child_callback, NULL);
#endif
		go_count = 1;
#endif
	} else {
		/*
		 * Normal flush, scan records and bring them into the flush.
		 * Directory adds and deletes are usually skipped (they are
		 * grouped with the related inode rather than with the
		 * directory).
		 *
		 * go_count can be negative, which means the scan aborted
		 * due to the flush group being over-full and we should
		 * flush what we have.
		 */
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_setup_child_callback, NULL);
	}

	/*
	 * This is a more involved test that includes go_count.  If we
	 * can't flush, flag the inode and return.  If go_count is 0 we
	 * were unable to flush any records in our rec_tree and
	 * must ignore the XDIRTY flag.
	 */
	if (go_count == 0) {
		if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
			--ip->hmp->count_iqueued;
			--hammer_count_iqueued;

			--flg->total_count;
			ip->flush_state = HAMMER_FST_SETUP;
			ip->flush_group = NULL;
#if 0
			if (ip->flags & HAMMER_INODE_VHELD) {
				ip->flags &= ~HAMMER_INODE_VHELD;
				vrele(ip->vp);
			}
#endif

			/*
			 * REFLUSH is needed to trigger dependency wakeups
			 * when an inode is in SETUP.
			 */
			ip->flags |= HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
			if (--ip->hmp->flusher.group_lock == 0)
				wakeup(&ip->hmp->flusher.group_lock);
			return;
		}
	}

	/*
	 * Snapshot the state of the inode for the backend flusher.
	 *
	 * We continue to retain save_trunc_off even when all truncations
	 * have been resolved as an optimization to determine if we can
	 * skip the B-Tree lookup for overwrite deletions.
	 *
	 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
	 * and stays in ip->flags.  Once set, it stays set until the
	 * inode is destroyed.
	 */
	if (ip->flags & HAMMER_INODE_TRUNCATED) {
		KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
		ip->sync_trunc_off = ip->trunc_off;
		ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
		ip->flags &= ~HAMMER_INODE_TRUNCATED;
		ip->sync_flags |= HAMMER_INODE_TRUNCATED;

		/*
		 * The save_trunc_off used to cache whether the B-Tree
		 * holds any records past that point is not used until
		 * after the truncation has succeeded, so we can safely
		 * set it now.
		 */
		if (ip->save_trunc_off > ip->sync_trunc_off)
			ip->save_trunc_off = ip->sync_trunc_off;
	}
	ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
			   ~HAMMER_INODE_TRUNCATED);
	ip->sync_ino_leaf = ip->ino_leaf;
	ip->sync_ino_data = ip->ino_data;
	ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
#ifdef DEBUG_TRUNCATE
	if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
		kprintf("truncateS %016llx\n", ip->sync_trunc_off);
#endif

	/*
	 * The flusher list inherits our inode and reference.
	 */
	KKASSERT(flg->running == 0);
	RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip);
	if (--ip->hmp->flusher.group_lock == 0)
		wakeup(&ip->hmp->flusher.group_lock);

	if (flags & HAMMER_FLUSH_SIGNAL) {
		hammer_flusher_async(ip->hmp, flg);
	}
}
2140 * Callback for scan of ip->rec_tree. Try to include each record in our
2141 * flush. ip->flush_group has been set but the inode has not yet been
2142 * moved into a flushing state.
2144 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
2147 * We return 1 for any record placed or found in FST_FLUSH, which prevents
2148 * the caller from shortcutting the flush.
2151 hammer_setup_child_callback(hammer_record_t rec, void *data)
2153 hammer_flush_group_t flg;
2154 hammer_inode_t target_ip;
2159 * Records deleted or committed by the backend are ignored.
2160 * Note that the flush detects deleted frontend records at
2161 * multiple points to deal with races. This is just the first
2162 * line of defense. The only time HAMMER_RECF_DELETED_FE cannot
2163 * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
2164 * messes up link-count calculations.
2166 * NOTE: Don't get confused between record deletion and, say,
2167 * directory entry deletion. The deletion of a directory entry
2168 * which is on-media has nothing to do with the record deletion
2171 if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
2172 HAMMER_RECF_COMMITTED)) {
2173 if (rec->flush_state == HAMMER_FST_FLUSH) {
2174 KKASSERT(rec->flush_group == rec->ip->flush_group);
2183 * If the record is in an idle state it has no dependancies and
2187 flg = ip->flush_group;
2190 switch(rec->flush_state) {
2191 case HAMMER_FST_IDLE:
2193 * The record has no setup dependancy, we can flush it.
2195 KKASSERT(rec->target_ip == NULL);
2196 rec->flush_state = HAMMER_FST_FLUSH;
2197 rec->flush_group = flg;
2199 hammer_ref(&rec->lock);
2202 case HAMMER_FST_SETUP:
2204 * The record has a setup dependancy. These are typically
2205 * directory entry adds and deletes. Such entries will be
2206 * flushed when their inodes are flushed so we do not
2207 * usually have to add them to the flush here. However,
2208 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
2209 * it is asking us to flush this record (and it).
2211 target_ip = rec->target_ip;
2212 KKASSERT(target_ip != NULL);
2213 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
2216 * If the target IP is already flushing in our group
2217 * we could associate the record, but target_ip has
2218 * already synced ino_data to sync_ino_data and we
2219 * would also have to adjust nlinks. Plus there are
2220 * ordering issues for adds and deletes.
2222 * Reflush downward if this is an ADD, and upward if
2223 * this is a DEL.
2225 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
2226 if (rec->type == HAMMER_MEM_RECORD_ADD)
2227 ip->flags |= HAMMER_INODE_REFLUSH;
2228 else
2229 target_ip->flags |= HAMMER_INODE_REFLUSH;
2234 * Target IP is not yet flushing. This can get complex
2235 * because we have to be careful about the recursion.
2237 * Directories create an issue for us in that if a flush
2238 * of a directory is requested the expectation is to flush
2239 * any pending directory entries, but this will cause the
2240 * related inodes to recursively flush as well. We can't
2241 * really defer the operation so just get as many as we
2242 * can.
2245 if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
2246 (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
2248 * We aren't reclaiming and the target ip was not
2249 * previously prevented from flushing due to this
2250 * record dependency. Do not flush this record.
2252 break;
2255 if (flg->total_count + flg->refs >
2256 ip->hmp->undo_rec_limit) {
2258 * Our flush group is over-full and we risk blowing
2259 * out the UNDO FIFO. Stop the scan, flush what we
2260 * have, then reflush the directory.
2262 * The directory may be forced through multiple
2263 * flush groups before it can be completely
2264 * flushed.
2266 ip->flags |= HAMMER_INODE_RESIGNAL |
2267 HAMMER_INODE_REFLUSH;
2269 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
2271 * If the target IP is not flushing we can force
2272 * it to flush. Even if it is unable to write out
2273 * any of its own records, we have at least one in
2274 * hand that we CAN deal with.
2276 rec->flush_state = HAMMER_FST_FLUSH;
2277 rec->flush_group = flg;
2278 ++flg->refs;
2279 hammer_ref(&rec->lock);
2280 hammer_flush_inode_core(target_ip, flg,
2281 HAMMER_FLUSH_RECURSION);
2285 * General or delete-on-disk record.
2287 * XXX this needs help. If this is a delete-on-disk we could
2288 * disconnect the target. If the target has its own
2289 * dependencies they really need to be flushed.
2293 rec->flush_state = HAMMER_FST_FLUSH;
2294 rec->flush_group = flg;
2295 ++flg->refs;
2296 hammer_ref(&rec->lock);
2297 hammer_flush_inode_core(target_ip, flg,
2298 HAMMER_FLUSH_RECURSION);
2302 case HAMMER_FST_FLUSH:
2304 * The flush_group should already match.
2306 KKASSERT(rec->flush_group == flg);
2315 * This version just moves records already in a flush state to the new
2316 * flush group and that is it.
2319 hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
2321 hammer_inode_t ip = rec->ip;
2323 switch(rec->flush_state) {
2324 case HAMMER_FST_FLUSH:
2325 KKASSERT(rec->flush_group == ip->flush_group);
2335 * Wait for a previously queued flush to complete.
2337 * If a critical error occurred we don't try to wait.
2340 hammer_wait_inode(hammer_inode_t ip)
2342 hammer_flush_group_t flg;
2345 if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
2346 while (ip->flush_state != HAMMER_FST_IDLE &&
2347 (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
2348 if (ip->flush_state == HAMMER_FST_SETUP)
2349 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2350 if (ip->flush_state != HAMMER_FST_IDLE) {
2351 ip->flags |= HAMMER_INODE_FLUSHW;
2352 tsleep(&ip->flags, 0, "hmrwin", 0);
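/*
 * Illustrative sketch of the interlock used above (condensed here as
 * an assumption, for clarity only): the waiter advertises itself with
 * HAMMER_INODE_FLUSHW and sleeps on &ip->flags; the backend clears
 * the flag and wakeup()s the same channel from
 * hammer_flush_inode_done() below.
 */
#if 0
	/* frontend waiter */
	ip->flags |= HAMMER_INODE_FLUSHW;
	tsleep(&ip->flags, 0, "hmrwin", 0);

	/* backend completion */
	if (ip->flags & HAMMER_INODE_FLUSHW) {
		ip->flags &= ~HAMMER_INODE_FLUSHW;
		wakeup(&ip->flags);
	}
#endif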
2359 * Called by the backend code when a flush has been completed.
2360 * The inode has already been removed from the flush list.
2362 * A pipelined flush can occur, in which case we must re-enter the
2363 * inode on the list and re-copy its fields.
2366 hammer_flush_inode_done(hammer_inode_t ip, int error)
2368 hammer_mount_t hmp;
2369 int dorel;
2371 KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
2372 hmp = ip->hmp;
2376 * Auto-reflush if the backend could not completely flush
2377 * the inode. This fixes a case where a deferred buffer flush
2378 * could cause fsync to return early.
2380 if (ip->sync_flags & HAMMER_INODE_MODMASK)
2381 ip->flags |= HAMMER_INODE_REFLUSH;
2384 * Merge left-over flags back into the frontend and fix the state.
2385 * Incomplete truncations are retained by the backend.
2388 ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
2389 ip->sync_flags &= HAMMER_INODE_TRUNCATED;
2392 * The backend may have adjusted nlinks, so if the adjusted nlinks
2393 * does not match the frontend, set the frontend's DDIRTY flag again.
2395 if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
2396 ip->flags |= HAMMER_INODE_DDIRTY;
2399 * Fix up the dirty buffer status.
2401 if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
2402 ip->flags |= HAMMER_INODE_BUFS;
2404 hammer_redo_fifo_end_flush(ip);
2407 * Re-set the XDIRTY flag if some of the inode's in-memory records
2408 * could not be flushed.
2410 KKASSERT((RB_EMPTY(&ip->rec_tree) &&
2411 (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
2412 (!RB_EMPTY(&ip->rec_tree) &&
2413 (ip->flags & HAMMER_INODE_XDIRTY) != 0));
2416 * Do not lose track of inodes which no longer have vnode
2417 * associations, otherwise they may never get flushed again.
2419 * The reflush flag can be set superfluously, causing extra pain
2420 * for no reason. If the inode is no longer modified it no longer
2421 * needs to be flushed.
2423 if (ip->flags & HAMMER_INODE_MODMASK) {
2425 ip->flags |= HAMMER_INODE_REFLUSH;
2426 } else {
2427 ip->flags &= ~HAMMER_INODE_REFLUSH;
2431 * Adjust the flush state.
2433 if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
2435 * We were unable to flush out all our records, leave the
2436 * inode in a flush state and in the current flush group.
2437 * The flush group will be re-run.
2439 * This occurs if the UNDO block gets too full or there is
2440 * too much dirty meta-data and allows the flusher to
2441 * finalize the UNDO block and then re-flush.
2443 ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
2447 * Remove from the flush_group
2449 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
2450 ip->flush_group = NULL;
2454 * Clean up the vnode ref and tracking counts.
2456 if (ip->flags & HAMMER_INODE_VHELD) {
2457 ip->flags &= ~HAMMER_INODE_VHELD;
2458 vrele(ip->vp);
2461 --hmp->count_iqueued;
2462 --hammer_count_iqueued;
2465 * And adjust the state.
2467 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
2468 ip->flush_state = HAMMER_FST_IDLE;
2469 dorel = 1;
2470 } else {
2471 ip->flush_state = HAMMER_FST_SETUP;
2472 dorel = 0;
2476 * If the frontend is waiting for a flush to complete,
2479 if (ip->flags & HAMMER_INODE_FLUSHW) {
2480 ip->flags &= ~HAMMER_INODE_FLUSHW;
2481 wakeup(&ip->flags);
2485 * If the frontend made more changes and requested another
2486 * flush, then try to get it running.
2488 * Reflushes are aborted when the inode is errored out.
2490 if (ip->flags & HAMMER_INODE_REFLUSH) {
2491 ip->flags &= ~HAMMER_INODE_REFLUSH;
2492 if (ip->flags & HAMMER_INODE_RESIGNAL) {
2493 ip->flags &= ~HAMMER_INODE_RESIGNAL;
2494 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2495 } else {
2496 hammer_flush_inode(ip, 0);
2502 * If we have no parent dependancies we can clear CONN_DOWN
2504 if (TAILQ_EMPTY(&ip->target_list))
2505 ip->flags &= ~HAMMER_INODE_CONN_DOWN;
2508 * If the inode is now clean drop the space reservation.
2510 if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
2511 (ip->flags & HAMMER_INODE_RSV_INODES)) {
2512 ip->flags &= ~HAMMER_INODE_RSV_INODES;
2513 --hmp->rsv_inodes;
2516 if (dorel)
2517 hammer_rel_inode(ip, 0);
2521 * Called from hammer_sync_inode() to synchronize in-memory records
2525 hammer_sync_record_callback(hammer_record_t record, void *data)
2527 hammer_cursor_t cursor = data;
2528 hammer_transaction_t trans = cursor->trans;
2529 hammer_mount_t hmp = trans->hmp;
2530 int error;
2533 * Skip records that do not belong to the current flush.
2535 ++hammer_stats_record_iterations;
2536 if (record->flush_state != HAMMER_FST_FLUSH)
2537 return(0);
2540 if (record->flush_group != record->ip->flush_group) {
2541 kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group, record->ip->flush_group);
2542 if (hammer_debug_critical)
2547 KKASSERT(record->flush_group == record->ip->flush_group);
2550 * Interlock the record using the BE flag. Once BE is set the
2551 * frontend cannot change the state of FE.
2553 * NOTE: If FE is set prior to us setting BE we still sync the
2554 * record out, but the flush completion code converts it to
2555 * a delete-on-disk record instead of destroying it.
2557 KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
2558 record->flags |= HAMMER_RECF_INTERLOCK_BE;
2561 * The backend has already disposed of the record.
2563 if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
2569 * If the whole inode is being deleted, all on-disk records will
2570 * be deleted very soon, we can't sync any new records to disk
2571 * because they will be deleted in the same transaction they were
2572 * created in (delete_tid == create_tid), which will assert.
2574 * XXX There may be a case with RECORD_ADD with DELETED_FE set
2575 * that we currently panic on.
2577 if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
2578 switch(record->type) {
2579 case HAMMER_MEM_RECORD_DATA:
2581 * We don't have to do anything, if the record was
2582 * committed the space will have been accounted for
2583 * anyway.
2586 case HAMMER_MEM_RECORD_GENERAL:
2588 * Set deleted-by-backend flag. Do not set the
2589 * backend committed flag, because we are throwing
2590 * the record away.
2592 record->flags |= HAMMER_RECF_DELETED_BE;
2593 ++record->ip->rec_generation;
2596 case HAMMER_MEM_RECORD_ADD:
2597 panic("hammer_sync_record_callback: illegal add "
2598 "during inode deletion record %p", record);
2599 break; /* NOT REACHED */
2600 case HAMMER_MEM_RECORD_INODE:
2601 panic("hammer_sync_record_callback: attempt to "
2602 "sync inode record %p?", record);
2603 break; /* NOT REACHED */
2604 case HAMMER_MEM_RECORD_DEL:
2606 * Follow through and issue the on-disk deletion
2613 * If DELETED_FE is set special handling is needed for directory
2614 * entries. Dependent pieces related to the directory entry may
2615 * have already been synced to disk. If this occurs we have to
2616 * sync the directory entry and then change the in-memory record
2617 * from an ADD to a DELETE to cover the fact that it's been
2618 * deleted by the frontend.
2620 * A directory delete covering record (MEM_RECORD_DEL) can never
2621 * be deleted by the frontend.
2623 * Any other record type (aka DATA) can be deleted by the frontend.
2624 * XXX At the moment the flusher must skip it because there may
2625 * be another data record in the flush group for the same block,
2626 * meaning that some frontend data changes can leak into the backend's
2627 * synchronization point.
2629 if (record->flags & HAMMER_RECF_DELETED_FE) {
2630 if (record->type == HAMMER_MEM_RECORD_ADD) {
2632 * Convert a front-end deleted directory-add to
2633 * a directory-delete entry later.
2635 record->flags |= HAMMER_RECF_CONVERT_DELETE;
2638 * Dispose of the record (race case). Mark as
2639 * deleted by backend (and not committed).
2641 KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
2642 record->flags |= HAMMER_RECF_DELETED_BE;
2643 ++record->ip->rec_generation;
2650 * Assign the create_tid for new records. Deletions already
2651 * have the record's entire key properly set up.
2653 if (record->type != HAMMER_MEM_RECORD_DEL) {
2654 record->leaf.base.create_tid = trans->tid;
2655 record->leaf.create_ts = trans->time32;
2659 * This actually moves the record to the on-media B-Tree. We
2660 * must also generate REDO_TERM entries in the UNDO/REDO FIFO
2661 * indicating that the related REDO_WRITE(s) have been committed.
2663 * During recovery any REDO_TERM's within the nominal recovery span
2664 * are ignored since the related meta-data is being undone, causing
2665 * any matching REDO_WRITEs to execute. The REDO_TERMs outside
2666 * the nominal recovery span will match against REDO_WRITEs and
2667 * prevent them from being executed (because the meta-data has
2668 * already been synchronized).
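/*
 * Worked example (illustrative restatement of the note above): if a
 * crash lands the REDO_TERM inside the nominal recovery span, the
 * TERM is ignored and the matching REDO_WRITE is replayed, since the
 * meta-data covering it is being undone. A TERM that falls outside
 * the span cancels its REDO_WRITE, because the B-Tree already holds
 * the synchronized data.
 */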
2670 if (record->flags & HAMMER_RECF_REDO) {
2671 KKASSERT(record->type == HAMMER_MEM_RECORD_DATA);
2672 hammer_generate_redo(trans, record->ip,
2673 record->leaf.base.key -
2674 record->leaf.data_len,
2675 HAMMER_REDO_TERM_WRITE,
2676 NULL,
2677 record->leaf.data_len);
2680 error = hammer_ip_sync_record_cursor(cursor, record);
2681 if (error != EDEADLK)
2682 break;
2683 hammer_done_cursor(cursor);
2684 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
2685 record->ip);
2689 record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
2694 hammer_flush_record_done(record, error);
2697 * Do partial finalization if we have built up too many dirty
2698 * buffers. Otherwise a buffer cache deadlock can occur when
2699 * doing things like creating tens of thousands of tiny files.
2701 * We must release our cursor lock to avoid a 3-way deadlock
2702 * due to the exclusive sync lock the finalizer must get.
2704 * WARNING: See warnings in hammer_unlock_cursor() function.
2706 if (hammer_flusher_meta_limit(hmp)) {
2707 hammer_unlock_cursor(cursor);
2708 hammer_flusher_finalize(trans, 0);
2709 hammer_lock_cursor(cursor);
2716 * Backend function called by the flusher to sync an inode to media.
2719 hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
2721 struct hammer_cursor cursor;
2722 hammer_node_t tmp_node;
2723 hammer_record_t depend;
2724 hammer_record_t next;
2725 int error, tmp_error;
2726 u_int64_t nlinks;
2728 if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
2729 return(0);
2731 error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2732 if (error)
2733 return(error);
2736 * Any directory records referencing this inode which are not in
2737 * our current flush group must adjust our nlink count for the
2738 * purposes of synchronizing to disk.
2740 * Records which are in our flush group can be unlinked from our
2741 * inode now, potentially allowing the inode to be physically
2742 * deleted.
2744 * This cannot block.
2746 nlinks = ip->ino_data.nlinks;
2747 next = TAILQ_FIRST(&ip->target_list);
2748 while ((depend = next) != NULL) {
2749 next = TAILQ_NEXT(depend, target_entry);
2750 if (depend->flush_state == HAMMER_FST_FLUSH &&
2751 depend->flush_group == ip->flush_group) {
2753 * If this is an ADD that was deleted by the frontend
2754 * the frontend nlinks count will have already been
2755 * decremented, but the backend is going to sync its
2756 * directory entry and must account for it. The
2757 * record will be converted to a delete-on-disk when
2758 * it gets synced.
2760 * If the ADD was not deleted by the frontend we
2761 * can remove the dependency from our target_list.
2763 if (depend->flags & HAMMER_RECF_DELETED_FE) {
2764 ++nlinks;
2765 } else {
2766 TAILQ_REMOVE(&ip->target_list, depend,
2767 target_entry);
2768 depend->target_ip = NULL;
2770 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
2772 * Not part of our flush group and not deleted by
2773 * the front-end, adjust the link count synced to
2774 * the media (undo what the frontend did when it
2775 * queued the record).
2777 KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
2778 switch(depend->type) {
2779 case HAMMER_MEM_RECORD_ADD:
2780 --nlinks;
2781 break;
2782 case HAMMER_MEM_RECORD_DEL:
2783 ++nlinks;
2784 break;
2792 * Set dirty if we had to modify the link count.
2794 if (ip->sync_ino_data.nlinks != nlinks) {
2795 KKASSERT((int64_t)nlinks >= 0);
2796 ip->sync_ino_data.nlinks = nlinks;
2797 ip->sync_flags |= HAMMER_INODE_DDIRTY;
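/*
 * Worked example (illustrative): if ip->ino_data.nlinks is 2 because
 * the frontend queued a directory-entry ADD, but that ADD is not in
 * this flush group, the loop above applies --nlinks and the media
 * copy is synced with nlinks == 1 until the group containing the
 * ADD is flushed.
 */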
2801 * If there is a truncation queued, destroy any data past the (aligned)
2802 * truncation point. Userland will have dealt with the buffer
2803 * containing the truncation point for us.
2805 * We don't flush pending frontend data buffers until after we've
2806 * dealt with the truncation.
2808 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
2810 * Interlock trunc_off. The VOP front-end may continue to
2811 * make adjustments to it while we are blocked.
2813 off_t trunc_off;
2814 off_t aligned_trunc_off;
2815 int blkmask;
2817 trunc_off = ip->sync_trunc_off;
2818 blkmask = hammer_blocksize(trunc_off) - 1;
2819 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
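/*
 * Example of the round-up (assuming hammer_blocksize() returns
 * HAMMER_BUFSIZE, 16384, at this offset):
 *
 * trunc_off         = 5000000
 * blkmask           = 16383
 * aligned_trunc_off = (5000000 + 16383) & ~16383 = 5013504
 *
 * Whole-block deletion below starts at the aligned offset; the
 * partially truncated block in front of it was already handled by
 * the frontend.
 */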
2822 * Delete any whole blocks on-media. The front-end has
2823 * already cleaned out any partial block and made it
2824 * pending. The front-end may have updated trunc_off
2825 * while we were blocked so we only use sync_trunc_off.
2827 * This operation can blow out the buffer cache, EWOULDBLOCK
2828 * means we were unable to complete the deletion. The
2829 * deletion will update sync_trunc_off in that case.
2831 error = hammer_ip_delete_range(&cursor, ip,
2832 aligned_trunc_off,
2833 0x7FFFFFFFFFFFFFFFLL, 2);
2834 if (error == EWOULDBLOCK) {
2835 ip->flags |= HAMMER_INODE_WOULDBLOCK;
2836 error = 0;
2837 goto defer_buffer_flush;
2844 * Generate a REDO_TERM_TRUNC entry in the UNDO/REDO FIFO.
2846 * XXX we do this even if we did not previously generate
2847 * a REDO_TRUNC record. This operation may enclose the
2848 * range for multiple prior truncation entries in the REDO
2849 * log.
2851 if (trans->hmp->version >= HAMMER_VOL_VERSION_FOUR &&
2852 (ip->flags & HAMMER_INODE_RDIRTY)) {
2853 hammer_generate_redo(trans, ip, aligned_trunc_off,
2854 HAMMER_REDO_TERM_TRUNC,
2855 NULL, 0);
2859 * Clear the truncation flag on the backend after we have
2860 * completed the deletions. Backend data is now good again
2861 * (including new records we are about to sync, below).
2863 * Leave sync_trunc_off intact. As we write additional
2864 * records the backend will update sync_trunc_off. This
2865 * tells the backend whether it can skip the overwrite
2866 * test. This should work properly even when the backend
2867 * writes full blocks where the truncation point straddles
2868 * the block because the comparison is against the base
2869 * offset of the record.
2871 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2872 /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
2878 * Now sync related records. These will typically be directory
2879 * entries, records tracking direct-writes, or delete-on-disk records.
2882 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2883 hammer_sync_record_callback, &cursor);
2889 hammer_cache_node(&ip->cache[1], cursor.node);
2892 * Re-seek for inode update, assuming our cache hasn't been ripped
2893 * out from under us.
2896 tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
2897 if (tmp_node) {
2898 hammer_cursor_downgrade(&cursor);
2899 hammer_lock_sh(&tmp_node->lock);
2900 if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
2901 hammer_cursor_seek(&cursor, tmp_node, 0);
2902 hammer_unlock(&tmp_node->lock);
2903 hammer_rel_node(tmp_node);
2909 * If we are deleting the inode the frontend had better not have
2910 * any active references on elements making up the inode.
2912 * The call to hammer_ip_delete_clean() cleans up auxiliary records
2913 * but not DB or DATA records. Those must have already been deleted
2914 * by the normal truncation mechanism.
2916 if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
2917 RB_EMPTY(&ip->rec_tree) &&
2918 (ip->sync_flags & HAMMER_INODE_DELETING) &&
2919 (ip->flags & HAMMER_INODE_DELETED) == 0) {
2920 int count1 = 0;
2922 error = hammer_ip_delete_clean(&cursor, ip, &count1);
2923 if (error == 0) {
2924 ip->flags |= HAMMER_INODE_DELETED;
2925 ip->sync_flags &= ~HAMMER_INODE_DELETING;
2926 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2927 KKASSERT(RB_EMPTY(&ip->rec_tree));
2930 * Set delete_tid in both the frontend and backend
2931 * copy of the inode record. The DELETED flag handles
2932 * this, do not set DDIRTY.
2934 ip->ino_leaf.base.delete_tid = trans->tid;
2935 ip->sync_ino_leaf.base.delete_tid = trans->tid;
2936 ip->ino_leaf.delete_ts = trans->time32;
2937 ip->sync_ino_leaf.delete_ts = trans->time32;
2941 * Adjust the inode count in the volume header
2943 hammer_sync_lock_sh(trans);
2944 if (ip->flags & HAMMER_INODE_ONDISK) {
2945 hammer_modify_volume_field(trans,
2946 trans->rootvol,
2947 vol0_stat_inodes);
2948 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
2949 hammer_modify_volume_done(trans->rootvol);
2951 hammer_sync_unlock(trans);
2957 ip->sync_flags &= ~HAMMER_INODE_BUFS;
2959 defer_buffer_flush:
2961 * Now update the inode's on-disk inode-data and/or on-disk record.
2962 * DELETED and ONDISK are managed only in ip->flags.
2964 * In the case of a deferred buffer flush we still update the on-disk
2965 * inode to satisfy visibility requirements if there happen to be
2966 * directory dependancies.
2968 switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
2969 case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
2971 * If deleted and on-disk, don't set any additional flags;
2972 * the delete flag takes care of things.
2974 * Clear flags which may have been set by the frontend.
2976 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2977 HAMMER_INODE_SDIRTY |
2978 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2979 HAMMER_INODE_DELETING);
2981 case HAMMER_INODE_DELETED:
2983 * Take care of the case where a deleted inode was never
2984 * flushed to the disk in the first place.
2986 * Clear flags which may have been set by the frontend.
2988 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
2989 HAMMER_INODE_SDIRTY |
2990 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
2991 HAMMER_INODE_DELETING);
2992 while (RB_ROOT(&ip->rec_tree)) {
2993 hammer_record_t record = RB_ROOT(&ip->rec_tree);
2994 hammer_ref(&record->lock);
2995 KKASSERT(hammer_oneref(&record->lock));
2996 record->flags |= HAMMER_RECF_DELETED_BE;
2997 ++record->ip->rec_generation;
2998 hammer_rel_mem_record(record);
3001 case HAMMER_INODE_ONDISK:
3003 * If already on-disk, do not set any additional flags.
3008 * If not on-disk and not deleted, set DDIRTY to force
3009 * an initial record to be written.
3011 * Also set the create_tid in both the frontend and backend
3012 * copy of the inode record.
3014 ip->ino_leaf.base.create_tid = trans->tid;
3015 ip->ino_leaf.create_ts = trans->time32;
3016 ip->sync_ino_leaf.base.create_tid = trans->tid;
3017 ip->sync_ino_leaf.create_ts = trans->time32;
3018 ip->sync_flags |= HAMMER_INODE_DDIRTY;
3023 * If DDIRTY or SDIRTY is set, write out a new record.
3024 * If the inode is already on-disk the old record is marked as
3025 * deleted.
3027 * If DELETED is set hammer_update_inode() will delete the existing
3028 * record without writing out a new one.
3030 * If *ONLY* the ITIMES flag is set we can update the record in-place.
3032 if (ip->flags & HAMMER_INODE_DELETED) {
3033 error = hammer_update_inode(&cursor, ip);
3034 } else
3035 if (!(ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY)) &&
3036 (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
3037 error = hammer_update_itimes(&cursor, ip);
3038 } else
3039 if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY |
3040 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
3041 error = hammer_update_inode(&cursor, ip);
3045 hammer_critical_error(ip->hmp, ip, error,
3046 "while syncing inode");
3048 hammer_done_cursor(&cursor);
3049 return(error);
3053 * This routine is called when the OS is no longer actively referencing
3054 * the inode (but might still be keeping it cached), or when releasing
3055 * the last reference to an inode.
3057 * At this point if the inode's nlinks count is zero we want to destroy
3058 * it, which may mean destroying it on-media too.
3061 hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
3063 struct vnode *vp;
3066 * Set the DELETING flag when the link count drops to 0 and the
3067 * OS no longer has any opens on the inode.
3069 * The backend will clear DELETING (a mod flag) and set DELETED
3070 * (a state flag) when it is actually able to perform the
3071 * operation.
3073 * Don't reflag the deletion if the flusher is currently syncing
3074 * one that was already flagged. A previously set DELETING flag
3075 * may bounce around flags and sync_flags until the operation is
3076 * complete.
3078 * Do not attempt to modify a snapshot inode (one set to read-only).
3080 if (ip->ino_data.nlinks == 0 &&
3081 ((ip->flags | ip->sync_flags) & (HAMMER_INODE_RO|HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
3082 ip->flags |= HAMMER_INODE_DELETING;
3083 ip->flags |= HAMMER_INODE_TRUNCATED;
3084 ip->trunc_off = 0;
3086 if (getvp) {
3087 if (hammer_get_vnode(ip, &vp) != 0)
3088 return;
3095 nvtruncbuf(ip->vp, 0, HAMMER_BUFSIZE, 0);
3102 * After potentially resolving a dependancy the inode is tested
3103 * to determine whether it needs to be reflushed.
3106 hammer_test_inode(hammer_inode_t ip)
3108 if (ip->flags & HAMMER_INODE_REFLUSH) {
3109 ip->flags &= ~HAMMER_INODE_REFLUSH;
3110 hammer_ref(&ip->lock);
3111 if (ip->flags & HAMMER_INODE_RESIGNAL) {
3112 ip->flags &= ~HAMMER_INODE_RESIGNAL;
3113 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
3115 hammer_flush_inode(ip, 0);
3117 hammer_rel_inode(ip, 0);
3122 * Clear the RECLAIM flag on an inode. This occurs when the inode is
3123 * reassociated with a vp or just before it gets freed.
3125 * Pipeline wakeups to threads blocked due to an excessive number of
3126 * detached inodes. This typically occurs when atime updates accumulate
3127 * while scanning a directory tree.
3130 hammer_inode_wakereclaims(hammer_inode_t ip)
3132 struct hammer_reclaim *reclaim;
3133 hammer_mount_t hmp = ip->hmp;
3135 if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
3136 return;
3138 --hammer_count_reclaiming;
3139 --hmp->inode_reclaims;
3140 ip->flags &= ~HAMMER_INODE_RECLAIM;
3142 while ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
3143 if (reclaim->count > 0 && --reclaim->count == 0) {
3144 TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
3145 wakeup(reclaim);
3147 if (hmp->inode_reclaims > hammer_limit_reclaim / 2)
3148 break;
3153 * Setup our reclaim pipeline. We only let so many detached (and dirty)
3154 * inodes build up before we start blocking. This routine is called
3155 * if a new inode is created or an inode is loaded from media.
3157 * When we block we don't care *which* inode has finished reclaiming,
3158 * as long as one does.
3161 hammer_inode_waitreclaims(hammer_transaction_t trans)
3163 hammer_mount_t hmp = trans->hmp;
3164 struct hammer_reclaim reclaim;
3165 int lower_limit;
3169 if (curthread->td_proc) {
3170 struct hammer_inostats *stats;
3173 stats = hammer_inode_inostats(hmp, curthread->td_proc->p_pid);
3176 if (stats->count > hammer_limit_reclaim / 2)
3177 stats->count = hammer_limit_reclaim / 2;
3178 lower_limit = hammer_limit_reclaim - stats->count;
3179 if (hammer_debug_general & 0x10000)
3180 kprintf("pid %5d limit %d\n", (int)curthread->td_proc->p_pid, lower_limit);
3182 if (hmp->inode_reclaims < lower_limit)
3183 return;
3188 if (hmp->inode_reclaims < hammer_limit_reclaim)
3189 return;
3191 reclaim.count = 1;
3192 TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
3193 tsleep(&reclaim, 0, "hmrrcm", hz);
3194 if (reclaim.count > 0)
3195 TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
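/*
 * Worked example (illustrative values): with hammer_limit_reclaim at
 * 4000, a process whose per-pid count is already 1000 gets
 * lower_limit = 4000 - 1000 = 3000 and starts blocking at 3000
 * reclaims, while a process with a count of 0 blocks only at the
 * full limit, throttling the heaviest producer first.
 */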
3199 * Keep track of reclaim statistics on a per-pid basis using a loose
3200 * 4-way set associative hash table. Collisions inherit the count of
3201 * the previous entry.
3203 * NOTE: We want to be careful here to limit the chain size. If the chain
3204 * size is too large a pid will spread its stats out over too many
3205 * entries under certain types of heavy filesystem activity and
3206 * wind up not delaying long enough.
3209 struct hammer_inostats *
3210 hammer_inode_inostats(hammer_mount_t hmp, pid_t pid)
3212 struct hammer_inostats *stats;
3213 int delta;
3214 int chain;
3215 static volatile int iterator; /* we don't care about MP races */
3218 * Chain up to 4 times to find our entry.
3220 for (chain = 0; chain < 4; ++chain) {
3221 stats = &hmp->inostats[(pid + chain) & HAMMER_INOSTATS_HMASK];
3222 if (stats->pid == pid)
3223 break;
3227 * Replace one of the four chaining entries with our new entry.
3230 stats = &hmp->inostats[(pid + (iterator++ & 3)) &
3231 HAMMER_INOSTATS_HMASK];
3232 stats->pid = pid;
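/*
 * Example (illustrative): pid 1000 probes slots (1000 + 0..3) &
 * HAMMER_INOSTATS_HMASK. On a miss one of those four slots is
 * chosen via the free-running iterator and only the pid field is
 * overwritten, so the displaced entry's count is inherited.
 */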
3238 if (stats->count && stats->ltick != ticks) {
3239 delta = ticks - stats->ltick;
3240 stats->ltick = ticks;
3241 if (delta <= 0 || delta > hz * 60)
3242 delta = hz * 60;
3244 stats->count = stats->count * hz / (hz + delta);
3246 if (hammer_debug_general & 0x10000)
3247 kprintf("pid %5d stats %d\n", (int)pid, stats->count);
3249 return(stats);
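/*
 * Decay example (illustrative): with hz == 100 and one second since
 * the last call (delta == 100), count becomes count * 100 / 200,
 * i.e. the per-pid count halves per idle second, with delta clamped
 * to one minute.
 */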
3254 * XXX not used, doesn't work very well due to the large batching nature
3255 * of flushes.
3257 * A larger than normal backlog of inodes is sitting in the flusher,
3258 * enforce a general slowdown to let it catch up. This routine is only
3259 * called on completion of a non-flusher-related transaction which
3260 * performed B-Tree node I/O.
3262 * It is possible for the flusher to stall in a continuous load.
3263 * blogbench -i1000 -o seems to do a good job generating this sort of load.
3264 * If the flusher is unable to catch up the inode count can bloat until
3265 * we run out of kvm.
3267 * This is a bit of a hack.
3270 hammer_inode_waithard(hammer_mount_t hmp)
3275 if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
3276 if (hmp->inode_reclaims < hammer_limit_reclaim / 2 &&
3277 hmp->count_iqueued < hmp->count_inodes / 20) {
3278 hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
3279 return;
3281 } else {
3282 if (hmp->inode_reclaims < hammer_limit_reclaim ||
3283 hmp->count_iqueued < hmp->count_inodes / 10) {
3284 return;
3286 hmp->flags |= HAMMER_MOUNT_FLUSH_RECOVERY;
3290 * Block for one flush cycle.
3292 hammer_flusher_wait_next(hmp);