/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.111 2008/09/17 21:44:20 dillon Exp $
 */
#include "hammer.h"
#include <vm/vm_extern.h>
static int	hammer_unload_inode(struct hammer_inode *ip);
static void	hammer_free_inode(hammer_inode_t ip);
static void	hammer_flush_inode_core(hammer_inode_t ip,
			hammer_flush_group_t flg, int flags);
static int	hammer_setup_child_callback(hammer_record_t rec, void *data);
static int	hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
static int	hammer_setup_parent_inodes(hammer_inode_t ip,
			hammer_flush_group_t flg);
static int	hammer_setup_parent_inodes_helper(hammer_record_t record,
			hammer_flush_group_t flg);
static void	hammer_inode_wakereclaims(hammer_inode_t ip);
#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif
/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_localization < ip2->obj_localization)
		return(-1);
	if (ip1->obj_localization > ip2->obj_localization)
		return(1);
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}
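/*
 * Illustrative sketch (not compiled in): the comparator above sorts by
 * obj_localization first, then obj_id, then obj_asof, so all as-of
 * snapshots of one object land adjacent in the tree.  A minimal userland
 * analog, with invented names, showing the same priority ordering:
 */
#if 0
#include <stdio.h>

struct key { unsigned lo; long long id; unsigned long long asof; };

static int
key_cmp(const struct key *a, const struct key *b)
{
	if (a->lo != b->lo)
		return((a->lo < b->lo) ? -1 : 1);
	if (a->id != b->id)
		return((a->id < b->id) ? -1 : 1);
	if (a->asof != b->asof)
		return((a->asof < b->asof) ? -1 : 1);
	return(0);
}

int
main(void)
{
	struct key a = { 0, 1, 100 };
	struct key b = { 0, 1, 200 };

	printf("%d\n", key_cmp(&a, &b));	/* prints -1: same object, older asof */
	return(0);
}
#endif
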
/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_localization < ip->obj_localization)
		return(-1);
	if (info->obj_localization > ip->obj_localization)
		return(1);
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}
/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t info = data;

	if (ip->obj_localization > info->obj_localization)
		return(1);
	if (ip->obj_localization < info->obj_localization)
		return(-1);
	if (ip->obj_id > info->obj_id)
		return(1);
	if (ip->obj_id < info->obj_id)
		return(-1);
	return(0);
}
/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
	u_int32_t localization = *(u_int32_t *)data;

	if (ip->obj_localization > localization)
		return(1);
	if (ip->obj_localization < localization)
		return(-1);
	return(0);
}
/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
	if (p1->localization < p2->localization)
		return(-1);
	if (p1->localization > p2->localization)
		return(1);
	return(0);
}
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
	     hammer_pfs_rb_compare, u_int32_t, localization);
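/*
 * Illustrative sketch (not compiled in): a minimal tree built with the
 * same <sys/tree.h> generator pattern used above, assuming a BSD
 * userland tree.h.  The names (demo_tree, demo_node, demo_cmp) are
 * invented for the example.
 */
#if 0
#include <sys/tree.h>
#include <stdio.h>

struct demo_node {
	RB_ENTRY(demo_node) entry;
	int key;
};

static int
demo_cmp(struct demo_node *a, struct demo_node *b)
{
	return((a->key < b->key) ? -1 : (a->key > b->key) ? 1 : 0);
}

RB_HEAD(demo_tree, demo_node);
RB_PROTOTYPE(demo_tree, demo_node, entry, demo_cmp);
RB_GENERATE(demo_tree, demo_node, entry, demo_cmp);

int
main(void)
{
	struct demo_tree head = RB_INITIALIZER(&head);
	struct demo_node n1 = { .key = 42 }, find = { .key = 42 };

	RB_INSERT(demo_tree, &head, &n1);	/* returns NULL on success */
	printf("%sfound\n", RB_FIND(demo_tree, &head, &find) ? "" : "not ");
	return(0);
}
#endif
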
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(ap->a_vp);
		return(0);
	}

	/*
	 * If the inode no longer has visibility in the filesystem try to
	 * recycle it immediately, even if the inode is dirty.  Recycling
	 * it quickly allows the system to reclaim buffer cache and VM
	 * resources which can matter a lot in a heavily loaded system.
	 *
	 * This can deadlock in vfsync() if we aren't careful.
	 *
	 * Do not queue the inode to the flusher if we still have visibility,
	 * otherwise namespace calls such as chmod will unnecessarily generate
	 * multiple inode updates.
	 */
	hammer_inode_unloadable_check(ip, 0);
	if (ip->ino_data.nlinks == 0) {
		if (ip->flags & HAMMER_INODE_MODMASK)
			hammer_flush_inode(ip, 0);
		vrecycle(ap->a_vp);
	}
	return(0);
}
/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;
	hammer_mount_t hmp;
	struct vnode *vp;

	vp = ap->a_vp;

	if ((ip = vp->v_data) != NULL) {
		hmp = ip->hmp;
		vp->v_data = NULL;
		ip->vp = NULL;

		if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
			++hammer_count_reclaiming;
			++hmp->inode_reclaims;
			ip->flags |= HAMMER_INODE_RECLAIM;

			/*
			 * Poke the flusher.  If we don't do this programs
			 * will start to stall on the reclaiming count.
			 */
			if (hmp->inode_reclaims > HAMMER_RECLAIM_FLUSH &&
			    (hmp->inode_reclaims & 255) == 0) {
				hammer_flusher_async(hmp, NULL);
			}
		}
		hammer_rel_inode(ip, 1);
	}
	return(0);
}
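/*
 * Illustrative sketch (not compiled in): the "(count & 255) == 0" test
 * above acts on every 256th event once a threshold is crossed, without
 * a division.  Userland analog with invented names; the threshold value
 * is a stand-in for HAMMER_RECLAIM_FLUSH, not its real definition:
 */
#if 0
#include <stdio.h>

#define RECLAIM_FLUSH_THRESHOLD	4000	/* invented stand-in value */

static void
note_reclaim(unsigned *count)
{
	++*count;
	if (*count > RECLAIM_FLUSH_THRESHOLD && (*count & 255) == 0)
		printf("poke flusher at %u reclaims\n", *count);
}

int
main(void)
{
	unsigned count = 0;
	int i;

	for (i = 0; i < 5000; ++i)
		note_reclaim(&count);
	return(0);
}
#endif
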
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
	hammer_mount_t hmp;
	struct vnode *vp;
	int error = 0;
	u_int8_t obj_type;

	hmp = ip->hmp;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				hammer_unlock(&ip->lock);
				vp = *vpp;
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;

			obj_type = ip->ino_data.obj_type;
			vp->v_type = hammer_get_vnode_type(obj_type);

			hammer_inode_wakereclaims(ip);

			switch(ip->ino_data.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
				break;
			default:
				break;
			}

			/*
			 * Only mark as the root vnode if the ip is not
			 * historical, otherwise the VFS cache will get
			 * confused.  The other half of the special handling
			 * is in hammer_vop_nlookupdotdot().
			 *
			 * Pseudo-filesystem roots can be accessed via
			 * non-root filesystem paths and setting VROOT may
			 * confuse the namecache.  Set VPFSROOT instead.
			 */
			if (ip->obj_id == HAMMER_OBJID_ROOT &&
			    ip->obj_asof == hmp->asof) {
				if (ip->obj_localization == 0)
					vp->v_flag |= VROOT;
				else
					vp->v_flag |= VPFSROOT;
			}

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG)
				vinitvmio(vp, ip->ino_data.size);
			break;
		}

		/*
		 * Loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp)
				break;
			vput(vp);
		}
	}
	*vpp = ip->vp;
	return(error);
}
/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
			    int (*callback)(hammer_inode_t ip, void *data),
			    void *data)
{
	hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
				   hammer_inode_info_cmp_all_history,
				   callback, iinfo);
}
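/*
 * Illustrative sketch (not compiled in): the shape of a callback
 * suitable for hammer_scan_inode_snapshots().  The callback name and
 * body are invented for the example; the real callers live in the
 * direct-io invalidation code.
 */
#if 0
static int
example_snapshot_callback(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t iinfo = data;

	/* inspect one cached snapshot of the object named by iinfo */
	kprintf("inode %016llx asof %016llx\n",
		(long long)ip->obj_id, (long long)ip->obj_asof);
	return(0);	/* continue the RB_SCAN */
}
#endif
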
/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), M_HAMMER_INO, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Locate the on-disk inode.  If this is a PFS root we always
	 * access the current version of the root inode and (if it is not
	 * a master) always access information under it with a snapshot
	 * TID.
	 */
retry:
	hammer_init_cursor(trans, &cursor, (dip ? &dip->cache[0] : NULL), NULL);
	cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;

	cursor.asof = iinfo.obj_asof;
	cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
		       HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
		ip->ino_data = cursor.data->inode;

		/*
		 * cache[0] tries to cache the location of the object inode.
		 * The assumption is that it is near the directory inode.
		 *
		 * cache[1] tries to cache the location of the object data.
		 * The assumption is that it is near the directory data.
		 */
		hammer_cache_node(&ip->cache[0], cursor.node);
		if (dip && dip->cache[1].node)
			hammer_cache_node(&ip->cache[1], dip->cache[1].node);

		/*
		 * The file should not contain any data past the file size
		 * stored in the inode.  Setting save_trunc_off to the
		 * file size instead of max reduces B-Tree lookup overheads
		 * on append by allowing the flusher to avoid checking for
		 * record overwrites.
		 */
		ip->save_trunc_off = ip->ino_data.size;

		/*
		 * Locate and assign the pseudofs management structure to
		 * the inode.
		 */
		if (dip && dip->obj_localization == ip->obj_localization) {
			ip->pfsm = dip->pfsm;
			hammer_ref(&ip->pfsm->lock);
		} else {
			ip->pfsm = hammer_load_pseudofs(trans,
							ip->obj_localization,
							errorp);
			*errorp = 0;	/* ignore ENOENT */
		}
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}

		hammer_free_inode(ip);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);
	return(ip);
}
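/*
 * Illustrative sketch (not compiled in): a typical frontend lookup
 * sequence using the transaction helpers used elsewhere in HAMMER.
 * Error handling is abbreviated; hmp, obj_id and vp are assumed to
 * come from the surrounding context.
 */
#if 0
	struct hammer_transaction trans;
	struct hammer_inode *ip;
	int error;

	hammer_simple_transaction(&trans, hmp);
	ip = hammer_get_inode(&trans, NULL, obj_id, hmp->asof,
			      HAMMER_DEF_LOCALIZATION, 0, &error);
	if (ip) {
		error = hammer_get_vnode(ip, &vp);	/* attach a vnode */
		hammer_rel_inode(ip, 0);		/* drop our ref */
	}
	hammer_done_transaction(&trans);
#endif
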
/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred, hammer_inode_t dip,
		    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;
	int error;

	hmp = trans->hmp;

	ip = kmalloc(sizeof(*ip), M_HAMMER_INO, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;

	if (pfsm) {
		KKASSERT(pfsm->localization != 0);
		ip->obj_id = HAMMER_OBJID_ROOT;
		ip->obj_localization = pfsm->localization;
	} else {
		KKASSERT(dip != NULL);
		ip->obj_id = hammer_alloc_objid(hmp, dip);
		ip->obj_localization = dip->obj_localization;
	}

	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flush_state = HAMMER_FST_IDLE;
	ip->flags = HAMMER_INODE_DDIRTY |
		    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;

	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	/* ip->save_trunc_off = 0; (already zero) */
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	ip->ino_data.atime = trans->time;
	ip->ino_data.mtime = trans->time;
	ip->ino_data.size = 0;
	ip->ino_data.nlinks = 0;

	/*
	 * A nohistory designator on the parent directory is inherited by
	 * the child.  We will do this even for pseudo-fs creation... the
	 * sysad can turn it off.
	 */
	if (dip) {
		ip->ino_data.uflags = dip->ino_data.uflags &
				      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
	}

	ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_leaf.base.localization = ip->obj_localization +
					 HAMMER_LOCALIZE_INODE;
	ip->ino_leaf.base.obj_id = ip->obj_id;
	ip->ino_leaf.base.key = 0;
	ip->ino_leaf.base.create_tid = 0;
	ip->ino_leaf.base.delete_tid = 0;
	ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->time;

	/*
	 * Setup the ".." pointer.  This only needs to be done for directories
	 * but we do it for all objects as a recovery aid.
	 */
	if (dip)
		ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;

	/*
	 * The parent_obj_localization field only applies to pseudo-fs roots.
	 * XXX this is no longer applicable, PFSs are no longer directly
	 * tied into the parent's directory structure.
	 */
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
	    ip->obj_id == HAMMER_OBJID_ROOT) {
		ip->ino_data.ext.obj.parent_obj_localization =
						dip->obj_localization;
	}

	switch(ip->ino_leaf.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	if (dip) {
		xuid = hammer_to_unix_xid(&dip->ino_data.uid);
		xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
					     xuid, cred, &vap->va_mode);
	} else {
		xuid = 0;
	}
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
	else
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
	else if (dip)
		ip->ino_data.gid = dip->ino_data.gid;

	hammer_ref(&ip->lock);

	if (pfsm) {
		ip->pfsm = pfsm;
		hammer_ref(&pfsm->lock);
		error = 0;
	} else if (dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
		error = 0;
	} else {
		ip->pfsm = hammer_load_pseudofs(trans,
						ip->obj_localization,
						&error);
		error = 0;	/* ignore ENOENT */
	}

	if (error) {
		hammer_free_inode(ip);
		ip = NULL;
	} else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		panic("hammer_create_inode: duplicate obj_id %llx",
		      (long long)ip->obj_id);
		/* not reached */
		hammer_free_inode(ip);
	}
	*ipp = ip;
	return(error);
}
/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
	KKASSERT(ip->lock.refs == 1);
	hammer_uncache_node(&ip->cache[0]);
	hammer_uncache_node(&ip->cache[1]);
	hammer_inode_wakereclaims(ip);
	if (ip->objid_cache)
		hammer_clear_objid(ip);
	--hammer_count_inodes;
	--ip->hmp->count_inodes;
	if (ip->pfsm) {
		hammer_rel_pseudofs(ip->hmp, ip->pfsm);
		ip->pfsm = NULL;
	}
	kfree(ip, M_HAMMER_INO);
}
/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
		     u_int32_t localization, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_inode_t ip;
	hammer_pseudofs_inmem_t pfsm;
	struct hammer_cursor cursor;
	int bytes;

retry:
	pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
	if (pfsm) {
		hammer_ref(&pfsm->lock);
		*errorp = 0;
		return(pfsm);
	}

	/*
	 * PFS records are stored in the root inode (not the PFS root inode,
	 * but the real root).  Avoid an infinite recursion if loading
	 * the PFS for the real root.
	 */
	if (localization) {
		ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
				      HAMMER_MAX_TID,
				      HAMMER_DEF_LOCALIZATION, 0, errorp);
	} else {
		ip = NULL;
	}

	pfsm = kmalloc(sizeof(*pfsm), M_HAMMER, M_WAITOK | M_ZERO);
	pfsm->localization = localization;
	pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
	pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

	hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
	cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	if (ip)
		*errorp = hammer_ip_lookup(&cursor);
	else
		*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == 0) {
		*errorp = hammer_ip_resolve_data(&cursor);
		if (*errorp == 0) {
			if (cursor.data->pfsd.mirror_flags &
			    HAMMER_PFSD_DELETED) {
				*errorp = ENOENT;
			} else {
				bytes = cursor.leaf->data_len;
				if (bytes > sizeof(pfsm->pfsd))
					bytes = sizeof(pfsm->pfsd);
				bcopy(cursor.data, &pfsm->pfsd, bytes);
			}
		}
	}
	hammer_done_cursor(&cursor);

	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_ref(&pfsm->lock);
	if (ip)
		hammer_rel_inode(ip, 0);
	if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
		kfree(pfsm, M_HAMMER);
		goto retry;
	}
	return(pfsm);
}
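/*
 * Illustrative sketch (not compiled in): the lookup / allocate /
 * RB_INSERT / retry-on-collision pattern used above, reduced to a
 * skeleton with invented names.  Losing the insert race simply frees
 * the local copy and retries the lookup.
 */
#if 0
	for (;;) {
		obj = RB_LOOKUP(demo_tree, &root, key);
		if (obj) {
			ref(obj);		/* reuse the winner's copy */
			break;
		}
		obj = alloc_and_load(key);	/* may block; races possible */
		if (RB_INSERT(demo_tree, &root, obj) == NULL)
			break;			/* we won the race */
		free_obj(obj);			/* someone beat us, retry */
	}
#endif
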
/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	hammer_inode_t ip;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = pfsm->localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	error = hammer_ip_lookup(&cursor);
	if (error == 0 && hammer_cursor_inmem(&cursor)) {
		record = cursor.iprec;
		if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
			KKASSERT(cursor.deadlk_rec == NULL);
			hammer_ref(&record->lock);
			cursor.deadlk_rec = record;
			error = EDEADLK;
		} else {
			record->flags |= HAMMER_RECF_DELETED_FE;
			error = 0;
		}
	}
	if (error == 0 || error == ENOENT) {
		record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
		record->type = HAMMER_MEM_RECORD_GENERAL;

		record->leaf.base.localization = ip->obj_localization +
						 HAMMER_LOCALIZE_MISC;
		record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
		record->leaf.base.key = pfsm->localization;
		record->leaf.data_len = sizeof(pfsm->pfsd);
		bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
		error = hammer_ip_add_record(trans, record);
	}
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	hammer_rel_inode(ip, 0);
	return(error);
}
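/*
 * Illustrative sketch (not compiled in): the EDEADLK convention used
 * throughout this file.  B-Tree operations that would deadlock unwind
 * and return EDEADLK; the caller tears the cursor down and starts over
 * rather than blocking with locks held.
 */
#if 0
retry:
	hammer_init_cursor(trans, &cursor, cache, ip);
	/* ... key setup ... */
	error = hammer_btree_lookup(&cursor);
	if (error == EDEADLK) {
		hammer_done_cursor(&cursor);	/* release all locks */
		goto retry;			/* and try again */
	}
#endif
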
/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
		       hammer_pseudofs_inmem_t pfsm)
{
	hammer_inode_t ip;
	struct vattr vap;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      pfsm->localization, 0, &error);
	if (ip == NULL) {
		vattr_null(&vap);
		vap.va_mode = 0755;
		vap.va_type = VDIR;
		error = hammer_create_inode(trans, &vap, cred, NULL, pfsm, &ip);
		if (error == 0) {
			++ip->ino_data.nlinks;
			hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (ip)
		hammer_rel_inode(ip, 0);
	return(error);
}
/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
	int res;

	hammer_ref(&ip->lock);
	if (ip->lock.refs == 2 && ip->vp)
		vclean_unlocked(ip->vp);
	if (ip->lock.refs == 1 && ip->vp == NULL)
		res = 0;
	else
		res = -1;	/* stop, someone is using the inode */
	hammer_rel_inode(ip, 0);
	return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
	int res;
	int try;

	for (try = res = 0; try < 4; ++try) {
		res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
						 hammer_inode_pfs_cmp,
						 hammer_unload_pseudofs_callback,
						 &localization);
		if (res == 0 && try > 1)
			break;
		hammer_flusher_sync(trans->hmp);
	}
	if (res != 0)
		res = ENOTEMPTY;
	return(res);
}
/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
	hammer_unref(&pfsm->lock);
	if (pfsm->lock.refs == 0) {
		RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
		kfree(pfsm, M_HAMMER);
	}
}
/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_record_t record;
	int error;
	int redirty;

retry:
	error = 0;

	/*
	 * If the inode has a presence on-disk then locate it and mark
	 * it deleted, setting DELONDISK.
	 *
	 * The record may or may not be physically deleted, depending on
	 * the retention policy.
	 */
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = ip->obj_localization +
					       HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (hammer_debug_inode)
			kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

		if (error == 0) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			if (hammer_debug_inode)
				kprintf(" error %d\n", error);
			if (error == 0) {
				ip->flags |= HAMMER_INODE_DELONDISK;
			}
			if (cursor->node)
				hammer_cache_node(&ip->cache[0], cursor->node);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("IPDED %p %d\n", ip, error);
			if (error == 0)
				goto retry;
		}
	}

	/*
	 * Ok, write out the initial record or a new record (after deleting
	 * the old one), unless the DELETED flag is set.  This routine will
	 * clear DELONDISK if it writes out a record.
	 *
	 * Update our inode statistics if this is the first application of
	 * the inode on-disk.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		/*
		 * Generate a record and write it to the media.  We clean-up
		 * the state before releasing so we do not have to set-up
		 * a flush_group.
		 */
		record = hammer_alloc_mem_record(ip, 0);
		record->type = HAMMER_MEM_RECORD_INODE;
		record->flush_state = HAMMER_FST_FLUSH;
		record->leaf = ip->sync_ino_leaf;
		record->leaf.base.create_tid = trans->tid;
		record->leaf.data_len = sizeof(ip->sync_ino_data);
		record->leaf.create_ts = trans->time32;
		record->data = (void *)&ip->sync_ino_data;
		record->flags |= HAMMER_RECF_INTERLOCK_BE;

		/*
		 * If this flag is set we cannot sync the new file size
		 * because we haven't finished related truncations.  The
		 * inode will be flushed in another flush group to finish
		 * the job.
		 */
		if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
		    ip->sync_ino_data.size != ip->ino_data.size) {
			redirty = 1;
			ip->sync_ino_data.size = ip->ino_data.size;
		} else {
			redirty = 0;
		}

		for (;;) {
			error = hammer_ip_sync_record_cursor(cursor, record);
			if (hammer_debug_inode)
				kprintf("GENREC %p rec %08x %d\n",
					ip, record->flags, error);
			if (error != EDEADLK)
				break;
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("GENREC reinit %d\n", error);
			if (error)
				break;
		}

		/*
		 * The record isn't managed by the inode's record tree,
		 * destroy it whether we succeed or fail.
		 */
		record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
		record->flags |= HAMMER_RECF_DELETED_FE | HAMMER_RECF_COMMITTED;
		record->flush_state = HAMMER_FST_IDLE;
		hammer_rel_mem_record(record);

		/*
		 * Finish up.
		 */
		if (error == 0) {
			if (hammer_debug_inode)
				kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
			ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
					    HAMMER_INODE_ATIME |
					    HAMMER_INODE_MTIME);
			ip->flags &= ~HAMMER_INODE_DELONDISK;
			if (redirty)
				ip->sync_flags |= HAMMER_INODE_DDIRTY;

			/*
			 * Root volume count of inodes
			 */
			hammer_sync_lock_sh(trans);
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
				if (hammer_debug_inode)
					kprintf("NOWONDISK %p\n", ip);
			}
			hammer_sync_unlock(trans);
		}
	}

	/*
	 * If the inode has been destroyed, clean out any left-over flags
	 * that may have been set by the frontend.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
				    HAMMER_INODE_ATIME |
				    HAMMER_INODE_MTIME);
	}
	return(error);
}
/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

retry:
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
	    HAMMER_INODE_ONDISK) {
		error = 0;
		goto done;
	}

	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_INODE;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.key = 0;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor->key_beg.obj_type = 0;
	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_GET_LEAF;
	cursor->flags |= HAMMER_CURSOR_GET_DATA;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(cursor);
	if (error == 0) {
		hammer_cache_node(&ip->cache[0], cursor->node);
		if (ip->sync_flags & HAMMER_INODE_MTIME) {
			/*
			 * Updating MTIME requires an UNDO.  Just cover
			 * both atime and mtime.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
				     HAMMER_ITIMES_BASE(&cursor->data->inode),
				     HAMMER_ITIMES_BYTES);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			cursor->data->inode.mtime = ip->sync_ino_data.mtime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		} else if (ip->sync_flags & HAMMER_INODE_ATIME) {
			/*
			 * Updating atime only can be done in-place with
			 * no UNDO.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
					     NULL, 0);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		}
		ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor,
					   &ip->cache[0], ip);
		if (error == 0)
			goto retry;
	}
done:
	return(error);
}
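/*
 * Illustrative sketch (not compiled in): the two buffer-modification
 * idioms used above.  Passing a base/size range to
 * hammer_modify_buffer() generates UNDO covering that range; passing
 * NULL/0 modifies the buffer without UNDO, which is only safe for
 * fields (like atime) excluded from the leaf CRC.  "ondisk" and
 * "new_value" are placeholders for the example.
 */
#if 0
	/* with UNDO: crash-safe update of a covered range */
	hammer_modify_buffer(trans, buffer,
			     &ondisk->field, sizeof(ondisk->field));
	ondisk->field = new_value;
	hammer_modify_buffer_done(buffer);

	/* without UNDO: CRC-exempt fields only */
	hammer_modify_buffer(trans, buffer, NULL, 0);
	ondisk->atime = new_atime;
	hammer_modify_buffer_done(buffer);
#endif
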
/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	hammer_mount_t hmp = ip->hmp;

	/*
	 * Handle disposition when dropping the last ref.
	 */
	for (;;) {
		if (ip->lock.refs == 1) {
			/*
			 * Determine whether on-disk action is needed for
			 * the inode's final disposition.
			 */
			KKASSERT(ip->vp == NULL);
			hammer_inode_unloadable_check(ip, 0);
			if (ip->flags & HAMMER_INODE_MODMASK) {
				if (hmp->rsv_inodes > desiredvnodes) {
					hammer_flush_inode(ip,
							   HAMMER_FLUSH_SIGNAL);
				} else {
					hammer_flush_inode(ip, 0);
				}
			} else if (ip->lock.refs == 1) {
				hammer_unload_inode(ip);
				break;
			}
		} else {
			if (flush)
				hammer_flush_inode(ip, 0);

			/*
			 * The inode still has multiple refs, try to drop
			 * one ref.
			 */
			KKASSERT(ip->lock.refs >= 1);
			if (ip->lock.refs > 1) {
				hammer_unref(&ip->lock);
				break;
			}
		}
	}
}
/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
	hammer_mount_t hmp = ip->hmp;

	KASSERT(ip->lock.refs == 1,
		("hammer_unload_inode: %d refs\n", ip->lock.refs));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT(ip->lock.lockcount == 0);
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->target_list));

	RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

	hammer_free_inode(ip);
	return(0);
}
/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
{
	hammer_record_t rec;

	/*
	 * Get rid of the inode's in-memory records, regardless of their
	 * state, and clear the mod-mask.
	 */
	while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
		TAILQ_REMOVE(&ip->target_list, rec, target_entry);
		rec->target_ip = NULL;
		if (rec->flush_state == HAMMER_FST_SETUP)
			rec->flush_state = HAMMER_FST_IDLE;
	}
	while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
		if (rec->flush_state == HAMMER_FST_FLUSH)
			--rec->flush_group->refs;
		else
			hammer_ref(&rec->lock);
		KKASSERT(rec->lock.refs == 1);
		rec->flush_state = HAMMER_FST_IDLE;
		rec->flush_group = NULL;
		rec->flags |= HAMMER_RECF_DELETED_FE;
		rec->flags |= HAMMER_RECF_DELETED_BE;
		hammer_rel_mem_record(rec);
	}
	ip->flags &= ~HAMMER_INODE_MODMASK;
	ip->sync_flags &= ~HAMMER_INODE_MODMASK;
	KKASSERT(ip->vp == NULL);

	/*
	 * Remove the inode from any flush group, force it idle.  FLUSH
	 * and SETUP states have an inode ref.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_FLUSH:
		TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
		--ip->flush_group->refs;
		ip->flush_group = NULL;
		/* fall through */
	case HAMMER_FST_SETUP:
		hammer_unref(&ip->lock);
		ip->flush_state = HAMMER_FST_IDLE;
		/* fall through */
	case HAMMER_FST_IDLE:
		break;
	}

	/*
	 * There shouldn't be any associated vnode.  The unload needs at
	 * least one ref, if we do have a vp steal its ip ref.
	 */
	if (ip->vp) {
		kprintf("hammer_destroy_inode_callback: Unexpected "
			"vnode association ip %p vp %p\n", ip, ip->vp);
		ip->vp->v_data = NULL;
		ip->vp = NULL;
	} else {
		hammer_ref(&ip->lock);
	}
	hammer_unload_inode(ip);
	return(0);
}
/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
	hammer_mount_t hmp = ip->hmp;

	if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
		ip->flags |= HAMMER_INODE_RO;
	else
		ip->flags &= ~HAMMER_INODE_RO;
	return(0);
}
/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
	/*
	 * ronly of 0 or 2 does not trigger assertion.
	 * 2 is a special error state.
	 */
	KKASSERT(ip->hmp->ronly != 1 ||
		 (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
			   HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
			   HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
	if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
		ip->flags |= HAMMER_INODE_RSV_INODES;
		++ip->hmp->rsv_inodes;
	}

	ip->flags |= flags;
}
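/*
 * Illustrative sketch (not compiled in): how callers flag an inode
 * dirty.  For example, after bumping nlinks, as hammer_mkroot_pseudofs()
 * does above:
 */
#if 0
	++ip->ino_data.nlinks;
	hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
#endif
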
/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * Upon return if the inode could not be flushed due to a setup
 * dependency, then it will be automatically flushed when the dependency
 * is satisfied.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
	hammer_mount_t hmp;
	hammer_flush_group_t flg;
	int good;

	/*
	 * next_flush_group is the first flush group we can place the inode
	 * in.  It may be NULL.  If it becomes full we append a new flush
	 * group and make that the next_flush_group.
	 */
	hmp = ip->hmp;
	while ((flg = hmp->next_flush_group) != NULL) {
		KKASSERT(flg->running == 0);
		if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
			break;
		hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
		hammer_flusher_async(ip->hmp, flg);
	}
	if (flg == NULL) {
		flg = kmalloc(sizeof(*flg), M_HAMMER, M_WAITOK|M_ZERO);
		hmp->next_flush_group = flg;
		TAILQ_INIT(&flg->flush_list);
		TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
	}

	/*
	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
	 * state we have to put it back into an IDLE state so we can
	 * drop the extra ref.
	 *
	 * If we have a parent dependency we must still fall through
	 * so we can run it.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		if (ip->flush_state == HAMMER_FST_SETUP &&
		    TAILQ_EMPTY(&ip->target_list)) {
			ip->flush_state = HAMMER_FST_IDLE;
			hammer_rel_inode(ip, 0);
		}
		if (ip->flush_state == HAMMER_FST_IDLE)
			return;
	}

	/*
	 * Our flush action will depend on the current state.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * We have no dependencies and can flush immediately.  Some
		 * of our children may not be flushable so we have to re-test
		 * with that additional knowledge.
		 */
		hammer_flush_inode_core(ip, flg, flags);
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Recurse upwards through dependencies via target_list
		 * and start their flusher actions going if possible.
		 *
		 * 'good' is our connectivity.  -1 means we have none and
		 * can't flush, 0 means there weren't any dependencies, and
		 * 1 means we have good connectivity.
		 */
		good = hammer_setup_parent_inodes(ip, flg);

		if (good >= 0) {
			/*
			 * We can continue if good >= 0.  Determine how
			 * many records under our inode can be flushed (and
			 * mark them).
			 */
			hammer_flush_inode_core(ip, flg, flags);
		} else {
			/*
			 * Parent has no connectivity, tell it to flush
			 * us as soon as it does.
			 *
			 * The REFLUSH flag is also needed to trigger
			 * dependency wakeups.
			 */
			ip->flags |= HAMMER_INODE_CONN_DOWN |
				     HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * We are already flushing, flag the inode to reflush
		 * if needed after it completes its current flush.
		 *
		 * The REFLUSH flag is also needed to trigger
		 * dependency wakeups.
		 */
		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
			ip->flags |= HAMMER_INODE_REFLUSH;
		if (flags & HAMMER_FLUSH_SIGNAL) {
			ip->flags |= HAMMER_INODE_RESIGNAL;
			hammer_flusher_async(ip->hmp, flg);
		}
		break;
	}
}
/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 * so for now do not ref/deref the structures.  Note that if we use the
 * ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip, hammer_flush_group_t flg)
{
	hammer_record_t depend;
	int good;
	int r;

	good = 0;
	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
		r = hammer_setup_parent_inodes_helper(depend, flg);
		KKASSERT(depend->target_ip == ip);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;
	}
	return(good);
}
/*
 * This helper function takes a record representing the dependency between
 * the parent inode and child inode.
 *
 * record->ip		= parent inode
 * record->target_ip	= child inode
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependency and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record,
				  hammer_flush_group_t flg)
{
	hammer_inode_t pip;
	int good;

	KKASSERT(record->flush_state != HAMMER_FST_IDLE);
	pip = record->ip;

	/*
	 * If the record is already flushing, is it in our flush group?
	 *
	 * If it is in our flush group but it is a general record or a
	 * delete-on-disk, it does not improve our connectivity (return 0),
	 * and if the target inode is not trying to destroy itself we can't
	 * allow the operation yet anyway (the second return -1).
	 */
	if (record->flush_state == HAMMER_FST_FLUSH) {
		/*
		 * If not in our flush group ask the parent to reflush
		 * us as soon as possible.
		 */
		if (record->flush_group != flg) {
			pip->flags |= HAMMER_INODE_REFLUSH;
			record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
			return(-1);
		}

		/*
		 * If in our flush group everything is already set up,
		 * just return whether the record will improve our
		 * visibility or not.
		 */
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		return(0);
	}

	/*
	 * It must be a setup record.  Try to resolve the setup dependencies
	 * by recursing upwards so we can place ip on the flush list.
	 */
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	good = hammer_setup_parent_inodes(pip, flg);

	/*
	 * If good < 0 the parent has no connectivity and we cannot safely
	 * flush the directory entry, which also means we can't flush our
	 * ip.  Flag the parent and us for downward recursion once the
	 * parent's connectivity is resolved.
	 */
	if (good < 0) {
		/* pip->flags |= HAMMER_INODE_CONN_DOWN; set by recursion */
		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
		return(good);
	}

	/*
	 * We are go, place the parent inode in a flushing state so we can
	 * place its record in a flushing state.  Note that the parent
	 * may already be flushing.  The record must be in the same flush
	 * group as the parent.
	 */
	if (pip->flush_state != HAMMER_FST_FLUSH)
		hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
	KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

#if 0
	if (record->type == HAMMER_MEM_RECORD_DEL &&
	    (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
		/*
		 * Regardless of flushing state we cannot sync this path if the
		 * record represents a delete-on-disk but the target inode
		 * is not ready to sync its own deletion.
		 *
		 * XXX need to count effective nlinks to determine whether
		 * the flush is ok, otherwise removing a hardlink will
		 * just leave the DEL record to rot.
		 */
		record->target_ip->flags |= HAMMER_INODE_REFLUSH;
		return(-1);
	} else
#endif
	if (pip->flush_group == flg) {
		/*
		 * Because we have not calculated nlinks yet we can just
		 * set records to the flush state if the parent is in
		 * the same flush group as we are.
		 */
		record->flush_state = HAMMER_FST_FLUSH;
		record->flush_group = flg;
		++record->flush_group->refs;
		hammer_ref(&record->lock);

		/*
		 * A general directory-add contributes to our visibility.
		 *
		 * Otherwise it is probably a directory-delete or
		 * delete-on-disk record and does not contribute to our
		 * visibility (but we can still flush it).
		 */
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		return(0);
	} else {
		/*
		 * If the parent is not in our flush group we cannot
		 * flush this record yet, there is no visibility.
		 * We tell the parent to reflush and mark ourselves
		 * so the parent knows it should flush us too.
		 */
		pip->flags |= HAMMER_INODE_REFLUSH;
		record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
		return(-1);
	}
}
/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
{
	int go_count;

	/*
	 * Set flush state and prevent the flusher from cycling into
	 * the next flush group.  Do not place the ip on the list yet.
	 * Inodes not in the idle state get an extra reference.
	 */
	KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
	if (ip->flush_state == HAMMER_FST_IDLE)
		hammer_ref(&ip->lock);
	ip->flush_state = HAMMER_FST_FLUSH;
	ip->flush_group = flg;
	++ip->hmp->flusher.group_lock;
	++ip->hmp->count_iqueued;
	++hammer_count_iqueued;
	++flg->total_count;

	/*
	 * We need to be able to vfsync/truncate from the backend.
	 */
	KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
	if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
		ip->flags |= HAMMER_INODE_VHELD;
		vref(ip->vp);
	}

	/*
	 * Figure out how many in-memory records we can actually flush
	 * (not including inode meta-data, buffers, etc).
	 */
	KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
	if (flags & HAMMER_FLUSH_RECURSION) {
		/*
		 * If this is an upwards recursion we do not want to
		 * recurse down again!
		 */
		go_count = 1;
#if 0
	} else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		/*
		 * No new records are added if we must complete a flush
		 * from a previous cycle, but we do have to move the records
		 * from the previous cycle to the current one.
		 */
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_syncgrp_child_callback, NULL);
		go_count = 1;
#endif
	} else {
		/*
		 * Normal flush, scan records and bring them into the flush.
		 * Directory adds and deletes are usually skipped (they are
		 * grouped with the related inode rather than with the
		 * directory).
		 *
		 * go_count can be negative, which means the scan aborted
		 * due to the flush group being over-full and we should
		 * flush what we have.
		 */
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_setup_child_callback, NULL);
	}

	/*
	 * This is a more involved test that includes go_count.  If we
	 * can't flush, flag the inode and return.  If go_count is 0 we
	 * are unable to flush any records in our rec_tree and
	 * must ignore the XDIRTY flag.
	 */
	if (go_count == 0) {
		if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
			--ip->hmp->count_iqueued;
			--hammer_count_iqueued;

			--flg->total_count;
			ip->flush_state = HAMMER_FST_SETUP;
			ip->flush_group = NULL;
			if (ip->flags & HAMMER_INODE_VHELD) {
				ip->flags &= ~HAMMER_INODE_VHELD;
				vrele(ip->vp);
			}

			/*
			 * REFLUSH is needed to trigger dependency wakeups
			 * when an inode is in SETUP.
			 */
			ip->flags |= HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp, flg);
			}
			if (--ip->hmp->flusher.group_lock == 0)
				wakeup(&ip->hmp->flusher.group_lock);
			return;
		}
	}

	/*
	 * Snapshot the state of the inode for the backend flusher.
	 *
	 * We continue to retain save_trunc_off even when all truncations
	 * have been resolved as an optimization to determine if we can
	 * skip the B-Tree lookup for overwrite deletions.
	 *
	 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
	 * and stays in ip->flags.  Once set, it stays set until the
	 * inode is destroyed.
	 */
	if (ip->flags & HAMMER_INODE_TRUNCATED) {
		KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
		ip->sync_trunc_off = ip->trunc_off;
		ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
		ip->flags &= ~HAMMER_INODE_TRUNCATED;
		ip->sync_flags |= HAMMER_INODE_TRUNCATED;

		/*
		 * The save_trunc_off used to cache whether the B-Tree
		 * holds any records past that point is not used until
		 * after the truncation has succeeded, so we can safely
		 * set it now.
		 */
		if (ip->save_trunc_off > ip->sync_trunc_off)
			ip->save_trunc_off = ip->sync_trunc_off;
	}
	ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
			   ~HAMMER_INODE_TRUNCATED);
	ip->sync_ino_leaf = ip->ino_leaf;
	ip->sync_ino_data = ip->ino_data;
	ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
#ifdef DEBUG_TRUNCATE
	if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
		kprintf("truncateS %016llx\n", (long long)ip->sync_trunc_off);
#endif

	/*
	 * The flusher list inherits our inode and reference.
	 */
	KKASSERT(flg->running == 0);
	TAILQ_INSERT_TAIL(&flg->flush_list, ip, flush_entry);
	if (--ip->hmp->flusher.group_lock == 0)
		wakeup(&ip->hmp->flusher.group_lock);

	if (flags & HAMMER_FLUSH_SIGNAL) {
		hammer_flusher_async(ip->hmp, flg);
	}
}
/*
 * Callback for scan of ip->rec_tree.  Try to include each record in our
 * flush.  ip->flush_group has been set but the inode has not yet been
 * moved into a flushing state.
 *
 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
 * both inodes.
 *
 * We return 1 for any record placed or found in FST_FLUSH, which prevents
 * the caller from shortcutting the flush.
 */
static int
hammer_setup_child_callback(hammer_record_t rec, void *data)
{
	hammer_flush_group_t flg;
	hammer_inode_t target_ip;
	hammer_inode_t ip;
	int r;

	/*
	 * Deleted records are ignored.  Note that the flush detects deleted
	 * front-end records at multiple points to deal with races.  This is
	 * just the first line of defense.  The only time DELETED_FE cannot
	 * be set is when HAMMER_RECF_INTERLOCK_BE is set.
	 *
	 * Don't get confused between record deletion and, say, directory
	 * entry deletion.  The deletion of a directory entry that is on
	 * the media has nothing to do with the record deletion flags.
	 */
	if (rec->flags & (HAMMER_RECF_DELETED_FE|HAMMER_RECF_DELETED_BE)) {
		if (rec->flush_state == HAMMER_FST_FLUSH) {
			KKASSERT(rec->flush_group == rec->ip->flush_group);
			r = 1;
		} else {
			r = 0;
		}
		return(r);
	}

	/*
	 * If the record is in an idle state it has no dependencies and
	 * can be flushed.
	 */
	ip = rec->ip;
	flg = ip->flush_group;
	r = 0;

	switch(rec->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * The record has no setup dependency, we can flush it.
		 */
		KKASSERT(rec->target_ip == NULL);
		rec->flush_state = HAMMER_FST_FLUSH;
		rec->flush_group = flg;
		++flg->refs;
		hammer_ref(&rec->lock);
		r = 1;
		break;
	case HAMMER_FST_SETUP:
		/*
		 * The record has a setup dependency.  These are typically
		 * directory entry adds and deletes.  Such entries will be
		 * flushed when their inodes are flushed so we do not
		 * usually have to add them to the flush here.  However,
		 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
		 * it is asking us to flush this record (and it).
		 */
		target_ip = rec->target_ip;
		KKASSERT(target_ip != NULL);
		KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);

		/*
		 * If the target IP is already flushing in our group
		 * we could associate the record, but target_ip has
		 * already synced ino_data to sync_ino_data and we
		 * would also have to adjust nlinks.  Plus there are
		 * ordering issues for adds and deletes.
		 *
		 * Reflush downward if this is an ADD, and upward if
		 * this is a DEL.
		 */
		if (target_ip->flush_state == HAMMER_FST_FLUSH) {
			if (rec->type == HAMMER_MEM_RECORD_ADD)
				ip->flags |= HAMMER_INODE_REFLUSH;
			else
				target_ip->flags |= HAMMER_INODE_REFLUSH;
			break;
		}

		/*
		 * Target IP is not yet flushing.  This can get complex
		 * because we have to be careful about the recursion.
		 *
		 * Directories create an issue for us in that if a flush
		 * of a directory is requested the expectation is to flush
		 * any pending directory entries, but this will cause the
		 * related inodes to recursively flush as well.  We can't
		 * really defer the operation so just get as many as we
		 * can.
		 */
#if 0
		if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
		    (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
			/*
			 * We aren't reclaiming and the target ip was not
			 * previously prevented from flushing due to this
			 * record dependency.  Do not flush this record.
			 */
		} else
#endif
		if (flg->total_count + flg->refs >
		    ip->hmp->undo_rec_limit) {
			/*
			 * Our flush group is over-full and we risk blowing
			 * out the UNDO FIFO.  Stop the scan, flush what we
			 * have, then reflush the directory.
			 *
			 * The directory may be forced through multiple
			 * flush groups before it can be completely
			 * flushed.
			 */
			ip->flags |= HAMMER_INODE_RESIGNAL |
				     HAMMER_INODE_REFLUSH;
			r = -1;
		} else if (rec->type == HAMMER_MEM_RECORD_ADD) {
			/*
			 * If the target IP is not flushing we can force
			 * it to flush, even if it is unable to write out
			 * any of its own records we have at least one in
			 * hand that we CAN deal with.
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = flg;
			++flg->refs;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip, flg,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		} else {
			/*
			 * General or delete-on-disk record.
			 *
			 * XXX this needs help.  If a delete-on-disk we could
			 * disconnect the target.  If the target has its own
			 * dependencies they really need to be flushed.
			 *
			 * XXX
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = flg;
			++flg->refs;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip, flg,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * The flush_group should already match.
		 */
		KKASSERT(rec->flush_group == flg);
		r = 1;
		break;
	}
	return(r);
}
/*
 * This version just moves records already in a flush state to the new
 * flush group and that is it.
 */
static int
hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
{
	hammer_inode_t ip = rec->ip;

	switch(rec->flush_state) {
	case HAMMER_FST_FLUSH:
		KKASSERT(rec->flush_group == ip->flush_group);
		break;
	default:
		break;
	}
	return(1);
}
/*
 * Wait for a previously queued flush to complete.
 *
 * If a critical error occurred we don't try to wait.
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
	if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
		while (ip->flush_state != HAMMER_FST_IDLE &&
		       (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
			if (ip->flush_state == HAMMER_FST_SETUP)
				hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
			if (ip->flush_state != HAMMER_FST_IDLE) {
				ip->flags |= HAMMER_INODE_FLUSHW;
				tsleep(&ip->flags, 0, "hmrwin", 0);
			}
		}
	}
}
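/*
 * Illustrative sketch (not compiled in): the FLUSHW handshake used
 * above.  The waiter advertises itself via HAMMER_INODE_FLUSHW and
 * sleeps on &ip->flags; hammer_flush_inode_done() clears the flag and
 * wakeup()s the same address when the flush finishes.
 */
#if 0
	/* waiter side */
	ip->flags |= HAMMER_INODE_FLUSHW;
	tsleep(&ip->flags, 0, "hmrwin", 0);

	/* completion side (see hammer_flush_inode_done() below) */
	if (ip->flags & HAMMER_INODE_FLUSHW) {
		ip->flags &= ~HAMMER_INODE_FLUSHW;
		wakeup(&ip->flags);
	}
#endif
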
/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip, int error)
{
	hammer_mount_t hmp;
	int dorel;

	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

	hmp = ip->hmp;

	/*
	 * Merge left-over flags back into the frontend and fix the state.
	 * Incomplete truncations are retained by the backend.
	 */
	ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
	ip->sync_flags &= HAMMER_INODE_TRUNCATED;

	/*
	 * The backend may have adjusted nlinks, so if the adjusted nlinks
	 * does not match the frontend set the frontend's DDIRTY flag again.
	 */
	if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
		ip->flags |= HAMMER_INODE_DDIRTY;

	/*
	 * Fix up the dirty buffer status.
	 */
	if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
		ip->flags |= HAMMER_INODE_BUFS;
	}

	/*
	 * Re-set the XDIRTY flag if some of the inode's in-memory records
	 * could not be flushed.
	 */
	KKASSERT((RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
		 (!RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) != 0));

	/*
	 * Do not lose track of inodes which no longer have vnode
	 * associations, otherwise they may never get flushed again.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
		ip->flags |= HAMMER_INODE_REFLUSH;

	/*
	 * Adjust the flush state.
	 */
	if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		/*
		 * We were unable to flush out all our records, leave the
		 * inode in a flush state and in the current flush group.
		 * The flush group will be re-run.
		 *
		 * This occurs if the UNDO block gets too full or there is
		 * too much dirty meta-data and allows the flusher to
		 * finalize the UNDO block and then re-flush.
		 */
		ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
		dorel = 0;
	} else {
		/*
		 * Remove from the flush_group
		 */
		TAILQ_REMOVE(&ip->flush_group->flush_list, ip, flush_entry);
		ip->flush_group = NULL;

		/*
		 * Clean up the vnode ref and tracking counts.
		 */
		if (ip->flags & HAMMER_INODE_VHELD) {
			ip->flags &= ~HAMMER_INODE_VHELD;
			vrele(ip->vp);
		}
		--hmp->count_iqueued;
		--hammer_count_iqueued;

		/*
		 * And adjust the state.
		 */
		if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
			ip->flush_state = HAMMER_FST_IDLE;
			dorel = 1;
		} else {
			ip->flush_state = HAMMER_FST_SETUP;
			dorel = 0;
		}

		/*
		 * If the frontend is waiting for a flush to complete,
		 * wake it up.
		 */
		if (ip->flags & HAMMER_INODE_FLUSHW) {
			ip->flags &= ~HAMMER_INODE_FLUSHW;
			wakeup(&ip->flags);
		}
	}

	/*
	 * If the frontend made more changes and requested another
	 * flush, then try to get it running.
	 *
	 * Reflushes are aborted when the inode is errored out.
	 */
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
	}

	/*
	 * If we have no parent dependencies we can clear CONN_DOWN
	 */
	if (TAILQ_EMPTY(&ip->target_list))
		ip->flags &= ~HAMMER_INODE_CONN_DOWN;

	/*
	 * If the inode is now clean drop the space reservation.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    (ip->flags & HAMMER_INODE_RSV_INODES)) {
		ip->flags &= ~HAMMER_INODE_RSV_INODES;
		--hmp->rsv_inodes;
	}

	if (dorel)
		hammer_rel_inode(ip, 0);
}
2159 * Called from hammer_sync_inode() to synchronize in-memory records
2163 hammer_sync_record_callback(hammer_record_t record, void *data)
2165 hammer_cursor_t cursor = data;
2166 hammer_transaction_t trans = cursor->trans;
2167 hammer_mount_t hmp = trans->hmp;
2171 * Skip records that do not belong to the current flush.
2173 ++hammer_stats_record_iterations;
2174 if (record->flush_state != HAMMER_FST_FLUSH)
2178 if (record->flush_group != record->ip->flush_group) {
2179 kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group ,record->ip->flush_group);
2184 KKASSERT(record->flush_group == record->ip->flush_group);
2187 * Interlock the record using the BE flag. Once BE is set the
2188 * frontend cannot change the state of FE.
2190 * NOTE: If FE is set prior to us setting BE we still sync the
2191 * record out, but the flush completion code converts it to
2192 * a delete-on-disk record instead of destroying it.
2194 KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
2195 record->flags |= HAMMER_RECF_INTERLOCK_BE;
2198 * The backend may have already disposed of the record.
2200 if (record->flags & HAMMER_RECF_DELETED_BE) {
2206 * If the whole inode is being deleting all on-disk records will
2207 * be deleted very soon, we can't sync any new records to disk
2208 * because they will be deleted in the same transaction they were
2209 * created in (delete_tid == create_tid), which will assert.
2211 * XXX There may be a case with RECORD_ADD with DELETED_FE set
2212 * that we currently panic on.
	if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
		switch(record->type) {
		case HAMMER_MEM_RECORD_DATA:
			/*
			 * We don't have to do anything; if the record was
			 * committed the space will have been accounted for
			 * anyway.
			 */
			/* fall through */
		case HAMMER_MEM_RECORD_GENERAL:
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
			error = 0;
			goto done;
		case HAMMER_MEM_RECORD_ADD:
			panic("hammer_sync_record_callback: illegal add "
			      "during inode deletion record %p", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_INODE:
			panic("hammer_sync_record_callback: attempt to "
			      "sync inode record %p?", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_DEL:
			/*
			 * Follow through and issue the on-disk deletion
			 */
			break;
		}
	}
	/*
	 * If DELETED_FE is set special handling is needed for directory
	 * entries.  Dependent pieces related to the directory entry may
	 * have already been synced to disk.  If this occurs we have to
	 * sync the directory entry and then change the in-memory record
	 * from an ADD to a DELETE to cover the fact that it's been
	 * deleted by the frontend.
	 *
	 * A directory delete covering record (MEM_RECORD_DEL) can never
	 * be deleted by the frontend.
	 *
	 * Any other record type (aka DATA) can be deleted by the frontend.
	 * XXX At the moment the flusher must skip it because there may
	 * be another data record in the flush group for the same block,
	 * meaning that some frontend data changes can leak into the backend's
	 * synchronization point.
	 */
	if (record->flags & HAMMER_RECF_DELETED_FE) {
		if (record->type == HAMMER_MEM_RECORD_ADD) {
			record->flags |= HAMMER_RECF_CONVERT_DELETE;
		} else {
			KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
			record->flags |= HAMMER_RECF_DELETED_BE;
			error = 0;
			goto done;
		}
	}
	/*
	 * Assign the create_tid for new records.  Deletions already
	 * have the record's entire key properly set up.
	 */
	if (record->type != HAMMER_MEM_RECORD_DEL) {
		record->leaf.base.create_tid = trans->tid;
		record->leaf.create_ts = trans->time32;
	}
	for (;;) {
		error = hammer_ip_sync_record_cursor(cursor, record);
		if (error != EDEADLK)
			break;
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
					   record->ip);
		if (error)
			break;
	}
	record->flags &= ~HAMMER_RECF_CONVERT_DELETE;

	if (error)
		error = -error;
done:
	hammer_flush_record_done(record, error);
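	/*
	 * NOTE (added in editing): the for(;;) loop above is the usual
	 * HAMMER deadlock-recovery idiom.  EDEADLK from
	 * hammer_ip_sync_record_cursor() indicates a B-Tree lock
	 * collision; the cursor is torn down and re-initialized before
	 * the operation is retried.
	 */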
	/*
	 * Do partial finalization if we have built up too many dirty
	 * buffers.  Otherwise a buffer cache deadlock can occur when
	 * doing things like creating tens of thousands of tiny files.
	 *
	 * We must release our cursor lock to avoid a 3-way deadlock
	 * due to the exclusive sync lock the finalizer must get.
	 */
	if (hammer_flusher_meta_limit(hmp)) {
		hammer_unlock_cursor(cursor, 0);
		hammer_flusher_finalize(trans, 0);
		hammer_lock_cursor(cursor, 0);
	}
	return(error);
}
/*
 * Backend function called by the flusher to sync an inode to media.
 */
int
hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	hammer_node_t tmp_node;
	hammer_record_t depend;
	hammer_record_t next;
	int error, tmp_error;
	u_int64_t nlinks;

	if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
		return(0);

	error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	if (error)
		return(error);
	/*
	 * Any directory records referencing this inode which are not in
	 * our current flush group must adjust our nlink count for the
	 * purposes of synchronization to disk.
	 *
	 * Records which are in our flush group can be unlinked from our
	 * inode now, potentially allowing the inode to be physically
	 * deleted.
	 *
	 * This cannot block.
	 */
	nlinks = ip->ino_data.nlinks;
	next = TAILQ_FIRST(&ip->target_list);
	while ((depend = next) != NULL) {
		next = TAILQ_NEXT(depend, target_entry);
		if (depend->flush_state == HAMMER_FST_FLUSH &&
		    depend->flush_group == ip->flush_group) {
			/*
			 * If this is an ADD that was deleted by the frontend
			 * the frontend nlinks count will have already been
			 * decremented, but the backend is going to sync its
			 * directory entry and must account for it.  The
			 * record will be converted to a delete-on-disk when
			 * it gets synced.
			 *
			 * If the ADD was not deleted by the frontend we
			 * can remove the dependency from our target_list.
			 */
			if (depend->flags & HAMMER_RECF_DELETED_FE) {
				++nlinks;
			} else {
				TAILQ_REMOVE(&ip->target_list, depend,
					     target_entry);
				depend->target_ip = NULL;
			}
		} else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
			/*
			 * Not part of our flush group
			 */
			KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
			switch(depend->type) {
			case HAMMER_MEM_RECORD_ADD:
				--nlinks;
				break;
			case HAMMER_MEM_RECORD_DEL:
				++nlinks;
				break;
			default:
				break;
			}
		}
	}
	/*
	 * Set dirty if we had to modify the link count.
	 */
	if (ip->sync_ino_data.nlinks != nlinks) {
		KKASSERT((int64_t)nlinks >= 0);
		ip->sync_ino_data.nlinks = nlinks;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
	}
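	/*
	 * NOTE (added in editing): worked example of the accounting
	 * above.  An inode with ino_data.nlinks == 2 whose only
	 * directory ADD record sits in a *later* flush group syncs
	 * with nlinks == 1, because that link does not yet exist on
	 * media at this synchronization point; DDIRTY then forces the
	 * adjusted count to be written out.
	 */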
	/*
	 * If there is a truncation queued destroy any data past the
	 * (aligned) truncation point.  Userland will have dealt with the
	 * buffer containing the truncation point for us.
	 *
	 * We don't flush pending frontend data buffers until after we've
	 * dealt with the truncation.
	 */
	if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
		/*
		 * Interlock trunc_off.  The VOP front-end may continue to
		 * make adjustments to it while we are blocked.
		 */
		off_t trunc_off;
		off_t aligned_trunc_off;
		int blkmask;

		trunc_off = ip->sync_trunc_off;
		blkmask = hammer_blocksize(trunc_off) - 1;
		aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
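		/*
		 * NOTE (added in editing): hammer_blocksize() returns a
		 * power-of-2 size, so the expression rounds trunc_off
		 * up to a block boundary.  For example, with a 16KB
		 * block (blkmask 0x3fff) a trunc_off of 0x4001 aligns
		 * to 0x8000, while 0x4000 is already aligned and is
		 * left unchanged.
		 */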
		/*
		 * Delete any whole blocks on-media.  The front-end has
		 * already cleaned out any partial block and made it
		 * pending.  The front-end may have updated trunc_off
		 * while we were blocked so we only use sync_trunc_off.
		 *
		 * This operation can blow out the buffer cache, EWOULDBLOCK
		 * means we were unable to complete the deletion.  The
		 * deletion will update sync_trunc_off in that case.
		 */
		error = hammer_ip_delete_range(&cursor, ip,
					       aligned_trunc_off,
					       0x7FFFFFFFFFFFFFFFLL, 2);
		if (error == EWOULDBLOCK) {
			ip->flags |= HAMMER_INODE_WOULDBLOCK;
			error = 0;
			goto defer_buffer_flush;
		}
		/*
		 * Clear the truncation flag on the backend after we have
		 * completed the deletions.  Backend data is now good again
		 * (including new records we are about to sync, below).
		 *
		 * Leave sync_trunc_off intact.  As we write additional
		 * records the backend will update sync_trunc_off.  This
		 * tells the backend whether it can skip the overwrite
		 * test.  This should work properly even when the backend
		 * writes full blocks where the truncation point straddles
		 * the block because the comparison is against the base
		 * offset of the record.
		 */
		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
		/* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
	} else {
		error = 0;
	}
	/*
	 * Now sync related records.  These will typically be directory
	 * entries, records tracking direct-writes, or delete-on-disk records.
	 */
	if (error == 0) {
		tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				    hammer_sync_record_callback, &cursor);
		if (tmp_error < 0)
			tmp_error = -tmp_error;
		if (tmp_error)
			error = tmp_error;
	}
	hammer_cache_node(&ip->cache[1], cursor.node);
	/*
	 * Re-seek for inode update, assuming our cache hasn't been ripped
	 * out from under us.
	 */
	if (error == 0) {
		tmp_node = hammer_ref_node_safe(ip->hmp, &ip->cache[0], &error);
		if (tmp_node) {
			hammer_cursor_downgrade(&cursor);
			hammer_lock_sh(&tmp_node->lock);
			if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
				hammer_cursor_seek(&cursor, tmp_node, 0);
			hammer_unlock(&tmp_node->lock);
			hammer_rel_node(tmp_node);
		}
		error = 0;
	}
	/*
	 * If we are deleting the inode the frontend had better not have
	 * any active references on elements making up the inode.
	 *
	 * The call to hammer_ip_delete_clean() cleans up auxiliary records
	 * but not DB or DATA records.  Those must have already been deleted
	 * by the normal truncation mechanic.
	 */
	if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
	    RB_EMPTY(&ip->rec_tree) &&
	    (ip->sync_flags & HAMMER_INODE_DELETING) &&
	    (ip->flags & HAMMER_INODE_DELETED) == 0) {
		int count1 = 0;

		error = hammer_ip_delete_clean(&cursor, ip, &count1);
		if (error == 0) {
			ip->flags |= HAMMER_INODE_DELETED;
			ip->sync_flags &= ~HAMMER_INODE_DELETING;
			ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
			KKASSERT(RB_EMPTY(&ip->rec_tree));

			/*
			 * Set delete_tid in both the frontend and backend
			 * copy of the inode record.  The DELETED flag handles
			 * this, do not set RDIRTY.
			 */
			ip->ino_leaf.base.delete_tid = trans->tid;
			ip->sync_ino_leaf.base.delete_tid = trans->tid;
			ip->ino_leaf.delete_ts = trans->time32;
			ip->sync_ino_leaf.delete_ts = trans->time32;

			/*
			 * Adjust the inode count in the volume header
			 */
			hammer_sync_lock_sh(trans);
			if (ip->flags & HAMMER_INODE_ONDISK) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
			}
			hammer_sync_unlock(trans);
		}
	}
	ip->sync_flags &= ~HAMMER_INODE_BUFS;

defer_buffer_flush:
	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 * DELETED and ONDISK are managed only in ip->flags.
	 *
	 * In the case of a deferred buffer flush we still update the on-disk
	 * inode to satisfy visibility requirements if there happen to be
	 * directory dependencies.
	 */
	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags.
		 * The delete flag takes care of things.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t record = RB_ROOT(&ip->rec_tree);
			hammer_ref(&record->lock);
			KKASSERT(record->lock.refs == 1);
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
			hammer_rel_mem_record(record);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set DDIRTY to force
		 * an initial record to be written.
		 *
		 * Also set the create_tid in both the frontend and backend
		 * copy of the inode record.
		 */
		ip->ino_leaf.base.create_tid = trans->tid;
		ip->ino_leaf.create_ts = trans->time32;
		ip->sync_ino_leaf.base.create_tid = trans->tid;
		ip->sync_ino_leaf.create_ts = trans->time32;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
		break;
	}
	/*
	 * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
	 * is already on-disk the old record is marked as deleted.
	 *
	 * If DELETED is set hammer_update_inode() will delete the existing
	 * record without writing out a new one.
	 *
	 * If *ONLY* the ITIMES flag is set we can update the record in-place.
	 */
	if (ip->flags & HAMMER_INODE_DELETED) {
		error = hammer_update_inode(&cursor, ip);
	} else
	if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
	    (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
		error = hammer_update_itimes(&cursor, ip);
	} else
	if (ip->sync_flags & (HAMMER_INODE_DDIRTY |
			      HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
		error = hammer_update_inode(&cursor, ip);
	}
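	/*
	 * NOTE (added in editing): decision summary for the chain above:
	 *	DELETED				-> hammer_update_inode()
	 *					   (delete-only path)
	 *	ATIME/MTIME without DDIRTY	-> hammer_update_itimes()
	 *					   (in-place update)
	 *	DDIRTY				-> hammer_update_inode()
	 *					   (new record)
	 */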
	if (error) {
		hammer_critical_error(ip->hmp, ip, error,
				      "while syncing inode");
	}
	hammer_done_cursor(&cursor);
	return(error);
}
/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
	struct vnode *vp;
	/*
	 * Set the DELETING flag when the link count drops to 0 and the
	 * OS no longer has any opens on the inode.
	 *
	 * The backend will clear DELETING (a mod flag) and set DELETED
	 * (a state flag) when it is actually able to perform the
	 * operation.
	 */
	if (ip->ino_data.nlinks == 0 &&
	    (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
		ip->flags |= HAMMER_INODE_DELETING;
		ip->flags |= HAMMER_INODE_TRUNCATED;
		ip->trunc_off = 0;
		vp = NULL;
		if (getvp) {
			if (hammer_get_vnode(ip, &vp) != 0)
				return;
		}

		if (ip->vp) {
			vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
			vnode_pager_setsize(ip->vp, 0);
		}
		if (getvp) {
			vput(vp);
		}
	}
}
/*
 * After potentially resolving a dependency the inode is tested
 * to determine whether it needs to be reflushed.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		hammer_ref(&ip->lock);
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
		hammer_rel_inode(ip, 0);
	}
}
/*
 * Clear the RECLAIM flag on an inode.  This occurs when the inode is
 * reassociated with a vp or just before it gets freed.
 *
 * Wakeup one thread blocked waiting on reclaims to complete.  Note that
 * the inode the thread is waiting on behalf of is a different inode than
 * the inode we are called with.  This is to create a pipeline.
 */
static void
hammer_inode_wakereclaims(hammer_inode_t ip)
{
	struct hammer_reclaim *reclaim;
	hammer_mount_t hmp = ip->hmp;

	if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
		return;

	--hammer_count_reclaiming;
	--hmp->inode_reclaims;
	ip->flags &= ~HAMMER_INODE_RECLAIM;

	if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
		TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
		reclaim->okydoky = 1;
		wakeup(reclaim);
	}
}
/*
 * Setup our reclaim pipeline.  We only let so many detached (and dirty)
 * inodes build up before we start blocking.
 *
 * When we block we don't care *which* inode has finished reclaiming,
 * as long as one does.  This is somewhat heuristic... we also put a
 * cap on how long we are willing to wait.
 */
void
hammer_inode_waitreclaims(hammer_mount_t hmp)
{
	struct hammer_reclaim reclaim;
	int delay;

	if (hmp->inode_reclaims > HAMMER_RECLAIM_WAIT) {
		reclaim.okydoky = 0;
		TAILQ_INSERT_TAIL(&hmp->reclaim_list,
				  &reclaim, entry);
	} else {
		reclaim.okydoky = 1;
	}

	if (reclaim.okydoky == 0) {
		delay = (hmp->inode_reclaims - HAMMER_RECLAIM_WAIT) * hz /
			HAMMER_RECLAIM_WAIT;
		tsleep(&reclaim, 0, "hmrrcm", delay + 1);
		if (reclaim.okydoky == 0)
			TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
	}
}
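/*
 * NOTE (added in editing): example of the backoff above.  With twice
 * the limit pending (inode_reclaims == 2 * HAMMER_RECLAIM_WAIT) the
 * computed delay is hz, so the thread sleeps for up to one second
 * unless hammer_inode_wakereclaims() completes a reclaim and wakes it
 * first.
 */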