/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.100 2008/07/12 02:47:39 dillon Exp $
 */
#include "hammer.h"
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/buf2.h>
static int	hammer_unload_inode(struct hammer_inode *ip);
static void	hammer_free_inode(hammer_inode_t ip);
static void	hammer_flush_inode_core(hammer_inode_t ip, int flags);
static int	hammer_setup_child_callback(hammer_record_t rec, void *data);
static int	hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
static int	hammer_setup_parent_inodes(hammer_inode_t ip);
static int	hammer_setup_parent_inodes_helper(hammer_record_t record);
static void	hammer_inode_wakereclaims(hammer_inode_t ip);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif
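/*
 * Inode flush state machine: an inode is HAMMER_FST_IDLE when it has no
 * flush dependancies, HAMMER_FST_SETUP once dependancies exist but the
 * inode has not yet been queued, and HAMMER_FST_FLUSH once it is owned
 * by a flush group.  The frontend modifies ip->flags/ino_data while the
 * backend flusher operates on the sync_flags/sync_ino_data snapshots
 * taken in hammer_flush_inode_core().
 */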
/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_localization < ip2->obj_localization)
		return(-1);
	if (ip1->obj_localization > ip2->obj_localization)
		return(1);
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}

/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_localization < ip->obj_localization)
		return(-1);
	if (info->obj_localization > ip->obj_localization)
		return(1);
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}

/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t info = data;

	if (ip->obj_localization > info->obj_localization)
		return(1);
	if (ip->obj_localization < info->obj_localization)
		return(-1);
	if (ip->obj_id > info->obj_id)
		return(1);
	if (ip->obj_id < info->obj_id)
		return(-1);
	return(0);
}

/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
	u_int32_t localization = *(u_int32_t *)data;
	if (ip->obj_localization > localization)
		return(1);
	if (ip->obj_localization < localization)
		return(-1);
	return(0);
}

/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
	if (p1->localization < p2->localization)
		return(-1);
	if (p1->localization > p2->localization)
		return(1);
	return(0);
}

RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
	     hammer_pfs_rb_compare, u_int32_t, localization);
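/*
 * Inodes are indexed by the composite key (obj_localization, obj_id,
 * obj_asof), compared in exactly that priority order by the functions
 * above.  The XLOOKUP variant permits lookups keyed on a
 * hammer_inode_info structure so no temporary inode is required.
 */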
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	/*
	 * Degenerate case
	 */
	if (ip == NULL) {
		vrecycle(ap->a_vp);
		return(0);
	}

	/*
	 * If the inode no longer has visibility in the filesystem try to
	 * recycle it immediately, even if the inode is dirty.  Recycling
	 * it quickly allows the system to reclaim buffer cache and VM
	 * resources which can matter a lot in a heavily loaded system.
	 *
	 * This can deadlock in vfsync() if we aren't careful.
	 *
	 * Do not queue the inode to the flusher if we still have visibility,
	 * otherwise namespace calls such as chmod will unnecessarily generate
	 * multiple inode updates.
	 */
	hammer_inode_unloadable_check(ip, 0);
	if (ip->ino_data.nlinks == 0) {
		if (ip->flags & HAMMER_INODE_MODMASK)
			hammer_flush_inode(ip, 0);
		vrecycle(ap->a_vp);
	}
	return(0);
}
/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;
	hammer_mount_t hmp;
	struct vnode *vp;

	vp = ap->a_vp;

	if ((ip = vp->v_data) != NULL) {
		hmp = ip->hmp;
		vp->v_data = NULL;
		ip->vp = NULL;

		if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
			++hammer_count_reclaiming;
			++hmp->inode_reclaims;
			ip->flags |= HAMMER_INODE_RECLAIM;
			if (hmp->inode_reclaims > HAMMER_RECLAIM_FLUSH &&
			    (hmp->inode_reclaims & 255) == 0) {
				hammer_flusher_async(hmp);
			}
		}
		hammer_rel_inode(ip, 1);
	}
	return(0);
}
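/*
 * Reclaim pacing: once the number of reclaiming inodes exceeds
 * HAMMER_RECLAIM_FLUSH the flusher is signalled asynchronously, but
 * only on every 256th reclaim so a burst of vnode reclaims does not
 * generate a flood of flusher wakeups.
 */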
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
	hammer_mount_t hmp;
	struct vnode *vp;
	int error = 0;
	u_int8_t obj_type;

	hmp = ip->hmp;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				hammer_unlock(&ip->lock);
				vp = *vpp;
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;

			obj_type = ip->ino_data.obj_type;
			vp->v_type = hammer_get_vnode_type(obj_type);

			hammer_inode_wakereclaims(ip);

			switch(ip->ino_data.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
				break;
			default:
				break;
			}

			/*
			 * Only mark as the root vnode if the ip is not
			 * historical, otherwise the VFS cache will get
			 * confused.  The other half of the special handling
			 * is in hammer_vop_nlookupdotdot().
			 *
			 * Pseudo-filesystem roots also do not count.
			 */
			if (ip->obj_id == HAMMER_OBJID_ROOT &&
			    ip->obj_asof == hmp->asof &&
			    ip->obj_localization == 0) {
				vp->v_flag |= VROOT;
			}

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG)
				vinitvmio(vp, ip->ino_data.size);
			break;
		}

		/*
		 * loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp) {
				*vpp = vp;
				break;
			}
			vput(vp);
		}
	}
	return(error);
}
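/*
 * The retry loop above covers two races: another thread can attach a
 * vnode to ip->vp while we are blocked in getnewvnode() (detected by
 * re-checking ip->vp under the inode lock), and a cached vnode can be
 * reclaimed while vget() blocks, in which case vp no longer matches
 * ip->vp and we loop to try again.
 */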
/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
			    int (*callback)(hammer_inode_t ip, void *data),
			    void *data)
{
	hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
				   hammer_inode_info_cmp_all_history,
				   callback, iinfo);
}
/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
		0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);
	hammer_ref(&ip->lock);

	/*
	 * Locate the on-disk inode.  If this is a PFS root we always
	 * access the current version of the root inode and (if it is not
	 * a master) always access information under it with a snapshot
	 * TID.
	 */
retry:
	hammer_init_cursor(trans, &cursor, (dip ? &dip->cache[0] : NULL), NULL);
	cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;

	cursor.asof = iinfo.obj_asof;
	cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
		       HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
		ip->ino_data = cursor.data->inode;

		/*
		 * cache[0] tries to cache the location of the object inode.
		 * The assumption is that it is near the directory inode.
		 *
		 * cache[1] tries to cache the location of the object data.
		 * The assumption is that it is near the directory data.
		 */
		hammer_cache_node(&ip->cache[0], cursor.node);
		if (dip && dip->cache[1].node)
			hammer_cache_node(&ip->cache[1], dip->cache[1].node);

		/*
		 * The file should not contain any data past the file size
		 * stored in the inode.  Setting save_trunc_off to the
		 * file size instead of max reduces B-Tree lookup overheads
		 * on append by allowing the flusher to avoid checking for
		 * record overwrites.
		 */
		ip->save_trunc_off = ip->ino_data.size;

		/*
		 * Locate and assign the pseudofs management structure to
		 * the inode.
		 */
		if (dip && dip->obj_localization == ip->obj_localization) {
			ip->pfsm = dip->pfsm;
			hammer_ref(&ip->pfsm->lock);
		} else {
			ip->pfsm = hammer_load_pseudofs(trans,
							ip->obj_localization,
							errorp);
			*errorp = 0;	/* ignore ENOENT */
		}
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 */
	if (*errorp == 0) {
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_free_inode(ip);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}

		hammer_free_inode(ip);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);
	return(ip);
}
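/*
 * Note the two retry paths above: an EDEADLK from the B-Tree lookup
 * retires the cursor and repeats the lookup, while losing the
 * RB_INSERT race frees the speculatively allocated inode and restarts
 * at the in-memory lookup, which then finds the winning thread's
 * inode.
 */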
/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred, hammer_inode_t dip,
		    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;
	int error;

	hmp = trans->hmp;

	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;

	if (pfsm) {
		KKASSERT(pfsm->localization != 0);
		ip->obj_id = HAMMER_OBJID_ROOT;
		ip->obj_localization = pfsm->localization;
	} else {
		KKASSERT(dip != NULL);
		ip->obj_id = hammer_alloc_objid(hmp, dip);
		ip->obj_localization = dip->obj_localization;
	}

	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flush_state = HAMMER_FST_IDLE;
	ip->flags = HAMMER_INODE_DDIRTY |
		    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;

	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	/* ip->save_trunc_off = 0; (already zero) */
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	ip->ino_data.atime = trans->time;
	ip->ino_data.mtime = trans->time;
	ip->ino_data.size = 0;
	ip->ino_data.nlinks = 0;

	/*
	 * A nohistory designator on the parent directory is inherited by
	 * the child.  We will do this even for pseudo-fs creation... the
	 * sysad can turn it off.
	 */
	if (dip) {
		ip->ino_data.uflags = dip->ino_data.uflags &
				      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
	}

	ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_leaf.base.localization = ip->obj_localization +
					 HAMMER_LOCALIZE_INODE;
	ip->ino_leaf.base.obj_id = ip->obj_id;
	ip->ino_leaf.base.key = 0;
	ip->ino_leaf.base.create_tid = 0;
	ip->ino_leaf.base.delete_tid = 0;
	ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->time;

	/*
	 * Setup the ".." pointer.  This only needs to be done for directories
	 * but we do it for all objects as a recovery aid.
	 */
	if (dip)
		ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;

	/*
	 * The parent_obj_localization field only applies to pseudo-fs roots.
	 * XXX this is no longer applicable, PFSs are no longer directly
	 * tied into the parent's directory structure.
	 */
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
	    ip->obj_id == HAMMER_OBJID_ROOT) {
		ip->ino_data.ext.obj.parent_obj_localization =
						dip->obj_localization;
	}

	switch(ip->ino_leaf.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	if (dip) {
		xuid = hammer_to_unix_xid(&dip->ino_data.uid);
		xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
					     xuid, cred, &vap->va_mode);
	} else {
		xuid = 0;
	}
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
	else
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
	else if (dip)
		ip->ino_data.gid = dip->ino_data.gid;

	hammer_ref(&ip->lock);

	if (pfsm) {
		ip->pfsm = pfsm;
		hammer_ref(&pfsm->lock);
		error = 0;
	} else if (dip->obj_localization == ip->obj_localization) {
		ip->pfsm = dip->pfsm;
		hammer_ref(&ip->pfsm->lock);
		error = 0;
	} else {
		ip->pfsm = hammer_load_pseudofs(trans,
						ip->obj_localization,
						&error);
		error = 0;	/* ignore ENOENT */
	}

	if (error) {
		hammer_free_inode(ip);
		ip = NULL;
	} else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
		/* not reached */
		hammer_free_inode(ip);
	}
	*ipp = ip;
	return(error);
}
/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
	KKASSERT(ip->lock.refs == 1);
	hammer_uncache_node(&ip->cache[0]);
	hammer_uncache_node(&ip->cache[1]);
	hammer_inode_wakereclaims(ip);
	if (ip->objid_cache)
		hammer_clear_objid(ip);
	--hammer_count_inodes;
	--ip->hmp->count_inodes;
	if (ip->pfsm) {
		hammer_rel_pseudofs(ip->hmp, ip->pfsm);
		ip->pfsm = NULL;
	}
	kfree(ip, M_HAMMER);
	ip = NULL;
}
/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is returned,
 * otherwise *errorp is set to 0.  Typically when an error occurs it will
 * be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
		     u_int32_t localization, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	hammer_inode_t ip;
	hammer_pseudofs_inmem_t pfsm;
	struct hammer_cursor cursor;
	int bytes;

retry:
	pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
	if (pfsm) {
		hammer_ref(&pfsm->lock);
		*errorp = 0;
		return(pfsm);
	}

	/*
	 * PFS records are stored in the root inode (not the PFS root inode,
	 * but the real root).  Avoid an infinite recursion if loading
	 * the PFS for the real root.
	 */
	if (localization) {
		ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
				      HAMMER_MAX_TID,
				      HAMMER_DEF_LOCALIZATION, 0, errorp);
	} else {
		ip = NULL;
	}

	pfsm = kmalloc(sizeof(*pfsm), M_HAMMER, M_WAITOK | M_ZERO);
	pfsm->localization = localization;
	pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
	pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

	hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
	cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	if (ip)
		*errorp = hammer_ip_lookup(&cursor);
	else
		*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == 0) {
		*errorp = hammer_ip_resolve_data(&cursor);
		if (*errorp == 0) {
			if (cursor.data->pfsd.mirror_flags &
			    HAMMER_PFSD_DELETED) {
				*errorp = ENOENT;
			} else {
				bytes = cursor.leaf->data_len;
				if (bytes > sizeof(pfsm->pfsd))
					bytes = sizeof(pfsm->pfsd);
				bcopy(cursor.data, &pfsm->pfsd, bytes);
			}
		}
	}
	hammer_done_cursor(&cursor);

	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_ref(&pfsm->lock);
	if (ip)
		hammer_rel_inode(ip, 0);
	if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
		kfree(pfsm, M_HAMMER);
		goto retry;
	}
	return(pfsm);
}
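/*
 * A race losing the RB_INSERT above simply frees the locally built
 * pfsm and retries; the retry finds and references the structure
 * inserted by the winning thread.
 */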
/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	hammer_inode_t ip;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
	pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.key = pfsm->localization;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_ASOF;

	error = hammer_ip_lookup(&cursor);
	if (error == 0 && hammer_cursor_inmem(&cursor)) {
		record = cursor.iprec;
		if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
			KKASSERT(cursor.deadlk_rec == NULL);
			hammer_ref(&record->lock);
			cursor.deadlk_rec = record;
			error = EDEADLK;
		} else {
			record->flags |= HAMMER_RECF_DELETED_FE;
			error = 0;
		}
	}
	if (error == 0 || error == ENOENT) {
		record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
		record->type = HAMMER_MEM_RECORD_GENERAL;

		record->leaf.base.localization = ip->obj_localization +
						 HAMMER_LOCALIZE_MISC;
		record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
		record->leaf.base.key = pfsm->localization;
		record->leaf.data_len = sizeof(pfsm->pfsd);
		bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
		error = hammer_ip_add_record(trans, record);
	}
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	hammer_rel_inode(ip, 0);
	return(error);
}
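/*
 * Saving a PFS is therefore a delete-then-add on the in-memory record:
 * a prior record is marked HAMMER_RECF_DELETED_FE (or forces an
 * EDEADLK retry if the backend holds its interlock) and a fresh
 * GENERAL record carrying the new pfsd payload is queued in its place.
 */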
/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
		       hammer_pseudofs_inmem_t pfsm)
{
	hammer_inode_t ip;
	struct vattr vap;
	int error;

	ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
			      pfsm->localization, 0, &error);
	if (ip == NULL) {
		vattr_null(&vap);
		vap.va_mode = 0755;
		vap.va_type = VDIR;
		error = hammer_create_inode(trans, &vap, cred, NULL, pfsm, &ip);
		if (error == 0) {
			++ip->ino_data.nlinks;
			hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		}
	}
	if (ip)
		hammer_rel_inode(ip, 0);
	return(error);
}
/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
	int res;

	hammer_ref(&ip->lock);
	if (ip->lock.refs == 2 && ip->vp)
		vclean_unlocked(ip->vp);
	if (ip->lock.refs == 1 && ip->vp == NULL)
		res = 0;
	else
		res = -1;	/* stop, someone is using the inode */
	hammer_rel_inode(ip, 0);
	return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
	int res;
	int try;

	for (try = res = 0; try < 4; ++try) {
		res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
						 hammer_inode_pfs_cmp,
						 hammer_unload_pseudofs_callback,
						 &localization);
		if (res == 0 && try > 1)
			break;
		hammer_flusher_sync(trans->hmp);
	}
	if (res != 0)
		res = ENOTEMPTY;
	return(res);
}
/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
	hammer_unref(&pfsm->lock);
	if (pfsm->lock.refs == 0) {
		RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
		kfree(pfsm, M_HAMMER);
	}
}
/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_record_t record;
	int error;
	int redirty;

retry:
	error = 0;

	/*
	 * If the inode has a presence on-disk then locate it and mark
	 * it deleted, setting DELONDISK.
	 *
	 * The record may or may not be physically deleted, depending on
	 * the retention policy.
	 */
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = ip->obj_localization +
					       HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (hammer_debug_inode)
			kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
		if (error) {
			kprintf("error %d\n", error);
			Debugger("hammer_update_inode");
		}

		if (error == 0) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			if (hammer_debug_inode)
				kprintf(" error %d\n", error);
			if (error && error != EDEADLK) {
				kprintf("error %d\n", error);
				Debugger("hammer_update_inode2");
			}
			if (error == 0) {
				ip->flags |= HAMMER_INODE_DELONDISK;
			}
			if (cursor->node)
				hammer_cache_node(&ip->cache[0], cursor->node);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("IPDED %p %d\n", ip, error);
			if (error == 0)
				goto retry;
		}
	}

	/*
	 * Ok, write out the initial record or a new record (after deleting
	 * the old one), unless the DELETED flag is set.  This routine will
	 * clear DELONDISK if it writes out a record.
	 *
	 * Update our inode statistics if this is the first application of
	 * the inode on-disk.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		/*
		 * Generate a record and write it to the media
		 */
		record = hammer_alloc_mem_record(ip, 0);
		record->type = HAMMER_MEM_RECORD_INODE;
		record->flush_state = HAMMER_FST_FLUSH;
		record->leaf = ip->sync_ino_leaf;
		record->leaf.base.create_tid = trans->tid;
		record->leaf.data_len = sizeof(ip->sync_ino_data);
		record->leaf.create_ts = trans->time32;
		record->data = (void *)&ip->sync_ino_data;
		record->flags |= HAMMER_RECF_INTERLOCK_BE;

		/*
		 * If this flag is set we cannot sync the new file size
		 * because we haven't finished related truncations.  The
		 * inode will be flushed in another flush group to finish
		 * the job.
		 */
		if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
		    ip->sync_ino_data.size != ip->ino_data.size) {
			redirty = 1;
			ip->sync_ino_data.size = ip->ino_data.size;
		} else {
			redirty = 0;
		}

		for (;;) {
			error = hammer_ip_sync_record_cursor(cursor, record);
			if (hammer_debug_inode)
				kprintf("GENREC %p rec %08x %d\n",
					ip, record->flags, error);
			if (error != EDEADLK)
				break;
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("GENREC reinit %d\n", error);
			if (error)
				break;
		}
		if (error) {
			kprintf("error %d\n", error);
			Debugger("hammer_update_inode3");
		}

		/*
		 * The record isn't managed by the inode's record tree,
		 * destroy it whether we succeed or fail.
		 */
		record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
		record->flags |= HAMMER_RECF_DELETED_FE;
		record->flush_state = HAMMER_FST_IDLE;
		hammer_rel_mem_record(record);

		/*
		 * Finish up.
		 */
		if (error == 0) {
			if (hammer_debug_inode)
				kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
			ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
					    HAMMER_INODE_ATIME |
					    HAMMER_INODE_MTIME);
			ip->flags &= ~HAMMER_INODE_DELONDISK;
			if (redirty)
				ip->sync_flags |= HAMMER_INODE_DDIRTY;

			/*
			 * Root volume count of inodes
			 */
			hammer_sync_lock_sh(trans);
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
				if (hammer_debug_inode)
					kprintf("NOWONDISK %p\n", ip);
			}
			hammer_sync_unlock(trans);
		}
	}

	/*
	 * If the inode has been destroyed, clean out any left-over flags
	 * that may have been set by the frontend.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
				    HAMMER_INODE_ATIME |
				    HAMMER_INODE_MTIME);
	}
	return(error);
}
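/*
 * HAMMER_INODE_DELONDISK brackets the window between deleting the old
 * on-disk inode record and writing its replacement.  If the flush
 * cannot complete in one pass the flag carries into the next cycle so
 * the stale record is not deleted a second time.
 */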
/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

retry:
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
	    HAMMER_INODE_ONDISK) {
		return(0);
	}

	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_INODE;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.key = 0;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor->key_beg.obj_type = 0;
	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_GET_LEAF;
	cursor->flags |= HAMMER_CURSOR_GET_DATA;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(cursor);
	if (error) {
		kprintf("error %d\n", error);
		Debugger("hammer_update_itimes1");
	}
	if (error == 0) {
		hammer_cache_node(&ip->cache[0], cursor->node);
		if (ip->sync_flags & HAMMER_INODE_MTIME) {
			/*
			 * Updating MTIME requires an UNDO.  Just cover
			 * both atime and mtime.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
				     HAMMER_ITIMES_BASE(&cursor->data->inode),
				     HAMMER_ITIMES_BYTES);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			cursor->data->inode.mtime = ip->sync_ino_data.mtime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		} else if (ip->sync_flags & HAMMER_INODE_ATIME) {
			/*
			 * Updating atime only can be done in-place with
			 * no UNDO.
			 */
			hammer_sync_lock_sh(trans);
			hammer_modify_buffer(trans, cursor->data_buffer,
					     NULL, 0);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			hammer_modify_buffer_done(cursor->data_buffer);
			hammer_sync_unlock(trans);
		}
		ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor,
					   &ip->cache[0], ip);
		if (error == 0)
			goto retry;
	}
	return(error);
}
/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	hammer_mount_t hmp = ip->hmp;

	/*
	 * Handle disposition when dropping the last ref.
	 */
	for (;;) {
		if (ip->lock.refs == 1) {
			/*
			 * Determine whether on-disk action is needed for
			 * the inode's final disposition.
			 */
			KKASSERT(ip->vp == NULL);
			hammer_inode_unloadable_check(ip, 0);
			if (ip->flags & HAMMER_INODE_MODMASK) {
				if (hmp->rsv_inodes > desiredvnodes) {
					hammer_flush_inode(ip,
							   HAMMER_FLUSH_SIGNAL);
				} else {
					hammer_flush_inode(ip, 0);
				}
			} else if (ip->lock.refs == 1) {
				hammer_unload_inode(ip);
				break;
			}
		} else {
			if (flush)
				hammer_flush_inode(ip, 0);

			/*
			 * The inode still has multiple refs, try to drop
			 * one ref.
			 */
			KKASSERT(ip->lock.refs >= 1);
			if (ip->lock.refs > 1) {
				hammer_unref(&ip->lock);
				break;
			}
		}
	}
}
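/*
 * The for(;;) loop is needed because hammer_flush_inode() can acquire
 * its own reference on the inode; each pass re-tests ip->lock.refs and
 * either queues the final flush, unloads the inode outright, or just
 * drops one of the remaining references.
 */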
/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * This can only be called in the context of the flusher.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
	hammer_mount_t hmp = ip->hmp;

	KASSERT(ip->lock.refs == 1,
		("hammer_unload_inode: %d refs\n", ip->lock.refs));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT(ip->lock.lockcount == 0);
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->target_list));

	RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

	hammer_free_inode(ip);
	return(0);
}
/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
	hammer_mount_t hmp = ip->hmp;

	if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
		ip->flags |= HAMMER_INODE_RO;
	else
		ip->flags &= ~HAMMER_INODE_RO;
	return(0);
}
/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
	KKASSERT(ip->hmp->ronly == 0 ||
		  (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
			    HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
			    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
	if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
		ip->flags |= HAMMER_INODE_RSV_INODES;
		++ip->hmp->rsv_inodes;
	}

	ip->flags |= flags;
}
/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * If the HAMMER_FLUSH_SYNCHRONOUS flag is specified we will attempt to
 * flush the inode synchronously using the caller's context.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
	int good;

	/*
	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
	 * state we have to put it back into an IDLE state so we can
	 * drop the extra ref.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		if (ip->flush_state == HAMMER_FST_SETUP) {
			ip->flush_state = HAMMER_FST_IDLE;
			hammer_rel_inode(ip, 0);
		}
		return;
	}

	/*
	 * Our flush action will depend on the current state.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * We have no dependancies and can flush immediately.  Some
		 * of our children may not be flushable so we have to re-test
		 * with that additional knowledge.
		 */
		hammer_flush_inode_core(ip, flags);
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Recurse upwards through dependancies via target_list
		 * and start their flusher actions going if possible.
		 *
		 * 'good' is our connectivity.  -1 means we have none and
		 * can't flush, 0 means there weren't any dependancies, and
		 * 1 means we have good connectivity.
		 */
		good = hammer_setup_parent_inodes(ip);

		/*
		 * We can continue if good >= 0.  Determine how many records
		 * under our inode can be flushed (and mark them).
		 */
		if (good >= 0) {
			hammer_flush_inode_core(ip, flags);
		} else {
			ip->flags |= HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp);
			}
		}
		break;
	default:
		/*
		 * We are already flushing, flag the inode to reflush
		 * if needed after it completes its current flush.
		 */
		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
			ip->flags |= HAMMER_INODE_REFLUSH;
		if (flags & HAMMER_FLUSH_SIGNAL) {
			ip->flags |= HAMMER_INODE_RESIGNAL;
			hammer_flusher_async(ip->hmp);
		}
		break;
	}
}
/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 *     so for now do not ref/deref the structures.  Note that if we use the
 *     ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip)
{
	hammer_record_t depend;
#if 0
	hammer_record_t next;
	hammer_inode_t  pip;
#endif
	int good;
	int r;

	good = 0;
	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
		r = hammer_setup_parent_inodes_helper(depend);
		KKASSERT(depend->target_ip == ip);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;
	}
	return(good);

#if 0
	good = 0;
	next = TAILQ_FIRST(&ip->target_list);
	if (next) {
		hammer_ref(&next->lock);
		hammer_ref(&next->ip->lock);
	}
	while ((depend = next) != NULL) {
		if (depend->target_ip == NULL) {
			pip = depend->ip;
			hammer_rel_mem_record(depend);
			hammer_rel_inode(pip, 0);
			break;
		}
		KKASSERT(depend->target_ip == ip);
		next = TAILQ_NEXT(depend, target_entry);
		if (next) {
			hammer_ref(&next->lock);
			hammer_ref(&next->ip->lock);
		}
		r = hammer_setup_parent_inodes_helper(depend);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;
		pip = depend->ip;
		hammer_rel_mem_record(depend);
		hammer_rel_inode(pip, 0);
	}
	return(good);
#endif
}
/*
 * This helper function takes a record representing the dependancy between
 * the parent inode and child inode.
 *
 * record->ip		= parent inode
 * record->target_ip	= child inode
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependancy and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record)
{
	hammer_mount_t hmp;
	hammer_inode_t pip;
	int good;

	KKASSERT(record->flush_state != HAMMER_FST_IDLE);
	pip = record->ip;
	hmp = pip->hmp;

	/*
	 * If the record is already flushing, is it in our flush group?
	 *
	 * If it is in our flush group but it is a general record or a
	 * delete-on-disk, it does not improve our connectivity (return 0),
	 * and if the target inode is not trying to destroy itself we can't
	 * allow the operation yet anyway (the second return -1).
	 */
	if (record->flush_state == HAMMER_FST_FLUSH) {
		if (record->flush_group != hmp->flusher.next) {
			pip->flags |= HAMMER_INODE_REFLUSH;
			return(-1);
		}
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		/* GENERAL or DEL */
		return(0);
	}

	/*
	 * It must be a setup record.  Try to resolve the setup dependancies
	 * by recursing upwards so we can place ip on the flush list.
	 */
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	good = hammer_setup_parent_inodes(pip);

	/*
	 * We can't flush ip because it has no connectivity (XXX also check
	 * nlinks for pre-existing connectivity!).  Flag it so any resolution
	 * recurses back down.
	 */
	if (good < 0) {
		pip->flags |= HAMMER_INODE_REFLUSH;
		return(good);
	}

	/*
	 * We are go, place the parent inode in a flushing state so we can
	 * place its record in a flushing state.  Note that the parent
	 * may already be flushing.  The record must be in the same flush
	 * group as the parent.
	 */
	if (pip->flush_state != HAMMER_FST_FLUSH)
		hammer_flush_inode_core(pip, HAMMER_FLUSH_RECURSION);
	KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	if (record->type == HAMMER_MEM_RECORD_DEL &&
	    (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
		/*
		 * Regardless of flushing state we cannot sync this path if the
		 * record represents a delete-on-disk but the target inode
		 * is not ready to sync its own deletion.
		 *
		 * XXX need to count effective nlinks to determine whether
		 * the flush is ok, otherwise removing a hardlink will
		 * just leave the DEL record to rot.
		 */
		record->target_ip->flags |= HAMMER_INODE_REFLUSH;
		return(-1);
	}

	if (pip->flush_group == pip->hmp->flusher.next) {
		/*
		 * This is the record we wanted to synchronize.  If the
		 * record went into a flush state while we blocked it
		 * had better be in the correct flush group.
		 */
		if (record->flush_state != HAMMER_FST_FLUSH) {
			record->flush_state = HAMMER_FST_FLUSH;
			record->flush_group = pip->flush_group;
			hammer_ref(&record->lock);
		}
		KKASSERT(record->flush_group == pip->flush_group);
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);

		/*
		 * A general or delete-on-disk record does not contribute
		 * to our visibility.  We can still flush it, however.
		 */
		return(0);
	} else {
		/*
		 * We couldn't resolve the dependancies, request that the
		 * inode be flushed when the dependancies can be resolved.
		 */
		pip->flags |= HAMMER_INODE_REFLUSH;
		return(-1);
	}
}
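/*
 * Only a HAMMER_MEM_RECORD_ADD returns 1 from the helper because only
 * a directory-entry addition connects the child inode into the
 * namespace.  GENERAL and delete-on-disk records may still be flushed
 * but do not contribute connectivity.
 */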
/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, int flags)
{
	int go_count;

	/*
	 * Set flush state and prevent the flusher from cycling into
	 * the next flush group.  Do not place the ip on the list yet.
	 * Inodes not in the idle state get an extra reference.
	 */
	KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
	if (ip->flush_state == HAMMER_FST_IDLE)
		hammer_ref(&ip->lock);
	ip->flush_state = HAMMER_FST_FLUSH;
	ip->flush_group = ip->hmp->flusher.next;
	++ip->hmp->flusher.group_lock;
	++ip->hmp->count_iqueued;
	++hammer_count_iqueued;

	/*
	 * We need to be able to vfsync/truncate from the backend.
	 */
	KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
	if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
		ip->flags |= HAMMER_INODE_VHELD;
		vref(ip->vp);
	}

	/*
	 * Figure out how many in-memory records we can actually flush
	 * (not including inode meta-data, buffers, etc).
	 *
	 * Do not add new records to the flush if this is a recursion or
	 * if we must still complete a flush from the previous flush cycle.
	 */
	if (flags & HAMMER_FLUSH_RECURSION) {
		go_count = 1;
	} else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_syncgrp_child_callback, NULL);
		go_count = 1;
	} else {
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_setup_child_callback, NULL);
	}

	/*
	 * This is a more involved test that includes go_count.  If we
	 * can't flush, flag the inode and return.  If go_count is 0 we
	 * were unable to flush any records in our rec_tree and
	 * must ignore the XDIRTY flag.
	 */
	if (go_count == 0) {
		if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
			ip->flags |= HAMMER_INODE_REFLUSH;

			--ip->hmp->count_iqueued;
			--hammer_count_iqueued;

			ip->flush_state = HAMMER_FST_SETUP;
			if (ip->flags & HAMMER_INODE_VHELD) {
				ip->flags &= ~HAMMER_INODE_VHELD;
				vrele(ip->vp);
			}
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp);
			}
			if (--ip->hmp->flusher.group_lock == 0)
				wakeup(&ip->hmp->flusher.group_lock);
			return;
		}
	}

	/*
	 * Snapshot the state of the inode for the backend flusher.
	 *
	 * We continue to retain save_trunc_off even when all truncations
	 * have been resolved as an optimization to determine if we can
	 * skip the B-Tree lookup for overwrite deletions.
	 *
	 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
	 * and stays in ip->flags.  Once set, it stays set until the
	 * inode is destroyed.
	 *
	 * NOTE: If a truncation from a previous flush cycle had to be
	 * continued into this one, the TRUNCATED flag will still be
	 * set in sync_flags as will WOULDBLOCK.  When this occurs
	 * we CANNOT safely integrate a new truncation from the front-end
	 * because there may be data records in-memory assigned a flush
	 * state from the previous cycle that are supposed to be flushed
	 * before the next frontend truncation.
	 */
	if ((ip->flags & (HAMMER_INODE_TRUNCATED | HAMMER_INODE_WOULDBLOCK)) ==
	    HAMMER_INODE_TRUNCATED) {
		KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
		ip->sync_trunc_off = ip->trunc_off;
		ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
		ip->flags &= ~HAMMER_INODE_TRUNCATED;
		ip->sync_flags |= HAMMER_INODE_TRUNCATED;

		/*
		 * The save_trunc_off used to cache whether the B-Tree
		 * holds any records past that point is not used until
		 * after the truncation has succeeded, so we can safely
		 * set it now.
		 */
		if (ip->save_trunc_off > ip->sync_trunc_off)
			ip->save_trunc_off = ip->sync_trunc_off;
	}
	ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
			   ~HAMMER_INODE_TRUNCATED);
	ip->sync_ino_leaf = ip->ino_leaf;
	ip->sync_ino_data = ip->ino_data;
	ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
#ifdef DEBUG_TRUNCATE
	if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
		kprintf("truncateS %016llx\n", ip->sync_trunc_off);
#endif

	/*
	 * The flusher list inherits our inode and reference.
	 */
	TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
	if (--ip->hmp->flusher.group_lock == 0)
		wakeup(&ip->hmp->flusher.group_lock);

	if (flags & HAMMER_FLUSH_SIGNAL) {
		hammer_flusher_async(ip->hmp);
	}
}
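/*
 * go_count is forced to 1 for both the recursion and WOULDBLOCK cases
 * above: the inode must join the flush group even when no new child
 * records could be included in it.
 */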
/*
 * Callback for scan of ip->rec_tree.  Try to include each record in our
 * flush.  ip->flush_group has been set but the inode has not yet been
 * moved into a flushing state.
 *
 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
 * both inodes.
 *
 * We return 1 for any record placed or found in FST_FLUSH, which prevents
 * the caller from shortcutting the flush.
 */
static int
hammer_setup_child_callback(hammer_record_t rec, void *data)
{
	hammer_inode_t target_ip;
	hammer_inode_t ip;
	int r;

	/*
	 * Deleted records are ignored.  Note that the flush detects deleted
	 * front-end records at multiple points to deal with races.  This is
	 * just the first line of defense.  The only time DELETED_FE cannot
	 * be set is when HAMMER_RECF_INTERLOCK_BE is set.
	 *
	 * Don't get confused between record deletion and, say, directory
	 * entry deletion.  The deletion of a directory entry that is on
	 * the media has nothing to do with the record deletion flags.
	 *
	 * The flush_group for a record already in a flush state must
	 * be updated.  This case can only occur if the inode deleting
	 * too many records had to be moved to the next flush group.
	 */
	if (rec->flags & (HAMMER_RECF_DELETED_FE|HAMMER_RECF_DELETED_BE)) {
		if (rec->flush_state == HAMMER_FST_FLUSH) {
			KKASSERT(rec->ip->flags & HAMMER_INODE_WOULDBLOCK);
			rec->flush_group = rec->ip->flush_group;
			r = 1;
		} else {
			r = 0;
		}
		return(r);
	}

	/*
	 * If the record is in an idle state it has no dependancies and
	 * can be flushed.
	 */
	ip = rec->ip;
	r = 0;

	switch(rec->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * Record has no setup dependancy, we can flush it.
		 */
		KKASSERT(rec->target_ip == NULL);
		rec->flush_state = HAMMER_FST_FLUSH;
		rec->flush_group = ip->flush_group;
		hammer_ref(&rec->lock);
		r = 1;
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Record has a setup dependancy.  Try to include the
		 * target ip in the flush.
		 *
		 * We have to be careful here, if we do not do the right
		 * thing we can lose track of dirty inodes and the system
		 * will lockup trying to allocate buffers.
		 */
		target_ip = rec->target_ip;
		KKASSERT(target_ip != NULL);
		KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
		if (target_ip->flush_state == HAMMER_FST_FLUSH) {
			/*
			 * If the target IP is already flushing in our group
			 * we are golden, otherwise make sure the target
			 * reflushes.
			 */
			if (target_ip->flush_group == ip->flush_group) {
				rec->flush_state = HAMMER_FST_FLUSH;
				rec->flush_group = ip->flush_group;
				hammer_ref(&rec->lock);
				r = 1;
			} else {
				target_ip->flags |= HAMMER_INODE_REFLUSH;
			}
		} else if (rec->type == HAMMER_MEM_RECORD_ADD) {
			/*
			 * If the target IP is not flushing we can force
			 * it to flush, even if it is unable to write out
			 * any of its own records we have at least one in
			 * hand that we CAN deal with.
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = ip->flush_group;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		} else {
			/*
			 * General or delete-on-disk record.
			 *
			 * XXX this needs help.  If a delete-on-disk we could
			 * disconnect the target.  If the target has its own
			 * dependancies they really need to be flushed.
			 *
			 * XXX
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = ip->flush_group;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * If the WOULDBLOCK flag is set records may have been left
		 * over from a previous flush attempt and should be moved
		 * to the current flush group.  If it is not set then all
		 * such records had better have been flushed already or
		 * already associated with the current flush group.
		 */
		if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
			rec->flush_group = ip->flush_group;
		} else {
			KKASSERT(rec->flush_group == ip->flush_group);
		}
		r = 1;
		break;
	}
	return(r);
}
/*
 * This version just moves records already in a flush state to the new
 * flush group and that is it.
 */
static int
hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
{
	hammer_inode_t ip = rec->ip;

	switch(rec->flush_state) {
	case HAMMER_FST_FLUSH:
		if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
			rec->flush_group = ip->flush_group;
		} else {
			KKASSERT(rec->flush_group == ip->flush_group);
		}
		break;
	default:
		break;
	}
	return(0);
}
/*
 * Wait for a previously queued flush to complete.  Not only do we need to
 * wait for the inode to sync out, we also may have to run the flusher again
 * to get it past the UNDO position pertaining to the flush so a crash does
 * not 'undo' our flush.
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
	hammer_mount_t hmp = ip->hmp;
	int sync_group;
	int waitcount;

	sync_group = ip->flush_group;
	waitcount = (ip->flags & HAMMER_INODE_REFLUSH) ? 2 : 1;

	if (ip->flush_state == HAMMER_FST_SETUP) {
		hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
	}
	/* XXX can we make this != FST_IDLE ? check SETUP depends */
	while (ip->flush_state == HAMMER_FST_FLUSH &&
	       (ip->flush_group - sync_group) < waitcount) {
		ip->flags |= HAMMER_INODE_FLUSHW;
		tsleep(&ip->flags, 0, "hmrwin", 0);
	}
	while (hmp->flusher.done - sync_group < waitcount) {
		hammer_flusher_sync(hmp);
	}
}
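/*
 * The second loop matters for durability: even after the inode's own
 * flush group completes, the flusher may have to run again so the UNDO
 * FIFO position covering the flush is retired and a crash cannot roll
 * the data back.
 */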
/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip)
{
	hammer_mount_t hmp;
	int dorel;

	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

	hmp = ip->hmp;

	/*
	 * Merge left-over flags back into the frontend and fix the state.
	 * Incomplete truncations are retained by the backend.
	 */
	ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
	ip->sync_flags &= HAMMER_INODE_TRUNCATED;

	/*
	 * The backend may have adjusted nlinks, so if the adjusted nlinks
	 * does not match the frontend set the frontend's DDIRTY flag again.
	 */
	if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
		ip->flags |= HAMMER_INODE_DDIRTY;

	/*
	 * Fix up the dirty buffer status.
	 */
	if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
		ip->flags |= HAMMER_INODE_BUFS;
	}

	/*
	 * Re-set the XDIRTY flag if some of the inode's in-memory records
	 * could not be flushed.
	 */
	KKASSERT((RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
		 (!RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) != 0));

	/*
	 * Do not lose track of inodes which no longer have vnode
	 * associations, otherwise they may never get flushed again.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
		ip->flags |= HAMMER_INODE_REFLUSH;

	/*
	 * Clean up the vnode ref
	 */
	if (ip->flags & HAMMER_INODE_VHELD) {
		ip->flags &= ~HAMMER_INODE_VHELD;
		vrele(ip->vp);
	}

	/*
	 * Adjust flush_state.  The target state (idle or setup) shouldn't
	 * be terribly important since we will reflush if we really need
	 * to do anything.
	 *
	 * If the WOULDBLOCK flag is set we must re-flush immediately
	 * to continue a potentially large deletion.  The flag also causes
	 * the hammer_setup_child_callback() to move records in the old
	 * flush group to the new one.
	 */
	if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
		ip->flush_state = HAMMER_FST_IDLE;
		hammer_flush_inode_core(ip, HAMMER_FLUSH_SIGNAL);
		ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
		dorel = 1;
	} else if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
		ip->flush_state = HAMMER_FST_IDLE;
		dorel = 1;
	} else {
		ip->flush_state = HAMMER_FST_SETUP;
		dorel = 0;
	}

	--hmp->count_iqueued;
	--hammer_count_iqueued;

	/*
	 * If the frontend made more changes and requested another flush,
	 * then try to get it running.
	 */
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
	}

	/*
	 * If the inode is now clean drop the space reservation.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    (ip->flags & HAMMER_INODE_RSV_INODES)) {
		ip->flags &= ~HAMMER_INODE_RSV_INODES;
		--hmp->rsv_inodes;
	}

	/*
	 * Finally, if the frontend is waiting for a flush to complete,
	 * wake it up.
	 */
	if (ip->flush_state != HAMMER_FST_FLUSH) {
		if (ip->flags & HAMMER_INODE_FLUSHW) {
			ip->flags &= ~HAMMER_INODE_FLUSHW;
			wakeup(&ip->flags);
		}
	}
	if (dorel)
		hammer_rel_inode(ip, 0);
}
/*
 * Called from hammer_sync_inode() to synchronize in-memory records
 * to the media.
 */
static int
hammer_sync_record_callback(hammer_record_t record, void *data)
{
	hammer_cursor_t cursor = data;
	hammer_transaction_t trans = cursor->trans;
	hammer_mount_t hmp = trans->hmp;
	int error;

	/*
	 * Skip records that do not belong to the current flush.
	 */
	++hammer_stats_record_iterations;
	if (record->flush_state != HAMMER_FST_FLUSH)
		return(0);

#if 1
	if (record->flush_group != record->ip->flush_group) {
		kprintf("sync_record %p ip %p bad flush group %d %d\n",
			record, record->ip,
			record->flush_group, record->ip->flush_group);
		return(0);
	}
#endif
	KKASSERT(record->flush_group == record->ip->flush_group);

	/*
	 * Interlock the record using the BE flag.  Once BE is set the
	 * frontend cannot change the state of FE.
	 *
	 * NOTE: If FE is set prior to us setting BE we still sync the
	 * record out, but the flush completion code converts it to
	 * a delete-on-disk record instead of destroying it.
	 */
	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
	record->flags |= HAMMER_RECF_INTERLOCK_BE;

	/*
	 * The backend may have already disposed of the record.
	 */
	if (record->flags & HAMMER_RECF_DELETED_BE) {
		error = 0;
		goto done;
	}

	/*
	 * If the whole inode is being deleted, all on-disk records will
	 * be deleted very soon, we can't sync any new records to disk
	 * because they will be deleted in the same transaction they were
	 * created in (delete_tid == create_tid), which will assert.
	 *
	 * XXX There may be a case with RECORD_ADD with DELETED_FE set
	 * that we currently panic on.
	 */
	if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
		switch(record->type) {
		case HAMMER_MEM_RECORD_DATA:
			/*
			 * We don't have to do anything, if the record was
			 * committed the space will have been accounted for
			 * anyway.
			 */
			/* fall through */
		case HAMMER_MEM_RECORD_GENERAL:
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
			error = 0;
			goto done;
		case HAMMER_MEM_RECORD_ADD:
			panic("hammer_sync_record_callback: illegal add "
			      "during inode deletion record %p", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_INODE:
			panic("hammer_sync_record_callback: attempt to "
			      "sync inode record %p?", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_DEL:
			/*
			 * Follow through and issue the on-disk deletion
			 */
			break;
		}
	}

	/*
	 * If DELETED_FE is set special handling is needed for directory
	 * entries.  Dependant pieces related to the directory entry may
	 * have already been synced to disk.  If this occurs we have to
	 * sync the directory entry and then change the in-memory record
	 * from an ADD to a DELETE to cover the fact that it's been
	 * deleted by the frontend.
	 *
	 * A directory delete covering record (MEM_RECORD_DEL) can never
	 * be deleted by the frontend.
	 *
	 * Any other record type (aka DATA) can be deleted by the frontend.
	 * XXX At the moment the flusher must skip it because there may
	 * be another data record in the flush group for the same block,
	 * meaning that some frontend data changes can leak into the backend's
	 * synchronization point.
	 */
	if (record->flags & HAMMER_RECF_DELETED_FE) {
		if (record->type == HAMMER_MEM_RECORD_ADD) {
			record->flags |= HAMMER_RECF_CONVERT_DELETE;
		} else {
			KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
			record->flags |= HAMMER_RECF_DELETED_BE;
			error = 0;
			goto done;
		}
	}

	/*
	 * Assign the create_tid for new records.  Deletions already
	 * have the record's entire key properly set up.
	 */
	if (record->type != HAMMER_MEM_RECORD_DEL)
		record->leaf.base.create_tid = trans->tid;
	record->leaf.create_ts = trans->time32;
	for (;;) {
		error = hammer_ip_sync_record_cursor(cursor, record);
		if (error != EDEADLK)
			break;
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
					   record->ip);
		if (error)
			break;
	}
	record->flags &= ~HAMMER_RECF_CONVERT_DELETE;

	if (error) {
		error = -error;
		if (error != -ENOSPC) {
			kprintf("hammer_sync_record_callback: sync failed rec "
				"%p, error %d\n", record, error);
			Debugger("sync failed rec");
		}
	}
done:
	hammer_flush_record_done(record, error);

	/*
	 * Do partial finalization if we have built up too many dirty
	 * buffers.  Otherwise a buffer cache deadlock can occur when
	 * doing things like creating tens of thousands of tiny files.
	 *
	 * We must release our cursor lock to avoid a 3-way deadlock
	 * due to the exclusive sync lock the finalizer must get.
	 */
	if (hammer_flusher_meta_limit(hmp)) {
		hammer_unlock_cursor(cursor, 0);
		hammer_flusher_finalize(trans, 0);
		hammer_lock_cursor(cursor, 0);
	}

	return(error);
}
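/*
 * A negated errno returned from this callback aborts the RB_SCAN in
 * hammer_sync_inode().  ENOSPC is deliberately excluded from the
 * debugger trap above since running out of space is an expected
 * failure mode rather than a structural error.
 */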
2163 * XXX error handling
2166 hammer_sync_inode(hammer_inode_t ip)
2168 struct hammer_transaction trans;
2169 struct hammer_cursor cursor;
2170 hammer_node_t tmp_node;
2171 hammer_record_t depend;
2172 hammer_record_t next;
2173 int error, tmp_error;
2176 if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
2179 hammer_start_transaction_fls(&trans, ip->hmp);
2180 error = hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);

	/*
	 * Any directory records referencing this inode which are not in
	 * our current flush group must adjust our nlink count for the
	 * purposes of synchronization to disk.
	 *
	 * Records which are in our flush group can be unlinked from our
	 * inode now, potentially allowing the inode to be physically
	 * deleted.
	 *
	 * This cannot block.
	 */
	nlinks = ip->ino_data.nlinks;
	next = TAILQ_FIRST(&ip->target_list);
	while ((depend = next) != NULL) {
		next = TAILQ_NEXT(depend, target_entry);
		if (depend->flush_state == HAMMER_FST_FLUSH &&
		    depend->flush_group == ip->hmp->flusher.act) {
			/*
			 * If this is an ADD that was deleted by the frontend
			 * the frontend nlinks count will have already been
			 * decremented, but the backend is going to sync its
			 * directory entry and must account for it.  The
			 * record will be converted to a delete-on-disk when
			 * it gets synced.
			 *
			 * If the ADD was not deleted by the frontend we
			 * can remove the dependency from our target_list.
			 */
			if (depend->flags & HAMMER_RECF_DELETED_FE) {
				++nlinks;
			} else {
				TAILQ_REMOVE(&ip->target_list, depend,
					     target_entry);
				depend->target_ip = NULL;
			}
		} else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
			/*
			 * Not part of our flush group
			 */
			KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
			switch(depend->type) {
			case HAMMER_MEM_RECORD_ADD:
				--nlinks;
				break;
			case HAMMER_MEM_RECORD_DEL:
				++nlinks;
				break;
			default:
				break;
			}
		}
	}
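
	/*
	 * Worked example: ino_data.nlinks is 2 because the frontend just
	 * created a second hard link, but that link's directory ADD record
	 * is not in this flush group.  The media must not see the link
	 * yet, so the switch above drops the sync-time count to 1; a
	 * pending-but-unflushed DEL is the mirror case and re-adds one.
	 */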

	/*
	 * Set dirty if we had to modify the link count.
	 */
	if (ip->sync_ino_data.nlinks != nlinks) {
		KKASSERT((int64_t)nlinks >= 0);
		ip->sync_ino_data.nlinks = nlinks;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
	}

	/*
	 * If there is a truncation queued destroy any data past the
	 * (aligned) truncation point.  Userland will have dealt with the
	 * buffer containing the truncation point for us.
	 *
	 * We don't flush pending frontend data buffers until after we've
	 * dealt with the truncation.
	 */
	if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
		/*
		 * Interlock trunc_off.  The VOP front-end may continue to
		 * make adjustments to it while we are blocked.
		 */
		off_t trunc_off;
		off_t aligned_trunc_off;
		int blkmask;

		trunc_off = ip->sync_trunc_off;
		blkmask = hammer_blocksize(trunc_off) - 1;
		aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
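
		/*
		 * e.g. with HAMMER's 16KB small-block size: a trunc_off of
		 * 100000 yields blkmask 16383 and aligned_trunc_off
		 * (100000 + 16383) & ~16383 = 114688, the first whole-block
		 * boundary at or above the truncation point.  Only data at
		 * or past that boundary is deleted on-media below.
		 */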

		/*
		 * Delete any whole blocks on-media.  The front-end has
		 * already cleaned out any partial block and made it
		 * pending.  The front-end may have updated trunc_off
		 * while we were blocked so we only use sync_trunc_off.
		 *
		 * This operation can blow out the buffer cache, EWOULDBLOCK
		 * means we were unable to complete the deletion.  The
		 * deletion will update sync_trunc_off in that case.
		 */
		error = hammer_ip_delete_range(&cursor, ip,
					       aligned_trunc_off,
					       0x7FFFFFFFFFFFFFFFLL, 2);
		if (error == EWOULDBLOCK) {
			ip->flags |= HAMMER_INODE_WOULDBLOCK;
			error = 0;
			goto defer_buffer_flush;
		}
		if (error)
			Debugger("hammer_ip_delete_range errored");

		/*
		 * Clear the truncation flag on the backend after we have
		 * completed the deletions.  Backend data is now good again
		 * (including new records we are about to sync, below).
		 *
		 * Leave sync_trunc_off intact.  As we write additional
		 * records the backend will update sync_trunc_off.  This
		 * tells the backend whether it can skip the overwrite
		 * test.  This should work properly even when the backend
		 * writes full blocks where the truncation point straddles
		 * the block because the comparison is against the base
		 * offset of the record.
		 */
		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
		/* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
	} else {
		error = 0;
	}

	/*
	 * Now sync related records.  These will typically be directory
	 * entries, records tracking direct-writes, or delete-on-disk
	 * records.
	 */
	if (error == 0) {
		tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				    hammer_sync_record_callback, &cursor);
		if (tmp_error < 0)
			tmp_error = -tmp_error;
		if (tmp_error)
			error = tmp_error;
	}
	hammer_cache_node(&ip->cache[1], cursor.node);
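
	/*
	 * (ip->cache[1] remembers the B-Tree node the record scan ended
	 * on so the next flush of this inode can seed its cursor there;
	 * ip->cache[0], used below, tracks the inode record itself.)
	 */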

	/*
	 * Re-seek for inode update, assuming our cache hasn't been ripped
	 * out from under us.
	 */
	if (error == 0) {
		tmp_node = hammer_ref_node_safe(ip->hmp, &ip->cache[0], &error);
		if (tmp_node) {
			hammer_cursor_downgrade(&cursor);
			hammer_lock_sh(&tmp_node->lock);
			if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
				hammer_cursor_seek(&cursor, tmp_node, 0);
			hammer_unlock(&tmp_node->lock);
			hammer_rel_node(tmp_node);
		}
		error = 0;
	}

	/*
	 * If we are deleting the inode the frontend had better not have
	 * any active references on elements making up the inode.
	 *
	 * The call to hammer_ip_delete_clean() cleans up auxiliary records
	 * but not DB or DATA records.  Those must have already been deleted
	 * by the normal truncation mechanic.
	 */
	if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
	    RB_EMPTY(&ip->rec_tree) &&
	    (ip->sync_flags & HAMMER_INODE_DELETING) &&
	    (ip->flags & HAMMER_INODE_DELETED) == 0) {
		int count1 = 0;

		error = hammer_ip_delete_clean(&cursor, ip, &count1);
		if (error == 0) {
			ip->flags |= HAMMER_INODE_DELETED;
			ip->sync_flags &= ~HAMMER_INODE_DELETING;
			ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
			KKASSERT(RB_EMPTY(&ip->rec_tree));

			/*
			 * Set delete_tid in both the frontend and backend
			 * copy of the inode record.  The DELETED flag handles
			 * this, do not set RDIRTY.
			 */
			ip->ino_leaf.base.delete_tid = trans.tid;
			ip->sync_ino_leaf.base.delete_tid = trans.tid;
			ip->ino_leaf.delete_ts = trans.time32;
			ip->sync_ino_leaf.delete_ts = trans.time32;

			/*
			 * Adjust the inode count in the volume header
			 */
			hammer_sync_lock_sh(&trans);
			if (ip->flags & HAMMER_INODE_ONDISK) {
				hammer_modify_volume_field(&trans,
							   trans.rootvol,
							   vol0_stat_inodes);
				--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans.rootvol);
			}
			hammer_sync_unlock(&trans);
		} else {
			Debugger("hammer_ip_delete_clean errored");
		}
	}

	ip->sync_flags &= ~HAMMER_INODE_BUFS;

	if (error)
		Debugger("RB_SCAN errored");

defer_buffer_flush:
	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 * DELETED and ONDISK are managed only in ip->flags.
	 *
	 * In the case of a deferred buffer flush we still update the
	 * on-disk inode to satisfy visibility requirements if there
	 * happen to be directory dependencies.
	 */
	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags.
		 * The delete flag takes care of things.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t record = RB_ROOT(&ip->rec_tree);
			hammer_ref(&record->lock);
			KKASSERT(record->lock.refs == 1);
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
			hammer_rel_mem_record(record);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set DDIRTY to force
		 * an initial record to be written.
		 *
		 * Also set the create_tid in both the frontend and backend
		 * copy of the inode record.
		 */
		ip->ino_leaf.base.create_tid = trans.tid;
		ip->ino_leaf.create_ts = trans.time32;
		ip->sync_ino_leaf.base.create_tid = trans.tid;
		ip->sync_ino_leaf.create_ts = trans.time32;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
		break;
	}
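
	/*
	 * Summary of the four DELETED/ONDISK states handled above:
	 *
	 *	DELETED+ONDISK	the DELETED flag alone drives the media
	 *			update; frontend mod flags are cleared.
	 *	DELETED only	the inode never reached the media; discard
	 *			any remaining in-memory records outright.
	 *	ONDISK only	normal re-sync, nothing extra to set.
	 *	neither		first sync ever; force an initial inode
	 *			record with DDIRTY and a fresh create_tid.
	 */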

	/*
	 * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
	 * is already on-disk the old record is marked as deleted.
	 *
	 * If DELETED is set hammer_update_inode() will delete the existing
	 * record without writing out a new one.
	 *
	 * If *ONLY* the ATIME and/or MTIME flags are set we can update
	 * the record in-place.
	 */
	if (ip->flags & HAMMER_INODE_DELETED) {
		error = hammer_update_inode(&cursor, ip);
	} else
	if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
	    (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
		error = hammer_update_itimes(&cursor, ip);
	} else
	if (ip->sync_flags & (HAMMER_INODE_DDIRTY |
			      HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
		error = hammer_update_inode(&cursor, ip);
	}
	if (error)
		Debugger("hammer_update_itimes/inode errored");
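
	/*
	 * (Updating atime/mtime in-place via hammer_update_itimes() avoids
	 * laying down a new inode record, so pure access-time churn does
	 * not appear to generate additional history the way a DDIRTY
	 * update does.)
	 */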

	/*
	 * Save the TID we used to sync the inode with to make sure we
	 * do not improperly reuse it.
	 */
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return(error);
}

/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
	struct vnode *vp;

	/*
	 * Set the DELETING flag when the link count drops to 0 and the
	 * OS no longer has any opens on the inode.
	 *
	 * The backend will clear DELETING (a mod flag) and set DELETED
	 * (a state flag) when it is actually able to perform the
	 * operation.
	 */
	if (ip->ino_data.nlinks == 0 &&
	    (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
		ip->flags |= HAMMER_INODE_DELETING;
		ip->flags |= HAMMER_INODE_TRUNCATED;
		ip->trunc_off = 0;
		vp = NULL;
		if (getvp) {
			if (hammer_get_vnode(ip, &vp) != 0)
				return;
		}

		/*
		 * Final cleanup
		 */
		if (ip->vp) {
			vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
			vnode_pager_setsize(ip->vp, 0);
		}
		if (getvp) {
			vput(vp);
		}
	}
}

/*
 * Re-test an inode when a dependency has gone away to see if we
 * can chain flush it.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		hammer_ref(&ip->lock);
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
		hammer_rel_inode(ip, 0);
	}
}

/*
 * Clear the RECLAIM flag on an inode.  This occurs when the inode is
 * reassociated with a vp or just before it gets freed.
 *
 * Wakeup one thread blocked waiting on reclaims to complete.  Note that
 * the inode the thread is waiting on behalf of is a different inode than
 * the inode we are called with.  This is to create a pipeline.
 */
static void
hammer_inode_wakereclaims(hammer_inode_t ip)
{
	struct hammer_reclaim *reclaim;
	hammer_mount_t hmp = ip->hmp;

	if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
		return;

	--hammer_count_reclaiming;
	--hmp->inode_reclaims;
	ip->flags &= ~HAMMER_INODE_RECLAIM;

	if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
		TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
		reclaim->okydoky = 1;
		wakeup(reclaim);
	}
}

/*
 * Set up our reclaim pipeline.  We only let so many detached (and dirty)
 * inodes build up before we start blocking.
 *
 * When we block we don't care *which* inode has finished reclaiming,
 * as long as one does.  This is somewhat heuristic... we also put a
 * cap on how long we are willing to wait.
 */
void
hammer_inode_waitreclaims(hammer_mount_t hmp)
{
	struct hammer_reclaim reclaim;
	int delay;

	if (hmp->inode_reclaims > HAMMER_RECLAIM_WAIT) {
		reclaim.okydoky = 0;
		TAILQ_INSERT_TAIL(&hmp->reclaim_list,
				  &reclaim, entry);
	} else {
		reclaim.okydoky = 1;
	}

	if (reclaim.okydoky == 0) {
		delay = (hmp->inode_reclaims - HAMMER_RECLAIM_WAIT) * hz /
			HAMMER_RECLAIM_WAIT;
		tsleep(&reclaim, 0, "hmrrcm", delay + 1);
		if (reclaim.okydoky == 0)
			TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
	}
}
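
/*
 * Illustrative numbers for the delay computation above: with hz = 100
 * and twice the limit outstanding (inode_reclaims == 2 *
 * HAMMER_RECLAIM_WAIT) the delay works out to one second's worth of
 * ticks.  The delay is 0 when we are only slightly over the limit, and
 * a tsleep() timeout of 0 would mean "no time limit", so the +1
 * guarantees a bounded sleep even if the wakeup is missed.
 */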