/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.82 2008/06/23 07:31:14 dillon Exp $
 */
#include "hammer.h"
#include <vm/vm_extern.h>
static int hammer_unload_inode(struct hammer_inode *ip);
static void hammer_flush_inode_core(hammer_inode_t ip, int flags);
static int hammer_setup_child_callback(hammer_record_t rec, void *data);
static int hammer_setup_parent_inodes(hammer_inode_t ip);
static int hammer_setup_parent_inodes_helper(hammer_record_t record);
static void hammer_inode_wakereclaims(hammer_inode_t ip);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif
/*
 * Red-Black tree support for inode structures.
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
	if (ip1->obj_localization < ip2->obj_localization)
		return(-1);
	if (ip1->obj_localization > ip2->obj_localization)
		return(1);
	if (ip1->obj_id < ip2->obj_id)
		return(-1);
	if (ip1->obj_id > ip2->obj_id)
		return(1);
	if (ip1->obj_asof < ip2->obj_asof)
		return(-1);
	if (ip1->obj_asof > ip2->obj_asof)
		return(1);
	return(0);
}
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
	if (info->obj_localization < ip->obj_localization)
		return(-1);
	if (info->obj_localization > ip->obj_localization)
		return(1);
	if (info->obj_id < ip->obj_id)
		return(-1);
	if (info->obj_id > ip->obj_id)
		return(1);
	if (info->obj_asof < ip->obj_asof)
		return(-1);
	if (info->obj_asof > ip->obj_asof)
		return(1);
	return(0);
}
/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
	hammer_inode_info_t info = data;

	if (ip->obj_localization > info->obj_localization)
		return(1);
	if (ip->obj_localization < info->obj_localization)
		return(-1);
	if (ip->obj_id > info->obj_id)
		return(1);
	if (ip->obj_id < info->obj_id)
		return(-1);
	return(0);
}
RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
		    hammer_inode_info_cmp, hammer_inode_info_t);
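
/*
 * Illustrative sketch (added commentary, not compiled): the comparators
 * above order inodes by the composite key (obj_localization, obj_id,
 * obj_asof), so a cached-snapshot lookup reduces to filling in a
 * hammer_inode_info and calling the generated XLOOKUP function:
 *
 *	struct hammer_inode_info iinfo;
 *
 *	iinfo.obj_id = obj_id;
 *	iinfo.obj_asof = asof;
 *	iinfo.obj_localization = localization;
 *	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
 *
 * hammer_get_inode() below performs exactly this sequence before falling
 * back to a B-Tree lookup on-media.
 */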
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	/*
	 * If the inode no longer has visibility in the filesystem try to
	 * recycle it immediately, even if the inode is dirty.  Recycling
	 * it quickly allows the system to reclaim buffer cache and VM
	 * resources which can matter a lot in a heavily loaded system.
	 *
	 * This can deadlock in vfsync() if we aren't careful.
	 *
	 * Do not queue the inode to the flusher if we still have visibility,
	 * otherwise namespace calls such as chmod will unnecessarily generate
	 * multiple inode updates.
	 */
	hammer_inode_unloadable_check(ip, 0);
	if (ip->ino_data.nlinks == 0) {
		if (ip->flags & HAMMER_INODE_MODMASK)
			hammer_flush_inode(ip, 0);
		vrecycle(ap->a_vp);
	}
	return(0);
}
/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;
	hammer_mount_t hmp;
	struct vnode *vp;

	vp = ap->a_vp;

	if ((ip = vp->v_data) != NULL) {
		hmp = ip->hmp;
		vp->v_data = NULL;
		ip->vp = NULL;

		if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
			++hammer_count_reclaiming;
			++hmp->inode_reclaims;
			ip->flags |= HAMMER_INODE_RECLAIM;
			if (hmp->inode_reclaims > HAMMER_RECLAIM_FLUSH &&
			    (hmp->inode_reclaims & 255) == 0) {
				hammer_flusher_async(hmp);
			}
		}
		hammer_rel_inode(ip, 1);
	}
	return(0);
}
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
	hammer_mount_t hmp;
	struct vnode *vp;
	int error = 0;

	hmp = ip->hmp;

	for (;;) {
		if ((vp = ip->vp) == NULL) {
			error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
			if (error)
				break;
			hammer_lock_ex(&ip->lock);
			if (ip->vp != NULL) {
				hammer_unlock(&ip->lock);
				vp = *vpp;
				vp->v_type = VBAD;
				vx_put(vp);
				continue;
			}
			hammer_ref(&ip->lock);
			vp = *vpp;
			ip->vp = vp;
			vp->v_type =
				hammer_get_vnode_type(ip->ino_data.obj_type);

			hammer_inode_wakereclaims(ip);

			switch(ip->ino_data.obj_type) {
			case HAMMER_OBJTYPE_CDEV:
			case HAMMER_OBJTYPE_BDEV:
				vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
				addaliasu(vp, ip->ino_data.rmajor,
					  ip->ino_data.rminor);
				break;
			case HAMMER_OBJTYPE_FIFO:
				vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
				break;
			default:
				break;
			}

			/*
			 * Only mark as the root vnode if the ip is not
			 * historical, otherwise the VFS cache will get
			 * confused.  The other half of the special handling
			 * is in hammer_vop_nlookupdotdot().
			 *
			 * Pseudo-filesystem roots also do not count.
			 */
			if (ip->obj_id == HAMMER_OBJID_ROOT &&
			    ip->obj_asof == hmp->asof &&
			    ip->obj_localization == 0) {
				vp->v_flag |= VROOT;
			}

			vp->v_data = (void *)ip;
			/* vnode locked by getnewvnode() */
			/* make related vnode dirty if inode dirty? */
			hammer_unlock(&ip->lock);
			if (vp->v_type == VREG)
				vinitvmio(vp, ip->ino_data.size);
			break;
		}

		/*
		 * loop if the vget fails (aka races), or if the vp
		 * no longer matches ip->vp.
		 */
		if (vget(vp, LK_EXCLUSIVE) == 0) {
			if (vp == ip->vp)
				break;
			vput(vp);
		}
	}
	*vpp = vp;
	return(error);
}
/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
			    int (*callback)(hammer_inode_t ip, void *data),
			    void *data)
{
	hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
				   hammer_inode_info_cmp_all_history,
				   callback, iinfo);
}
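
/*
 * Usage sketch (added commentary; the callback name is hypothetical):
 *
 *	static int
 *	invalidate_callback(hammer_inode_t ip, void *data)
 *	{
 *		... operate on one cached snapshot of the object ...
 *		return(0);
 *	}
 *
 *	iinfo.obj_id = obj_id;
 *	iinfo.obj_asof = 0;		(not tested by cmp_all_history)
 *	iinfo.obj_localization = localization;
 *	hammer_scan_inode_snapshots(hmp, &iinfo, invalidate_callback, data);
 */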
/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
		 u_int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
		 int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * we are golden.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	iinfo.obj_localization = localization;
loop:
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	if (ip) {
		hammer_ref(&ip->lock);
		*errorp = 0;
		return(ip);
	}

	/*
	 * Allocate a new inode structure and deal with races later.
	 */
	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;
	ip->obj_id = obj_id;
	ip->obj_asof = iinfo.obj_asof;
	ip->obj_localization = localization;
	ip->hmp = hmp;
	ip->flags = flags & HAMMER_INODE_RO;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;
	if (hmp->ronly)
		ip->flags |= HAMMER_INODE_RO;
	ip->sync_trunc_off = ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	/*
	 * Locate the on-disk inode.
	 */
retry:
	hammer_init_cursor(trans, &cursor, (dip ? &dip->cache[0] : NULL), NULL);
	cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;
	cursor.asof = iinfo.obj_asof;
	cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
		       HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);
		goto retry;
	}

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	if (*errorp == 0) {
		ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
		ip->ino_data = cursor.data->inode;

		/*
		 * cache[0] tries to cache the location of the object inode.
		 * The assumption is that it is near the directory inode.
		 *
		 * cache[1] tries to cache the location of the object data.
		 * The assumption is that it is near the directory data.
		 */
		hammer_cache_node(&ip->cache[0], cursor.node);
		if (dip && dip->cache[1].node)
			hammer_cache_node(&ip->cache[1], dip->cache[1].node);

		/*
		 * The file should not contain any data past the file size
		 * stored in the inode.  Setting sync_trunc_off to the
		 * file size instead of max reduces B-Tree lookup overheads
		 * on append by allowing the flusher to avoid checking for
		 * record overwrites.
		 */
		ip->sync_trunc_off = ip->ino_data.size;
	}

	/*
	 * The inode is placed on the red-black tree and will be synced to
	 * the media when flushed or by the filesystem sync.  If this races
	 * another instantiation/lookup the insertion will fail.
	 */
	if (*errorp == 0) {
		hammer_ref(&ip->lock);
		if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
			hammer_uncache_node(&ip->cache[0]);
			hammer_uncache_node(&ip->cache[1]);
			KKASSERT(ip->lock.refs == 1);
			--hammer_count_inodes;
			--hmp->count_inodes;
			kfree(ip, M_HAMMER);
			hammer_done_cursor(&cursor);
			goto loop;
		}
		ip->flags |= HAMMER_INODE_ONDISK;
	} else {
		if (ip->flags & HAMMER_INODE_RSV_INODES) {
			ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
			--hmp->rsv_inodes;
		}
		hmp->rsv_databufs -= ip->rsv_databufs;
		ip->rsv_databufs = 0;			       /* sanity */

		--hammer_count_inodes;
		--hmp->count_inodes;
		kfree(ip, M_HAMMER);
		ip = NULL;
	}
	hammer_done_cursor(&cursor);
	return(ip);
}
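
/*
 * Note (added commentary): hammer_btree_lookup() can return EDEADLK when
 * acquiring the cursor's locks would deadlock against another B-Tree
 * operation.  The idiom used throughout this file is visible above:
 * throw the cursor away with hammer_done_cursor(), re-initialize it,
 * and retry the whole operation, e.g.
 *
 *	*errorp = hammer_btree_lookup(&cursor);
 *	if (*errorp == EDEADLK) {
 *		hammer_done_cursor(&cursor);
 *		goto retry;
 *	}
 */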
/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.
 *
 * The inode is created in-memory.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred, hammer_inode_t dip,
		    int pseudofs, struct hammer_inode **ipp)
{
	hammer_mount_t hmp;
	hammer_inode_t ip;
	uid_t xuid;
	u_int32_t localization;
	int error;

	hmp = trans->hmp;

	/*
	 * Assign the localization domain.  If dip is NULL we are creating
	 * a pseudo-fs and must locate an unused localization domain.
	 */
	if (pseudofs) {
		for (localization = HAMMER_DEF_LOCALIZATION;
		     localization < HAMMER_LOCALIZE_PSEUDOFS_MASK;
		     localization += HAMMER_LOCALIZE_PSEUDOFS_INC) {
			ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
					      hmp->asof, localization,
					      0, &error);
			if (error == ENOENT)
				break;
			if (ip)
				hammer_rel_inode(ip, 0);
		}
	} else {
		localization = dip->obj_localization;
	}

	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	++hmp->count_inodes;

	/*
	 * Allocate a new object id.  If creating a new pseudo-fs the
	 * obj_id is the root object id.
	 */
	if (pseudofs)
		ip->obj_id = HAMMER_OBJID_ROOT;
	else
		ip->obj_id = hammer_alloc_objid(trans, dip);
	ip->obj_localization = localization;

	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->hmp = hmp;
	ip->flush_state = HAMMER_FST_IDLE;
	ip->flags = HAMMER_INODE_DDIRTY |
		    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
	ip->cache[0].ip = ip;
	ip->cache[1].ip = ip;

	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->target_list);

	ip->ino_data.atime = trans->time;
	ip->ino_data.mtime = trans->time;
	ip->ino_data.size = 0;
	ip->ino_data.nlinks = 0;

	/*
	 * A nohistory designator on the parent directory is inherited by
	 * the child.  We will do this even for pseudo-fs creation... the
	 * sysad can turn it off.
	 */
	ip->ino_data.uflags = dip->ino_data.uflags &
			      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);

	ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_leaf.base.localization = ip->obj_localization +
					 HAMMER_LOCALIZE_INODE;
	ip->ino_leaf.base.obj_id = ip->obj_id;
	ip->ino_leaf.base.key = 0;
	ip->ino_leaf.base.create_tid = 0;
	ip->ino_leaf.base.delete_tid = 0;
	ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->time;

	/*
	 * Setup the ".." pointer.  This only needs to be done for directories
	 * but we do it for all objects as a recovery aid.
	 *
	 * The parent_obj_localization field only applies to pseudo-fs roots.
	 */
	ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
	    ip->obj_id == HAMMER_OBJID_ROOT) {
		ip->ino_data.ext.obj.parent_obj_localization =
						dip->obj_localization;
	}

	switch(ip->ino_leaf.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;
		break;
	default:
		break;
	}

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	xuid = hammer_to_unix_xid(&dip->ino_data.uid);
	xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
				     &vap->va_mode);
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
	else
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
	else
		ip->ino_data.gid = dip->ino_data.gid;

	hammer_ref(&ip->lock);
	if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		hammer_unref(&ip->lock);
		panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
	}
	*ipp = ip;
	return(0);
}
/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_record_t record;
	int error;

retry:
	error = 0;

	/*
	 * If the inode has a presence on-disk then locate it and mark
	 * it deleted, setting DELONDISK.
	 *
	 * The record may or may not be physically deleted, depending on
	 * the retention policy.
	 */
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_normalize_cursor(cursor);
		cursor->key_beg.localization = ip->obj_localization +
					       HAMMER_LOCALIZE_INODE;
		cursor->key_beg.obj_id = ip->obj_id;
		cursor->key_beg.key = 0;
		cursor->key_beg.create_tid = 0;
		cursor->key_beg.delete_tid = 0;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor->key_beg.obj_type = 0;
		cursor->asof = ip->obj_asof;
		cursor->flags &= ~HAMMER_CURSOR_INITMASK;
		cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
		cursor->flags |= HAMMER_CURSOR_BACKEND;

		error = hammer_btree_lookup(cursor);
		if (hammer_debug_inode)
			kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
		if (error) {
			kprintf("error %d\n", error);
			Debugger("hammer_update_inode");
		}

		if (error == 0) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			if (hammer_debug_inode)
				kprintf(" error %d\n", error);
			if (error && error != EDEADLK) {
				kprintf("error %d\n", error);
				Debugger("hammer_update_inode2");
			}
			if (error == 0) {
				ip->flags |= HAMMER_INODE_DELONDISK;
			}
			if (cursor->node)
				hammer_cache_node(&ip->cache[0], cursor->node);
		}
		if (error == EDEADLK) {
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("IPDED %p %d\n", ip, error);
			if (error == 0)
				goto retry;
		}
	}

	/*
	 * Ok, write out the initial record or a new record (after deleting
	 * the old one), unless the DELETED flag is set.  This routine will
	 * clear DELONDISK if it writes out a record.
	 *
	 * Update our inode statistics if this is the first application of
	 * the inode on-disk.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		/*
		 * Generate a record and write it to the media
		 */
		record = hammer_alloc_mem_record(ip, 0);
		record->type = HAMMER_MEM_RECORD_INODE;
		record->flush_state = HAMMER_FST_FLUSH;
		record->leaf = ip->sync_ino_leaf;
		record->leaf.base.create_tid = trans->tid;
		record->leaf.data_len = sizeof(ip->sync_ino_data);
		record->data = (void *)&ip->sync_ino_data;
		record->flags |= HAMMER_RECF_INTERLOCK_BE;
		for (;;) {
			error = hammer_ip_sync_record_cursor(cursor, record);
			if (hammer_debug_inode)
				kprintf("GENREC %p rec %08x %d\n",
					ip, record->flags, error);
			if (error != EDEADLK)
				break;
			hammer_done_cursor(cursor);
			error = hammer_init_cursor(trans, cursor,
						   &ip->cache[0], ip);
			if (hammer_debug_inode)
				kprintf("GENREC reinit %d\n", error);
			if (error)
				break;
		}
		if (error) {
			kprintf("error %d\n", error);
			Debugger("hammer_update_inode3");
		}

		/*
		 * The record isn't managed by the inode's record tree,
		 * destroy it whether we succeed or fail.
		 */
		record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
		record->flags |= HAMMER_RECF_DELETED_FE;
		record->flush_state = HAMMER_FST_IDLE;
		hammer_rel_mem_record(record);

		/*
		 * Finish up.
		 */
		if (error == 0) {
			if (hammer_debug_inode)
				kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
			ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
					    HAMMER_INODE_ATIME |
					    HAMMER_INODE_MTIME);
			ip->flags &= ~HAMMER_INODE_DELONDISK;

			/*
			 * Root volume count of inodes
			 */
			if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
				hammer_modify_volume_field(trans,
							   trans->rootvol,
							   vol0_stat_inodes);
				++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans->rootvol);
				ip->flags |= HAMMER_INODE_ONDISK;
				if (hammer_debug_inode)
					kprintf("NOWONDISK %p\n", ip);
			}
		}
	}

	/*
	 * If the inode has been destroyed, clean out any left-over flags
	 * that may have been set by the frontend.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
				    HAMMER_INODE_ATIME |
				    HAMMER_INODE_MTIME);
	}
	return(error);
}
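
/*
 * Added commentary: note that the on-disk inode is written by fabricating
 * a throwaway HAMMER_MEM_RECORD_INODE in-memory record pointing at
 * sync_ino_data, running it through the same hammer_ip_sync_record_cursor()
 * path as ordinary records, and then destroying it.  As the comment above
 * notes, the record is never entered into ip->rec_tree.
 */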
/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

retry:
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
	    HAMMER_INODE_ONDISK) {
		return(0);
	}

	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_INODE;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.key = 0;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor->key_beg.obj_type = 0;
	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_GET_LEAF;
	cursor->flags |= HAMMER_CURSOR_GET_DATA;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(cursor);
	if (error) {
		kprintf("error %d\n", error);
		Debugger("hammer_update_itimes1");
	}
	if (error == 0) {
		hammer_cache_node(&ip->cache[0], cursor->node);
		if (ip->sync_flags & HAMMER_INODE_MTIME) {
			/*
			 * Updating MTIME requires an UNDO.  Just cover
			 * both atime and mtime.
			 */
			hammer_modify_buffer(trans, cursor->data_buffer,
				     HAMMER_ITIMES_BASE(&cursor->data->inode),
				     HAMMER_ITIMES_BYTES);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			cursor->data->inode.mtime = ip->sync_ino_data.mtime;
			hammer_modify_buffer_done(cursor->data_buffer);
		} else if (ip->sync_flags & HAMMER_INODE_ATIME) {
			/*
			 * Updating atime only can be done in-place with
			 * no UNDO.
			 */
			hammer_modify_buffer(trans, cursor->data_buffer,
					     NULL, 0);
			cursor->data->inode.atime = ip->sync_ino_data.atime;
			hammer_modify_buffer_done(cursor->data_buffer);
		}
		ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor,
					   &ip->cache[0], ip);
		if (error == 0)
			goto retry;
	}
	return(error);
}
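
/*
 * Added commentary: the asymmetry above follows from the CRC exclusion
 * noted in the function comment.  Because atime/mtime are not covered by
 * the B-Tree leaf element's CRC, an atime-only update can scribble
 * directly over the media buffer with no UNDO; a crash merely loses the
 * atime update.  mtime, however, must stay consistent with the rest of
 * the inode's state, so it is written under hammer_modify_buffer()'s
 * UNDO coverage (which then covers atime for free).
 */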
/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	hammer_mount_t hmp = ip->hmp;

	/*
	 * Handle disposition when dropping the last ref.
	 */
	for (;;) {
		if (ip->lock.refs == 1) {
			/*
			 * Determine whether on-disk action is needed for
			 * the inode's final disposition.
			 */
			KKASSERT(ip->vp == NULL);
			hammer_inode_unloadable_check(ip, 0);
			if (ip->flags & HAMMER_INODE_MODMASK) {
				if (hmp->rsv_inodes > desiredvnodes) {
					hammer_flush_inode(ip,
							   HAMMER_FLUSH_SIGNAL);
				} else {
					hammer_flush_inode(ip, 0);
				}
			} else if (ip->lock.refs == 1) {
				hammer_unload_inode(ip);
				break;
			}
		} else {
			if (flush)
				hammer_flush_inode(ip, 0);

			/*
			 * The inode still has multiple refs, try to drop
			 * one ref.
			 */
			KKASSERT(ip->lock.refs >= 1);
			if (ip->lock.refs > 1) {
				hammer_unref(&ip->lock);
				break;
			}
		}
	}
}
/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * This can only be called in the context of the flusher.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
	hammer_mount_t hmp = ip->hmp;

	KASSERT(ip->lock.refs == 1,
		("hammer_unload_inode: %d refs\n", ip->lock.refs));
	KKASSERT(ip->vp == NULL);
	KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
	KKASSERT(ip->cursor_ip_refs == 0);
	KKASSERT(ip->lock.lockcount == 0);
	KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

	KKASSERT(RB_EMPTY(&ip->rec_tree));
	KKASSERT(TAILQ_EMPTY(&ip->target_list));

	RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

	hammer_uncache_node(&ip->cache[0]);
	hammer_uncache_node(&ip->cache[1]);
	if (ip->objid_cache)
		hammer_clear_objid(ip);
	--hammer_count_inodes;
	--hmp->count_inodes;

	hammer_inode_wakereclaims(ip);
	kfree(ip, M_HAMMER);

	return(0);
}
/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
	hammer_mount_t hmp = ip->hmp;

	if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
		ip->flags |= HAMMER_INODE_RO;
	else
		ip->flags &= ~HAMMER_INODE_RO;
	return(0);
}
/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY:	Inode data has been updated
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
	KKASSERT((ip->flags & HAMMER_INODE_RO) == 0 ||
		 (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
			   HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
			   HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
	if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
		ip->flags |= HAMMER_INODE_RSV_INODES;
		++ip->hmp->rsv_inodes;
	}

	ip->flags |= flags;
}
/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * If the HAMMER_FLUSH_SYNCHRONOUS flag is specified we will attempt to
 * flush the inode synchronously using the caller's context.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
	int good;

	/*
	 * Trivial 'nothing to flush' case.  If the inode is in a SETUP
	 * state we have to put it back into an IDLE state so we can
	 * drop the extra ref.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
		if (ip->flush_state == HAMMER_FST_SETUP) {
			ip->flush_state = HAMMER_FST_IDLE;
			hammer_rel_inode(ip, 0);
		}
		return;
	}

	/*
	 * Our flush action will depend on the current state.
	 */
	switch(ip->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * We have no dependencies and can flush immediately.  Some
		 * of our children may not be flushable so we have to re-test
		 * with that additional knowledge.
		 */
		hammer_flush_inode_core(ip, flags);
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Recurse upwards through dependencies via target_list
		 * and start their flusher actions going if possible.
		 *
		 * 'good' is our connectivity.  -1 means we have none and
		 * can't flush, 0 means there weren't any dependencies, and
		 * 1 means we have good connectivity.
		 */
		good = hammer_setup_parent_inodes(ip);

		/*
		 * We can continue if good >= 0.  Determine how many records
		 * under our inode can be flushed (and mark them).
		 */
		if (good >= 0) {
			hammer_flush_inode_core(ip, flags);
		} else {
			ip->flags |= HAMMER_INODE_REFLUSH;
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp);
			}
		}
		break;
	default:
		/*
		 * We are already flushing, flag the inode to reflush
		 * if needed after it completes its current flush.
		 */
		if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
			ip->flags |= HAMMER_INODE_REFLUSH;
		if (flags & HAMMER_FLUSH_SIGNAL) {
			ip->flags |= HAMMER_INODE_RESIGNAL;
			hammer_flusher_async(ip->hmp);
		}
		break;
	}
}
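
/*
 * Added summary of the flush state machine driven by the code above:
 *
 *	HAMMER_FST_IDLE  - no dirty records, no dependencies.
 *	HAMMER_FST_SETUP - has records/dependencies but is not yet
 *			   associated with a flush group.
 *	HAMMER_FST_FLUSH - queued to a flush group, owned by the backend.
 *
 *	IDLE  -(hammer_flush_inode_core)-> FLUSH
 *	SETUP -(parents resolved)--------> FLUSH
 *	SETUP -(parents unresolved)------> SETUP plus the REFLUSH flag
 *	FLUSH -(hammer_flush_inode_done)-> IDLE or SETUP
 */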
/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 *     so for now do not ref/deref the structures.  Note that if we use the
 *     ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip)
{
	hammer_record_t depend;
#if 0
	hammer_record_t next;
	hammer_inode_t  pip;
#endif
	int good;
	int r;

	good = 0;
	TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
		r = hammer_setup_parent_inodes_helper(depend);
		KKASSERT(depend->target_ip == ip);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;
	}
	return(good);

#if 0
	/*
	 * XXX alternate version which refs/rels the records and inodes
	 * while recursing (see the note above).  Not currently used.
	 */
	good = 0;
	next = TAILQ_FIRST(&ip->target_list);
	if (next) {
		hammer_ref(&next->lock);
		hammer_ref(&next->ip->lock);
	}
	while ((depend = next) != NULL) {
		if (depend->target_ip == NULL) {
			pip = depend->ip;
			next = TAILQ_NEXT(depend, target_entry);
			hammer_rel_mem_record(depend);
			hammer_rel_inode(pip, 0);
			continue;
		}
		KKASSERT(depend->target_ip == ip);
		next = TAILQ_NEXT(depend, target_entry);
		if (next) {
			hammer_ref(&next->lock);
			hammer_ref(&next->ip->lock);
		}
		r = hammer_setup_parent_inodes_helper(depend);
		if (r < 0 && good == 0)
			good = -1;
		if (r > 0)
			good = 1;
		pip = depend->ip;
		hammer_rel_mem_record(depend);
		hammer_rel_inode(pip, 0);
	}
	return(good);
#endif
}
/*
 * This helper function takes a record representing the dependency between
 * the parent inode and child inode.
 *
 * record->ip		= parent inode
 * record->target_ip	= child inode
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependency and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record)
{
	hammer_mount_t hmp;
	hammer_inode_t pip;
	int good;

	KKASSERT(record->flush_state != HAMMER_FST_IDLE);
	pip = record->ip;
	hmp = pip->hmp;

	/*
	 * If the record is already flushing, is it in our flush group?
	 *
	 * If it is in our flush group but it is a general record or a
	 * delete-on-disk, it does not improve our connectivity (return 0),
	 * and if the target inode is not trying to destroy itself we can't
	 * allow the operation yet anyway (the second return -1).
	 */
	if (record->flush_state == HAMMER_FST_FLUSH) {
		if (record->flush_group != hmp->flusher.next) {
			pip->flags |= HAMMER_INODE_REFLUSH;
			return(-1);
		}
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);
		/* GENERAL or DEL */
		return(0);
	}

	/*
	 * It must be a setup record.  Try to resolve the setup dependencies
	 * by recursing upwards so we can place ip on the flush list.
	 */
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	good = hammer_setup_parent_inodes(pip);

	/*
	 * We can't flush ip because it has no connectivity (XXX also check
	 * nlinks for pre-existing connectivity!).  Flag it so any resolution
	 * recurses back down.
	 */
	if (good < 0) {
		pip->flags |= HAMMER_INODE_REFLUSH;
		return(good);
	}

	/*
	 * We are go, place the parent inode in a flushing state so we can
	 * place its record in a flushing state.  Note that the parent
	 * may already be flushing.  The record must be in the same flush
	 * group as the parent.
	 */
	if (pip->flush_state != HAMMER_FST_FLUSH)
		hammer_flush_inode_core(pip, HAMMER_FLUSH_RECURSION);
	KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flush_state == HAMMER_FST_SETUP);

	if (record->type == HAMMER_MEM_RECORD_DEL &&
	    (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
		/*
		 * Regardless of flushing state we cannot sync this path if the
		 * record represents a delete-on-disk but the target inode
		 * is not ready to sync its own deletion.
		 *
		 * XXX need to count effective nlinks to determine whether
		 * the flush is ok, otherwise removing a hardlink will
		 * just leave the DEL record to rot.
		 */
		record->target_ip->flags |= HAMMER_INODE_REFLUSH;
		return(-1);
	}

	if (pip->flush_group == pip->hmp->flusher.next) {
		/*
		 * This is the record we wanted to synchronize.  If the
		 * record went into a flush state while we blocked it
		 * had better be in the correct flush group.
		 */
		if (record->flush_state != HAMMER_FST_FLUSH) {
			record->flush_state = HAMMER_FST_FLUSH;
			record->flush_group = pip->flush_group;
			hammer_ref(&record->lock);
		}
		KKASSERT(record->flush_group == pip->flush_group);
		if (record->type == HAMMER_MEM_RECORD_ADD)
			return(1);

		/*
		 * A general or delete-on-disk record does not contribute
		 * to our visibility.  We can still flush it, however.
		 */
		return(0);
	} else {
		/*
		 * We couldn't resolve the dependencies, request that the
		 * inode be flushed when the dependencies can be resolved.
		 */
		pip->flags |= HAMMER_INODE_REFLUSH;
		return(-1);
	}
}
/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, int flags)
{
	int go_count;

	/*
	 * Set flush state and prevent the flusher from cycling into
	 * the next flush group.  Do not place the ip on the list yet.
	 * Inodes not in the idle state get an extra reference.
	 */
	KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
	if (ip->flush_state == HAMMER_FST_IDLE)
		hammer_ref(&ip->lock);
	ip->flush_state = HAMMER_FST_FLUSH;
	ip->flush_group = ip->hmp->flusher.next;
	++ip->hmp->flusher.group_lock;
	++ip->hmp->count_iqueued;
	++hammer_count_iqueued;

	/*
	 * We need to be able to vfsync/truncate from the backend.
	 */
	KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
	if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
		ip->flags |= HAMMER_INODE_VHELD;
		vref(ip->vp);
	}

	/*
	 * Figure out how many in-memory records we can actually flush
	 * (not including inode meta-data, buffers, etc).
	 */
	if (flags & HAMMER_FLUSH_RECURSION) {
		go_count = 1;
	} else {
		go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				   hammer_setup_child_callback, NULL);
	}

	/*
	 * This is a more involved test that includes go_count.  If we
	 * can't flush, flag the inode and return.  If go_count is 0 we
	 * are unable to flush any records in our rec_tree and
	 * must ignore the XDIRTY flag.
	 */
	if (go_count == 0) {
		if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
			ip->flags |= HAMMER_INODE_REFLUSH;

			--ip->hmp->count_iqueued;
			--hammer_count_iqueued;

			ip->flush_state = HAMMER_FST_SETUP;
			if (ip->flags & HAMMER_INODE_VHELD) {
				ip->flags &= ~HAMMER_INODE_VHELD;
				vrele(ip->vp);
			}
			if (flags & HAMMER_FLUSH_SIGNAL) {
				ip->flags |= HAMMER_INODE_RESIGNAL;
				hammer_flusher_async(ip->hmp);
			}
			if (--ip->hmp->flusher.group_lock == 0)
				wakeup(&ip->hmp->flusher.group_lock);
			return;
		}
	}

	/*
	 * Snapshot the state of the inode for the backend flusher.
	 *
	 * The truncation must be retained in the frontend until after
	 * we've actually performed the record deletion.
	 *
	 * We continue to retain sync_trunc_off even when all truncations
	 * have been resolved as an optimization to determine if we can
	 * skip the B-Tree lookup for overwrite deletions.
	 *
	 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
	 * and stays in ip->flags.  Once set, it stays set until the
	 * inode is destroyed.
	 */
	ip->sync_flags = (ip->flags & HAMMER_INODE_MODMASK);
	if (ip->sync_flags & HAMMER_INODE_TRUNCATED)
		ip->sync_trunc_off = ip->trunc_off;
	ip->sync_ino_leaf = ip->ino_leaf;
	ip->sync_ino_data = ip->ino_data;
	ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
	ip->flags &= ~HAMMER_INODE_MODMASK;
#ifdef DEBUG_TRUNCATE
	if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
		kprintf("truncateS %016llx\n", ip->sync_trunc_off);
#endif

	/*
	 * The flusher list inherits our inode and reference.
	 */
	TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
	if (--ip->hmp->flusher.group_lock == 0)
		wakeup(&ip->hmp->flusher.group_lock);

	if (flags & HAMMER_FLUSH_SIGNAL) {
		hammer_flusher_async(ip->hmp);
	}
}
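
/*
 * Added commentary: the snapshot above is the heart of HAMMER's
 * frontend/backend split.  The frontend copies ino_leaf/ino_data to
 * sync_ino_leaf/sync_ino_data and the mod flags move from ip->flags to
 * ip->sync_flags, so the frontend can continue dirtying the inode while
 * the flusher works exclusively from the sync_* copies.
 */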
/*
 * Callback for scan of ip->rec_tree.  Try to include each record in our
 * flush.  ip->flush_group has been set but the inode has not yet been
 * moved into a flushing state.
 *
 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
 * both inodes.
 *
 * We return 1 for any record placed or found in FST_FLUSH, which prevents
 * the caller from shortcutting the flush.
 */
static int
hammer_setup_child_callback(hammer_record_t rec, void *data)
{
	hammer_inode_t target_ip;
	hammer_inode_t ip;
	int r;

	/*
	 * Deleted records are ignored.  Note that the flush detects deleted
	 * front-end records at multiple points to deal with races.  This is
	 * just the first line of defense.  The only time DELETED_FE cannot
	 * be set is when HAMMER_RECF_INTERLOCK_BE is set.
	 *
	 * Don't get confused between record deletion and, say, directory
	 * entry deletion.  The deletion of a directory entry that is on
	 * the media has nothing to do with the record deletion flags.
	 */
	if (rec->flags & (HAMMER_RECF_DELETED_FE|HAMMER_RECF_DELETED_BE))
		return(0);

	/*
	 * If the record is in an idle state it has no dependencies and
	 * can be flushed.
	 */
	ip = rec->ip;
	r = 0;

	switch(rec->flush_state) {
	case HAMMER_FST_IDLE:
		/*
		 * Record has no setup dependency, we can flush it.
		 */
		KKASSERT(rec->target_ip == NULL);
		rec->flush_state = HAMMER_FST_FLUSH;
		rec->flush_group = ip->flush_group;
		hammer_ref(&rec->lock);
		r = 1;
		break;
	case HAMMER_FST_SETUP:
		/*
		 * Record has a setup dependency.  Try to include the
		 * target ip in the flush.
		 *
		 * We have to be careful here, if we do not do the right
		 * thing we can lose track of dirty inodes and the system
		 * will lockup trying to allocate buffers.
		 */
		target_ip = rec->target_ip;
		KKASSERT(target_ip != NULL);
		KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
		if (target_ip->flush_state == HAMMER_FST_FLUSH) {
			/*
			 * If the target IP is already flushing in our group
			 * we are golden, otherwise make sure the target
			 * reflushes.
			 */
			if (target_ip->flush_group == ip->flush_group) {
				rec->flush_state = HAMMER_FST_FLUSH;
				rec->flush_group = ip->flush_group;
				hammer_ref(&rec->lock);
				r = 1;
			} else {
				target_ip->flags |= HAMMER_INODE_REFLUSH;
			}
		} else if (rec->type == HAMMER_MEM_RECORD_ADD) {
			/*
			 * If the target IP is not flushing we can force
			 * it to flush, even if it is unable to write out
			 * any of its own records we have at least one in
			 * hand that we CAN deal with.
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = ip->flush_group;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		} else {
			/*
			 * General or delete-on-disk record.
			 *
			 * XXX this needs help.  If a delete-on-disk we could
			 * disconnect the target.  If the target has its own
			 * dependencies they really need to be flushed.
			 *
			 * XXX
			 */
			rec->flush_state = HAMMER_FST_FLUSH;
			rec->flush_group = ip->flush_group;
			hammer_ref(&rec->lock);
			hammer_flush_inode_core(target_ip,
						HAMMER_FLUSH_RECURSION);
			r = 1;
		}
		break;
	case HAMMER_FST_FLUSH:
		/*
		 * Record already associated with a flush group.  It had
		 * better be ours.
		 */
		KKASSERT(rec->flush_group == ip->flush_group);
		r = 1;
		break;
	}
	return(r);
}
/*
 * Wait for a previously queued flush to complete.  Not only do we need to
 * wait for the inode to sync out, we also may have to run the flusher again
 * to get it past the UNDO position pertaining to the flush so a crash does
 * not 'undo' our flush.
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
	hammer_mount_t hmp = ip->hmp;
	int sync_group;
	int waitcount;

	sync_group = ip->flush_group;
	waitcount = (ip->flags & HAMMER_INODE_REFLUSH) ? 2 : 1;

	if (ip->flush_state == HAMMER_FST_SETUP) {
		hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
	}
	/* XXX can we make this != FST_IDLE ? check SETUP depends */
	while (ip->flush_state == HAMMER_FST_FLUSH &&
	       (ip->flush_group - sync_group) < waitcount) {
		ip->flags |= HAMMER_INODE_FLUSHW;
		tsleep(&ip->flags, 0, "hmrwin", 0);
	}
	while (hmp->flusher.done - sync_group < waitcount) {
		hammer_flusher_sync(hmp);
	}
}
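
/*
 * Added commentary: the handshake with the backend works as follows.
 * The waiter sets HAMMER_INODE_FLUSHW and sleeps on &ip->flags;
 * hammer_flush_inode_done() clears the flag and wakeup()s the same
 * address once the inode leaves FST_FLUSH.  The trailing
 * hammer_flusher_sync() loop then pushes the flusher past the UNDO
 * position covering our flush group, as described above.
 */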
/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip)
{
	hammer_mount_t hmp;
	int dorel;

	KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

	hmp = ip->hmp;

	/*
	 * Merge left-over flags back into the frontend and fix the state.
	 */
	ip->flags |= ip->sync_flags;

	/*
	 * The backend may have adjusted nlinks, so if the adjusted nlinks
	 * does not match the frontend set the frontend's RDIRTY flag again.
	 */
	if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
		ip->flags |= HAMMER_INODE_DDIRTY;

	/*
	 * Fix up the dirty buffer status.  IO completions will also
	 * try to clean up rsv_databufs.
	 */
	if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
		ip->flags |= HAMMER_INODE_BUFS;
	} else {
		hmp->rsv_databufs -= ip->rsv_databufs;
		ip->rsv_databufs = 0;
	}

	/*
	 * Re-set the XDIRTY flag if some of the inode's in-memory records
	 * could not be flushed.
	 */
	KKASSERT((RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
		 (!RB_EMPTY(&ip->rec_tree) &&
		  (ip->flags & HAMMER_INODE_XDIRTY) != 0));

	/*
	 * Do not lose track of inodes which no longer have vnode
	 * associations, otherwise they may never get flushed again.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
		ip->flags |= HAMMER_INODE_REFLUSH;

	/*
	 * Adjust flush_state.  The target state (idle or setup) shouldn't
	 * be terribly important since we will reflush if we really need
	 * to do anything. XXX
	 */
	if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
		ip->flush_state = HAMMER_FST_IDLE;
		dorel = 1;
	} else {
		ip->flush_state = HAMMER_FST_SETUP;
		dorel = 0;
	}

	--hmp->count_iqueued;
	--hammer_count_iqueued;

	/*
	 * Clean up the vnode ref
	 */
	if (ip->flags & HAMMER_INODE_VHELD) {
		ip->flags &= ~HAMMER_INODE_VHELD;
		vrele(ip->vp);
	}

	/*
	 * If the frontend made more changes and requested another flush,
	 * then try to get it running.
	 */
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
	}

	/*
	 * If the inode is now clean drop the space reservation.
	 */
	if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
	    (ip->flags & HAMMER_INODE_RSV_INODES)) {
		ip->flags &= ~HAMMER_INODE_RSV_INODES;
		--hmp->rsv_inodes;
	}

	/*
	 * Finally, if the frontend is waiting for a flush to complete,
	 * wake it up.
	 */
	if (ip->flush_state != HAMMER_FST_FLUSH) {
		if (ip->flags & HAMMER_INODE_FLUSHW) {
			ip->flags &= ~HAMMER_INODE_FLUSHW;
			wakeup(&ip->flags);
		}
	}
	if (dorel)
		hammer_rel_inode(ip, 0);
}
/*
 * Called from hammer_sync_inode() to synchronize in-memory records
 * to the media.
 */
static int
hammer_sync_record_callback(hammer_record_t record, void *data)
{
	hammer_cursor_t cursor = data;
	hammer_transaction_t trans = cursor->trans;
	int error;

	/*
	 * Skip records that do not belong to the current flush.
	 */
	++hammer_stats_record_iterations;
	if (record->flush_state != HAMMER_FST_FLUSH)
		return(0);

#if 1
	if (record->flush_group != record->ip->flush_group) {
		kprintf("sync_record %p ip %p bad flush group %d %d\n",
			record, record->ip, record->flush_group,
			record->ip->flush_group);
		Debugger("blah2");
		return(0);
	}
#endif
	KKASSERT(record->flush_group == record->ip->flush_group);

	/*
	 * Interlock the record using the BE flag.  Once BE is set the
	 * frontend cannot change the state of FE.
	 *
	 * NOTE: If FE is set prior to us setting BE we still sync the
	 * record out, but the flush completion code converts it to
	 * a delete-on-disk record instead of destroying it.
	 */
	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
	record->flags |= HAMMER_RECF_INTERLOCK_BE;

	/*
	 * The backend may have already disposed of the record.
	 */
	if (record->flags & HAMMER_RECF_DELETED_BE) {
		error = 0;
		goto done;
	}

	/*
	 * If the whole inode is being deleted all on-disk records will
	 * be deleted very soon, we can't sync any new records to disk
	 * because they will be deleted in the same transaction they were
	 * created in (delete_tid == create_tid), which will assert.
	 *
	 * XXX There may be a case with RECORD_ADD with DELETED_FE set
	 * that we currently panic on.
	 */
	if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
		switch(record->type) {
		case HAMMER_MEM_RECORD_DATA:
			/*
			 * We don't have to do anything, if the record was
			 * committed the space will have been accounted for
			 * anyway.
			 */
			/* fall through */
		case HAMMER_MEM_RECORD_GENERAL:
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
			error = 0;
			goto done;
		case HAMMER_MEM_RECORD_ADD:
			panic("hammer_sync_record_callback: illegal add "
			      "during inode deletion record %p", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_INODE:
			panic("hammer_sync_record_callback: attempt to "
			      "sync inode record %p?", record);
			break; /* NOT REACHED */
		case HAMMER_MEM_RECORD_DEL:
			/*
			 * Follow through and issue the on-disk deletion
			 */
			break;
		}
	}

	/*
	 * If DELETED_FE is set special handling is needed for directory
	 * entries.  Dependent pieces related to the directory entry may
	 * have already been synced to disk.  If this occurs we have to
	 * sync the directory entry and then change the in-memory record
	 * from an ADD to a DELETE to cover the fact that it's been
	 * deleted by the frontend.
	 *
	 * A directory delete covering record (MEM_RECORD_DEL) can never
	 * be deleted by the frontend.
	 *
	 * Any other record type (aka DATA) can be deleted by the frontend.
	 * XXX At the moment the flusher must skip it because there may
	 * be another data record in the flush group for the same block,
	 * meaning that some frontend data changes can leak into the backend's
	 * synchronization point.
	 */
	if (record->flags & HAMMER_RECF_DELETED_FE) {
		if (record->type == HAMMER_MEM_RECORD_ADD) {
			record->flags |= HAMMER_RECF_CONVERT_DELETE;
		} else {
			KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
			record->flags |= HAMMER_RECF_DELETED_BE;
			error = 0;
			goto done;
		}
	}

	/*
	 * Assign the create_tid for new records.  Deletions already
	 * have the record's entire key properly set up.
	 */
	if (record->type != HAMMER_MEM_RECORD_DEL)
		record->leaf.base.create_tid = trans->tid;
	for (;;) {
		error = hammer_ip_sync_record_cursor(cursor, record);
		if (error != EDEADLK)
			break;
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
					   record->ip);
		if (error)
			break;
	}
	record->flags &= ~HAMMER_RECF_CONVERT_DELETE;

	if (error) {
		error = -error;
		if (error != -ENOSPC) {
			kprintf("hammer_sync_record_callback: sync failed rec "
				"%p, error %d\n", record, error);
			Debugger("sync failed rec");
		}
	}
done:
	hammer_flush_record_done(record, error);
	return(error);
}
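
/*
 * Added commentary: HAMMER_RECF_INTERLOCK_BE is the per-record analog of
 * the inode's frontend/backend split.  Once the backend sets BE the
 * frontend may still flag HAMMER_RECF_DELETED_FE, but it may not destroy
 * the record; the completion path (hammer_flush_record_done) then either
 * destroys the record or, for a synced ADD, converts it into a
 * delete-on-disk record, as described in the NOTE above.
 */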
/*
 * XXX error handling
 */
int
hammer_sync_inode(hammer_inode_t ip)
{
	struct hammer_transaction trans;
	struct hammer_cursor cursor;
	hammer_node_t tmp_node;
	hammer_record_t depend;
	hammer_record_t next;
	int error, tmp_error;
	u_int64_t nlinks;

	if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
		return(0);

	hammer_start_transaction_fls(&trans, ip->hmp);
	error = hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
	if (error)
		goto done;

	/*
	 * Any directory records referencing this inode which are not in
	 * our current flush group must adjust our nlink count for the
	 * purposes of synchronization to disk.
	 *
	 * Records which are in our flush group can be unlinked from our
	 * inode now, potentially allowing the inode to be physically
	 * deleted.
	 *
	 * This cannot block.
	 */
	nlinks = ip->ino_data.nlinks;
	next = TAILQ_FIRST(&ip->target_list);
	while ((depend = next) != NULL) {
		next = TAILQ_NEXT(depend, target_entry);
		if (depend->flush_state == HAMMER_FST_FLUSH &&
		    depend->flush_group == ip->hmp->flusher.act) {
			/*
			 * If this is an ADD that was deleted by the frontend
			 * the frontend nlinks count will have already been
			 * decremented, but the backend is going to sync its
			 * directory entry and must account for it.  The
			 * record will be converted to a delete-on-disk when
			 * it gets synced.
			 *
			 * If the ADD was not deleted by the frontend we
			 * can remove the dependency from our target_list.
			 */
			if (depend->flags & HAMMER_RECF_DELETED_FE) {
				++nlinks;
			} else {
				TAILQ_REMOVE(&ip->target_list, depend,
					     target_entry);
				depend->target_ip = NULL;
			}
		} else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
			/*
			 * Not part of our flush group
			 */
			KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
			switch(depend->type) {
			case HAMMER_MEM_RECORD_ADD:
				--nlinks;
				break;
			case HAMMER_MEM_RECORD_DEL:
				++nlinks;
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Set dirty if we had to modify the link count.
	 */
	if (ip->sync_ino_data.nlinks != nlinks) {
		KKASSERT((int64_t)nlinks >= 0);
		ip->sync_ino_data.nlinks = nlinks;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
	}

	/*
	 * If there is a truncation queued destroy any data past the (aligned)
	 * truncation point.  Userland will have dealt with the buffer
	 * containing the truncation point for us.
	 *
	 * We don't flush pending frontend data buffers until after we've
	 * dealt with the truncation.
	 */
	if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
		/*
		 * Interlock trunc_off.  The VOP front-end may continue to
		 * make adjustments to it while we are blocked.
		 */
		off_t trunc_off;
		off_t aligned_trunc_off;
		int blkmask;

		trunc_off = ip->sync_trunc_off;
		blkmask = hammer_blocksize(trunc_off) - 1;
		aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;

		/*
		 * Delete any whole blocks on-media.  The front-end has
		 * already cleaned out any partial block and made it
		 * pending.  The front-end may have updated trunc_off
		 * while we were blocked so we only use sync_trunc_off.
		 */
		error = hammer_ip_delete_range(&cursor, ip,
						aligned_trunc_off,
						0x7FFFFFFFFFFFFFFFLL, 1);
		if (error)
			Debugger("hammer_ip_delete_range errored");

		/*
		 * Clear the truncation flag on the backend after we have
		 * completed the deletions.  Backend data is now good again
		 * (including new records we are about to sync, below).
		 *
		 * Leave sync_trunc_off intact.  As we write additional
		 * records the backend will update sync_trunc_off.  This
		 * tells the backend whether it can skip the overwrite
		 * test.  This should work properly even when the backend
		 * writes full blocks where the truncation point straddles
		 * the block because the comparison is against the base
		 * offset of the record.
		 */
		ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
		/* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
	} else {
		error = 0;
	}

	/*
	 * Now sync related records.  These will typically be directory
	 * entries or delete-on-disk records.
	 *
	 * Not all records will be flushed, but clear XDIRTY anyway.  We
	 * will set it again in the frontend hammer_flush_inode_done()
	 * if records remain.
	 */
	if (error == 0) {
		tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
				    hammer_sync_record_callback, &cursor);
		if (tmp_error < 0)
			tmp_error = -tmp_error;
		if (tmp_error)
			error = tmp_error;
	}
	hammer_cache_node(&ip->cache[1], cursor.node);

	/*
	 * Re-seek for inode update, assuming our cache hasn't been ripped
	 * out from under us.
	 */
	if (error == 0) {
		tmp_node = hammer_ref_node_safe(ip->hmp, &ip->cache[0], &error);
		if (tmp_node) {
			if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
				hammer_cursor_seek(&cursor, tmp_node, 0);
			hammer_rel_node(tmp_node);
		}
		error = 0;
	}

	/*
	 * If we are deleting the inode the frontend had better not have
	 * any active references on elements making up the inode.
	 */
	if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
	    RB_EMPTY(&ip->rec_tree) &&
	    (ip->sync_flags & HAMMER_INODE_DELETING) &&
	    (ip->flags & HAMMER_INODE_DELETED) == 0) {
		int count1 = 0;

		ip->flags |= HAMMER_INODE_DELETED;
		error = hammer_ip_delete_range_all(&cursor, ip, &count1);
		if (error == 0) {
			ip->sync_flags &= ~HAMMER_INODE_DELETING;
			ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
			KKASSERT(RB_EMPTY(&ip->rec_tree));

			/*
			 * Set delete_tid in both the frontend and backend
			 * copy of the inode record.  The DELETED flag handles
			 * this, do not set RDIRTY.
			 */
			ip->ino_leaf.base.delete_tid = trans.tid;
			ip->sync_ino_leaf.base.delete_tid = trans.tid;

			/*
			 * Adjust the inode count in the volume header
			 */
			if (ip->flags & HAMMER_INODE_ONDISK) {
				hammer_modify_volume_field(&trans,
							   trans.rootvol,
							   vol0_stat_inodes);
				--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans.rootvol);
			}
		} else {
			ip->flags &= ~HAMMER_INODE_DELETED;
			Debugger("hammer_ip_delete_range_all errored");
		}
	}

	ip->sync_flags &= ~HAMMER_INODE_BUFS;

	if (error)
		Debugger("RB_SCAN errored");

	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 * DELETED and ONDISK are managed only in ip->flags.
	 */
	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags.
		 * The delete flag takes care of things.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
				    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
				    HAMMER_INODE_DELETING);
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t record = RB_ROOT(&ip->rec_tree);
			hammer_ref(&record->lock);
			KKASSERT(record->lock.refs == 1);
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
			hammer_rel_mem_record(record);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set DDIRTY to force
		 * an initial record to be written.
		 *
		 * Also set the create_tid in both the frontend and backend
		 * copy of the inode record.
		 */
		ip->ino_leaf.base.create_tid = trans.tid;
		ip->sync_ino_leaf.base.create_tid = trans.tid;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
		break;
	}

	/*
	 * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
	 * is already on-disk the old record is marked as deleted.
	 *
	 * If DELETED is set hammer_update_inode() will delete the existing
	 * record without writing out a new one.
	 *
	 * If *ONLY* the ITIMES flag is set we can update the record in-place.
	 */
	if (ip->flags & HAMMER_INODE_DELETED) {
		error = hammer_update_inode(&cursor, ip);
	} else
	if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
	    (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
		error = hammer_update_itimes(&cursor, ip);
	} else
	if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
		error = hammer_update_inode(&cursor, ip);
	}
	if (error)
		Debugger("hammer_update_itimes/inode errored");
done:
	/*
	 * Save the TID we used to sync the inode with to make sure we
	 * do not improperly reuse it.
	 */
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return(error);
}
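
/*
 * Added summary: hammer_sync_inode() above proceeds in a fixed order:
 * (1) adjust the synced nlinks count for directory records outside the
 *     flush group, (2) perform any queued truncation on-media, (3) sync
 *     the in-memory records via hammer_sync_record_callback(), (4) re-seek
 *     the cursor, (5) physically delete everything if the inode itself is
 *     being destroyed, and (6) write or delete the on-disk inode record
 *     via hammer_update_inode()/hammer_update_itimes().
 */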
/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
	struct vnode *vp;

	/*
	 * Set the DELETING flag when the link count drops to 0 and the
	 * OS no longer has any opens on the inode.
	 *
	 * The backend will clear DELETING (a mod flag) and set DELETED
	 * (a state flag) when it is actually able to perform the
	 * operation.
	 */
	if (ip->ino_data.nlinks == 0 &&
	    (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
		ip->flags |= HAMMER_INODE_DELETING;
		ip->flags |= HAMMER_INODE_TRUNCATED;
		ip->trunc_off = 0;
		vp = NULL;
		if (getvp) {
			if (hammer_get_vnode(ip, &vp) != 0)
				return;
		}

		if (ip->vp) {
			vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
			vnode_pager_setsize(ip->vp, 0);
		}
		if (getvp) {
			vput(vp);
		}
	}
}
/*
 * Re-test an inode when a dependency has gone away to see if we
 * can chain flush it.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
	if (ip->flags & HAMMER_INODE_REFLUSH) {
		ip->flags &= ~HAMMER_INODE_REFLUSH;
		hammer_ref(&ip->lock);
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
		hammer_rel_inode(ip, 0);
	}
}
/*
 * Clear the RECLAIM flag on an inode.  This occurs when the inode is
 * reassociated with a vp or just before it gets freed.
 *
 * Wakeup one thread blocked waiting on reclaims to complete.  Note that
 * the inode the thread is waiting on behalf of is a different inode than
 * the inode we are called with.  This is to create a pipeline.
 */
static void
hammer_inode_wakereclaims(hammer_inode_t ip)
{
	struct hammer_reclaim *reclaim;
	hammer_mount_t hmp = ip->hmp;

	if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
		return;

	--hammer_count_reclaiming;
	--hmp->inode_reclaims;
	ip->flags &= ~HAMMER_INODE_RECLAIM;

	if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
		TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
		reclaim->okydoky = 1;
		wakeup(reclaim);
	}
}
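
/*
 * Added commentary: wakereclaims/waitreclaims form a simple pipeline.
 * Each reclaimed inode wakes at most one waiter (TAILQ_FIRST above), so
 * with N waiters queued, new reclaims are admitted at the same rate old
 * ones complete rather than letting all waiters stampede at once.
 */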
/*
 * Setup our reclaim pipeline.  We only let so many detached (and dirty)
 * inodes build up before we start blocking.
 *
 * When we block we don't care *which* inode has finished reclaiming,
 * as long as one does.  This is somewhat heuristical... we also put a
 * cap on how long we are willing to wait.
 */
void
hammer_inode_waitreclaims(hammer_mount_t hmp)
{
	struct hammer_reclaim reclaim;
	int delay;

	if (hmp->inode_reclaims > HAMMER_RECLAIM_WAIT) {
		reclaim.okydoky = 0;
		TAILQ_INSERT_TAIL(&hmp->reclaim_list,
				  &reclaim, entry);
	} else {
		reclaim.okydoky = 1;
	}

	if (reclaim.okydoky == 0) {
		delay = (hmp->inode_reclaims - HAMMER_RECLAIM_WAIT) * hz /
			HAMMER_RECLAIM_WAIT;
		tsleep(&reclaim, 0, "hmrrcm", delay + 1);
		if (reclaim.okydoky == 0)
			TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
	}
}
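
/*
 * Added commentary: the delay computation above scales the sleep with the
 * backlog.  For example, with a backlog of one full HAMMER_RECLAIM_WAIT
 * worth of inodes over the limit (inode_reclaims == 2 * W, writing W for
 * HAMMER_RECLAIM_WAIT):
 *
 *	delay = (2*W - W) * hz / W = hz		(about one second)
 *
 * tsleep()'s timeout of delay + 1 ticks also bounds the wait when no
 * reclaim completes, so the caller never blocks indefinitely.
 */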