/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.79 2008/06/20 21:24:53 dillon Exp $
 */
#include "hammer.h"
#include <vm/vm_extern.h>

static int hammer_unload_inode(struct hammer_inode *ip);
static void hammer_flush_inode_core(hammer_inode_t ip, int flags);
static int hammer_setup_child_callback(hammer_record_t rec, void *data);
static int hammer_setup_parent_inodes(hammer_inode_t ip);
static int hammer_setup_parent_inodes_helper(hammer_record_t record);
static void hammer_inode_wakereclaims(hammer_inode_t ip);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif
/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(ap->a_vp);
                return(0);
        }

        /*
         * If the inode no longer has visibility in the filesystem try to
         * recycle it immediately, even if the inode is dirty.  Recycling
         * it quickly allows the system to reclaim buffer cache and VM
         * resources which can matter a lot in a heavily loaded system.
         *
         * This can deadlock in vfsync() if we aren't careful.
         *
         * Do not queue the inode to the flusher if we still have visibility,
         * otherwise namespace calls such as chmod will unnecessarily generate
         * multiple inode updates.
         */
        hammer_inode_unloadable_check(ip, 0);
        if (ip->ino_data.nlinks == 0) {
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
                vrecycle(ap->a_vp);
        }
        return(0);
}
/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regards to
 * flushing the inode.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct vnode *vp;

        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
                vp->v_data = NULL;
                ip->vp = NULL;

                if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
                        ++hammer_count_reclaiming;
                        ++hmp->inode_reclaims;
                        ip->flags |= HAMMER_INODE_RECLAIM;
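                        /*
                         * Descriptive note: once the backlog of inodes
                         * undergoing reclamation exceeds HAMMER_RECLAIM_FLUSH,
                         * the check below pokes the flusher on every 256th
                         * reclaim, working the backlog off incrementally
                         * rather than signaling on each reclaim.
                         */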
                        if (hmp->inode_reclaims > HAMMER_RECLAIM_FLUSH &&
                            (hmp->inode_reclaims & 255) == 0) {
                                hammer_flusher_async(hmp);
                        }
                }
                hammer_rel_inode(ip, 1);
        }
        return(0);
}
/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
        hammer_mount_t hmp;
        struct vnode *vp;
        int error = 0;

        hmp = ip->hmp;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp = *vpp;
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;
                        vp->v_type =
                                hammer_get_vnode_type(ip->ino_data.obj_type);

                        hammer_inode_wakereclaims(ip);

                        switch(ip->ino_data.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         *
                         * Pseudo-filesystem roots also do not count.
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT &&
                            ip->obj_asof == hmp->asof &&
                            ip->obj_localization == 0) {
                                vp->v_flag |= VROOT;
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
                        if (vp->v_type == VREG)
                                vinitvmio(vp, ip->ino_data.size);
                        break;
                }

                /*
                 * Loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp)
                                break;
                        vput(vp);
                }
        }
        *vpp = vp;
        return(error);
}
/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 u_int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        if (hmp->ronly)
                ip->flags |= HAMMER_INODE_RO;
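        /*
         * Descriptive note: 0x7FFFFFFFFFFFFFFFLL is the largest signed
         * 64-bit value; throughout this file it acts as the "no truncation
         * pending" sentinel for trunc_off/sync_trunc_off.
         */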
        ip->sync_trunc_off = ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        /*
         * Locate the on-disk inode.
         */
retry:
        hammer_init_cursor(trans, &cursor, (dip ? &dip->cache[0] : NULL), NULL);
        cursor.key_beg.localization = HAMMER_LOCALIZE_INODE;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;
        cursor.asof = iinfo.obj_asof;
        cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
                       HAMMER_CURSOR_ASOF;

        *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
                ip->ino_data = cursor.data->inode;

                /*
                 * cache[0] tries to cache the location of the object inode.
                 * The assumption is that it is near the directory inode.
                 *
                 * cache[1] tries to cache the location of the object data.
                 * The assumption is that it is near the directory data.
                 */
                hammer_cache_node(&ip->cache[0], cursor.node);
                if (dip && dip->cache[1].node)
                        hammer_cache_node(&ip->cache[1], dip->cache[1].node);

                /*
                 * The file should not contain any data past the file size
                 * stored in the inode.  Setting sync_trunc_off to the
                 * file size instead of max reduces B-Tree lookup overheads
                 * on append by allowing the flusher to avoid checking for
                 * record overwrites.
                 */
                ip->sync_trunc_off = ip->ino_data.size;
        }
        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         */
        if (*errorp == 0) {
                hammer_ref(&ip->lock);
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_uncache_node(&ip->cache[0]);
                        hammer_uncache_node(&ip->cache[1]);
                        KKASSERT(ip->lock.refs == 1);
                        --hammer_count_inodes;
                        kfree(ip, M_HAMMER);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
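                /*
                 * Descriptive note: HAMMER_INODE_ONDISK tracks whether an
                 * inode record currently exists on the media.  The final
                 * switch in hammer_sync_inode() keys its update strategy
                 * off the DELETED|ONDISK flag combination.
                 */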
        } else {
                /*
                 * Do not panic on read-only accesses which fail, particularly
                 * historical accesses where the snapshot might not have
                 * complete connectivity.
                 */
                if ((flags & HAMMER_INODE_RO) == 0) {
                        kprintf("hammer_get_inode: failed ip %p obj_id %016llx cursor %p error %d\n",
                                ip, ip->obj_id, &cursor, *errorp);
                }
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }
                hmp->rsv_databufs -= ip->rsv_databufs;
                ip->rsv_databufs = 0;                          /* sanity */

                --hammer_count_inodes;
                kfree(ip, M_HAMMER);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);
        return(ip);
}
/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.
 *
 * The inode is created in-memory.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred, hammer_inode_t dip,
                    struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;

        hmp = trans->hmp;
        ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ip->obj_id = hammer_alloc_objid(trans, dip);
        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->obj_localization = dip->obj_localization;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY |
                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;

        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        ip->ino_data.atime = trans->time;
        ip->ino_data.mtime = trans->time;
        ip->ino_data.size = 0;
        ip->ino_data.nlinks = 0;

        /*
         * A nohistory designator on the parent directory is inherited by
         * the child.
         */
        ip->ino_data.uflags = dip->ino_data.uflags &
                              (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);

        ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_leaf.base.localization = HAMMER_LOCALIZE_INODE;
        ip->ino_leaf.base.obj_id = ip->obj_id;
        ip->ino_leaf.base.key = 0;
        ip->ino_leaf.base.create_tid = 0;
        ip->ino_leaf.base.delete_tid = 0;
        ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;
        ip->ino_data.parent_obj_id = (dip) ? dip->ino_leaf.base.obj_id : 0;

        switch(ip->ino_leaf.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        xuid = hammer_to_unix_xid(&dip->ino_data.uid);
        xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
                                     &vap->va_mode);
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
        else
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
        else
                ip->ino_data.gid = dip->ino_data.gid;

        hammer_ref(&ip->lock);
        if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                hammer_unref(&ip->lock);
                panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
        }
        *ipp = ip;
        return(0);
}
/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        hammer_record_t record;
        int error;

retry:
        error = 0;

        /*
         * If the inode has a presence on-disk then locate it and mark
         * it deleted, setting DELONDISK.
         *
         * The record may or may not be physically deleted, depending on
         * the retention policy.
         */
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
            HAMMER_INODE_ONDISK) {
                hammer_normalize_cursor(cursor);
                cursor->key_beg.localization = HAMMER_LOCALIZE_INODE;
                cursor->key_beg.obj_id = ip->obj_id;
                cursor->key_beg.key = 0;
                cursor->key_beg.create_tid = 0;
                cursor->key_beg.delete_tid = 0;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
                cursor->key_beg.obj_type = 0;
                cursor->asof = ip->obj_asof;
                cursor->flags &= ~HAMMER_CURSOR_INITMASK;
                cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
                cursor->flags |= HAMMER_CURSOR_BACKEND;

                error = hammer_btree_lookup(cursor);
                if (hammer_debug_inode)
                        kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
                if (error) {
                        kprintf("error %d\n", error);
                        Debugger("hammer_update_inode");
                }

                if (error == 0) {
                        error = hammer_ip_delete_record(cursor, ip, trans->tid);
                        if (hammer_debug_inode)
                                kprintf(" error %d\n", error);
                        if (error && error != EDEADLK) {
                                kprintf("error %d\n", error);
                                Debugger("hammer_update_inode2");
                        }
                        if (error == 0) {
                                ip->flags |= HAMMER_INODE_DELONDISK;
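                                /*
                                 * Descriptive note: DELONDISK means the old
                                 * inode record has been deleted on-media but
                                 * its replacement has not been written yet;
                                 * writing the new record below clears it.
                                 */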
                        }
                        if (cursor->node)
                                hammer_cache_node(&ip->cache[0], cursor->node);
                }
                if (error == EDEADLK) {
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("IPDED %p %d\n", ip, error);
                        if (error == 0)
                                goto retry;
                }
        }

        /*
         * Ok, write out the initial record or a new record (after deleting
         * the old one), unless the DELETED flag is set.  This routine will
         * clear DELONDISK if it writes out a record.
         *
         * Update our inode statistics if this is the first application of
         * the inode on-disk.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
                /*
                 * Generate a record and write it to the media
                 */
                record = hammer_alloc_mem_record(ip, 0);
                record->type = HAMMER_MEM_RECORD_INODE;
                record->flush_state = HAMMER_FST_FLUSH;
                record->leaf = ip->sync_ino_leaf;
                record->leaf.base.create_tid = trans->tid;
                record->leaf.data_len = sizeof(ip->sync_ino_data);
                record->data = (void *)&ip->sync_ino_data;
                record->flags |= HAMMER_RECF_INTERLOCK_BE;
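                /*
                 * Descriptive note: record->data is not a copy, it points
                 * directly at sync_ino_data, and INTERLOCK_BE keeps the
                 * frontend from touching the record while the backend
                 * owns it.
                 */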
                for (;;) {
                        error = hammer_ip_sync_record_cursor(cursor, record);
                        if (hammer_debug_inode)
                                kprintf("GENREC %p rec %08x %d\n",
                                        ip, record->flags, error);
                        if (error != EDEADLK)
                                break;
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("GENREC reinit %d\n", error);
                        if (error)
                                break;
                }
                if (error) {
                        kprintf("error %d\n", error);
                        Debugger("hammer_update_inode3");
                }

                /*
                 * The record isn't managed by the inode's record tree,
                 * destroy it whether we succeed or fail.
                 */
                record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
                record->flags |= HAMMER_RECF_DELETED_FE;
                record->flush_state = HAMMER_FST_IDLE;
                hammer_rel_mem_record(record);

                /*
                 * Finish up.
                 */
                if (error == 0) {
                        if (hammer_debug_inode)
                                kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
                        ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                            HAMMER_INODE_ATIME |
                                            HAMMER_INODE_MTIME);
                        ip->flags &= ~HAMMER_INODE_DELONDISK;

                        /*
                         * Root volume count of inodes
                         */
                        if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
                                hammer_modify_volume_field(trans,
                                                           trans->rootvol,
                                                           vol0_stat_inodes);
                                ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans->rootvol);
                                ip->flags |= HAMMER_INODE_ONDISK;
                                if (hammer_debug_inode)
                                        kprintf("NOWONDISK %p\n", ip);
                        }
                }
        }
        /*
         * If the inode has been destroyed, clean out any left-over flags
         * that may have been set by the frontend.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                    HAMMER_INODE_ATIME |
                                    HAMMER_INODE_MTIME);
        }
        return(error);
}

/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        int error;

retry:
        error = 0;
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
            HAMMER_INODE_ONDISK) {
                return(0);
        }

        hammer_normalize_cursor(cursor);
        cursor->key_beg.localization = HAMMER_LOCALIZE_INODE;
        cursor->key_beg.obj_id = ip->obj_id;
        cursor->key_beg.key = 0;
        cursor->key_beg.create_tid = 0;
        cursor->key_beg.delete_tid = 0;
        cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor->key_beg.obj_type = 0;
        cursor->asof = ip->obj_asof;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_ASOF;
        cursor->flags |= HAMMER_CURSOR_GET_LEAF;
        cursor->flags |= HAMMER_CURSOR_GET_DATA;
        cursor->flags |= HAMMER_CURSOR_BACKEND;

        error = hammer_btree_lookup(cursor);
        if (error) {
                kprintf("error %d\n", error);
                Debugger("hammer_update_itimes1");
        }
        if (error == 0) {
                hammer_cache_node(&ip->cache[0], cursor->node);
                if (ip->sync_flags & HAMMER_INODE_MTIME) {
                        /*
                         * Updating MTIME requires an UNDO.  Just cover
                         * both atime and mtime.
                         */
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                     HAMMER_ITIMES_BASE(&cursor->data->inode),
                                     HAMMER_ITIMES_BYTES);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        cursor->data->inode.mtime = ip->sync_ino_data.mtime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                } else if (ip->sync_flags & HAMMER_INODE_ATIME) {
                        /*
                         * Updating atime only can be done in-place with
                         * no UNDO.
                         */
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                             NULL, 0);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                }
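                        /*
                         * Descriptive note (assuming the usual
                         * hammer_modify_buffer() convention): passing a
                         * NULL base and zero length records no UNDO for
                         * the modification, which is what makes the
                         * atime-only update cheap.
                         */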
                ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
        }
        if (error == EDEADLK) {
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor,
                                           &ip->cache[0], ip);
                if (error == 0)
                        goto retry;
        }
        return(error);
}
/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
        hammer_mount_t hmp = ip->hmp;

        /*
         * Handle disposition when dropping the last ref.
         */
        for (;;) {
                if (ip->lock.refs == 1) {
                        /*
                         * Determine whether on-disk action is needed for
                         * the inode's final disposition.
                         */
                        KKASSERT(ip->vp == NULL);
                        hammer_inode_unloadable_check(ip, 0);
                        if (ip->flags & HAMMER_INODE_MODMASK) {
                                if (hmp->rsv_inodes > desiredvnodes) {
                                        hammer_flush_inode(ip,
                                                           HAMMER_FLUSH_SIGNAL);
                                } else {
                                        hammer_flush_inode(ip, 0);
                                }
                        } else if (ip->lock.refs == 1) {
                                hammer_unload_inode(ip);
                                break;
                        }
                } else {
                        if (flush)
                                hammer_flush_inode(ip, 0);

                        /*
                         * The inode still has multiple refs, try to drop
                         * one ref.
                         */
                        KKASSERT(ip->lock.refs >= 1);
                        if (ip->lock.refs > 1) {
                                hammer_unref(&ip->lock);
                                break;
                        }
                }
        }
}
/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * This can only be called in the context of the flusher.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
        hammer_mount_t hmp = ip->hmp;

        KASSERT(ip->lock.refs == 1,
                ("hammer_unload_inode: %d refs\n", ip->lock.refs));
        KKASSERT(ip->vp == NULL);
        KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
        KKASSERT(ip->cursor_ip_refs == 0);
        KKASSERT(ip->lock.lockcount == 0);
        KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

        KKASSERT(RB_EMPTY(&ip->rec_tree));
        KKASSERT(TAILQ_EMPTY(&ip->target_list));

        RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        hammer_clear_objid(ip);
        --hammer_count_inodes;
        hammer_inode_wakereclaims(ip);
        kfree(ip, M_HAMMER);

        return(0);
}
/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
        hammer_mount_t hmp = ip->hmp;

        if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
                ip->flags |= HAMMER_INODE_RO;
        else
                ip->flags &= ~HAMMER_INODE_RO;
        return(0);
}
/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_inode_t ip, int flags)
{
        KKASSERT((ip->flags & HAMMER_INODE_RO) == 0 ||
                 (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
                           HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
                           HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
        if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
                ip->flags |= HAMMER_INODE_RSV_INODES;
                ++ip->hmp->rsv_inodes;
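                /*
                 * Descriptive note: the first dirtying of a clean inode
                 * reserves a flush slot via rsv_inodes; the reservation is
                 * released in hammer_flush_inode_done() once the inode is
                 * clean again.
                 */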
        }

        ip->flags |= flags;
}

/*
 * Request that an inode be flushed.  This whole mess cannot block and may
 * recurse (if not synchronous).  Once requested HAMMER will attempt to
 * actively flush the inode until the flush can be done.
 *
 * The inode may already be flushing, or may be in a setup state.  We can
 * place the inode in a flushing state if it is currently idle and flag it
 * to reflush if it is currently flushing.
 *
 * If the HAMMER_FLUSH_SYNCHRONOUS flag is specified we will attempt to
 * flush the inode synchronously using the caller's context.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
        int good;

        /*
         * Trivial 'nothing to flush' case.  If the inode is in a SETUP
         * state we have to put it back into an IDLE state so we can
         * drop the extra ref.
         */
        if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
                if (ip->flush_state == HAMMER_FST_SETUP) {
                        ip->flush_state = HAMMER_FST_IDLE;
                        hammer_rel_inode(ip, 0);
                }
                return;
        }

        /*
         * Our flush action will depend on the current state.
         */
        switch(ip->flush_state) {
        case HAMMER_FST_IDLE:
                /*
                 * We have no dependencies and can flush immediately.  Some
                 * of our children may not be flushable so we have to re-test
                 * with that additional knowledge.
                 */
                hammer_flush_inode_core(ip, flags);
                break;
        case HAMMER_FST_SETUP:
                /*
                 * Recurse upwards through dependencies via target_list
                 * and start their flusher actions going if possible.
                 *
                 * 'good' is our connectivity.  -1 means we have none and
                 * can't flush, 0 means there weren't any dependencies, and
                 * 1 means we have good connectivity.
                 */
                good = hammer_setup_parent_inodes(ip);

                /*
                 * We can continue if good >= 0.  Determine how many records
                 * under our inode can be flushed (and mark them).
                 */
                if (good >= 0) {
                        hammer_flush_inode_core(ip, flags);
                } else {
                        ip->flags |= HAMMER_INODE_REFLUSH;
                        if (flags & HAMMER_FLUSH_SIGNAL) {
                                ip->flags |= HAMMER_INODE_RESIGNAL;
                                hammer_flusher_async(ip->hmp);
                        }
                }
                break;
        default:
                /*
                 * We are already flushing, flag the inode to reflush
                 * if needed after it completes its current flush.
                 */
                if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
                        ip->flags |= HAMMER_INODE_REFLUSH;
                if (flags & HAMMER_FLUSH_SIGNAL) {
                        ip->flags |= HAMMER_INODE_RESIGNAL;
                        hammer_flusher_async(ip->hmp);
                }
                break;
        }
}
/*
 * Scan ip->target_list, which is a list of records owned by PARENTS to our
 * ip which reference our ip.
 *
 * XXX This is a huge mess of recursive code, but not one bit of it blocks
 * so for now do not ref/deref the structures.  Note that if we use the
 * ref/rel code later, the rel CAN block.
 */
static int
hammer_setup_parent_inodes(hammer_inode_t ip)
{
        hammer_record_t depend;
        hammer_inode_t pip;
        hammer_record_t next;
        int good;
        int r;

        good = 0;
        TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
                r = hammer_setup_parent_inodes_helper(depend);
                KKASSERT(depend->target_ip == ip);
                if (r < 0 && good == 0)
                        good = -1;
                if (r > 0)
                        good = 1;
        }
        return(good);

#if 0
        /*
         * Alternate ref/rel version, disabled per the XXX above because
         * the rel calls can block.
         */
        good = 0;
        next = TAILQ_FIRST(&ip->target_list);
        if (next) {
                hammer_ref(&next->lock);
                hammer_ref(&next->ip->lock);
        }
        while ((depend = next) != NULL) {
                if (depend->target_ip == NULL) {
                        pip = depend->ip;
                        hammer_rel_mem_record(depend);
                        hammer_rel_inode(pip, 0);
                        continue;
                }
                KKASSERT(depend->target_ip == ip);
                next = TAILQ_NEXT(depend, target_entry);
                if (next) {
                        hammer_ref(&next->lock);
                        hammer_ref(&next->ip->lock);
                }
                r = hammer_setup_parent_inodes_helper(depend);
                if (r < 0 && good == 0)
                        good = -1;
                if (r > 0)
                        good = 1;
                pip = depend->ip;
                hammer_rel_mem_record(depend);
                hammer_rel_inode(pip, 0);
        }
        return(good);
#endif
}
/*
 * This helper function takes a record representing the dependency between
 * the parent inode and child inode.
 *
 * record->ip           = parent inode
 * record->target_ip    = child inode
 *
 * We are asked to recurse upwards and convert the record from SETUP
 * to FLUSH if possible.
 *
 * Return 1 if the record gives us connectivity
 *
 * Return 0 if the record is not relevant
 *
 * Return -1 if we can't resolve the dependency and there is no connectivity.
 */
static int
hammer_setup_parent_inodes_helper(hammer_record_t record)
{
        hammer_mount_t hmp;
        hammer_inode_t pip;
        int good;

        KKASSERT(record->flush_state != HAMMER_FST_IDLE);
        pip = record->ip;
        hmp = pip->hmp;

        /*
         * If the record is already flushing, is it in our flush group?
         *
         * If it is in our flush group but it is a general record or a
         * delete-on-disk, it does not improve our connectivity (return 0),
         * and if the target inode is not trying to destroy itself we can't
         * allow the operation yet anyway (the second return -1).
         */
        if (record->flush_state == HAMMER_FST_FLUSH) {
                if (record->flush_group != hmp->flusher.next) {
                        pip->flags |= HAMMER_INODE_REFLUSH;
                        return(-1);
                }
                if (record->type == HAMMER_MEM_RECORD_ADD)
                        return(1);
                /* GENERAL or DEL */
                return(0);
        }

        /*
         * It must be a setup record.  Try to resolve the setup dependencies
         * by recursing upwards so we can place ip on the flush list.
         */
        KKASSERT(record->flush_state == HAMMER_FST_SETUP);

        good = hammer_setup_parent_inodes(pip);

        /*
         * We can't flush ip because it has no connectivity (XXX also check
         * nlinks for pre-existing connectivity!).  Flag it so any resolution
         * recurses back down.
         */
        if (good < 0) {
                pip->flags |= HAMMER_INODE_REFLUSH;
                return(good);
        }

        /*
         * We are go, place the parent inode in a flushing state so we can
         * place its record in a flushing state.  Note that the parent
         * may already be flushing.  The record must be in the same flush
         * group as the parent.
         */
        if (pip->flush_state != HAMMER_FST_FLUSH)
                hammer_flush_inode_core(pip, HAMMER_FLUSH_RECURSION);
        KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
        KKASSERT(record->flush_state == HAMMER_FST_SETUP);

        if (record->type == HAMMER_MEM_RECORD_DEL &&
            (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
                /*
                 * Regardless of flushing state we cannot sync this path if the
                 * record represents a delete-on-disk but the target inode
                 * is not ready to sync its own deletion.
                 *
                 * XXX need to count effective nlinks to determine whether
                 * the flush is ok, otherwise removing a hardlink will
                 * just leave the DEL record to rot.
                 */
                record->target_ip->flags |= HAMMER_INODE_REFLUSH;
                return(-1);
        }

        if (pip->flush_group == pip->hmp->flusher.next) {
                /*
                 * This is the record we wanted to synchronize.  If the
                 * record went into a flush state while we blocked it
                 * had better be in the correct flush group.
                 */
                if (record->flush_state != HAMMER_FST_FLUSH) {
                        record->flush_state = HAMMER_FST_FLUSH;
                        record->flush_group = pip->flush_group;
                        hammer_ref(&record->lock);
                }
                KKASSERT(record->flush_group == pip->flush_group);
                if (record->type == HAMMER_MEM_RECORD_ADD)
                        return(1);

                /*
                 * A general or delete-on-disk record does not contribute
                 * to our visibility.  We can still flush it, however.
                 */
                return(0);
        } else {
                /*
                 * We couldn't resolve the dependencies, request that the
                 * inode be flushed when the dependencies can be resolved.
                 */
                pip->flags |= HAMMER_INODE_REFLUSH;
                return(-1);
        }
}
/*
 * This is the core routine placing an inode into the FST_FLUSH state.
 */
static void
hammer_flush_inode_core(hammer_inode_t ip, int flags)
{
        int go_count;

        /*
         * Set flush state and prevent the flusher from cycling into
         * the next flush group.  Do not place the ip on the list yet.
         * Inodes not in the idle state get an extra reference.
         */
        KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
        if (ip->flush_state == HAMMER_FST_IDLE)
                hammer_ref(&ip->lock);
        ip->flush_state = HAMMER_FST_FLUSH;
        ip->flush_group = ip->hmp->flusher.next;
        ++ip->hmp->flusher.group_lock;
        ++ip->hmp->count_iqueued;
        ++hammer_count_iqueued;

        /*
         * We need to be able to vfsync/truncate from the backend.
         */
        KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
        if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
                ip->flags |= HAMMER_INODE_VHELD;
                vref(ip->vp);
        }

        /*
         * Figure out how many in-memory records we can actually flush
         * (not including inode meta-data, buffers, etc).
         */
        if (flags & HAMMER_FLUSH_RECURSION) {
                go_count = 1;
        } else {
                go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
                                   hammer_setup_child_callback, NULL);
        }

        /*
         * This is a more involved test that includes go_count.  If we
         * can't flush, flag the inode and return.  If go_count is 0 we
         * are unable to flush any records in our rec_tree and
         * must ignore the XDIRTY flag.
         */
        if (go_count == 0) {
                if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
                        ip->flags |= HAMMER_INODE_REFLUSH;

                        --ip->hmp->count_iqueued;
                        --hammer_count_iqueued;

                        ip->flush_state = HAMMER_FST_SETUP;
                        if (ip->flags & HAMMER_INODE_VHELD) {
                                ip->flags &= ~HAMMER_INODE_VHELD;
                                vrele(ip->vp);
                        }
                        if (flags & HAMMER_FLUSH_SIGNAL) {
                                ip->flags |= HAMMER_INODE_RESIGNAL;
                                hammer_flusher_async(ip->hmp);
                        }
                        if (--ip->hmp->flusher.group_lock == 0)
                                wakeup(&ip->hmp->flusher.group_lock);
                        return;
                }
        }

        /*
         * Snapshot the state of the inode for the backend flusher.
         *
         * The truncation must be retained in the frontend until after
         * we've actually performed the record deletion.
         *
         * We continue to retain sync_trunc_off even when all truncations
         * have been resolved as an optimization to determine if we can
         * skip the B-Tree lookup for overwrite deletions.
         *
         * NOTE: The DELETING flag is a mod flag, but it is also sticky,
         * and stays in ip->flags.  Once set, it stays set until the
         * inode is destroyed.
         */
        ip->sync_flags = (ip->flags & HAMMER_INODE_MODMASK);
        if (ip->sync_flags & HAMMER_INODE_TRUNCATED)
                ip->sync_trunc_off = ip->trunc_off;
        ip->sync_ino_leaf = ip->ino_leaf;
        ip->sync_ino_data = ip->ino_data;
        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        ip->flags &= ~HAMMER_INODE_MODMASK;
#ifdef DEBUG_TRUNCATE
        if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
                kprintf("truncateS %016llx\n", ip->sync_trunc_off);
#endif

        /*
         * The flusher list inherits our inode and reference.
         */
        TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
        if (--ip->hmp->flusher.group_lock == 0)
                wakeup(&ip->hmp->flusher.group_lock);

        if (flags & HAMMER_FLUSH_SIGNAL) {
                hammer_flusher_async(ip->hmp);
        }
}
/*
 * Callback for scan of ip->rec_tree.  Try to include each record in our
 * flush.  ip->flush_group has been set but the inode has not yet been
 * moved into a flushing state.
 *
 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
 * it.
 *
 * We return 1 for any record placed or found in FST_FLUSH, which prevents
 * the caller from shortcutting the flush.
 */
static int
hammer_setup_child_callback(hammer_record_t rec, void *data)
{
        hammer_inode_t target_ip;
        hammer_inode_t ip;
        int r;

        /*
         * Deleted records are ignored.  Note that the flush detects deleted
         * front-end records at multiple points to deal with races.  This is
         * just the first line of defense.  The only time DELETED_FE cannot
         * be set is when HAMMER_RECF_INTERLOCK_BE is set.
         *
         * Don't get confused between record deletion and, say, directory
         * entry deletion.  The deletion of a directory entry that is on
         * the media has nothing to do with the record deletion flags.
         */
        if (rec->flags & (HAMMER_RECF_DELETED_FE|HAMMER_RECF_DELETED_BE))
                return(0);

        /*
         * If the record is in an idle state it has no dependencies and
         * can be flushed.
         */
        ip = rec->ip;
        r = 0;

        switch(rec->flush_state) {
        case HAMMER_FST_IDLE:
                /*
                 * Record has no setup dependency, we can flush it.
                 */
                KKASSERT(rec->target_ip == NULL);
                rec->flush_state = HAMMER_FST_FLUSH;
                rec->flush_group = ip->flush_group;
                hammer_ref(&rec->lock);
                r = 1;
                break;
        case HAMMER_FST_SETUP:
                /*
                 * Record has a setup dependency.  Try to include the
                 * target ip in the flush.
                 *
                 * We have to be careful here, if we do not do the right
                 * thing we can lose track of dirty inodes and the system
                 * will lockup trying to allocate buffers.
                 */
                target_ip = rec->target_ip;
                KKASSERT(target_ip != NULL);
                KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
                if (target_ip->flush_state == HAMMER_FST_FLUSH) {
                        /*
                         * If the target IP is already flushing in our group
                         * we are golden, otherwise make sure the target
                         * reflushes.
                         */
                        if (target_ip->flush_group == ip->flush_group) {
                                rec->flush_state = HAMMER_FST_FLUSH;
                                rec->flush_group = ip->flush_group;
                                hammer_ref(&rec->lock);
                                r = 1;
                        } else {
                                target_ip->flags |= HAMMER_INODE_REFLUSH;
                        }
                } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
                        /*
                         * If the target IP is not flushing we can force
                         * it to flush, even if it is unable to write out
                         * any of its own records we have at least one in
                         * hand that we CAN deal with.
                         */
                        rec->flush_state = HAMMER_FST_FLUSH;
                        rec->flush_group = ip->flush_group;
                        hammer_ref(&rec->lock);
                        hammer_flush_inode_core(target_ip,
                                                HAMMER_FLUSH_RECURSION);
                        r = 1;
                } else {
                        /*
                         * General or delete-on-disk record.
                         *
                         * XXX this needs help.  If a delete-on-disk we could
                         * disconnect the target.  If the target has its own
                         * dependencies they really need to be flushed.
                         *
                         * XXX
                         */
                        rec->flush_state = HAMMER_FST_FLUSH;
                        rec->flush_group = ip->flush_group;
                        hammer_ref(&rec->lock);
                        hammer_flush_inode_core(target_ip,
                                                HAMMER_FLUSH_RECURSION);
                        r = 1;
                }
                break;
        case HAMMER_FST_FLUSH:
                /*
                 * Record already associated with a flush group.  It had
                 * better be ours.
                 */
                KKASSERT(rec->flush_group == ip->flush_group);
                r = 1;
                break;
        }
        return(r);
}
/*
 * Wait for a previously queued flush to complete.  Not only do we need to
 * wait for the inode to sync out, we also may have to run the flusher again
 * to get it past the UNDO position pertaining to the flush so a crash does
 * not 'undo' our flush.
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
        hammer_mount_t hmp = ip->hmp;
        int sync_group;
        int waitcount;

        sync_group = ip->flush_group;
        waitcount = (ip->flags & HAMMER_INODE_REFLUSH) ? 2 : 1;
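        /*
         * Descriptive note: flush groups are sequenced, so the loop below
         * keeps waiting while the inode is still attached to the group we
         * are syncing or the one immediately after it (the
         * (ip->flush_group - sync_group) < 2 test).  This covers the case
         * where the inode is re-queued into the next group while we sleep.
         */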
        while (ip->flush_state != HAMMER_FST_IDLE &&
               (ip->flush_group - sync_group) < 2) {
                if (ip->flush_state == HAMMER_FST_SETUP) {
                        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                } else {
                        ip->flags |= HAMMER_INODE_FLUSHW;
                        tsleep(&ip->flags, 0, "hmrwin", 0);
                }
        }
        while (hmp->flusher.done - sync_group < waitcount) {
                hammer_flusher_sync(hmp);
        }
}
/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip)
{
        hammer_mount_t hmp;
        int dorel;

        KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

        hmp = ip->hmp;

        /*
         * Merge left-over flags back into the frontend and fix the state.
         */
        ip->flags |= ip->sync_flags;

        /*
         * The backend may have adjusted nlinks, so if the adjusted nlinks
         * does not match the frontend set the frontend's DDIRTY flag again.
         */
        if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
                ip->flags |= HAMMER_INODE_DDIRTY;

        /*
         * Fix up the dirty buffer status.  IO completions will also
         * try to clean up rsv_databufs.
         */
        if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
                ip->flags |= HAMMER_INODE_BUFS;
        } else {
                hmp->rsv_databufs -= ip->rsv_databufs;
                ip->rsv_databufs = 0;
        }

        /*
         * Re-set the XDIRTY flag if some of the inode's in-memory records
         * could not be flushed.
         */
        KKASSERT((RB_EMPTY(&ip->rec_tree) &&
                  (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
                 (!RB_EMPTY(&ip->rec_tree) &&
                  (ip->flags & HAMMER_INODE_XDIRTY) != 0));

        /*
         * Do not lose track of inodes which no longer have vnode
         * associations, otherwise they may never get flushed again.
         */
        if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
                ip->flags |= HAMMER_INODE_REFLUSH;

        /*
         * Adjust flush_state.  The target state (idle or setup) shouldn't
         * be terribly important since we will reflush if we really need
         * to do anything.  XXX
         */
        if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
                ip->flush_state = HAMMER_FST_IDLE;
                dorel = 1;
        } else {
                ip->flush_state = HAMMER_FST_SETUP;
                dorel = 0;
        }

        --hmp->count_iqueued;
        --hammer_count_iqueued;

        /*
         * Clean up the vnode ref
         */
        if (ip->flags & HAMMER_INODE_VHELD) {
                ip->flags &= ~HAMMER_INODE_VHELD;
                vrele(ip->vp);
        }

        /*
         * If the frontend made more changes and requested another flush,
         * then try to get it running.
         */
        if (ip->flags & HAMMER_INODE_REFLUSH) {
                ip->flags &= ~HAMMER_INODE_REFLUSH;
                if (ip->flags & HAMMER_INODE_RESIGNAL) {
                        ip->flags &= ~HAMMER_INODE_RESIGNAL;
                        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                } else {
                        hammer_flush_inode(ip, 0);
                }
        }

        /*
         * If the inode is now clean drop the space reservation.
         */
        if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
            (ip->flags & HAMMER_INODE_RSV_INODES)) {
                ip->flags &= ~HAMMER_INODE_RSV_INODES;
                --hmp->rsv_inodes;
        }

        /*
         * Finally, if the frontend is waiting for a flush to complete,
         * wake it up.
         */
        if (ip->flush_state != HAMMER_FST_FLUSH) {
                if (ip->flags & HAMMER_INODE_FLUSHW) {
                        ip->flags &= ~HAMMER_INODE_FLUSHW;
                        wakeup(&ip->flags);
                }
        }
        if (dorel)
                hammer_rel_inode(ip, 0);
}
/*
 * Called from hammer_sync_inode() to synchronize in-memory records
 * to the media.
 */
static int
hammer_sync_record_callback(hammer_record_t record, void *data)
{
        hammer_cursor_t cursor = data;
        hammer_transaction_t trans = cursor->trans;
        int error;

        /*
         * Skip records that do not belong to the current flush.
         */
        ++hammer_stats_record_iterations;
        if (record->flush_state != HAMMER_FST_FLUSH)
                return(0);

        if (record->flush_group != record->ip->flush_group) {
                kprintf("sync_record %p ip %p bad flush group %d %d\n",
                        record, record->ip, record->flush_group,
                        record->ip->flush_group);
                Debugger("blah2");
                return(0);
        }
        KKASSERT(record->flush_group == record->ip->flush_group);

        /*
         * Interlock the record using the BE flag.  Once BE is set the
         * frontend cannot change the state of FE.
         *
         * NOTE: If FE is set prior to us setting BE we still sync the
         * record out, but the flush completion code converts it to
         * a delete-on-disk record instead of destroying it.
         */
        KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
        record->flags |= HAMMER_RECF_INTERLOCK_BE;

        /*
         * The backend may have already disposed of the record.
         */
        if (record->flags & HAMMER_RECF_DELETED_BE) {
                error = 0;
                goto done;
        }

        /*
         * If the whole inode is being deleted all on-disk records will
         * be deleted very soon, we can't sync any new records to disk
         * because they will be deleted in the same transaction they were
         * created in (delete_tid == create_tid), which will assert.
         *
         * XXX There may be a case with RECORD_ADD with DELETED_FE set
         * that we currently panic on.
         */
        if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
                switch(record->type) {
                case HAMMER_MEM_RECORD_DATA:
                        /*
                         * We don't have to do anything, if the record was
                         * committed the space will have been accounted for
                         * anyway.
                         */
                        /* fall through */
                case HAMMER_MEM_RECORD_GENERAL:
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        record->flags |= HAMMER_RECF_DELETED_BE;
                        error = 0;
                        goto done;
                case HAMMER_MEM_RECORD_ADD:
                        panic("hammer_sync_record_callback: illegal add "
                              "during inode deletion record %p", record);
                        break; /* NOT REACHED */
                case HAMMER_MEM_RECORD_INODE:
                        panic("hammer_sync_record_callback: attempt to "
                              "sync inode record %p?", record);
                        break; /* NOT REACHED */
                case HAMMER_MEM_RECORD_DEL:
                        /*
                         * Follow through and issue the on-disk deletion
                         */
                        break;
                }
        }

        /*
         * If DELETED_FE is set special handling is needed for directory
         * entries.  Dependent pieces related to the directory entry may
         * have already been synced to disk.  If this occurs we have to
         * sync the directory entry and then change the in-memory record
         * from an ADD to a DELETE to cover the fact that it's been
         * deleted by the frontend.
         *
         * A directory delete covering record (MEM_RECORD_DEL) can never
         * be deleted by the frontend.
         *
         * Any other record type (aka DATA) can be deleted by the frontend.
         * XXX At the moment the flusher must skip it because there may
         * be another data record in the flush group for the same block,
         * meaning that some frontend data changes can leak into the backend's
         * synchronization point.
         */
        if (record->flags & HAMMER_RECF_DELETED_FE) {
                if (record->type == HAMMER_MEM_RECORD_ADD) {
                        record->flags |= HAMMER_RECF_CONVERT_DELETE;
                } else {
                        KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
                        record->flags |= HAMMER_RECF_DELETED_BE;
                        error = 0;
                        goto done;
                }
        }

        /*
         * Assign the create_tid for new records.  Deletions already
         * have the record's entire key properly set up.
         */
        if (record->type != HAMMER_MEM_RECORD_DEL)
                record->leaf.base.create_tid = trans->tid;
        for (;;) {
                error = hammer_ip_sync_record_cursor(cursor, record);
                if (error != EDEADLK)
                        break;
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
                                           record->ip);
                if (error)
                        break;
        }
        record->flags &= ~HAMMER_RECF_CONVERT_DELETE;

        if (error) {
                error = -error;
                if (error != -ENOSPC) {
                        kprintf("hammer_sync_record_callback: sync failed rec "
                                "%p, error %d\n", record, error);
                        Debugger("sync failed rec");
                }
        }
done:
        hammer_flush_record_done(record, error);
        return(error);
}
/*
 * XXX error handling
 */
int
hammer_sync_inode(hammer_inode_t ip)
{
        struct hammer_transaction trans;
        struct hammer_cursor cursor;
        hammer_node_t tmp_node;
        hammer_record_t depend;
        hammer_record_t next;
        int error, tmp_error;
        u_int64_t nlinks;

        if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
                return(0);

        hammer_start_transaction_fls(&trans, ip->hmp);
        error = hammer_init_cursor(&trans, &cursor, &ip->cache[1], ip);
        if (error) {
                hammer_done_transaction(&trans);
                return(error);
        }

        /*
         * Any directory records referencing this inode which are not in
         * our current flush group must adjust our nlink count for the
         * purposes of synchronization to disk.
         *
         * Records which are in our flush group can be unlinked from our
         * inode now, potentially allowing the inode to be physically
         * deleted.
         *
         * This cannot block.
         */
        nlinks = ip->ino_data.nlinks;
        next = TAILQ_FIRST(&ip->target_list);
        while ((depend = next) != NULL) {
                next = TAILQ_NEXT(depend, target_entry);
                if (depend->flush_state == HAMMER_FST_FLUSH &&
                    depend->flush_group == ip->hmp->flusher.act) {
                        /*
                         * If this is an ADD that was deleted by the frontend
                         * the frontend nlinks count will have already been
                         * decremented, but the backend is going to sync its
                         * directory entry and must account for it.  The
                         * record will be converted to a delete-on-disk when
                         * it gets synced.
                         *
                         * If the ADD was not deleted by the frontend we
                         * can remove the dependency from our target_list.
                         */
                        if (depend->flags & HAMMER_RECF_DELETED_FE) {
                                ++nlinks;
                        } else {
                                TAILQ_REMOVE(&ip->target_list, depend,
                                             target_entry);
                                depend->target_ip = NULL;
                        }
                } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
                        /*
                         * Not part of our flush group
                         */
                        KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
                        switch(depend->type) {
                        case HAMMER_MEM_RECORD_ADD:
                                --nlinks;
                                break;
                        case HAMMER_MEM_RECORD_DEL:
                                ++nlinks;
                                break;
                        default:
                                break;
                        }
                }
        }

        /*
         * Set dirty if we had to modify the link count.
         */
        if (ip->sync_ino_data.nlinks != nlinks) {
                KKASSERT((int64_t)nlinks >= 0);
                ip->sync_ino_data.nlinks = nlinks;
                ip->sync_flags |= HAMMER_INODE_DDIRTY;
        }

        /*
         * If there is a truncation queued destroy any data past the (aligned)
         * truncation point.  Userland will have dealt with the buffer
         * containing the truncation point for us.
         *
         * We don't flush pending frontend data buffers until after we've
         * dealt with the truncation.
         */
        if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
                /*
                 * Interlock trunc_off.  The VOP front-end may continue to
                 * make adjustments to it while we are blocked.
                 */
                off_t trunc_off;
                off_t aligned_trunc_off;
                int blkmask;

                trunc_off = ip->sync_trunc_off;
                blkmask = hammer_blocksize(trunc_off) - 1;
                aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
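                /*
                 * Worked example of the alignment arithmetic (descriptive
                 * note, assuming hammer_blocksize() returns 16384 for this
                 * offset):
                 *
                 *      trunc_off         = 20480 (0x5000)
                 *      blkmask           = 16383 (0x3FFF)
                 *      aligned_trunc_off = (0x5000 + 0x3FFF) & ~0x3FFF
                 *                        = 32768 (0x8000)
                 *
                 * i.e. the truncation point is rounded up to the next block
                 * boundary; the partial block below it was already handled
                 * by the frontend.
                 */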
                /*
                 * Delete any whole blocks on-media.  The front-end has
                 * already cleaned out any partial block and made it
                 * pending.  The front-end may have updated trunc_off
                 * while we were blocked so we only use sync_trunc_off.
                 */
                error = hammer_ip_delete_range(&cursor, ip,
                                                aligned_trunc_off,
                                                0x7FFFFFFFFFFFFFFFLL, 1);
                if (error)
                        Debugger("hammer_ip_delete_range errored");

                /*
                 * Clear the truncation flag on the backend after we have
                 * completed the deletions.  Backend data is now good again
                 * (including new records we are about to sync, below).
                 *
                 * Leave sync_trunc_off intact.  As we write additional
                 * records the backend will update sync_trunc_off.  This
                 * tells the backend whether it can skip the overwrite
                 * test.  This should work properly even when the backend
                 * writes full blocks where the truncation point straddles
                 * the block because the comparison is against the base
                 * offset of the record.
                 */
                ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
                /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
        } else {
                error = 0;
        }

        /*
         * Now sync related records.  These will typically be directory
         * entries or delete-on-disk records.
         *
         * Not all records will be flushed, but clear XDIRTY anyway.  We
         * will set it again in the frontend hammer_flush_inode_done()
         * if records remain.
         */
        if (error == 0) {
                tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
                                    hammer_sync_record_callback, &cursor);
                if (tmp_error < 0)
                        tmp_error = -tmp_error;
                if (tmp_error)
                        error = tmp_error;
        }
        hammer_cache_node(&ip->cache[1], cursor.node);

        /*
         * Re-seek for inode update.
         */
        if (error == 0) {
                tmp_node = hammer_ref_node_safe(ip->hmp, &ip->cache[0], &error);
                if (tmp_node) {
                        hammer_cursor_seek(&cursor, tmp_node, 0);
                        hammer_rel_node(tmp_node);
                }
                error = 0;
        }

        /*
         * If we are deleting the inode the frontend had better not have
         * any active references on elements making up the inode.
         */
        if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
            RB_EMPTY(&ip->rec_tree) &&
            (ip->sync_flags & HAMMER_INODE_DELETING) &&
            (ip->flags & HAMMER_INODE_DELETED) == 0) {
                int count1 = 0;

                ip->flags |= HAMMER_INODE_DELETED;
                error = hammer_ip_delete_range_all(&cursor, ip, &count1);
                if (error == 0) {
                        ip->sync_flags &= ~HAMMER_INODE_DELETING;
                        ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
                        KKASSERT(RB_EMPTY(&ip->rec_tree));

                        /*
                         * Set delete_tid in both the frontend and backend
                         * copy of the inode record.  The DELETED flag handles
                         * this, do not set DDIRTY.
                         */
                        ip->ino_leaf.base.delete_tid = trans.tid;
                        ip->sync_ino_leaf.base.delete_tid = trans.tid;

                        /*
                         * Adjust the inode count in the volume header
                         */
                        if (ip->flags & HAMMER_INODE_ONDISK) {
                                hammer_modify_volume_field(&trans,
                                                           trans.rootvol,
                                                           vol0_stat_inodes);
                                --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans.rootvol);
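                                /*
                                 * Descriptive note: this decrement pairs
                                 * with the increment in hammer_update_inode()
                                 * performed when the inode record first
                                 * went on-disk.
                                 */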
                        }
                } else {
                        ip->flags &= ~HAMMER_INODE_DELETED;
                        Debugger("hammer_ip_delete_range_all errored");
                }
        }

        ip->sync_flags &= ~HAMMER_INODE_BUFS;

        if (error)
                Debugger("RB_SCAN errored");

        /*
         * Now update the inode's on-disk inode-data and/or on-disk record.
         * DELETED and ONDISK are managed only in ip->flags.
         */
        switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
        case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
                /*
                 * If deleted and on-disk, don't set any additional flags.
                 * The delete flag takes care of things.
                 *
                 * Clear flags which may have been set by the frontend.
                 */
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
                                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
                                    HAMMER_INODE_DELETING);
                break;
        case HAMMER_INODE_DELETED:
                /*
                 * Take care of the case where a deleted inode was never
                 * flushed to the disk in the first place.
                 *
                 * Clear flags which may have been set by the frontend.
                 */
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
                                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
                                    HAMMER_INODE_DELETING);
                while (RB_ROOT(&ip->rec_tree)) {
                        hammer_record_t record = RB_ROOT(&ip->rec_tree);
                        hammer_ref(&record->lock);
                        KKASSERT(record->lock.refs == 1);
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        record->flags |= HAMMER_RECF_DELETED_BE;
                        hammer_rel_mem_record(record);
                }
                break;
        case HAMMER_INODE_ONDISK:
                /*
                 * If already on-disk, do not set any additional flags.
                 */
                break;
        default:
                /*
                 * If not on-disk and not deleted, set DDIRTY to force
                 * an initial record to be written.
                 *
                 * Also set the create_tid in both the frontend and backend
                 * copy of the inode record.
                 */
                ip->ino_leaf.base.create_tid = trans.tid;
                ip->sync_ino_leaf.base.create_tid = trans.tid;
                ip->sync_flags |= HAMMER_INODE_DDIRTY;
                break;
        }

        /*
         * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
         * is already on-disk the old record is marked as deleted.
         *
         * If DELETED is set hammer_update_inode() will delete the existing
         * record without writing out a new one.
         *
         * If *ONLY* the ITIMES flag is set we can update the record in-place.
         */
        if (ip->flags & HAMMER_INODE_DELETED) {
                error = hammer_update_inode(&cursor, ip);
        } else
        if ((ip->sync_flags & HAMMER_INODE_DDIRTY) == 0 &&
            (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
                error = hammer_update_itimes(&cursor, ip);
        } else
        if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ATIME |
                              HAMMER_INODE_MTIME)) {
                error = hammer_update_inode(&cursor, ip);
        }
        if (error)
                Debugger("hammer_update_itimes/inode errored");

        /*
         * Save the TID we used to sync the inode with to make sure we
         * do not improperly reuse it.
         */
        hammer_done_cursor(&cursor);
        hammer_done_transaction(&trans);
        return(error);
}
/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
        struct vnode *vp;

        /*
         * Set the DELETING flag when the link count drops to 0 and the
         * OS no longer has any opens on the inode.
         *
         * The backend will clear DELETING (a mod flag) and set DELETED
         * (a state flag) when it is actually able to perform the
         * operation.
         */
        if (ip->ino_data.nlinks == 0 &&
            (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
                ip->flags |= HAMMER_INODE_DELETING;
                ip->flags |= HAMMER_INODE_TRUNCATED;
                ip->trunc_off = 0;
                vp = NULL;
                if (getvp) {
                        if (hammer_get_vnode(ip, &vp) != 0)
                                return;
                }

                if (ip->vp) {
                        vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
                        vnode_pager_setsize(ip->vp, 0);
                }
                if (getvp)
                        vput(vp);
        }
}
/*
 * Re-test an inode when a dependency had gone away to see if we
 * can chain flush it.
 */
void
hammer_test_inode(hammer_inode_t ip)
{
        if (ip->flags & HAMMER_INODE_REFLUSH) {
                ip->flags &= ~HAMMER_INODE_REFLUSH;
                hammer_ref(&ip->lock);
                if (ip->flags & HAMMER_INODE_RESIGNAL) {
                        ip->flags &= ~HAMMER_INODE_RESIGNAL;
                        hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
                } else {
                        hammer_flush_inode(ip, 0);
                }
                hammer_rel_inode(ip, 0);
        }
}
/*
 * Clear the RECLAIM flag on an inode.  This occurs when the inode is
 * reassociated with a vp or just before it gets freed.
 *
 * Wakeup one thread blocked waiting on reclaims to complete.  Note that
 * the inode the thread is waiting on behalf of is a different inode than
 * the inode we are called with.  This is to create a pipeline.
 */
static void
hammer_inode_wakereclaims(hammer_inode_t ip)
{
        struct hammer_reclaim *reclaim;
        hammer_mount_t hmp = ip->hmp;

        if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
                return;

        --hammer_count_reclaiming;
        --hmp->inode_reclaims;
        ip->flags &= ~HAMMER_INODE_RECLAIM;

        if ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
                TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
                reclaim->okydoky = 1;
                wakeup(reclaim);
        }
}
/*
 * Setup our reclaim pipeline.  We only let so many detached (and dirty)
 * inodes build up before we start blocking.
 *
 * When we block we don't care *which* inode has finished reclaiming,
 * as long as one does.  This is somewhat heuristical... we also put a
 * cap on how long we are willing to wait.
 */
void
hammer_inode_waitreclaims(hammer_mount_t hmp)
{
        struct hammer_reclaim reclaim;
        int delay;

        if (hmp->inode_reclaims > HAMMER_RECLAIM_WAIT) {
                reclaim.okydoky = 0;
                TAILQ_INSERT_TAIL(&hmp->reclaim_list,
                                  &reclaim, entry);
        } else {
                reclaim.okydoky = 1;
        }

        if (reclaim.okydoky == 0) {
                delay = (hmp->inode_reclaims - HAMMER_RECLAIM_WAIT) * hz /
                        HAMMER_RECLAIM_WAIT;
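                /*
                 * Worked example (descriptive note): with hz = 100 and a
                 * backlog exactly twice HAMMER_RECLAIM_WAIT, the formula
                 * above yields (2N - N) * hz / N = hz ticks, i.e. roughly
                 * one second of sleep unless the pipeline wakes us first.
                 */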
                if (delay >= 0)
                        tsleep(&reclaim, 0, "hmrrcm", delay + 1);
                if (reclaim.okydoky == 0)
                        TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
        }
}