/*
 * Copyright (c) 2007 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.34 2008/04/22 19:00:15 dillon Exp $
 */

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
	struct hammer_inode *ip = VTOI(ap->a_vp);

	/*
	 * If the inode no longer has any references we recover its
	 * in-memory resources immediately.
	 */
	if (ip->ino_rec.ino_nlinks == 0)

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode and will flush the inode to the
 * media.
 *
 * XXX Currently our sync code only runs through inodes with vnode
 * associations, so we depend on hammer_rel_inode() to sync any inode
 * record data to the block device prior to losing the association.
 * Otherwise transactions that the user expected to be distinct by
 * doing a manual sync may be merged.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
	struct hammer_inode *ip;

	if ((ip = vp->v_data) != NULL) {
		hammer_rel_inode(ip, 0);
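
/*
 * Illustrative sketch (an assumption, not code from this file): these
 * handlers are wired into HAMMER's vnode operations vector, normally in
 * hammer_vnops.c.  The structure and field names below are assumed for
 * illustration only.
 *
 *	struct vop_ops hammer_vnode_vops = {
 *		.vop_default =		vop_defaultop,
 *		.vop_inactive =		hammer_vop_inactive,
 *		.vop_reclaim =		hammer_vop_reclaim,
 *		...
 *	};
 */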

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 */
int
hammer_get_vnode(struct hammer_inode *ip, int lktype, struct vnode **vpp)
{
	if ((vp = ip->vp) == NULL) {
		error = getnewvnode(VT_HAMMER, ip->hmp->mp, vpp, 0, 0);
		hammer_lock_ex(&ip->lock);
		if (ip->vp != NULL) {
			hammer_unlock(&ip->lock);
		hammer_ref(&ip->lock);
		vp->v_type = hammer_get_vnode_type(
				ip->ino_rec.base.base.obj_type);

		switch(ip->ino_rec.base.base.obj_type) {
		case HAMMER_OBJTYPE_CDEV:
		case HAMMER_OBJTYPE_BDEV:
			vp->v_ops = &ip->hmp->mp->mnt_vn_spec_ops;
			addaliasu(vp, ip->ino_data.rmajor,
				  ip->ino_data.rminor);
		case HAMMER_OBJTYPE_FIFO:
			vp->v_ops = &ip->hmp->mp->mnt_vn_fifo_ops;

		/*
		 * Only mark as the root vnode if the ip is not
		 * historical, otherwise the VFS cache will get
		 * confused.  The other half of the special handling
		 * is in hammer_vop_nlookupdotdot().
		 */
		if (ip->obj_id == HAMMER_OBJID_ROOT &&
		    ip->obj_asof == ip->hmp->asof) {

		vp->v_data = (void *)ip;
		/* vnode locked by getnewvnode() */
		/* make related vnode dirty if inode dirty? */
		hammer_unlock(&ip->lock);
		if (vp->v_type == VREG)
			vinitvmio(vp, ip->ino_rec.ino_size);

	/*
	 * loop if the vget fails (aka races), or if the vp
	 * no longer matches ip->vp.
	 */
	if (vget(vp, LK_EXCLUSIVE) == 0) {
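
/*
 * Illustrative caller pattern (a sketch, not code from this file): lookups
 * typically obtain a referenced inode with hammer_get_inode() below,
 * convert it to a locked vnode with hammer_get_vnode() above, and then
 * drop their own inode reference.  Error handling is elided and the local
 * names are assumptions.
 *
 *	ip = hammer_get_inode(&trans, NULL, obj_id, hmp->asof, 0, &error);
 *	if (ip != NULL) {
 *		error = hammer_get_vnode(ip, LK_EXCLUSIVE, &vp);
 *		hammer_rel_inode(ip, 0);
 *	}
 */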

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, struct hammer_node **cache,
		 u_int64_t obj_id, hammer_tid_t asof, int flags, int *errorp)
{
	hammer_mount_t hmp = trans->hmp;
	struct hammer_inode_info iinfo;
	struct hammer_cursor cursor;
	struct hammer_inode *ip;

	/*
	 * Determine if we already have an inode cached.  If we do then
	 * reference it and return it.
	 */
	iinfo.obj_id = obj_id;
	iinfo.obj_asof = asof;
	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
	hammer_ref(&ip->lock);

	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	ip->obj_asof = iinfo.obj_asof;
	ip->flags = flags & HAMMER_INODE_RO;
	ip->flags |= HAMMER_INODE_RO;
	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->bio_list);

	/*
	 * Locate the on-disk inode.
	 */
	hammer_init_cursor(trans, &cursor, cache);
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.key = 0;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
	cursor.key_beg.obj_type = 0;
	cursor.asof = iinfo.obj_asof;
	cursor.flags = HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_GET_DATA |
		       HAMMER_CURSOR_ASOF;

	*errorp = hammer_btree_lookup(&cursor);
	if (*errorp == EDEADLK) {
		hammer_done_cursor(&cursor);

	/*
	 * On success the B-Tree lookup will hold the appropriate
	 * buffer cache buffers and provide a pointer to the requested
	 * information.  Copy the information to the in-memory inode
	 * and cache the B-Tree node to improve future operations.
	 */
	ip->ino_rec = cursor.record->inode;
	ip->ino_data = cursor.data->inode;
	ip->sync_tid = ip->ino_rec.base.base.create_tid;
	hammer_cache_node(cursor.node, &ip->cache[0]);
	hammer_cache_node(cursor.node, cache);

	/*
	 * On success load the inode's record and data and insert the
	 * inode into the B-Tree.  It is possible to race another lookup's
	 * insertion of the same inode, so deal with that condition too.
	 *
	 * The cursor's locked node interlocks against others creating and
	 * destroying ip while we were blocked.
	 */
	hammer_ref(&ip->lock);
	if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		hammer_uncache_node(&ip->cache[0]);
		hammer_uncache_node(&ip->cache[1]);
		hammer_unref(&ip->lock);
		--hammer_count_inodes;
		hammer_done_cursor(&cursor);
	ip->flags |= HAMMER_INODE_ONDISK;
	--hammer_count_inodes;
	hammer_done_cursor(&cursor);
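
/*
 * Worth noting (a sketch, not original code): in-memory inodes are keyed
 * on (obj_id, obj_asof) in the per-mount red-black tree, so the same
 * object viewed as-of two different TIDs is represented by two distinct
 * hammer_inode structures:
 *
 *	iinfo.obj_id = obj_id;
 *	iinfo.obj_asof = asof;
 *	ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
 */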

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced but not locked.
 *
 * The inode is created in-memory and will be delay-synchronized to the
 * media.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
		    struct ucred *cred, hammer_inode_t dip,
		    struct hammer_inode **ipp)
{
	ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
	++hammer_count_inodes;
	ip->obj_id = hammer_alloc_tid(trans);
	KKASSERT(ip->obj_id != 0);
	ip->obj_asof = hmp->asof;
	ip->flags = HAMMER_INODE_DDIRTY | HAMMER_INODE_RDIRTY |
		    HAMMER_INODE_ITIMES | HAMMER_INODE_TIDLOCKED;
	ip->last_tid = trans->tid;

	RB_INIT(&ip->rec_tree);
	TAILQ_INIT(&ip->bio_list);

	ip->ino_rec.ino_atime = trans->tid;
	ip->ino_rec.ino_mtime = trans->tid;
	ip->ino_rec.ino_size = 0;
	ip->ino_rec.ino_nlinks = 0;

	ip->ino_rec.base.base.btype = HAMMER_BTREE_TYPE_RECORD;
	ip->ino_rec.base.base.obj_id = ip->obj_id;
	ip->ino_rec.base.base.key = 0;
	ip->ino_rec.base.base.create_tid = trans->tid;
	ip->ino_rec.base.base.delete_tid = 0;
	ip->ino_rec.base.base.rec_type = HAMMER_RECTYPE_INODE;
	ip->ino_rec.base.base.obj_type = hammer_get_obj_type(vap->va_type);

	ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
	ip->ino_data.mode = vap->va_mode;
	ip->ino_data.ctime = trans->tid;
	ip->ino_data.parent_obj_id = (dip) ? dip->ino_rec.base.base.obj_id : 0;

	switch(ip->ino_rec.base.base.obj_type) {
	case HAMMER_OBJTYPE_CDEV:
	case HAMMER_OBJTYPE_BDEV:
		ip->ino_data.rmajor = vap->va_rmajor;
		ip->ino_data.rminor = vap->va_rminor;

	/*
	 * Calculate default uid/gid and overwrite with information from
	 * the vap.
	 */
	xuid = hammer_to_unix_xid(&dip->ino_data.uid);
	ip->ino_data.gid = dip->ino_data.gid;
	xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
	ip->ino_data.mode = vap->va_mode;

	if (vap->va_vaflags & VA_UID_UUID_VALID)
		ip->ino_data.uid = vap->va_uid_uuid;
	else if (vap->va_uid != (uid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.uid, xuid);
	if (vap->va_vaflags & VA_GID_UUID_VALID)
		ip->ino_data.gid = vap->va_gid_uuid;
	else if (vap->va_gid != (gid_t)VNOVAL)
		hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);

	hammer_ref(&ip->lock);
	if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
		hammer_unref(&ip->lock);
		panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
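
/*
 * Illustrative caller pattern (a sketch under assumptions, not code from
 * this file): the create/mknod/symlink VOPs allocate the in-memory inode
 * here, link it into the parent directory, and only then obtain a vnode.
 * hammer_ip_add_directory() and the local names are assumptions.
 *
 *	hammer_start_transaction(&trans, dip->hmp);
 *	error = hammer_create_inode(&trans, vap, cred, dip, &nip);
 *	if (error == 0)
 *		error = hammer_ip_add_directory(&trans, dip, nch->ncp, nip);
 *	if (error == 0)
 *		error = hammer_get_vnode(nip, LK_EXCLUSIVE, vpp);
 *	hammer_commit_transaction(&trans);
 *	hammer_rel_inode(nip, 0);
 */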

/*
 * Called by hammer_sync_inode().
 */
int
hammer_update_inode(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	hammer_record_t record;

	/*
	 * Locate the record on-disk and mark it as deleted.  Both the B-Tree
	 * node and the record must be marked deleted.  The record may or
	 * may not be physically deleted, depending on the retention policy.
	 *
	 * If the inode has already been deleted on-disk we have nothing
	 * further to do.
	 *
	 * XXX Update the inode record and data in-place if the retention
	 * policy allows it.
	 */
	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_init_cursor(trans, &cursor, &ip->cache[0]);
		cursor.key_beg.obj_id = ip->obj_id;
		cursor.key_beg.key = 0;
		cursor.key_beg.create_tid = 0;
		cursor.key_beg.delete_tid = 0;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor.key_beg.obj_type = 0;
		cursor.asof = ip->obj_asof;
		cursor.flags |= HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_ASOF;

		error = hammer_btree_lookup(&cursor);
		error = hammer_ip_delete_record(&cursor, trans->tid);
		ip->flags |= HAMMER_INODE_DELONDISK;
		hammer_cache_node(cursor.node, &ip->cache[0]);
		hammer_done_cursor(&cursor);
		if (error == EDEADLK)

	/*
	 * Write out a new record if the in-memory inode is not marked
	 * as having been deleted.  Update our inode statistics if this
	 * is the first application of the inode on-disk.
	 *
	 * If the inode has been deleted permanently, HAMMER_INODE_DELONDISK
	 * will remain set and prevent further updates.
	 */
	if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
		record = hammer_alloc_mem_record(ip);
		record->rec.inode = ip->ino_rec;
		record->rec.inode.base.base.create_tid = trans->tid;
		record->rec.inode.base.data_len = sizeof(ip->ino_data);
		record->data = (void *)&ip->ino_data;
		error = hammer_ip_sync_record(trans, record);
		record->flags |= HAMMER_RECF_DELETED;
		hammer_rel_mem_record(record);
		ip->flags &= ~(HAMMER_INODE_RDIRTY |
			       HAMMER_INODE_DDIRTY |
			       HAMMER_INODE_DELONDISK |
			       HAMMER_INODE_ITIMES);
		if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
			hammer_modify_volume(trans, ip->hmp->rootvol,
					     NULL, 0);
			++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
			ip->flags |= HAMMER_INODE_ONDISK;

		/*
		 * Unlock the sync TID if it was locked, now that
		 * we have written it out to disk.
		 */
		ip->flags &= ~HAMMER_INODE_TIDLOCKED;
		ip->sync_tid = trans->tid;
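
/*
 * Deadlock recovery sketch (the retry target is an assumption; the elided
 * control flow is not shown above): cursor operations can fail with
 * EDEADLK, in which case the cursor is torn down and the lookup/update is
 * restarted from the top:
 *
 *	retry:
 *		hammer_init_cursor(trans, &cursor, &ip->cache[0]);
 *		...
 *		error = hammer_btree_lookup(&cursor);
 *		...
 *		hammer_done_cursor(&cursor);
 *		if (error == EDEADLK)
 *			goto retry;
 */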

/*
 * Update only the itimes fields.  This is done non-historically.  The
 * record is updated in-place on the disk.
 */
int
hammer_update_itimes(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	struct hammer_inode_record *rec;

	if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
	    HAMMER_INODE_ONDISK) {
		hammer_init_cursor(trans, &cursor, &ip->cache[0]);
		cursor.key_beg.obj_id = ip->obj_id;
		cursor.key_beg.key = 0;
		cursor.key_beg.create_tid = 0;
		cursor.key_beg.delete_tid = 0;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor.key_beg.obj_type = 0;
		cursor.asof = ip->obj_asof;
		cursor.flags |= HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_ASOF;

		error = hammer_btree_lookup(&cursor);
		rec = &cursor.record->inode;
		hammer_modify_buffer(cursor.trans, cursor.record_buffer,
		rec->ino_atime = ip->ino_rec.ino_atime;
		rec->ino_mtime = ip->ino_rec.ino_mtime;
		ip->flags &= ~HAMMER_INODE_ITIMES;
		/* XXX recalculate crc */
		hammer_cache_node(cursor.node, &ip->cache[0]);
		hammer_done_cursor(&cursor);
		if (error == EDEADLK)

/*
 * Release a reference on an inode.  If asked to flush, the last release
 * will flush the inode.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
	hammer_unref(&ip->lock);
	ip->flags |= HAMMER_INODE_FLUSH;
	if (ip->lock.refs == 0) {
		KKASSERT(ip->cursor_ip_refs == 0);
		if (ip->flags & HAMMER_INODE_FLUSH)
			hammer_unload_inode(ip, (void *)MNT_WAIT);
		else
			hammer_unload_inode(ip, (void *)MNT_NOWAIT);

/*
 * Unload and destroy the specified inode.
 *
 * (typically called via RB_SCAN)
 */
int
hammer_unload_inode(struct hammer_inode *ip, void *data)
{
	KASSERT(ip->lock.refs == 0,
		("hammer_unload_inode: %d refs\n", ip->lock.refs));
	KKASSERT(ip->vp == NULL);
	hammer_ref(&ip->lock);

	error = hammer_sync_inode(ip, (int)data, 1);
	kprintf("hammer_sync_inode failed error %d\n", error);
	if (ip->lock.refs == 1) {
		KKASSERT(RB_EMPTY(&ip->rec_tree));
		KKASSERT(TAILQ_EMPTY(&ip->bio_list));
		RB_REMOVE(hammer_ino_rb_tree, &ip->hmp->rb_inos_root, ip);
		hammer_uncache_node(&ip->cache[0]);
		hammer_uncache_node(&ip->cache[1]);
		--hammer_count_inodes;

	hammer_unref(&ip->lock);
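
/*
 * Illustrative sketch (an assumption, not original code): because
 * hammer_unload_inode() has the RB_SCAN callback signature, unmount can
 * flush and destroy every remaining in-memory inode by scanning the
 * per-mount inode tree, passing the wait mode through the data argument:
 *
 *	RB_SCAN(hammer_ino_rb_tree, &hmp->rb_inos_root, NULL,
 *		hammer_unload_inode, (void *)MNT_WAIT);
 */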

/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags:
 *
 *	HAMMER_INODE_RDIRTY:	Inode record has been updated
 *	HAMMER_INODE_DDIRTY:	Inode data has been updated
 *	HAMMER_INODE_DELETED:	Inode record/data must be deleted
 *	HAMMER_INODE_ITIMES:	mtime/atime has been updated
 *
 * last_tid is the TID to use to generate the correct TID when the inode
 * is synced to disk.  The first inode record laid out on disk must match
 * the transaction id of the related directory entry, so only update last_tid
 * if that has already occurred.
 */
void
hammer_modify_inode(struct hammer_transaction *trans,
		    struct hammer_inode *ip, int flags)
{
	KKASSERT((ip->flags & HAMMER_INODE_RO) == 0 ||
		 (flags & (HAMMER_INODE_RDIRTY|HAMMER_INODE_DDIRTY|
			   HAMMER_INODE_DELETED|HAMMER_INODE_ITIMES)) == 0);

	if (flags &
	    (HAMMER_INODE_RDIRTY|HAMMER_INODE_DDIRTY|HAMMER_INODE_DELETED)) {
		if (hammer_debug_tid) {
			kprintf("hammer_modify_inode: %016llx (%08x)\n",
				trans->tid, (int)(trans->tid / 1000000000LL));
		}

		/*
		 * Update the inode sync transaction id unless it's locked
		 * due to some prior required synchronization.  Locking the
		 * tid in the new flags overrides this (used by rename).
		 */
		if ((ip->flags & HAMMER_INODE_TIDLOCKED) == 0)
			ip->last_tid = trans->tid;
		else if (flags & HAMMER_INODE_TIDLOCKED)
			ip->last_tid = trans->tid;
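
/*
 * Illustrative caller pattern (a sketch, not code from this file): a VOP
 * that changes the inode record, e.g. a write that extends the file,
 * updates the in-memory fields and then tells the sync code what must be
 * written back.  new_size is a hypothetical local.
 *
 *	ip->ino_rec.ino_size = new_size;
 *	ip->ino_rec.ino_mtime = trans.tid;
 *	hammer_modify_inode(&trans, ip,
 *			    HAMMER_INODE_RDIRTY | HAMMER_INODE_ITIMES);
 */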

/*
 * Sync any dirty buffers and records associated with an inode.  The
 * inode's last_tid field is used as the transaction id for the sync,
 * overriding any intermediate TIDs that were used for records.  Note
 * that the dirty buffer cache buffers do not have any knowledge of
 * the transaction id they were modified under.
 */
static int
hammer_sync_inode_callback(hammer_record_t rec, void *data)
{
	hammer_transaction_t trans = data;

	hammer_ref(&rec->lock);
	error = hammer_ip_sync_record(trans, rec);
	hammer_rel_mem_record(rec);

	if (error != -ENOSPC) {
		kprintf("hammer_sync_inode_callback: sync failed rec "
			"%p, error %d\n", rec, error);

int
hammer_sync_inode(hammer_inode_t ip, int waitfor, int handle_delete)
{
	struct hammer_transaction trans;

	if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {

	hammer_lock_ex(&ip->lock);

	/*
	 * Use the transaction id of the last operation to sync.  But we
	 * can't reuse a previous sync TID.
	 */
	if (ip->last_tid && ip->last_tid != ip->sync_tid)
		hammer_start_transaction_tid(&trans, ip->hmp, ip->last_tid);
	else
		hammer_start_transaction(&trans, ip->hmp);

	/*
	 * If the inode has been deleted (nlinks == 0), and the OS no longer
	 * has any references to it (handle_delete != 0), clean up in-memory
	 * state and delete the inode's on-disk records.
	 *
	 * NOTE: We do not set the RDIRTY flag when updating the delete_tid;
	 * setting HAMMER_INODE_DELETED takes care of it.
	 *
	 * NOTE: Because we may sync records within this new transaction,
	 * force the inode update later on to use our transaction id or
	 * the delete_tid of the inode may be less than the create_tid of
	 * the inode update.  XXX shouldn't happen but don't take the chance.
	 */
	if (ip->ino_rec.ino_nlinks == 0 && handle_delete &&
	    (ip->flags & HAMMER_INODE_GONE) == 0) {
		ip->flags |= HAMMER_INODE_GONE;
		vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
		error = hammer_ip_delete_range_all(&trans, ip);
		KKASSERT(RB_EMPTY(&ip->rec_tree));
		ip->ino_rec.base.base.delete_tid = trans.tid;
		hammer_modify_inode(&trans, ip, HAMMER_INODE_DELETED);
		hammer_modify_volume(&trans, ip->hmp->rootvol, NULL, 0);
		--ip->hmp->rootvol->ondisk->vol0_stat_inodes;

	/*
	 * Sync the buffer cache.  This will queue the BIOs.
	 */
	if (ip->vp != NULL) {
		error = vfsync(ip->vp, MNT_NOWAIT, 1, NULL, NULL);
		if (RB_ROOT(&ip->vp->v_rbdirty_tree) == NULL)
			ip->flags &= ~HAMMER_INODE_BUFS;

	/*
	 * Flush the queued BIOs.
	 */
	while ((bio = TAILQ_FIRST(&ip->bio_list)) != NULL) {
		TAILQ_REMOVE(&ip->bio_list, bio, bio_act);
		hammer_dowrite(&trans, ip, bio);

	/*
	 * Now sync related records.
	 */
	error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
			hammer_sync_inode_callback, &trans);
	KKASSERT(error <= 0);

	if (RB_EMPTY(&ip->rec_tree) && TAILQ_EMPTY(&ip->bio_list))
		ip->flags &= ~HAMMER_INODE_XDIRTY;

	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 */
	switch(ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags.
		 * The delete flag takes care of things.
		 */
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 */
		ip->flags &= ~(HAMMER_INODE_RDIRTY|HAMMER_INODE_DDIRTY|
			       HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES);
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t rec = RB_ROOT(&ip->rec_tree);
			hammer_ref(&rec->lock);
			rec->flags |= HAMMER_RECF_DELETED;
			hammer_rel_mem_record(rec);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set both dirty flags
		 * to force an initial record to be written.
		 */
		ip->flags |= HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY;
		break;
	}

	/*
	 * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
	 * is already on-disk the old record is marked as deleted.
	 *
	 * If DELETED is set, hammer_update_inode() will delete the existing
	 * record without writing out a new one.
	 *
	 * If *ONLY* the ITIMES flag is set we can update the record in-place.
	 */
	if ((ip->flags & (HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY |
			  HAMMER_INODE_ITIMES | HAMMER_INODE_DELETED)) ==
	    HAMMER_INODE_ITIMES) {
		error = hammer_update_itimes(&trans, ip);
	} else
	if (ip->flags & (HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY |
			 HAMMER_INODE_ITIMES | HAMMER_INODE_DELETED)) {
		error = hammer_update_inode(&trans, ip);
	}
	hammer_commit_transaction(&trans);
	hammer_unlock(&ip->lock);
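
/*
 * Illustrative sketch (an assumption about the corresponding VOP, not code
 * from this file): fsync is expected to reduce to a call of the routine
 * above, with handle_delete forced off because the kernel still holds the
 * vnode:
 *
 *	static int
 *	hammer_vop_fsync(struct vop_fsync_args *ap)
 *	{
 *		hammer_inode_t ip = VTOI(ap->a_vp);
 *
 *		return (hammer_sync_inode(ip, ap->a_waitfor, 0));
 *	}
 */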