/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.39 2008/04/26 08:02:17 dillon Exp $
 */

#include "hammer.h"
#include <sys/buf.h>
#include <sys/buf2.h>

static int hammer_unload_inode(struct hammer_inode *ip);
static void hammer_flush_inode_copysync(hammer_inode_t ip);
static int hammer_mark_record_callback(hammer_record_t rec, void *data);

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(ap->a_vp);
                return(0);
        }

        /*
         * If the inode no longer has any references we recover its
         * in-memory resources immediately.
         *
         * NOTE: called from frontend, use ino_rec instead of sync_ino_rec.
         */
        if (ip->ino_rec.ino_nlinks == 0)
                vrecycle(ap->a_vp);
        return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode and will flush the inode to the
 * buffer cache.
 *
 * XXX Currently our sync code only runs through inodes with vnode
 * associations, so we depend on hammer_rel_inode() to sync any inode
 * record data to the block device prior to losing the association.
 * Otherwise transactions that the user expected to be distinct by
 * doing a manual sync may be merged.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_inode *ip;
        struct vnode *vp;

        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                vp->v_data = NULL;
                ip->vp = NULL;

                /*
                 * Don't let too many dependencies build up on unreferenced
                 * inodes or we could run ourselves out of memory.
                 */
                if (TAILQ_FIRST(&ip->depend_list)) {
                        ip->hmp->reclaim_count += ip->depend_count;
                        if (ip->hmp->reclaim_count > 256) {
                                ip->hmp->reclaim_count = 0;
                                hammer_flusher_async(ip->hmp);
                        }
                }
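                /*
                 * Drop the vnode's reference and request a flush on final
                 * release; once the association is gone the normal sync
                 * path will no longer visit this inode.
                 */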
                hammer_rel_inode(ip, 1);
        }
        return(0);
}

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, int lktype, struct vnode **vpp)
{
        struct vnode *vp;
        int error = 0;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, ip->hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
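                        /*
                         * Another thread may have attached a vnode to the
                         * inode while we were allocating ours; if so, throw
                         * away the new vnode and retry with theirs.
                         */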
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;
                        vp->v_type = hammer_get_vnode_type(
                                        ip->ino_rec.base.base.obj_type);

                        switch(ip->ino_rec.base.base.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &ip->hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &ip->hmp->mp->mnt_vn_fifo_ops;
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT &&
                            ip->obj_asof == ip->hmp->asof) {
                                vp->v_flag |= VROOT;
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
                        if (vp->v_type == VREG)
                                vinitvmio(vp, ip->ino_rec.ino_size);
                        break;
                }

                /*
                 * loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp)
                                break;
                        vput(vp);
                }
        }
        *vpp = vp;
        return(error);
}

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, struct hammer_node **cache,
                 u_int64_t obj_id, hammer_tid_t asof, int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        if (hmp->ronly)
                ip->flags |= HAMMER_INODE_RO;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->bio_list);
        TAILQ_INIT(&ip->bio_alt_list);
        TAILQ_INIT(&ip->depend_list);

        /*
         * Locate the on-disk inode.
         */
retry:
        hammer_init_cursor(trans, &cursor, cache);
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;
        cursor.asof = iinfo.obj_asof;
        cursor.flags = HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_GET_DATA |
                       HAMMER_CURSOR_ASOF;

        *errorp = hammer_btree_lookup(&cursor);
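        /*
         * A deadlocked B-Tree lookup is resolved by backing the cursor
         * out completely and retrying from scratch.
         */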
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_rec = cursor.record->inode;
                ip->ino_data = cursor.data->inode;
                hammer_cache_node(cursor.node, &ip->cache[0]);
                if (cache)
                        hammer_cache_node(cursor.node, cache);
        }

        /*
         * On success load the inode's record and data and insert the
         * inode into the B-Tree.  It is possible to race another lookup
         * insertion of the same inode so deal with that condition too.
         *
         * The cursor's locked node interlocks against others creating and
         * destroying ip while we were blocked.
         */
        if (*errorp == 0) {
                hammer_ref(&ip->lock);
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_uncache_node(&ip->cache[0]);
                        hammer_uncache_node(&ip->cache[1]);
                        KKASSERT(ip->lock.refs == 1);
                        --hammer_count_inodes;
                        kfree(ip, M_HAMMER);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
                --hammer_count_inodes;
                kfree(ip, M_HAMMER);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);
        return (ip);
}

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced and shared-locked.  The caller
 * must unlock and release it when finished.
 *
 * The inode is created in-memory.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred, hammer_inode_t dip,
                    struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;

        hmp = trans->hmp;
        ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
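        /*
         * Allocate an object id for the new object.  The id comes from the
         * transaction id allocator and must never be zero.
         */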
        ip->obj_id = hammer_alloc_tid(trans);
        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY | HAMMER_INODE_RDIRTY |
                    HAMMER_INODE_ITIMES;

        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->bio_list);
        TAILQ_INIT(&ip->bio_alt_list);
        TAILQ_INIT(&ip->depend_list);

        ip->ino_rec.ino_atime = trans->time;
        ip->ino_rec.ino_mtime = trans->time;
        ip->ino_rec.ino_size = 0;
        ip->ino_rec.ino_nlinks = 0;
        /* XXX */
        ip->ino_rec.base.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_rec.base.base.obj_id = ip->obj_id;
        ip->ino_rec.base.base.key = 0;
        ip->ino_rec.base.base.create_tid = 0;
        ip->ino_rec.base.base.delete_tid = 0;
        ip->ino_rec.base.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_rec.base.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;
        ip->ino_data.parent_obj_id = (dip) ? dip->ino_rec.base.base.obj_id : 0;

        switch(ip->ino_rec.base.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        xuid = hammer_to_unix_xid(&dip->ino_data.uid);
        ip->ino_data.gid = dip->ino_data.gid;
        xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
                                     &vap->va_mode);
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);
        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);

        hammer_ref(&ip->lock);
        hammer_lock_sh(&ip->lock);
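        /*
         * Insert the new inode into the per-mount RB tree.  A collision
         * means the id allocator handed out a duplicate obj_id, which
         * should be impossible.
         */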
        if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                hammer_unref(&ip->lock);
                panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
        }
        *ipp = ip;
        return(0);
}

/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_transaction_t trans, hammer_inode_t ip)
{
        struct hammer_cursor cursor;
        hammer_record_t record;
        int error;

        /*
         * Locate the record on-disk and mark it as deleted.  Both the B-Tree
         * node and the record must be marked deleted.  The record may or
         * may not be physically deleted, depending on the retention policy.
         *
         * If the inode has already been deleted on-disk we have nothing
         * to do.
         *
         * XXX Update the inode record and data in-place if the retention
         * policy allows it.
         */
retry:
        error = 0;

        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
            HAMMER_INODE_ONDISK) {
                hammer_init_cursor(trans, &cursor, &ip->cache[0]);
                cursor.key_beg.obj_id = ip->obj_id;
                cursor.key_beg.key = 0;
                cursor.key_beg.create_tid = 0;
                cursor.key_beg.delete_tid = 0;
                cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
                cursor.key_beg.obj_type = 0;
                cursor.asof = ip->obj_asof;
                cursor.flags |= HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_ASOF;
                cursor.flags |= HAMMER_CURSOR_BACKEND;

                error = hammer_btree_lookup(&cursor);
                if (error) {
                        kprintf("error %d\n", error);
                        Debugger("hammer_update_inode");
                }

                if (error == 0) {
                        error = hammer_ip_delete_record(&cursor, trans->tid);
                        if (error && error != EDEADLK) {
                                kprintf("error %d\n", error);
                                Debugger("hammer_update_inode2");
                        }
                        if (error == 0)
                                ip->flags |= HAMMER_INODE_DELONDISK;
                        hammer_cache_node(cursor.node, &ip->cache[0]);
                }
                hammer_done_cursor(&cursor);
                if (error == EDEADLK)
                        goto retry;
        }

        /*
         * Write out a new record if the in-memory inode is not marked
         * as having been deleted.  Update our inode statistics if this
         * is the first application of the inode on-disk.
         *
         * If the inode has been deleted permanently, HAMMER_INODE_DELONDISK
         * will remain set and prevent further updates.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
                record = hammer_alloc_mem_record(ip);
                record->state = HAMMER_FST_FLUSH;
                record->rec.inode = ip->sync_ino_rec;
                record->rec.inode.base.base.create_tid = trans->tid;
                record->rec.inode.base.data_len = sizeof(ip->sync_ino_data);
                record->data = (void *)&ip->sync_ino_data;
                error = hammer_ip_sync_record(trans, record);
                if (error) {
                        kprintf("error %d\n", error);
                        Debugger("hammer_update_inode3");
                }
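                /*
                 * The in-memory record only existed to carry this copy of
                 * the inode to the media; delete and release it now that
                 * it has been synced.
                 */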
                hammer_delete_mem_record(record);
                hammer_rel_mem_record(record);
                if (error == 0) {
                        ip->sync_flags &= ~(HAMMER_INODE_RDIRTY |
                                            HAMMER_INODE_DDIRTY |
                                            HAMMER_INODE_ITIMES);
                        ip->flags &= ~HAMMER_INODE_DELONDISK;
                        if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
                                hammer_modify_volume(trans, trans->rootvol,
                                                     NULL, 0);
                                ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans->rootvol);
                                ip->flags |= HAMMER_INODE_ONDISK;
                        }
                }
        }
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
                /*
                 * Clean out any left-over flags if the inode has been
                 * destroyed.
                 */
                ip->sync_flags &= ~(HAMMER_INODE_RDIRTY |
                                    HAMMER_INODE_DDIRTY |
                                    HAMMER_INODE_ITIMES);
        }
        return(error);
}

/*
 * Update only the itimes fields.  This is done non-historically.  The
 * record is updated in-place on the disk.
 */
static int
hammer_update_itimes(hammer_transaction_t trans, hammer_inode_t ip)
{
        struct hammer_cursor cursor;
        struct hammer_inode_record *rec;
        int error;

retry:
        error = 0;
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
            HAMMER_INODE_ONDISK) {
                hammer_init_cursor(trans, &cursor, &ip->cache[0]);
                cursor.key_beg.obj_id = ip->obj_id;
                cursor.key_beg.key = 0;
                cursor.key_beg.create_tid = 0;
                cursor.key_beg.delete_tid = 0;
                cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
                cursor.key_beg.obj_type = 0;
                cursor.asof = ip->obj_asof;
                cursor.flags |= HAMMER_CURSOR_GET_RECORD | HAMMER_CURSOR_ASOF;
                cursor.flags |= HAMMER_CURSOR_BACKEND;

                error = hammer_btree_lookup(&cursor);
                if (error) {
                        kprintf("error %d\n", error);
                        Debugger("hammer_update_itimes1");
                }
                if (error == 0) {
                        /*
                         * Do not generate UNDO records for atime/mtime
                         * updates.
                         */
                        rec = &cursor.record->inode;
                        hammer_modify_buffer(cursor.trans, cursor.record_buffer,
                                             NULL, 0);
                        rec->ino_atime = ip->sync_ino_rec.ino_atime;
                        rec->ino_mtime = ip->sync_ino_rec.ino_mtime;
                        hammer_modify_buffer_done(cursor.record_buffer);
                        ip->sync_flags &= ~HAMMER_INODE_ITIMES;
                        /* XXX recalculate crc */
                        hammer_cache_node(cursor.node, &ip->cache[0]);
                }
                hammer_done_cursor(&cursor);
                if (error == EDEADLK)
                        goto retry;
        }
        return(error);
}

/*
 * Release a reference on an inode.  If asked to flush, the last release
 * will flush the inode.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
        /*
         * Handle disposition when dropping the last ref.
         */
        while (ip->lock.refs == 1) {
#if 0
                /*
                 * XXX this can create a deep stack recursion
                 */
                if (curthread == ip->hmp->flusher_td) {
                        /*
                         * We are the flusher, do any required flushes
                         * before unloading the inode.
                         */
                        int error = 0;

                        KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
                        while (error == 0 &&
                               (ip->flags & HAMMER_INODE_MODMASK)) {
                                hammer_ref(&ip->lock);
                                hammer_flush_inode_copysync(ip);
                                error = hammer_sync_inode(ip, 1);
                                hammer_flush_inode_done(ip);
                        }
                        if (error)
                                kprintf("hammer_sync_inode failed error %d\n",
                                        error);
                        if (ip->lock.refs > 1)
                                continue;
                        hammer_unload_inode(ip);
                        return;
                }
#endif
                if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
                        hammer_unload_inode(ip);
                        return;
                }

                /*
                 * Hand the inode over to the flusher, which will
                 * add another ref to it.
                 */
                if (++ip->hmp->reclaim_count > 256) {
                        ip->hmp->reclaim_count = 0;
                        hammer_flush_inode(ip, HAMMER_FLUSH_FORCE |
                                               HAMMER_FLUSH_SIGNAL);
                } else {
                        hammer_flush_inode(ip, HAMMER_FLUSH_FORCE);
                }
                /* retry */
        }

        /*
         * The inode still has multiple refs, drop one ref.  If a flush was
         * requested, make sure the flusher sees it.
         */
        if (flush && ip->flush_state == HAMMER_FST_IDLE)
                hammer_flush_inode(ip, HAMMER_FLUSH_RELEASE);
        else
                hammer_unref(&ip->lock);
}

/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * This can only be called in the context of the flusher.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{

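        /*
         * The inode must be completely idle: no vnode, no flush in
         * progress, no cursor references, no dirty state, and no residual
         * in-memory records or queued BIOs.
         */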
        KASSERT(ip->lock.refs == 1,
                ("hammer_unload_inode: %d refs\n", ip->lock.refs));
        KKASSERT(ip->vp == NULL);
        KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
        KKASSERT(ip->cursor_ip_refs == 0);
        KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

        KKASSERT(RB_EMPTY(&ip->rec_tree));
        KKASSERT(TAILQ_EMPTY(&ip->bio_list));
        KKASSERT(TAILQ_EMPTY(&ip->bio_alt_list));

        RB_REMOVE(hammer_ino_rb_tree, &ip->hmp->rb_inos_root, ip);

        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        --hammer_count_inodes;
        kfree(ip, M_HAMMER);

        return(0);
}

/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_RDIRTY:  Inode record has been updated
 * HAMMER_INODE_DDIRTY:  Inode data has been updated
 * HAMMER_INODE_XDIRTY:  Dirty frontend buffer cache buffer strategized
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ITIMES:  mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
{
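        /*
         * A read-only inode may not be dirtied in any way that would
         * require writing it back to the media.
         */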
        KKASSERT ((ip->flags & HAMMER_INODE_RO) == 0 ||
                  (flags & (HAMMER_INODE_RDIRTY|HAMMER_INODE_DDIRTY|
                            HAMMER_INODE_XDIRTY|
                            HAMMER_INODE_DELETED|HAMMER_INODE_ITIMES)) == 0);

        ip->flags |= flags;
}

/*
 * Flush an inode.  If the inode is already being flushed wait for
 * it to complete, then flush it again.  The interlock is against
 * front-end transactions, the backend flusher does not hold the lock.
 *
 * The flusher must distinguish between the records that are part of the
 * flush and any new records created in parallel with the flush.  The
 * inode data and truncation fields are also copied.  BIOs are a bit more
 * troublesome because some dirty buffers may not have been queued yet.
 */
void
hammer_flush_inode(hammer_inode_t ip, int flags)
{
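        /*
         * If a flush is already in progress or being set up and the
         * frontend has dirtied the inode again, just flag it for another
         * flush pass once the current one completes.
         */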
        if (ip->flush_state != HAMMER_FST_IDLE &&
            (ip->flags & HAMMER_INODE_MODMASK)) {
                ip->flags |= HAMMER_INODE_REFLUSH;
                if (flags & HAMMER_FLUSH_RELEASE) {
                        hammer_unref(&ip->lock);
                        KKASSERT(ip->lock.refs > 0);
                }
                return;
        }
        if (ip->flush_state == HAMMER_FST_IDLE) {
                if ((ip->flags & HAMMER_INODE_MODMASK) ||
                    (flags & HAMMER_FLUSH_FORCE)) {
                        /*
                         * Add a reference to represent the inode being queued
                         * to the flusher.  If the caller wants us to
                         * release a reference the two cancel each other out.
                         */
                        if ((flags & HAMMER_FLUSH_RELEASE) == 0)
                                hammer_ref(&ip->lock);

                        hammer_flush_inode_copysync(ip);
                        /*
                         * Move the inode to the flush list and add a ref to
                         * it representing it on the list.
                         */
                        TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
                        if (flags & HAMMER_FLUSH_SIGNAL)
                                hammer_flusher_async(ip->hmp);
                }
        }
}

/*
 * Helper routine to copy the frontend synchronization state to the backend.
 * This routine may be called by either the frontend or the backend.
 */
static void
hammer_flush_inode_copysync(hammer_inode_t ip)
{
        int error;
        int count;

        /*
         * Prevent anyone else from trying to do the same thing.
         */
        ip->flush_state = HAMMER_FST_SETUP;

        /*
         * Sync the buffer cache.  This will queue the BIOs.  If called
         * from the context of the flusher the BIO's are thrown into bio_list
         * regardless of ip->flush_state.
         */
        if (ip->vp != NULL)
                error = vfsync(ip->vp, MNT_NOWAIT, 1, NULL, NULL);
        else
                error = 0;

        /*
         * This freezes strategy writes; any further BIOs will be
         * queued to alt_bio (unless we are the flusher).
         */
        ip->flush_state = HAMMER_FST_FLUSH;

        /*
         * Snapshot the state of the inode for the backend flusher.
         *
         * The truncation must be retained in the frontend until after
         * we've actually performed the record deletion.
         */
        ip->sync_flags = (ip->flags & HAMMER_INODE_MODMASK);
        ip->sync_trunc_off = ip->trunc_off;
        ip->sync_ino_rec = ip->ino_rec;
        ip->sync_ino_data = ip->ino_data;
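        /*
         * Clear the frontend's dirty bits except TRUNCATED and BUFS, which
         * must remain visible to the frontend until the backend completes
         * the corresponding work ('~' binds tighter than '|', so the mask
         * below preserves those two bits).
         */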
        ip->flags &= ~HAMMER_INODE_MODMASK |
                     HAMMER_INODE_TRUNCATED | HAMMER_INODE_BUFS;

        /*
         * Fix up the dirty buffer status.
         */
        if (ip->vp == NULL || RB_ROOT(&ip->vp->v_rbdirty_tree) == NULL)
                ip->flags &= ~HAMMER_INODE_BUFS;
        if (TAILQ_FIRST(&ip->bio_list))
                ip->sync_flags |= HAMMER_INODE_BUFS;
        else
                ip->sync_flags &= ~HAMMER_INODE_BUFS;

        /*
         * Set the state for the inode's in-memory records.  If some records
         * could not be marked for backend flush (i.e. deleted records),
         * re-set the XDIRTY flag.
         */
        count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
                        hammer_mark_record_callback, NULL);
        if (count)
                ip->flags |= HAMMER_INODE_XDIRTY;
}

/*
 * Mark records for backend flush, accumulate a count of the number of
 * records which could not be marked.
 */
static int
hammer_mark_record_callback(hammer_record_t rec, void *data)
{
        if (rec->state == HAMMER_FST_FLUSH) {
                return(0);
        } else if ((rec->flags & HAMMER_RECF_DELETED_FE) == 0) {
                rec->state = HAMMER_FST_FLUSH;
                hammer_ref(&rec->lock);
                return(0);
        } else {
                return(1);
        }
}

/*
 * Wait for a previously queued flush to complete
 */
void
hammer_wait_inode(hammer_inode_t ip)
{
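        /*
         * The backend clears HAMMER_INODE_FLUSHW and issues a
         * wakeup(&ip->flags) from hammer_flush_inode_done() when the
         * flush finishes.
         */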
        while (ip->flush_state == HAMMER_FST_FLUSH) {
                ip->flags |= HAMMER_INODE_FLUSHW;
                tsleep(&ip->flags, 0, "hmrwin", 0);
        }
}

/*
 * Called by the backend code when a flush has been completed.
 * The inode has already been removed from the flush list.
 *
 * A pipelined flush can occur, in which case we must re-enter the
 * inode on the list and re-copy its fields.
 */
void
hammer_flush_inode_done(hammer_inode_t ip)
{
        struct bio *bio;

        KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);

        if (ip->sync_flags)
                kprintf("ip %p leftover sync_flags %08x\n", ip, ip->sync_flags);
        ip->flags |= ip->sync_flags;
        ip->flush_state = HAMMER_FST_IDLE;

        /*
         * Reflush any BIOs that wound up in the alt list.  Our inode will
         * also wind up at the end of the flusher's list.
         */
        while ((bio = TAILQ_FIRST(&ip->bio_alt_list)) != NULL) {
                TAILQ_REMOVE(&ip->bio_alt_list, bio, bio_act);
                TAILQ_INSERT_TAIL(&ip->bio_list, bio, bio_act);
                ip->flags |= HAMMER_INODE_XDIRTY;
                ip->flags |= HAMMER_INODE_REFLUSH;
                kprintf("rebio %p ip %p @%016llx,%d\n", bio, ip, bio->bio_offset, bio->bio_buf->b_bufsize);
        }

        /*
         * If the frontend made more changes and requested another flush,
         * do it.
         */
        if (ip->flags & HAMMER_INODE_REFLUSH) {
                ip->flags &= ~HAMMER_INODE_REFLUSH;
                hammer_flush_inode(ip, 0);
        } else {
                if (ip->flags & HAMMER_INODE_FLUSHW) {
                        ip->flags &= ~HAMMER_INODE_FLUSHW;
                        wakeup(&ip->flags);
                }
        }
        hammer_rel_inode(ip, 0);
}

/*
 * Called from hammer_sync_inode() to synchronize in-memory records
 * to the media.
 */
static int
hammer_sync_record_callback(hammer_record_t record, void *data)
{
        hammer_transaction_t trans = data;
        int error;

        /*
         * Skip records that do not belong to the current flush.  Records
         * belonging to the flush will have been referenced for us.
         *
         * Skip records that were deleted by the backend itself.  Records
         * deleted by the frontend after their state has changed to FLUSH
         * are not considered to be deleted by the backend.
         *
         * XXX special delete-on-disk records can be deleted by the backend
         * prior to the sync due to a truncation operation.  This is kinda
         * a hack to deal with it.
         */
        if (record->state != HAMMER_FST_FLUSH)
                return(0);
        if (record->flags & HAMMER_RECF_DELETED_BE) {
                hammer_flush_record_done(record);
                return(0);
        }

        /*
         * Assign the create_tid for new records.  Deletions already
         * have the record's entire key properly set up.
         */
        if ((record->flags & HAMMER_RECF_DELETE_ONDISK) == 0)
                record->rec.inode.base.base.create_tid = trans->tid;
        error = hammer_ip_sync_record(trans, record);

        if (error) {
                error = -error;
                if (error != -ENOSPC) {
                        kprintf("hammer_sync_record_callback: sync failed rec "
                                "%p, error %d\n", record, error);
                        Debugger("sync failed rec");
                }
        }
        hammer_flush_record_done(record);
        return(error);
}

/*
 * XXX error handling
 */
int
hammer_sync_inode(hammer_inode_t ip, int handle_delete)
{
        struct hammer_transaction trans;
        struct bio *bio;
        hammer_depend_t depend;
        int error, tmp_error;

        if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0 &&
            handle_delete == 0) {
                return(0);
        }

        hammer_lock_ex(&ip->lock);

        hammer_start_transaction_fls(&trans, ip->hmp);

        /*
         * Any (directory) records this inode depends on must also be
         * synchronized.  The directory itself only needs to be flushed
         * if its inode is not already on-disk.
         */
        while ((depend = TAILQ_FIRST(&ip->depend_list)) != NULL) {
                hammer_record_t record;

                record = depend->record;
                TAILQ_REMOVE(&depend->record->depend_list, depend, rec_entry);
                TAILQ_REMOVE(&ip->depend_list, depend, ip_entry);
                --ip->depend_count;
                if (record->state != HAMMER_FST_FLUSH) {
                        record->state = HAMMER_FST_FLUSH;
                        /* add ref (steal ref from dependency) */
                } else {
                        /* remove ref related to dependency */
                        /* record still has at least one ref from state */
                        hammer_unref(&record->lock);
                        KKASSERT(record->lock.refs > 0);
                }
                if (record->ip->flags & HAMMER_INODE_ONDISK) {
                        kprintf("I");
                        hammer_sync_record_callback(record, &trans);
                } else {
                        kprintf("J");
                        hammer_flush_inode(record->ip, 0);
                }
                hammer_unref(&ip->lock);
                KKASSERT(ip->lock.refs > 0);
                kfree(depend, M_HAMMER);
        }

        /*
         * Sync inode deletions and truncations.
         */
        if (ip->sync_ino_rec.ino_nlinks == 0 && handle_delete &&
            (ip->flags & HAMMER_INODE_GONE) == 0) {
                /*
                 * Handle the case where the inode has been completely deleted
                 * and is no longer referenceable from the filesystem
                 * namespace.
                 *
                 * NOTE: We do not set the RDIRTY flag when updating the
                 * delete_tid, setting HAMMER_INODE_DELETED takes care of it.
                 */

                ip->flags |= HAMMER_INODE_GONE | HAMMER_INODE_DELETED;
                ip->flags &= ~HAMMER_INODE_TRUNCATED;
                ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
                if (ip->vp)
                        vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
                error = hammer_ip_delete_range_all(&trans, ip);
                if (error)
                        Debugger("hammer_ip_delete_range_all errored");

                /*
                 * Sanity check.  The only records that remain should be
                 * marked for back-end deletion.
                 */
                {
                        hammer_record_t rec;

                        RB_FOREACH(rec, hammer_rec_rb_tree, &ip->rec_tree) {
                                KKASSERT(rec->flags & HAMMER_RECF_DELETED_BE);
                        }
                }

                /*
                 * Set delete_tid in both the frontend and backend
                 * copy of the inode record.
                 */
                ip->ino_rec.base.base.delete_tid = trans.tid;
                ip->sync_ino_rec.base.base.delete_tid = trans.tid;

                /*
                 * Indicate that the inode has/is-being deleted.
                 */
                ip->flags |= HAMMER_INODE_DELETED;
                hammer_modify_inode(&trans, ip, HAMMER_INODE_RDIRTY);
                hammer_modify_volume(&trans, trans.rootvol, NULL, 0);
                --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                hammer_modify_volume_done(trans.rootvol);
        } else if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
                /*
                 * Interlock trunc_off.  The VOP front-end may continue to
                 * make adjustments to it while we are blocked.
                 */
                off_t trunc_off;
                off_t aligned_trunc_off;

                trunc_off = ip->sync_trunc_off;
                aligned_trunc_off = (trunc_off + HAMMER_BUFMASK) &
                                    ~HAMMER_BUFMASK64;

                /*
                 * Delete any whole blocks on-media.  The front-end has
                 * already cleaned out any partial block and made it
                 * pending.  The front-end may have updated trunc_off
                 * while we were blocked so do not just unconditionally
                 * set it to the maximum offset.
                 */
                kprintf("sync truncation range @ %016llx\n", aligned_trunc_off);
                error = hammer_ip_delete_range(&trans, ip,
                                               aligned_trunc_off,
                                               0x7FFFFFFFFFFFFFFFLL);
                if (error)
                        Debugger("hammer_ip_delete_range errored");
                ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
                if (ip->trunc_off >= trunc_off) {
                        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
                        ip->flags &= ~HAMMER_INODE_TRUNCATED;
                }
        }

        error = 0;      /* XXX vfsync used to be here */

        /*
         * Flush any queued BIOs.
         */
        while ((bio = TAILQ_FIRST(&ip->bio_list)) != NULL) {
                TAILQ_REMOVE(&ip->bio_list, bio, bio_act);
#if 0
                kprintf("dowrite %016llx ip %p bio %p @ %016llx\n", trans.tid, ip, bio, bio->bio_offset);
#endif
                tmp_error = hammer_dowrite(&trans, ip, bio);
                if (tmp_error)
                        error = tmp_error;
        }
        ip->sync_flags &= ~HAMMER_INODE_BUFS;

        /*
         * Now sync related records.
         */
        for (;;) {
                tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
                                hammer_sync_record_callback, &trans);
                KKASSERT(tmp_error <= 0);
                if (tmp_error < 0)
                        tmp_error = -tmp_error;
                if (tmp_error)
                        error = tmp_error;
                break;
        }

        /*
         * XDIRTY represents rec_tree and bio_list.  However, rec_tree may
         * contain new front-end records so short of scanning it we can't
         * just test whether it is empty or not.
         *
         * If no error occurred assume we succeeded.
         */
        if (error == 0)
                ip->sync_flags &= ~HAMMER_INODE_XDIRTY;

        if (error)
                Debugger("RB_SCAN errored");

        /*
         * Now update the inode's on-disk inode-data and/or on-disk record.
         * DELETED and ONDISK are managed only in ip->flags.
         */
        switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
        case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
                /*
                 * If deleted and on-disk, don't set any additional flags.
                 * The delete flag takes care of things.
                 */
                break;
        case HAMMER_INODE_DELETED:
                /*
                 * Take care of the case where a deleted inode was never
                 * flushed to the disk in the first place.
                 */
                ip->sync_flags &= ~(HAMMER_INODE_RDIRTY|HAMMER_INODE_DDIRTY|
                                    HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES);
                while (RB_ROOT(&ip->rec_tree)) {
                        hammer_record_t rec = RB_ROOT(&ip->rec_tree);
                        hammer_ref(&rec->lock);
                        KKASSERT(rec->lock.refs == 1);
                        hammer_delete_mem_record(rec);
                        rec->flags |= HAMMER_RECF_DELETED_BE;
                        hammer_rel_mem_record(rec);
                }
                break;
        case HAMMER_INODE_ONDISK:
                /*
                 * If already on-disk, do not set any additional flags.
                 */
                break;
        default:
                /*
                 * If not on-disk and not deleted, set both dirty flags
                 * to force an initial record to be written.  Also set
                 * the create_tid for the inode.
                 *
                 * Set create_tid in both the frontend and backend
                 * copy of the inode record.
                 */
                ip->ino_rec.base.base.create_tid = trans.tid;
                ip->sync_ino_rec.base.base.create_tid = trans.tid;
                ip->sync_flags |= HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY;
                break;
        }

        /*
         * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
         * is already on-disk the old record is marked as deleted.
         *
         * If DELETED is set hammer_update_inode() will delete the existing
         * record without writing out a new one.
         *
         * If *ONLY* the ITIMES flag is set we can update the record in-place.
         */
        if (ip->flags & HAMMER_INODE_DELETED) {
                error = hammer_update_inode(&trans, ip);
        } else
        if ((ip->sync_flags & (HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY |
                               HAMMER_INODE_ITIMES)) == HAMMER_INODE_ITIMES) {
                error = hammer_update_itimes(&trans, ip);
        } else
        if (ip->sync_flags & (HAMMER_INODE_RDIRTY | HAMMER_INODE_DDIRTY |
                              HAMMER_INODE_ITIMES)) {
                error = hammer_update_inode(&trans, ip);
        }
        if (error)
                Debugger("hammer_update_itimes/inode errored");

        /*
         * Save the TID we used to sync the inode with to make sure we
         * do not improperly reuse it.
         */
        hammer_unlock(&ip->lock);
        hammer_done_transaction(&trans);
        return(error);
}