427e5fc6 1/*
b84de5af 2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
427e5fc6
MD
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
6aeaa7bd 34 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.73 2008/06/12 01:55:58 dillon Exp $
427e5fc6
MD
35 */
36
37#include "hammer.h"
869e8f55 38#include <vm/vm_extern.h>
427e5fc6
MD
39#include <sys/buf.h>
40#include <sys/buf2.h>
41
af209b0f
MD
42static int hammer_unload_inode(struct hammer_inode *ip);
43static void hammer_flush_inode_core(hammer_inode_t ip, int flags);
44static int hammer_setup_child_callback(hammer_record_t rec, void *data);
45static int hammer_setup_parent_inodes(hammer_record_t record);
46static void hammer_inode_wakereclaims(hammer_mount_t hmp);
b84de5af 47
0832c9bb
MD
48#ifdef DEBUG_TRUNCATE
49extern struct hammer_inode *HammerTruncIp;
50#endif
51
d113fda1
MD
52/*
53 * The kernel is not actively referencing this vnode but is still holding
54 * it cached.
b84de5af
MD
55 *
56 * This is called from the frontend.
d113fda1 57 */
427e5fc6
MD
58int
59hammer_vop_inactive(struct vop_inactive_args *ap)
60{
66325755 61 struct hammer_inode *ip = VTOI(ap->a_vp);
27ea2398 62
c0ade690
MD
63 /*
64 * Degenerate case
65 */
66 if (ip == NULL) {
66325755 67 vrecycle(ap->a_vp);
c0ade690
MD
68 return(0);
69 }
70
71 /*
1f07f686
MD
72 * If the inode no longer has visibility in the filesystem and is
73 * fairly clean, try to recycle it immediately. This can deadlock
74 * in vfsync() if we aren't careful.
4e97774c
MD
75 *
76 * Do not queue the inode to the flusher if we still have visibility,
77 * otherwise namespace calls such as chmod will unnecessarily generate
78 * multiple inode updates.
c0ade690 79 */
e8599db1 80 hammer_inode_unloadable_check(ip, 0);
4e97774c
MD
81 if (ip->ino_data.nlinks == 0) {
82 if (ip->flags & HAMMER_INODE_MODMASK)
83 hammer_flush_inode(ip, 0);
84 else
85 vrecycle(ap->a_vp);
86 }
427e5fc6
MD
87 return(0);
88}
89
d113fda1
MD
90/*
91 * Release the vnode association. This is typically (but not always)
1f07f686 92 * the last reference on the inode.
d113fda1 93 *
1f07f686
MD
94 * Once the association is lost we are on our own with regards to
95 * flushing the inode.
d113fda1 96 */
427e5fc6
MD
97int
98hammer_vop_reclaim(struct vop_reclaim_args *ap)
99{
da2da375 100 hammer_mount_t hmp;
427e5fc6
MD
101 struct hammer_inode *ip;
102 struct vnode *vp;
103
104 vp = ap->a_vp;
c0ade690 105
a89aec1b 106 if ((ip = vp->v_data) != NULL) {
da2da375 107 hmp = ip->hmp;
a89aec1b
MD
108 vp->v_data = NULL;
109 ip->vp = NULL;
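		/*
		 * Account for an inode entering the reclaim path; the
		 * counters are decremented again in hammer_get_vnode()
		 * or hammer_unload_inode(), which also wake any thread
		 * waiting on HAMMER_MOUNT_WAITIMAX via
		 * hammer_inode_wakereclaims().
		 */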
9f5097dc
MD
110 if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
111 ++hammer_count_reclaiming;
da2da375 112 ++hmp->inode_reclaims;
9f5097dc
MD
113 ip->flags |= HAMMER_INODE_RECLAIM;
114 }
ec4e8497 115 hammer_rel_inode(ip, 1);
a89aec1b 116 }
427e5fc6
MD
117 return(0);
118}
119
66325755
MD
120/*
121 * Return a locked vnode for the specified inode. The inode must be
122 * referenced but NOT LOCKED on entry and will remain referenced on
123 * return.
b84de5af
MD
124 *
125 * Called from the frontend.
66325755
MD
126 */
127int
e8599db1 128hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
66325755 129{
9f5097dc 130 hammer_mount_t hmp;
66325755
MD
131 struct vnode *vp;
132 int error = 0;
133
9f5097dc
MD
134 hmp = ip->hmp;
135
66325755
MD
136 for (;;) {
137 if ((vp = ip->vp) == NULL) {
9f5097dc 138 error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
66325755
MD
139 if (error)
140 break;
8cd0a023
MD
141 hammer_lock_ex(&ip->lock);
142 if (ip->vp != NULL) {
143 hammer_unlock(&ip->lock);
144 vp->v_type = VBAD;
145 vx_put(vp);
146 continue;
66325755 147 }
8cd0a023
MD
148 hammer_ref(&ip->lock);
149 vp = *vpp;
150 ip->vp = vp;
11ad5ade
MD
151 vp->v_type =
152 hammer_get_vnode_type(ip->ino_data.obj_type);
7a04d74f 153
9f5097dc
MD
154 if (ip->flags & HAMMER_INODE_RECLAIM) {
155 --hammer_count_reclaiming;
156 --hmp->inode_reclaims;
157 ip->flags &= ~HAMMER_INODE_RECLAIM;
158 if (hmp->flags & HAMMER_MOUNT_WAITIMAX)
159 hammer_inode_wakereclaims(hmp);
160 }
161
11ad5ade 162 switch(ip->ino_data.obj_type) {
7a04d74f
MD
163 case HAMMER_OBJTYPE_CDEV:
164 case HAMMER_OBJTYPE_BDEV:
9f5097dc 165 vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
7a04d74f
MD
166 addaliasu(vp, ip->ino_data.rmajor,
167 ip->ino_data.rminor);
168 break;
169 case HAMMER_OBJTYPE_FIFO:
9f5097dc 170 vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
7a04d74f
MD
171 break;
172 default:
173 break;
174 }
42c7d26b
MD
175
176 /*
177 * Only mark as the root vnode if the ip is not
178 * historical, otherwise the VFS cache will get
179 * confused. The other half of the special handling
180 * is in hammer_vop_nlookupdotdot().
181 */
182 if (ip->obj_id == HAMMER_OBJID_ROOT &&
9f5097dc 183 ip->obj_asof == hmp->asof) {
7a04d74f 184 vp->v_flag |= VROOT;
42c7d26b 185 }
7a04d74f 186
8cd0a023
MD
187 vp->v_data = (void *)ip;
188 /* vnode locked by getnewvnode() */
189 /* make related vnode dirty if inode dirty? */
190 hammer_unlock(&ip->lock);
a89aec1b 191 if (vp->v_type == VREG)
11ad5ade 192 vinitvmio(vp, ip->ino_data.size);
8cd0a023
MD
193 break;
194 }
195
196 /*
197 * loop if the vget fails (aka races), or if the vp
198 * no longer matches ip->vp.
199 */
200 if (vget(vp, LK_EXCLUSIVE) == 0) {
201 if (vp == ip->vp)
202 break;
203 vput(vp);
66325755
MD
204 }
205 }
a89aec1b 206 *vpp = vp;
66325755
MD
207 return(error);
208}
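/*
 * Illustrative caller sequence (a sketch only; the actual lookup paths
 * live in the VOP code, e.g. hammer_vnops.c, and are not shown here):
 *
 *	ip = hammer_get_inode(trans, cache, obj_id, asof, flags, &error);
 *	if (ip) {
 *		error = hammer_get_vnode(ip, &vp);
 *		hammer_rel_inode(ip, 0);
 *	}
 */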
209
210/*
8cd0a023
MD
211 * Acquire a HAMMER inode. The returned inode is not locked. These functions
212 * do not attach or detach the related vnode (use hammer_get_vnode() for
213 * that).
d113fda1
MD
214 *
215 * The flags argument is only applied for newly created inodes, and only
216 * certain flags are inherited.
b84de5af
MD
217 *
218 * Called from the frontend.
66325755
MD
219 */
220struct hammer_inode *
36f82b23 221hammer_get_inode(hammer_transaction_t trans, struct hammer_node **cache,
61aeeb33 222 u_int64_t obj_id, hammer_tid_t asof, int flags, int *errorp)
66325755 223{
36f82b23 224 hammer_mount_t hmp = trans->hmp;
427e5fc6 225 struct hammer_inode_info iinfo;
8cd0a023 226 struct hammer_cursor cursor;
427e5fc6 227 struct hammer_inode *ip;
427e5fc6
MD
228
229 /*
230 * Determine if we already have an inode cached. If we do then
231 * we are golden.
232 */
66325755 233 iinfo.obj_id = obj_id;
7f7c1f84 234 iinfo.obj_asof = asof;
427e5fc6
MD
235loop:
236 ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
237 if (ip) {
8cd0a023 238 hammer_ref(&ip->lock);
66325755
MD
239 *errorp = 0;
240 return(ip);
427e5fc6
MD
241 }
242
3897d7e9
MD
243 /*
244 * Allocate a new inode structure and deal with races later.
245 */
427e5fc6 246 ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
b3deaf57 247 ++hammer_count_inodes;
9f5097dc 248 ++hmp->count_inodes;
66325755 249 ip->obj_id = obj_id;
27ea2398 250 ip->obj_asof = iinfo.obj_asof;
66325755 251 ip->hmp = hmp;
d113fda1
MD
252 ip->flags = flags & HAMMER_INODE_RO;
253 if (hmp->ronly)
254 ip->flags |= HAMMER_INODE_RO;
a5fddc16 255 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
8cd0a023 256 RB_INIT(&ip->rec_tree);
1f07f686 257 TAILQ_INIT(&ip->target_list);
427e5fc6
MD
258
259 /*
8cd0a023 260 * Locate the on-disk inode.
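	 * The lookup is an as-of B-Tree lookup keyed on obj_id using the
	 * inode's obj_asof TID; an EDEADLK from the cursor is retried.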
427e5fc6 261 */
6a37e7e4 262retry:
4e17f465 263 hammer_init_cursor(trans, &cursor, cache, NULL);
2f85fa4d 264 cursor.key_beg.localization = HAMMER_LOCALIZE_INODE;
8cd0a023
MD
265 cursor.key_beg.obj_id = ip->obj_id;
266 cursor.key_beg.key = 0;
d5530d22 267 cursor.key_beg.create_tid = 0;
8cd0a023
MD
268 cursor.key_beg.delete_tid = 0;
269 cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
270 cursor.key_beg.obj_type = 0;
d5530d22 271 cursor.asof = iinfo.obj_asof;
11ad5ade 272 cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
d5530d22 273 HAMMER_CURSOR_ASOF;
8cd0a023
MD
274
275 *errorp = hammer_btree_lookup(&cursor);
6a37e7e4
MD
276 if (*errorp == EDEADLK) {
277 hammer_done_cursor(&cursor);
278 goto retry;
279 }
427e5fc6
MD
280
281 /*
282 * On success the B-Tree lookup will hold the appropriate
283 * buffer cache buffers and provide a pointer to the requested
d113fda1
MD
284 * information. Copy the information to the in-memory inode
285 * and cache the B-Tree node to improve future operations.
427e5fc6 286 */
66325755 287 if (*errorp == 0) {
11ad5ade 288 ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
40043e7f 289 ip->ino_data = cursor.data->inode;
61aeeb33
MD
290 hammer_cache_node(cursor.node, &ip->cache[0]);
291 if (cache)
292 hammer_cache_node(cursor.node, cache);
427e5fc6 293 }
427e5fc6
MD
294
295 /*
296 * On success load the inode's record and data and insert the
297 * inode into the B-Tree. It is possible to race another lookup
298 * insertion of the same inode so deal with that condition too.
b3deaf57
MD
299 *
300 * The cursor's locked node interlocks against others creating and
301 * destroying ip while we were blocked.
427e5fc6 302 */
66325755 303 if (*errorp == 0) {
8cd0a023 304 hammer_ref(&ip->lock);
427e5fc6 305 if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
61aeeb33
MD
306 hammer_uncache_node(&ip->cache[0]);
307 hammer_uncache_node(&ip->cache[1]);
b84de5af 308 KKASSERT(ip->lock.refs == 1);
b3deaf57 309 --hammer_count_inodes;
9f5097dc 310 --hmp->count_inodes;
427e5fc6 311 kfree(ip, M_HAMMER);
b3deaf57 312 hammer_done_cursor(&cursor);
427e5fc6
MD
313 goto loop;
314 }
c0ade690 315 ip->flags |= HAMMER_INODE_ONDISK;
427e5fc6 316 } else {
19619882
MD
317 /*
318 * Do not panic on read-only accesses which fail, particularly
319 * historical accesses where the snapshot might not have
320 * complete connectivity.
321 */
322 if ((flags & HAMMER_INODE_RO) == 0) {
323 kprintf("hammer_get_inode: failed ip %p obj_id %016llx cursor %p error %d\n",
324 ip, ip->obj_id, &cursor, *errorp);
77062c8a 325 Debugger("x");
19619882 326 }
e63644f0
MD
327 if (ip->flags & HAMMER_INODE_RSV_INODES) {
328 ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
9f5097dc 329 --hmp->rsv_inodes;
e63644f0 330 }
9f5097dc 331 hmp->rsv_databufs -= ip->rsv_databufs;
e63644f0
MD
332 ip->rsv_databufs = 0; /* sanity */
333
b3deaf57 334 --hammer_count_inodes;
9f5097dc 335 --hmp->count_inodes;
66325755
MD
336 kfree(ip, M_HAMMER);
337 ip = NULL;
427e5fc6 338 }
b3deaf57 339 hammer_done_cursor(&cursor);
66325755
MD
340 return (ip);
341}
342
8cd0a023
MD
343/*
344 * Create a new filesystem object, returning the inode in *ipp. The
1f07f686 345 * returned inode will be referenced.
8cd0a023 346 *
b84de5af 347 * The inode is created in-memory.
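 *
 * Nothing is written to the media here; the flusher generates the
 * on-disk inode record later via hammer_update_inode().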
8cd0a023
MD
348 */
349int
a89aec1b
MD
350hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
351 struct ucred *cred, hammer_inode_t dip,
8cd0a023 352 struct hammer_inode **ipp)
66325755 353{
a89aec1b
MD
354 hammer_mount_t hmp;
355 hammer_inode_t ip;
6b4f890b 356 uid_t xuid;
66325755 357
8cd0a023
MD
358 hmp = trans->hmp;
359 ip = kmalloc(sizeof(*ip), M_HAMMER, M_WAITOK|M_ZERO);
b3deaf57 360 ++hammer_count_inodes;
9f5097dc 361 ++hmp->count_inodes;
0729c8c8 362 ip->obj_id = hammer_alloc_objid(trans, dip);
8cd0a023 363 KKASSERT(ip->obj_id != 0);
7f7c1f84 364 ip->obj_asof = hmp->asof;
8cd0a023 365 ip->hmp = hmp;
b84de5af 366 ip->flush_state = HAMMER_FST_IDLE;
11ad5ade 367 ip->flags = HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES;
8cd0a023 368
a5fddc16 369 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
8cd0a023 370 RB_INIT(&ip->rec_tree);
1f07f686 371 TAILQ_INIT(&ip->target_list);
8cd0a023 372
11ad5ade
MD
373 ip->ino_leaf.atime = trans->time;
374 ip->ino_data.mtime = trans->time;
375 ip->ino_data.size = 0;
376 ip->ino_data.nlinks = 0;
e63644f0
MD
377
378 /*
379 * A nohistory designator on the parent directory is inherited by
380 * the child.
381 */
382 ip->ino_data.uflags = dip->ino_data.uflags &
383 (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
384
11ad5ade 385 ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
2f85fa4d 386 ip->ino_leaf.base.localization = HAMMER_LOCALIZE_INODE;
11ad5ade
MD
387 ip->ino_leaf.base.obj_id = ip->obj_id;
388 ip->ino_leaf.base.key = 0;
389 ip->ino_leaf.base.create_tid = 0;
390 ip->ino_leaf.base.delete_tid = 0;
391 ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
392 ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);
393
394 ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
8cd0a023
MD
395 ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
396 ip->ino_data.mode = vap->va_mode;
b84de5af 397 ip->ino_data.ctime = trans->time;
11ad5ade 398 ip->ino_data.parent_obj_id = (dip) ? dip->ino_leaf.base.obj_id : 0;
6b4f890b 399
11ad5ade 400 switch(ip->ino_leaf.base.obj_type) {
7a04d74f
MD
401 case HAMMER_OBJTYPE_CDEV:
402 case HAMMER_OBJTYPE_BDEV:
403 ip->ino_data.rmajor = vap->va_rmajor;
404 ip->ino_data.rminor = vap->va_rminor;
405 break;
406 default:
407 break;
408 }
409
6b4f890b
MD
410 /*
411 * Calculate default uid/gid and overwrite with information from
412 * the vap.
413 */
414 xuid = hammer_to_unix_xid(&dip->ino_data.uid);
6b4f890b
MD
415 xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode, xuid, cred,
416 &vap->va_mode);
417 ip->ino_data.mode = vap->va_mode;
418
8cd0a023
MD
419 if (vap->va_vaflags & VA_UID_UUID_VALID)
420 ip->ino_data.uid = vap->va_uid_uuid;
6b4f890b 421 else if (vap->va_uid != (uid_t)VNOVAL)
7538695e
MD
422 hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
423 else
6b4f890b 424 hammer_guid_to_uuid(&ip->ino_data.uid, xuid);
7538695e 425
8cd0a023
MD
426 if (vap->va_vaflags & VA_GID_UUID_VALID)
427 ip->ino_data.gid = vap->va_gid_uuid;
6b4f890b 428 else if (vap->va_gid != (gid_t)VNOVAL)
8cd0a023 429 hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
7538695e
MD
430 else
431 ip->ino_data.gid = dip->ino_data.gid;
8cd0a023
MD
432
433 hammer_ref(&ip->lock);
434 if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
435 hammer_unref(&ip->lock);
a89aec1b 436 panic("hammer_create_inode: duplicate obj_id %llx", ip->obj_id);
8cd0a023
MD
437 }
438 *ipp = ip;
439 return(0);
66325755
MD
440}
441
d113fda1
MD
442/*
443 * Called by hammer_sync_inode().
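 *
 * Writes the inode's meta-data to the media: if an older copy of the
 * inode record is already on-disk it is first deleted (DELONDISK),
 * then a fresh record built from sync_ino_leaf/sync_ino_data is laid
 * down, unless the inode itself has been deleted.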
444 */
445static int
4e17f465 446hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
c0ade690 447{
4e17f465 448 hammer_transaction_t trans = cursor->trans;
c0ade690
MD
449 hammer_record_t record;
450 int error;
451
d26d0ae9 452retry:
c0ade690
MD
453 error = 0;
454
869e8f55
MD
455 /*
456 * If the inode has a presence on-disk then locate it and mark
457 * it deleted, setting DELONDISK.
458 *
459 * The record may or may not be physically deleted, depending on
460 * the retention policy.
461 */
76376933
MD
462 if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
463 HAMMER_INODE_ONDISK) {
4e17f465 464 hammer_normalize_cursor(cursor);
2f85fa4d 465 cursor->key_beg.localization = HAMMER_LOCALIZE_INODE;
4e17f465
MD
466 cursor->key_beg.obj_id = ip->obj_id;
467 cursor->key_beg.key = 0;
468 cursor->key_beg.create_tid = 0;
469 cursor->key_beg.delete_tid = 0;
470 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
471 cursor->key_beg.obj_type = 0;
472 cursor->asof = ip->obj_asof;
473 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
11ad5ade 474 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
4e17f465
MD
475 cursor->flags |= HAMMER_CURSOR_BACKEND;
476
477 error = hammer_btree_lookup(cursor);
e8599db1
MD
478 if (hammer_debug_inode)
479 kprintf("IPDEL %p %08x %d", ip, ip->flags, error);
b84de5af
MD
480 if (error) {
481 kprintf("error %d\n", error);
482 Debugger("hammer_update_inode");
483 }
484
c0ade690 485 if (error == 0) {
e63644f0 486 error = hammer_ip_delete_record(cursor, ip, trans->tid);
e8599db1
MD
487 if (hammer_debug_inode)
488 kprintf(" error %d\n", error);
f90dde4c 489 if (error && error != EDEADLK) {
b84de5af
MD
490 kprintf("error %d\n", error);
491 Debugger("hammer_update_inode2");
492 }
1f07f686 493 if (error == 0) {
195c19a1 494 ip->flags |= HAMMER_INODE_DELONDISK;
1f07f686 495 }
e8599db1
MD
496 if (cursor->node)
497 hammer_cache_node(cursor->node, &ip->cache[0]);
4e17f465
MD
498 }
499 if (error == EDEADLK) {
500 hammer_done_cursor(cursor);
501 error = hammer_init_cursor(trans, cursor,
502 &ip->cache[0], ip);
e8599db1
MD
503 if (hammer_debug_inode)
504 kprintf("IPDED %p %d\n", ip, error);
4e17f465
MD
505 if (error == 0)
506 goto retry;
c0ade690 507 }
c0ade690
MD
508 }
509
510 /*
869e8f55
MD
511 * Ok, write out the initial record or a new record (after deleting
512 * the old one), unless the DELETED flag is set. This routine will
513 * clear DELONDISK if it writes out a record.
76376933 514 *
869e8f55
MD
515 * Update our inode statistics if this is the first application of
516 * the inode on-disk.
c0ade690 517 */
869e8f55
MD
518 if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
519 /*
520 * Generate a record and write it to the media
521 */
11ad5ade 522 record = hammer_alloc_mem_record(ip, 0);
930bf163 523 record->type = HAMMER_MEM_RECORD_INODE;
1f07f686 524 record->flush_state = HAMMER_FST_FLUSH;
11ad5ade
MD
525 record->leaf = ip->sync_ino_leaf;
526 record->leaf.base.create_tid = trans->tid;
527 record->leaf.data_len = sizeof(ip->sync_ino_data);
b84de5af 528 record->data = (void *)&ip->sync_ino_data;
d36ec43b 529 record->flags |= HAMMER_RECF_INTERLOCK_BE;
4e17f465
MD
530 for (;;) {
531 error = hammer_ip_sync_record_cursor(cursor, record);
e8599db1
MD
532 if (hammer_debug_inode)
533 kprintf("GENREC %p rec %08x %d\n",
534 ip, record->flags, error);
4e17f465
MD
535 if (error != EDEADLK)
536 break;
537 hammer_done_cursor(cursor);
538 error = hammer_init_cursor(trans, cursor,
539 &ip->cache[0], ip);
e8599db1
MD
540 if (hammer_debug_inode)
541 kprintf("GENREC reinit %d\n", error);
4e17f465
MD
542 if (error)
543 break;
544 }
b84de5af
MD
545 if (error) {
546 kprintf("error %d\n", error);
547 Debugger("hammer_update_inode3");
548 }
d36ec43b
MD
549
550 /*
551 * The record isn't managed by the inode's record tree,
552 * destroy it whether we succeed or fail.
553 */
554 record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
555 record->flags |= HAMMER_RECF_DELETED_FE;
1f07f686 556 record->flush_state = HAMMER_FST_IDLE;
b3deaf57 557 hammer_rel_mem_record(record);
d36ec43b 558
869e8f55
MD
559 /*
560 * Finish up.
561 */
d26d0ae9 562 if (error == 0) {
e8599db1
MD
563 if (hammer_debug_inode)
564 kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
11ad5ade 565 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
b84de5af
MD
566 HAMMER_INODE_ITIMES);
567 ip->flags &= ~HAMMER_INODE_DELONDISK;
1f07f686
MD
568
569 /*
570 * Root volume count of inodes
571 */
d26d0ae9 572 if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
e8599db1
MD
573 hammer_modify_volume_field(trans,
574 trans->rootvol,
575 vol0_stat_inodes);
0b075555 576 ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
10a5d1ba 577 hammer_modify_volume_done(trans->rootvol);
d26d0ae9 578 ip->flags |= HAMMER_INODE_ONDISK;
e8599db1
MD
579 if (hammer_debug_inode)
580 kprintf("NOWONDISK %p\n", ip);
d26d0ae9 581 }
fbc6e32a 582 }
c0ade690 583 }
869e8f55
MD
584
585 /*
586 * If the inode has been destroyed, clean out any left-over flags
587 * that may have been set by the frontend.
588 */
f90dde4c 589 if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
11ad5ade 590 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
f90dde4c
MD
591 HAMMER_INODE_ITIMES);
592 }
c0ade690
MD
593 return(error);
594}
595
a89aec1b 596/*
d113fda1
MD
597 * Update only the itimes fields. This is done non-historically. The
598 * record is updated in-place on the disk.
599 */
600static int
4e17f465 601hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
d113fda1 602{
4e17f465 603 hammer_transaction_t trans = cursor->trans;
11ad5ade 604 struct hammer_btree_leaf_elm *leaf;
d113fda1
MD
605 int error;
606
6a37e7e4 607retry:
d113fda1
MD
608 error = 0;
609 if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
610 HAMMER_INODE_ONDISK) {
4e17f465 611 hammer_normalize_cursor(cursor);
2f85fa4d 612 cursor->key_beg.localization = HAMMER_LOCALIZE_INODE;
4e17f465
MD
613 cursor->key_beg.obj_id = ip->obj_id;
614 cursor->key_beg.key = 0;
615 cursor->key_beg.create_tid = 0;
616 cursor->key_beg.delete_tid = 0;
617 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
618 cursor->key_beg.obj_type = 0;
619 cursor->asof = ip->obj_asof;
620 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
11ad5ade 621 cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
4e17f465
MD
622 cursor->flags |= HAMMER_CURSOR_BACKEND;
623
624 error = hammer_btree_lookup(cursor);
b84de5af
MD
625 if (error) {
626 kprintf("error %d\n", error);
627 Debugger("hammer_update_itimes1");
628 }
d113fda1 629 if (error == 0) {
10a5d1ba 630 /*
11ad5ade 631 * Do not generate UNDO records for atime updates.
10a5d1ba 632 */
11ad5ade
MD
633 leaf = cursor->leaf;
634 hammer_modify_node(trans, cursor->node,
635 &leaf->atime, sizeof(leaf->atime));
636 leaf->atime = ip->sync_ino_leaf.atime;
637 hammer_modify_node_done(cursor->node);
638 /*rec->ino_mtime = ip->sync_ino_rec.ino_mtime;*/
b84de5af 639 ip->sync_flags &= ~HAMMER_INODE_ITIMES;
d113fda1 640 /* XXX recalculate crc */
4e17f465
MD
641 hammer_cache_node(cursor->node, &ip->cache[0]);
642 }
643 if (error == EDEADLK) {
644 hammer_done_cursor(cursor);
645 error = hammer_init_cursor(trans, cursor,
646 &ip->cache[0], ip);
647 if (error == 0)
648 goto retry;
d113fda1 649 }
d113fda1
MD
650 }
651 return(error);
652}
653
654/*
1f07f686 655 * Release a reference on an inode, flush as requested.
b84de5af
MD
656 *
657 * On the last reference we queue the inode to the flusher for its final
658 * disposition.
a89aec1b 659 */
66325755 660void
a89aec1b 661hammer_rel_inode(struct hammer_inode *ip, int flush)
66325755 662{
1f07f686
MD
663 hammer_mount_t hmp = ip->hmp;
664
f90dde4c
MD
665 /*
666 * Handle disposition when dropping the last ref.
667 */
1f07f686
MD
668 for (;;) {
669 if (ip->lock.refs == 1) {
670 /*
671 * Determine whether on-disk action is needed for
672 * the inode's final disposition.
673 */
e8599db1
MD
674 KKASSERT(ip->vp == NULL);
675 hammer_inode_unloadable_check(ip, 0);
4e17f465 676 if (ip->flags & HAMMER_INODE_MODMASK) {
0832c9bb
MD
677 if (hmp->rsv_inodes > desiredvnodes) {
678 hammer_flush_inode(ip,
679 HAMMER_FLUSH_SIGNAL);
680 } else {
681 hammer_flush_inode(ip, 0);
682 }
4e17f465 683 } else if (ip->lock.refs == 1) {
1f07f686
MD
684 hammer_unload_inode(ip);
685 break;
686 }
b84de5af 687 } else {
4e17f465 688 if (flush)
1f07f686 689 hammer_flush_inode(ip, 0);
4e17f465 690
1f07f686
MD
691 /*
692 * The inode still has multiple refs, try to drop
693 * one ref.
694 */
695 KKASSERT(ip->lock.refs >= 1);
696 if (ip->lock.refs > 1) {
697 hammer_unref(&ip->lock);
698 break;
699 }
b84de5af 700 }
f90dde4c 701 }
427e5fc6
MD
702}
703
27ea2398 704/*
b84de5af
MD
705 * Unload and destroy the specified inode. Must be called with one remaining
706 * reference. The reference is disposed of.
8cd0a023 707 *
b84de5af 708 * This can only be called in the context of the flusher.
27ea2398 709 */
b84de5af 710static int
ec4e8497 711hammer_unload_inode(struct hammer_inode *ip)
27ea2398 712{
9f5097dc
MD
713 hammer_mount_t hmp = ip->hmp;
714
b84de5af 715 KASSERT(ip->lock.refs == 1,
a89aec1b 716 ("hammer_unload_inode: %d refs\n", ip->lock.refs));
8cd0a023 717 KKASSERT(ip->vp == NULL);
f90dde4c
MD
718 KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
719 KKASSERT(ip->cursor_ip_refs == 0);
45a014dc 720 KKASSERT(ip->lock.lockcount == 0);
f90dde4c
MD
721 KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);
722
723 KKASSERT(RB_EMPTY(&ip->rec_tree));
1f07f686 724 KKASSERT(TAILQ_EMPTY(&ip->target_list));
f90dde4c 725
9f5097dc 726 RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);
f90dde4c
MD
727
728 hammer_uncache_node(&ip->cache[0]);
729 hammer_uncache_node(&ip->cache[1]);
0729c8c8
MD
730 if (ip->objid_cache)
731 hammer_clear_objid(ip);
f90dde4c 732 --hammer_count_inodes;
9f5097dc 733 --hmp->count_inodes;
9f5097dc
MD
734
735 if (ip->flags & HAMMER_INODE_RECLAIM) {
736 --hammer_count_reclaiming;
737 --hmp->inode_reclaims;
738 ip->flags &= ~HAMMER_INODE_RECLAIM;
af209b0f
MD
739 if (hmp->flags & HAMMER_MOUNT_WAITIMAX)
740 hammer_inode_wakereclaims(hmp);
9f5097dc 741 }
f90dde4c 742 kfree(ip, M_HAMMER);
6b4f890b 743
27ea2398
MD
744 return(0);
745}
746
51c35492
MD
747/*
748 * Called on mount -u when switching from RW to RO or vice-versa. Adjust
749 * the read-only flag for cached inodes.
750 *
751 * This routine is called from a RB_SCAN().
752 */
753int
754hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
755{
756 hammer_mount_t hmp = ip->hmp;
757
758 if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
759 ip->flags |= HAMMER_INODE_RO;
760 else
761 ip->flags &= ~HAMMER_INODE_RO;
762 return(0);
763}
764
427e5fc6 765/*
d113fda1
MD
766 * A transaction has modified an inode, requiring updates as specified by
767 * the passed flags.
7f7c1f84 768 *
d113fda1 769 * HAMMER_INODE_DDIRTY: Inode data has been updated
1f07f686 770 * HAMMER_INODE_XDIRTY: Dirty in-memory records
4e17f465 771 * HAMMER_INODE_BUFS: Dirty buffer cache buffers
d113fda1
MD
772 * HAMMER_INODE_DELETED: Inode record/data must be deleted
773 * HAMMER_INODE_ITIMES: mtime/atime has been updated
427e5fc6 774 */
66325755 775void
47637bff 776hammer_modify_inode(hammer_inode_t ip, int flags)
427e5fc6 777{
d113fda1 778 KKASSERT ((ip->flags & HAMMER_INODE_RO) == 0 ||
11ad5ade
MD
779 (flags & (HAMMER_INODE_DDIRTY |
780 HAMMER_INODE_XDIRTY | HAMMER_INODE_BUFS |
781 HAMMER_INODE_DELETED | HAMMER_INODE_ITIMES)) == 0);
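	/*
	 * The first modification reserves an inode against
	 * hmp->rsv_inodes; hammer_rel_inode() compares that total
	 * against desiredvnodes when deciding whether to signal the
	 * flusher.
	 */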
e63644f0
MD
782 if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
783 ip->flags |= HAMMER_INODE_RSV_INODES;
784 ++ip->hmp->rsv_inodes;
785 }
b84de5af
MD
786
787 ip->flags |= flags;
788}
789
790/*
1f07f686
MD
791 * Request that an inode be flushed. This whole mess cannot block and may
792 * recurse. Once requested HAMMER will attempt to actively flush it until
793 * the flush can be done.
b84de5af 794 *
1f07f686
MD
795 * The inode may already be flushing, or may be in a setup state. We can
796 * place the inode in a flushing state if it is currently idle and flag it
797 * to reflush if it is currently flushing.
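 *
 * Informal summary of the flush states used below:
 *
 *	HAMMER_FST_IDLE  - no pending records or target_list entries.
 *	HAMMER_FST_SETUP - pending records or target_list entries exist
 *			   but the inode is not yet in a flush group.
 *	HAMMER_FST_FLUSH - the inode's state has been snapshotted into
 *			   the sync_* fields and the inode queued to the
 *			   flusher under ip->flush_group.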
b84de5af
MD
798 */
799void
f90dde4c 800hammer_flush_inode(hammer_inode_t ip, int flags)
b84de5af 801{
1f07f686
MD
802 hammer_record_t depend;
803 int r, good;
804
805 /*
806 * Trivial 'nothing to flush' case. If the inode is in a SETUP
807 * state we have to put it back into an IDLE state so we can
808 * drop the extra ref.
809 */
4e17f465 810 if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
1f07f686
MD
811 if (ip->flush_state == HAMMER_FST_SETUP) {
812 ip->flush_state = HAMMER_FST_IDLE;
813 hammer_rel_inode(ip, 0);
ec4e8497 814 }
b84de5af
MD
815 return;
816 }
42c7d26b 817
1f07f686
MD
818 /*
819 * Our flush action will depend on the current state.
820 */
821 switch(ip->flush_state) {
822 case HAMMER_FST_IDLE:
823 /*
824 * We have no dependancies and can flush immediately. Some
825 * of our children may not be flushable so we have to re-test
826 * with that additional knowledge.
827 */
828 hammer_flush_inode_core(ip, flags);
829 break;
830 case HAMMER_FST_SETUP:
831 /*
832 * Recurse upwards through dependancies via target_list
833 * and start their flusher actions going if possible.
834 *
835 * 'good' is our connectivity. -1 means we have none and
836 * can't flush, 0 means there weren't any dependancies, and
837 * 1 means we have good connectivity.
838 */
839 good = 0;
840 TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
841 r = hammer_setup_parent_inodes(depend);
842 if (r < 0 && good == 0)
843 good = -1;
844 if (r > 0)
845 good = 1;
846 }
847
848 /*
849 * We can continue if good >= 0. Determine how many records
850 * under our inode can be flushed (and mark them).
851 */
1f07f686
MD
852 if (good >= 0) {
853 hammer_flush_inode_core(ip, flags);
854 } else {
855 ip->flags |= HAMMER_INODE_REFLUSH;
4e17f465
MD
856 if (flags & HAMMER_FLUSH_SIGNAL) {
857 ip->flags |= HAMMER_INODE_RESIGNAL;
858 hammer_flusher_async(ip->hmp);
859 }
1f07f686
MD
860 }
861 break;
862 default:
863 /*
864 * We are already flushing, flag the inode to reflush
865 * if needed after it completes its current flush.
866 */
867 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
868 ip->flags |= HAMMER_INODE_REFLUSH;
4e17f465
MD
869 if (flags & HAMMER_FLUSH_SIGNAL) {
870 ip->flags |= HAMMER_INODE_RESIGNAL;
871 hammer_flusher_async(ip->hmp);
872 }
1f07f686
MD
873 break;
874 }
875}
876
877/*
878 * We are asked to recurse upwards and convert the record from SETUP
879 * to FLUSH if possible. record->ip is a parent of the caller's inode,
880 * and record->target_ip is the caller's inode.
881 *
882 * Return 1 if the record gives us connectivity
883 *
884 * Return 0 if the record is not relevant
885 *
886 * Return -1 if we can't resolve the dependancy and there is no connectivity.
887 */
888static int
889hammer_setup_parent_inodes(hammer_record_t record)
890{
891 hammer_mount_t hmp = record->ip->hmp;
892 hammer_record_t depend;
893 hammer_inode_t ip;
894 int r, good;
895
896 KKASSERT(record->flush_state != HAMMER_FST_IDLE);
897 ip = record->ip;
898
899 /*
900 * If the record is already flushing, is it in our flush group?
901 *
e8599db1
MD
902 * If it is in our flush group but it is a general record or a
903 * delete-on-disk, it does not improve our connectivity (return 0),
904 * and if the target inode is not trying to destroy itself we can't
905 * allow the operation yet anyway (the second return -1).
1f07f686
MD
906 */
907 if (record->flush_state == HAMMER_FST_FLUSH) {
da2da375 908 if (record->flush_group != hmp->flusher.next) {
1f07f686
MD
909 ip->flags |= HAMMER_INODE_REFLUSH;
910 return(-1);
f90dde4c 911 }
1f07f686
MD
912 if (record->type == HAMMER_MEM_RECORD_ADD)
913 return(1);
e8599db1 914 /* GENERAL or DEL */
1f07f686
MD
915 return(0);
916 }
917
918 /*
919 * It must be a setup record. Try to resolve the setup dependancies
920 * by recursing upwards so we can place ip on the flush list.
921 */
922 KKASSERT(record->flush_state == HAMMER_FST_SETUP);
923
924 good = 0;
925 TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
926 r = hammer_setup_parent_inodes(depend);
927 if (r < 0 && good == 0)
928 good = -1;
929 if (r > 0)
930 good = 1;
931 }
932
933 /*
934 * We can't flush ip because it has no connectivity (XXX also check
935 * nlinks for pre-existing connectivity!). Flag it so any resolution
936 * recurses back down.
937 */
938 if (good < 0) {
939 ip->flags |= HAMMER_INODE_REFLUSH;
940 return(good);
941 }
942
943 /*
944 * We are go, place the parent inode in a flushing state so we can
945 * place its record in a flushing state. Note that the parent
946 * may already be flushing. The record must be in the same flush
947 * group as the parent.
948 */
949 if (ip->flush_state != HAMMER_FST_FLUSH)
950 hammer_flush_inode_core(ip, HAMMER_FLUSH_RECURSION);
951 KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
952 KKASSERT(record->flush_state == HAMMER_FST_SETUP);
953
954#if 0
955 if (record->type == HAMMER_MEM_RECORD_DEL &&
869e8f55 956 (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
1f07f686
MD
957 /*
958 * Regardless of flushing state we cannot sync this path if the
959 * record represents a delete-on-disk but the target inode
960 * is not ready to sync its own deletion.
961 *
962 * XXX need to count effective nlinks to determine whether
963 * the flush is ok, otherwise removing a hardlink will
964 * just leave the DEL record to rot.
965 */
966 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
967 return(-1);
968 } else
969#endif
da2da375 970 if (ip->flush_group == ip->hmp->flusher.next) {
1f07f686
MD
971 /*
972 * This is the record we wanted to synchronize.
973 */
974 record->flush_state = HAMMER_FST_FLUSH;
975 record->flush_group = ip->flush_group;
976 hammer_ref(&record->lock);
977 if (record->type == HAMMER_MEM_RECORD_ADD)
978 return(1);
979
980 /*
e8599db1
MD
981 * A general or delete-on-disk record does not contribute
982 * to our visibility. We can still flush it, however.
1f07f686
MD
983 */
984 return(0);
985 } else {
986 /*
987 * We couldn't resolve the dependancies, request that the
988 * inode be flushed when the dependancies can be resolved.
989 */
990 ip->flags |= HAMMER_INODE_REFLUSH;
991 return(-1);
7f7c1f84 992 }
c0ade690
MD
993}
994
995/*
1f07f686 996 * This is the core routine placing an inode into the FST_FLUSH state.
c0ade690 997 */
b84de5af 998static void
1f07f686 999hammer_flush_inode_core(hammer_inode_t ip, int flags)
b84de5af 1000{
1f07f686 1001 int go_count;
1f07f686 1002
4e17f465
MD
1003 /*
1004 * Set flush state and prevent the flusher from cycling into
1005 * the next flush group. Do not place the ip on the list yet.
1006 * Inodes not in the idle state get an extra reference.
1007 */
1f07f686
MD
1008 KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
1009 if (ip->flush_state == HAMMER_FST_IDLE)
1010 hammer_ref(&ip->lock);
1011 ip->flush_state = HAMMER_FST_FLUSH;
da2da375
MD
1012 ip->flush_group = ip->hmp->flusher.next;
1013 ++ip->hmp->flusher.group_lock;
af209b0f
MD
1014 ++ip->hmp->count_iqueued;
1015 ++hammer_count_iqueued;
b84de5af 1016
e8599db1
MD
1017 /*
1018 * We need to be able to vfsync/truncate from the backend.
1019 */
1020 KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
1021 if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
1022 ip->flags |= HAMMER_INODE_VHELD;
1023 vref(ip->vp);
1024 }
1025
ec4e8497 1026 /*
1f07f686
MD
1027 * Figure out how many in-memory records we can actually flush
1028 * (not including inode meta-data, buffers, etc).
ec4e8497 1029 */
1f07f686
MD
1030 if (flags & HAMMER_FLUSH_RECURSION) {
1031 go_count = 1;
1032 } else {
1033 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
1034 hammer_setup_child_callback, NULL);
1035 }
b84de5af
MD
1036
1037 /*
1f07f686
MD
1038 * This is a more involved test that includes go_count. If we
1039 * can't flush, flag the inode and return. If go_count is 0 we
1040 * were unable to flush any records in our rec_tree and
1041 * must ignore the XDIRTY flag.
b84de5af 1042 */
1f07f686
MD
1043 if (go_count == 0) {
1044 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
1045 ip->flags |= HAMMER_INODE_REFLUSH;
af209b0f
MD
1046
1047 --ip->hmp->count_iqueued;
1048 --hammer_count_iqueued;
1049
1f07f686 1050 ip->flush_state = HAMMER_FST_SETUP;
e8599db1
MD
1051 if (ip->flags & HAMMER_INODE_VHELD) {
1052 ip->flags &= ~HAMMER_INODE_VHELD;
1053 vrele(ip->vp);
1054 }
4e17f465
MD
1055 if (flags & HAMMER_FLUSH_SIGNAL) {
1056 ip->flags |= HAMMER_INODE_RESIGNAL;
1057 hammer_flusher_async(ip->hmp);
1058 }
da2da375
MD
1059 if (--ip->hmp->flusher.group_lock == 0)
1060 wakeup(&ip->hmp->flusher.group_lock);
1f07f686
MD
1061 return;
1062 }
1063 }
b84de5af 1064
b84de5af
MD
1065 /*
1066 * Snapshot the state of the inode for the backend flusher.
1067 *
1068 * The truncation must be retained in the frontend until after
1069 * we've actually performed the record deletion.
1f07f686
MD
1070 *
1071 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
1072 * and stays in ip->flags. Once set, it stays set until the
1073 * inode is destroyed.
b84de5af
MD
1074 */
1075 ip->sync_flags = (ip->flags & HAMMER_INODE_MODMASK);
1076 ip->sync_trunc_off = ip->trunc_off;
11ad5ade 1077 ip->sync_ino_leaf = ip->ino_leaf;
b84de5af 1078 ip->sync_ino_data = ip->ino_data;
47637bff
MD
1079 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
1080 ip->flags &= ~HAMMER_INODE_MODMASK;
0832c9bb
MD
1081#ifdef DEBUG_TRUNCATE
1082 if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
1083 kprintf("truncateS %016llx\n", ip->sync_trunc_off);
1084#endif
b84de5af
MD
1085
1086 /*
4e17f465 1087 * The flusher list inherits our inode and reference.
b84de5af 1088 */
1f07f686 1089 TAILQ_INSERT_TAIL(&ip->hmp->flush_list, ip, flush_entry);
da2da375
MD
1090 if (--ip->hmp->flusher.group_lock == 0)
1091 wakeup(&ip->hmp->flusher.group_lock);
1f07f686 1092
0832c9bb 1093 if (flags & HAMMER_FLUSH_SIGNAL) {
1f07f686 1094 hammer_flusher_async(ip->hmp);
0832c9bb 1095 }
b84de5af
MD
1096}
1097
ec4e8497 1098/*
1f07f686
MD
1099 * Callback for scan of ip->rec_tree. Try to include each record in our
1100 * flush. ip->flush_group has been set but the inode has not yet been
1101 * moved into a flushing state.
1102 *
1103 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
1104 * both inodes.
1105 *
1106 * We return 1 for any record placed or found in FST_FLUSH, which prevents
1107 * the caller from shortcutting the flush.
ec4e8497 1108 */
c0ade690 1109static int
1f07f686 1110hammer_setup_child_callback(hammer_record_t rec, void *data)
b84de5af 1111{
1f07f686
MD
1112 hammer_inode_t target_ip;
1113 hammer_inode_t ip;
1114 int r;
1115
1116 /*
1117 * If the record has been deleted by the backend (it's being held
1118 * by the frontend in a race), just ignore it.
1119 */
1120 if (rec->flags & HAMMER_RECF_DELETED_BE)
ec4e8497 1121 return(0);
1f07f686
MD
1122
1123 /*
1124 * If the record is in an idle state it has no dependancies and
1125 * can be flushed.
1126 */
1127 ip = rec->ip;
1128 r = 0;
1129
1130 switch(rec->flush_state) {
1131 case HAMMER_FST_IDLE:
1132 /*
1133 * Record has no setup dependancy, we can flush it.
1134 */
1135 KKASSERT(rec->target_ip == NULL);
1136 rec->flush_state = HAMMER_FST_FLUSH;
1137 rec->flush_group = ip->flush_group;
b84de5af 1138 hammer_ref(&rec->lock);
1f07f686
MD
1139 r = 1;
1140 break;
1141 case HAMMER_FST_SETUP:
1142 /*
1143 * Record has a setup dependancy. Try to include the
1144 * target ip in the flush.
1145 *
1146 * We have to be careful here, if we do not do the right
1147 * thing we can lose track of dirty inodes and the system
1148 * will lockup trying to allocate buffers.
1149 */
1150 target_ip = rec->target_ip;
1151 KKASSERT(target_ip != NULL);
1152 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
1153 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
1154 /*
1155 * If the target IP is already flushing in our group
1156 * we are golden, otherwise make sure the target
1157 * reflushes.
1158 */
1159 if (target_ip->flush_group == ip->flush_group) {
1160 rec->flush_state = HAMMER_FST_FLUSH;
1161 rec->flush_group = ip->flush_group;
1162 hammer_ref(&rec->lock);
1163 r = 1;
1164 } else {
1165 target_ip->flags |= HAMMER_INODE_REFLUSH;
1166 }
1167 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
1168 /*
1169 * If the target IP is not flushing we can force
1170 * it to flush, even if it is unable to write out
1171 * any of its own records we have at least one in
1172 * hand that we CAN deal with.
1173 */
1174 rec->flush_state = HAMMER_FST_FLUSH;
1175 rec->flush_group = ip->flush_group;
1176 hammer_ref(&rec->lock);
1177 hammer_flush_inode_core(target_ip,
1178 HAMMER_FLUSH_RECURSION);
1179 r = 1;
1180 } else {
1181 /*
e8599db1
MD
1182 * General or delete-on-disk record.
1183 *
1184 * XXX this needs help. If a delete-on-disk we could
1185 * disconnect the target. If the target has its own
1186 * dependancies they really need to be flushed.
1f07f686
MD
1187 *
1188 * XXX
1189 */
1190 rec->flush_state = HAMMER_FST_FLUSH;
1191 rec->flush_group = ip->flush_group;
1192 hammer_ref(&rec->lock);
1193 hammer_flush_inode_core(target_ip,
1194 HAMMER_FLUSH_RECURSION);
1195 r = 1;
1196 }
1197 break;
1198 case HAMMER_FST_FLUSH:
1199 /*
1200 * Record already associated with a flush group. It had
1201 * better be ours.
1202 */
1203 KKASSERT(rec->flush_group == ip->flush_group);
1204 r = 1;
1205 break;
b84de5af 1206 }
1f07f686 1207 return(r);
b84de5af
MD
1208}
1209
b84de5af
MD
1210/*
1211 * Wait for a previously queued flush to complete
1212 */
1213void
1214hammer_wait_inode(hammer_inode_t ip)
1215{
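	/*
	 * A SETUP-state inode is kicked into a flush, otherwise we just
	 * sleep; hammer_flush_inode_done() clears HAMMER_INODE_FLUSHW
	 * and issues the matching wakeup(&ip->flags).
	 */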
e8599db1 1216 while (ip->flush_state != HAMMER_FST_IDLE) {
0832c9bb
MD
1217 if (ip->flush_state == HAMMER_FST_SETUP) {
1218 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1219 } else {
1220 ip->flags |= HAMMER_INODE_FLUSHW;
1221 tsleep(&ip->flags, 0, "hmrwin", 0);
1222 }
b84de5af
MD
1223 }
1224}
1225
a99b9ea2
MD
1226/*
1227 * Wait for records to drain
1228 */
1229void
1230hammer_wait_inode_recs(hammer_inode_t ip)
1231{
1232 while (ip->rsv_recs > hammer_limit_irecs) {
1233 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1234 if (ip->rsv_recs > hammer_limit_irecs) {
1235 ip->flags |= HAMMER_INODE_PARTIALW;
1236 tsleep(&ip->flags, 0, "hmrwpp", 0);
1237 }
1238 }
1239}
1240
b84de5af
MD
1241/*
1242 * Called by the backend code when a flush has been completed.
1243 * The inode has already been removed from the flush list.
1244 *
1245 * A pipelined flush can occur, in which case we must re-enter the
1246 * inode on the list and re-copy its fields.
1247 */
1248void
1249hammer_flush_inode_done(hammer_inode_t ip)
1250{
af209b0f
MD
1251 hammer_mount_t hmp;
1252 int dorel;
1955afa7 1253
b84de5af
MD
1254 KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
1255
af209b0f
MD
1256 hmp = ip->hmp;
1257
1f07f686
MD
1258 /*
1259 * Merge left-over flags back into the frontend and fix the state.
1260 */
b84de5af 1261 ip->flags |= ip->sync_flags;
1f07f686
MD
1262
1263 /*
1264 * The backend may have adjusted nlinks, so if the adjusted nlinks
1265 * does not match the frontend's count, set the frontend's DDIRTY flag again.
1266 */
11ad5ade
MD
1267 if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
1268 ip->flags |= HAMMER_INODE_DDIRTY;
b84de5af 1269
4e17f465 1270 /*
e63644f0
MD
1271 * Fix up the dirty buffer status. IO completions will also
1272 * try to clean up rsv_databufs.
4e17f465 1273 */
0832c9bb 1274 if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
1f07f686 1275 ip->flags |= HAMMER_INODE_BUFS;
e63644f0 1276 } else {
af209b0f 1277 hmp->rsv_databufs -= ip->rsv_databufs;
e63644f0 1278 ip->rsv_databufs = 0;
1f07f686
MD
1279 }
1280
1281 /*
1282 * Re-set the XDIRTY flag if some of the inode's in-memory records
1283 * could not be flushed.
1284 */
0832c9bb
MD
1285 KKASSERT((RB_EMPTY(&ip->rec_tree) &&
1286 (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
1287 (!RB_EMPTY(&ip->rec_tree) &&
1288 (ip->flags & HAMMER_INODE_XDIRTY) != 0));
4e17f465
MD
1289
1290 /*
1291 * Do not lose track of inodes which no longer have vnode
1292 * associations, otherwise they may never get flushed again.
1293 */
1294 if ((ip->flags & HAMMER_INODE_MODMASK) && ip->vp == NULL)
b84de5af 1295 ip->flags |= HAMMER_INODE_REFLUSH;
4e17f465
MD
1296
1297 /*
1298 * Adjust flush_state. The target state (idle or setup) shouldn't
1299 * be terribly important since we will reflush if we really need
1300 * to do anything. XXX
1301 */
1302 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
1303 ip->flush_state = HAMMER_FST_IDLE;
1304 dorel = 1;
1305 } else {
1306 ip->flush_state = HAMMER_FST_SETUP;
af209b0f 1307 dorel = 0;
b84de5af 1308 }
b84de5af 1309
af209b0f
MD
1310 --hmp->count_iqueued;
1311 --hammer_count_iqueued;
1312
e8599db1
MD
1313 /*
1314 * Clean up the vnode ref
1315 */
1316 if (ip->flags & HAMMER_INODE_VHELD) {
1317 ip->flags &= ~HAMMER_INODE_VHELD;
1318 vrele(ip->vp);
1319 }
1320
b84de5af
MD
1321 /*
1322 * If the frontend made more changes and requested another flush,
4e17f465 1323 * then try to get it running.
b84de5af
MD
1324 */
1325 if (ip->flags & HAMMER_INODE_REFLUSH) {
1326 ip->flags &= ~HAMMER_INODE_REFLUSH;
4e17f465
MD
1327 if (ip->flags & HAMMER_INODE_RESIGNAL) {
1328 ip->flags &= ~HAMMER_INODE_RESIGNAL;
1329 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
1330 } else {
1331 hammer_flush_inode(ip, 0);
0729c8c8 1332 }
4e17f465
MD
1333 }
1334
e63644f0
MD
1335 /*
1336 * If the inode is now clean drop the space reservation.
1337 */
1338 if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
1339 (ip->flags & HAMMER_INODE_RSV_INODES)) {
1340 ip->flags &= ~HAMMER_INODE_RSV_INODES;
af209b0f 1341 --hmp->rsv_inodes;
e63644f0
MD
1342 }
1343
4e17f465
MD
1344 /*
1345 * Finally, if the frontend is waiting for a flush to complete,
1346 * wake it up.
1347 */
1348 if (ip->flush_state != HAMMER_FST_FLUSH) {
b84de5af
MD
1349 if (ip->flags & HAMMER_INODE_FLUSHW) {
1350 ip->flags &= ~HAMMER_INODE_FLUSHW;
1351 wakeup(&ip->flags);
1352 }
1353 }
1f07f686
MD
1354 if (dorel)
1355 hammer_rel_inode(ip, 0);
b84de5af
MD
1356}
1357
1358/*
1359 * Called from hammer_sync_inode() to synchronize in-memory records
1360 * to the media.
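 *
 * This runs in the flusher (backend) context via the RB_SCAN of
 * ip->rec_tree in hammer_sync_inode(); 'data' is the backend cursor.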
1361 */
1362static int
1363hammer_sync_record_callback(hammer_record_t record, void *data)
c0ade690 1364{
4e17f465
MD
1365 hammer_cursor_t cursor = data;
1366 hammer_transaction_t trans = cursor->trans;
c0ade690
MD
1367 int error;
1368
b84de5af 1369 /*
1f07f686 1370 * Skip records that do not belong to the current flush.
b84de5af 1371 */
47637bff 1372 ++hammer_stats_record_iterations;
1f07f686 1373 if (record->flush_state != HAMMER_FST_FLUSH)
b84de5af 1374 return(0);
47637bff 1375
1f07f686
MD
1376#if 1
1377 if (record->flush_group != record->ip->flush_group) {
1378 kprintf("sync_record %p ip %p bad flush group %d %d\n", record, record->ip, record->flush_group ,record->ip->flush_group);
1379 Debugger("blah2");
1380 return(0);
1381 }
1382#endif
1383 KKASSERT(record->flush_group == record->ip->flush_group);
d36ec43b
MD
1384
1385 /*
1386 * Interlock the record using the BE flag. Once BE is set the
1387 * frontend cannot change the state of FE.
1388 *
1389 * NOTE: If FE is set prior to us setting BE we still sync the
1390 * record out, but the flush completion code converts it to
1391 * a delete-on-disk record instead of destroying it.
1392 */
4e17f465 1393 KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
d36ec43b
MD
1394 record->flags |= HAMMER_RECF_INTERLOCK_BE;
1395
47637bff
MD
1396 /*
1397 * The backend may have already disposed of the record.
1398 */
1399 if (record->flags & HAMMER_RECF_DELETED_BE) {
1400 error = 0;
1401 goto done;
1402 }
1403
98f7132d
MD
1404 /*
1405 * If the whole inode is being deleted, all on-disk records will
930bf163
MD
1406 * be deleted very soon; we can't sync any new records to disk
1407 * because they will be deleted in the same transaction they were
1408 * created in (delete_tid == create_tid), which will assert.
1409 *
1410 * XXX There may be a case with RECORD_ADD with DELETED_FE set
1411 * that we currently panic on.
98f7132d
MD
1412 */
1413 if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
930bf163 1414 switch(record->type) {
47637bff
MD
1415 case HAMMER_MEM_RECORD_DATA:
1416 /*
1417 * We don't have to do anything, if the record was
1418 * committed the space will have been accounted for
1419 * in the blockmap.
1420 */
1421 /* fall through */
930bf163 1422 case HAMMER_MEM_RECORD_GENERAL:
98f7132d
MD
1423 record->flags |= HAMMER_RECF_DELETED_FE;
1424 record->flags |= HAMMER_RECF_DELETED_BE;
930bf163
MD
1425 error = 0;
1426 goto done;
1427 case HAMMER_MEM_RECORD_ADD:
1428 panic("hammer_sync_record_callback: illegal add "
1429 "during inode deletion record %p", record);
1430 break; /* NOT REACHED */
1431 case HAMMER_MEM_RECORD_INODE:
1432 panic("hammer_sync_record_callback: attempt to "
1433 "sync inode record %p?", record);
1434 break; /* NOT REACHED */
1435 case HAMMER_MEM_RECORD_DEL:
1436 /*
1437 * Follow through and issue the on-disk deletion
98f7132d 1438 */
930bf163 1439 break;
98f7132d 1440 }
98f7132d
MD
1441 }
1442
d36ec43b
MD
1443 /*
1444 * If DELETED_FE is set we may have already sent dependant pieces
1445 * to the disk and we must flush the record as if it hadn't been
1446 * deleted. This creates a bit of a mess because we have to
1f07f686 1447 * have ip_sync_record convert the record to MEM_RECORD_DEL before
d36ec43b
MD
1448 * it inserts the B-Tree record. Otherwise the media sync might
1449 * be visible to the frontend.
1450 */
1f07f686 1451 if (record->flags & HAMMER_RECF_DELETED_FE) {
e8599db1
MD
1452 if (record->type == HAMMER_MEM_RECORD_ADD) {
1453 record->flags |= HAMMER_RECF_CONVERT_DELETE;
1454 } else {
1455 KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
1456 return(0);
1457 }
1f07f686 1458 }
b84de5af
MD
1459
1460 /*
1461 * Assign the create_tid for new records. Deletions already
1462 * have the record's entire key properly set up.
1463 */
1f07f686 1464 if (record->type != HAMMER_MEM_RECORD_DEL)
11ad5ade 1465 record->leaf.base.create_tid = trans->tid;
4e17f465
MD
1466 for (;;) {
1467 error = hammer_ip_sync_record_cursor(cursor, record);
1468 if (error != EDEADLK)
1469 break;
1470 hammer_done_cursor(cursor);
1471 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
1472 record->ip);
1473 if (error)
1474 break;
1475 }
1476 record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
c0ade690
MD
1477
1478 if (error) {
b3deaf57
MD
1479 error = -error;
1480 if (error != -ENOSPC) {
b84de5af
MD
1481 kprintf("hammer_sync_record_callback: sync failed rec "
1482 "%p, error %d\n", record, error);
1483 Debugger("sync failed rec");
b3deaf57 1484 }
c0ade690 1485 }
98f7132d 1486done:
d36ec43b 1487 hammer_flush_record_done(record, error);
b3deaf57 1488 return(error);
c0ade690
MD
1489}
1490
1491/*
1492 * XXX error handling
1493 */
1494int
1f07f686 1495hammer_sync_inode(hammer_inode_t ip)
c0ade690
MD
1496{
1497 struct hammer_transaction trans;
4e17f465 1498 struct hammer_cursor cursor;
1f07f686
MD
1499 hammer_record_t depend;
1500 hammer_record_t next;
ec4e8497 1501 int error, tmp_error;
1f07f686 1502 u_int64_t nlinks;
c0ade690 1503
1f07f686 1504 if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
d113fda1 1505 return(0);
d113fda1 1506
b84de5af 1507 hammer_start_transaction_fls(&trans, ip->hmp);
4e17f465
MD
1508 error = hammer_init_cursor(&trans, &cursor, &ip->cache[0], ip);
1509 if (error)
1510 goto done;
c0ade690 1511
ec4e8497 1512 /*
1f07f686
MD
1513 * Any directory records referencing this inode which are not in
1514 * our current flush group must adjust our nlink count for the
1515 * purposes of synchronization to disk.
1516 *
1517 * Records which are in our flush group can be unlinked from our
c4bae5fd
MD
1518 * inode now, potentially allowing the inode to be physically
1519 * deleted.
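	 *
	 * For example, a directory-entry ADD record that is not part of
	 * this flush group has not reached the media yet, so the on-disk
	 * nlinks must be one lower than the frontend count; a DEL record
	 * outside the group works the other way around (see the switch
	 * below).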
ec4e8497 1520 */
11ad5ade 1521 nlinks = ip->ino_data.nlinks;
1f07f686
MD
1522 next = TAILQ_FIRST(&ip->target_list);
1523 while ((depend = next) != NULL) {
1524 next = TAILQ_NEXT(depend, target_entry);
1525 if (depend->flush_state == HAMMER_FST_FLUSH &&
da2da375 1526 depend->flush_group == ip->hmp->flusher.act) {
c4bae5fd
MD
1527 /*
1528 * If this is an ADD that was deleted by the frontend
1529 * the frontend nlinks count will have already been
1530 * decremented, but the backend is going to sync its
1531 * directory entry and must account for it. The
1532 * record will be converted to a delete-on-disk when
1533 * it gets synced.
1534 *
1535 * If the ADD was not deleted by the frontend we
1536 * can remove the dependancy from our target_list.
1537 */
1538 if (depend->flags & HAMMER_RECF_DELETED_FE) {
1539 ++nlinks;
1540 } else {
1541 TAILQ_REMOVE(&ip->target_list, depend,
1542 target_entry);
1543 depend->target_ip = NULL;
1544 }
1f07f686 1545 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
c4bae5fd
MD
1546 /*
1547 * Not part of our flush group
1548 */
1549 KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
1f07f686
MD
1550 switch(depend->type) {
1551 case HAMMER_MEM_RECORD_ADD:
1552 --nlinks;
1553 break;
1554 case HAMMER_MEM_RECORD_DEL:
1555 ++nlinks;
1556 break;
e8599db1
MD
1557 default:
1558 break;
1f07f686 1559 }
ec4e8497 1560 }
ec4e8497
MD
1561 }
1562
c0ade690 1563 /*
1f07f686 1564 * Set dirty if we had to modify the link count.
c0ade690 1565 */
11ad5ade 1566 if (ip->sync_ino_data.nlinks != nlinks) {
1f07f686 1567 KKASSERT((int64_t)nlinks >= 0);
11ad5ade
MD
1568 ip->sync_ino_data.nlinks = nlinks;
1569 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1f07f686 1570 }
b84de5af 1571
1f07f686 1572 /*
869e8f55
MD
1573 * If there is a truncation queued, destroy any data past the (aligned)
1574 * truncation point. Userland will have dealt with the buffer
1575 * containing the truncation point for us.
1576 *
1577 * We don't flush pending frontend data buffers until after we've
1578 * dealt with the truncation.
1f07f686 1579 *
869e8f55 1580 * Don't bother if the inode is or has been deleted.
1f07f686 1581 */
869e8f55 1582 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
b84de5af
MD
1583 /*
1584 * Interlock trunc_off. The VOP front-end may continue to
1585 * make adjustments to it while we are blocked.
1586 */
1587 off_t trunc_off;
1588 off_t aligned_trunc_off;
c0ade690 1589
b84de5af
MD
1590 trunc_off = ip->sync_trunc_off;
1591 aligned_trunc_off = (trunc_off + HAMMER_BUFMASK) &
1592 ~HAMMER_BUFMASK64;
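		/*
		 * The rounding above pushes the truncation point up to a
		 * HAMMER buffer boundary; e.g. assuming 16KB buffers, a
		 * trunc_off of 0x1800 yields an aligned_trunc_off of
		 * 0x4000, leaving the partially valid boundary buffer to
		 * the frontend.
		 */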
1593
1594 /*
1595 * Delete any whole blocks on-media. The front-end has
1596 * already cleaned out any partial block and made it
1597 * pending. The front-end may have updated trunc_off
47637bff 1598 * while we were blocked so we only use sync_trunc_off.
b84de5af 1599 */
4e17f465 1600 error = hammer_ip_delete_range(&cursor, ip,
b84de5af 1601 aligned_trunc_off,
47637bff 1602 0x7FFFFFFFFFFFFFFFLL, 1);
b84de5af
MD
1603 if (error)
1604 Debugger("hammer_ip_delete_range errored");
47637bff
MD
1605
1606 /*
1607 * Clear the truncation flag on the backend after we have
1608 * complete the deletions. Backend data is now good again
1609 * (including new records we are about to sync, below).
1610 */
b84de5af 1611 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
47637bff 1612 ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL;
1f07f686
MD
1613 } else {
1614 error = 0;
f3b0f382
MD
1615 }
1616
1f07f686
MD
1617 /*
1618 * Now sync related records. These will typically be directory
1619 * entries or delete-on-disk records.
869e8f55
MD
1620 *
1621 * Not all records will be flushed, but clear XDIRTY anyway. We
1622 * will set it again in the frontend hammer_flush_inode_done()
1623 * if records remain.
1f07f686
MD
1624 */
1625 if (error == 0) {
1626 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
4e17f465 1627 hammer_sync_record_callback, &cursor);
1f07f686
MD
1628 if (tmp_error < 0)
1629 tmp_error = -error;
1630 if (tmp_error)
1631 error = tmp_error;
1632 }
1633
1634 /*
869e8f55
MD
1635 * If we are deleting the inode the frontend had better not have
1636 * any active references on elements making up the inode.
1f07f686 1637 */
11ad5ade 1638 if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
869e8f55
MD
1639 RB_EMPTY(&ip->rec_tree) &&
1640 (ip->sync_flags & HAMMER_INODE_DELETING) &&
1641 (ip->flags & HAMMER_INODE_DELETED) == 0) {
1642 int count1 = 0;
1f07f686 1643
869e8f55 1644 ip->flags |= HAMMER_INODE_DELETED;
4e17f465 1645 error = hammer_ip_delete_range_all(&cursor, ip, &count1);
869e8f55
MD
1646 if (error == 0) {
1647 ip->sync_flags &= ~HAMMER_INODE_DELETING;
1648 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
1649 KKASSERT(RB_EMPTY(&ip->rec_tree));
1f07f686 1650
869e8f55
MD
1651 /*
1652 * Set delete_tid in both the frontend and backend
1653 * copy of the inode record. The DELETED flag handles
1654 * this, do not set RDIRTY.
1655 */
11ad5ade
MD
1656 ip->ino_leaf.base.delete_tid = trans.tid;
1657 ip->sync_ino_leaf.base.delete_tid = trans.tid;

			/*
			 * Adjust the inode count in the volume header
			 */
			if (ip->flags & HAMMER_INODE_ONDISK) {
				hammer_modify_volume_field(&trans,
							trans.rootvol,
							vol0_stat_inodes);
				--ip->hmp->rootvol->ondisk->vol0_stat_inodes;
				hammer_modify_volume_done(trans.rootvol);
			}
		} else {
			ip->flags &= ~HAMMER_INODE_DELETED;
			Debugger("hammer_ip_delete_range_all errored");
		}
	}

	ip->sync_flags &= ~HAMMER_INODE_BUFS;

	if (error)
		Debugger("RB_SCAN errored");

	/*
	 * Now update the inode's on-disk inode-data and/or on-disk record.
	 * DELETED and ONDISK are managed only in ip->flags.
	 */
	switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
	case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
		/*
		 * If deleted and on-disk, don't set any additional flags.
		 * The delete flag takes care of things.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY|
				    HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
				    HAMMER_INODE_DELETING);
		break;
	case HAMMER_INODE_DELETED:
		/*
		 * Take care of the case where a deleted inode was never
		 * flushed to the disk in the first place.
		 *
		 * Clear flags which may have been set by the frontend.
		 */
		ip->sync_flags &= ~(HAMMER_INODE_DDIRTY|
				    HAMMER_INODE_XDIRTY|HAMMER_INODE_ITIMES|
				    HAMMER_INODE_DELETING);
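		/*
		 * Throw away any in-memory records still queued to the
		 * inode.  Nothing was ever written to the media for this
		 * inode, so there is nothing to delete on-disk; marking
		 * each record deleted on both the frontend and backend
		 * and dropping our reference destroys it.
		 */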
		while (RB_ROOT(&ip->rec_tree)) {
			hammer_record_t record = RB_ROOT(&ip->rec_tree);
			hammer_ref(&record->lock);
			KKASSERT(record->lock.refs == 1);
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
			hammer_rel_mem_record(record);
		}
		break;
	case HAMMER_INODE_ONDISK:
		/*
		 * If already on-disk, do not set any additional flags.
		 */
		break;
	default:
		/*
		 * If not on-disk and not deleted, set both dirty flags
		 * to force an initial record to be written.  Also set
		 * the create_tid for the inode.
		 *
		 * Set create_tid in both the frontend and backend
		 * copy of the inode record.
		 */
		ip->ino_leaf.base.create_tid = trans.tid;
		ip->sync_ino_leaf.base.create_tid = trans.tid;
		ip->sync_flags |= HAMMER_INODE_DDIRTY;
		break;
	}

	/*
	 * If RDIRTY or DDIRTY is set, write out a new record.  If the inode
	 * is already on-disk the old record is marked as deleted.
	 *
	 * If DELETED is set hammer_update_inode() will delete the existing
	 * record without writing out a new one.
	 *
	 * If *ONLY* the ITIMES flag is set we can update the record in-place.
	 */
	if (ip->flags & HAMMER_INODE_DELETED) {
		error = hammer_update_inode(&cursor, ip);
	} else
	if ((ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) ==
	    HAMMER_INODE_ITIMES) {
		error = hammer_update_itimes(&cursor, ip);
	} else
	if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_ITIMES)) {
		error = hammer_update_inode(&cursor, ip);
	}
	if (error)
		Debugger("hammer_update_itimes/inode errored");
done:
	/*
	 * Save the TID we used to sync the inode with to make sure we
	 * do not improperly reuse it.
	 */
	hammer_done_cursor(&cursor);
	hammer_done_transaction(&trans);
	return(error);
}

/*
 * This routine is called when the OS is no longer actively referencing
 * the inode (but might still be keeping it cached), or when releasing
 * the last reference to an inode.
 *
 * At this point if the inode's nlinks count is zero we want to destroy
 * it, which may mean destroying it on-media too.
 */
void
hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
{
	struct vnode *vp;

	/*
	 * Set the DELETING flag when the link count drops to 0 and the
	 * OS no longer has any opens on the inode.
	 *
	 * The backend will clear DELETING (a mod flag) and set DELETED
	 * (a state flag) when it is actually able to perform the
	 * operation.
	 */
	if (ip->ino_data.nlinks == 0 &&
	    (ip->flags & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
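		/*
		 * Set up a whole-file truncation.  TRUNCATED with a
		 * trunc_off of 0 tells the backend to delete the inode's
		 * entire data range when the deletion is flushed.
		 */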
		ip->flags |= HAMMER_INODE_DELETING;
		ip->flags |= HAMMER_INODE_TRUNCATED;
		ip->trunc_off = 0;
		vp = NULL;
		if (getvp) {
			if (hammer_get_vnode(ip, &vp) != 0)
				return;
		}

		/*
		 * Final cleanup
		 */
		if (ip->vp) {
			vtruncbuf(ip->vp, 0, HAMMER_BUFSIZE);
			vnode_pager_setsize(ip->vp, 0);
		}
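		/*
		 * vtruncbuf() discards the vnode's cached buffers and
		 * vnode_pager_setsize() shrinks the VM object to zero,
		 * so no stale file data lingers in the kernel caches.
		 */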
		if (getvp) {
			vput(vp);
		}
	}
}

1811/*
1812 * Re-test an inode when a dependancy had gone away to see if we
1813 * can chain flush it.
1814 */
1f07f686
MD
1815void
1816hammer_test_inode(hammer_inode_t ip)
1817{
1818 if (ip->flags & HAMMER_INODE_REFLUSH) {
1819 ip->flags &= ~HAMMER_INODE_REFLUSH;
1820 hammer_ref(&ip->lock);
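		/*
		 * Hold a reference on the inode across the flush call.
		 * RESIGNAL indicates the deferred flush was originally
		 * requested with HAMMER_FLUSH_SIGNAL, so re-issue it the
		 * same way.
		 */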
		if (ip->flags & HAMMER_INODE_RESIGNAL) {
			ip->flags &= ~HAMMER_INODE_RESIGNAL;
			hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
		} else {
			hammer_flush_inode(ip, 0);
		}
		hammer_rel_inode(ip, 0);
	}
}

/*
 * We need to slow down user processes if we get too large a backlog of
 * inodes in the flusher.  Even though the frontend can theoretically
 * get way, way ahead of the flusher, if we let it do that the flusher
 * will have no buffer cache locality of reference and will have to re-read
 * everything a second time, causing performance to drop precipitously.
 *
 * Reclaims are especially sensitive to this effect because the kernel has
 * already abandoned the related vnode.
 */
void
hammer_inode_waitreclaims(hammer_inode_t ip)
{
	hammer_mount_t hmp = ip->hmp;
	int delay;
	int factor;
	int flags = (ip->flags | ip->sync_flags);

	if ((flags & HAMMER_INODE_MODMASK) == 0)
		return;
	if ((flags & (HAMMER_INODE_MODMASK & ~HAMMER_INODE_MODEASY)) == 0) {
		factor = 2;
	} else {
		factor = 1;
	}
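
	/*
	 * Three tiers of back-pressure based on hmp->inode_reclaims:
	 * below HAMMER_RECLAIM_MID just kick the flusher, between MID
	 * and MAX stall for a delay that ramps linearly up to one second
	 * (hz ticks), and at MAX or beyond sleep in 1/10 second intervals
	 * until the backlog drops (hammer_inode_wakereclaims() issues an
	 * early wakeup once it falls below MAX).  The factor computed
	 * above halves the mid-tier delay for inodes with only
	 * HAMMER_INODE_MODEASY modifications pending.
	 */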

	while (hmp->inode_reclaims > HAMMER_RECLAIM_MIN) {
		if (hmp->inode_reclaims < HAMMER_RECLAIM_MID) {
			hammer_flusher_async(hmp);
			break;
		}
		if (hmp->inode_reclaims < HAMMER_RECLAIM_MAX) {
			delay = (hmp->inode_reclaims - HAMMER_RECLAIM_MID) *
				hz / (HAMMER_RECLAIM_MAX - HAMMER_RECLAIM_MID);
			delay = delay / factor;
			if (delay == 0)
				delay = 1;
			hammer_flusher_async(hmp);
			tsleep(&delay, 0, "hmitik", delay);
			break;
		}
		hmp->flags |= HAMMER_MOUNT_WAITIMAX;
		hammer_flusher_async(hmp);
		tsleep(&hmp->inode_reclaims, 0, "hmimax", hz / 10);
	}
}
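
/*
 * Wake up any processes stalled in hammer_inode_waitreclaims() once the
 * reclaim backlog has dropped back below HAMMER_RECLAIM_MAX.
 */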
void
hammer_inode_wakereclaims(hammer_mount_t hmp)
{
	if ((hmp->flags & HAMMER_MOUNT_WAITIMAX) &&
	    hmp->inode_reclaims < HAMMER_RECLAIM_MAX) {
		hmp->flags &= ~HAMMER_MOUNT_WAITIMAX;
		wakeup(&hmp->inode_reclaims);
	}
}