HAMMER VFS - Fix an edge case in hammer_inode_waitreclaims()
[dragonfly.git] / sys / vfs / hammer / hammer_inode.c
/*
 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_inode.c,v 1.114 2008/09/24 00:53:51 dillon Exp $
 */

#include "hammer.h"
#include <vm/vm_extern.h>

static int hammer_unload_inode(struct hammer_inode *ip);
static void hammer_free_inode(hammer_inode_t ip);
static void hammer_flush_inode_core(hammer_inode_t ip,
                        hammer_flush_group_t flg, int flags);
static int hammer_setup_child_callback(hammer_record_t rec, void *data);
#if 0
static int hammer_syncgrp_child_callback(hammer_record_t rec, void *data);
#endif
static int hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
                        hammer_flush_group_t flg);
static int hammer_setup_parent_inodes_helper(hammer_record_t record,
                        int depth, hammer_flush_group_t flg);
static void hammer_inode_wakereclaims(hammer_inode_t ip);
static struct hammer_inostats *hammer_inode_inostats(hammer_mount_t hmp,
                        pid_t pid);

#ifdef DEBUG_TRUNCATE
extern struct hammer_inode *HammerTruncIp;
#endif

/*
 * RB-Tree support for inode structures
 */
int
hammer_ino_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->obj_localization < ip2->obj_localization)
                return(-1);
        if (ip1->obj_localization > ip2->obj_localization)
                return(1);
        if (ip1->obj_id < ip2->obj_id)
                return(-1);
        if (ip1->obj_id > ip2->obj_id)
                return(1);
        if (ip1->obj_asof < ip2->obj_asof)
                return(-1);
        if (ip1->obj_asof > ip2->obj_asof)
                return(1);
        return(0);
}

int
hammer_redo_rb_compare(hammer_inode_t ip1, hammer_inode_t ip2)
{
        if (ip1->redo_fifo_start < ip2->redo_fifo_start)
                return(-1);
        if (ip1->redo_fifo_start > ip2->redo_fifo_start)
                return(1);
        return(0);
}
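
/*
 * Note: this comparator keys the redo tree (hmp->rb_redo_root, cf. the
 * HAMMER_INODE_RDIRTY handling in hammer_unload_inode()) on each inode's
 * redo_fifo_start, ordering inodes by where their outstanding REDO
 * coverage begins in the FIFO.
 */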

/*
 * RB-Tree support for inode structures / special LOOKUP_INFO
 */
static int
hammer_inode_info_cmp(hammer_inode_info_t info, hammer_inode_t ip)
{
        if (info->obj_localization < ip->obj_localization)
                return(-1);
        if (info->obj_localization > ip->obj_localization)
                return(1);
        if (info->obj_id < ip->obj_id)
                return(-1);
        if (info->obj_id > ip->obj_id)
                return(1);
        if (info->obj_asof < ip->obj_asof)
                return(-1);
        if (info->obj_asof > ip->obj_asof)
                return(1);
        return(0);
}

/*
 * Used by hammer_scan_inode_snapshots() to locate all of an object's
 * snapshots.  Note that the asof field is not tested, which we can get
 * away with because it is the lowest-priority field.
 */
static int
hammer_inode_info_cmp_all_history(hammer_inode_t ip, void *data)
{
        hammer_inode_info_t info = data;

        if (ip->obj_localization > info->obj_localization)
                return(1);
        if (ip->obj_localization < info->obj_localization)
                return(-1);
        if (ip->obj_id > info->obj_id)
                return(1);
        if (ip->obj_id < info->obj_id)
                return(-1);
        return(0);
}

/*
 * Used by hammer_unload_pseudofs() to locate all inodes associated with
 * a particular PFS.
 */
static int
hammer_inode_pfs_cmp(hammer_inode_t ip, void *data)
{
        u_int32_t localization = *(u_int32_t *)data;
        if (ip->obj_localization > localization)
                return(1);
        if (ip->obj_localization < localization)
                return(-1);
        return(0);
}

/*
 * RB-Tree support for pseudofs structures
 */
static int
hammer_pfs_rb_compare(hammer_pseudofs_inmem_t p1, hammer_pseudofs_inmem_t p2)
{
        if (p1->localization < p2->localization)
                return(-1);
        if (p1->localization > p2->localization)
                return(1);
        return(0);
}


RB_GENERATE(hammer_ino_rb_tree, hammer_inode, rb_node, hammer_ino_rb_compare);
RB_GENERATE_XLOOKUP(hammer_ino_rb_tree, INFO, hammer_inode, rb_node,
                hammer_inode_info_cmp, hammer_inode_info_t);
RB_GENERATE2(hammer_pfs_rb_tree, hammer_pseudofs_inmem, rb_node,
             hammer_pfs_rb_compare, u_int32_t, localization);
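
/*
 * The RB_GENERATE*() invocations above emit the tree functions used
 * throughout this file, e.g. hammer_ino_rb_tree_RB_LOOKUP_INFO() (from
 * the XLOOKUP variant) and hammer_ino_rb_tree_RB_SCAN().
 */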

/*
 * The kernel is not actively referencing this vnode but is still holding
 * it cached.
 *
 * This is called from the frontend.
 *
 * MPALMOSTSAFE
 */
int
hammer_vop_inactive(struct vop_inactive_args *ap)
{
        struct hammer_inode *ip = VTOI(ap->a_vp);

        /*
         * Degenerate case
         */
        if (ip == NULL) {
                vrecycle(ap->a_vp);
                return(0);
        }

        /*
         * If the inode no longer has visibility in the filesystem, try to
         * recycle it immediately, even if the inode is dirty.  Recycling
         * it quickly allows the system to reclaim buffer cache and VM
         * resources which can matter a lot in a heavily loaded system.
         *
         * This can deadlock in vfsync() if we aren't careful.
         *
         * Do not queue the inode to the flusher if we still have visibility,
         * otherwise namespace calls such as chmod will unnecessarily generate
         * multiple inode updates.
         */
        if (ip->ino_data.nlinks == 0) {
                get_mplock();
                hammer_inode_unloadable_check(ip, 0);
                if (ip->flags & HAMMER_INODE_MODMASK)
                        hammer_flush_inode(ip, 0);
                vrecycle(ap->a_vp);
                rel_mplock();
        }
        return(0);
}

/*
 * Release the vnode association.  This is typically (but not always)
 * the last reference on the inode.
 *
 * Once the association is lost we are on our own with regard to
 * flushing the inode.
 *
 * We must interlock ip->vp so hammer_get_vnode() can avoid races.
 */
int
hammer_vop_reclaim(struct vop_reclaim_args *ap)
{
        struct hammer_inode *ip;
        hammer_mount_t hmp;
        struct vnode *vp;

        vp = ap->a_vp;

        if ((ip = vp->v_data) != NULL) {
                hmp = ip->hmp;
                hammer_lock_ex(&ip->lock);
                vp->v_data = NULL;
                ip->vp = NULL;

                if ((ip->flags & HAMMER_INODE_RECLAIM) == 0) {
                        ++hammer_count_reclaiming;
                        ++hmp->inode_reclaims;
                        ip->flags |= HAMMER_INODE_RECLAIM;
                }
                hammer_unlock(&ip->lock);
                hammer_rel_inode(ip, 1);
        }
        return(0);
}
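
/*
 * Note: the HAMMER_INODE_RECLAIM accounting above (hammer_count_reclaiming,
 * hmp->inode_reclaims) feeds hammer_inode_waitreclaims(), which throttles
 * the creation of new inodes when too many reclaimed inodes are still
 * queued to the flusher; hammer_inode_wakereclaims() undoes the count.
 */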

/*
 * Return a locked vnode for the specified inode.  The inode must be
 * referenced but NOT LOCKED on entry and will remain referenced on
 * return.
 *
 * Called from the frontend.
 */
int
hammer_get_vnode(struct hammer_inode *ip, struct vnode **vpp)
{
        hammer_mount_t hmp;
        struct vnode *vp;
        int error = 0;
        u_int8_t obj_type;

        hmp = ip->hmp;

        for (;;) {
                if ((vp = ip->vp) == NULL) {
                        error = getnewvnode(VT_HAMMER, hmp->mp, vpp, 0, 0);
                        if (error)
                                break;
                        hammer_lock_ex(&ip->lock);
                        if (ip->vp != NULL) {
                                hammer_unlock(&ip->lock);
                                vp = *vpp;
                                vp->v_type = VBAD;
                                vx_put(vp);
                                continue;
                        }
                        hammer_ref(&ip->lock);
                        vp = *vpp;
                        ip->vp = vp;

                        obj_type = ip->ino_data.obj_type;
                        vp->v_type = hammer_get_vnode_type(obj_type);

                        hammer_inode_wakereclaims(ip);

                        switch(ip->ino_data.obj_type) {
                        case HAMMER_OBJTYPE_CDEV:
                        case HAMMER_OBJTYPE_BDEV:
                                vp->v_ops = &hmp->mp->mnt_vn_spec_ops;
                                addaliasu(vp, ip->ino_data.rmajor,
                                          ip->ino_data.rminor);
                                break;
                        case HAMMER_OBJTYPE_FIFO:
                                vp->v_ops = &hmp->mp->mnt_vn_fifo_ops;
                                break;
                        case HAMMER_OBJTYPE_REGFILE:
                                break;
                        default:
                                break;
                        }

                        /*
                         * Only mark as the root vnode if the ip is not
                         * historical, otherwise the VFS cache will get
                         * confused.  The other half of the special handling
                         * is in hammer_vop_nlookupdotdot().
                         *
                         * Pseudo-filesystem roots can be accessed via
                         * non-root filesystem paths and setting VROOT may
                         * confuse the namecache.  Set VPFSROOT instead.
                         */
                        if (ip->obj_id == HAMMER_OBJID_ROOT &&
                            ip->obj_asof == hmp->asof) {
                                if (ip->obj_localization == 0)
                                        vsetflags(vp, VROOT);
                                else
                                        vsetflags(vp, VPFSROOT);
                        }

                        vp->v_data = (void *)ip;
                        /* vnode locked by getnewvnode() */
                        /* make related vnode dirty if inode dirty? */
                        hammer_unlock(&ip->lock);
                        if (vp->v_type == VREG) {
                                vinitvmio(vp, ip->ino_data.size,
                                          hammer_blocksize(ip->ino_data.size),
                                          hammer_blockoff(ip->ino_data.size));
                        }
                        break;
                }

                /*
                 * Interlock vnode clearing.  This does not prevent the
                 * vnode from going into a reclaimed state but it does
                 * prevent it from being destroyed or reused so the vget()
                 * will properly fail.
                 */
                hammer_lock_ex(&ip->lock);
                if ((vp = ip->vp) == NULL) {
                        hammer_unlock(&ip->lock);
                        continue;
                }
                vhold_interlocked(vp);
                hammer_unlock(&ip->lock);

                /*
                 * Loop if the vget fails (aka races), or if the vp
                 * no longer matches ip->vp.
                 */
                if (vget(vp, LK_EXCLUSIVE) == 0) {
                        if (vp == ip->vp) {
                                vdrop(vp);
                                break;
                        }
                        vput(vp);
                }
                vdrop(vp);
        }
        *vpp = vp;
        return(error);
}
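
/*
 * Typical caller pattern (sketch; cf. the lookup paths in hammer_vnops.c):
 * acquire the inode ref first, attach the vnode, then drop the extra ref.
 * The vnode association holds its own inode ref, taken above.
 *
 *      ip = hammer_get_inode(trans, dip, obj_id, asof, localization,
 *                            0, &error);
 *      if (ip) {
 *              error = hammer_get_vnode(ip, &vp);
 *              hammer_rel_inode(ip, 0);
 *      }
 */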

/*
 * Locate all copies of the inode for obj_id compatible with the specified
 * asof, reference, and issue the related call-back.  This routine is used
 * for direct-io invalidation and does not create any new inodes.
 */
void
hammer_scan_inode_snapshots(hammer_mount_t hmp, hammer_inode_info_t iinfo,
                            int (*callback)(hammer_inode_t ip, void *data),
                            void *data)
{
        hammer_ino_rb_tree_RB_SCAN(&hmp->rb_inos_root,
                                   hammer_inode_info_cmp_all_history,
                                   callback, iinfo);
}

/*
 * Acquire a HAMMER inode.  The returned inode is not locked.  These functions
 * do not attach or detach the related vnode (use hammer_get_vnode() for
 * that).
 *
 * The flags argument is only applied for newly created inodes, and only
 * certain flags are inherited.
 *
 * Called from the frontend.
 */
struct hammer_inode *
hammer_get_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_node_cache *cachep;
        struct hammer_inode_info iinfo;
        struct hammer_cursor cursor;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a dummy inode we return a failure so dounlink
         * (which does another lookup) doesn't try to mess with the
         * link count.  hammer_vop_nresolve() uses hammer_get_dummy_inode()
         * to ref dummy inodes.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                *errorp = 0;
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags & HAMMER_INODE_RO;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        if (hmp->ronly)
                ip->flags |= HAMMER_INODE_RO;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Locate the on-disk inode.  If this is a PFS root we always
         * access the current version of the root inode and (if it is not
         * a master) always access information under it with a snapshot
         * TID.
         *
         * We cache recent inode lookups in this directory in dip->cache[2].
         * If we can't find it we assume the inode we are looking for is
         * close to the directory inode.
         */
retry:
        cachep = NULL;
        if (dip) {
                if (dip->cache[2].node)
                        cachep = &dip->cache[2];
                else
                        cachep = &dip->cache[0];
        }
        hammer_init_cursor(trans, &cursor, cachep, NULL);
        cursor.key_beg.localization = localization + HAMMER_LOCALIZE_INODE;
        cursor.key_beg.obj_id = ip->obj_id;
        cursor.key_beg.key = 0;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor.key_beg.obj_type = 0;

        cursor.asof = iinfo.obj_asof;
        cursor.flags = HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_GET_DATA |
                       HAMMER_CURSOR_ASOF;

        *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == EDEADLK) {
                hammer_done_cursor(&cursor);
                goto retry;
        }

        /*
         * On success the B-Tree lookup will hold the appropriate
         * buffer cache buffers and provide a pointer to the requested
         * information.  Copy the information to the in-memory inode
         * and cache the B-Tree node to improve future operations.
         */
        if (*errorp == 0) {
                ip->ino_leaf = cursor.node->ondisk->elms[cursor.index].leaf;
                ip->ino_data = cursor.data->inode;

                /*
                 * cache[0] tries to cache the location of the object inode.
                 * The assumption is that it is near the directory inode.
                 *
                 * cache[1] tries to cache the location of the object data.
                 * We might have something in the governing directory from
                 * scan optimizations (see the strategy code in
                 * hammer_vnops.c).
                 *
                 * We update dip->cache[2], if possible, with the location
                 * of the object inode for future directory shortcuts.
                 */
                hammer_cache_node(&ip->cache[0], cursor.node);
                if (dip) {
                        if (dip->cache[3].node) {
                                hammer_cache_node(&ip->cache[1],
                                                  dip->cache[3].node);
                        }
                        hammer_cache_node(&dip->cache[2], cursor.node);
                }

                /*
                 * The file should not contain any data past the file size
                 * stored in the inode.  Setting save_trunc_off to the
                 * file size instead of max reduces B-Tree lookup overheads
                 * on append by allowing the flusher to avoid checking for
                 * record overwrites.
                 */
                ip->save_trunc_off = ip->ino_data.size;

                /*
                 * Locate and assign the pseudofs management structure to
                 * the inode.
                 */
                if (dip && dip->obj_localization == ip->obj_localization) {
                        ip->pfsm = dip->pfsm;
                        hammer_ref(&ip->pfsm->lock);
                } else {
                        ip->pfsm = hammer_load_pseudofs(trans,
                                                        ip->obj_localization,
                                                        errorp);
                        *errorp = 0;    /* ignore ENOENT */
                }
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        hammer_done_cursor(&cursor);
                        goto loop;
                }
                ip->flags |= HAMMER_INODE_ONDISK;
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }

                hammer_free_inode(ip);
                ip = NULL;
        }
        hammer_done_cursor(&cursor);

        /*
         * NEWINODE is only set if the inode becomes dirty later,
         * setting it here just leads to unnecessary stalls.
         *
         * trans->flags |= HAMMER_TRANSF_NEWINODE;
         */
        return (ip);
}
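
/*
 * Note on the four node caches seeded above: ip->cache[0] tracks the
 * B-Tree node holding the object's inode record and ip->cache[1] the node
 * holding its data, while a directory's cache[2]/cache[3] remember the
 * most recently found inode/data positions for entries in that directory
 * so lookups of related objects can start nearby.
 */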

/*
 * Get a dummy inode to placemark a broken directory entry.
 */
struct hammer_inode *
hammer_get_dummy_inode(hammer_transaction_t trans, hammer_inode_t dip,
                 int64_t obj_id, hammer_tid_t asof, u_int32_t localization,
                 int flags, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        /*
         * Determine if we already have an inode cached.  If we do then
         * we are golden.
         *
         * If we find an inode with no vnode we have to mark the
         * transaction such that hammer_inode_waitreclaims() is
         * called later on to avoid building up an infinite number
         * of inodes.  Otherwise we can continue to add new inodes
         * faster than they can be disposed of, even with the tsleep
         * delay.
         *
         * If we find a non-fake inode we return an error.  Only fake
         * inodes can be returned by this routine.
         */
        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;
loop:
        *errorp = 0;
        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if ((ip->flags & HAMMER_INODE_DUMMY) == 0) {
                        *errorp = ENOENT;
                        return(NULL);
                }
                hammer_ref(&ip->lock);
                return(ip);
        }

        /*
         * Allocate a new inode structure and deal with races later.
         */
        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        ip->obj_id = obj_id;
        ip->obj_asof = iinfo.obj_asof;
        ip->obj_localization = localization;
        ip->hmp = hmp;
        ip->flags = flags | HAMMER_INODE_RO | HAMMER_INODE_DUMMY;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;
        ip->sync_trunc_off = ip->trunc_off = ip->save_trunc_off =
                0x7FFFFFFFFFFFFFFFLL;
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);
        hammer_ref(&ip->lock);

        /*
         * Populate the dummy inode.  Leave everything zero'd out.
         *
         * (ip->ino_leaf and ip->ino_data)
         *
         * Make the dummy inode a FIFO object which most copy programs
         * will properly ignore.
         */
        ip->save_trunc_off = ip->ino_data.size;
        ip->ino_data.obj_type = HAMMER_OBJTYPE_FIFO;

        /*
         * Locate and assign the pseudofs management structure to
         * the inode.
         */
        if (dip && dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
        } else {
                ip->pfsm = hammer_load_pseudofs(trans, ip->obj_localization,
                                                errorp);
                *errorp = 0;    /* ignore ENOENT */
        }

        /*
         * The inode is placed on the red-black tree and will be synced to
         * the media when flushed or by the filesystem sync.  If this races
         * another instantiation/lookup the insertion will fail.
         *
         * NOTE: Do not set HAMMER_INODE_ONDISK.  The inode is a fake.
         */
        if (*errorp == 0) {
                if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                        hammer_free_inode(ip);
                        goto loop;
                }
        } else {
                if (ip->flags & HAMMER_INODE_RSV_INODES) {
                        ip->flags &= ~HAMMER_INODE_RSV_INODES; /* sanity */
                        --hmp->rsv_inodes;
                }
                hammer_free_inode(ip);
                ip = NULL;
        }
        trans->flags |= HAMMER_TRANSF_NEWINODE;
        return (ip);
}

/*
 * Return a referenced inode only if it is in our inode cache.
 *
 * Dummy inodes do not count.
 */
struct hammer_inode *
hammer_find_inode(hammer_transaction_t trans, int64_t obj_id,
                  hammer_tid_t asof, u_int32_t localization)
{
        hammer_mount_t hmp = trans->hmp;
        struct hammer_inode_info iinfo;
        struct hammer_inode *ip;

        iinfo.obj_id = obj_id;
        iinfo.obj_asof = asof;
        iinfo.obj_localization = localization;

        ip = hammer_ino_rb_tree_RB_LOOKUP_INFO(&hmp->rb_inos_root, &iinfo);
        if (ip) {
                if (ip->flags & HAMMER_INODE_DUMMY)
                        ip = NULL;
                else
                        hammer_ref(&ip->lock);
        }
        return(ip);
}

/*
 * Create a new filesystem object, returning the inode in *ipp.  The
 * returned inode will be referenced.  The inode is created in-memory.
 *
 * If pfsm is non-NULL the caller wishes to create the root inode for
 * a master PFS.
 */
int
hammer_create_inode(hammer_transaction_t trans, struct vattr *vap,
                    struct ucred *cred,
                    hammer_inode_t dip, const char *name, int namelen,
                    hammer_pseudofs_inmem_t pfsm, struct hammer_inode **ipp)
{
        hammer_mount_t hmp;
        hammer_inode_t ip;
        uid_t xuid;
        int error;
        int64_t namekey;
        u_int32_t dummy;

        hmp = trans->hmp;

        ip = kmalloc(sizeof(*ip), hmp->m_inodes, M_WAITOK|M_ZERO);
        ++hammer_count_inodes;
        ++hmp->count_inodes;
        trans->flags |= HAMMER_TRANSF_NEWINODE;

        if (pfsm) {
                KKASSERT(pfsm->localization != 0);
                ip->obj_id = HAMMER_OBJID_ROOT;
                ip->obj_localization = pfsm->localization;
        } else {
                KKASSERT(dip != NULL);
                namekey = hammer_directory_namekey(dip, name, namelen, &dummy);
                ip->obj_id = hammer_alloc_objid(hmp, dip, namekey);
                ip->obj_localization = dip->obj_localization;
        }

        KKASSERT(ip->obj_id != 0);
        ip->obj_asof = hmp->asof;
        ip->hmp = hmp;
        ip->flush_state = HAMMER_FST_IDLE;
        ip->flags = HAMMER_INODE_DDIRTY |
                    HAMMER_INODE_ATIME | HAMMER_INODE_MTIME;
        ip->cache[0].ip = ip;
        ip->cache[1].ip = ip;
        ip->cache[2].ip = ip;
        ip->cache[3].ip = ip;

        ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
        /* ip->save_trunc_off = 0; (already zero) */
        RB_INIT(&ip->rec_tree);
        TAILQ_INIT(&ip->target_list);

        ip->ino_data.atime = trans->time;
        ip->ino_data.mtime = trans->time;
        ip->ino_data.size = 0;
        ip->ino_data.nlinks = 0;

        /*
         * A nohistory designator on the parent directory is inherited by
         * the child.  We will do this even for pseudo-fs creation... the
         * sysad can turn it off.
         */
        if (dip) {
                ip->ino_data.uflags = dip->ino_data.uflags &
                                      (SF_NOHISTORY|UF_NOHISTORY|UF_NODUMP);
        }

        ip->ino_leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        ip->ino_leaf.base.localization = ip->obj_localization +
                                         HAMMER_LOCALIZE_INODE;
        ip->ino_leaf.base.obj_id = ip->obj_id;
        ip->ino_leaf.base.key = 0;
        ip->ino_leaf.base.create_tid = 0;
        ip->ino_leaf.base.delete_tid = 0;
        ip->ino_leaf.base.rec_type = HAMMER_RECTYPE_INODE;
        ip->ino_leaf.base.obj_type = hammer_get_obj_type(vap->va_type);

        ip->ino_data.obj_type = ip->ino_leaf.base.obj_type;
        ip->ino_data.version = HAMMER_INODE_DATA_VERSION;
        ip->ino_data.mode = vap->va_mode;
        ip->ino_data.ctime = trans->time;

        /*
         * If we are running version 2 or greater, directory entries are
         * inode-localized instead of data-localized.
         */
        if (trans->hmp->version >= HAMMER_VOL_VERSION_TWO) {
                if (ip->ino_leaf.base.obj_type == HAMMER_OBJTYPE_DIRECTORY) {
                        ip->ino_data.cap_flags |=
                                HAMMER_INODE_CAP_DIR_LOCAL_INO;
                }
        }

        /*
         * Set up the ".." pointer.  This only needs to be done for
         * directories but we do it for all objects as a recovery aid.
         */
        if (dip)
                ip->ino_data.parent_obj_id = dip->ino_leaf.base.obj_id;
#if 0
        /*
         * The parent_obj_localization field only applies to pseudo-fs roots.
         * XXX this is no longer applicable, PFSs are no longer directly
         * tied into the parent's directory structure.
         */
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DIRECTORY &&
            ip->obj_id == HAMMER_OBJID_ROOT) {
                ip->ino_data.ext.obj.parent_obj_localization =
                        dip->obj_localization;
        }
#endif

        switch(ip->ino_leaf.base.obj_type) {
        case HAMMER_OBJTYPE_CDEV:
        case HAMMER_OBJTYPE_BDEV:
                ip->ino_data.rmajor = vap->va_rmajor;
                ip->ino_data.rminor = vap->va_rminor;
                break;
        default:
                break;
        }

        /*
         * Calculate default uid/gid and overwrite with information from
         * the vap.
         */
        if (dip) {
                xuid = hammer_to_unix_xid(&dip->ino_data.uid);
                xuid = vop_helper_create_uid(hmp->mp, dip->ino_data.mode,
                                             xuid, cred, &vap->va_mode);
        } else {
                xuid = 0;
        }
        ip->ino_data.mode = vap->va_mode;

        if (vap->va_vaflags & VA_UID_UUID_VALID)
                ip->ino_data.uid = vap->va_uid_uuid;
        else if (vap->va_uid != (uid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.uid, vap->va_uid);
        else
                hammer_guid_to_uuid(&ip->ino_data.uid, xuid);

        if (vap->va_vaflags & VA_GID_UUID_VALID)
                ip->ino_data.gid = vap->va_gid_uuid;
        else if (vap->va_gid != (gid_t)VNOVAL)
                hammer_guid_to_uuid(&ip->ino_data.gid, vap->va_gid);
        else if (dip)
                ip->ino_data.gid = dip->ino_data.gid;

        hammer_ref(&ip->lock);

        if (pfsm) {
                ip->pfsm = pfsm;
                hammer_ref(&pfsm->lock);
                error = 0;
        } else if (dip->obj_localization == ip->obj_localization) {
                ip->pfsm = dip->pfsm;
                hammer_ref(&ip->pfsm->lock);
                error = 0;
        } else {
                ip->pfsm = hammer_load_pseudofs(trans,
                                                ip->obj_localization,
                                                &error);
                error = 0;      /* ignore ENOENT */
        }

        if (error) {
                hammer_free_inode(ip);
                ip = NULL;
        } else if (RB_INSERT(hammer_ino_rb_tree, &hmp->rb_inos_root, ip)) {
                panic("hammer_create_inode: duplicate obj_id %llx",
                      (long long)ip->obj_id);
                /* not reached */
                hammer_free_inode(ip);
        }
        *ipp = ip;
        return(error);
}

/*
 * Final cleanup / freeing of an inode structure
 */
static void
hammer_free_inode(hammer_inode_t ip)
{
        struct hammer_mount *hmp;

        hmp = ip->hmp;
        KKASSERT(hammer_oneref(&ip->lock));
        hammer_uncache_node(&ip->cache[0]);
        hammer_uncache_node(&ip->cache[1]);
        hammer_uncache_node(&ip->cache[2]);
        hammer_uncache_node(&ip->cache[3]);
        hammer_inode_wakereclaims(ip);
        if (ip->objid_cache)
                hammer_clear_objid(ip);
        --hammer_count_inodes;
        --hmp->count_inodes;
        if (ip->pfsm) {
                hammer_rel_pseudofs(hmp, ip->pfsm);
                ip->pfsm = NULL;
        }
        kfree(ip, hmp->m_inodes);
        ip = NULL;
}

/*
 * Retrieve pseudo-fs data.  NULL will never be returned.
 *
 * If an error occurs *errorp will be set and a default template is
 * returned, otherwise *errorp is set to 0.  Typically when an error
 * occurs it will be ENOENT.
 */
hammer_pseudofs_inmem_t
hammer_load_pseudofs(hammer_transaction_t trans,
                     u_int32_t localization, int *errorp)
{
        hammer_mount_t hmp = trans->hmp;
        hammer_inode_t ip;
        hammer_pseudofs_inmem_t pfsm;
        struct hammer_cursor cursor;
        int bytes;

retry:
        pfsm = RB_LOOKUP(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, localization);
        if (pfsm) {
                hammer_ref(&pfsm->lock);
                *errorp = 0;
                return(pfsm);
        }

        /*
         * PFS records are stored in the root inode (not the PFS root inode,
         * but the real root).  Avoid an infinite recursion if loading
         * the PFS for the real root.
         */
        if (localization) {
                ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT,
                                      HAMMER_MAX_TID,
                                      HAMMER_DEF_LOCALIZATION, 0, errorp);
        } else {
                ip = NULL;
        }

        pfsm = kmalloc(sizeof(*pfsm), hmp->m_misc, M_WAITOK | M_ZERO);
        pfsm->localization = localization;
        pfsm->pfsd.unique_uuid = trans->rootvol->ondisk->vol_fsid;
        pfsm->pfsd.shared_uuid = pfsm->pfsd.unique_uuid;

        hammer_init_cursor(trans, &cursor, (ip ? &ip->cache[1] : NULL), ip);
        cursor.key_beg.localization = HAMMER_DEF_LOCALIZATION +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        if (ip)
                *errorp = hammer_ip_lookup(&cursor);
        else
                *errorp = hammer_btree_lookup(&cursor);
        if (*errorp == 0) {
                *errorp = hammer_ip_resolve_data(&cursor);
                if (*errorp == 0) {
                        if (cursor.data->pfsd.mirror_flags &
                            HAMMER_PFSD_DELETED) {
                                *errorp = ENOENT;
                        } else {
                                bytes = cursor.leaf->data_len;
                                if (bytes > sizeof(pfsm->pfsd))
                                        bytes = sizeof(pfsm->pfsd);
                                bcopy(cursor.data, &pfsm->pfsd, bytes);
                        }
                }
        }
        hammer_done_cursor(&cursor);

        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_ref(&pfsm->lock);
        if (ip)
                hammer_rel_inode(ip, 0);
        if (RB_INSERT(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm)) {
                kfree(pfsm, hmp->m_misc);
                goto retry;
        }
        return(pfsm);
}
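
/*
 * Note the allocate-then-insert idiom above: the new pfsm is constructed
 * without holding a tree lock, and if RB_INSERT() reports a collision
 * another thread loaded the same PFS first, so ours is freed and the
 * lookup retried.  hammer_get_inode() uses the same pattern for the
 * inode tree.
 */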

/*
 * Store pseudo-fs data.  The backend will automatically delete any prior
 * on-disk pseudo-fs data but we have to delete in-memory versions.
 */
int
hammer_save_pseudofs(hammer_transaction_t trans, hammer_pseudofs_inmem_t pfsm)
{
        struct hammer_cursor cursor;
        hammer_record_t record;
        hammer_inode_t ip;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              HAMMER_DEF_LOCALIZATION, 0, &error);
retry:
        pfsm->fsid_udev = hammer_fsid_to_udev(&pfsm->pfsd.shared_uuid);
        hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
        cursor.key_beg.localization = ip->obj_localization +
                                      HAMMER_LOCALIZE_MISC;
        cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
        cursor.key_beg.create_tid = 0;
        cursor.key_beg.delete_tid = 0;
        cursor.key_beg.rec_type = HAMMER_RECTYPE_PFS;
        cursor.key_beg.obj_type = 0;
        cursor.key_beg.key = pfsm->localization;
        cursor.asof = HAMMER_MAX_TID;
        cursor.flags |= HAMMER_CURSOR_ASOF;

        /*
         * Replace any in-memory version of the record.
         */
        error = hammer_ip_lookup(&cursor);
        if (error == 0 && hammer_cursor_inmem(&cursor)) {
                record = cursor.iprec;
                if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
                        KKASSERT(cursor.deadlk_rec == NULL);
                        hammer_ref(&record->lock);
                        cursor.deadlk_rec = record;
                        error = EDEADLK;
                } else {
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        error = 0;
                }
        }

        /*
         * Allocate replacement general record.  The backend flush will
         * delete any on-disk version of the record.
         */
        if (error == 0 || error == ENOENT) {
                record = hammer_alloc_mem_record(ip, sizeof(pfsm->pfsd));
                record->type = HAMMER_MEM_RECORD_GENERAL;

                record->leaf.base.localization = ip->obj_localization +
                                                 HAMMER_LOCALIZE_MISC;
                record->leaf.base.rec_type = HAMMER_RECTYPE_PFS;
                record->leaf.base.key = pfsm->localization;
                record->leaf.data_len = sizeof(pfsm->pfsd);
                bcopy(&pfsm->pfsd, record->data, sizeof(pfsm->pfsd));
                error = hammer_ip_add_record(trans, record);
        }
        hammer_done_cursor(&cursor);
        if (error == EDEADLK)
                goto retry;
        hammer_rel_inode(ip, 0);
        return(error);
}

/*
 * Create a root directory for a PFS if one does not already exist.
 *
 * The PFS root stands alone so we must also bump the nlinks count
 * to prevent it from being destroyed on release.
 */
int
hammer_mkroot_pseudofs(hammer_transaction_t trans, struct ucred *cred,
                       hammer_pseudofs_inmem_t pfsm)
{
        hammer_inode_t ip;
        struct vattr vap;
        int error;

        ip = hammer_get_inode(trans, NULL, HAMMER_OBJID_ROOT, HAMMER_MAX_TID,
                              pfsm->localization, 0, &error);
        if (ip == NULL) {
                vattr_null(&vap);
                vap.va_mode = 0755;
                vap.va_type = VDIR;
                error = hammer_create_inode(trans, &vap, cred,
                                            NULL, NULL, 0,
                                            pfsm, &ip);
                if (error == 0) {
                        ++ip->ino_data.nlinks;
                        hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
                }
        }
        if (ip)
                hammer_rel_inode(ip, 0);
        return(error);
}

/*
 * Unload any vnodes & inodes associated with a PFS, return ENOTEMPTY
 * if we are unable to disassociate all the inodes.
 */
static
int
hammer_unload_pseudofs_callback(hammer_inode_t ip, void *data)
{
        int res;

        hammer_ref(&ip->lock);
        if (hammer_isactive(&ip->lock) == 2 && ip->vp)
                vclean_unlocked(ip->vp);
        if (hammer_isactive(&ip->lock) == 1 && ip->vp == NULL)
                res = 0;
        else
                res = -1;       /* stop, someone is using the inode */
        hammer_rel_inode(ip, 0);
        return(res);
}

int
hammer_unload_pseudofs(hammer_transaction_t trans, u_int32_t localization)
{
        int res;
        int try;

        for (try = res = 0; try < 4; ++try) {
                res = hammer_ino_rb_tree_RB_SCAN(&trans->hmp->rb_inos_root,
                                           hammer_inode_pfs_cmp,
                                           hammer_unload_pseudofs_callback,
                                           &localization);
                if (res == 0 && try > 1)
                        break;
                hammer_flusher_sync(trans->hmp);
        }
        if (res != 0)
                res = ENOTEMPTY;
        return(res);
}
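
/*
 * Note: the loop above makes up to four scan passes with a full flusher
 * sync between passes, and only accepts a clean result (res == 0) on the
 * third pass or later (try > 1), giving dirty inodes two flush cycles to
 * settle before the PFS unload fails with ENOTEMPTY.
 */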


/*
 * Release a reference on a PFS
 */
void
hammer_rel_pseudofs(hammer_mount_t hmp, hammer_pseudofs_inmem_t pfsm)
{
        hammer_rel(&pfsm->lock);
        if (hammer_norefs(&pfsm->lock)) {
                RB_REMOVE(hammer_pfs_rb_tree, &hmp->rb_pfsm_root, pfsm);
                kfree(pfsm, hmp->m_misc);
        }
}

/*
 * Called by hammer_sync_inode().
 */
static int
hammer_update_inode(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        hammer_record_t record;
        int error;
        int redirty;

retry:
        error = 0;

        /*
         * If the inode has a presence on-disk then locate it and mark
         * it deleted, setting DELONDISK.
         *
         * The record may or may not be physically deleted, depending on
         * the retention policy.
         */
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) ==
            HAMMER_INODE_ONDISK) {
                hammer_normalize_cursor(cursor);
                cursor->key_beg.localization = ip->obj_localization +
                                               HAMMER_LOCALIZE_INODE;
                cursor->key_beg.obj_id = ip->obj_id;
                cursor->key_beg.key = 0;
                cursor->key_beg.create_tid = 0;
                cursor->key_beg.delete_tid = 0;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
                cursor->key_beg.obj_type = 0;
                cursor->asof = ip->obj_asof;
                cursor->flags &= ~HAMMER_CURSOR_INITMASK;
                cursor->flags |= HAMMER_CURSOR_GET_LEAF | HAMMER_CURSOR_ASOF;
                cursor->flags |= HAMMER_CURSOR_BACKEND;

                error = hammer_btree_lookup(cursor);
                if (hammer_debug_inode)
                        kprintf("IPDEL %p %08x %d", ip, ip->flags, error);

                if (error == 0) {
                        error = hammer_ip_delete_record(cursor, ip, trans->tid);
                        if (hammer_debug_inode)
                                kprintf(" error %d\n", error);
                        if (error == 0) {
                                ip->flags |= HAMMER_INODE_DELONDISK;
                        }
                        if (cursor->node)
                                hammer_cache_node(&ip->cache[0], cursor->node);
                }
                if (error == EDEADLK) {
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("IPDED %p %d\n", ip, error);
                        if (error == 0)
                                goto retry;
                }
        }

        /*
         * Ok, write out the initial record or a new record (after deleting
         * the old one), unless the DELETED flag is set.  This routine will
         * clear DELONDISK if it writes out a record.
         *
         * Update our inode statistics if this is the first application of
         * the inode on-disk.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED) == 0) {
                /*
                 * Generate a record and write it to the media.  We clean
                 * up the state before releasing so we do not have to set
                 * up a flush_group.
                 */
                record = hammer_alloc_mem_record(ip, 0);
                record->type = HAMMER_MEM_RECORD_INODE;
                record->flush_state = HAMMER_FST_FLUSH;
                record->leaf = ip->sync_ino_leaf;
                record->leaf.base.create_tid = trans->tid;
                record->leaf.data_len = sizeof(ip->sync_ino_data);
                record->leaf.create_ts = trans->time32;
                record->data = (void *)&ip->sync_ino_data;
                record->flags |= HAMMER_RECF_INTERLOCK_BE;

                /*
                 * If this flag is set we cannot sync the new file size
                 * because we haven't finished related truncations.  The
                 * inode will be flushed in another flush group to finish
                 * the job.
                 */
                if ((ip->flags & HAMMER_INODE_WOULDBLOCK) &&
                    ip->sync_ino_data.size != ip->ino_data.size) {
                        redirty = 1;
                        ip->sync_ino_data.size = ip->ino_data.size;
                } else {
                        redirty = 0;
                }

                for (;;) {
                        error = hammer_ip_sync_record_cursor(cursor, record);
                        if (hammer_debug_inode)
                                kprintf("GENREC %p rec %08x %d\n",
                                        ip, record->flags, error);
                        if (error != EDEADLK)
                                break;
                        hammer_done_cursor(cursor);
                        error = hammer_init_cursor(trans, cursor,
                                                   &ip->cache[0], ip);
                        if (hammer_debug_inode)
                                kprintf("GENREC reinit %d\n", error);
                        if (error)
                                break;
                }

                /*
                 * Note:  The record was never on the inode's record tree
                 * so just wave our hands importantly and destroy it.
                 */
                record->flags |= HAMMER_RECF_COMMITTED;
                record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
                record->flush_state = HAMMER_FST_IDLE;
                ++ip->rec_generation;
                hammer_rel_mem_record(record);

                /*
                 * Finish up.
                 */
                if (error == 0) {
                        if (hammer_debug_inode)
                                kprintf("CLEANDELOND %p %08x\n", ip, ip->flags);
                        ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                            HAMMER_INODE_SDIRTY |
                                            HAMMER_INODE_ATIME |
                                            HAMMER_INODE_MTIME);
                        ip->flags &= ~HAMMER_INODE_DELONDISK;
                        if (redirty)
                                ip->sync_flags |= HAMMER_INODE_DDIRTY;

                        /*
                         * Root volume count of inodes
                         */
                        hammer_sync_lock_sh(trans);
                        if ((ip->flags & HAMMER_INODE_ONDISK) == 0) {
                                hammer_modify_volume_field(trans,
                                                           trans->rootvol,
                                                           vol0_stat_inodes);
                                ++ip->hmp->rootvol->ondisk->vol0_stat_inodes;
                                hammer_modify_volume_done(trans->rootvol);
                                ip->flags |= HAMMER_INODE_ONDISK;
                                if (hammer_debug_inode)
                                        kprintf("NOWONDISK %p\n", ip);
                        }
                        hammer_sync_unlock(trans);
                }
        }

        /*
         * If the inode has been destroyed, clean out any left-over flags
         * that may have been set by the frontend.
         */
        if (error == 0 && (ip->flags & HAMMER_INODE_DELETED)) {
                ip->sync_flags &= ~(HAMMER_INODE_DDIRTY |
                                    HAMMER_INODE_SDIRTY |
                                    HAMMER_INODE_ATIME |
                                    HAMMER_INODE_MTIME);
        }
        return(error);
}

/*
 * Update only the itimes fields.
 *
 * ATIME can be updated without generating any UNDO.  MTIME is updated
 * with UNDO so it is guaranteed to be synchronized properly in case of
 * a crash.
 *
 * Neither field is included in the B-Tree leaf element's CRC, which is how
 * we can get away with updating ATIME the way we do.
 */
static int
hammer_update_itimes(hammer_cursor_t cursor, hammer_inode_t ip)
{
        hammer_transaction_t trans = cursor->trans;
        int error;

retry:
        if ((ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DELONDISK)) !=
            HAMMER_INODE_ONDISK) {
                return(0);
        }

        hammer_normalize_cursor(cursor);
        cursor->key_beg.localization = ip->obj_localization +
                                       HAMMER_LOCALIZE_INODE;
        cursor->key_beg.obj_id = ip->obj_id;
        cursor->key_beg.key = 0;
        cursor->key_beg.create_tid = 0;
        cursor->key_beg.delete_tid = 0;
        cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE;
        cursor->key_beg.obj_type = 0;
        cursor->asof = ip->obj_asof;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_ASOF;
        cursor->flags |= HAMMER_CURSOR_GET_LEAF;
        cursor->flags |= HAMMER_CURSOR_GET_DATA;
        cursor->flags |= HAMMER_CURSOR_BACKEND;

        error = hammer_btree_lookup(cursor);
        if (error == 0) {
                hammer_cache_node(&ip->cache[0], cursor->node);
                if (ip->sync_flags & HAMMER_INODE_MTIME) {
                        /*
                         * Updating MTIME requires an UNDO.  Just cover
                         * both atime and mtime.
                         */
                        hammer_sync_lock_sh(trans);
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                     HAMMER_ITIMES_BASE(&cursor->data->inode),
                                     HAMMER_ITIMES_BYTES);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        cursor->data->inode.mtime = ip->sync_ino_data.mtime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                        hammer_sync_unlock(trans);
                } else if (ip->sync_flags & HAMMER_INODE_ATIME) {
                        /*
                         * Updating atime only can be done in-place with
                         * no UNDO.
                         */
                        hammer_sync_lock_sh(trans);
                        hammer_modify_buffer(trans, cursor->data_buffer,
                                             NULL, 0);
                        cursor->data->inode.atime = ip->sync_ino_data.atime;
                        hammer_modify_buffer_done(cursor->data_buffer);
                        hammer_sync_unlock(trans);
                }
                ip->sync_flags &= ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME);
        }
        if (error == EDEADLK) {
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor,
                                           &ip->cache[0], ip);
                if (error == 0)
                        goto retry;
        }
        return(error);
}

/*
 * Release a reference on an inode, flush as requested.
 *
 * On the last reference we queue the inode to the flusher for its final
 * disposition.
 */
void
hammer_rel_inode(struct hammer_inode *ip, int flush)
{
        /*hammer_mount_t hmp = ip->hmp;*/

        /*
         * Handle disposition when dropping the last ref.
         */
        for (;;) {
                if (hammer_oneref(&ip->lock)) {
                        /*
                         * Determine whether on-disk action is needed for
                         * the inode's final disposition.
                         */
                        KKASSERT(ip->vp == NULL);
                        hammer_inode_unloadable_check(ip, 0);
                        if (ip->flags & HAMMER_INODE_MODMASK) {
                                hammer_flush_inode(ip, 0);
                        } else if (hammer_oneref(&ip->lock)) {
                                hammer_unload_inode(ip);
                                break;
                        }
                } else {
                        if (flush)
                                hammer_flush_inode(ip, 0);

                        /*
                         * The inode still has multiple refs, try to drop
                         * one ref.
                         */
                        KKASSERT(hammer_isactive(&ip->lock) >= 1);
                        if (hammer_isactive(&ip->lock) > 1) {
                                hammer_rel(&ip->lock);
                                break;
                        }
                }
        }
}

/*
 * Unload and destroy the specified inode.  Must be called with one remaining
 * reference.  The reference is disposed of.
 *
 * The inode must be completely clean.
 */
static int
hammer_unload_inode(struct hammer_inode *ip)
{
        hammer_mount_t hmp = ip->hmp;

        KASSERT(hammer_oneref(&ip->lock),
                ("hammer_unload_inode: %d refs\n", hammer_isactive(&ip->lock)));
        KKASSERT(ip->vp == NULL);
        KKASSERT(ip->flush_state == HAMMER_FST_IDLE);
        KKASSERT(ip->cursor_ip_refs == 0);
        KKASSERT(hammer_notlocked(&ip->lock));
        KKASSERT((ip->flags & HAMMER_INODE_MODMASK) == 0);

        KKASSERT(RB_EMPTY(&ip->rec_tree));
        KKASSERT(TAILQ_EMPTY(&ip->target_list));

        if (ip->flags & HAMMER_INODE_RDIRTY) {
                RB_REMOVE(hammer_redo_rb_tree, &hmp->rb_redo_root, ip);
                ip->flags &= ~HAMMER_INODE_RDIRTY;
        }
        RB_REMOVE(hammer_ino_rb_tree, &hmp->rb_inos_root, ip);

        hammer_free_inode(ip);
        return(0);
}

/*
 * Called during unmounting if a critical error occurred.  The in-memory
 * inode and all related structures are destroyed.
 *
 * If a critical error did not occur the unmount code calls the standard
 * release and asserts that the inode is gone.
 */
int
hammer_destroy_inode_callback(struct hammer_inode *ip, void *data __unused)
{
        hammer_record_t rec;

        /*
         * Get rid of the inode's in-memory records, regardless of their
         * state, and clear the mod-mask.
         */
        while ((rec = TAILQ_FIRST(&ip->target_list)) != NULL) {
                TAILQ_REMOVE(&ip->target_list, rec, target_entry);
                rec->target_ip = NULL;
                if (rec->flush_state == HAMMER_FST_SETUP)
                        rec->flush_state = HAMMER_FST_IDLE;
        }
        while ((rec = RB_ROOT(&ip->rec_tree)) != NULL) {
                if (rec->flush_state == HAMMER_FST_FLUSH)
                        --rec->flush_group->refs;
                else
                        hammer_ref(&rec->lock);
                KKASSERT(hammer_oneref(&rec->lock));
                rec->flush_state = HAMMER_FST_IDLE;
                rec->flush_group = NULL;
                rec->flags |= HAMMER_RECF_DELETED_FE; /* wave hands */
                rec->flags |= HAMMER_RECF_DELETED_BE; /* wave hands */
                ++ip->rec_generation;
                hammer_rel_mem_record(rec);
        }
        ip->flags &= ~HAMMER_INODE_MODMASK;
        ip->sync_flags &= ~HAMMER_INODE_MODMASK;
        KKASSERT(ip->vp == NULL);

        /*
         * Remove the inode from any flush group, force it idle.  FLUSH
         * and SETUP states have an inode ref.
         */
        switch(ip->flush_state) {
        case HAMMER_FST_FLUSH:
                RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
                --ip->flush_group->refs;
                ip->flush_group = NULL;
                /* fall through */
        case HAMMER_FST_SETUP:
                hammer_rel(&ip->lock);
                ip->flush_state = HAMMER_FST_IDLE;
                /* fall through */
        case HAMMER_FST_IDLE:
                break;
        }

        /*
         * There shouldn't be any associated vnode.  The unload needs at
         * least one ref; if we do have a vp, steal its ip ref.
         */
        if (ip->vp) {
                kprintf("hammer_destroy_inode_callback: Unexpected "
                        "vnode association ip %p vp %p\n", ip, ip->vp);
                ip->vp->v_data = NULL;
                ip->vp = NULL;
        } else {
                hammer_ref(&ip->lock);
        }
        hammer_unload_inode(ip);
        return(0);
}

/*
 * Called on mount -u when switching from RW to RO or vice-versa.  Adjust
 * the read-only flag for cached inodes.
 *
 * This routine is called from a RB_SCAN().
 */
int
hammer_reload_inode(hammer_inode_t ip, void *arg __unused)
{
        hammer_mount_t hmp = ip->hmp;

        if (hmp->ronly || hmp->asof != HAMMER_MAX_TID)
                ip->flags |= HAMMER_INODE_RO;
        else
                ip->flags &= ~HAMMER_INODE_RO;
        return(0);
}

/*
 * A transaction has modified an inode, requiring updates as specified by
 * the passed flags.
 *
 * HAMMER_INODE_DDIRTY: Inode data has been updated, not incl mtime/atime,
 *                      and not including size changes due to write-append
 *                      (but other size changes are included).
 * HAMMER_INODE_SDIRTY: Inode data has been updated, size changes due to
 *                      write-append.
 * HAMMER_INODE_XDIRTY: Dirty in-memory records
 * HAMMER_INODE_BUFS:   Dirty buffer cache buffers
 * HAMMER_INODE_DELETED: Inode record/data must be deleted
 * HAMMER_INODE_ATIME/MTIME: mtime/atime has been updated
 */
void
hammer_modify_inode(hammer_transaction_t trans, hammer_inode_t ip, int flags)
{
        /*
         * ronly of 0 or 2 does not trigger assertion.
         * 2 is a special error state
         */
        KKASSERT(ip->hmp->ronly != 1 ||
                 (flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
                           HAMMER_INODE_SDIRTY |
                           HAMMER_INODE_BUFS | HAMMER_INODE_DELETED |
                           HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) == 0);
        if ((ip->flags & HAMMER_INODE_RSV_INODES) == 0) {
                ip->flags |= HAMMER_INODE_RSV_INODES;
                ++ip->hmp->rsv_inodes;
        }

        /*
         * Set the NEWINODE flag in the transaction if the inode
         * transitions to a dirty state.  This is used to track
         * the load on the inode cache.
         */
        if (trans &&
            (ip->flags & HAMMER_INODE_MODMASK) == 0 &&
            (flags & HAMMER_INODE_MODMASK)) {
                trans->flags |= HAMMER_TRANSF_NEWINODE;
        }

        ip->flags |= flags;
}
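
/*
 * Usage sketch: a chmod-style attribute change is recorded as, e.g.,
 *
 *      ip->ino_data.mode = new_mode;   (new_mode: caller-supplied value)
 *      hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
 *
 * while a write that appends to the file would pass HAMMER_INODE_SDIRTY
 * (and typically HAMMER_INODE_BUFS for the dirty buffer cache buffers).
 */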
1626
1627/*
1f07f686 1628 * Request that an inode be flushed. This whole mess cannot block and may
7bc5b8c2
MD
1629 * recurse (if not synchronous). Once requested HAMMER will attempt to
1630 * actively flush the inode until the flush can be done.
b84de5af 1631 *
1f07f686
MD
1632 * The inode may already be flushing, or may be in a setup state. We can
1633 * place the inode in a flushing state if it is currently idle and flag it
1634 * to reflush if it is currently flushing.
7bc5b8c2 1635 *
4889cbd4
MD
1636 * Upon return if the inode could not be flushed due to a setup
1637 * dependancy, then it will be automatically flushed when the dependancy
1638 * is satisfied.
b84de5af
MD
1639 */
1640void
f90dde4c 1641hammer_flush_inode(hammer_inode_t ip, int flags)
b84de5af 1642{
7a61b85d
MD
1643 hammer_mount_t hmp;
1644 hammer_flush_group_t flg;
bf3b416b 1645 int good;
1f07f686
MD
1646
1647 /*
7b6ccb11
MD
1648 * next_flush_group is the first flush group we can place the inode
1649 * in. It may be NULL. If it becomes full we append a new flush
1650 * group and make that the next_flush_group.
7a61b85d
MD
1651 */
1652 hmp = ip->hmp;
7b6ccb11
MD
1653 while ((flg = hmp->next_flush_group) != NULL) {
1654 KKASSERT(flg->running == 0);
1655 if (flg->total_count + flg->refs <= ip->hmp->undo_rec_limit)
1656 break;
1657 hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
1658 hammer_flusher_async(ip->hmp, flg);
7a61b85d
MD
1659 }
1660 if (flg == NULL) {
bac808fe 1661 flg = kmalloc(sizeof(*flg), hmp->m_misc, M_WAITOK|M_ZERO);
7b6ccb11 1662 hmp->next_flush_group = flg;
ff003b11 1663 RB_INIT(&flg->flush_tree);
7a61b85d
MD
1664 TAILQ_INSERT_TAIL(&hmp->flush_group_list, flg, flush_entry);
1665 }
1666
1667 /*
1668 * Trivial 'nothing to flush' case. If the inode is in a SETUP
1f07f686
MD
1669 * state we have to put it back into an IDLE state so we can
1670 * drop the extra ref.
7a61b85d
MD
1671 *
1672 * If we have a parent dependency we must still fall through
1673 * so we can run it.
1f07f686 1674 */
4e17f465 1675 if ((ip->flags & HAMMER_INODE_MODMASK) == 0) {
7a61b85d
MD
1676 if (ip->flush_state == HAMMER_FST_SETUP &&
1677 TAILQ_EMPTY(&ip->target_list)) {
1f07f686
MD
1678 ip->flush_state = HAMMER_FST_IDLE;
1679 hammer_rel_inode(ip, 0);
ec4e8497 1680 }
7a61b85d
MD
1681 if (ip->flush_state == HAMMER_FST_IDLE)
1682 return;
b84de5af 1683 }
42c7d26b 1684
1f07f686
MD
1685 /*
1686 * Our flush action will depend on the current state.
1687 */
1688 switch(ip->flush_state) {
1689 case HAMMER_FST_IDLE:
1690 /*
1691 * We have no dependencies and can flush immediately. Some of
1692 * our children may not be flushable so we have to re-test
1693 * with that additional knowledge.
1694 */
7a61b85d 1695 hammer_flush_inode_core(ip, flg, flags);
1f07f686
MD
1696 break;
1697 case HAMMER_FST_SETUP:
1698 /*
1699 * Recurse upwards through dependencies via target_list
1700 * and start their flusher actions going if possible.
1701 *
1702 * 'good' is our connectivity. -1 means we have none and
1703 * can't flush, 0 means there weren't any dependencies, and
1704 * 1 means we have good connectivity.
1705 */
cc0758d0 1706 good = hammer_setup_parent_inodes(ip, 0, flg);
1f07f686 1707
1f07f686 1708 if (good >= 0) {
7b6ccb11
MD
1709 /*
1710 * We can continue if good >= 0. Determine how
1711 * many records under our inode can be flushed (and
1712 * mark them).
1713 */
7a61b85d 1714 hammer_flush_inode_core(ip, flg, flags);
1f07f686 1715 } else {
7b6ccb11 1716 /*
4889cbd4 1717 * Parent has no connectivity, tell it to flush
7b6ccb11 1718 * us as soon as it does.
4889cbd4
MD
1719 *
1720 * The REFLUSH flag is also needed to trigger
1721 * dependency wakeups.
7b6ccb11 1722 */
4889cbd4
MD
1723 ip->flags |= HAMMER_INODE_CONN_DOWN |
1724 HAMMER_INODE_REFLUSH;
4e17f465
MD
1725 if (flags & HAMMER_FLUSH_SIGNAL) {
1726 ip->flags |= HAMMER_INODE_RESIGNAL;
7a61b85d 1727 hammer_flusher_async(ip->hmp, flg);
4e17f465 1728 }
1f07f686
MD
1729 }
1730 break;
7b6ccb11 1731 case HAMMER_FST_FLUSH:
1f07f686
MD
1732 /*
1733 * We are already flushing, flag the inode to reflush
1734 * if needed after it completes its current flush.
4889cbd4
MD
1735 *
1736 * The REFLUSH flag is also needed to trigger
1737 * dependency wakeups.
1f07f686
MD
1738 */
1739 if ((ip->flags & HAMMER_INODE_REFLUSH) == 0)
1740 ip->flags |= HAMMER_INODE_REFLUSH;
4e17f465
MD
1741 if (flags & HAMMER_FLUSH_SIGNAL) {
1742 ip->flags |= HAMMER_INODE_RESIGNAL;
7a61b85d 1743 hammer_flusher_async(ip->hmp, flg);
4e17f465 1744 }
1f07f686
MD
1745 break;
1746 }
1747}
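
In summary (an informal restatement of the switch above, not new logic):

	IDLE  -> FLUSH    the inode is handed to hammer_flush_inode_core().
	SETUP -> FLUSH    if the upward dependency scan returns good >= 0.
	SETUP -> SETUP    plus CONN_DOWN|REFLUSH if good < 0; the parent's
	                  flush completion re-triggers us.
	FLUSH -> FLUSH    plus REFLUSH (and RESIGNAL if signalled) so the
	                  inode is queued again after the current flush.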
1748
1749/*
bf3b416b
MD
1750 * Scan ip->target_list, which is a list of records owned by PARENTS to our
1751 * ip which reference our ip.
1752 *
1753 * XXX This is a huge mess of recursive code, but not one bit of it blocks
1754 * so for now do not ref/deref the structures. Note that if we use the
1755 * ref/rel code later, the rel CAN block.
1756 */
1757static int
cc0758d0
MD
1758hammer_setup_parent_inodes(hammer_inode_t ip, int depth,
1759 hammer_flush_group_t flg)
bf3b416b
MD
1760{
1761 hammer_record_t depend;
bf3b416b
MD
1762 int good;
1763 int r;
1764
cc0758d0
MD
1765 /*
1766 * If we hit our recursion limit and we have parent dependencies,
1767 * we cannot continue. Returning < 0 will cause us to be flagged
1768 * for reflush. Returning -2 cuts off additional dependency checks
1769 * because they are likely to also hit the depth limit.
1770 *
1771 * We cannot return < 0 if there are no dependencies or there might
1772 * not be anything to wakeup (ip).
1773 */
1774 if (depth == 20 && TAILQ_FIRST(&ip->target_list)) {
1775 kprintf("HAMMER Warning: depth limit reached on "
1776 "setup recursion, inode %p %016llx\n",
1777 ip, (long long)ip->obj_id);
1778 return(-2);
1779 }
1780
1781 /*
1782 * Scan dependencies
1783 */
bf3b416b
MD
1784 good = 0;
1785 TAILQ_FOREACH(depend, &ip->target_list, target_entry) {
cc0758d0 1786 r = hammer_setup_parent_inodes_helper(depend, depth, flg);
bf3b416b 1787 KKASSERT(depend->target_ip == ip);
bf3b416b
MD
1788 if (r < 0 && good == 0)
1789 good = -1;
1790 if (r > 0)
1791 good = 1;
cc0758d0
MD
1792
1793 /*
1794 * If we failed due to the recursion depth limit then stop
1795 * now.
1796 */
1797 if (r == -2)
1798 break;
bf3b416b
MD
1799 }
1800 return(good);
bf3b416b
MD
1801}
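
Taken together (a paraphrase of the loop above): the scan returns 1 if any
parent record provides connectivity, -1 if at least one dependency failed
and none provided connectivity, and 0 if the target list contributed
nothing.  A helper return of -2 aborts the scan early because deeper
parents would hit the same depth limit.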
1802
1803/*
1804 * This helper function takes a record representing the dependency between
1805 * the parent inode and child inode.
1806 *
1807 * record->ip = parent inode
1808 * record->target_ip = child inode
1809 *
1f07f686 1810 * We are asked to recurse upwards and convert the record from SETUP
bf3b416b 1811 * to FLUSH if possible.
1f07f686
MD
1812 *
1813 * Return 1 if the record gives us connectivity
1814 *
1815 * Return 0 if the record is not relevant
1816 *
1817 * Return -1 if we can't resolve the dependency and there is no connectivity.
1818 */
1819static int
cc0758d0 1820hammer_setup_parent_inodes_helper(hammer_record_t record, int depth,
7a61b85d 1821 hammer_flush_group_t flg)
1f07f686 1822{
bf3b416b
MD
1823 hammer_mount_t hmp;
1824 hammer_inode_t pip;
1825 int good;
1f07f686
MD
1826
1827 KKASSERT(record->flush_state != HAMMER_FST_IDLE);
bf3b416b
MD
1828 pip = record->ip;
1829 hmp = pip->hmp;
1f07f686
MD
1830
1831 /*
1832 * If the record is already flushing, is it in our flush group?
1833 *
e8599db1
MD
1834 * If it is in our flush group but it is a general record or a
1835 * delete-on-disk, it does not improve our connectivity (return 0),
1836 * and if the target inode is not trying to destroy itself we can't
1837 * allow the operation yet anyway (the second return -1).
1f07f686
MD
1838 */
1839 if (record->flush_state == HAMMER_FST_FLUSH) {
7b6ccb11
MD
1840 /*
1841 * If not in our flush group ask the parent to reflush
1842 * us as soon as possible.
1843 */
7a61b85d 1844 if (record->flush_group != flg) {
bf3b416b 1845 pip->flags |= HAMMER_INODE_REFLUSH;
7b6ccb11 1846 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1f07f686 1847 return(-1);
f90dde4c 1848 }
7b6ccb11
MD
1849
1850 /*
1851 * If in our flush group everything is already set up,
1852 * just return whether the record will improve our
1853 * visibility or not.
1854 */
1f07f686
MD
1855 if (record->type == HAMMER_MEM_RECORD_ADD)
1856 return(1);
1857 return(0);
1858 }
1859
1860 /*
1861 * It must be a setup record. Try to resolve the setup dependencies
1862 * by recursing upwards so we can place ip on the flush list.
cc0758d0
MD
1863 *
1864 * Limit ourselves to 20 levels of recursion to avoid blowing out
1865 * the kernel stack. If we hit the recursion limit we can't flush
1866 * until the parent flushes. The parent will flush independently
1867 * on its own and ultimately a deep recursion will be resolved.
1f07f686
MD
1868 */
1869 KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1870
cc0758d0 1871 good = hammer_setup_parent_inodes(pip, depth + 1, flg);
1f07f686
MD
1872
1873 /*
7b6ccb11
MD
1874 * If good < 0 the parent has no connectivity and we cannot safely
1875 * flush the directory entry, which also means we can't flush our
cc0758d0
MD
1876 * ip. Flag us for downward recursion once the parent's
1877 * connectivity is resolved. Flag the parent for [re]flush or it
1878 * may not check for downward recursions.
1f07f686
MD
1879 */
1880 if (good < 0) {
cc0758d0 1881 pip->flags |= HAMMER_INODE_REFLUSH;
7b6ccb11 1882 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1f07f686
MD
1883 return(good);
1884 }
1885
1886 /*
1887 * We are go, place the parent inode in a flushing state so we can
1888 * place its record in a flushing state. Note that the parent
1889 * may already be flushing. The record must be in the same flush
1890 * group as the parent.
1891 */
bf3b416b 1892 if (pip->flush_state != HAMMER_FST_FLUSH)
7a61b85d 1893 hammer_flush_inode_core(pip, flg, HAMMER_FLUSH_RECURSION);
bf3b416b 1894 KKASSERT(pip->flush_state == HAMMER_FST_FLUSH);
1f07f686
MD
1895 KKASSERT(record->flush_state == HAMMER_FST_SETUP);
1896
1897#if 0
1898 if (record->type == HAMMER_MEM_RECORD_DEL &&
869e8f55 1899 (record->target_ip->flags & (HAMMER_INODE_DELETED|HAMMER_INODE_DELONDISK)) == 0) {
1f07f686
MD
1900 /*
1901 * Regardless of flushing state we cannot sync this path if the
1902 * record represents a delete-on-disk but the target inode
1903 * is not ready to sync its own deletion.
1904 *
1905 * XXX need to count effective nlinks to determine whether
1906 * the flush is ok, otherwise removing a hardlink will
1907 * just leave the DEL record to rot.
1908 */
1909 record->target_ip->flags |= HAMMER_INODE_REFLUSH;
1910 return(-1);
1911 } else
1912#endif
7a61b85d 1913 if (pip->flush_group == flg) {
1f07f686 1914 /*
d7e278bb
MD
1915 * Because we have not calculated nlinks yet we can just
1916 * set records to the flush state if the parent is in
1917 * the same flush group as we are.
1f07f686 1918 */
7b6ccb11
MD
1919 record->flush_state = HAMMER_FST_FLUSH;
1920 record->flush_group = flg;
1921 ++record->flush_group->refs;
1922 hammer_ref(&record->lock);
1f07f686
MD
1923
1924 /*
7b6ccb11
MD
1925 * A general directory-add contributes to our visibility.
1926 *
1927 * Otherwise it is probably a directory-delete or
1928 * delete-on-disk record and does not contribute to our
1929 * visibility (but we can still flush it).
1f07f686 1930 */
7b6ccb11
MD
1931 if (record->type == HAMMER_MEM_RECORD_ADD)
1932 return(1);
1f07f686
MD
1933 return(0);
1934 } else {
1935 /*
7b6ccb11
MD
1936 * If the parent is not in our flush group we cannot
1937 * flush this record yet, there is no visibility.
1938 * We tell the parent to reflush and mark ourselves
1939 * so the parent knows it should flush us too.
1f07f686 1940 */
bf3b416b 1941 pip->flags |= HAMMER_INODE_REFLUSH;
7b6ccb11 1942 record->target_ip->flags |= HAMMER_INODE_CONN_DOWN;
1f07f686 1943 return(-1);
7f7c1f84 1944 }
c0ade690
MD
1945}
1946
1947/*
1f07f686 1948 * This is the core routine placing an inode into the FST_FLUSH state.
c0ade690 1949 */
b84de5af 1950static void
7a61b85d 1951hammer_flush_inode_core(hammer_inode_t ip, hammer_flush_group_t flg, int flags)
b84de5af 1952{
1f07f686 1953 int go_count;
1f07f686 1954
4e17f465
MD
1955 /*
1956 * Set flush state and prevent the flusher from cycling into
1957 * the next flush group. Do not place the ip on the list yet.
1958 * Inodes not in the idle state get an extra reference.
1959 */
1f07f686
MD
1960 KKASSERT(ip->flush_state != HAMMER_FST_FLUSH);
1961 if (ip->flush_state == HAMMER_FST_IDLE)
1962 hammer_ref(&ip->lock);
1963 ip->flush_state = HAMMER_FST_FLUSH;
7a61b85d 1964 ip->flush_group = flg;
da2da375 1965 ++ip->hmp->flusher.group_lock;
af209b0f
MD
1966 ++ip->hmp->count_iqueued;
1967 ++hammer_count_iqueued;
7a61b85d 1968 ++flg->total_count;
73896937 1969 hammer_redo_fifo_start_flush(ip);
b84de5af 1970
21fde338
MD
1971 /*
1972 * If the flush group reaches the autoflush limit we want to signal
1973 * the flusher. This is particularly important for remove()s.
fa2b9a03
MD
1974 *
1975 * If the default hammer_limit_reclaim is changed via sysctl
1976 * make sure we don't hit a degenerate case where we don't start
1977 * a flush but are blocked on further inode ops.
21fde338 1978 */
fa2b9a03
MD
1979 if (flg->total_count == hammer_autoflush ||
1980 flg->total_count >= hammer_limit_reclaim / 4)
21fde338
MD
1981 flags |= HAMMER_FLUSH_SIGNAL;
1982
e0092341 1983#if 0
e8599db1
MD
1984 /*
1985 * We need to be able to vfsync/truncate from the backend.
e0092341
MD
1986 *
1987 * XXX Any truncation from the backend will acquire the vnode
1988 * independently.
e8599db1
MD
1989 */
1990 KKASSERT((ip->flags & HAMMER_INODE_VHELD) == 0);
1991 if (ip->vp && (ip->vp->v_flag & VINACTIVE) == 0) {
1992 ip->flags |= HAMMER_INODE_VHELD;
1993 vref(ip->vp);
1994 }
e0092341 1995#endif
e8599db1 1996
ec4e8497 1997 /*
1f07f686
MD
1998 * Figure out how many in-memory records we can actually flush
1999 * (not including inode meta-data, buffers, etc).
ec4e8497 2000 */
d7e278bb 2001 KKASSERT((ip->flags & HAMMER_INODE_WOULDBLOCK) == 0);
1f07f686 2002 if (flags & HAMMER_FLUSH_RECURSION) {
7a61b85d
MD
2003 /*
2004 * If this is an upwards recursion we do not want to
2005 * recurse down again!
2006 */
1f07f686 2007 go_count = 1;
d7e278bb 2008#if 0
312de84d 2009 } else if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
7a61b85d
MD
2010 /*
2011 * No new records are added if we must complete a flush
2012 * from a previous cycle, but we do have to move the records
2013 * from the previous cycle to the current one.
2014 */
2015#if 0
525aad3a
MD
2016 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2017 hammer_syncgrp_child_callback, NULL);
7a61b85d 2018#endif
312de84d 2019 go_count = 1;
d7e278bb 2020#endif
1f07f686 2021 } else {
7a61b85d
MD
2022 /*
2023 * Normal flush, scan records and bring them into the flush.
2024 * Directory adds and deletes are usually skipped (they are
2025 * grouped with the related inode rather than with the
2026 * directory).
2027 *
2028 * go_count can be negative, which means the scan aborted
2029 * due to the flush group being over-full and we should
2030 * flush what we have.
2031 */
1f07f686
MD
2032 go_count = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
2033 hammer_setup_child_callback, NULL);
2034 }
b84de5af
MD
2035
2036 /*
1f07f686
MD
2037 * This is a more involved test that includes go_count. If we
2038 * can't flush, flag the inode and return. If go_count is 0 we
2039 * were unable to flush any records in our rec_tree and
2040 * must ignore the XDIRTY flag.
b84de5af 2041 */
1f07f686
MD
2042 if (go_count == 0) {
2043 if ((ip->flags & HAMMER_INODE_MODMASK_NOXDIRTY) == 0) {
af209b0f
MD
2044 --ip->hmp->count_iqueued;
2045 --hammer_count_iqueued;
2046
4889cbd4 2047 --flg->total_count;
1f07f686 2048 ip->flush_state = HAMMER_FST_SETUP;
7a61b85d 2049 ip->flush_group = NULL;
e0092341 2050#if 0
e8599db1
MD
2051 if (ip->flags & HAMMER_INODE_VHELD) {
2052 ip->flags &= ~HAMMER_INODE_VHELD;
2053 vrele(ip->vp);
2054 }
e0092341 2055#endif
4889cbd4
MD
2056
2057 /*
2058 * REFLUSH is needed to trigger dependency wakeups
2059 * when an inode is in SETUP.
2060 */
2061 ip->flags |= HAMMER_INODE_REFLUSH;
4e17f465
MD
2062 if (flags & HAMMER_FLUSH_SIGNAL) {
2063 ip->flags |= HAMMER_INODE_RESIGNAL;
7a61b85d 2064 hammer_flusher_async(ip->hmp, flg);
4e17f465 2065 }
da2da375
MD
2066 if (--ip->hmp->flusher.group_lock == 0)
2067 wakeup(&ip->hmp->flusher.group_lock);
1f07f686
MD
2068 return;
2069 }
2070 }
b84de5af 2071
b84de5af
MD
2072 /*
2073 * Snapshot the state of the inode for the backend flusher.
2074 *
a9d52b76 2075 * We continue to retain save_trunc_off even when all truncations
cb51be26
MD
2076 * have been resolved as an optimization to determine if we can
2077 * skip the B-Tree lookup for overwrite deletions.
2078 *
1f07f686
MD
2079 * NOTE: The DELETING flag is a mod flag, but it is also sticky,
2080 * and stays in ip->flags. Once set, it stays set until the
2081 * inode is destroyed.
b84de5af 2082 */
d7e278bb 2083 if (ip->flags & HAMMER_INODE_TRUNCATED) {
312de84d
MD
2084 KKASSERT((ip->sync_flags & HAMMER_INODE_TRUNCATED) == 0);
2085 ip->sync_trunc_off = ip->trunc_off;
2086 ip->trunc_off = 0x7FFFFFFFFFFFFFFFLL;
2087 ip->flags &= ~HAMMER_INODE_TRUNCATED;
2088 ip->sync_flags |= HAMMER_INODE_TRUNCATED;
a9d52b76
MD
2089
2090 /*
2091 * The save_trunc_off used to cache whether the B-Tree
2092 * holds any records past that point is not used until
2093 * after the truncation has succeeded, so we can safely
2094 * set it now.
2095 */
2096 if (ip->save_trunc_off > ip->sync_trunc_off)
2097 ip->save_trunc_off = ip->sync_trunc_off;
2098 }
312de84d
MD
2099 ip->sync_flags |= (ip->flags & HAMMER_INODE_MODMASK &
2100 ~HAMMER_INODE_TRUNCATED);
11ad5ade 2101 ip->sync_ino_leaf = ip->ino_leaf;
b84de5af 2102 ip->sync_ino_data = ip->ino_data;
312de84d 2103 ip->flags &= ~HAMMER_INODE_MODMASK | HAMMER_INODE_TRUNCATED;
0832c9bb
MD
2104#ifdef DEBUG_TRUNCATE
2105 if ((ip->sync_flags & HAMMER_INODE_TRUNCATED) && ip == HammerTruncIp)
2106 kprintf("truncateS %016llx\n", ip->sync_trunc_off);
2107#endif
b84de5af
MD
2108
2109 /*
4e17f465 2110 * The flusher list inherits our inode and reference.
b84de5af 2111 */
7a61b85d 2112 KKASSERT(flg->running == 0);
ff003b11 2113 RB_INSERT(hammer_fls_rb_tree, &flg->flush_tree, ip);
da2da375
MD
2114 if (--ip->hmp->flusher.group_lock == 0)
2115 wakeup(&ip->hmp->flusher.group_lock);
1f07f686 2116
0832c9bb 2117 if (flags & HAMMER_FLUSH_SIGNAL) {
7a61b85d 2118 hammer_flusher_async(ip->hmp, flg);
0832c9bb 2119 }
b84de5af
MD
2120}
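
For a sense of scale (illustrative sysctl values, not defaults quoted from
this tree): with hammer_autoflush = 2000 and hammer_limit_reclaim = 4000,
the signal fires either when the group reaches exactly 2000 inodes or as
soon as it reaches 4000 / 4 = 1000, so lowering the reclaim limit also
lowers the flush trigger and avoids the degenerate stall described above.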
2121
ec4e8497 2122/*
1f07f686
MD
2123 * Callback for scan of ip->rec_tree. Try to include each record in our
2124 * flush. ip->flush_group has been set but the inode has not yet been
2125 * moved into a flushing state.
2126 *
2127 * If we get stuck on a record we have to set HAMMER_INODE_REFLUSH on
2128 * both inodes.
2129 *
2130 * We return 1 for any record placed or found in FST_FLUSH, which prevents
2131 * the caller from shortcutting the flush.
ec4e8497 2132 */
c0ade690 2133static int
1f07f686 2134hammer_setup_child_callback(hammer_record_t rec, void *data)
b84de5af 2135{
7a61b85d 2136 hammer_flush_group_t flg;
1f07f686
MD
2137 hammer_inode_t target_ip;
2138 hammer_inode_t ip;
2139 int r;
2140
2141 /*
3214ade6
MD
2142 * Records deleted or committed by the backend are ignored.
2143 * Note that the flush detects deleted frontend records at
2144 * multiple points to deal with races. This is just the first
2145 * line of defense. The only time HAMMER_RECF_DELETED_FE cannot
2146 * be set is when HAMMER_RECF_INTERLOCK_BE is set, because it
2147 * messes up link-count calculations.
7bc5b8c2 2148 *
3214ade6
MD
2149 * NOTE: Don't get confused between record deletion and, say,
2150 * directory entry deletion. The deletion of a directory entry
2151 * which is on-media has nothing to do with the record deletion
2152 * flags.
1f07f686 2153 */
3214ade6
MD
2154 if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
2155 HAMMER_RECF_COMMITTED)) {
ecca949a 2156 if (rec->flush_state == HAMMER_FST_FLUSH) {
7a61b85d 2157 KKASSERT(rec->flush_group == rec->ip->flush_group);
ecca949a
MD
2158 r = 1;
2159 } else {
2160 r = 0;
2161 }
2162 return(r);
2163 }
1f07f686
MD
2164
2165 /*
2166 * If the record is in an idle state it has no dependancies and
2167 * can be flushed.
2168 */
2169 ip = rec->ip;
7a61b85d 2170 flg = ip->flush_group;
1f07f686
MD
2171 r = 0;
2172
2173 switch(rec->flush_state) {
2174 case HAMMER_FST_IDLE:
2175 /*
7a61b85d 2176 * The record has no setup dependancy, we can flush it.
1f07f686
MD
2177 */
2178 KKASSERT(rec->target_ip == NULL);
2179 rec->flush_state = HAMMER_FST_FLUSH;
7a61b85d
MD
2180 rec->flush_group = flg;
2181 ++flg->refs;
b84de5af 2182 hammer_ref(&rec->lock);
1f07f686
MD
2183 r = 1;
2184 break;
2185 case HAMMER_FST_SETUP:
2186 /*
7a61b85d
MD
2187 * The record has a setup dependency. These are typically
2188 * directory entry adds and deletes. Such entries will be
7b6ccb11
MD
2189 * flushed when their inodes are flushed so we do not
2190 * usually have to add them to the flush here. However,
2191 * if the target_ip has set HAMMER_INODE_CONN_DOWN then
2192 * it is asking us to flush this record (and it).
1f07f686
MD
2193 */
2194 target_ip = rec->target_ip;
2195 KKASSERT(target_ip != NULL);
2196 KKASSERT(target_ip->flush_state != HAMMER_FST_IDLE);
7a61b85d
MD
2197
2198 /*
2199 * If the target IP is already flushing in our group
5c8d05e2
MD
2200 * we could associate the record, but target_ip has
2201 * already synced ino_data to sync_ino_data and we
2202 * would also have to adjust nlinks. Plus there are
2203 * ordering issues for adds and deletes.
2204 *
2205 * Reflush downward if this is an ADD, and upward if
2206 * this is a DEL.
7a61b85d 2207 */
1f07f686 2208 if (target_ip->flush_state == HAMMER_FST_FLUSH) {
5c8d05e2
MD
2209 if (rec->type == HAMMER_MEM_RECORD_ADD)
2210 ip->flags |= HAMMER_INODE_REFLUSH;
2211 else
1f07f686 2212 target_ip->flags |= HAMMER_INODE_REFLUSH;
7a61b85d
MD
2213 break;
2214 }
2215
2216 /*
2217 * Target IP is not yet flushing. This can get complex
2218 * because we have to be careful about the recursion.
7b6ccb11
MD
2219 *
2220 * Directories create an issue for us in that if a flush
2221 * of a directory is requested the expectation is to flush
2222 * any pending directory entries, but this will cause the
2223 * related inodes to recursively flush as well. We can't
2224 * really defer the operation so just get as many as we
2225 * can and
7a61b85d 2226 */
7b6ccb11 2227#if 0
7a61b85d 2228 if ((target_ip->flags & HAMMER_INODE_RECLAIM) == 0 &&
7b6ccb11 2229 (target_ip->flags & HAMMER_INODE_CONN_DOWN) == 0) {
7a61b85d 2230 /*
7b6ccb11
MD
2231 * We aren't reclaiming and the target ip was not
2232 * previously prevented from flushing due to this
2233 * record dependency. Do not flush this record.
7a61b85d
MD
2234 */
2235 /*r = 0;*/
7b6ccb11
MD
2236 } else
2237#endif
2238 if (flg->total_count + flg->refs >
7a61b85d
MD
2239 ip->hmp->undo_rec_limit) {
2240 /*
2241 * Our flush group is over-full and we risk blowing
2242 * out the UNDO FIFO. Stop the scan, flush what we
2243 * have, then reflush the directory.
2244 *
2245 * The directory may be forced through multiple
2246 * flush groups before it can be completely
2247 * flushed.
2248 */
4889cbd4
MD
2249 ip->flags |= HAMMER_INODE_RESIGNAL |
2250 HAMMER_INODE_REFLUSH;
7a61b85d 2251 r = -1;
1f07f686
MD
2252 } else if (rec->type == HAMMER_MEM_RECORD_ADD) {
2253 /*
2254 * If the target IP is not flushing we can force
2255 * it to flush, even if it is unable to write out
2256 * any of its own records we have at least one in
2257 * hand that we CAN deal with.
2258 */
2259 rec->flush_state = HAMMER_FST_FLUSH;
7a61b85d
MD
2260 rec->flush_group = flg;
2261 ++flg->refs;
1f07f686 2262 hammer_ref(&rec->lock);
7a61b85d 2263 hammer_flush_inode_core(target_ip, flg,
1f07f686
MD
2264 HAMMER_FLUSH_RECURSION);
2265 r = 1;
2266 } else {
2267 /*
e8599db1
MD
2268 * General or delete-on-disk record.
2269 *
2270 * XXX this needs help. If this is a delete-on-disk we could
2271 * disconnect the target. If the target has its own
2272 * dependencies they really need to be flushed.
1f07f686
MD
2273 *
2274 * XXX
2275 */
2276 rec->flush_state = HAMMER_FST_FLUSH;
7a61b85d
MD
2277 rec->flush_group = flg;
2278 ++flg->refs;
1f07f686 2279 hammer_ref(&rec->lock);
7a61b85d 2280 hammer_flush_inode_core(target_ip, flg,
1f07f686
MD
2281 HAMMER_FLUSH_RECURSION);
2282 r = 1;
2283 }
2284 break;
2285 case HAMMER_FST_FLUSH:
2286 /*
d7e278bb 2287 * The flush_group should already match.
1f07f686 2288 */
7a61b85d 2289 KKASSERT(rec->flush_group == flg);
1f07f686
MD
2290 r = 1;
2291 break;
b84de5af 2292 }
1f07f686 2293 return(r);
b84de5af
MD
2294}
2295
7a61b85d 2296#if 0
525aad3a
MD
2297/*
2298 * This version just moves records already in a flush state to the new
2299 * flush group and that is it.
2300 */
2301static int
2302hammer_syncgrp_child_callback(hammer_record_t rec, void *data)
2303{
2304 hammer_inode_t ip = rec->ip;
2305
2306 switch(rec->flush_state) {
2307 case HAMMER_FST_FLUSH:
7a61b85d 2308 KKASSERT(rec->flush_group == ip->flush_group);
525aad3a
MD
2309 break;
2310 default:
2311 break;
2312 }
2313 return(0);
2314}
7a61b85d 2315#endif
525aad3a 2316
b84de5af 2317/*
7a61b85d 2318 * Wait for a previously queued flush to complete.
cdb6e4e6
MD
2319 *
2320 * If a critical error occurred we don't try to wait.
b84de5af
MD
2321 */
2322void
2323hammer_wait_inode(hammer_inode_t ip)
2324{
7a61b85d 2325 hammer_flush_group_t flg;
ddfdf542 2326
7a61b85d 2327 flg = NULL;
cdb6e4e6 2328 if ((ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
cdb6e4e6
MD
2329 while (ip->flush_state != HAMMER_FST_IDLE &&
2330 (ip->hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) == 0) {
f153644d
MD
2331 if (ip->flush_state == HAMMER_FST_SETUP)
2332 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2333 if (ip->flush_state != HAMMER_FST_IDLE) {
2334 ip->flags |= HAMMER_INODE_FLUSHW;
2335 tsleep(&ip->flags, 0, "hmrwin", 0);
2336 }
cdb6e4e6 2337 }
b84de5af
MD
2338 }
2339}
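
A hedged usage sketch (the pairing is inferred from the flags used here,
not quoted from the VOP layer):

	/*
	 * fsync-style sequence: queue the inode with a signalled
	 * flush, then block until the flusher returns it to
	 * HAMMER_FST_IDLE or a critical error aborts the wait.
	 */
	hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
	hammer_wait_inode(ip);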
2340
2341/*
2342 * Called by the backend code when a flush has been completed.
2343 * The inode has already been removed from the flush list.
2344 *
2345 * A pipelined flush can occur, in which case we must re-enter the
2346 * inode on the list and re-copy its fields.
2347 */
2348void
cdb6e4e6 2349hammer_flush_inode_done(hammer_inode_t ip, int error)
b84de5af 2350{
af209b0f
MD
2351 hammer_mount_t hmp;
2352 int dorel;
1955afa7 2353
b84de5af
MD
2354 KKASSERT(ip->flush_state == HAMMER_FST_FLUSH);
2355
af209b0f
MD
2356 hmp = ip->hmp;
2357
5c667a24
MD
2358 /*
2359 * Auto-reflush if the backend could not completely flush
2360 * the inode. This fixes a case where a deferred buffer flush
2361 * could cause fsync to return early.
2362 */
2363 if (ip->sync_flags & HAMMER_INODE_MODMASK)
2364 ip->flags |= HAMMER_INODE_REFLUSH;
2365
1f07f686
MD
2366 /*
2367 * Merge left-over flags back into the frontend and fix the state.
a9d52b76 2368 * Incomplete truncations are retained by the backend.
1f07f686 2369 */
cdb6e4e6 2370 ip->error = error;
a9d52b76
MD
2371 ip->flags |= ip->sync_flags & ~HAMMER_INODE_TRUNCATED;
2372 ip->sync_flags &= HAMMER_INODE_TRUNCATED;
1f07f686
MD
2373
2374 /*
2375 * The backend may have adjusted nlinks, so if the adjusted nlinks
47f363f1 2376 * does not match the frontend, set the frontend's DDIRTY flag again.
1f07f686 2377 */
11ad5ade
MD
2378 if (ip->ino_data.nlinks != ip->sync_ino_data.nlinks)
2379 ip->flags |= HAMMER_INODE_DDIRTY;
b84de5af 2380
4e17f465 2381 /*
a7e9bef1 2382 * Fix up the dirty buffer status.
4e17f465 2383 */
0832c9bb 2384 if (ip->vp && RB_ROOT(&ip->vp->v_rbdirty_tree)) {
1f07f686 2385 ip->flags |= HAMMER_INODE_BUFS;
1f07f686 2386 }
73896937 2387 hammer_redo_fifo_end_flush(ip);
1f07f686
MD
2388
2389 /*
2390 * Re-set the XDIRTY flag if some of the inode's in-memory records
2391 * could not be flushed.
2392 */
0832c9bb
MD
2393 KKASSERT((RB_EMPTY(&ip->rec_tree) &&
2394 (ip->flags & HAMMER_INODE_XDIRTY) == 0) ||
2395 (!RB_EMPTY(&ip->rec_tree) &&
2396 (ip->flags & HAMMER_INODE_XDIRTY) != 0));
4e17f465
MD
2397
2398 /*
2399 * Do not lose track of inodes which no longer have vnode
2400 * associations, otherwise they may never get flushed again.
35a49944
MD
2401 *
2402 * The reflush flag can be set superfluously, causing extra pain
2403 * for no reason. If the inode is no longer modified it no longer
2404 * needs to be flushed.
4e17f465 2405 */
35a49944
MD
2406 if (ip->flags & HAMMER_INODE_MODMASK) {
2407 if (ip->vp == NULL)
2408 ip->flags |= HAMMER_INODE_REFLUSH;
2409 } else {
2410 ip->flags &= ~HAMMER_INODE_REFLUSH;
2411 }
4e17f465 2412
a9d52b76 2413 /*
7a61b85d 2414 * Adjust the flush state.
4e17f465 2415 */
06ad81ff 2416 if (ip->flags & HAMMER_INODE_WOULDBLOCK) {
7a61b85d
MD
2417 /*
2418 * We were unable to flush out all our records, leave the
2419 * inode in a flush state and in the current flush group.
d7e278bb 2420 * The flush group will be re-run.
7a61b85d 2421 *
d7e278bb
MD
2422 * This occurs if the UNDO block gets too full or there is
2423 * too much dirty meta-data and allows the flusher to
2424 * finalize the UNDO block and then re-flush.
7a61b85d 2425 */
06ad81ff 2426 ip->flags &= ~HAMMER_INODE_WOULDBLOCK;
af209b0f 2427 dorel = 0;
7a61b85d
MD
2428 } else {
2429 /*
2430 * Remove from the flush_group
2431 */
ff003b11 2432 RB_REMOVE(hammer_fls_rb_tree, &ip->flush_group->flush_tree, ip);
7a61b85d
MD
2433 ip->flush_group = NULL;
2434
e0092341 2435#if 0
7a61b85d
MD
2436 /*
2437 * Clean up the vnode ref and tracking counts.
2438 */
2439 if (ip->flags & HAMMER_INODE_VHELD) {
2440 ip->flags &= ~HAMMER_INODE_VHELD;
2441 vrele(ip->vp);
2442 }
e0092341 2443#endif
7a61b85d
MD
2444 --hmp->count_iqueued;
2445 --hammer_count_iqueued;
2446
2447 /*
2448 * And adjust the state.
2449 */
2450 if (TAILQ_EMPTY(&ip->target_list) && RB_EMPTY(&ip->rec_tree)) {
2451 ip->flush_state = HAMMER_FST_IDLE;
2452 dorel = 1;
2453 } else {
2454 ip->flush_state = HAMMER_FST_SETUP;
2455 dorel = 0;
2456 }
b84de5af 2457
7a61b85d
MD
2458 /*
2459 * If the frontend is waiting for a flush to complete,
2460 * wake it up.
2461 */
2462 if (ip->flags & HAMMER_INODE_FLUSHW) {
2463 ip->flags &= ~HAMMER_INODE_FLUSHW;
2464 wakeup(&ip->flags);
2465 }
af209b0f 2466
d7e278bb
MD
2467 /*
2468 * If the frontend made more changes and requested another
2469 * flush, then try to get it running.
2470 *
2471 * Reflushes are aborted when the inode is errored out.
2472 */
2473 if (ip->flags & HAMMER_INODE_REFLUSH) {
2474 ip->flags &= ~HAMMER_INODE_REFLUSH;
2475 if (ip->flags & HAMMER_INODE_RESIGNAL) {
2476 ip->flags &= ~HAMMER_INODE_RESIGNAL;
2477 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
2478 } else {
2479 hammer_flush_inode(ip, 0);
2480 }
0729c8c8 2481 }
4e17f465
MD
2482 }
2483
7b6ccb11
MD
2484 /*
2485 * If we have no parent dependencies we can clear CONN_DOWN
2486 */
2487 if (TAILQ_EMPTY(&ip->target_list))
2488 ip->flags &= ~HAMMER_INODE_CONN_DOWN;
2489
e63644f0
MD
2490 /*
2491 * If the inode is now clean drop the space reservation.
2492 */
2493 if ((ip->flags & HAMMER_INODE_MODMASK) == 0 &&
2494 (ip->flags & HAMMER_INODE_RSV_INODES)) {
2495 ip->flags &= ~HAMMER_INODE_RSV_INODES;
af209b0f 2496 --hmp->rsv_inodes;
e63644f0
MD
2497 }
2498
1f07f686
MD
2499 if (dorel)
2500 hammer_rel_inode(ip, 0);
b84de5af
MD
2501}
2502
2503/*
2504 * Called from hammer_sync_inode() to synchronize in-memory records
2505 * to the media.
2506 */
2507static int
2508hammer_sync_record_callback(hammer_record_t record, void *data)
c0ade690 2509{
4e17f465
MD
2510 hammer_cursor_t cursor = data;
2511 hammer_transaction_t trans = cursor->trans;
6c1f89f4 2512 hammer_mount_t hmp = trans->hmp;
c0ade690
MD
2513 int error;
2514
b84de5af 2515 /*
1f07f686 2516 * Skip records that do not belong to the current flush.
b84de5af 2517 */
47637bff 2518 ++hammer_stats_record_iterations;
1f07f686 2519 if (record->flush_state != HAMMER_FST_FLUSH)
b84de5af 2520 return(0);
47637bff 2521
1f07f686
MD
2522#if 1
2523 if (record->flush_group != record->ip->flush_group) {
7a61b85d 2524 kprintf("sync_record %p ip %p bad flush group %p %p\n", record, record->ip, record->flush_group, record->ip->flush_group);
fc73edd8
MD
2525 if (hammer_debug_critical)
2526 Debugger("blah2");
1f07f686
MD
2527 return(0);
2528 }
2529#endif
2530 KKASSERT(record->flush_group == record->ip->flush_group);
d36ec43b
MD
2531
2532 /*
2533 * Interlock the record using the BE flag. Once BE is set the
2534 * frontend cannot change the state of FE.
2535 *
2536 * NOTE: If FE is set prior to us setting BE we still sync the
2537 * record out, but the flush completion code converts it to
2538 * a delete-on-disk record instead of destroying it.
2539 */
4e17f465 2540 KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
d36ec43b
MD
2541 record->flags |= HAMMER_RECF_INTERLOCK_BE;
2542
47637bff 2543 /*
3214ade6 2544 * The backend has already disposed of the record.
47637bff 2545 */
3214ade6 2546 if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
47637bff
MD
2547 error = 0;
2548 goto done;
2549 }
2550
98f7132d
MD
2551 /*
2552 * If the whole inode is being deleted, all on-disk records will
930bf163
MD
2553 * be deleted very soon, we can't sync any new records to disk
2554 * because they will be deleted in the same transaction they were
2555 * created in (delete_tid == create_tid), which will assert.
2556 *
2557 * XXX There may be a case with RECORD_ADD with DELETED_FE set
2558 * that we currently panic on.
98f7132d
MD
2559 */
2560 if (record->ip->sync_flags & HAMMER_INODE_DELETING) {
930bf163 2561 switch(record->type) {
47637bff
MD
2562 case HAMMER_MEM_RECORD_DATA:
2563 /*
2564 * We don't have to do anything, if the record was
2565 * committed the space will have been accounted for
2566 * in the blockmap.
2567 */
2568 /* fall through */
930bf163 2569 case HAMMER_MEM_RECORD_GENERAL:
3214ade6
MD
2570 /*
2571 * Set deleted-by-backend flag. Do not set the
2572 * backend committed flag, because we are throwing
2573 * the record away.
2574 */
98f7132d 2575 record->flags |= HAMMER_RECF_DELETED_BE;
3214ade6 2576 ++record->ip->rec_generation;
930bf163
MD
2577 error = 0;
2578 goto done;
2579 case HAMMER_MEM_RECORD_ADD:
2580 panic("hammer_sync_record_callback: illegal add "
2581 "during inode deletion record %p", record);
2582 break; /* NOT REACHED */
2583 case HAMMER_MEM_RECORD_INODE:
2584 panic("hammer_sync_record_callback: attempt to "
2585 "sync inode record %p?", record);
2586 break; /* NOT REACHED */
2587 case HAMMER_MEM_RECORD_DEL:
2588 /*
2589 * Follow through and issue the on-disk deletion
98f7132d 2590 */
930bf163 2591 break;
98f7132d 2592 }
98f7132d
MD
2593 }
2594
d36ec43b 2595 /*
7bc5b8c2
MD
2596 * If DELETED_FE is set special handling is needed for directory
2597 * entries. Dependent pieces related to the directory entry may
2598 * have already been synced to disk. If this occurs we have to
2599 * sync the directory entry and then change the in-memory record
2600 * from an ADD to a DELETE to cover the fact that it's been
2601 * deleted by the frontend.
2602 *
2603 * A directory delete covering record (MEM_RECORD_DEL) can never
2604 * be deleted by the frontend.
2605 *
2606 * Any other record type (aka DATA) can be deleted by the frontend.
2607 * XXX At the moment the flusher must skip it because there may
2608 * be another data record in the flush group for the same block,
2609 * meaning that some frontend data changes can leak into the backend's
2610 * synchronization point.
d36ec43b 2611 */
1f07f686 2612 if (record->flags & HAMMER_RECF_DELETED_FE) {
e8599db1 2613 if (record->type == HAMMER_MEM_RECORD_ADD) {
3214ade6
MD
2614 /*
2615 * Convert a front-end deleted directory-add to
2616 * a directory-delete entry later.
2617 */
e8599db1
MD
2618 record->flags |= HAMMER_RECF_CONVERT_DELETE;
2619 } else {
3214ade6
MD
2620 /*
2621 * Dispose of the record (race case). Mark as
2622 * deleted by backend (and not committed).
2623 */
e8599db1 2624 KKASSERT(record->type != HAMMER_MEM_RECORD_DEL);
7bc5b8c2 2625 record->flags |= HAMMER_RECF_DELETED_BE;
3214ade6 2626 ++record->ip->rec_generation;
7bc5b8c2
MD
2627 error = 0;
2628 goto done;
e8599db1 2629 }
1f07f686 2630 }
b84de5af
MD
2631
2632 /*
2633 * Assign the create_tid for new records. Deletions already
2634 * have the record's entire key properly set up.
2635 */
3214ade6 2636 if (record->type != HAMMER_MEM_RECORD_DEL) {
11ad5ade 2637 record->leaf.base.create_tid = trans->tid;
dd94f1b1 2638 record->leaf.create_ts = trans->time32;
3214ade6 2639 }
47f363f1
MD
2640
2641 /*
2642 * This actually moves the record to the on-media B-Tree. We
2643 * must also generate REDO_TERM entries in the UNDO/REDO FIFO
2644 * indicating that the related REDO_WRITE(s) have been committed.
2645 *
2646 * During recovery any REDO_TERM's within the nominal recovery span
2647 * are ignored since the related meta-data is being undone, causing
2648 * any matching REDO_WRITEs to execute. The REDO_TERMs outside
2649 * the nominal recovery span will match against REDO_WRITEs and
2650 * prevent them from being executed (because the meta-data has
2651 * already been synchronized).
2652 */
2653 if (record->flags & HAMMER_RECF_REDO) {
2654 KKASSERT(record->type == HAMMER_MEM_RECORD_DATA);
2655 hammer_generate_redo(trans, record->ip,
2656 record->leaf.base.key -
2657 record->leaf.data_len,
2658 HAMMER_REDO_TERM_WRITE,
2659 NULL,
2660 record->leaf.data_len);
2661 }
4e17f465
MD
2662 for (;;) {
2663 error = hammer_ip_sync_record_cursor(cursor, record);
2664 if (error != EDEADLK)
2665 break;
2666 hammer_done_cursor(cursor);
2667 error = hammer_init_cursor(trans, cursor, &record->ip->cache[0],
2668 record->ip);
2669 if (error)
2670 break;
2671 }
2672 record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
c0ade690 2673
cdb6e4e6 2674 if (error)
b3deaf57 2675 error = -error;
98f7132d 2676done:
d36ec43b 2677 hammer_flush_record_done(record, error);
6c1f89f4
MD
2678
2679 /*
2680 * Do partial finalization if we have built up too many dirty
2681 * buffers. Otherwise a buffer cache deadlock can occur when
2682 * doing things like creating tens of thousands of tiny files.
2683 *
842e7a70
MD
2684 * We must release our cursor lock to avoid a 3-way deadlock
2685 * due to the exclusive sync lock the finalizer must get.
c9ce54d6
MD
2686 *
2687 * WARNING: See warnings in hammer_unlock_cursor() function.
6c1f89f4 2688 */
842e7a70 2689 if (hammer_flusher_meta_limit(hmp)) {
982be4bf 2690 hammer_unlock_cursor(cursor);
6c1f89f4 2691 hammer_flusher_finalize(trans, 0);
982be4bf 2692 hammer_lock_cursor(cursor);
842e7a70 2693 }
6c1f89f4 2694
b3deaf57 2695 return(error);
c0ade690
MD
2696}
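
Note the return convention (inferred from the RB_SCAN call sites in this
file, e.g. hammer_flush_inode_core() above): non-negative callback returns
accumulate into the scan's result while a negative return aborts the scan
and is propagated, which is why a failure is negated to -error here.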
2697
2698/*
d7e278bb 2699 * Backend function called by the flusher to sync an inode to media.
c0ade690
MD
2700 */
2701int
02325004 2702hammer_sync_inode(hammer_transaction_t trans, hammer_inode_t ip)
c0ade690 2703{
4e17f465 2704 struct hammer_cursor cursor;
cb51be26 2705 hammer_node_t tmp_node;
1f07f686
MD
2706 hammer_record_t depend;
2707 hammer_record_t next;
ec4e8497 2708 int error, tmp_error;
1f07f686 2709 u_int64_t nlinks;
c0ade690 2710
1f07f686 2711 if ((ip->sync_flags & HAMMER_INODE_MODMASK) == 0)
d113fda1 2712 return(0);
d113fda1 2713
02325004 2714 error = hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
4e17f465
MD
2715 if (error)
2716 goto done;
c0ade690 2717
ec4e8497 2718 /*
1f07f686
MD
2719 * Any directory records referencing this inode which are not in
2720 * our current flush group must adjust our nlink count for the
47f363f1 2721 * purposes of synchronizing to disk.
1f07f686
MD
2722 *
2723 * Records which are in our flush group can be unlinked from our
c4bae5fd
MD
2724 * inode now, potentially allowing the inode to be physically
2725 * deleted.
bf3b416b
MD
2726 *
2727 * This cannot block.
ec4e8497 2728 */
11ad5ade 2729 nlinks = ip->ino_data.nlinks;
1f07f686
MD
2730 next = TAILQ_FIRST(&ip->target_list);
2731 while ((depend = next) != NULL) {
2732 next = TAILQ_NEXT(depend, target_entry);
2733 if (depend->flush_state == HAMMER_FST_FLUSH &&
7a61b85d 2734 depend->flush_group == ip->flush_group) {
c4bae5fd
MD
2735 /*
2736 * If this is an ADD that was deleted by the frontend
2737 * the frontend nlinks count will have already been
2738 * decremented, but the backend is going to sync its
2739 * directory entry and must account for it. The
2740 * record will be converted to a delete-on-disk when
2741 * it gets synced.
2742 *
2743 * If the ADD was not deleted by the frontend we
2744 * can remove the dependency from our target_list.
2745 */
2746 if (depend->flags & HAMMER_RECF_DELETED_FE) {
2747 ++nlinks;
2748 } else {
2749 TAILQ_REMOVE(&ip->target_list, depend,
2750 target_entry);
2751 depend->target_ip = NULL;
2752 }
1f07f686 2753 } else if ((depend->flags & HAMMER_RECF_DELETED_FE) == 0) {
c4bae5fd 2754 /*
3214ade6
MD
2755 * Not part of our flush group and not deleted by
2756 * the front-end, adjust the link count synced to
2757 * the media (undo what the frontend did when it
2758 * queued the record).
c4bae5fd
MD
2759 */
2760 KKASSERT((depend->flags & HAMMER_RECF_DELETED_BE) == 0);
1f07f686
MD
2761 switch(depend->type) {
2762 case HAMMER_MEM_RECORD_ADD:
2763 --nlinks;
2764 break;
2765 case HAMMER_MEM_RECORD_DEL:
2766 ++nlinks;
2767 break;
e8599db1
MD
2768 default:
2769 break;
1f07f686 2770 }
ec4e8497 2771 }
ec4e8497
MD
2772 }
2773
c0ade690 2774 /*
1f07f686 2775 * Set dirty if we had to modify the link count.
c0ade690 2776 */
11ad5ade 2777 if (ip->sync_ino_data.nlinks != nlinks) {
1f07f686 2778 KKASSERT((int64_t)nlinks >= 0);
11ad5ade
MD
2779 ip->sync_ino_data.nlinks = nlinks;
2780 ip->sync_flags |= HAMMER_INODE_DDIRTY;
1f07f686 2781 }
b84de5af 2782
1f07f686 2783 /*
869e8f55
MD
2784 * If there is a truncation queued destroy any data past the (aligned)
2785 * truncation point. Userland will have dealt with the buffer
2786 * containing the truncation point for us.
2787 *
2788 * We don't flush pending frontend data buffers until after we've
cb51be26 2789 * dealt with the truncation.
1f07f686 2790 */
869e8f55 2791 if (ip->sync_flags & HAMMER_INODE_TRUNCATED) {
b84de5af
MD
2792 /*
2793 * Interlock trunc_off. The VOP front-end may continue to
2794 * make adjustments to it while we are blocked.
2795 */
2796 off_t trunc_off;
2797 off_t aligned_trunc_off;
4a2796f3 2798 int blkmask;
c0ade690 2799
b84de5af 2800 trunc_off = ip->sync_trunc_off;
4a2796f3
MD
2801 blkmask = hammer_blocksize(trunc_off) - 1;
2802 aligned_trunc_off = (trunc_off + blkmask) & ~(int64_t)blkmask;
b84de5af
MD
2803
2804 /*
2805 * Delete any whole blocks on-media. The front-end has
2806 * already cleaned out any partial block and made it
2807 * pending. The front-end may have updated trunc_off
47637bff 2808 * while we were blocked so we only use sync_trunc_off.
06ad81ff
MD
2809 *
2810 * This operation can blow out the buffer cache, EWOULDBLOCK
a9d52b76
MD
2811 * means we were unable to complete the deletion. The
2812 * deletion will update sync_trunc_off in that case.
b84de5af 2813 */
4e17f465 2814 error = hammer_ip_delete_range(&cursor, ip,
b84de5af 2815 aligned_trunc_off,
06ad81ff
MD
2816 0x7FFFFFFFFFFFFFFFLL, 2);
2817 if (error == EWOULDBLOCK) {
2818 ip->flags |= HAMMER_INODE_WOULDBLOCK;
2819 error = 0;
2820 goto defer_buffer_flush;
2821 }
2822
b84de5af 2823 if (error)
cdb6e4e6 2824 goto done;
47637bff 2825
47f363f1
MD
2826 /*
2827 * Generate a REDO_TERM_TRUNC entry in the UNDO/REDO FIFO.
2828 *
2829 * XXX we do this even if we did not previously generate
2830 * a REDO_TRUNC record. This operation may enclose the
2831 * range for multiple prior truncation entries in the REDO
2832 * log.
2833 */
73896937
MD
2834 if (trans->hmp->version >= HAMMER_VOL_VERSION_FOUR &&
2835 (ip->flags & HAMMER_INODE_RDIRTY)) {
47f363f1
MD
2836 hammer_generate_redo(trans, ip, aligned_trunc_off,
2837 HAMMER_REDO_TERM_TRUNC,
2838 NULL, 0);
2839 }
2840
47637bff
MD
2841 /*
2842 * Clear the truncation flag on the backend after we have
47f363f1 2843 * completed the deletions. Backend data is now good again
47637bff 2844 * (including new records we are about to sync, below).
cb51be26
MD
2845 *
2846 * Leave sync_trunc_off intact. As we write additional
2847 * records the backend will update sync_trunc_off. This
2848 * tells the backend whether it can skip the overwrite
2849 * test. This should work properly even when the backend
2850 * writes full blocks where the truncation point straddles
2851 * the block because the comparison is against the base
2852 * offset of the record.
47637bff 2853 */
b84de5af 2854 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
cb51be26 2855 /* ip->sync_trunc_off = 0x7FFFFFFFFFFFFFFFLL; */
1f07f686
MD
2856 } else {
2857 error = 0;
f3b0f382
MD
2858 }
2859
1f07f686
MD
2860 /*
2861 * Now sync related records. These will typically be directory
6c1f89f4 2862 * entries, records tracking direct-writes, or delete-on-disk records.
1f07f686
MD
2863 */
2864 if (error == 0) {
2865 tmp_error = RB_SCAN(hammer_rec_rb_tree, &ip->rec_tree, NULL,
4e17f465 2866 hammer_sync_record_callback, &cursor);
1f07f686
MD
2867 if (tmp_error < 0)
2868 tmp_error = -tmp_error;
2869 if (tmp_error)
2870 error = tmp_error;
2871 }
bcac4bbb 2872 hammer_cache_node(&ip->cache[1], cursor.node);
cb51be26
MD
2873
2874 /*
43c665ae
MD
2875 * Re-seek for inode update, assuming our cache hasn't been ripped
2876 * out from under us.
cb51be26
MD
2877 */
2878 if (error == 0) {
4c286c36 2879 tmp_node = hammer_ref_node_safe(trans, &ip->cache[0], &error);
cb51be26 2880 if (tmp_node) {
5fa5c92f
MD
2881 hammer_cursor_downgrade(&cursor);
2882 hammer_lock_sh(&tmp_node->lock);
43c665ae
MD
2883 if ((tmp_node->flags & HAMMER_NODE_DELETED) == 0)
2884 hammer_cursor_seek(&cursor, tmp_node, 0);
5fa5c92f 2885 hammer_unlock(&tmp_node->lock);
cb51be26
MD
2886 hammer_rel_node(tmp_node);
2887 }
2888 error = 0;
2889 }
1f07f686
MD
2890
2891 /*
869e8f55
MD
2892 * If we are deleting the inode the frontend had better not have
2893 * any active references on elements making up the inode.
a9d52b76
MD
2894 *
2895 * The call to hammer_ip_delete_clean() cleans up auxiliary records
2896 * but not DB or DATA records. Those must have already been deleted
2897 * by the normal truncation mechanic.
1f07f686 2898 */
11ad5ade 2899 if (error == 0 && ip->sync_ino_data.nlinks == 0 &&
869e8f55
MD
2900 RB_EMPTY(&ip->rec_tree) &&
2901 (ip->sync_flags & HAMMER_INODE_DELETING) &&
2902 (ip->flags & HAMMER_INODE_DELETED) == 0) {
2903 int count1 = 0;
1f07f686 2904
a9d52b76 2905 error = hammer_ip_delete_clean(&cursor, ip, &count1);
869e8f55 2906 if (error == 0) {
06ad81ff 2907 ip->flags |= HAMMER_INODE_DELETED;
869e8f55
MD
2908 ip->sync_flags &= ~HAMMER_INODE_DELETING;
2909 ip->sync_flags &= ~HAMMER_INODE_TRUNCATED;
2910 KKASSERT(RB_EMPTY(&ip->rec_tree));
1f07f686 2911
869e8f55
MD
2912 /*
2913 * Set delete_tid in both the frontend and backend
2914 * copy of the inode record. The DELETED flag handles
47f363f1 2915 * this, do not set DDIRTY.
869e8f55 2916 */
02325004
MD
2917 ip->ino_leaf.base.delete_tid = trans->tid;
2918 ip->sync_ino_leaf.base.delete_tid = trans->tid;
2919 ip->ino_leaf.delete_ts = trans->time32;
2920 ip->sync_ino_leaf.delete_ts = trans->time32;
dd94f1b1 2921
1f07f686 2922
869e8f55
MD
2923 /*
2924 * Adjust the inode count in the volume header
2925 */
02325004 2926 hammer_sync_lock_sh(trans);
f36a9737 2927 if (ip->flags & HAMMER_INODE_ONDISK) {
02325004
MD
2928 hammer_modify_volume_field(trans,
2929 trans->rootvol,
f36a9737
MD
2930 vol0_stat_inodes);
2931 --ip->hmp->rootvol->ondisk->vol0_stat_inodes;
02325004 2932 hammer_modify_volume_done(trans->rootvol);
f36a9737 2933 }
02325004 2934 hammer_sync_unlock(trans);
869e8f55 2935 }
1f07f686 2936 }
b84de5af 2937
b84de5af 2938 if (error)
cdb6e4e6
MD
2939 goto done;
2940 ip->sync_flags &= ~HAMMER_INODE_BUFS;
c0ade690 2941
06ad81ff 2942defer_buffer_flush:
c0ade690
MD
2943 /*
2944 * Now update the inode's on-disk inode-data and/or on-disk record.
b84de5af 2945 * DELETED and ONDISK are managed only in ip->flags.
06ad81ff
MD
2946 *
2947 * In the case of a deferred buffer flush we still update the on-disk
2948 * inode to satisfy visibility requirements if there happen to be
2949 * directory dependencies.
c0ade690 2950 */
b84de5af 2951 switch(ip->flags & (HAMMER_INODE_DELETED | HAMMER_INODE_ONDISK)) {
c0ade690
MD
2952 case HAMMER_INODE_DELETED|HAMMER_INODE_ONDISK:
2953 /*
2954 * If deleted and on-disk, don't set any additional flags.
2955 * The delete flag takes care of things.
869e8f55
MD
2956 *
2957 * Clear flags which may have been set by the frontend.
c0ade690 2958 */
ddfdf542 2959 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
9192654c 2960 HAMMER_INODE_SDIRTY |
ddfdf542 2961 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
869e8f55 2962 HAMMER_INODE_DELETING);
c0ade690
MD
2963 break;
2964 case HAMMER_INODE_DELETED:
2965 /*
2966 * Take care of the case where a deleted inode was never
2967 * flushed to the disk in the first place.
869e8f55
MD
2968 *
2969 * Clear flags which may have been set by the frontend.
c0ade690 2970 */
ddfdf542 2971 ip->sync_flags &= ~(HAMMER_INODE_DDIRTY | HAMMER_INODE_XDIRTY |
9192654c 2972 HAMMER_INODE_SDIRTY |
ddfdf542 2973 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME |
869e8f55 2974 HAMMER_INODE_DELETING);
d26d0ae9 2975 while (RB_ROOT(&ip->rec_tree)) {
d36ec43b
MD
2976 hammer_record_t record = RB_ROOT(&ip->rec_tree);
2977 hammer_ref(&record->lock);
250aec18 2978 KKASSERT(hammer_oneref(&record->lock));
d36ec43b 2979 record->flags |= HAMMER_RECF_DELETED_BE;
3214ade6 2980 ++record->ip->rec_generation;
d36ec43b 2981 hammer_rel_mem_record(record);
d26d0ae9 2982 }
c0ade690
MD
2983 break;
2984 case HAMMER_INODE_ONDISK:
2985 /*
2986 * If already on-disk, do not set any additional flags.
2987 */
2988 break;
2989 default:
2990 /*
ddfdf542
MD
2991 * If not on-disk and not deleted, set DDIRTY to force
2992 * an initial record to be written.
b84de5af 2993 *
ddfdf542 2994 * Also set the create_tid in both the frontend and backend
b84de5af 2995 * copy of the inode record.
c0ade690 2996 */
02325004
MD
2997 ip->ino_leaf.base.create_tid = trans->tid;
2998 ip->ino_leaf.create_ts = trans->time32;
2999 ip->sync_ino_leaf.base.create_tid = trans->tid;
3000 ip->sync_ino_leaf.create_ts = trans->time32;
11ad5ade 3001 ip->sync_flags |= HAMMER_INODE_DDIRTY;
c0ade690
MD
3002 break;
3003 }
3004
3005 /*
47f363f1 3006 * If DDIRTY or SDIRTY is set, write out a new record.
9192654c
MD
3007 * If the inode is already on-disk the old record is marked as
3008 * deleted.
d113fda1
MD
3009 *
3010 * If DELETED is set hammer_update_inode() will delete the existing
3011 * record without writing out a new one.
3012 *
3013 * If *ONLY* the ITIMES flag is set we can update the record in-place.
c0ade690 3014 */
b84de5af 3015 if (ip->flags & HAMMER_INODE_DELETED) {
4e17f465 3016 error = hammer_update_inode(&cursor, ip);
b84de5af 3017 } else
9192654c 3018 if (!(ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY)) &&
ddfdf542 3019 (ip->sync_flags & (HAMMER_INODE_ATIME | HAMMER_INODE_MTIME))) {
4e17f465 3020 error = hammer_update_itimes(&cursor, ip);
d113fda1 3021 } else
9192654c
MD
3022 if (ip->sync_flags & (HAMMER_INODE_DDIRTY | HAMMER_INODE_SDIRTY |
3023 HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
4e17f465 3024 error = hammer_update_inode(&cursor, ip);
c0ade690 3025 }
4e17f465 3026done:
cdb6e4e6
MD
3027 if (error) {
3028 hammer_critical_error(ip->hmp, ip, error,
3029 "while syncing inode");
3030 }
4e17f465 3031 hammer_done_cursor(&cursor);
c0ade690 3032 return(error);
8cd0a023
MD
3033}
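
Restating the final switch compactly (no new behavior): DELETED|ONDISK
clears the frontend mod flags and lets the deletion path do the rest;
DELETED alone additionally throws away any remaining in-memory records;
ONDISK alone needs nothing extra; neither flag set stamps create_tid in
both inode copies and forces DDIRTY so an initial record is written.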
3034
1f07f686
MD
3035/*
3036 * This routine is called when the OS is no longer actively referencing
3037 * the inode (but might still be keeping it cached), or when releasing
3038 * the last reference to an inode.
3039 *
3040 * At this point if the inode's nlinks count is zero we want to destroy
3041 * it, which may mean destroying it on-media too.
3042 */
3bf2d80a 3043void
e8599db1 3044hammer_inode_unloadable_check(hammer_inode_t ip, int getvp)
1f07f686 3045{
e8599db1
MD
3046 struct vnode *vp;
3047
1f07f686 3048 /*
c4bae5fd
MD
3049 * Set the DELETING flag when the link count drops to 0 and the
3050 * OS no longer has any opens on the inode.
3051 *
3052 * The backend will clear DELETING (a mod flag) and set DELETED
3053 * (a state flag) when it is actually able to perform the
3054 * operation.
35a49944
MD
3055 *
3056 * Don't reflag the deletion if the flusher is currently syncing
3057 * one that was already flagged. A previously set DELETING flag
3058 * may bounce around flags and sync_flags until the operation is
3059 * completely done.
1f07f686 3060 */
11ad5ade 3061 if (ip->ino_data.nlinks == 0 &&
35a49944 3062 ((ip->flags | ip->sync_flags) & (HAMMER_INODE_DELETING|HAMMER_INODE_DELETED)) == 0) {
e8599db1
MD
3063 ip->flags |= HAMMER_INODE_DELETING;
3064 ip->flags |= HAMMER_INODE_TRUNCATED;
3065 ip->trunc_off = 0;
3066 vp = NULL;
3067 if (getvp) {
3068 if (hammer_get_vnode(ip, &vp) != 0)
3069 return;
3070 }
29ce0677 3071
29ce0677
MD
3072 /*
3073 * Final cleanup
3074 */
6362a262
MD
3075 if (ip->vp)
3076 nvtruncbuf(ip->vp, 0, HAMMER_BUFSIZE, 0);
3077 if (getvp)
e8599db1 3078 vput(vp);
1f07f686 3079 }
1f07f686
MD
3080}
3081
3bf2d80a 3082/*
7b6ccb11
MD
3083 * After potentially resolving a dependency the inode is tested
3084 * to determine whether it needs to be reflushed.
3bf2d80a 3085 */
1f07f686
MD
3086void
3087hammer_test_inode(hammer_inode_t ip)
3088{
3089 if (ip->flags & HAMMER_INODE_REFLUSH) {
3090 ip->flags &= ~HAMMER_INODE_REFLUSH;
3091 hammer_ref(&ip->lock);
3bf2d80a
MD
3092 if (ip->flags & HAMMER_INODE_RESIGNAL) {
3093 ip->flags &= ~HAMMER_INODE_RESIGNAL;
3094 hammer_flush_inode(ip, HAMMER_FLUSH_SIGNAL);
3095 } else {
3096 hammer_flush_inode(ip, 0);
3097 }
1f07f686
MD
3098 hammer_rel_inode(ip, 0);
3099 }
3100}
3101
9f5097dc 3102/*
7bc5b8c2
MD
3103 * Clear the RECLAIM flag on an inode. This occurs when the inode is
3104 * reassociated with a vp or just before it gets freed.
af209b0f 3105 *
82010f9f 3106 * Pipeline wakeups to threads blocked due to an excessive number of
ccf6a64d
MD
3107 * detached inodes. This typically occurs when atime updates accumulate
3108 * while scanning a directory tree.
9f5097dc 3109 */
7bc5b8c2 3110static void
ccf6a64d 3111hammer_inode_wakereclaims(hammer_inode_t ip)
9f5097dc 3112{
7bc5b8c2 3113 struct hammer_reclaim *reclaim;
d99d6bf5 3114 hammer_mount_t hmp = ip->hmp;
d99d6bf5 3115
7bc5b8c2 3116 if ((ip->flags & HAMMER_INODE_RECLAIM) == 0)
d99d6bf5 3117 return;
3897d7e9 3118
7bc5b8c2
MD
3119 --hammer_count_reclaiming;
3120 --hmp->inode_reclaims;
3121 ip->flags &= ~HAMMER_INODE_RECLAIM;
9f5097dc 3122
ccf6a64d
MD
3123 while ((reclaim = TAILQ_FIRST(&hmp->reclaim_list)) != NULL) {
3124 if (reclaim->count > 0 && --reclaim->count == 0) {
82010f9f
MD
3125 TAILQ_REMOVE(&hmp->reclaim_list, reclaim, entry);
3126 wakeup(reclaim);
3127 }
ff003b11 3128 if (hmp->inode_reclaims > hammer_limit_reclaim / 2)
ccf6a64d 3129 break;
9f5097dc
MD
3130 }
3131}
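
The net effect of the loop above (an observation, not new logic): waiters
are woken in FIFO order, one per reclaimed inode while the backlog remains
above hammer_limit_reclaim / 2, and drained entirely once the backlog
falls below that watermark.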
3132
4a2796f3
MD
3133/*
3134 * Setup our reclaim pipeline. We only let so many detached (and dirty)
ccf6a64d
MD
3135 * inodes build up before we start blocking. This routine is called
3136 * if a new inode is created or an inode is loaded from media.
4a2796f3
MD
3137 *
3138 * When we block we don't care *which* inode has finished reclaiming,
ccf6a64d 3139 * as lone as one does.
4a2796f3
MD
3140 */
3141void
e98f1b96 3142hammer_inode_waitreclaims(hammer_transaction_t trans)
4a2796f3 3143{
e98f1b96 3144 hammer_mount_t hmp = trans->hmp;
4a2796f3 3145 struct hammer_reclaim reclaim;
4a2796f3 3146
e98f1b96
MD
3147 /*
3148 * Track inode load
3149 */
3150 if (curthread->td_proc) {
3151 struct hammer_inostats *stats;
3152 int lower_limit;
3153
3154 stats = hammer_inode_inostats(hmp, curthread->td_proc->p_pid);
3155 ++stats->count;
3156
3157 if (stats->count > hammer_limit_reclaim / 2)
3158 stats->count = hammer_limit_reclaim / 2;
3159 lower_limit = hammer_limit_reclaim - stats->count;
3160 if (hammer_debug_general & 0x10000)
3161 kprintf("pid %5d limit %d\n", (int)curthread->td_proc->p_pid, lower_limit);
3162
3163 if (hmp->inode_reclaims < lower_limit)
3164 return;
3165 } else {
3166 /*
3167 * Default mode
3168 */
3169 if (hmp->inode_reclaims < hammer_limit_reclaim)
3170 return;
3171 }
ccf6a64d
MD
3172 reclaim.count = 1;
3173 TAILQ_INSERT_TAIL(&hmp->reclaim_list, &reclaim, entry);
3174 tsleep(&reclaim, 0, "hmrrcm", hz);
3175 if (reclaim.count > 0)
3176 TAILQ_REMOVE(&hmp->reclaim_list, &reclaim, entry);
82010f9f 3177}
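
A worked example of the per-pid throttle (hypothetical numbers): with
hammer_limit_reclaim = 4000, a process whose decayed stats->count is 1500
computes lower_limit = 4000 - 1500 = 2500 and begins sleeping in the
reclaim pipeline once 2500 inodes are detached, while a process with no
recent inode load only blocks at the full 4000.  Because count is clamped
to half the limit, no process's threshold can drop below 2000.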
4a2796f3 3178
858cc00a
MD
3179/*
3180 * Keep track of reclaim statistics on a per-pid basis using a loose
3181 * 4-way set associative hash table. Collisions inherit the count of
3182 * the previous entry.
3183 *
3184 * NOTE: We want to be careful here to limit the chain size. If the chain
3185 * size is too large a pid will spread its stats out over too many
3186 * entries under certain types of heavy filesystem activity and
3187 * wind up not delaying long enough.
3188 */
e98f1b96
MD
3189static
3190struct hammer_inostats *
3191hammer_inode_inostats(hammer_mount_t hmp, pid_t pid)
3192{
3193 struct hammer_inostats *stats;
3194 int delta;
3195 int chain;
858cc00a 3196 static int iterator; /* we don't care about MP races */
e98f1b96 3197
858cc00a
MD
3198 /*
3199 * Chain up to 4 times to find our entry.
3200 */
e98f1b96
MD
3201 for (chain = 0; chain < 4; ++chain) {
3202 stats = &hmp->inostats[(pid + chain) & HAMMER_INOSTATS_HMASK];
3203 if (stats->pid == pid)
3204 break;
3205 }
858cc00a
MD
3206
3207 /*
3208 * Replace one of the four chaining entries with our new entry.
3209 */
e98f1b96 3210 if (chain == 4) {
858cc00a
MD
3211 stats = &hmp->inostats[(pid + (iterator++ & 3)) &
3212 HAMMER_INOSTATS_HMASK];
e98f1b96
MD
3213 stats->pid = pid;
3214 }
3215
858cc00a
MD
3216 /*
3217 * Decay the entry
3218 */
e98f1b96
MD
3219 if (stats->count && stats->ltick != ticks) {
3220 delta = ticks - stats->ltick;
3221 stats->ltick = ticks;
3222 if (delta <= 0 || delta > hz * 60)
3223 stats->count = 0;
3224 else
3225 stats->count = stats->count * hz / (hz + delta);
3226 }
3227 if (hammer_debug_general & 0x10000)
3228 kprintf("pid %5d stats %d\n", (int)pid, stats->count);
3229 return (stats);
3230}
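
Decay arithmetic, worked (assuming hz = 100 ticks/second): a count of 1000
seen again after one second (delta = 100) becomes 1000 * 100 / (100 + 100)
= 500, so the per-pid load estimate roughly halves for each elapsed second;
an entry untouched for more than 60 seconds (delta > hz * 60) is zeroed
outright.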
3231
ccf6a64d
MD
3232#if 0
3233
82010f9f 3234/*
ccf6a64d
MD
3235 * XXX not used, doesn't work very well due to the large batching nature
3236 * of flushes.
3237 *
82010f9f
MD
3238 * A larger than normal backlog of inodes is sitting in the flusher,
3239 * enforce a general slowdown to let it catch up. This routine is only
3240 * called on completion of a non-flusher-related transaction which
3241 * performed B-Tree node I/O.
3242 *
3243 * It is possible for the flusher to stall in a continuous load.
3244 * blogbench -i1000 -o seems to do a good job generating this sort of load.
3245 * If the flusher is unable to catch up the inode count can bloat until
3246 * we run out of kvm.
3247 *
3248 * This is a bit of a hack.
3249 */
3250void
3251hammer_inode_waithard(hammer_mount_t hmp)
3252{
3253 /*
3254 * Hysteresis.
3255 */
3256 if (hmp->flags & HAMMER_MOUNT_FLUSH_RECOVERY) {
ff003b11 3257 if (hmp->inode_reclaims < hammer_limit_reclaim / 2 &&
82010f9f
MD
3258 hmp->count_iqueued < hmp->count_inodes / 20) {
3259 hmp->flags &= ~HAMMER_MOUNT_FLUSH_RECOVERY;
3260 return;
3261 }