Pre-2.0 release: Sync with HAMMER 64 - NFS and cross-device link fixes.
[dragonfly.git] / sys / vfs / hammer / hammer_object.c
66325755 1/*
b84de5af 2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
66325755
MD
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
8c585a28 34 * $DragonFly: src/sys/vfs/hammer/hammer_object.c,v 1.90.2.2 2008/07/19 04:51:09 dillon Exp $
66325755
MD
35 */
36
37#include "hammer.h"
38
47637bff 39static int hammer_mem_add(hammer_record_t record);
45a014dc 40static int hammer_mem_lookup(hammer_cursor_t cursor);
4e17f465 41static int hammer_mem_first(hammer_cursor_t cursor);
312de84d 42static int hammer_frontend_trunc_callback(hammer_record_t record,
0832c9bb 43 void *data __unused);
cb51be26 44static int hammer_record_needs_overwrite_delete(hammer_record_t record);
5fa5c92f
MD
45static int hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
46 hammer_btree_leaf_elm_t leaf);
8cd0a023 47
47637bff
MD
48struct rec_trunc_info {
49 u_int16_t rec_type;
50 int64_t trunc_off;
51};
52
66325755 53/*
0832c9bb 54 * Red-black tree support. Comparison code for insertion.
66325755 55 */
8cd0a023 56static int
a89aec1b 57hammer_rec_rb_compare(hammer_record_t rec1, hammer_record_t rec2)
8cd0a023 58{
11ad5ade 59 if (rec1->leaf.base.rec_type < rec2->leaf.base.rec_type)
8cd0a023 60 return(-1);
11ad5ade 61 if (rec1->leaf.base.rec_type > rec2->leaf.base.rec_type)
8cd0a023
MD
62 return(1);
63
11ad5ade 64 if (rec1->leaf.base.key < rec2->leaf.base.key)
8cd0a023 65 return(-1);
11ad5ade 66 if (rec1->leaf.base.key > rec2->leaf.base.key)
8cd0a023
MD
67 return(1);
68
ec4e8497 69 /*
d36ec43b 70 * Never match against an item deleted by the front-end.
71 *
72 * rec1 is greater than rec2 if rec1 is marked deleted.
73 * rec1 is less than rec2 if rec2 is marked deleted.
74 *
75 * Multiple deleted records may be present, so do not return 0
76 * if both are marked deleted.
77 */
78 if (rec1->flags & HAMMER_RECF_DELETED_FE)
79 return(1);
80 if (rec2->flags & HAMMER_RECF_DELETED_FE)
81 return(-1);
82
8cd0a023
MD
83 return(0);
84}
85
0832c9bb
MD
86/*
87 * Basic record comparison code similar to hammer_btree_cmp().
88 */
8cd0a023 89static int
0832c9bb 90hammer_rec_cmp(hammer_base_elm_t elm, hammer_record_t rec)
66325755 91{
0832c9bb 92 if (elm->rec_type < rec->leaf.base.rec_type)
d26d0ae9 93 return(-3);
0832c9bb 94 if (elm->rec_type > rec->leaf.base.rec_type)
d26d0ae9 95 return(3);
8cd0a023 96
0832c9bb 97 if (elm->key < rec->leaf.base.key)
8cd0a023 98 return(-2);
0832c9bb 99 if (elm->key > rec->leaf.base.key)
8cd0a023
MD
100 return(2);
101
cebe9493
MD
102 /*
103 * Never match against an item deleted by the front-end.
bf3b416b 104 * elm is less than rec if rec is marked deleted.
cebe9493
MD
105 */
106 if (rec->flags & HAMMER_RECF_DELETED_FE)
bf3b416b 107 return(-1);
0832c9bb
MD
108 return(0);
109}
110
111/*
112 * Special LOOKUP_INFO to locate an overlapping record. This is used by
113 * the reservation code to implement small-block records (whose keys will
114 * be different depending on data_len, when representing the same base
115 * offset).
116 *
117 * NOTE: The base file offset of a data record is (key - data_len), not (key).
118 */
119static int
120hammer_rec_overlap_compare(hammer_btree_leaf_elm_t leaf, hammer_record_t rec)
121{
122 if (leaf->base.rec_type < rec->leaf.base.rec_type)
123 return(-3);
124 if (leaf->base.rec_type > rec->leaf.base.rec_type)
125 return(3);
126
4a2796f3
MD
127 /*
128 * Overlap compare
129 */
0832c9bb 130 if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
cebe9493 131 /* leaf_end <= rec_beg */
0832c9bb
MD
132 if (leaf->base.key <= rec->leaf.base.key - rec->leaf.data_len)
133 return(-2);
cebe9493 134 /* leaf_beg >= rec_end */
0832c9bb
MD
135 if (leaf->base.key - leaf->data_len >= rec->leaf.base.key)
136 return(2);
137 } else {
138 if (leaf->base.key < rec->leaf.base.key)
139 return(-2);
140 if (leaf->base.key > rec->leaf.base.key)
141 return(2);
142 }
143
cebe9493
MD
144 /*
145 * Never match against an item deleted by the front-end.
bf3b416b 146 * leaf is less than rec if rec is marked deleted.
147 *
148 * We must still return the proper code for the scan to continue
149 * along the correct branches.
cebe9493 150 */
4a2796f3
MD
151 if (rec->flags & HAMMER_RECF_DELETED_FE) {
152 if (leaf->base.key < rec->leaf.base.key)
153 return(-2);
154 if (leaf->base.key > rec->leaf.base.key)
155 return(2);
bf3b416b 156 return(-1);
4a2796f3 157 }
8cd0a023 158 return(0);
66325755
MD
159}
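
/*
 * Illustrative sketch only (not HAMMER code): the byte-range view behind the
 * DATA-record comparisons above.  A DATA record's key is the file offset one
 * byte past the end of its data, so the record covers [key - data_len, key).
 * The -2/+2 cases in hammer_rec_overlap_compare() are exactly the
 * "ranges do not overlap" tests below.  The helper name is hypothetical.
 */
static __inline int
example_data_ranges_overlap(int64_t key1, int32_t len1,
			    int64_t key2, int32_t len2)
{
	if (key1 <= key2 - len2)	/* range 1 ends before range 2 begins */
		return(0);
	if (key1 - len1 >= key2)	/* range 1 begins after range 2 ends */
		return(0);
	return(1);			/* otherwise the byte ranges overlap */
}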
160
6b4f890b 161/*
7f7c1f84 162 * RB_SCAN comparison code for hammer_mem_first(). The argument order
6b4f890b 163 * is reversed so the comparison result has to be negated. key_beg and
7f7c1f84
MD
164 * key_end are both range-inclusive.
165 *
7f7c1f84 166 * Localized deletions are not cached in-memory.
6b4f890b
MD
167 */
168static
169int
170hammer_rec_scan_cmp(hammer_record_t rec, void *data)
171{
172 hammer_cursor_t cursor = data;
173 int r;
174
0832c9bb 175 r = hammer_rec_cmp(&cursor->key_beg, rec);
7f7c1f84 176 if (r > 1)
6b4f890b 177 return(-1);
0832c9bb 178 r = hammer_rec_cmp(&cursor->key_end, rec);
7f7c1f84 179 if (r < -1)
6b4f890b
MD
180 return(1);
181 return(0);
182}
183
2ae23e2e
MD
184/*
185 * This compare function is used when simply looking up key_beg.
186 */
187static
188int
189hammer_rec_find_cmp(hammer_record_t rec, void *data)
190{
191 hammer_cursor_t cursor = data;
192 int r;
193
0832c9bb 194 r = hammer_rec_cmp(&cursor->key_beg, rec);
2ae23e2e
MD
195 if (r > 1)
196 return(-1);
197 if (r < -1)
198 return(1);
199 return(0);
200}
201
47637bff
MD
202/*
203 * Locate blocks within the truncation range. Partial blocks do not count.
204 */
205static
206int
207hammer_rec_trunc_cmp(hammer_record_t rec, void *data)
208{
209 struct rec_trunc_info *info = data;
210
211 if (rec->leaf.base.rec_type < info->rec_type)
212 return(-1);
213 if (rec->leaf.base.rec_type > info->rec_type)
214 return(1);
215
216 switch(rec->leaf.base.rec_type) {
217 case HAMMER_RECTYPE_DB:
218 /*
219 * DB record key is not beyond the truncation point, retain.
220 */
221 if (rec->leaf.base.key < info->trunc_off)
222 return(-1);
223 break;
224 case HAMMER_RECTYPE_DATA:
225 /*
226 * DATA record offset start is not beyond the truncation point,
227 * retain.
228 */
229 if (rec->leaf.base.key - rec->leaf.data_len < info->trunc_off)
230 return(-1);
231 break;
232 default:
233 panic("hammer_rec_trunc_cmp: unexpected record type");
234 }
235
236 /*
237 * The record start is >= the truncation point, return match,
238 * the record should be destroyed.
239 */
240 return(0);
241}
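
/*
 * Illustrative sketch only (not HAMMER code): the retain-vs-destroy rule
 * hammer_rec_trunc_cmp() applies to DATA records.  A record with key K and
 * data_len L covers [K - L, K); it is retained across a truncation to
 * trunc_off only when its starting offset lies below the truncation point.
 * Partial blocks are left to the vnops setattr code, as noted above.  The
 * helper name is hypothetical.
 */
static __inline int
example_data_rec_survives_trunc(int64_t key, int32_t data_len,
				int64_t trunc_off)
{
	return((key - data_len) < trunc_off);	/* non-zero: retain */
}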
242
8cd0a023
MD
243RB_GENERATE(hammer_rec_rb_tree, hammer_record, rb_node, hammer_rec_rb_compare);
244RB_GENERATE_XLOOKUP(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
0832c9bb 245 hammer_rec_overlap_compare, hammer_btree_leaf_elm_t);
8cd0a023 246
a89aec1b 247/*
d26d0ae9 248 * Allocate a record for the caller to finish filling in. The record is
b3deaf57 249 * returned referenced.
a89aec1b
MD
250 */
251hammer_record_t
11ad5ade 252hammer_alloc_mem_record(hammer_inode_t ip, int data_len)
a89aec1b
MD
253{
254 hammer_record_t record;
255
b3deaf57 256 ++hammer_count_records;
df301614
MD
257 record = kmalloc(sizeof(*record), M_HAMMER,
258 M_WAITOK | M_ZERO | M_USE_RESERVE);
1f07f686 259 record->flush_state = HAMMER_FST_IDLE;
a89aec1b 260 record->ip = ip;
11ad5ade
MD
261 record->leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
262 record->leaf.data_len = data_len;
d26d0ae9 263 hammer_ref(&record->lock);
11ad5ade
MD
264
265 if (data_len) {
266 record->data = kmalloc(data_len, M_HAMMER, M_WAITOK | M_ZERO);
267 record->flags |= HAMMER_RECF_ALLOCDATA;
268 ++hammer_count_record_datas;
269 }
270
a89aec1b
MD
271 return (record);
272}
273
b84de5af 274void
af209b0f 275hammer_wait_mem_record_ident(hammer_record_t record, const char *ident)
b84de5af 276{
1f07f686 277 while (record->flush_state == HAMMER_FST_FLUSH) {
b84de5af 278 record->flags |= HAMMER_RECF_WANTED;
af209b0f 279 tsleep(record, 0, ident, 0);
b84de5af
MD
280 }
281}
282
283/*
d36ec43b
MD
284 * Called from the backend, hammer_inode.c, after a record has been
285 * flushed to disk. The record has been exclusively locked by the
286 * caller and interlocked with BE.
b84de5af 287 *
d36ec43b
MD
288 * We clean up the state, unlock, and release the record (the record
289 * was referenced by the fact that it was in the HAMMER_FST_FLUSH state).
b84de5af
MD
290 */
291void
d36ec43b 292hammer_flush_record_done(hammer_record_t record, int error)
b84de5af 293{
1f07f686 294 hammer_inode_t target_ip;
1f07f686
MD
295
296 KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
d36ec43b
MD
297 KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
298
299 if (error) {
300 /*
301 * An error occurred; the backend was unable to sync the
302 * record to its media. Leave the record intact.
303 */
925c688a
MD
304 hammer_critical_error(record->ip->hmp, record->ip, error,
305 "while flushing record");
d36ec43b 306 }
4e17f465 307
7a61b85d
MD
308 --record->flush_group->refs;
309 record->flush_group = NULL;
310
4e17f465 311 if (record->flags & HAMMER_RECF_DELETED_BE) {
1f07f686
MD
312 if ((target_ip = record->target_ip) != NULL) {
313 TAILQ_REMOVE(&target_ip->target_list, record,
314 target_entry);
315 record->target_ip = NULL;
316 hammer_test_inode(target_ip);
317 }
318 record->flush_state = HAMMER_FST_IDLE;
319 } else {
c4bae5fd 320 if (record->target_ip) {
1f07f686 321 record->flush_state = HAMMER_FST_SETUP;
c4bae5fd
MD
322 hammer_test_inode(record->ip);
323 hammer_test_inode(record->target_ip);
324 } else {
1f07f686 325 record->flush_state = HAMMER_FST_IDLE;
c4bae5fd 326 }
1f07f686 327 }
d36ec43b 328 record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
b84de5af
MD
329 if (record->flags & HAMMER_RECF_WANTED) {
330 record->flags &= ~HAMMER_RECF_WANTED;
331 wakeup(record);
332 }
333 hammer_rel_mem_record(record);
334}
335
a89aec1b 336/*
b3deaf57
MD
337 * Release a memory record. Records marked for deletion are immediately
338 * removed from the RB-Tree but otherwise left intact until the last ref
339 * goes away.
a89aec1b
MD
340 */
341void
b3deaf57 342hammer_rel_mem_record(struct hammer_record *record)
a89aec1b 343{
1b0ab2c3
MD
344 hammer_mount_t hmp;
345 hammer_reserve_t resv;
346 hammer_inode_t ip;
347 hammer_inode_t target_ip;
1f07f686 348
b3deaf57 349 hammer_unref(&record->lock);
b33e2cc0 350
0832c9bb
MD
351 if (record->lock.refs == 0) {
352 /*
353 * Upon release of the last reference, wake up any waiters.
354 * The record structure may get destroyed so callers will
355 * loop up and do a relookup.
bf3b416b
MD
356 *
357 * WARNING! Record must be removed from RB-TREE before we
358 * might possibly block. hammer_test_inode() can block!
0832c9bb
MD
359 */
360 ip = record->ip;
1b0ab2c3 361 hmp = ip->hmp;
0832c9bb
MD
362
363 /*
364 * Upon release of the last reference a record marked deleted
365 * is destroyed.
366 */
367 if (record->flags & HAMMER_RECF_DELETED_FE) {
bf3b416b 368 KKASSERT(ip->lock.refs > 0);
1f07f686
MD
369 KKASSERT(record->flush_state != HAMMER_FST_FLUSH);
370
bf3b416b
MD
371 /*
372 * target_ip may have zero refs, we have to ref it
373 * to prevent it from being ripped out from under
374 * us.
375 */
1f07f686
MD
376 if ((target_ip = record->target_ip) != NULL) {
377 TAILQ_REMOVE(&target_ip->target_list,
378 record, target_entry);
379 record->target_ip = NULL;
bf3b416b 380 hammer_ref(&target_ip->lock);
1f07f686 381 }
d36ec43b 382
b84de5af
MD
383 if (record->flags & HAMMER_RECF_ONRBTREE) {
384 RB_REMOVE(hammer_rec_rb_tree,
385 &record->ip->rec_tree,
386 record);
47637bff 387 KKASSERT(ip->rsv_recs > 0);
1b0ab2c3 388 --hmp->rsv_recs;
47637bff 389 --ip->rsv_recs;
1b0ab2c3 390 hmp->rsv_databytes -= record->leaf.data_len;
b84de5af 391 record->flags &= ~HAMMER_RECF_ONRBTREE;
a99b9ea2 392
ae8bb789
MD
393 if (RB_EMPTY(&record->ip->rec_tree)) {
394 record->ip->flags &= ~HAMMER_INODE_XDIRTY;
0832c9bb 395 record->ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
ae8bb789
MD
396 hammer_test_inode(record->ip);
397 }
b84de5af 398 }
bf3b416b 399
1b0ab2c3
MD
400 /*
401 * We must wait for any direct-IO to complete before
402 * we can destroy the record.
403 */
404 if (record->flags & HAMMER_RECF_DIRECT_IO)
405 hammer_io_direct_wait(record);
406
407
bf3b416b
MD
408 /*
409 * Do this test after removing record from the B-Tree.
410 */
411 if (target_ip) {
412 hammer_test_inode(target_ip);
413 hammer_rel_inode(target_ip, 0);
414 }
415
b3deaf57
MD
416 if (record->flags & HAMMER_RECF_ALLOCDATA) {
417 --hammer_count_record_datas;
418 kfree(record->data, M_HAMMER);
419 record->flags &= ~HAMMER_RECF_ALLOCDATA;
420 }
1b0ab2c3
MD
421
422 /*
423 * Release the reservation. If the record was not
424 * committed, undo the reservation before
425 * releasing it.
426 */
427 if ((resv = record->resv) != NULL) {
428 if ((record->flags & HAMMER_RECF_COMMITTED) == 0) {
429 hammer_blockmap_reserve_undo(
430 resv,
431 record->leaf.data_offset,
432 record->leaf.data_len);
433 }
434 hammer_blockmap_reserve_complete(hmp, resv);
0832c9bb
MD
435 record->resv = NULL;
436 }
b3deaf57
MD
437 record->data = NULL;
438 --hammer_count_records;
439 kfree(record, M_HAMMER);
a89aec1b 440 }
a89aec1b 441 }
a89aec1b
MD
442}
443
b84de5af 444/*
a5fddc16
MD
445 * Record visibility depends on whether the record is being accessed by
446 * the backend or the frontend.
447 *
448 * Return non-zero if the record is visible, zero if it isn't or if it is
449 * deleted.
b84de5af
MD
450 */
451static __inline
452int
a5fddc16 453hammer_ip_iterate_mem_good(hammer_cursor_t cursor, hammer_record_t record)
b84de5af
MD
454{
455 if (cursor->flags & HAMMER_CURSOR_BACKEND) {
a5fddc16
MD
456 if (record->flags & HAMMER_RECF_DELETED_BE)
457 return(0);
b84de5af 458 } else {
a5fddc16 459 if (record->flags & HAMMER_RECF_DELETED_FE)
b84de5af
MD
460 return(0);
461 }
462 return(1);
463}
464
a89aec1b 465/*
2ae23e2e
MD
466 * This callback is used as part of the RB_SCAN function for in-memory
467 * records. We terminate it (return -1) as soon as we get a match.
6b4f890b 468 *
b84de5af
MD
469 * This routine is used by frontend code.
470 *
2ae23e2e
MD
471 * The primary compare code does not account for ASOF lookups. This
472 * code handles that case as well as a few others.
6b4f890b
MD
473 */
474static
475int
476hammer_rec_scan_callback(hammer_record_t rec, void *data)
477{
478 hammer_cursor_t cursor = data;
479
b33e2cc0
MD
480 /*
481 * We terminate on success, so this should be NULL on entry.
482 */
483 KKASSERT(cursor->iprec == NULL);
484
485 /*
b84de5af 486 * Skip if the record was marked deleted.
b33e2cc0 487 */
b84de5af 488 if (hammer_ip_iterate_mem_good(cursor, rec) == 0)
b33e2cc0
MD
489 return(0);
490
7f7c1f84
MD
491 /*
492 * Skip if not visible due to our as-of TID
493 */
d5530d22 494 if (cursor->flags & HAMMER_CURSOR_ASOF) {
11ad5ade 495 if (cursor->asof < rec->leaf.base.create_tid)
7f7c1f84 496 return(0);
11ad5ade
MD
497 if (rec->leaf.base.delete_tid &&
498 cursor->asof >= rec->leaf.base.delete_tid) {
7f7c1f84
MD
499 return(0);
500 }
501 }
502
503 /*
3f43fb33
MD
504 * ref the record. The record is protected from backend B-Tree
505 * interactions by virtue of the cursor's IP lock.
7f7c1f84 506 */
b33e2cc0 507 hammer_ref(&rec->lock);
b84de5af 508
b33e2cc0
MD
509 /*
510 * The record may have been deleted while we were blocked.
511 */
b84de5af
MD
512 if (hammer_ip_iterate_mem_good(cursor, rec) == 0) {
513 hammer_rel_mem_record(rec);
b33e2cc0
MD
514 return(0);
515 }
516
517 /*
518 * Set the matching record and stop the scan.
519 */
520 cursor->iprec = rec;
521 return(-1);
6b4f890b
MD
522}
523
2ae23e2e
MD
524
525/*
526 * Lookup an in-memory record given the key specified in the cursor. Works
527 * just like hammer_btree_lookup() but operates on an inode's in-memory
528 * record list.
529 *
530 * The lookup must fail if the record is marked for deferred deletion.
531 */
532static
533int
45a014dc 534hammer_mem_lookup(hammer_cursor_t cursor)
2ae23e2e
MD
535{
536 int error;
537
45a014dc 538 KKASSERT(cursor->ip);
2ae23e2e
MD
539 if (cursor->iprec) {
540 hammer_rel_mem_record(cursor->iprec);
541 cursor->iprec = NULL;
542 }
45a014dc 543 hammer_rec_rb_tree_RB_SCAN(&cursor->ip->rec_tree, hammer_rec_find_cmp,
2ae23e2e
MD
544 hammer_rec_scan_callback, cursor);
545
546 if (cursor->iprec == NULL)
547 error = ENOENT;
548 else
549 error = 0;
550 return(error);
551}
552
553/*
554 * hammer_mem_first() - locate the first in-memory record matching the
555 * cursor within the bounds of the key range.
556 */
6b4f890b
MD
557static
558int
4e17f465 559hammer_mem_first(hammer_cursor_t cursor)
6b4f890b 560{
4e17f465
MD
561 hammer_inode_t ip;
562
563 ip = cursor->ip;
564 KKASSERT(ip != NULL);
565
b3deaf57
MD
566 if (cursor->iprec) {
567 hammer_rel_mem_record(cursor->iprec);
568 cursor->iprec = NULL;
569 }
b3deaf57 570
6b4f890b
MD
571 hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
572 hammer_rec_scan_callback, cursor);
7f7c1f84
MD
573
574 /*
575 * Adjust scan.node and keep it linked into the RB-tree so we can
576 * hold the cursor through third party modifications of the RB-tree.
577 */
4e17f465 578 if (cursor->iprec)
6b4f890b 579 return(0);
6b4f890b
MD
580 return(ENOENT);
581}
582
a89aec1b
MD
583/************************************************************************
584 * HAMMER IN-MEMORY RECORD FUNCTIONS *
585 ************************************************************************
586 *
587 * These functions manipulate in-memory records. Such records typically
588 * exist prior to being committed to disk or indexed via the on-disk B-Tree.
589 */
590
8cd0a023
MD
591/*
592 * Add a directory entry (dip,ncp) which references inode (ip).
593 *
594 * Note that the low 32 bits of the namekey are set temporarily to create
595 * a unique in-memory record, and may be modified a second time when the
596 * record is synchronized to disk. In particular, the low 32 bits cannot be
597 * all 0's when synching to disk, which is not handled here.
5a930e66
MD
598 *
599 * NOTE: bytes does not include any terminating \0 on name, and name might
600 * not be terminated.
8cd0a023 601 */
66325755 602int
a89aec1b 603hammer_ip_add_directory(struct hammer_transaction *trans,
5a930e66 604 struct hammer_inode *dip, const char *name, int bytes,
66325755
MD
605 struct hammer_inode *ip)
606{
c82af904 607 struct hammer_cursor cursor;
a89aec1b 608 hammer_record_t record;
8cd0a023 609 int error;
c82af904
MD
610 int count;
611 u_int32_t iterator;
8cd0a023 612
11ad5ade 613 record = hammer_alloc_mem_record(dip, HAMMER_ENTRY_SIZE(bytes));
6b4f890b
MD
614 if (++trans->hmp->namekey_iterator == 0)
615 ++trans->hmp->namekey_iterator;
8cd0a023 616
1f07f686 617 record->type = HAMMER_MEM_RECORD_ADD;
5a930e66
MD
618 record->leaf.base.localization = dip->obj_localization +
619 HAMMER_LOCALIZE_MISC;
11ad5ade 620 record->leaf.base.obj_id = dip->obj_id;
5a930e66 621 record->leaf.base.key = hammer_directory_namekey(name, bytes);
11ad5ade
MD
622 record->leaf.base.key += trans->hmp->namekey_iterator;
623 record->leaf.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
624 record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
625 record->data->entry.obj_id = ip->obj_id;
43c665ae 626 record->data->entry.localization = ip->obj_localization;
5a930e66 627 bcopy(name, record->data->entry.name, bytes);
11ad5ade
MD
628
629 ++ip->ino_data.nlinks;
47637bff 630 hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
ec4e8497 631
c82af904
MD
632 /*
633 * Find an unused namekey. Both the in-memory record tree and
634 * the B-Tree are checked. Exact matches also match create_tid
635 * so use an ASOF search to (mostly) ignore it.
06ad81ff
MD
636 *
637 * delete-visibility is set so pending deletions do not give us
638 * a false-negative on our ability to use an iterator.
c82af904
MD
639 */
640 hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
641 cursor.key_beg = record->leaf.base;
642 cursor.flags |= HAMMER_CURSOR_ASOF;
06ad81ff 643 cursor.flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
c82af904
MD
644 cursor.asof = ip->obj_asof;
645
646 count = 0;
647 while (hammer_ip_lookup(&cursor) == 0) {
648 iterator = (u_int32_t)record->leaf.base.key + 1;
649 if (iterator == 0)
650 iterator = 1;
651 record->leaf.base.key &= ~0xFFFFFFFFLL;
652 record->leaf.base.key |= iterator;
653 cursor.key_beg.key = record->leaf.base.key;
654 if (++count == 1000000000) {
655 hammer_rel_mem_record(record);
656 error = ENOSPC;
657 goto failed;
658 }
659 }
660
ec4e8497 661 /*
1f07f686
MD
662 * The target inode and the directory entry are bound together.
663 */
664 record->target_ip = ip;
665 record->flush_state = HAMMER_FST_SETUP;
666 TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);
667
668 /*
669 * The inode now has a dependency and must be taken out of the idle
670 * state. An inode not in an idle state is given an extra reference.
ec4e8497 671 */
1f07f686
MD
672 if (ip->flush_state == HAMMER_FST_IDLE) {
673 hammer_ref(&ip->lock);
674 ip->flush_state = HAMMER_FST_SETUP;
675 }
47637bff 676 error = hammer_mem_add(record);
8c585a28
MD
677 if (error == 0) {
678 dip->ino_data.mtime = trans->time;
679 hammer_modify_inode(dip, HAMMER_INODE_MTIME);
680 }
c82af904
MD
681failed:
682 hammer_done_cursor(&cursor);
8cd0a023
MD
683 return(error);
684}
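
/*
 * Illustrative sketch only (not HAMMER code): how the directory key used
 * above resolves hash collisions.  hammer_directory_namekey() supplies the
 * name-hash portion of the 64-bit key, and a non-zero 32-bit iterator is
 * placed in the low bits so colliding names still receive unique keys; the
 * collision loop in hammer_ip_add_directory() masks and re-ORs the low 32
 * bits in the same way.  The helper name is hypothetical.
 */
static __inline int64_t
example_dirent_key(int64_t namekey, u_int32_t iterator)
{
	int64_t key;

	key = namekey & ~0xFFFFFFFFLL;	/* keep the hash portion */
	key |= iterator;		/* non-zero low-32-bit discriminator */
	return(key);
}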
685
686/*
a89aec1b
MD
687 * Delete the directory entry and update the inode link count. The
688 * cursor must be seeked to the directory entry record being deleted.
689 *
b84de5af
MD
690 * The related inode should be share-locked by the caller. The caller is
691 * on the frontend.
6a37e7e4
MD
692 *
693 * This function can return EDEADLK requiring the caller to terminate
b84de5af 694 * the cursor, any locks, wait on the returned record, and retry.
8cd0a023 695 */
a89aec1b
MD
696int
697hammer_ip_del_directory(struct hammer_transaction *trans,
698 hammer_cursor_t cursor, struct hammer_inode *dip,
699 struct hammer_inode *ip)
8cd0a023 700{
b84de5af 701 hammer_record_t record;
a89aec1b 702 int error;
8cd0a023 703
47637bff 704 if (hammer_cursor_inmem(cursor)) {
b84de5af
MD
705 /*
706 * In-memory (unsynchronized) records can simply be freed.
707 * Even though the HAMMER_RECF_DELETED_FE flag is ignored
708 * by the backend, we must still avoid races against the
709 * backend potentially syncing the record to the media.
710 *
711 * We cannot call hammer_ip_delete_record(), that routine may
712 * only be called from the backend.
713 */
714 record = cursor->iprec;
d36ec43b 715 if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
b84de5af
MD
716 KKASSERT(cursor->deadlk_rec == NULL);
717 hammer_ref(&record->lock);
718 cursor->deadlk_rec = record;
719 error = EDEADLK;
720 } else {
1f07f686 721 KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
d36ec43b 722 record->flags |= HAMMER_RECF_DELETED_FE;
b84de5af
MD
723 error = 0;
724 }
725 } else {
726 /*
727 * If the record is on-disk we have to queue the deletion by
728 * the record's key. This also causes lookups to skip the
729 * record.
730 */
98f7132d
MD
731 KKASSERT(dip->flags &
732 (HAMMER_INODE_ONDISK | HAMMER_INODE_DONDISK));
11ad5ade 733 record = hammer_alloc_mem_record(dip, 0);
1f07f686 734 record->type = HAMMER_MEM_RECORD_DEL;
11ad5ade 735 record->leaf.base = cursor->leaf->base;
1f07f686
MD
736
737 record->target_ip = ip;
738 record->flush_state = HAMMER_FST_SETUP;
739 TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);
b84de5af 740
ec4e8497 741 /*
1f07f686
MD
742 * The inode now has a dependency and must be taken out of
743 * the idle state. An inode not in an idle state is given
744 * an extra reference.
ec4e8497 745 */
1f07f686
MD
746 if (ip->flush_state == HAMMER_FST_IDLE) {
747 hammer_ref(&ip->lock);
748 ip->flush_state = HAMMER_FST_SETUP;
749 }
ec4e8497 750
47637bff 751 error = hammer_mem_add(record);
b84de5af 752 }
a89aec1b
MD
753
754 /*
c0ade690 755 * One less link. The file may still be open in the OS even after
3bf2d80a 756 * all links have gone away.
6a37e7e4
MD
757 *
758 * We have to terminate the cursor before syncing the inode to
3bf2d80a
MD
759 * avoid deadlocking against ourselves. XXX this may no longer
760 * be true.
855942b6 761 *
3bf2d80a
MD
762 * If nlinks drops to zero and the vnode is inactive (or there is
763 * no vnode), call hammer_inode_unloadable_check() to zonk the
764 * inode. If we don't do this here the inode will not be destroyed
765 * on-media until we unmount.
a89aec1b
MD
766 */
767 if (error == 0) {
11ad5ade 768 --ip->ino_data.nlinks;
47637bff 769 hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
11ad5ade 770 if (ip->ino_data.nlinks == 0 &&
d113fda1 771 (ip->vp == NULL || (ip->vp->v_flag & VINACTIVE))) {
6a37e7e4 772 hammer_done_cursor(cursor);
e8599db1 773 hammer_inode_unloadable_check(ip, 1);
3bf2d80a 774 hammer_flush_inode(ip, 0);
d113fda1 775 }
8c585a28
MD
776 dip->ino_data.mtime = trans->time;
777 hammer_modify_inode(dip, HAMMER_INODE_MTIME);
76376933 778
a89aec1b
MD
779 }
780 return(error);
66325755
MD
781}
782
7a04d74f
MD
783/*
784 * Add a record to an inode.
785 *
786 * The caller must allocate the record with hammer_alloc_mem_record(ip) and
787 * initialize the following additional fields:
788 *
b84de5af
MD
789 * The related inode should be share-locked by the caller. The caller is
790 * on the frontend.
791 *
7a04d74f
MD
792 * record->leaf.base.key
793 * record->leaf.base.rec_type
794 * record->leaf.data_len
47197d71 795 * record->data (a copy will be kmalloc'd if it cannot be embedded)
7a04d74f
MD
796 */
797int
798hammer_ip_add_record(struct hammer_transaction *trans, hammer_record_t record)
799{
800 hammer_inode_t ip = record->ip;
801 int error;
7a04d74f 802
2f85fa4d 803 KKASSERT(record->leaf.base.localization != 0);
11ad5ade
MD
804 record->leaf.base.obj_id = ip->obj_id;
805 record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
47637bff 806 error = hammer_mem_add(record);
7a04d74f
MD
807 return(error);
808}
809
8cd0a023 810/*
47637bff
MD
811 * Locate a bulk record in-memory. Bulk records allow disk space to be
812 * reserved so the front-end can flush large data writes without having
813 * to queue the BIO to the flusher. Only the related record gets queued
814 * to the flusher.
815 */
816static hammer_record_t
817hammer_ip_get_bulk(hammer_inode_t ip, off_t file_offset, int bytes)
818{
819 hammer_record_t record;
0832c9bb
MD
820 struct hammer_btree_leaf_elm leaf;
821
822 bzero(&leaf, sizeof(leaf));
823 leaf.base.obj_id = ip->obj_id;
824 leaf.base.key = file_offset + bytes;
825 leaf.base.create_tid = 0;
826 leaf.base.delete_tid = 0;
827 leaf.base.rec_type = HAMMER_RECTYPE_DATA;
828 leaf.base.obj_type = 0; /* unused */
829 leaf.base.btype = HAMMER_BTREE_TYPE_RECORD; /* unused */
5a930e66 830 leaf.base.localization = ip->obj_localization + HAMMER_LOCALIZE_MISC;
0832c9bb
MD
831 leaf.data_len = bytes;
832
833 record = hammer_rec_rb_tree_RB_LOOKUP_INFO(&ip->rec_tree, &leaf);
47637bff
MD
834 if (record)
835 hammer_ref(&record->lock);
836 return(record);
837}
838
839/*
840 * Reserve blockmap space placemarked with an in-memory record.
841 *
bcac4bbb
MD
842 * This routine is called by the frontend in order to be able to directly
843 * flush a buffer cache buffer. The frontend has locked the related buffer
844 * cache buffers and we should be able to manipulate any overlapping
845 * in-memory records.
47637bff
MD
846 */
847hammer_record_t
0832c9bb
MD
848hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, void *data, int bytes,
849 int *errorp)
47637bff
MD
850{
851 hammer_record_t record;
852 hammer_record_t conflict;
0832c9bb 853 int zone;
bf3b416b 854 int flags;
47637bff
MD
855
856 /*
7bc5b8c2
MD
857 * Deal with conflicting in-memory records. We cannot have multiple
858 * in-memory records for the same offset without seriously confusing
859 * the backend, including but not limited to the backend issuing
860 * delete-create-delete sequences and asserting on the delete_tid
861 * being the same as the create_tid.
47637bff 862 *
7bc5b8c2
MD
863 * If we encounter a record with the backend interlock set we cannot
864 * immediately delete it without confusing the backend.
47637bff 865 */
0832c9bb 866 while ((conflict = hammer_ip_get_bulk(ip, file_offset, bytes)) !=NULL) {
7bc5b8c2
MD
867 if (conflict->flags & HAMMER_RECF_INTERLOCK_BE) {
868 conflict->flags |= HAMMER_RECF_WANTED;
869 tsleep(conflict, 0, "hmrrc3", 0);
bf3b416b
MD
870 } else {
871 conflict->flags |= HAMMER_RECF_DELETED_FE;
47637bff 872 }
7bc5b8c2 873 hammer_rel_mem_record(conflict);
47637bff
MD
874 }
875
876 /*
0832c9bb
MD
877 * Create a record to cover the direct write. This is called with
878 * the related BIO locked so there should be no possible conflict.
879 *
880 * The backend is responsible for finalizing the space reserved in
881 * this record.
882 *
883 * XXX bytes not aligned, depend on the reservation code to
884 * align the reservation.
47637bff
MD
885 */
886 record = hammer_alloc_mem_record(ip, 0);
0832c9bb
MD
887 zone = (bytes >= HAMMER_BUFSIZE) ? HAMMER_ZONE_LARGE_DATA_INDEX :
888 HAMMER_ZONE_SMALL_DATA_INDEX;
889 record->resv = hammer_blockmap_reserve(ip->hmp, zone, bytes,
890 &record->leaf.data_offset,
891 errorp);
892 if (record->resv == NULL) {
cebe9493 893 kprintf("hammer_ip_add_bulk: reservation failed\n");
47637bff
MD
894 hammer_rel_mem_record(record);
895 return(NULL);
896 }
897 record->type = HAMMER_MEM_RECORD_DATA;
898 record->leaf.base.rec_type = HAMMER_RECTYPE_DATA;
899 record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
900 record->leaf.base.obj_id = ip->obj_id;
901 record->leaf.base.key = file_offset + bytes;
5a930e66
MD
902 record->leaf.base.localization = ip->obj_localization +
903 HAMMER_LOCALIZE_MISC;
47637bff 904 record->leaf.data_len = bytes;
ddfdf542 905 hammer_crc_set_leaf(data, &record->leaf);
bf3b416b 906 flags = record->flags;
47637bff
MD
907
908 hammer_ref(&record->lock); /* mem_add eats a reference */
0832c9bb 909 *errorp = hammer_mem_add(record);
4a2796f3
MD
910 if (*errorp) {
911 conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
912 kprintf("hammer_ip_add_bulk: error %d conflict %p file_offset %lld bytes %d\n",
913 *errorp, conflict, file_offset, bytes);
914 if (conflict)
915 kprintf("conflict %lld %d\n", conflict->leaf.base.key, conflict->leaf.data_len);
916 if (conflict)
917 hammer_rel_mem_record(conflict);
918 }
cebe9493 919 KKASSERT(*errorp == 0);
4a2796f3
MD
920 conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
921 if (conflict != record) {
922 kprintf("conflict mismatch %p %p %08x\n", conflict, record, record->flags);
923 if (conflict)
924 kprintf("conflict mismatch %lld/%d %lld/%d\n", conflict->leaf.base.key, conflict->leaf.data_len, record->leaf.base.key, record->leaf.data_len);
925 }
926 KKASSERT(conflict == record);
927 hammer_rel_mem_record(conflict);
0832c9bb 928
47637bff
MD
929 return (record);
930}
931
932/*
933 * Frontend truncation code. Scan in-memory records only. On-disk records
934 * and records in a flushing state are handled by the backend. The vnops
935 * setattr code will handle the block containing the truncation point.
936 *
937 * Partial blocks are not deleted.
938 */
47637bff
MD
939int
940hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size)
941{
942 struct rec_trunc_info info;
943
944 switch(ip->ino_data.obj_type) {
945 case HAMMER_OBJTYPE_REGFILE:
946 info.rec_type = HAMMER_RECTYPE_DATA;
947 break;
948 case HAMMER_OBJTYPE_DBFILE:
949 info.rec_type = HAMMER_RECTYPE_DB;
950 break;
951 default:
952 return(EINVAL);
953 }
954 info.trunc_off = file_size;
955 hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_trunc_cmp,
312de84d 956 hammer_frontend_trunc_callback, &info);
47637bff
MD
957 return(0);
958}
959
0832c9bb 960static int
312de84d 961hammer_frontend_trunc_callback(hammer_record_t record, void *data __unused)
0832c9bb
MD
962{
963 if (record->flags & HAMMER_RECF_DELETED_FE)
964 return(0);
965 if (record->flush_state == HAMMER_FST_FLUSH)
966 return(0);
967 KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
968 hammer_ref(&record->lock);
969 record->flags |= HAMMER_RECF_DELETED_FE;
970 hammer_rel_mem_record(record);
971 return(0);
972}
973
cb51be26
MD
974/*
975 * Return 1 if the caller must check for and delete existing records
976 * before writing out a new data record.
977 *
978 * Return 0 if the caller can just insert the record into the B-Tree without
979 * checking.
980 */
981static int
982hammer_record_needs_overwrite_delete(hammer_record_t record)
983{
984 hammer_inode_t ip = record->ip;
985 int64_t file_offset;
986 int r;
987
988 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE)
989 file_offset = record->leaf.base.key;
990 else
991 file_offset = record->leaf.base.key - record->leaf.data_len;
a9d52b76 992 r = (file_offset < ip->save_trunc_off);
cb51be26 993 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
a9d52b76
MD
994 if (ip->save_trunc_off <= record->leaf.base.key)
995 ip->save_trunc_off = record->leaf.base.key + 1;
cb51be26 996 } else {
a9d52b76
MD
997 if (ip->save_trunc_off < record->leaf.base.key)
998 ip->save_trunc_off = record->leaf.base.key;
cb51be26
MD
999 }
1000 return(r);
1001}
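
/*
 * Illustrative sketch only (not HAMMER code): the base-offset rule used by
 * the routine above.  DB records are keyed directly on the record key, while
 * regular-file DATA records are keyed on (base_offset + data_len), so the
 * base offset must be recovered by subtracting the length.  The helper name
 * is hypothetical.
 */
static __inline int64_t
example_record_base_offset(hammer_record_t record)
{
	if (record->ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE)
		return(record->leaf.base.key);
	return(record->leaf.base.key - record->leaf.data_len);
}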
1002
47637bff
MD
1003/*
1004 * Backend code. Sync a record to the media.
1005 */
4e17f465
MD
1006int
1007hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t record)
1008{
1009 hammer_transaction_t trans = cursor->trans;
cebe9493 1010 int64_t file_offset;
4a2796f3 1011 int bytes;
40043e7f 1012 void *bdata;
c0ade690 1013 int error;
602c6cb8 1014 int doprop;
c0ade690 1015
1f07f686 1016 KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
d36ec43b 1017 KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
2f85fa4d 1018 KKASSERT(record->leaf.base.localization != 0);
b33e2cc0 1019
47637bff
MD
1020 /*
1021 * If this is a bulk-data record placemarker there may be an existing
1022 * record on-disk, indicating a data overwrite. If there is the
1023 * on-disk record must be deleted before we can insert our new record.
1024 *
1025 * We've synthesized this record and do not know what the create_tid
1026 * on-disk is, nor how much data it represents.
1027 *
1028 * Keep in mind that (key) for data records is (base_offset + len),
1029 * not (base_offset). Also, we only want to get rid of on-disk
1030 * records since we are trying to sync our in-memory record, call
1031 * hammer_ip_delete_range() with truncating set to 1 to make sure
1032 * it skips in-memory records.
1033 *
1034 * It is ok for the lookup to return ENOENT.
cb51be26
MD
1035 *
1036 * NOTE OPTIMIZATION: sync_trunc_off is used to determine if we have
1037 * to call hammer_ip_delete_range() or not. This also means we must
1038 * update sync_trunc_off() as we write.
47637bff 1039 */
cb51be26
MD
1040 if (record->type == HAMMER_MEM_RECORD_DATA &&
1041 hammer_record_needs_overwrite_delete(record)) {
cebe9493 1042 file_offset = record->leaf.base.key - record->leaf.data_len;
4a2796f3
MD
1043 bytes = (record->leaf.data_len + HAMMER_BUFMASK) &
1044 ~HAMMER_BUFMASK;
cebe9493 1045 KKASSERT((file_offset & HAMMER_BUFMASK) == 0);
47637bff
MD
1046 error = hammer_ip_delete_range(
1047 cursor, record->ip,
4a2796f3 1048 file_offset, file_offset + bytes - 1,
cebe9493 1049 1);
47637bff
MD
1050 if (error && error != ENOENT)
1051 goto done;
1052 }
1053
5fa5c92f
MD
1054 /*
1055 * If this is a general record there may be an on-disk version
1056 * that must be deleted before we can insert the new record.
1057 */
1058 if (record->type == HAMMER_MEM_RECORD_GENERAL) {
1059 error = hammer_delete_general(cursor, record->ip,
1060 &record->leaf);
1061 if (error && error != ENOENT)
1062 goto done;
1063 }
1064
47637bff
MD
1065 /*
1066 * Setup the cursor.
1067 */
4e17f465 1068 hammer_normalize_cursor(cursor);
11ad5ade 1069 cursor->key_beg = record->leaf.base;
4e17f465
MD
1070 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1071 cursor->flags |= HAMMER_CURSOR_BACKEND;
1072 cursor->flags &= ~HAMMER_CURSOR_INSERT;
c0ade690 1073
98f7132d
MD
1074 /*
1075 * Records can wind up on-media before the inode itself is on-media.
1076 * Flag the case.
1077 */
1078 record->ip->flags |= HAMMER_INODE_DONDISK;
1079
c0ade690 1080 /*
47637bff
MD
1081 * If we are deleting a directory entry an exact match must be
1082 * found on-disk.
b84de5af 1083 */
1f07f686 1084 if (record->type == HAMMER_MEM_RECORD_DEL) {
4e17f465
MD
1085 error = hammer_btree_lookup(cursor);
1086 if (error == 0) {
02325004 1087 /* XXX iprec? */
e63644f0
MD
1088 error = hammer_ip_delete_record(cursor, record->ip,
1089 trans->tid);
4e17f465
MD
1090 if (error == 0) {
1091 record->flags |= HAMMER_RECF_DELETED_FE;
1092 record->flags |= HAMMER_RECF_DELETED_BE;
1b0ab2c3 1093 record->flags |= HAMMER_RECF_COMMITTED;
4e17f465
MD
1094 }
1095 }
b84de5af
MD
1096 goto done;
1097 }
1098
1099 /*
1100 * We are inserting.
1101 *
d26d0ae9 1102 * Issue a lookup to position the cursor and locate the cluster. The
b3deaf57
MD
1103 * target key should not exist. If we are creating a directory entry
1104 * we may have to iterate the low 32 bits of the key to find an unused
1105 * key.
c0ade690 1106 */
98da6d8c 1107 hammer_sync_lock_sh(trans);
4e17f465 1108 cursor->flags |= HAMMER_CURSOR_INSERT;
c82af904
MD
1109 error = hammer_btree_lookup(cursor);
1110 if (hammer_debug_inode)
1111 kprintf("DOINSERT LOOKUP %d\n", error);
1112 if (error == 0) {
1113 kprintf("hammer_ip_sync_record: duplicate rec "
1114 "at (%016llx)\n", record->leaf.base.key);
1115 Debugger("duplicate record1");
1116 error = EIO;
c0ade690 1117 }
47637bff
MD
1118#if 0
1119 if (record->type == HAMMER_MEM_RECORD_DATA)
1120 kprintf("sync_record %016llx ---------------- %016llx %d\n",
1121 record->leaf.base.key - record->leaf.data_len,
1122 record->leaf.data_offset, error);
1123#endif
47637bff 1124
c0ade690 1125 if (error != ENOENT)
98da6d8c 1126 goto done_unlock;
c0ade690
MD
1127
1128 /*
47197d71
MD
1129 * Allocate the record and data. The result buffers will be
1130 * marked as being modified and further calls to
1131 * hammer_modify_buffer() will result in unneeded UNDO records.
1132 *
40043e7f 1133 * Support zero-fill records (data == NULL and data_len != 0)
c0ade690 1134 */
47637bff
MD
1135 if (record->type == HAMMER_MEM_RECORD_DATA) {
1136 /*
1137 * The data portion of a bulk-data record has already been
1138 * committed to disk, we need only adjust the layer2
1139 * statistics in the same transaction as our B-Tree insert.
1140 */
1141 KKASSERT(record->leaf.data_offset != 0);
925c688a
MD
1142 error = hammer_blockmap_finalize(trans,
1143 record->leaf.data_offset,
1144 record->leaf.data_len);
47637bff
MD
1145 } else if (record->data && record->leaf.data_len) {
1146 /*
1147 * Wholly cached record, with data. Allocate the data.
1148 */
11ad5ade 1149 bdata = hammer_alloc_data(trans, record->leaf.data_len,
bf3b416b 1150 record->leaf.base.rec_type,
11ad5ade 1151 &record->leaf.data_offset,
4e17f465 1152 &cursor->data_buffer, &error);
11ad5ade 1153 if (bdata == NULL)
98da6d8c 1154 goto done_unlock;
ddfdf542 1155 hammer_crc_set_leaf(record->data, &record->leaf);
4e17f465 1156 hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);
11ad5ade 1157 bcopy(record->data, bdata, record->leaf.data_len);
4e17f465 1158 hammer_modify_buffer_done(cursor->data_buffer);
47197d71 1159 } else {
47637bff
MD
1160 /*
1161 * Wholly cached record, without data.
1162 */
11ad5ade
MD
1163 record->leaf.data_offset = 0;
1164 record->leaf.data_crc = 0;
c0ade690 1165 }
c0ade690 1166
1b0ab2c3
MD
1167 /*
1168 * If the record's data was direct-written we cannot insert
1169 * it until the direct-IO has completed.
1170 */
1171 if (record->flags & HAMMER_RECF_DIRECT_IO)
1172 hammer_io_direct_wait(record);
1173
602c6cb8 1174 error = hammer_btree_insert(cursor, &record->leaf, &doprop);
47637bff
MD
1175 if (hammer_debug_inode && error)
1176 kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n", error, cursor->node->node_offset, cursor->index, record->leaf.base.key);
b3deaf57
MD
1177
1178 /*
47637bff
MD
1179 * Our record is on-disk, normally mark the in-memory version as
1180 * deleted. If the record represented a directory deletion but
1181 * we had to sync a valid directory entry to disk we must convert
1182 * the record to a covering delete so the frontend does not have
1183 * visibility on the synced entry.
b3deaf57 1184 */
4e17f465 1185 if (error == 0) {
602c6cb8 1186 if (doprop) {
4c038e17
MD
1187 hammer_btree_do_propagation(cursor,
1188 record->ip->pfsm,
602c6cb8
MD
1189 &record->leaf);
1190 }
4e17f465
MD
1191 if (record->flags & HAMMER_RECF_CONVERT_DELETE) {
1192 KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
1193 record->flags &= ~HAMMER_RECF_DELETED_FE;
1194 record->type = HAMMER_MEM_RECORD_DEL;
c4bae5fd 1195 KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
4e17f465 1196 record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
c4bae5fd 1197 /* hammer_flush_record_done takes care of the rest */
4e17f465
MD
1198 } else {
1199 record->flags |= HAMMER_RECF_DELETED_FE;
1200 record->flags |= HAMMER_RECF_DELETED_BE;
1f07f686 1201 }
1b0ab2c3 1202 record->flags |= HAMMER_RECF_COMMITTED;
4e17f465 1203 } else {
11ad5ade
MD
1204 if (record->leaf.data_offset) {
1205 hammer_blockmap_free(trans, record->leaf.data_offset,
1206 record->leaf.data_len);
19619882 1207 }
4e17f465 1208 }
98da6d8c
MD
1209done_unlock:
1210 hammer_sync_unlock(trans);
47197d71 1211done:
c0ade690 1212 return(error);
8cd0a023 1213}
66325755
MD
1214
1215/*
a89aec1b
MD
1216 * Add the record to the inode's rec_tree. The low 32 bits of a directory
1217 * entry's key is used to deal with hash collisions in the upper 32 bits.
1218 * A unique 64 bit key is generated in-memory and may be regenerated a
1219 * second time when the directory record is flushed to the on-disk B-Tree.
d26d0ae9 1220 *
b3deaf57
MD
1221 * A referenced record is passed to this function. This function
1222 * eats the reference. If an error occurs the record will be deleted.
47197d71
MD
1223 *
1224 * A copy of the temporary record->data pointer provided by the caller
1225 * will be made.
66325755 1226 */
8cd0a023 1227static
66325755 1228int
47637bff 1229hammer_mem_add(hammer_record_t record)
66325755 1230{
47637bff
MD
1231 hammer_mount_t hmp = record->ip->hmp;
1232
47197d71
MD
1233 /*
1234 * Make a private copy of record->data
1235 */
11ad5ade
MD
1236 if (record->data)
1237 KKASSERT(record->flags & HAMMER_RECF_ALLOCDATA);
47197d71
MD
1238
1239 /*
c82af904
MD
1240 * Insert into the RB tree. A unique key should have already
1241 * been selected if this is a directory entry.
47197d71 1242 */
c82af904
MD
1243 if (RB_INSERT(hammer_rec_rb_tree, &record->ip->rec_tree, record)) {
1244 record->flags |= HAMMER_RECF_DELETED_FE;
1245 hammer_rel_mem_record(record);
1246 return (EEXIST);
8cd0a023 1247 }
4a2796f3 1248 ++hmp->count_newrecords;
47637bff
MD
1249 ++hmp->rsv_recs;
1250 ++record->ip->rsv_recs;
e63644f0 1251 record->ip->hmp->rsv_databytes += record->leaf.data_len;
8cd0a023 1252 record->flags |= HAMMER_RECF_ONRBTREE;
47637bff 1253 hammer_modify_inode(record->ip, HAMMER_INODE_XDIRTY);
b3deaf57 1254 hammer_rel_mem_record(record);
8cd0a023 1255 return(0);
66325755
MD
1256}
1257
a89aec1b
MD
1258/************************************************************************
1259 * HAMMER INODE MERGED-RECORD FUNCTIONS *
1260 ************************************************************************
1261 *
1262 * These functions augment the B-Tree scanning functions in hammer_btree.c
1263 * by merging in-memory records with on-disk records.
1264 */
1265
1266/*
1267 * Locate a particular record either in-memory or on-disk.
1268 *
1269 * NOTE: This is basically a standalone routine, hammer_ip_next() may
1270 * NOT be called to iterate results.
1271 */
1272int
45a014dc 1273hammer_ip_lookup(hammer_cursor_t cursor)
a89aec1b
MD
1274{
1275 int error;
1276
1277 /*
1278 * If the element is in-memory return it without searching the
1279 * on-disk B-Tree
1280 */
45a014dc
MD
1281 KKASSERT(cursor->ip);
1282 error = hammer_mem_lookup(cursor);
a89aec1b 1283 if (error == 0) {
11ad5ade 1284 cursor->leaf = &cursor->iprec->leaf;
a89aec1b
MD
1285 return(error);
1286 }
1287 if (error != ENOENT)
1288 return(error);
1289
1290 /*
1291 * If the inode has on-disk components search the on-disk B-Tree.
1292 */
45a014dc 1293 if ((cursor->ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) == 0)
a89aec1b
MD
1294 return(error);
1295 error = hammer_btree_lookup(cursor);
1296 if (error == 0)
11ad5ade 1297 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
a89aec1b
MD
1298 return(error);
1299}
1300
1301/*
1302 * Locate the first record within the cursor's key_beg/key_end range,
1303 * restricted to a particular inode. 0 is returned on success, ENOENT
1304 * if no records matched the requested range, or some other error.
1305 *
1306 * When 0 is returned hammer_ip_next() may be used to iterate additional
1307 * records within the requested range.
6a37e7e4
MD
1308 *
1309 * This function can return EDEADLK, requiring the caller to terminate
1310 * the cursor and try again.
a89aec1b
MD
1311 */
1312int
4e17f465 1313hammer_ip_first(hammer_cursor_t cursor)
a89aec1b 1314{
4e17f465 1315 hammer_inode_t ip = cursor->ip;
a89aec1b
MD
1316 int error;
1317
4e17f465
MD
1318 KKASSERT(ip != NULL);
1319
a89aec1b
MD
1320 /*
1321 * Clean up fields and setup for merged scan
1322 */
195c19a1 1323 cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
6b4f890b 1324 cursor->flags |= HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM;
a89aec1b 1325 cursor->flags |= HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_MEMEOF;
b3deaf57
MD
1326 if (cursor->iprec) {
1327 hammer_rel_mem_record(cursor->iprec);
1328 cursor->iprec = NULL;
1329 }
a89aec1b
MD
1330
1331 /*
c0ade690
MD
1332 * Search the on-disk B-Tree. hammer_btree_lookup() only does an
1333 * exact lookup so if we get ENOENT we have to call the iterate
1334 * function to validate the first record after the begin key.
1335 *
1336 * The ATEDISK flag is used by hammer_btree_iterate to determine
d26d0ae9
MD
1337 * whether it must index forwards or not. It is also used here
1338 * to select the next record from in-memory or on-disk.
6a37e7e4
MD
1339 *
1340 * EDEADLK can only occur if the lookup hit an empty internal
1341 * element and couldn't delete it. Since this could only occur
1342 * in-range, we can just iterate from the failure point.
a89aec1b 1343 */
0a72edae 1344 if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
a89aec1b 1345 error = hammer_btree_lookup(cursor);
6a37e7e4 1346 if (error == ENOENT || error == EDEADLK) {
c0ade690 1347 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
855942b6
MD
1348 if (hammer_debug_general & 0x2000)
1349 kprintf("error %d node %p %016llx index %d\n", error, cursor->node, cursor->node->node_offset, cursor->index);
c0ade690
MD
1350 error = hammer_btree_iterate(cursor);
1351 }
1352 if (error && error != ENOENT)
a89aec1b 1353 return(error);
6b4f890b 1354 if (error == 0) {
c0ade690
MD
1355 cursor->flags &= ~HAMMER_CURSOR_DISKEOF;
1356 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1357 } else {
1358 cursor->flags |= HAMMER_CURSOR_ATEDISK;
6b4f890b 1359 }
a89aec1b
MD
1360 }
1361
1362 /*
c0ade690 1363 * Search the in-memory record list (Red-Black tree). Unlike the
7f7c1f84 1364 * B-Tree search, mem_first checks for records in the range.
a89aec1b 1365 */
4e17f465 1366 error = hammer_mem_first(cursor);
a89aec1b
MD
1367 if (error && error != ENOENT)
1368 return(error);
6b4f890b 1369 if (error == 0) {
a89aec1b 1370 cursor->flags &= ~HAMMER_CURSOR_MEMEOF;
6b4f890b 1371 cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
b84de5af
MD
1372 if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0)
1373 cursor->flags |= HAMMER_CURSOR_ATEMEM;
6b4f890b 1374 }
a89aec1b
MD
1375
1376 /*
1377 * This will return the first matching record.
1378 */
1379 return(hammer_ip_next(cursor));
1380}
1381
1382/*
1383 * Retrieve the next record in a merged iteration within the bounds of the
1384 * cursor. This call may be made multiple times after the cursor has been
1385 * initially searched with hammer_ip_first().
1386 *
1387 * 0 is returned on success, ENOENT if no further records match the
1388 * requested range, or some other error code is returned.
1389 */
1390int
1391hammer_ip_next(hammer_cursor_t cursor)
1392{
1393 hammer_btree_elm_t elm;
b84de5af 1394 hammer_record_t rec, save;
a89aec1b
MD
1395 int error;
1396 int r;
1397
b84de5af 1398next_btree:
a89aec1b
MD
1399 /*
1400 * Load the current on-disk and in-memory record. If we ate any
1401 * records we have to get the next one.
1402 *
195c19a1
MD
1403 * If we deleted the last on-disk record we had scanned ATEDISK will
1404 * be clear and DELBTREE will be set, forcing a call to iterate. The
1405 * fact that ATEDISK is clear causes iterate to re-test the 'current'
1406 * element. If ATEDISK is set, iterate will skip the 'current'
1407 * element.
1408 *
a89aec1b
MD
1409 * Get the next on-disk record
1410 */
195c19a1 1411 if (cursor->flags & (HAMMER_CURSOR_ATEDISK|HAMMER_CURSOR_DELBTREE)) {
a89aec1b
MD
1412 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
1413 error = hammer_btree_iterate(cursor);
b3deaf57 1414 cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
cb51be26 1415 if (error == 0) {
a89aec1b 1416 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
bcac4bbb
MD
1417 hammer_cache_node(&cursor->ip->cache[1],
1418 cursor->node);
cb51be26 1419 } else {
195c19a1
MD
1420 cursor->flags |= HAMMER_CURSOR_DISKEOF |
1421 HAMMER_CURSOR_ATEDISK;
cb51be26 1422 }
a89aec1b
MD
1423 }
1424 }
1425
b84de5af 1426next_memory:
a89aec1b 1427 /*
3f43fb33 1428 * Get the next in-memory record.
7f7c1f84
MD
1429 *
1430 * hammer_rec_scan_cmp: Is the record still in our general range,
1431 * (non-inclusive of snapshot exclusions)?
1432 * hammer_rec_scan_callback: Is the record in our snapshot?
a89aec1b
MD
1433 */
1434 if (cursor->flags & HAMMER_CURSOR_ATEMEM) {
1435 if ((cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
b84de5af
MD
1436 save = cursor->iprec;
1437 cursor->iprec = NULL;
1438 rec = save ? hammer_rec_rb_tree_RB_NEXT(save) : NULL;
7f7c1f84 1439 while (rec) {
ec4e8497
MD
1440 if (hammer_rec_scan_cmp(rec, cursor) != 0)
1441 break;
1442 if (hammer_rec_scan_callback(rec, cursor) != 0)
1443 break;
7f7c1f84
MD
1444 rec = hammer_rec_rb_tree_RB_NEXT(rec);
1445 }
b84de5af
MD
1446 if (save)
1447 hammer_rel_mem_record(save);
7f7c1f84 1448 if (cursor->iprec) {
b3deaf57 1449 KKASSERT(cursor->iprec == rec);
a89aec1b 1450 cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
a89aec1b
MD
1451 } else {
1452 cursor->flags |= HAMMER_CURSOR_MEMEOF;
1453 }
a89aec1b
MD
1454 }
1455 }
1456
4e17f465
MD
1457 /*
1458 * The memory record may have become stale while being held in
1459 * cursor->iprec. We are interlocked against the backend
1460 * with regards to B-Tree entries.
1461 */
1462 if ((cursor->flags & HAMMER_CURSOR_ATEMEM) == 0) {
1463 if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0) {
1464 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1465 goto next_memory;
1466 }
1467 }
1468
a89aec1b
MD
1469 /*
1470 * Extract either the disk or memory record depending on their
1471 * relative position.
1472 */
1473 error = 0;
6b4f890b 1474 switch(cursor->flags & (HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM)) {
a89aec1b
MD
1475 case 0:
1476 /*
9f5097dc
MD
1477 * Both entries valid. Compare the entries and nominally
1478 * return the first one in the sort order. Numerous cases
1479 * require special attention, however.
a89aec1b
MD
1480 */
1481 elm = &cursor->node->ondisk->elms[cursor->index];
11ad5ade 1482 r = hammer_btree_cmp(&elm->base, &cursor->iprec->leaf.base);
47637bff
MD
1483
1484 /*
9f5097dc
MD
1485 * If the two entries differ only by their key (-2/2) or
1486 * create_tid (-1/1), and are DATA records, we may have a
1487 * nominal match. We have to calculate the base file
1488 * offset of the data.
47637bff 1489 */
9f5097dc
MD
1490 if (r <= 2 && r >= -2 && r != 0 &&
1491 cursor->ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE &&
1492 cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
1493 int64_t base1 = elm->leaf.base.key - elm->leaf.data_len;
1494 int64_t base2 = cursor->iprec->leaf.base.key -
1495 cursor->iprec->leaf.data_len;
bf3b416b 1496 if (base1 == base2)
9f5097dc 1497 r = 0;
9f5097dc
MD
1498 }
1499
a89aec1b
MD
1500 if (r < 0) {
1501 error = hammer_btree_extract(cursor,
11ad5ade 1502 HAMMER_CURSOR_GET_LEAF);
a89aec1b
MD
1503 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1504 break;
1505 }
b84de5af
MD
1506
1507 /*
47637bff
MD
1508 * If the entries match exactly the memory entry is either
1509 * an on-disk directory entry deletion or a bulk data
1510 * overwrite. If it is a directory entry deletion we eat
1511 * both entries.
1512 *
1513 * For the bulk-data overwrite case it is possible to have
1514 * visibility into both, which simply means the syncer
1515 * hasn't gotten around to doing the delete+insert sequence
1516 * on the B-Tree. Use the memory entry and throw away the
1517 * on-disk entry.
1f07f686 1518 *
47637bff 1519 * If the in-memory record is not either of these we
1f07f686
MD
1520 * probably caught the syncer while it was syncing it to
1521 * the media. Since we hold a shared lock on the cursor,
1522 * the in-memory record had better be marked deleted at
1523 * this point.
b84de5af
MD
1524 */
1525 if (r == 0) {
1f07f686
MD
1526 if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL) {
1527 if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1528 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1529 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1530 goto next_btree;
1531 }
47637bff
MD
1532 } else if (cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
1533 if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1534 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1535 }
1536 /* fall through to memory entry */
1f07f686 1537 } else {
3f43fb33 1538 panic("hammer_ip_next: duplicate mem/b-tree entry %p %d %08x", cursor->iprec, cursor->iprec->type, cursor->iprec->flags);
b84de5af 1539 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1f07f686 1540 goto next_memory;
b84de5af
MD
1541 }
1542 }
a89aec1b 1543 /* fall through to the memory entry */
6b4f890b 1544 case HAMMER_CURSOR_ATEDISK:
a89aec1b 1545 /*
4e17f465 1546 * Only the memory entry is valid.
a89aec1b 1547 */
11ad5ade 1548 cursor->leaf = &cursor->iprec->leaf;
a89aec1b 1549 cursor->flags |= HAMMER_CURSOR_ATEMEM;
4e17f465
MD
1550
1551 /*
1552 * If the memory entry is an on-disk deletion we should
1553 * also have found a B-Tree record. If the backend beat us
1554 * to it, it would have interlocked the cursor and we should
1555 * have seen the in-memory record marked DELETED_FE.
1556 */
98f7132d
MD
1557 if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL &&
1558 (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
3f43fb33 1559 panic("hammer_ip_next: del-on-disk with no b-tree entry iprec %p flags %08x", cursor->iprec, cursor->iprec->flags);
b84de5af 1560 }
a89aec1b 1561 break;
6b4f890b 1562 case HAMMER_CURSOR_ATEMEM:
a89aec1b
MD
1563 /*
1564 * Only the disk entry is valid
1565 */
11ad5ade 1566 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
a89aec1b
MD
1567 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1568 break;
1569 default:
1570 /*
1571 * Neither entry is valid
1572 *
1573 * XXX error not set properly
1574 */
11ad5ade 1575 cursor->leaf = NULL;
a89aec1b
MD
1576 error = ENOENT;
1577 break;
1578 }
1579 return(error);
1580}
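
/*
 * Illustrative sketch only (not HAMMER code): the typical shape of a merged
 * scan using the API documented above.  hammer_ip_first() positions the
 * cursor on the first record within [key_beg, key_end] and hammer_ip_next()
 * walks the remainder, returning ENOENT when the range is exhausted.  Real
 * callers also fill in key_beg/key_end/asof on the cursor and handle EDEADLK
 * by terminating the cursor and retrying.  The function name is hypothetical.
 */
static int
example_scan_range(hammer_cursor_t cursor)
{
	int error;

	error = hammer_ip_first(cursor);
	while (error == 0) {
		/* cursor->leaf (and cursor->iprec for in-memory records) */
		/* describes the current record at this point */
		error = hammer_ip_next(cursor);
	}
	if (error == ENOENT)	/* normal termination of the scan */
		error = 0;
	return(error);
}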
1581
1582/*
40043e7f 1583 * Resolve the cursor->data pointer for the current cursor position in
a89aec1b
MD
1584 * a merged iteration.
1585 */
1586int
1587hammer_ip_resolve_data(hammer_cursor_t cursor)
1588{
47637bff 1589 hammer_record_t record;
a89aec1b
MD
1590 int error;
1591
47637bff
MD
1592 if (hammer_cursor_inmem(cursor)) {
1593 /*
1594 * The data associated with an in-memory record is usually
1595 * kmalloced, but reserve-ahead data records will have an
1596 * on-disk reference.
1597 *
1598 * NOTE: Reserve-ahead data records must be handled in the
1599 * context of the related high level buffer cache buffer
1600 * to interlock against async writes.
1601 */
1602 record = cursor->iprec;
1603 cursor->data = record->data;
a89aec1b 1604 error = 0;
47637bff
MD
1605 if (cursor->data == NULL) {
1606 KKASSERT(record->leaf.base.rec_type ==
1607 HAMMER_RECTYPE_DATA);
4a2796f3 1608 cursor->data = hammer_bread_ext(cursor->trans->hmp,
47637bff 1609 record->leaf.data_offset,
4a2796f3 1610 record->leaf.data_len,
47637bff
MD
1611 &error,
1612 &cursor->data_buffer);
1613 }
a89aec1b 1614 } else {
11ad5ade 1615 cursor->leaf = &cursor->node->ondisk->elms[cursor->index].leaf;
a89aec1b
MD
1616 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
1617 }
1618 return(error);
1619}
1620
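
#if 0
/*
 * Illustrative sketch only, not part of the original file: a typical
 * consumer of the merged iteration drives the cursor with
 * hammer_ip_first()/hammer_ip_next() and calls hammer_ip_resolve_data()
 * to obtain a usable data pointer whether the current record lives
 * in-memory or on-disk.  The example_* helper is hypothetical and the
 * key range / asof setup is elided; see hammer_ip_delete_range() below
 * for a complete cursor setup.
 */
static int
example_scan_records(hammer_transaction_t trans, hammer_inode_t ip)
{
        struct hammer_cursor cursor;
        int error;

        hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
        /* ... fill in cursor.key_beg, cursor.key_end, cursor.asof ... */

        error = hammer_ip_first(&cursor);
        while (error == 0) {
                error = hammer_ip_resolve_data(&cursor);
                if (error)
                        break;
                /* cursor.leaf and cursor.data now describe the record */
                error = hammer_ip_next(&cursor);
        }
        if (error == ENOENT)
                error = 0;
        hammer_done_cursor(&cursor);
        return(error);
}
#endif
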
66325755 1621/*
47637bff
MD
1622 * Backend truncation / record replacement - delete records in range.
1623 *
1624 * Delete all records within the specified range for inode ip. In-memory
06ad81ff
MD
1625 * records still associated with the frontend are ignored.
1626 *
1627 * If truncating is non-zero, in-memory records associated with the back-end
1628 * are ignored. If truncating is > 1, we can return EWOULDBLOCK.
a89aec1b 1629 *
a9d52b76 1630 * NOTES:
a89aec1b 1631 *
a9d52b76
MD
1632 * * An unaligned range will cause new records to be added to cover
1633 * the edge cases. (XXX not implemented yet).
47637bff 1634 *
a9d52b76
MD
1635 * * Replacement via reservations (see hammer_ip_sync_record_cursor())
1636 * also does not deal with unaligned ranges.
76376933 1637 *
a9d52b76
MD
1638 * * ran_end is inclusive (e.g. 0,1023 instead of 0,1024).
1639 *
1640 * * Record keys for regular file data have to be special-cased since
1641 * they indicate the end of the range (key = base + bytes).
1642 *
1643 * * This function may be asked to delete ridiculously huge ranges, for
1644 * example if someone truncates or removes a 1TB regular file. We
1645 * must be very careful on restarts and we may have to stop w/
1646 * EWOULDBLOCK to avoid blowing out the buffer cache.
66325755
MD
1647 */
1648int
4e17f465 1649hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
47637bff 1650 int64_t ran_beg, int64_t ran_end, int truncating)
66325755 1651{
4e17f465 1652 hammer_transaction_t trans = cursor->trans;
11ad5ade 1653 hammer_btree_leaf_elm_t leaf;
a89aec1b
MD
1654 int error;
1655 int64_t off;
a9d52b76 1656 int64_t tmp64;
a89aec1b 1657
b84de5af
MD
1658#if 0
1659 kprintf("delete_range %p %016llx-%016llx\n", ip, ran_beg, ran_end);
1660#endif
1661
1662 KKASSERT(trans->type == HAMMER_TRANS_FLS);
6a37e7e4 1663retry:
4e17f465 1664 hammer_normalize_cursor(cursor);
5a930e66
MD
1665 cursor->key_beg.localization = ip->obj_localization +
1666 HAMMER_LOCALIZE_MISC;
4e17f465
MD
1667 cursor->key_beg.obj_id = ip->obj_id;
1668 cursor->key_beg.create_tid = 0;
1669 cursor->key_beg.delete_tid = 0;
1670 cursor->key_beg.obj_type = 0;
4e17f465 1671
11ad5ade 1672 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
4e17f465
MD
1673 cursor->key_beg.key = ran_beg;
1674 cursor->key_beg.rec_type = HAMMER_RECTYPE_DB;
a89aec1b 1675 } else {
195c19a1
MD
1676 /*
1677 * The key in the B-Tree is (base+bytes), so the first possible
1678 * matching key is ran_beg + 1.
1679 */
4e17f465
MD
1680 cursor->key_beg.key = ran_beg + 1;
1681 cursor->key_beg.rec_type = HAMMER_RECTYPE_DATA;
a9d52b76 1682 }
195c19a1 1683
a9d52b76
MD
1684 cursor->key_end = cursor->key_beg;
1685 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
1686 cursor->key_end.key = ran_end;
1687 } else {
195c19a1
MD
1688 tmp64 = ran_end + MAXPHYS + 1; /* work around GCC-4 bug */
1689 if (tmp64 < ran_end)
4e17f465 1690 cursor->key_end.key = 0x7FFFFFFFFFFFFFFFLL;
a89aec1b 1691 else
4e17f465 1692 cursor->key_end.key = ran_end + MAXPHYS + 1;
a89aec1b 1693 }
a9d52b76
MD
1694
1695 cursor->asof = ip->obj_asof;
1696 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1697 cursor->flags |= HAMMER_CURSOR_ASOF;
1698 cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
1699 cursor->flags |= HAMMER_CURSOR_BACKEND;
4e17f465 1700 cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE;
a89aec1b 1701
4e17f465 1702 error = hammer_ip_first(cursor);
a89aec1b
MD
1703
1704 /*
1705 * Iterate through matching records and mark them as deleted.
1706 */
1707 while (error == 0) {
11ad5ade 1708 leaf = cursor->leaf;
a89aec1b 1709
11ad5ade 1710 KKASSERT(leaf->base.delete_tid == 0);
e4a5ff06 1711 KKASSERT(leaf->base.obj_id == ip->obj_id);
a89aec1b
MD
1712
1713 /*
1714 * There may be overlap cases for regular file data. Also
47637bff
MD
1715 * remember the key for a regular file record is (base + len),
1716 * NOT (base).
e4a5ff06
MD
1717 *
1718 * Note that due to duplicates (mem & media) allowed by
1719 * DELETE_VISIBILITY, off can wind up less than ran_beg.
a89aec1b 1720 */
11ad5ade 1721 if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
11ad5ade 1722 off = leaf->base.key - leaf->data_len;
a89aec1b 1723 /*
76376933
MD
1724 * Check the left edge case. We currently do not
1725 * split existing records.
a89aec1b 1726 */
e4a5ff06 1727 if (off < ran_beg && leaf->base.key > ran_beg) {
76376933 1728 panic("hammer left edge case %016llx %d\n",
11ad5ade 1729 leaf->base.key, leaf->data_len);
a89aec1b
MD
1730 }
1731
1732 /*
1733 * Check the right edge case. Note that the
1734 * record can be completely out of bounds, which
1735 * terminates the search.
1736 *
76376933
MD
1737 * base->key is exclusive of the right edge while
1738 * ran_end is inclusive of the right edge. The
1739 * (key - data_len) left boundary is inclusive.
195c19a1
MD
1740 *
1741 * XXX theory-check this test at some point, are
1742 * we missing a + 1 somewhere? Note that ran_end
1743 * could overflow.
a89aec1b 1744 */
11ad5ade
MD
1745 if (leaf->base.key - 1 > ran_end) {
1746 if (leaf->base.key - leaf->data_len > ran_end)
a89aec1b 1747 break;
a89aec1b
MD
1748 panic("hammer right edge case\n");
1749 }
a9d52b76
MD
1750 } else {
1751 off = leaf->base.key;
a89aec1b
MD
1752 }
1753
1754 /*
47637bff
MD
1755 * Delete the record. When truncating we do not delete
1756 * in-memory (data) records because they represent data
1757 * written after the truncation.
1758 *
1759 * This will also physically destroy the B-Tree entry and
195c19a1
MD
1760 * data if the retention policy dictates. The function
1761 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
1762 * uses to perform a fixup.
a89aec1b 1763 */
06ad81ff 1764 if (truncating == 0 || hammer_cursor_ondisk(cursor)) {
47637bff 1765 error = hammer_ip_delete_record(cursor, ip, trans->tid);
a9d52b76
MD
1766 /*
1767 * If we have built up too many meta-buffers we risk
1768 * deadlocking the kernel and must stop. This can
1769 * occur when deleting ridiculously huge files.
1770 * sync_trunc_off is updated so the next cycle does
1771 * not re-iterate records we have already deleted.
1772 *
1773 * This is only done with formal truncations.
1774 */
06ad81ff
MD
1775 if (truncating > 1 && error == 0 &&
1776 hammer_flusher_meta_limit(ip->hmp)) {
a9d52b76 1777 ip->sync_trunc_off = off;
06ad81ff
MD
1778 error = EWOULDBLOCK;
1779 }
1780 }
195c19a1
MD
1781 if (error)
1782 break;
a9d52b76 1783 ran_beg = off; /* for restart */
4e17f465
MD
1784 error = hammer_ip_next(cursor);
1785 }
cb51be26 1786 if (cursor->node)
bcac4bbb 1787 hammer_cache_node(&ip->cache[1], cursor->node);
cb51be26 1788
4e17f465
MD
1789 if (error == EDEADLK) {
1790 hammer_done_cursor(cursor);
bcac4bbb 1791 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
4e17f465
MD
1792 if (error == 0)
1793 goto retry;
a89aec1b 1794 }
a89aec1b
MD
1795 if (error == ENOENT)
1796 error = 0;
1797 return(error);
66325755
MD
1798}
1799
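
#if 0
/*
 * Illustrative sketch only, not part of the original file: a backend
 * truncation to new_size might call hammer_ip_delete_range() with
 * truncating > 1 so the routine is allowed to stop early with
 * EWOULDBLOCK when hammer_flusher_meta_limit() trips.  The routine
 * records the restart offset in ip->sync_trunc_off itself; a real
 * caller returns to the flusher and retries in a later cycle instead
 * of spinning.  0x7FFFFFFFFFFFFFFFLL serves as an inclusive
 * "to end of file" ran_end.  The example_* helper is hypothetical.
 */
static int
example_truncate_backend(hammer_cursor_t cursor, hammer_inode_t ip,
                         int64_t new_size)
{
        int error;

        error = hammer_ip_delete_range(cursor, ip, new_size,
                                       0x7FFFFFFFFFFFFFFFLL, 2);
        if (error == EWOULDBLOCK) {
                /* partial progress, ip->sync_trunc_off marks the restart */
        }
        return(error);
}
#endif
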
5fa5c92f
MD
1800/*
1801 * This backend function deletes the specified record on-disk, similar to
1802 * delete_range but for a specific record. Unlike the exact deletions
1803 * used when deleting a directory entry this function uses an ASOF search
1804 * like delete_range.
1805 *
1806 * This function may be called with ip->obj_asof set for a slave snapshot,
1807 * so don't use that field; we always delete only non-historical records.
1808 */
1809static int
1810hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
1811 hammer_btree_leaf_elm_t leaf)
1812{
1813 hammer_transaction_t trans = cursor->trans;
1814 int error;
1815
1816 KKASSERT(trans->type == HAMMER_TRANS_FLS);
1817retry:
1818 hammer_normalize_cursor(cursor);
1819 cursor->key_beg = leaf->base;
1820 cursor->asof = HAMMER_MAX_TID;
1821 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1822 cursor->flags |= HAMMER_CURSOR_ASOF;
1823 cursor->flags |= HAMMER_CURSOR_BACKEND;
1824 cursor->flags &= ~HAMMER_CURSOR_INSERT;
1825
1826 error = hammer_btree_lookup(cursor);
1827 if (error == 0) {
1828 error = hammer_ip_delete_record(cursor, ip, trans->tid);
1829 }
1830 if (error == EDEADLK) {
1831 hammer_done_cursor(cursor);
1832 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
1833 if (error == 0)
1834 goto retry;
1835 }
1836 return(error);
1837}
1838
b3deaf57 1839/*
a9d52b76
MD
1840 * This function deletes remaining auxiliary records when an inode is
1841 * being deleted. This function explicitly does not delete the
1842 * inode record, directory entry, data, or db records. Those must be
1843 * properly disposed of prior to this call.
b3deaf57 1844 */
7a04d74f 1845int
a9d52b76 1846hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip, int *countp)
7a04d74f 1847{
4e17f465 1848 hammer_transaction_t trans = cursor->trans;
11ad5ade 1849 hammer_btree_leaf_elm_t leaf;
7a04d74f
MD
1850 int error;
1851
b84de5af 1852 KKASSERT(trans->type == HAMMER_TRANS_FLS);
6a37e7e4 1853retry:
4e17f465 1854 hammer_normalize_cursor(cursor);
5a930e66
MD
1855 cursor->key_beg.localization = ip->obj_localization +
1856 HAMMER_LOCALIZE_MISC;
4e17f465
MD
1857 cursor->key_beg.obj_id = ip->obj_id;
1858 cursor->key_beg.create_tid = 0;
1859 cursor->key_beg.delete_tid = 0;
1860 cursor->key_beg.obj_type = 0;
a9d52b76 1861 cursor->key_beg.rec_type = HAMMER_RECTYPE_CLEAN_START;
4e17f465
MD
1862 cursor->key_beg.key = HAMMER_MIN_KEY;
1863
1864 cursor->key_end = cursor->key_beg;
a9d52b76 1865 cursor->key_end.rec_type = HAMMER_RECTYPE_MAX;
4e17f465
MD
1866 cursor->key_end.key = HAMMER_MAX_KEY;
1867
1868 cursor->asof = ip->obj_asof;
1869 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1870 cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1871 cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
1872 cursor->flags |= HAMMER_CURSOR_BACKEND;
1873
1874 error = hammer_ip_first(cursor);
7a04d74f
MD
1875
1876 /*
1877 * Iterate through matching records and mark them as deleted.
1878 */
1879 while (error == 0) {
11ad5ade 1880 leaf = cursor->leaf;
7a04d74f 1881
11ad5ade 1882 KKASSERT(leaf->base.delete_tid == 0);
7a04d74f
MD
1883
1884 /*
1885 * Mark the record and B-Tree entry as deleted. This will
1886 * also physically delete the B-Tree entry, record, and
1887 * data if the retention policy dictates. The function
1888 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
1889 * uses to perform a fixup.
1f07f686
MD
1890 *
1891 * Directory entries (and delete-on-disk directory entries)
1892 * must be synced and cannot be deleted.
7a04d74f 1893 */
a9d52b76
MD
1894 error = hammer_ip_delete_record(cursor, ip, trans->tid);
1895 ++*countp;
7a04d74f
MD
1896 if (error)
1897 break;
4e17f465
MD
1898 error = hammer_ip_next(cursor);
1899 }
cb51be26 1900 if (cursor->node)
bcac4bbb 1901 hammer_cache_node(&ip->cache[1], cursor->node);
4e17f465
MD
1902 if (error == EDEADLK) {
1903 hammer_done_cursor(cursor);
bcac4bbb 1904 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
4e17f465
MD
1905 if (error == 0)
1906 goto retry;
7a04d74f 1907 }
7a04d74f
MD
1908 if (error == ENOENT)
1909 error = 0;
1910 return(error);
1911}
1912
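
#if 0
/*
 * Illustrative sketch only, not part of the original file: the backend
 * caller of hammer_ip_delete_clean() supplies a counter which the
 * routine increments once per auxiliary record it deletes, so the
 * caller can tell whether anything was actually cleaned up.  The
 * example_* helper is hypothetical.
 */
static int
example_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip)
{
        int count = 0;
        int error;

        error = hammer_ip_delete_clean(cursor, ip, &count);
        if (error == 0 && count)
                kprintf("removed %d auxiliary records\n", count);
        return(error);
}
#endif
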
195c19a1 1913/*
46fe7ae1
MD
1914 * Delete the record at the current cursor. On success the cursor will
1915 * be positioned appropriately for an iteration but may no longer be at
1916 * a leaf node.
6a37e7e4 1917 *
b84de5af
MD
1918 * This routine is only called from the backend.
1919 *
6a37e7e4
MD
1920 * NOTE: This can return EDEADLK, requiring the caller to terminate the
1921 * cursor and retry.
195c19a1
MD
1922 */
1923int
e63644f0
MD
1924hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
1925 hammer_tid_t tid)
195c19a1 1926{
4a2796f3 1927 hammer_record_t iprec;
195c19a1
MD
1928 hammer_btree_elm_t elm;
1929 hammer_mount_t hmp;
1930 int error;
1931
d36ec43b 1932 KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
cebe9493 1933 KKASSERT(tid != 0);
4a2796f3 1934 hmp = cursor->node->hmp;
d36ec43b 1935
195c19a1 1936 /*
d36ec43b
MD
1937 * In-memory (unsynchronized) records can simply be freed. This
1938 * only occurs in range iterations since all other records are
1939 * individually synchronized. Thus there should be no confusion with
1940 * the interlock.
4a2796f3
MD
1941 *
1942 * An in-memory record may be deleted before being committed to disk,
1b0ab2c3
MD
1943 * but could have been accessed in the meantime. The reservation
1944 * code will deal with the case.
195c19a1 1945 */
47637bff 1946 if (hammer_cursor_inmem(cursor)) {
4a2796f3
MD
1947 iprec = cursor->iprec;
1948 KKASSERT((iprec->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
1949 iprec->flags |= HAMMER_RECF_DELETED_FE;
1950 iprec->flags |= HAMMER_RECF_DELETED_BE;
195c19a1
MD
1951 return(0);
1952 }
1953
1954 /*
1955 * On-disk records are marked as deleted by updating their delete_tid.
9582c7da
MD
1956 * This does not affect their position in the B-Tree (which is based
1957 * on their create_tid).
842e7a70
MD
1958 *
1959 * Frontend B-Tree operations track inodes so we tell
1960 * hammer_delete_at_cursor() not to.
195c19a1 1961 */
11ad5ade 1962 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
195c19a1 1963 elm = NULL;
195c19a1
MD
1964
1965 if (error == 0) {
602c6cb8
MD
1966 error = hammer_delete_at_cursor(
1967 cursor,
1968 HAMMER_DELETE_ADJUST | hammer_nohistory(ip),
842e7a70
MD
1969 cursor->trans->tid,
1970 cursor->trans->time32,
1971 0, NULL);
195c19a1
MD
1972 }
1973 return(error);
1974}
1975
602c6cb8
MD
1976/*
1977 * Delete the B-Tree element at the current cursor and do any necessary
1978 * mirror propagation.
1979 *
1980 * The cursor must be properly positioned for an iteration on return but
1981 * may be pointing at an internal element.
842e7a70
MD
1982 *
1983 * An element can be un-deleted by passing a delete_tid of 0 with
1984 * HAMMER_DELETE_ADJUST.
602c6cb8 1985 */
7dc57964 1986int
602c6cb8 1987hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags,
842e7a70
MD
1988 hammer_tid_t delete_tid, u_int32_t delete_ts,
1989 int track, int64_t *stat_bytes)
7dc57964 1990{
602c6cb8 1991 struct hammer_btree_leaf_elm save_leaf;
842e7a70 1992 hammer_transaction_t trans;
602c6cb8
MD
1993 hammer_btree_leaf_elm_t leaf;
1994 hammer_node_t node;
7dc57964 1995 hammer_btree_elm_t elm;
47197d71 1996 hammer_off_t data_offset;
7dc57964 1997 int32_t data_len;
bf686dbe 1998 u_int16_t rec_type;
7dc57964 1999 int error;
842e7a70 2000 int icount;
602c6cb8 2001 int doprop;
7dc57964 2002
602c6cb8
MD
2003 error = hammer_cursor_upgrade(cursor);
2004 if (error)
2005 return(error);
2006
842e7a70 2007 trans = cursor->trans;
602c6cb8
MD
2008 node = cursor->node;
2009 elm = &node->ondisk->elms[cursor->index];
2010 leaf = &elm->leaf;
7dc57964
MD
2011 KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);
2012
842e7a70
MD
2013 hammer_sync_lock_sh(trans);
2014 doprop = 0;
2015 icount = 0;
2016
602c6cb8
MD
2017 /*
2018 * Adjust the delete_tid. Update the mirror_tid propagation field
02325004 2019 * as well. delete_tid can be 0 (undelete -- used by mirroring).
602c6cb8 2020 */
602c6cb8 2021 if (delete_flags & HAMMER_DELETE_ADJUST) {
842e7a70
MD
2022 if (elm->base.rec_type == HAMMER_RECTYPE_INODE) {
2023 if (elm->leaf.base.delete_tid == 0 && delete_tid)
2024 icount = -1;
2025 if (elm->leaf.base.delete_tid && delete_tid == 0)
2026 icount = 1;
2027 }
2028
2029 hammer_modify_node(trans, node, elm, sizeof(*elm));
2030 elm->leaf.base.delete_tid = delete_tid;
2031 elm->leaf.delete_ts = delete_ts;
602c6cb8
MD
2032 hammer_modify_node_done(node);
2033
2034 if (elm->leaf.base.delete_tid > node->ondisk->mirror_tid) {
842e7a70 2035 hammer_modify_node_field(trans, node, mirror_tid);
602c6cb8
MD
2036 node->ondisk->mirror_tid = elm->leaf.base.delete_tid;
2037 hammer_modify_node_done(node);
2038 doprop = 1;
02325004
MD
2039 if (hammer_debug_general & 0x0002) {
2040 kprintf("delete_at_cursor: propagate %016llx"
2041 " @%016llx\n",
2042 elm->leaf.base.delete_tid,
2043 node->node_offset);
2044 }
602c6cb8 2045 }
7dc57964 2046
7dc57964 2047 /*
602c6cb8
MD
2048 * Adjust for the iteration. We have deleted the current
2049 * element and want to clear ATEDISK so the iteration does
2050 * not skip the element after, which now becomes the current
2051 * element.
7dc57964
MD
2052 */
2053 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
2054 cursor->flags |= HAMMER_CURSOR_DELBTREE;
2055 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
2056 }
602c6cb8
MD
2057
2058 /*
2059 * An on-disk record cannot have the same delete_tid
2060 * as its create_tid. In a chain of record updates
2061 * this could result in a duplicate record.
2062 */
2063 KKASSERT(elm->leaf.base.delete_tid !=
2064 elm->leaf.base.create_tid);
40043e7f 2065 }
602c6cb8
MD
2066
2067 /*
2068 * Destroy the B-Tree element if asked (typically if a nohistory
2069 * file or mount, or when called by the pruning code).
2070 *
2071 * Adjust the ATEDISK flag to properly support iterations.
2072 */
2073 if (delete_flags & HAMMER_DELETE_DESTROY) {
2074 data_offset = elm->leaf.data_offset;
2075 data_len = elm->leaf.data_len;
2076 rec_type = elm->leaf.base.rec_type;
2077 if (doprop) {
2078 save_leaf = elm->leaf;
2079 leaf = &save_leaf;
eb3f8f1f 2080 }
842e7a70
MD
2081 if (elm->base.rec_type == HAMMER_RECTYPE_INODE &&
2082 elm->leaf.base.delete_tid == 0) {
2083 icount = -1;
2084 }
602c6cb8
MD
2085
2086 error = hammer_btree_delete(cursor);
2087 if (error == 0) {
2088 /*
2089 * This forces a fixup for the iteration because
2090 * the cursor is now either sitting at the 'next'
2091 * element or sitting at the end of a leaf.
2092 */
2093 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
2094 cursor->flags |= HAMMER_CURSOR_DELBTREE;
2095 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
2096 }
2097 }
2098 if (error == 0) {
2099 switch(data_offset & HAMMER_OFF_ZONE_MASK) {
2100 case HAMMER_ZONE_LARGE_DATA:
2101 case HAMMER_ZONE_SMALL_DATA:
2102 case HAMMER_ZONE_META:
842e7a70 2103 hammer_blockmap_free(trans,
602c6cb8
MD
2104 data_offset, data_len);
2105 break;
2106 default:
2107 break;
2108 }
2109 }
2110 }
2111
842e7a70
MD
2112 /*
2113 * Track inode count and next_tid. This is used by the mirroring
2114 * and PFS code. icount can be negative, zero, or positive.
2115 */
2116 if (error == 0 && track) {
2117 if (icount) {
2118 hammer_modify_volume_field(trans, trans->rootvol,
2119 vol0_stat_inodes);
2120 trans->rootvol->ondisk->vol0_stat_inodes += icount;
2121 hammer_modify_volume_done(trans->rootvol);
2122 }
2123 if (trans->rootvol->ondisk->vol0_next_tid < delete_tid) {
2124 hammer_modify_volume(trans, trans->rootvol, NULL, 0);
2125 trans->rootvol->ondisk->vol0_next_tid = delete_tid;
2126 hammer_modify_volume_done(trans->rootvol);
2127 }
2128 }
2129
602c6cb8
MD
2130 /*
2131 * mirror_tid propagation occurs if the node's mirror_tid had to be
2132 * updated while adjusting the delete_tid.
2133 *
2134 * This occurs when deleting even in nohistory mode, but does not
2135 * occur when pruning an already-deleted node.
842e7a70
MD
2136 *
2137 * cursor->ip is NULL when called from the pruning, mirroring,
2138 * and pfs code. If non-NULL propagation will be conditionalized
2139 * on whether the PFS is in no-history mode or not.
602c6cb8
MD
2140 */
2141 if (doprop) {
842e7a70
MD
2142 if (cursor->ip)
2143 hammer_btree_do_propagation(cursor, cursor->ip->pfsm, leaf);
2144 else
2145 hammer_btree_do_propagation(cursor, NULL, leaf);
7dc57964 2146 }
842e7a70 2147 hammer_sync_unlock(trans);
7dc57964
MD
2148 return (error);
2149}
2150
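
#if 0
/*
 * Illustrative sketch only, not part of the original file: as noted
 * above, an element can be un-deleted by passing a delete_tid of 0
 * with HAMMER_DELETE_ADJUST.  The cursor must already be positioned
 * on the B-Tree record; no inode/statistics tracking is requested
 * here (track == 0, stat_bytes == NULL).  The example_* helper is
 * hypothetical.
 */
static int
example_undelete_at_cursor(hammer_cursor_t cursor)
{
        return(hammer_delete_at_cursor(cursor, HAMMER_DELETE_ADJUST,
                                       0, 0, 0, NULL));
}
#endif
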
b3deaf57 2151/*
1f07f686
MD
2152 * Determine whether we can remove a directory. This routine checks whether
2153 * a directory is empty or not and enforces flush connectivity.
2154 *
2155 * Flush connectivity requires that we block if the target directory is
2156 * currently flushing; otherwise it may not end up in the same flush group.
2157 *
2158 * Returns 0 on success, ENOTEMPTY or EDEADLK (or other errors) on failure.
b3deaf57
MD
2159 */
2160int
98f7132d 2161hammer_ip_check_directory_empty(hammer_transaction_t trans, hammer_inode_t ip)
b3deaf57
MD
2162{
2163 struct hammer_cursor cursor;
2164 int error;
2165
1f07f686
MD
2166 /*
2167 * Check directory empty
2168 */
bcac4bbb 2169 hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
b3deaf57 2170
5a930e66
MD
2171 cursor.key_beg.localization = ip->obj_localization +
2172 HAMMER_LOCALIZE_MISC;
b3deaf57 2173 cursor.key_beg.obj_id = ip->obj_id;
d5530d22 2174 cursor.key_beg.create_tid = 0;
b3deaf57
MD
2175 cursor.key_beg.delete_tid = 0;
2176 cursor.key_beg.obj_type = 0;
2177 cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
2178 cursor.key_beg.key = HAMMER_MIN_KEY;
2179
2180 cursor.key_end = cursor.key_beg;
2181 cursor.key_end.rec_type = 0xFFFF;
2182 cursor.key_end.key = HAMMER_MAX_KEY;
2183
d5530d22
MD
2184 cursor.asof = ip->obj_asof;
2185 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
b3deaf57 2186
4e17f465 2187 error = hammer_ip_first(&cursor);
b3deaf57
MD
2188 if (error == ENOENT)
2189 error = 0;
2190 else if (error == 0)
2191 error = ENOTEMPTY;
2192 hammer_done_cursor(&cursor);
2193 return(error);
2194}
2195
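
#if 0
/*
 * Illustrative sketch only, not part of the original file: a
 * rmdir-style caller gates the removal on the check above.  A return
 * of 0 means the directory is empty, ENOTEMPTY means it still has
 * entries, and any other error (e.g. EDEADLK) is passed back so the
 * operation can be retried or aborted.  The example_* helper is
 * hypothetical.
 */
static int
example_can_rmdir(hammer_transaction_t trans, hammer_inode_t dip)
{
        int error;

        error = hammer_ip_check_directory_empty(trans, dip);
        /* 0: empty, ENOTEMPTY: has entries, else: pass error through */
        return(error);
}
#endif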