HAMMER - Fix long stalls when writing out core files
[dragonfly.git] / sys / vfs / hammer / hammer_object.c
/*
 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_object.c,v 1.97 2008/09/23 22:28:56 dillon Exp $
 */

#include "hammer.h"

static int hammer_mem_lookup(hammer_cursor_t cursor);
static void hammer_mem_first(hammer_cursor_t cursor);
static int hammer_frontend_trunc_callback(hammer_record_t record,
                        void *data __unused);
static int hammer_bulk_scan_callback(hammer_record_t record, void *data);
static int hammer_record_needs_overwrite_delete(hammer_record_t record);
static int hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
                        hammer_btree_leaf_elm_t leaf);
static int hammer_cursor_localize_data(hammer_data_ondisk_t data,
                        hammer_btree_leaf_elm_t leaf);

struct rec_trunc_info {
        u_int16_t       rec_type;
        int64_t         trunc_off;
};

struct hammer_bulk_info {
        hammer_record_t record;
        hammer_record_t conflict;
};

/*
 * Red-black tree support. Comparison code for insertion.
 */
static int
hammer_rec_rb_compare(hammer_record_t rec1, hammer_record_t rec2)
{
        if (rec1->leaf.base.rec_type < rec2->leaf.base.rec_type)
                return(-1);
        if (rec1->leaf.base.rec_type > rec2->leaf.base.rec_type)
                return(1);

        if (rec1->leaf.base.key < rec2->leaf.base.key)
                return(-1);
        if (rec1->leaf.base.key > rec2->leaf.base.key)
                return(1);

        /*
         * For search & insertion purposes records deleted by the
         * frontend or deleted/committed by the backend are silently
         * ignored. Otherwise pipelined insertions will get messed
         * up.
         *
         * rec1 is greater than rec2 if rec1 is marked deleted.
         * rec1 is less than rec2 if rec2 is marked deleted.
         *
         * Multiple deleted records may be present, do not return 0
         * if both are marked deleted.
         */
        if (rec1->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
                           HAMMER_RECF_COMMITTED)) {
                return(1);
        }
        if (rec2->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
                           HAMMER_RECF_COMMITTED)) {
                return(-1);
        }

        return(0);
}

/*
 * Basic record comparison code similar to hammer_btree_cmp().
 *
 * obj_id is not compared and may not yet be assigned in the record.
 */
static int
hammer_rec_cmp(hammer_base_elm_t elm, hammer_record_t rec)
{
        if (elm->rec_type < rec->leaf.base.rec_type)
                return(-3);
        if (elm->rec_type > rec->leaf.base.rec_type)
                return(3);

        if (elm->key < rec->leaf.base.key)
                return(-2);
        if (elm->key > rec->leaf.base.key)
                return(2);

        /*
         * Never match against an item deleted by the frontend
         * or backend, or committed by the backend.
         *
         * elm is less than rec if rec is marked deleted.
         */
        if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
                          HAMMER_RECF_COMMITTED)) {
                return(-1);
        }
        return(0);
}

/*
 * Ranged scan to locate overlapping record(s). This is used by
 * hammer_ip_get_bulk() to locate an overlapping record. We have
 * to use a ranged scan because the keys for data records with the
 * same file base offset can be different due to differing data_len's.
 *
 * NOTE: The base file offset of a data record is (key - data_len), not (key).
 */
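/*
 * Illustrative example (not from the original source): a DATA record
 * with base.key 0x8000 and data_len 0x4000 covers file offsets
 * [0x4000, 0x8000), while a record covering [0x4000, 0x6000) has
 * base.key 0x6000. The two records overlap even though their keys
 * differ, so a simple key comparison cannot detect the conflict.
 */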
static int
hammer_rec_overlap_cmp(hammer_record_t rec, void *data)
{
        struct hammer_bulk_info *info = data;
        hammer_btree_leaf_elm_t leaf = &info->record->leaf;

        if (rec->leaf.base.rec_type < leaf->base.rec_type)
                return(-3);
        if (rec->leaf.base.rec_type > leaf->base.rec_type)
                return(3);

        /*
         * Overlap compare
         */
        if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
                /* rec_beg >= leaf_end */
                if (rec->leaf.base.key - rec->leaf.data_len >= leaf->base.key)
                        return(2);
                /* rec_end <= leaf_beg */
                if (rec->leaf.base.key <= leaf->base.key - leaf->data_len)
                        return(-2);
        } else {
                if (rec->leaf.base.key < leaf->base.key)
                        return(-2);
                if (rec->leaf.base.key > leaf->base.key)
                        return(2);
        }

        /*
         * We have to return 0 at this point, even if DELETED_FE is set,
         * because returning anything else will cause the scan to ignore
         * one of the branches when we really want it to check both.
         */
        return(0);
}

/*
 * RB_SCAN comparison code for hammer_mem_first(). The argument order
 * is reversed so the comparison result has to be negated. key_beg and
 * key_end are both range-inclusive.
 *
 * Localized deletions are not cached in-memory.
 */
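/*
 * Note on magnitudes (see hammer_rec_cmp() above): +/-3 indicates a
 * rec_type mismatch and +/-2 a key mismatch, while -1 indicates a
 * deleted/committed record. The range tests below use |r| > 1 so that
 * deleted records (-1) still compare as in-range and are filtered out
 * by the scan callback rather than pruning the RB_SCAN early.
 */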
static
int
hammer_rec_scan_cmp(hammer_record_t rec, void *data)
{
        hammer_cursor_t cursor = data;
        int r;

        r = hammer_rec_cmp(&cursor->key_beg, rec);
        if (r > 1)
                return(-1);
        r = hammer_rec_cmp(&cursor->key_end, rec);
        if (r < -1)
                return(1);
        return(0);
}

/*
 * This compare function is used when simply looking up key_beg.
 */
static
int
hammer_rec_find_cmp(hammer_record_t rec, void *data)
{
        hammer_cursor_t cursor = data;
        int r;

        r = hammer_rec_cmp(&cursor->key_beg, rec);
        if (r > 1)
                return(-1);
        if (r < -1)
                return(1);
        return(0);
}

/*
 * Locate blocks within the truncation range. Partial blocks do not count.
 */
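/*
 * Example (illustrative): with trunc_off 0x3000, a DATA record covering
 * [0x0000, 0x4000) starts below the truncation point and is retained
 * (the vnops setattr code handles the block containing the truncation
 * point), while a record covering [0x4000, 0x8000) matches and is
 * destroyed.
 */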
static
int
hammer_rec_trunc_cmp(hammer_record_t rec, void *data)
{
        struct rec_trunc_info *info = data;

        if (rec->leaf.base.rec_type < info->rec_type)
                return(-1);
        if (rec->leaf.base.rec_type > info->rec_type)
                return(1);

        switch(rec->leaf.base.rec_type) {
        case HAMMER_RECTYPE_DB:
                /*
                 * DB record key is not beyond the truncation point, retain.
                 */
                if (rec->leaf.base.key < info->trunc_off)
                        return(-1);
                break;
        case HAMMER_RECTYPE_DATA:
                /*
                 * DATA record offset start is not beyond the truncation point,
                 * retain.
                 */
                if (rec->leaf.base.key - rec->leaf.data_len < info->trunc_off)
                        return(-1);
                break;
        default:
                panic("hammer_rec_trunc_cmp: unexpected record type");
        }

        /*
         * The record start is >= the truncation point, return match,
         * the record should be destroyed.
         */
        return(0);
}

RB_GENERATE(hammer_rec_rb_tree, hammer_record, rb_node, hammer_rec_rb_compare);

/*
 * Allocate a record for the caller to finish filling in. The record is
 * returned referenced.
 */
hammer_record_t
hammer_alloc_mem_record(hammer_inode_t ip, int data_len)
{
        hammer_record_t record;
        hammer_mount_t hmp;

        hmp = ip->hmp;
        ++hammer_count_records;
        record = kmalloc(sizeof(*record), hmp->m_misc,
                         M_WAITOK | M_ZERO | M_USE_RESERVE);
        record->flush_state = HAMMER_FST_IDLE;
        record->ip = ip;
        record->leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        record->leaf.data_len = data_len;
        hammer_ref(&record->lock);

        if (data_len) {
                record->data = kmalloc(data_len, hmp->m_misc, M_WAITOK | M_ZERO);
                record->flags |= HAMMER_RECF_ALLOCDATA;
                ++hammer_count_record_datas;
        }

        return (record);
}

void
hammer_wait_mem_record_ident(hammer_record_t record, const char *ident)
{
        while (record->flush_state == HAMMER_FST_FLUSH) {
                record->flags |= HAMMER_RECF_WANTED;
                tsleep(record, 0, ident, 0);
        }
}

/*
 * Called from the backend, hammer_inode.c, after a record has been
 * flushed to disk. The record has been exclusively locked by the
 * caller and interlocked with BE.
 *
 * We clean up the state, unlock, and release the record (the record
 * was referenced by the fact that it was in the HAMMER_FST_FLUSH state).
 */
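/*
 * Flush-state transitions, as inferred from this file: a record moves
 * IDLE -> SETUP when it acquires a dependency, SETUP -> FLUSH when the
 * backend takes it over, and here FLUSH -> IDLE on commit/deletion, or
 * FLUSH -> SETUP when a failed sync leaves a target-inode dependency
 * in place.
 */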
void
hammer_flush_record_done(hammer_record_t record, int error)
{
        hammer_inode_t target_ip;

        KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
        KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);

        /*
         * If an error occurred, the backend was unable to sync the
         * record to its media. Leave the record intact.
         */
        if (error) {
                hammer_critical_error(record->ip->hmp, record->ip, error,
                                      "while flushing record");
        }

        --record->flush_group->refs;
        record->flush_group = NULL;

        /*
         * Adjust the flush state and dependency based on success or
         * failure.
         */
        if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
                if ((target_ip = record->target_ip) != NULL) {
                        TAILQ_REMOVE(&target_ip->target_list, record,
                                     target_entry);
                        record->target_ip = NULL;
                        hammer_test_inode(target_ip);
                }
                record->flush_state = HAMMER_FST_IDLE;
        } else {
                if (record->target_ip) {
                        record->flush_state = HAMMER_FST_SETUP;
                        hammer_test_inode(record->ip);
                        hammer_test_inode(record->target_ip);
                } else {
                        record->flush_state = HAMMER_FST_IDLE;
                }
        }
        record->flags &= ~HAMMER_RECF_INTERLOCK_BE;

        /*
         * Cleanup
         */
        if (record->flags & HAMMER_RECF_WANTED) {
                record->flags &= ~HAMMER_RECF_WANTED;
                wakeup(record);
        }
        hammer_rel_mem_record(record);
}

/*
 * Release a memory record. Records marked for deletion are immediately
 * removed from the RB-Tree but otherwise left intact until the last ref
 * goes away.
 */
void
hammer_rel_mem_record(struct hammer_record *record)
{
        hammer_mount_t hmp;
        hammer_reserve_t resv;
        hammer_inode_t ip;
        hammer_inode_t target_ip;
        int diddrop;

        hammer_rel(&record->lock);

        if (hammer_norefs(&record->lock)) {
                /*
                 * Upon release of the last reference wakeup any waiters.
                 * The record structure may get destroyed so callers will
                 * loop up and do a relookup.
                 *
                 * WARNING! Record must be removed from RB-TREE before we
                 *          might possibly block. hammer_test_inode() can block!
                 */
                ip = record->ip;
                hmp = ip->hmp;

                /*
                 * Upon release of the last reference a record marked deleted
                 * by the front or backend, or committed by the backend,
                 * is destroyed.
                 */
                if (record->flags & (HAMMER_RECF_DELETED_FE |
                                     HAMMER_RECF_DELETED_BE |
                                     HAMMER_RECF_COMMITTED)) {
                        KKASSERT(hammer_isactive(&ip->lock) > 0);
                        KKASSERT(record->flush_state != HAMMER_FST_FLUSH);

                        /*
                         * target_ip may have zero refs, we have to ref it
                         * to prevent it from being ripped out from under
                         * us.
                         */
                        if ((target_ip = record->target_ip) != NULL) {
                                TAILQ_REMOVE(&target_ip->target_list,
                                             record, target_entry);
                                record->target_ip = NULL;
                                hammer_ref(&target_ip->lock);
                        }

                        /*
                         * Remove the record from the B-Tree
                         */
                        if (record->flags & HAMMER_RECF_ONRBTREE) {
                                RB_REMOVE(hammer_rec_rb_tree,
                                          &record->ip->rec_tree,
                                          record);
                                record->flags &= ~HAMMER_RECF_ONRBTREE;
                                KKASSERT(ip->rsv_recs > 0);
                                if (RB_EMPTY(&record->ip->rec_tree)) {
                                        record->ip->flags &=
                                                        ~HAMMER_INODE_XDIRTY;
                                        record->ip->sync_flags &=
                                                        ~HAMMER_INODE_XDIRTY;
                                }
                                diddrop = 1;
                        } else {
                                diddrop = 0;
                        }

                        /*
                         * We must wait for any direct-IO to complete before
                         * we can destroy the record because the bio may
                         * have a reference to it.
                         */
                        if (record->gflags &
                            (HAMMER_RECG_DIRECT_IO | HAMMER_RECG_DIRECT_INVAL)) {
                                hammer_io_direct_wait(record);
                        }

                        /*
                         * Account for the completion after the direct IO
                         * has completed.
                         */
                        if (diddrop) {
                                --hmp->rsv_recs;
                                --ip->rsv_recs;
                                hmp->rsv_databytes -= record->leaf.data_len;

                                if (RB_EMPTY(&record->ip->rec_tree))
                                        hammer_test_inode(record->ip);
                                if ((ip->flags & HAMMER_INODE_RECSW) &&
                                    ip->rsv_recs <= hammer_limit_inode_recs/2) {
                                        ip->flags &= ~HAMMER_INODE_RECSW;
                                        wakeup(&ip->rsv_recs);
                                }
                        }

                        /*
                         * Do this test after removing record from the B-Tree.
                         */
                        if (target_ip) {
                                hammer_test_inode(target_ip);
                                hammer_rel_inode(target_ip, 0);
                        }

                        if (record->flags & HAMMER_RECF_ALLOCDATA) {
                                --hammer_count_record_datas;
                                kfree(record->data, hmp->m_misc);
                                record->flags &= ~HAMMER_RECF_ALLOCDATA;
                        }

                        /*
                         * Release the reservation.
                         *
                         * If the record was not committed we can theoretically
                         * undo the reservation. However, doing so might
                         * create weird edge cases with the ordering of
                         * direct writes because the related buffer cache
                         * elements are per-vnode. So we don't try.
                         */
                        if ((resv = record->resv) != NULL) {
                                /* XXX undo leaf.data_offset,leaf.data_len */
                                hammer_blockmap_reserve_complete(hmp, resv);
                                record->resv = NULL;
                        }
                        record->data = NULL;
                        --hammer_count_records;
                        kfree(record, hmp->m_misc);
                }
        }
}

/*
 * Record visibility depends on whether the record is being accessed by
 * the backend or the frontend. Backend tests ignore the frontend delete
 * flag. Frontend tests do NOT ignore the backend delete/commit flags and
 * must also check for commit races.
 *
 * Return non-zero if the record is visible, zero if it isn't or if it is
 * deleted. Returns 0 if the record has been committed (unless the special
 * delete-visibility flag is set). A committed record must be located
 * via the media B-Tree. Returns non-zero if the record is good.
 *
 * If HAMMER_CURSOR_DELETE_VISIBILITY is set we allow deleted memory
 * records to be returned. This is so pending deletions are detected
 * when using an iterator to locate an unused hash key, or when we need
 * to locate historical records on-disk to destroy.
 */
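/*
 * Summary of the visibility rules implemented below:
 *
 *      record flags            backend scan    frontend scan
 *      ------------            ------------    -------------
 *      (none)                  visible         visible
 *      DELETED_FE              visible         hidden
 *      DELETED_BE, COMMITTED   hidden          hidden
 *
 * HAMMER_CURSOR_DELETE_VISIBILITY overrides all of the above and makes
 * every record visible.
 */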
static __inline
int
hammer_ip_iterate_mem_good(hammer_cursor_t cursor, hammer_record_t record)
{
        if (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY)
                return(1);
        if (cursor->flags & HAMMER_CURSOR_BACKEND) {
                if (record->flags & (HAMMER_RECF_DELETED_BE |
                                     HAMMER_RECF_COMMITTED)) {
                        return(0);
                }
        } else {
                if (record->flags & (HAMMER_RECF_DELETED_FE |
                                     HAMMER_RECF_DELETED_BE |
                                     HAMMER_RECF_COMMITTED)) {
                        return(0);
                }
        }
        return(1);
}

/*
 * This callback is used as part of the RB_SCAN function for in-memory
 * records. We terminate it (return -1) as soon as we get a match.
 *
 * This routine is used by frontend code.
 *
 * The primary compare code does not account for ASOF lookups. This
 * code handles that case as well as a few others.
 */
static
int
hammer_rec_scan_callback(hammer_record_t rec, void *data)
{
        hammer_cursor_t cursor = data;

        /*
         * We terminate on success, so this should be NULL on entry.
         */
        KKASSERT(cursor->iprec == NULL);

        /*
         * Skip if the record was marked deleted or committed.
         */
        if (hammer_ip_iterate_mem_good(cursor, rec) == 0)
                return(0);

        /*
         * Skip if not visible due to our as-of TID
         */
        if (cursor->flags & HAMMER_CURSOR_ASOF) {
                if (cursor->asof < rec->leaf.base.create_tid)
                        return(0);
                if (rec->leaf.base.delete_tid &&
                    cursor->asof >= rec->leaf.base.delete_tid) {
                        return(0);
                }
        }

        /*
         * ref the record. The record is protected from backend B-Tree
         * interactions by virtue of the cursor's IP lock.
         */
        hammer_ref(&rec->lock);

        /*
         * The record may have been deleted or committed while we
         * were blocked. XXX remove?
         */
        if (hammer_ip_iterate_mem_good(cursor, rec) == 0) {
                hammer_rel_mem_record(rec);
                return(0);
        }

        /*
         * Set the matching record and stop the scan.
         */
        cursor->iprec = rec;
        return(-1);
}


/*
 * Lookup an in-memory record given the key specified in the cursor. Works
 * just like hammer_btree_lookup() but operates on an inode's in-memory
 * record list.
 *
 * The lookup must fail if the record is marked for deferred deletion.
 *
 * The API for mem/btree_lookup() does not mess with the ATE/EOF bits.
 */
static
int
hammer_mem_lookup(hammer_cursor_t cursor)
{
        KKASSERT(cursor->ip);
        if (cursor->iprec) {
                hammer_rel_mem_record(cursor->iprec);
                cursor->iprec = NULL;
        }
        hammer_rec_rb_tree_RB_SCAN(&cursor->ip->rec_tree, hammer_rec_find_cmp,
                                   hammer_rec_scan_callback, cursor);

        return (cursor->iprec ? 0 : ENOENT);
}

/*
 * hammer_mem_first() - locate the first in-memory record matching the
 * cursor within the bounds of the key range.
 *
 * WARNING! API is slightly different from btree_first(). hammer_mem_first()
 * will set ATEMEM the same as MEMEOF, and does not return any error.
 */
static
void
hammer_mem_first(hammer_cursor_t cursor)
{
        hammer_inode_t ip;

        ip = cursor->ip;
        KKASSERT(ip != NULL);

        if (cursor->iprec) {
                hammer_rel_mem_record(cursor->iprec);
                cursor->iprec = NULL;
        }
        hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
                                   hammer_rec_scan_callback, cursor);

        if (cursor->iprec)
                cursor->flags &= ~(HAMMER_CURSOR_MEMEOF | HAMMER_CURSOR_ATEMEM);
        else
                cursor->flags |= HAMMER_CURSOR_MEMEOF | HAMMER_CURSOR_ATEMEM;
}

/************************************************************************
 *                  HAMMER IN-MEMORY RECORD FUNCTIONS                   *
 ************************************************************************
 *
 * These functions manipulate in-memory records. Such records typically
 * exist prior to being committed to disk or indexed via the on-disk B-Tree.
 */

/*
 * Add a directory entry (dip,ncp) which references inode (ip).
 *
 * Note that the low 32 bits of the namekey are set temporarily to create
 * a unique in-memory record, and may be modified a second time when the
 * record is synchronized to disk. In particular, the low 32 bits cannot be
 * all 0's when synching to disk, which is not handled here.
 *
 * NOTE: bytes does not include any terminating \0 on name, and name might
 *       not be terminated.
 */
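/*
 * Illustrative note: hammer_directory_namekey() derives the 64 bit
 * directory key from the name (hash based; the exact layout depends on
 * the directory hash algorithm) and reports, via *max_iterations, how
 * much room the low bits leave for collision resolution. The loop
 * below simply increments the key, checking both the in-memory tree
 * and the B-Tree, until an unused key is found or the iteration space
 * is exhausted (ENOSPC).
 */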
int
hammer_ip_add_directory(struct hammer_transaction *trans,
                        struct hammer_inode *dip, const char *name, int bytes,
                        struct hammer_inode *ip)
{
        struct hammer_cursor cursor;
        hammer_record_t record;
        int error;
        u_int32_t max_iterations;

        record = hammer_alloc_mem_record(dip, HAMMER_ENTRY_SIZE(bytes));

        record->type = HAMMER_MEM_RECORD_ADD;
        record->leaf.base.localization = dip->obj_localization +
                                         hammer_dir_localization(dip);
        record->leaf.base.obj_id = dip->obj_id;
        record->leaf.base.key = hammer_directory_namekey(dip, name, bytes,
                                                         &max_iterations);
        record->leaf.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
        record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
        record->data->entry.obj_id = ip->obj_id;
        record->data->entry.localization = ip->obj_localization;
        bcopy(name, record->data->entry.name, bytes);

        ++ip->ino_data.nlinks;
        ip->ino_data.ctime = trans->time;
        hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);

        /*
         * Find an unused namekey. Both the in-memory record tree and
         * the B-Tree are checked. We do not want historically deleted
         * names to create a collision as our iteration space may be limited,
         * and since create_tid wouldn't match anyway an ASOF search
         * must be used to locate collisions.
         *
         * delete-visibility is set so pending deletions do not give us
         * a false-negative on our ability to use an iterator.
         *
         * The iterator must not rollover the key. Directory keys only
         * use the positive key space.
         */
        hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
        cursor.key_beg = record->leaf.base;
        cursor.flags |= HAMMER_CURSOR_ASOF;
        cursor.flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
        cursor.asof = ip->obj_asof;

        while (hammer_ip_lookup(&cursor) == 0) {
                ++record->leaf.base.key;
                KKASSERT(record->leaf.base.key > 0);
                cursor.key_beg.key = record->leaf.base.key;
                if (--max_iterations == 0) {
                        hammer_rel_mem_record(record);
                        error = ENOSPC;
                        goto failed;
                }
        }

        /*
         * The target inode and the directory entry are bound together.
         */
        record->target_ip = ip;
        record->flush_state = HAMMER_FST_SETUP;
        TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);

        /*
         * The inode now has a dependency and must be taken out of the idle
         * state. An inode not in an idle state is given an extra reference.
         *
         * When transitioning to a SETUP state flag for an automatic reflush
         * when the dependencies are disposed of if someone is waiting on
         * the inode.
         */
        if (ip->flush_state == HAMMER_FST_IDLE) {
                hammer_ref(&ip->lock);
                ip->flush_state = HAMMER_FST_SETUP;
                if (ip->flags & HAMMER_INODE_FLUSHW)
                        ip->flags |= HAMMER_INODE_REFLUSH;
        }
        error = hammer_mem_add(record);
        if (error == 0) {
                dip->ino_data.mtime = trans->time;
                hammer_modify_inode(trans, dip, HAMMER_INODE_MTIME);
        }
failed:
        hammer_done_cursor(&cursor);
        return(error);
}

/*
 * Delete the directory entry and update the inode link count. The
 * cursor must be positioned at the directory entry record being deleted.
 *
 * The related inode should be share-locked by the caller; it may also
 * be NULL, indicating that the directory entry being removed has no
 * related inode. The caller is on the frontend.
 *
 * This function can return EDEADLK requiring the caller to terminate
 * the cursor, any locks, wait on the returned record, and retry.
 */
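/*
 * Hypothetical caller-side sketch of the EDEADLK protocol described
 * above (the conflicting record is handed back via cursor->deadlk_rec,
 * see the in-memory branch below; the cursor teardown path is assumed
 * to wait on it before the retry):
 *
 *      retry:
 *              ...position cursor on the directory entry...
 *              error = hammer_ip_del_directory(trans, &cursor, dip, ip);
 *              hammer_done_cursor(&cursor);
 *              if (error == EDEADLK)
 *                      goto retry;
 */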
int
hammer_ip_del_directory(struct hammer_transaction *trans,
                        hammer_cursor_t cursor, struct hammer_inode *dip,
                        struct hammer_inode *ip)
{
        hammer_record_t record;
        int error;

        if (hammer_cursor_inmem(cursor)) {
                /*
                 * In-memory (unsynchronized) records can simply be freed.
                 *
                 * Even though the HAMMER_RECF_DELETED_FE flag is ignored
                 * by the backend, we must still avoid races against the
                 * backend potentially syncing the record to the media.
                 *
                 * We cannot call hammer_ip_delete_record(), that routine may
                 * only be called from the backend.
                 */
                record = cursor->iprec;
                if (record->flags & (HAMMER_RECF_INTERLOCK_BE |
                                     HAMMER_RECF_DELETED_BE |
                                     HAMMER_RECF_COMMITTED)) {
                        KKASSERT(cursor->deadlk_rec == NULL);
                        hammer_ref(&record->lock);
                        cursor->deadlk_rec = record;
                        error = EDEADLK;
                } else {
                        KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        error = 0;
                }
        } else {
                /*
                 * If the record is on-disk we have to queue the deletion by
                 * the record's key. This also causes lookups to skip the
                 * record (lookups for the purposes of finding an unused
                 * directory key do not skip the record).
                 */
                KKASSERT(dip->flags &
                         (HAMMER_INODE_ONDISK | HAMMER_INODE_DONDISK));
                record = hammer_alloc_mem_record(dip, 0);
                record->type = HAMMER_MEM_RECORD_DEL;
                record->leaf.base = cursor->leaf->base;
                KKASSERT(dip->obj_id == record->leaf.base.obj_id);

                /*
                 * ip may be NULL, indicating the deletion of a directory
                 * entry which has no related inode.
                 */
                record->target_ip = ip;
                if (ip) {
                        record->flush_state = HAMMER_FST_SETUP;
                        TAILQ_INSERT_TAIL(&ip->target_list, record,
                                          target_entry);
                } else {
                        record->flush_state = HAMMER_FST_IDLE;
                }

                /*
                 * The inode now has a dependency and must be taken out of
                 * the idle state. An inode not in an idle state is given
                 * an extra reference.
                 *
                 * When transitioning to a SETUP state flag for an automatic
                 * reflush when the dependencies are disposed of if someone
                 * is waiting on the inode.
                 */
                if (ip && ip->flush_state == HAMMER_FST_IDLE) {
                        hammer_ref(&ip->lock);
                        ip->flush_state = HAMMER_FST_SETUP;
                        if (ip->flags & HAMMER_INODE_FLUSHW)
                                ip->flags |= HAMMER_INODE_REFLUSH;
                }

                error = hammer_mem_add(record);
        }

        /*
         * One less link. The file may still be open in the OS even after
         * all links have gone away.
         *
         * We have to terminate the cursor before syncing the inode to
         * avoid deadlocking against ourselves. XXX this may no longer
         * be true.
         *
         * If nlinks drops to zero and the vnode is inactive (or there is
         * no vnode), call hammer_inode_unloadable_check() to zonk the
         * inode. If we don't do this here the inode will not be destroyed
         * on-media until we unmount.
         */
        if (error == 0) {
                if (ip) {
                        --ip->ino_data.nlinks;  /* do before we might block */
                        ip->ino_data.ctime = trans->time;
                }
                dip->ino_data.mtime = trans->time;
                hammer_modify_inode(trans, dip, HAMMER_INODE_MTIME);
                if (ip) {
                        hammer_modify_inode(trans, ip, HAMMER_INODE_DDIRTY);
                        if (ip->ino_data.nlinks == 0 &&
                            (ip->vp == NULL || (ip->vp->v_flag & VINACTIVE))) {
                                hammer_done_cursor(cursor);
                                hammer_inode_unloadable_check(ip, 1);
                                hammer_flush_inode(ip, 0);
                        }
                }
        }
        return(error);
}

/*
 * Add a record to an inode.
 *
 * The caller must allocate the record with hammer_alloc_mem_record(ip) and
 * initialize the following additional fields:
 *
 *      record->rec.entry.base.base.key
 *      record->rec.entry.base.base.rec_type
 *      record->rec.entry.base.base.data_len
 *      record->data (a copy will be kmalloc'd if it cannot be embedded)
 *
 * The related inode should be share-locked by the caller. The caller is
 * on the frontend.
 */
int
hammer_ip_add_record(struct hammer_transaction *trans, hammer_record_t record)
{
        hammer_inode_t ip = record->ip;
        int error;

        KKASSERT(record->leaf.base.localization != 0);
        record->leaf.base.obj_id = ip->obj_id;
        record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
        error = hammer_mem_add(record);
        return(error);
}

/*
 * Locate a pre-existing bulk record in memory. The caller wishes to
 * replace the record with a new one. The existing record may have a
 * different length (and thus a different key) so we have to use an
 * overlap check function.
 */
static hammer_record_t
hammer_ip_get_bulk(hammer_record_t record)
{
        struct hammer_bulk_info info;
        hammer_inode_t ip = record->ip;

        info.record = record;
        info.conflict = NULL;
        hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_overlap_cmp,
                                   hammer_bulk_scan_callback, &info);

        return(info.conflict);  /* may be NULL */
}

/*
 * Take records vetted by overlap_cmp. The first non-deleted record
 * (if any) stops the scan.
 */
static int
hammer_bulk_scan_callback(hammer_record_t record, void *data)
{
        struct hammer_bulk_info *info = data;

        if (record->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
                             HAMMER_RECF_COMMITTED)) {
                return(0);
        }
        hammer_ref(&record->lock);
        info->conflict = record;
        return(-1);             /* stop scan */
}

/*
 * Reserve blockmap space placemarked with an in-memory record.
 *
 * This routine is called by the frontend in order to be able to directly
 * flush a buffer cache buffer. The frontend has locked the related buffer
 * cache buffers and we should be able to manipulate any overlapping
 * in-memory records.
 *
 * The caller is responsible for adding the returned record and deleting
 * the returned conflicting record (if any), typically by calling
 * hammer_ip_replace_bulk() (via hammer_io_direct_write()).
 */
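/*
 * Assumed usage flow (see hammer_io_direct_write() in hammer_io.c for
 * the authoritative sequence): the frontend calls hammer_ip_add_bulk()
 * to reserve media space, issues the direct write against the returned
 * reservation, and the record is later emplaced in the in-memory tree
 * via hammer_ip_replace_bulk() below, displacing any overlapping
 * record.
 */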
hammer_record_t
hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, void *data, int bytes,
                   int *errorp)
{
        hammer_record_t record;
        hammer_dedup_cache_t dcp;
        hammer_crc_t crc;
        int zone;

        /*
         * Create a record to cover the direct write. The record cannot
         * be added to the in-memory RB tree here as it might conflict
         * with an existing memory record. See hammer_io_direct_write().
         *
         * The backend is responsible for finalizing the space reserved in
         * this record.
         *
         * XXX bytes not aligned, depend on the reservation code to
         * align the reservation.
         */
        record = hammer_alloc_mem_record(ip, 0);
        zone = (bytes >= HAMMER_BUFSIZE) ? HAMMER_ZONE_LARGE_DATA_INDEX :
                                           HAMMER_ZONE_SMALL_DATA_INDEX;
        if (bytes == 0)
                crc = 0;
        else
                crc = crc32(data, bytes);

        if (hammer_live_dedup == 0)
                goto nodedup;
        if ((dcp = hammer_dedup_cache_lookup(ip->hmp, crc)) != NULL) {
                struct hammer_dedup_cache tmp = *dcp;

                record->resv = hammer_blockmap_reserve_dedup(ip->hmp, zone,
                        bytes, tmp.data_offset, errorp);
                if (record->resv == NULL)
                        goto nodedup;

                if (!hammer_dedup_validate(&tmp, zone, bytes, data)) {
                        hammer_blockmap_reserve_complete(ip->hmp, record->resv);
                        goto nodedup;
                }

                record->leaf.data_offset = tmp.data_offset;
                record->flags |= HAMMER_RECF_DEDUPED;
        } else {
nodedup:
                record->resv = hammer_blockmap_reserve(ip->hmp, zone, bytes,
                       &record->leaf.data_offset, errorp);
                if (record->resv == NULL) {
                        kprintf("hammer_ip_add_bulk: reservation failed\n");
                        hammer_rel_mem_record(record);
                        return(NULL);
                }
        }

        record->type = HAMMER_MEM_RECORD_DATA;
        record->leaf.base.rec_type = HAMMER_RECTYPE_DATA;
        record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
        record->leaf.base.obj_id = ip->obj_id;
        record->leaf.base.key = file_offset + bytes;
        record->leaf.base.localization = ip->obj_localization +
                                         HAMMER_LOCALIZE_MISC;
        record->leaf.data_len = bytes;
        record->leaf.data_crc = crc;
        KKASSERT(*errorp == 0);

        return(record);
}

/*
 * Called by hammer_io_direct_write() prior to any possible completion
 * of the BIO to emplace the memory record associated with the I/O and
 * to replace any prior memory record which might still be active.
 *
 * Setting the FE deleted flag on the old record (if any) avoids any RB
 * tree insertion conflict, among other things.
 *
 * This has to be done prior to the caller completing any related buffer
 * cache I/O or a reinstantiation of the buffer may load data from the
 * old media location instead of the new media location. The holding
 * of the locked buffer cache buffer serves to interlock the record
 * replacement operation.
 */
void
hammer_ip_replace_bulk(hammer_mount_t hmp, hammer_record_t record)
{
        hammer_record_t conflict;
        int error;

        while ((conflict = hammer_ip_get_bulk(record)) != NULL) {
                if ((conflict->flags & HAMMER_RECF_INTERLOCK_BE) == 0) {
                        conflict->flags |= HAMMER_RECF_DELETED_FE;
                        break;
                }
                conflict->flags |= HAMMER_RECF_WANTED;
                tsleep(conflict, 0, "hmrrc3", 0);
                hammer_rel_mem_record(conflict);
        }
        error = hammer_mem_add(record);
        if (conflict)
                hammer_rel_mem_record(conflict);
        KKASSERT(error == 0);
}

/*
 * Frontend truncation code. Scan in-memory records only. On-disk records
 * and records in a flushing state are handled by the backend. The vnops
 * setattr code will handle the block containing the truncation point.
 *
 * Partial blocks are not deleted.
 *
 * This code is only called on regular files.
 */
int
hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size)
{
        struct rec_trunc_info info;

        switch(ip->ino_data.obj_type) {
        case HAMMER_OBJTYPE_REGFILE:
                info.rec_type = HAMMER_RECTYPE_DATA;
                break;
        case HAMMER_OBJTYPE_DBFILE:
                info.rec_type = HAMMER_RECTYPE_DB;
                break;
        default:
                return(EINVAL);
        }
        info.trunc_off = file_size;
        hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_trunc_cmp,
                                   hammer_frontend_trunc_callback, &info);
        return(0);
}

/*
 * Scan callback for frontend records to destroy during a truncation.
 * We must ensure that DELETED_FE is set on the record or the frontend
 * will get confused in future read() calls.
 *
 * NOTE: DELETED_FE cannot be set while the record interlock (BE) is held.
 *       In this rare case we must wait for the interlock to be cleared.
 *
 * NOTE: This function is only called on regular files. There are further
 *       restrictions to the setting of DELETED_FE on directory records
 *       undergoing a flush due to sensitive inode link count calculations.
 */
static int
hammer_frontend_trunc_callback(hammer_record_t record, void *data __unused)
{
        if (record->flags & HAMMER_RECF_DELETED_FE)
                return(0);
#if 0
        if (record->flush_state == HAMMER_FST_FLUSH)
                return(0);
#endif
        hammer_ref(&record->lock);
        while (record->flags & HAMMER_RECF_INTERLOCK_BE)
                hammer_wait_mem_record_ident(record, "hmmtrr");
        record->flags |= HAMMER_RECF_DELETED_FE;
        hammer_rel_mem_record(record);
        return(0);
}

/*
 * Return 1 if the caller must check for and delete existing records
 * before writing out a new data record.
 *
 * Return 0 if the caller can just insert the record into the B-Tree without
 * checking.
 */
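/*
 * Worked example (illustrative): with save_trunc_off at 0x8000, a new
 * DATA record covering [0x4000, 0x8000) starts below the truncation
 * point, so the caller must first delete any overlapping on-disk
 * records; a record covering [0x8000, 0xC000) does not, and can be
 * inserted directly (save_trunc_off then advances to 0xC000).
 */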
static int
hammer_record_needs_overwrite_delete(hammer_record_t record)
{
        hammer_inode_t ip = record->ip;
        int64_t file_offset;
        int r;

        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE)
                file_offset = record->leaf.base.key;
        else
                file_offset = record->leaf.base.key - record->leaf.data_len;
        r = (file_offset < ip->save_trunc_off);
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
                if (ip->save_trunc_off <= record->leaf.base.key)
                        ip->save_trunc_off = record->leaf.base.key + 1;
        } else {
                if (ip->save_trunc_off < record->leaf.base.key)
                        ip->save_trunc_off = record->leaf.base.key;
        }
        return(r);
}

/*
 * Backend code. Sync a record to the media.
 */
int
hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t record)
{
        hammer_transaction_t trans = cursor->trans;
        int64_t file_offset;
        int bytes;
        void *bdata;
        int error;
        int doprop;

        KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
        KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
        KKASSERT(record->leaf.base.localization != 0);

        /*
         * Any direct-write related to the record must complete before we
         * can sync the record to the on-disk media.
         */
        if (record->gflags & (HAMMER_RECG_DIRECT_IO | HAMMER_RECG_DIRECT_INVAL))
                hammer_io_direct_wait(record);

        /*
         * If this is a bulk-data record placemarker there may be an existing
         * record on-disk, indicating a data overwrite. If there is the
         * on-disk record must be deleted before we can insert our new record.
         *
         * We've synthesized this record and do not know what the create_tid
         * on-disk is, nor how much data it represents.
         *
         * Keep in mind that (key) for data records is (base_offset + len),
         * not (base_offset). Also, we only want to get rid of on-disk
         * records since we are trying to sync our in-memory record, call
         * hammer_ip_delete_range() with truncating set to 1 to make sure
         * it skips in-memory records.
         *
         * It is ok for the lookup to return ENOENT.
         *
         * NOTE OPTIMIZATION: sync_trunc_off is used to determine if we have
         * to call hammer_ip_delete_range() or not. This also means we must
         * update sync_trunc_off as we write.
         */
        if (record->type == HAMMER_MEM_RECORD_DATA &&
            hammer_record_needs_overwrite_delete(record)) {
                file_offset = record->leaf.base.key - record->leaf.data_len;
                bytes = (record->leaf.data_len + HAMMER_BUFMASK) &
                        ~HAMMER_BUFMASK;
                KKASSERT((file_offset & HAMMER_BUFMASK) == 0);
                error = hammer_ip_delete_range(
                                cursor, record->ip,
                                file_offset, file_offset + bytes - 1,
                                1);
                if (error && error != ENOENT)
                        goto done;
        }

        /*
         * If this is a general record there may be an on-disk version
         * that must be deleted before we can insert the new record.
         */
        if (record->type == HAMMER_MEM_RECORD_GENERAL) {
                error = hammer_delete_general(cursor, record->ip,
                                              &record->leaf);
                if (error && error != ENOENT)
                        goto done;
        }

        /*
         * Setup the cursor.
         */
        hammer_normalize_cursor(cursor);
        cursor->key_beg = record->leaf.base;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_BACKEND;
        cursor->flags &= ~HAMMER_CURSOR_INSERT;

        /*
         * Records can wind up on-media before the inode itself is on-media.
         * Flag the case.
         */
        record->ip->flags |= HAMMER_INODE_DONDISK;

        /*
         * If we are deleting a directory entry an exact match must be
         * found on-disk.
         */
        if (record->type == HAMMER_MEM_RECORD_DEL) {
                error = hammer_btree_lookup(cursor);
                if (error == 0) {
                        KKASSERT(cursor->iprec == NULL);
                        error = hammer_ip_delete_record(cursor, record->ip,
                                                        trans->tid);
                        if (error == 0) {
                                record->flags |= HAMMER_RECF_DELETED_BE |
                                                 HAMMER_RECF_COMMITTED;
                                ++record->ip->rec_generation;
                        }
                }
                goto done;
        }

        /*
         * We are inserting.
         *
         * Issue a lookup to position the cursor and locate the insertion
         * point. The target key should not exist. If we are creating a
         * directory entry we may have to iterate the low 32 bits of the
         * key to find an unused key.
         */
        hammer_sync_lock_sh(trans);
        cursor->flags |= HAMMER_CURSOR_INSERT;
        error = hammer_btree_lookup(cursor);
        if (hammer_debug_inode)
                kprintf("DOINSERT LOOKUP %d\n", error);
        if (error == 0) {
                kprintf("hammer_ip_sync_record: duplicate rec "
                        "at (%016llx)\n", (long long)record->leaf.base.key);
                if (hammer_debug_critical)
                        Debugger("duplicate record1");
                error = EIO;
        }
#if 0
        if (record->type == HAMMER_MEM_RECORD_DATA)
                kprintf("sync_record %016llx ---------------- %016llx %d\n",
                        record->leaf.base.key - record->leaf.data_len,
                        record->leaf.data_offset, error);
#endif

        if (error != ENOENT)
                goto done_unlock;

        /*
         * Allocate the record and data. The result buffers will be
         * marked as being modified and further calls to
         * hammer_modify_buffer() will result in unneeded UNDO records.
         *
         * Support zero-fill records (data == NULL and data_len != 0)
         */
        if (record->type == HAMMER_MEM_RECORD_DATA) {
                /*
                 * The data portion of a bulk-data record has already been
                 * committed to disk, we need only adjust the layer2
                 * statistics in the same transaction as our B-Tree insert.
                 */
                KKASSERT(record->leaf.data_offset != 0);
                error = hammer_blockmap_finalize(trans,
                                                 record->resv,
                                                 record->leaf.data_offset,
                                                 record->leaf.data_len);

                if (hammer_live_dedup == 2 &&
                    (record->flags & HAMMER_RECF_DEDUPED) == 0) {
                        hammer_dedup_cache_add(record->ip, &record->leaf);
                }
        } else if (record->data && record->leaf.data_len) {
                /*
                 * Wholly cached record, with data. Allocate the data.
                 */
                bdata = hammer_alloc_data(trans, record->leaf.data_len,
                                          record->leaf.base.rec_type,
                                          &record->leaf.data_offset,
                                          &cursor->data_buffer,
                                          0, &error);
                if (bdata == NULL)
                        goto done_unlock;
                hammer_crc_set_leaf(record->data, &record->leaf);
                hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);
                bcopy(record->data, bdata, record->leaf.data_len);
                hammer_modify_buffer_done(cursor->data_buffer);
        } else {
                /*
                 * Wholly cached record, without data.
                 */
                record->leaf.data_offset = 0;
                record->leaf.data_crc = 0;
        }

        error = hammer_btree_insert(cursor, &record->leaf, &doprop);
        if (hammer_debug_inode && error) {
                kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n",
                        error,
                        (long long)cursor->node->node_offset,
                        cursor->index,
                        (long long)record->leaf.base.key);
        }

        /*
         * Our record is on-disk and we normally mark the in-memory version
         * as having been committed (and not BE-deleted).
         *
         * If the record represented a directory deletion but we had to
         * sync a valid directory entry to disk due to dependencies,
         * we must convert the record to a covering delete so the
         * frontend does not have visibility on the synced entry.
         *
         * WARNING: cursor's leaf pointer may have changed after do_propagation
         *          returns!
         */
        if (error == 0) {
                if (doprop) {
                        hammer_btree_do_propagation(cursor,
                                                    record->ip->pfsm,
                                                    &record->leaf);
                }
                if (record->flags & HAMMER_RECF_CONVERT_DELETE) {
                        /*
                         * Must convert deleted directory entry add
                         * to a directory entry delete.
                         */
                        KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
                        record->flags &= ~HAMMER_RECF_DELETED_FE;
                        record->type = HAMMER_MEM_RECORD_DEL;
                        KKASSERT(record->ip->obj_id == record->leaf.base.obj_id);
                        KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
                        record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
                        KKASSERT((record->flags & (HAMMER_RECF_COMMITTED |
                                                   HAMMER_RECF_DELETED_BE)) == 0);
                        /* converted record is not yet committed */
                        /* hammer_flush_record_done takes care of the rest */
                } else {
                        /*
                         * Everything went fine and we are now done with
                         * this record.
                         */
                        record->flags |= HAMMER_RECF_COMMITTED;
                        ++record->ip->rec_generation;
                }
        } else {
                if (record->leaf.data_offset) {
                        hammer_blockmap_free(trans, record->leaf.data_offset,
                                             record->leaf.data_len);
                }
        }
done_unlock:
        hammer_sync_unlock(trans);
done:
        return(error);
}

/*
 * Add the record to the inode's rec_tree. The low 32 bits of a directory
 * entry's key are used to deal with hash collisions in the upper 32 bits.
 * A unique 64 bit key is generated in-memory and may be regenerated a
 * second time when the directory record is flushed to the on-disk B-Tree.
 *
 * A referenced record is passed to this function. This function
 * eats the reference. If an error occurs the record will be deleted.
 *
 * A copy of the temporary record->data pointer provided by the caller
 * will be made.
 */
int
hammer_mem_add(hammer_record_t record)
{
        hammer_mount_t hmp = record->ip->hmp;

        /*
         * Make a private copy of record->data
         */
        if (record->data)
                KKASSERT(record->flags & HAMMER_RECF_ALLOCDATA);

        /*
         * Insert into the RB tree. A unique key should have already
         * been selected if this is a directory entry.
         */
        if (RB_INSERT(hammer_rec_rb_tree, &record->ip->rec_tree, record)) {
                record->flags |= HAMMER_RECF_DELETED_FE;
                hammer_rel_mem_record(record);
                return (EEXIST);
        }
        ++hmp->count_newrecords;
        ++hmp->rsv_recs;
        ++record->ip->rsv_recs;
        record->ip->hmp->rsv_databytes += record->leaf.data_len;
        record->flags |= HAMMER_RECF_ONRBTREE;
        hammer_modify_inode(NULL, record->ip, HAMMER_INODE_XDIRTY);
        hammer_rel_mem_record(record);
        return(0);
}

/************************************************************************
 *                 HAMMER INODE MERGED-RECORD FUNCTIONS                 *
 ************************************************************************
 *
 * These functions augment the B-Tree scanning functions in hammer_btree.c
 * by merging in-memory records with on-disk records.
 */

/*
 * Locate a particular record either in-memory or on-disk.
 *
 * NOTE: This is basically a standalone routine, hammer_ip_next() may
 *       NOT be called to iterate results.
 */
int
hammer_ip_lookup(hammer_cursor_t cursor)
{
        int error;

        /*
         * If the element is in-memory return it without searching the
         * on-disk B-Tree
         */
        KKASSERT(cursor->ip);
        error = hammer_mem_lookup(cursor);
        if (error == 0) {
                cursor->leaf = &cursor->iprec->leaf;
                return(error);
        }
        if (error != ENOENT)
                return(error);

        /*
         * If the inode has on-disk components search the on-disk B-Tree.
         */
        if ((cursor->ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) == 0)
                return(error);
        error = hammer_btree_lookup(cursor);
        if (error == 0)
                error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
        return(error);
}

/*
 * Helper for hammer_ip_first()/hammer_ip_next()
 *
 * NOTE: Both ATEDISK and DISKEOF will be set the same. This sets up
 * hammer_ip_first() for calling hammer_ip_next(), and sets up the re-seek
 * state if hammer_ip_next() needs to re-seek.
 */
static __inline
int
_hammer_ip_seek_btree(hammer_cursor_t cursor)
{
        hammer_inode_t ip = cursor->ip;
        int error;

        if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
                error = hammer_btree_lookup(cursor);
                if (error == ENOENT || error == EDEADLK) {
                        if (hammer_debug_general & 0x2000) {
                                kprintf("error %d node %p %016llx index %d\n",
                                        error, cursor->node,
                                        (long long)cursor->node->node_offset,
                                        cursor->index);
                        }
                        cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
                        error = hammer_btree_iterate(cursor);
                }
                if (error == 0) {
                        cursor->flags &= ~(HAMMER_CURSOR_DISKEOF |
                                           HAMMER_CURSOR_ATEDISK);
                } else {
                        cursor->flags |= HAMMER_CURSOR_DISKEOF |
                                         HAMMER_CURSOR_ATEDISK;
                        if (error == ENOENT)
                                error = 0;
                }
        } else {
                cursor->flags |= HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_ATEDISK;
                error = 0;
        }
        return(error);
}

/*
 * Helper for hammer_ip_next()
 *
 * The caller has determined that the media cursor is further along than the
 * memory cursor and must be re-seeked after a generation number change.
 */
static
int
_hammer_ip_reseek(hammer_cursor_t cursor)
{
        struct hammer_base_elm save;
        hammer_btree_elm_t elm;
        int error;
        int r;
        int again = 0;

        /*
         * Do the re-seek.
         */
        kprintf("HAMMER: Debug: re-seeked during scan @ino=%016llx\n",
                (long long)cursor->ip->obj_id);
        save = cursor->key_beg;
        cursor->key_beg = cursor->iprec->leaf.base;
        error = _hammer_ip_seek_btree(cursor);
        KKASSERT(error == 0);
        cursor->key_beg = save;

        /*
         * If the memory record was previously returned to
         * the caller and the media record matches
         * (-1/+1: only create_tid differs), then iterate
         * the media record to avoid a double result.
         */
        if ((cursor->flags & HAMMER_CURSOR_ATEDISK) == 0 &&
            (cursor->flags & HAMMER_CURSOR_LASTWASMEM)) {
                elm = &cursor->node->ondisk->elms[cursor->index];
                r = hammer_btree_cmp(&elm->base,
                                     &cursor->iprec->leaf.base);
                if (cursor->flags & HAMMER_CURSOR_ASOF) {
                        if (r >= -1 && r <= 1) {
                                kprintf("HAMMER: Debug: iterated after "
                                        "re-seek (asof r=%d)\n", r);
                                cursor->flags |= HAMMER_CURSOR_ATEDISK;
                                again = 1;
                        }
                } else {
                        if (r == 0) {
                                kprintf("HAMMER: Debug: iterated after "
                                        "re-seek\n");
                                cursor->flags |= HAMMER_CURSOR_ATEDISK;
                                again = 1;
                        }
                }
        }
        return(again);
}

/*
 * Locate the first record within the cursor's key_beg/key_end range,
 * restricted to a particular inode. 0 is returned on success, ENOENT
 * if no records matched the requested range, or some other error.
 *
 * When 0 is returned hammer_ip_next() may be used to iterate additional
 * records within the requested range.
 *
 * This function can return EDEADLK, requiring the caller to terminate
 * the cursor and try again.
 */
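/*
 * Hypothetical usage sketch of a merged scan (names as used elsewhere
 * in this file; HAMMER_CURSOR_END_INCLUSIVE is assumed from the cursor
 * header; error handling elided):
 *
 *      struct hammer_cursor cursor;
 *
 *      hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
 *      cursor.key_beg = ...;
 *      cursor.key_end = ...;
 *      cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
 *      error = hammer_ip_first(&cursor);
 *      while (error == 0) {
 *              ...process cursor.leaf...
 *              error = hammer_ip_next(&cursor);
 *      }
 *      hammer_done_cursor(&cursor);
 */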

int
hammer_ip_first(hammer_cursor_t cursor)
{
        hammer_inode_t ip __debugvar = cursor->ip;
        int error;

        KKASSERT(ip != NULL);

        /*
         * Clean up fields and setup for merged scan
         */
        cursor->flags &= ~HAMMER_CURSOR_RETEST;

        /*
         * Search the in-memory record list (Red-Black tree). Unlike the
         * B-Tree search, mem_first checks for records in the range.
         *
         * This function will setup both ATEMEM and MEMEOF properly for
         * the ip iteration. ATEMEM will be set if MEMEOF is set.
         */
        hammer_mem_first(cursor);

        /*
         * Detect generation changes during blockages, including
         * blockages which occur on the initial btree search.
         */
        cursor->rec_generation = cursor->ip->rec_generation;

        /*
         * Initial search and result
         */
        error = _hammer_ip_seek_btree(cursor);
        if (error == 0)
                error = hammer_ip_next(cursor);

        return (error);
}

/*
 * Retrieve the next record in a merged iteration within the bounds of the
 * cursor. This call may be made multiple times after the cursor has been
 * initially searched with hammer_ip_first().
 *
 * There are numerous special cases in this code to deal with races between
 * in-memory records and on-media records.
 *
 * 0 is returned on success, ENOENT if no further records match the
 * requested range, or some other error code is returned.
 */
int
hammer_ip_next(hammer_cursor_t cursor)
{
        hammer_btree_elm_t elm;
        hammer_record_t rec;
        hammer_record_t tmprec;
        int error;
        int r;

again:
        /*
         * Get the next on-disk record
         *
         * NOTE: If we deleted the last on-disk record we had scanned
         *       ATEDISK will be clear and RETEST will be set, forcing
         *       a call to iterate. The fact that ATEDISK is clear causes
         *       iterate to re-test the 'current' element. If ATEDISK is
         *       set, iterate will skip the 'current' element.
         */
        error = 0;
        if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
                if (cursor->flags & (HAMMER_CURSOR_ATEDISK |
                                     HAMMER_CURSOR_RETEST)) {
                        error = hammer_btree_iterate(cursor);
                        cursor->flags &= ~HAMMER_CURSOR_RETEST;
                        if (error == 0) {
                                cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
                                hammer_cache_node(&cursor->ip->cache[1],
                                                  cursor->node);
                        } else if (error == ENOENT) {
                                cursor->flags |= HAMMER_CURSOR_DISKEOF |
                                                 HAMMER_CURSOR_ATEDISK;
                                error = 0;
                        }
                }
        }

        /*
         * If the generation changed the backend has deleted or committed
         * one or more memory records since our last check.
         *
         * When this case occurs if the disk cursor is > current memory record
         * or the disk cursor is at EOF, we must re-seek the disk-cursor.
         * Since the cursor is ahead it must not yet have been eaten (if
         * not at eof anyway). (XXX data offset case?)
         *
         * NOTE: we are not doing a full check here. That will be handled
         *       later on.
         *
         * If we have exhausted all memory records we do not have to do any
         * further seeks.
         */
        while (cursor->rec_generation != cursor->ip->rec_generation &&
               error == 0
        ) {
                kprintf("HAMMER: Debug: generation changed during scan @ino=%016llx\n", (long long)cursor->ip->obj_id);
                cursor->rec_generation = cursor->ip->rec_generation;
                if (cursor->flags & HAMMER_CURSOR_MEMEOF)
                        break;
                if (cursor->flags & HAMMER_CURSOR_DISKEOF) {
                        r = 1;
                } else {
                        KKASSERT((cursor->flags & HAMMER_CURSOR_ATEDISK) == 0);
                        elm = &cursor->node->ondisk->elms[cursor->index];
                        r = hammer_btree_cmp(&elm->base,
                                             &cursor->iprec->leaf.base);
                }

                /*
                 * Do we re-seek the media cursor?
                 */
                if (r > 0) {
                        if (_hammer_ip_reseek(cursor))
                                goto again;
                }
        }

        /*
         * We can now safely get the next in-memory record. We cannot
         * block here.
         *
         * hammer_rec_scan_cmp:  Is the record still in our general range,
         *                       (non-inclusive of snapshot exclusions)?
         * hammer_rec_scan_callback: Is the record in our snapshot?
         */
        tmprec = NULL;
        if ((cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
                /*
                 * If the current memory record was eaten then get the next
                 * one. Stale records are skipped.
                 */
                if (cursor->flags & HAMMER_CURSOR_ATEMEM) {
                        tmprec = cursor->iprec;
                        cursor->iprec = NULL;
                        rec = hammer_rec_rb_tree_RB_NEXT(tmprec);
                        while (rec) {
                                if (hammer_rec_scan_cmp(rec, cursor) != 0)
                                        break;
                                if (hammer_rec_scan_callback(rec, cursor) != 0)
                                        break;
                                rec = hammer_rec_rb_tree_RB_NEXT(rec);
                        }
                        if (cursor->iprec) {
                                KKASSERT(cursor->iprec == rec);
                                cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
                        } else {
                                cursor->flags |= HAMMER_CURSOR_MEMEOF;
                        }
                        cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
                }
        }

        /*
         * MEMORY RECORD VALIDITY TEST
         *
         * (We still can't block, which is why tmprec is being held so
         * long).
         *
         * If the memory record is no longer valid we skip it. It may
         * have been deleted by the frontend. If it was deleted or
         * committed by the backend the generation change re-seeked the
         * disk cursor and the record will be present there.
         */
        if (error == 0 && (cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
                KKASSERT(cursor->iprec);
                KKASSERT((cursor->flags & HAMMER_CURSOR_ATEMEM) == 0);
                if (!hammer_ip_iterate_mem_good(cursor, cursor->iprec)) {
                        cursor->flags |= HAMMER_CURSOR_ATEMEM;
                        if (tmprec)
                                hammer_rel_mem_record(tmprec);
                        goto again;
                }
        }
        if (tmprec)
                hammer_rel_mem_record(tmprec);
1765
1766 /*
1767 * Extract either the disk or memory record depending on their
1768 * relative position.
1769 */
1770 error = 0;
1771 switch(cursor->flags & (HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM)) {
1772 case 0:
1773 /*
1774 * Both entries valid. Compare the entries and nominally
1775 * return the first one in the sort order. Numerous cases
1776 * require special attention, however.
1777 */
1778 elm = &cursor->node->ondisk->elms[cursor->index];
1779 r = hammer_btree_cmp(&elm->base, &cursor->iprec->leaf.base);
1780
1781 /*
1782 * If the two entries differ only by their key (-2/2) or
1783 * create_tid (-1/1), and are DATA records, we may have a
1784 * nominal match. We have to calculate the base file
1785 * offset of the data.
1786 */
1787 if (r <= 2 && r >= -2 && r != 0 &&
1788 cursor->ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE &&
1789 cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
1790 int64_t base1 = elm->leaf.base.key - elm->leaf.data_len;
1791 int64_t base2 = cursor->iprec->leaf.base.key -
1792 cursor->iprec->leaf.data_len;
1793 if (base1 == base2)
1794 r = 0;
1795 }
1796
1797 if (r < 0) {
1798 error = hammer_btree_extract(cursor,
1799 HAMMER_CURSOR_GET_LEAF);
1800 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1801 cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
1802 break;
1803 }
1804
1805 /*
1806 * If the entries match exactly the memory entry is either
1807 * an on-disk directory entry deletion or a bulk data
1808 * overwrite. If it is a directory entry deletion we eat
1809 * both entries.
1810 *
1811 * For the bulk-data overwrite case it is possible to have
1812 * visibility into both, which simply means the syncer
1813 * hasn't gotten around to doing the delete+insert sequence
1814 * on the B-Tree. Use the memory entry and throw away the
1815 * on-disk entry.
1816 *
1817		 * If the in-memory record is neither of these we
1818		 * probably caught the syncer while it was syncing it to
1819 * the media. Since we hold a shared lock on the cursor,
1820 * the in-memory record had better be marked deleted at
1821 * this point.
1822 */
1823 if (r == 0) {
1824 if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL) {
1825 if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1826 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1827 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1828 goto again;
1829 }
1830 } else if (cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
1831 if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1832 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1833 }
1834 /* fall through to memory entry */
1835 } else {
1836 panic("hammer_ip_next: duplicate mem/b-tree entry %p %d %08x", cursor->iprec, cursor->iprec->type, cursor->iprec->flags);
1837 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1838 goto again;
1839 }
1840 }
1841 /* fall through to the memory entry */
1842 case HAMMER_CURSOR_ATEDISK:
1843 /*
1844 * Only the memory entry is valid.
1845 */
1846 cursor->leaf = &cursor->iprec->leaf;
1847 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1848 cursor->flags |= HAMMER_CURSOR_LASTWASMEM;
1849
1850 /*
1851		 * If the memory entry is an on-disk deletion we should also
1852		 * have found a B-Tree record.  If the backend beat us
1853		 * to it, it would have interlocked the cursor and we should
1854		 * have seen the in-memory record marked DELETED_FE.
1855 */
1856 if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL &&
1857 (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1858 panic("hammer_ip_next: del-on-disk with no b-tree entry iprec %p flags %08x", cursor->iprec, cursor->iprec->flags);
1859 }
1860 break;
1861 case HAMMER_CURSOR_ATEMEM:
1862 /*
1863 * Only the disk entry is valid
1864 */
1865 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1866 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1867 cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
1868 break;
1869 default:
1870 /*
1871 * Neither entry is valid
1872 *
1873 * XXX error not set properly
1874 */
1875 cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
1876 cursor->leaf = NULL;
1877 error = ENOENT;
1878 break;
1879 }
1880 return(error);
1881}
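
/*
 * Illustrative sketch (not compiled): the flag protocol driving the
 * merged iteration above, reduced to its two-way merge skeleton.
 * refill_disk(), refill_mem(), disk_sorts_first() and emit() are
 * hypothetical stand-ins for the B-Tree iteration, the in-memory
 * RB-tree iteration, the hammer_btree_cmp() decision, and the consumer.
 */
#if 0
	for (;;) {
		if (flags & HAMMER_CURSOR_ATEDISK)
			refill_disk();		/* may set DISKEOF */
		if (flags & HAMMER_CURSOR_ATEMEM)
			refill_mem();		/* may set MEMEOF */
		if ((flags & (HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_MEMEOF)) ==
		    (HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_MEMEOF))
			break;			/* both sources exhausted */
		if (disk_sorts_first())
			flags |= HAMMER_CURSOR_ATEDISK;	/* consume disk */
		else
			flags |= HAMMER_CURSOR_ATEMEM;	/* consume memory */
		emit();
	}
#endif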
1882
1883/*
1884 * Resolve the cursor->data pointer for the current cursor position in
1885 * a merged iteration.
1886 */
1887int
1888hammer_ip_resolve_data(hammer_cursor_t cursor)
1889{
1890 hammer_record_t record;
1891 int error;
1892
1893 if (hammer_cursor_inmem(cursor)) {
1894 /*
1895 * The data associated with an in-memory record is usually
1896 * kmalloced, but reserve-ahead data records will have an
1897 * on-disk reference.
1898 *
1899 * NOTE: Reserve-ahead data records must be handled in the
1900 * context of the related high level buffer cache buffer
1901 * to interlock against async writes.
1902 */
1903 record = cursor->iprec;
1904 cursor->data = record->data;
1905 error = 0;
1906 if (cursor->data == NULL) {
1907 KKASSERT(record->leaf.base.rec_type ==
1908 HAMMER_RECTYPE_DATA);
1909 cursor->data = hammer_bread_ext(cursor->trans->hmp,
1910 record->leaf.data_offset,
1911 record->leaf.data_len,
1912 &error,
1913 &cursor->data_buffer);
1914 }
1915 } else {
1916 cursor->leaf = &cursor->node->ondisk->elms[cursor->index].leaf;
1917 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
1918 }
1919 return(error);
1920}
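
/*
 * Illustrative sketch (not compiled): a typical merged scan resolves
 * the data payload only when it is actually needed.  Cursor setup
 * (key_beg/key_end/asof) is elided; see hammer_ip_delete_range()
 * below for a complete example.  consume() is a hypothetical consumer.
 */
#if 0
	error = hammer_ip_first(cursor);
	while (error == 0) {
		error = hammer_ip_resolve_data(cursor);
		if (error)
			break;
		consume(cursor->leaf, cursor->data);	/* hypothetical */
		error = hammer_ip_next(cursor);
	}
	if (error == ENOENT)
		error = 0;
#endif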
1921
1922/*
1923 * Backend truncation / record replacement - delete records in range.
1924 *
1925 * Delete all records within the specified range for inode ip. In-memory
1926 * records still associated with the frontend are ignored.
1927 *
1928 * If truncating is non-zero, in-memory records associated with the backend
1929 * are ignored.  If truncating is > 1 we can return EWOULDBLOCK.
1930 *
1931 * NOTES:
1932 *
1933 * * An unaligned range will cause new records to be added to cover
1934 * the edge cases. (XXX not implemented yet).
1935 *
1936 * Replacement via reservations (see hammer_ip_sync_record_cursor())
1937 *   also does not deal with unaligned ranges.
1938 *
1939 * * ran_end is inclusive (e.g. 0,1023 instead of 0,1024).
1940 *
1941 * * Record keys for regular file data have to be special-cased since
1942 * they indicate the end of the range (key = base + bytes).
1943 *
1944 * * This function may be asked to delete ridiculously huge ranges, for
1945 * example if someone truncates or removes a 1TB regular file. We
1946 *   must be very careful on restarts, and we may have to stop with
1947 *   EWOULDBLOCK to avoid blowing out the buffer cache.
1948 */
1949int
1950hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
1951 int64_t ran_beg, int64_t ran_end, int truncating)
1952{
1953 hammer_transaction_t trans = cursor->trans;
1954 hammer_btree_leaf_elm_t leaf;
1955 int error;
1956 int64_t off;
1957 int64_t tmp64;
1958
1959#if 0
1960 kprintf("delete_range %p %016llx-%016llx\n", ip, ran_beg, ran_end);
1961#endif
1962
1963 KKASSERT(trans->type == HAMMER_TRANS_FLS);
1964retry:
1965 hammer_normalize_cursor(cursor);
1966 cursor->key_beg.localization = ip->obj_localization +
1967 HAMMER_LOCALIZE_MISC;
1968 cursor->key_beg.obj_id = ip->obj_id;
1969 cursor->key_beg.create_tid = 0;
1970 cursor->key_beg.delete_tid = 0;
1971 cursor->key_beg.obj_type = 0;
1972
1973 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
1974 cursor->key_beg.key = ran_beg;
1975 cursor->key_beg.rec_type = HAMMER_RECTYPE_DB;
1976 } else {
1977 /*
1978 * The key in the B-Tree is (base+bytes), so the first possible
1979 * matching key is ran_beg + 1.
1980 */
1981 cursor->key_beg.key = ran_beg + 1;
1982 cursor->key_beg.rec_type = HAMMER_RECTYPE_DATA;
1983 }
1984
1985 cursor->key_end = cursor->key_beg;
1986 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
1987 cursor->key_end.key = ran_end;
1988 } else {
1989 tmp64 = ran_end + MAXPHYS + 1; /* work around GCC-4 bug */
1990 if (tmp64 < ran_end)
1991 cursor->key_end.key = 0x7FFFFFFFFFFFFFFFLL;
1992 else
1993 cursor->key_end.key = ran_end + MAXPHYS + 1;
1994 }
1995
1996 cursor->asof = ip->obj_asof;
1997 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1998 cursor->flags |= HAMMER_CURSOR_ASOF;
1999 cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
2000 cursor->flags |= HAMMER_CURSOR_BACKEND;
2001 cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE;
2002
2003 error = hammer_ip_first(cursor);
2004
2005 /*
2006 * Iterate through matching records and mark them as deleted.
2007 */
2008 while (error == 0) {
2009 leaf = cursor->leaf;
2010
2011 KKASSERT(leaf->base.delete_tid == 0);
2012 KKASSERT(leaf->base.obj_id == ip->obj_id);
2013
2014 /*
2015 * There may be overlap cases for regular file data. Also
2016 * remember the key for a regular file record is (base + len),
2017 * NOT (base).
2018 *
2019 * Note that due to duplicates (mem & media) allowed by
2020		 * DELETE_VISIBILITY, off can wind up less than ran_beg.
2021 */
2022 if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
2023 off = leaf->base.key - leaf->data_len;
2024 /*
2025 * Check the left edge case. We currently do not
2026 * split existing records.
2027 */
2028 if (off < ran_beg && leaf->base.key > ran_beg) {
2029 panic("hammer left edge case %016llx %d\n",
2030 (long long)leaf->base.key,
2031 leaf->data_len);
2032 }
2033
2034 /*
2035 * Check the right edge case. Note that the
2036 * record can be completely out of bounds, which
2037 * terminates the search.
2038 *
2039 * base->key is exclusive of the right edge while
2040 * ran_end is inclusive of the right edge. The
2041 * (key - data_len) left boundary is inclusive.
2042 *
2043 * XXX theory-check this test at some point, are
2044 * we missing a + 1 somewhere? Note that ran_end
2045 * could overflow.
2046 */
2047 if (leaf->base.key - 1 > ran_end) {
2048 if (leaf->base.key - leaf->data_len > ran_end)
2049 break;
2050 panic("hammer right edge case\n");
2051 }
2052 } else {
2053 off = leaf->base.key;
2054 }
2055
2056 /*
2057 * Delete the record. When truncating we do not delete
2058 * in-memory (data) records because they represent data
2059 * written after the truncation.
2060 *
2061 * This will also physically destroy the B-Tree entry and
2062 * data if the retention policy dictates. The function
2063 * will set HAMMER_CURSOR_RETEST to cause hammer_ip_next()
2064 * to retest the new 'current' element.
2065 */
2066 if (truncating == 0 || hammer_cursor_ondisk(cursor)) {
2067 error = hammer_ip_delete_record(cursor, ip, trans->tid);
2068 /*
2069 * If we have built up too many meta-buffers we risk
2070 * deadlocking the kernel and must stop. This can
2071 * occur when deleting ridiculously huge files.
2072 * sync_trunc_off is updated so the next cycle does
2073 * not re-iterate records we have already deleted.
2074 *
2075 * This is only done with formal truncations.
2076 */
2077 if (truncating > 1 && error == 0 &&
2078 hammer_flusher_meta_limit(ip->hmp)) {
2079 ip->sync_trunc_off = off;
2080 error = EWOULDBLOCK;
2081 }
2082 }
2083 if (error)
2084 break;
2085 ran_beg = off; /* for restart */
2086 error = hammer_ip_next(cursor);
2087 }
2088 if (cursor->node)
2089 hammer_cache_node(&ip->cache[1], cursor->node);
2090
2091 if (error == EDEADLK) {
2092 hammer_done_cursor(cursor);
2093 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
2094 if (error == 0)
2095 goto retry;
2096 }
2097 if (error == ENOENT)
2098 error = 0;
2099 return(error);
2100}
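
/*
 * Illustrative sketch (not compiled): how a flusher-side truncation
 * might honor the EWOULDBLOCK contract above.  The actual caller lives
 * in the inode sync code; requeue_for_next_flush() is a hypothetical
 * stand-in.  Passing truncating > 1 permits the EWOULDBLOCK return.
 */
#if 0
	error = hammer_ip_delete_range(cursor, ip, ip->sync_trunc_off,
				       0x7FFFFFFFFFFFFFFFLL, 2);
	if (error == EWOULDBLOCK) {
		/*
		 * Too much dirty meta-data built up.  sync_trunc_off has
		 * already been advanced past the deleted records, so a
		 * later flush cycle resumes where we stopped.
		 */
		requeue_for_next_flush(ip);	/* hypothetical */
		error = 0;
	}
#endif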
2101
2102/*
2103 * This backend function deletes the specified record on-disk, similar to
2104 * delete_range but for a specific record. Unlike the exact deletions
2105 * used when deleting a directory entry, this function uses an ASOF search
2106 * like delete_range.
2107 *
2108 * This function may be called with ip->obj_asof set for a slave snapshot,
2109 * so don't use it; we always delete only non-historical records.
2110 */
2111static int
2112hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
2113 hammer_btree_leaf_elm_t leaf)
2114{
2115 hammer_transaction_t trans = cursor->trans;
2116 int error;
2117
2118 KKASSERT(trans->type == HAMMER_TRANS_FLS);
2119retry:
2120 hammer_normalize_cursor(cursor);
2121 cursor->key_beg = leaf->base;
2122 cursor->asof = HAMMER_MAX_TID;
2123 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
2124 cursor->flags |= HAMMER_CURSOR_ASOF;
2125 cursor->flags |= HAMMER_CURSOR_BACKEND;
2126 cursor->flags &= ~HAMMER_CURSOR_INSERT;
2127
2128 error = hammer_btree_lookup(cursor);
2129 if (error == 0) {
2130 error = hammer_ip_delete_record(cursor, ip, trans->tid);
2131 }
2132 if (error == EDEADLK) {
2133 hammer_done_cursor(cursor);
2134 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
2135 if (error == 0)
2136 goto retry;
2137 }
2138 return(error);
2139}
2140
2141/*
2142 * This function deletes remaining auxiliary records when an inode is
2143 * being deleted. This function explicitly does not delete the
2144 * inode record, directory entry, data, or db records. Those must be
2145 * properly disposed of prior to this call.
2146 */
2147int
2148hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip, int *countp)
2149{
2150 hammer_transaction_t trans = cursor->trans;
2151 hammer_btree_leaf_elm_t leaf;
2152 int error;
2153
2154 KKASSERT(trans->type == HAMMER_TRANS_FLS);
2155retry:
2156 hammer_normalize_cursor(cursor);
2157 cursor->key_beg.localization = ip->obj_localization +
2158 HAMMER_LOCALIZE_MISC;
2159 cursor->key_beg.obj_id = ip->obj_id;
2160 cursor->key_beg.create_tid = 0;
2161 cursor->key_beg.delete_tid = 0;
2162 cursor->key_beg.obj_type = 0;
2163 cursor->key_beg.rec_type = HAMMER_RECTYPE_CLEAN_START;
2164 cursor->key_beg.key = HAMMER_MIN_KEY;
2165
2166 cursor->key_end = cursor->key_beg;
2167 cursor->key_end.rec_type = HAMMER_RECTYPE_MAX;
2168 cursor->key_end.key = HAMMER_MAX_KEY;
2169
2170 cursor->asof = ip->obj_asof;
2171 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
2172 cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
2173 cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
2174 cursor->flags |= HAMMER_CURSOR_BACKEND;
2175
2176 error = hammer_ip_first(cursor);
2177
2178 /*
2179 * Iterate through matching records and mark them as deleted.
2180 */
2181 while (error == 0) {
2182 leaf = cursor->leaf;
2183
2184 KKASSERT(leaf->base.delete_tid == 0);
2185
2186 /*
2187 * Mark the record and B-Tree entry as deleted. This will
2188 * also physically delete the B-Tree entry, record, and
2189 * data if the retention policy dictates. The function
2190 * will set HAMMER_CURSOR_RETEST to cause hammer_ip_next()
2191 * to retest the new 'current' element.
2192 *
2193 * Directory entries (and delete-on-disk directory entries)
2194 * must be synced and cannot be deleted.
2195 */
2196 error = hammer_ip_delete_record(cursor, ip, trans->tid);
2197 ++*countp;
2198 if (error)
2199 break;
2200 error = hammer_ip_next(cursor);
2201 }
2202 if (cursor->node)
2203 hammer_cache_node(&ip->cache[1], cursor->node);
2204 if (error == EDEADLK) {
2205 hammer_done_cursor(cursor);
2206 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
2207 if (error == 0)
2208 goto retry;
2209 }
2210 if (error == ENOENT)
2211 error = 0;
2212 return(error);
2213}
2214
2215/*
2216 * Delete the record at the current cursor. On success the cursor will
2217 * be positioned appropriately for an iteration but may no longer be at
2218 * a leaf node.
2219 *
2220 * This routine is only called from the backend.
2221 *
2222 * NOTE: This can return EDEADLK, requiring the caller to terminate the
2223 * cursor and retry.
2224 */
2225int
2226hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
2227 hammer_tid_t tid)
2228{
2229 hammer_record_t iprec;
2230 hammer_mount_t hmp;
2231 int error;
2232
2233 KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
2234 KKASSERT(tid != 0);
2235 hmp = cursor->node->hmp;
2236
2237 /*
2238 * In-memory (unsynchronized) records can simply be freed. This
2239 * only occurs in range iterations since all other records are
2240 * individually synchronized. Thus there should be no confusion with
2241 * the interlock.
2242 *
2243 * An in-memory record may be deleted before being committed to disk,
2244	 * but could have been accessed in the meantime.  The reservation
2245	 * code will deal with that case.
2246 */
2247 if (hammer_cursor_inmem(cursor)) {
2248 iprec = cursor->iprec;
2249		KKASSERT((iprec->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
2250 iprec->flags |= HAMMER_RECF_DELETED_FE;
2251 iprec->flags |= HAMMER_RECF_DELETED_BE;
2252 KKASSERT(iprec->ip == ip);
2253 ++ip->rec_generation;
2254 return(0);
2255 }
2256
2257 /*
2258 * On-disk records are marked as deleted by updating their delete_tid.
2259	 * This does not affect their position in the B-Tree (which is based
2260 * on their create_tid).
2261 *
2262 * Frontend B-Tree operations track inodes so we tell
2263 * hammer_delete_at_cursor() not to.
2264 */
2265 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
2266
2267 if (error == 0) {
2268 error = hammer_delete_at_cursor(
2269 cursor,
2270 HAMMER_DELETE_ADJUST | hammer_nohistory(ip),
2271 cursor->trans->tid,
2272 cursor->trans->time32,
2273 0, NULL);
2274 }
2275 return(error);
2276}
2277
2278/*
2279 * Used to write a generic record with optional data to the media B-Tree
2280 * when no inode context is available. Used by the mirroring and
2281 * snapshot code.
2282 *
2283 * Caller must set cursor->key_beg to leaf->base. The cursor must be
2284 * flagged for backend operation and not flagged ASOF (since we are
2285 * doing an insertion).
2286 *
2287 * This function will acquire the appropriate sync lock and will set
2288 * the cursor insertion flag for the operation, do the B-Tree lookup
2289 * and insertion, and clear the insertion flag and sync lock before
2290 * returning. The cursor state will be such that the caller can continue
2291 * scanning (used by the mirroring code).
2292 *
2293 * mode: HAMMER_CREATE_MODE_UMIRROR copyin data, check crc
2294 * HAMMER_CREATE_MODE_SYS bcopy data, generate crc
2295 *
2296 * NOTE: EDEADLK can be returned. The caller must do deadlock handling and
2297 * retry.
2298 *
2299 *	 EALREADY can be returned if the record already exists (WARNING:
2300 *	 because ASOF cannot be used, no check is made for illegal
2301 *	 duplicates).
2302 *
2303 * NOTE: Do not use this function for normal inode-related records as it
2304 *	 goes directly to the media and is not integrated with
2305 *	 in-memory records.
2306 */
2307int
2308hammer_create_at_cursor(hammer_cursor_t cursor, hammer_btree_leaf_elm_t leaf,
2309 void *udata, int mode)
2310{
2311 hammer_transaction_t trans;
2312 hammer_buffer_t data_buffer;
2313 hammer_off_t ndata_offset;
2314 hammer_tid_t high_tid;
2315 void *ndata;
2316 int error;
2317 int doprop;
2318
2319 trans = cursor->trans;
2320 data_buffer = NULL;
2321 ndata_offset = 0;
2322 doprop = 0;
2323
2324 KKASSERT((cursor->flags &
2325 (HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF)) ==
2326 (HAMMER_CURSOR_BACKEND));
2327
2328 hammer_sync_lock_sh(trans);
2329
2330 if (leaf->data_len) {
2331 ndata = hammer_alloc_data(trans, leaf->data_len,
2332 leaf->base.rec_type,
2333 &ndata_offset, &data_buffer,
2334 0, &error);
2335 if (ndata == NULL) {
2336 hammer_sync_unlock(trans);
2337 return (error);
2338 }
2339 leaf->data_offset = ndata_offset;
2340 hammer_modify_buffer(trans, data_buffer, NULL, 0);
2341
2342 switch(mode) {
2343 case HAMMER_CREATE_MODE_UMIRROR:
2344 error = copyin(udata, ndata, leaf->data_len);
2345 if (error == 0) {
2346 if (hammer_crc_test_leaf(ndata, leaf) == 0) {
2347 kprintf("data crc mismatch on pipe\n");
2348 error = EINVAL;
2349 } else {
2350 error = hammer_cursor_localize_data(
2351 ndata, leaf);
2352 }
2353 }
2354 break;
2355 case HAMMER_CREATE_MODE_SYS:
2356 bcopy(udata, ndata, leaf->data_len);
2357 error = 0;
2358 hammer_crc_set_leaf(ndata, leaf);
2359 break;
2360 default:
2361 panic("hammer: hammer_create_at_cursor: bad mode %d",
2362 mode);
2363 break; /* NOT REACHED */
2364 }
2365 hammer_modify_buffer_done(data_buffer);
2366 } else {
2367 leaf->data_offset = 0;
2368 error = 0;
2369 ndata = NULL;
2370 }
2371 if (error)
2372 goto failed;
2373
2374 /*
2375	 * Do the insertion.  This can fail with EDEADLK or EALREADY.
2376 */
2377 cursor->flags |= HAMMER_CURSOR_INSERT;
2378 error = hammer_btree_lookup(cursor);
2379 if (error != ENOENT) {
2380 if (error == 0)
2381 error = EALREADY;
2382 goto failed;
2383 }
2384 error = hammer_btree_insert(cursor, leaf, &doprop);
2385
2386 /*
2387	 * The cursor is left on the current element; we want to skip it
2388	 * now in case the caller is scanning.
2389 */
2390 cursor->flags |= HAMMER_CURSOR_ATEDISK;
2391 cursor->flags &= ~HAMMER_CURSOR_INSERT;
2392
2393 /*
2394 * If the insertion happens to be creating (and not just replacing)
2395 * an inode we have to track it.
2396 */
2397 if (error == 0 &&
2398 leaf->base.rec_type == HAMMER_RECTYPE_INODE &&
2399 leaf->base.delete_tid == 0) {
2400 hammer_modify_volume_field(trans, trans->rootvol,
2401 vol0_stat_inodes);
2402 ++trans->hmp->rootvol->ondisk->vol0_stat_inodes;
2403 hammer_modify_volume_done(trans->rootvol);
2404 }
2405
2406 /*
2407 * vol0_next_tid must track the highest TID stored in the filesystem.
2408 * We do not need to generate undo for this update.
2409 */
2410 high_tid = leaf->base.create_tid;
2411 if (high_tid < leaf->base.delete_tid)
2412 high_tid = leaf->base.delete_tid;
2413 if (trans->rootvol->ondisk->vol0_next_tid < high_tid) {
2414 hammer_modify_volume(trans, trans->rootvol, NULL, 0);
2415 trans->rootvol->ondisk->vol0_next_tid = high_tid;
2416 hammer_modify_volume_done(trans->rootvol);
2417 }
2418
2419 /*
2420 * WARNING! cursor's leaf pointer may have changed after
2421 * do_propagation returns.
2422 */
2423 if (error == 0 && doprop)
2424 hammer_btree_do_propagation(cursor, NULL, leaf);
2425
2426failed:
2427 /*
2428 * Cleanup
2429 */
2430	if (error && leaf->data_offset) {
2431		hammer_blockmap_free(trans, leaf->data_offset, leaf->data_len);
2433	}
2434 hammer_sync_unlock(trans);
2435 if (data_buffer)
2436 hammer_rel_buffer(data_buffer, 0);
2437 return (error);
2438}
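
/*
 * Illustrative sketch (not compiled): a mirroring-style caller of
 * hammer_create_at_cursor().  Per the comments above, key_beg must be
 * primed from the leaf and the cursor must be flagged BACKEND and not
 * ASOF.  EALREADY is ignorable for an idempotent retransmission;
 * EDEADLK requires the caller to tear down the cursor and retry.
 */
#if 0
	cursor->key_beg = leaf->base;
	cursor->flags |= HAMMER_CURSOR_BACKEND;
	cursor->flags &= ~HAMMER_CURSOR_ASOF;

	error = hammer_create_at_cursor(cursor, leaf, data,
					HAMMER_CREATE_MODE_SYS);
	if (error == EALREADY)
		error = 0;	/* record already present, skip it */
#endif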
2439
2440/*
2441 * Delete the B-Tree element at the current cursor and do any necessary
2442 * mirror propagation.
2443 *
2444 * The cursor must be properly positioned for an iteration on return but
2445 * may be pointing at an internal element.
2446 *
2447 * An element can be un-deleted by passing a delete_tid of 0 with
2448 * HAMMER_DELETE_ADJUST.
2449 */
2450int
2451hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags,
2452 hammer_tid_t delete_tid, u_int32_t delete_ts,
2453 int track, int64_t *stat_bytes)
2454{
2455 struct hammer_btree_leaf_elm save_leaf;
2456 hammer_transaction_t trans;
2457 hammer_btree_leaf_elm_t leaf;
2458 hammer_node_t node;
2459 hammer_btree_elm_t elm;
2460 hammer_off_t data_offset;
2461 int32_t data_len;
2462 u_int16_t rec_type;
2463 int error;
2464 int icount;
2465 int doprop;
2466
2467 error = hammer_cursor_upgrade(cursor);
2468 if (error)
2469 return(error);
2470
2471 trans = cursor->trans;
2472 node = cursor->node;
2473 elm = &node->ondisk->elms[cursor->index];
2474 leaf = &elm->leaf;
2475 KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);
2476
2477 hammer_sync_lock_sh(trans);
2478 doprop = 0;
2479 icount = 0;
2480
2481 /*
2482 * Adjust the delete_tid. Update the mirror_tid propagation field
2483 * as well. delete_tid can be 0 (undelete -- used by mirroring).
2484 */
2485 if (delete_flags & HAMMER_DELETE_ADJUST) {
2486 if (elm->base.rec_type == HAMMER_RECTYPE_INODE) {
2487 if (elm->leaf.base.delete_tid == 0 && delete_tid)
2488 icount = -1;
2489 if (elm->leaf.base.delete_tid && delete_tid == 0)
2490 icount = 1;
2491 }
2492
2493 hammer_modify_node(trans, node, elm, sizeof(*elm));
2494 elm->leaf.base.delete_tid = delete_tid;
2495 elm->leaf.delete_ts = delete_ts;
2496 hammer_modify_node_done(node);
2497
2498 if (elm->leaf.base.delete_tid > node->ondisk->mirror_tid) {
2499 hammer_modify_node_field(trans, node, mirror_tid);
2500 node->ondisk->mirror_tid = elm->leaf.base.delete_tid;
2501 hammer_modify_node_done(node);
2502 doprop = 1;
2503 if (hammer_debug_general & 0x0002) {
2504 kprintf("delete_at_cursor: propagate %016llx"
2505 " @%016llx\n",
2506 (long long)elm->leaf.base.delete_tid,
2507 (long long)node->node_offset);
2508 }
2509 }
2510
2511 /*
2512 * Adjust for the iteration. We have deleted the current
2513 * element and want to clear ATEDISK so the iteration does
2514 * not skip the element after, which now becomes the current
2515 * element. This element must be re-tested if doing an
2516 * iteration, which is handled by the RETEST flag.
2517 */
2518 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
2519 cursor->flags |= HAMMER_CURSOR_RETEST;
2520 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
2521 }
2522
2523 /*
2524 * An on-disk record cannot have the same delete_tid
2525 * as its create_tid. In a chain of record updates
2526 * this could result in a duplicate record.
2527 */
2528 KKASSERT(elm->leaf.base.delete_tid !=
2529 elm->leaf.base.create_tid);
2530 }
2531
2532 /*
2533 * Destroy the B-Tree element if asked (typically if a nohistory
2534 * file or mount, or when called by the pruning code).
2535 *
2536 * Adjust the ATEDISK flag to properly support iterations.
2537 */
2538 if (delete_flags & HAMMER_DELETE_DESTROY) {
2539 data_offset = elm->leaf.data_offset;
2540 data_len = elm->leaf.data_len;
2541 rec_type = elm->leaf.base.rec_type;
2542 if (doprop) {
2543 save_leaf = elm->leaf;
2544 leaf = &save_leaf;
2545 }
2546 if (elm->base.rec_type == HAMMER_RECTYPE_INODE &&
2547 elm->leaf.base.delete_tid == 0) {
2548 icount = -1;
2549 }
2550
2551 error = hammer_btree_delete(cursor);
2552 if (error == 0) {
2553 /*
2554 * The deletion moves the next element (if any) to
2555 * the current element position. We must clear
2556 * ATEDISK so this element is not skipped and we
2557 * must set RETEST to force any iteration to re-test
2558 * the element.
2559 */
2560 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
2561 cursor->flags |= HAMMER_CURSOR_RETEST;
2562 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
2563 }
2564 }
2565 if (error == 0) {
2566 switch(data_offset & HAMMER_OFF_ZONE_MASK) {
2567 case HAMMER_ZONE_LARGE_DATA:
2568 case HAMMER_ZONE_SMALL_DATA:
2569 case HAMMER_ZONE_META:
2570 hammer_blockmap_free(trans,
2571 data_offset, data_len);
2572 break;
2573 default:
2574 break;
2575 }
2576 }
2577 }
2578
2579 /*
2580 * Track inode count and next_tid. This is used by the mirroring
2581 * and PFS code. icount can be negative, zero, or positive.
2582 */
2583 if (error == 0 && track) {
2584 if (icount) {
2585 hammer_modify_volume_field(trans, trans->rootvol,
2586 vol0_stat_inodes);
2587 trans->rootvol->ondisk->vol0_stat_inodes += icount;
2588 hammer_modify_volume_done(trans->rootvol);
2589 }
2590 if (trans->rootvol->ondisk->vol0_next_tid < delete_tid) {
2591 hammer_modify_volume(trans, trans->rootvol, NULL, 0);
2592 trans->rootvol->ondisk->vol0_next_tid = delete_tid;
2593 hammer_modify_volume_done(trans->rootvol);
2594 }
2595 }
2596
2597 /*
2598 * mirror_tid propagation occurs if the node's mirror_tid had to be
2599 * updated while adjusting the delete_tid.
2600 *
2601 * This occurs when deleting even in nohistory mode, but does not
2602 * occur when pruning an already-deleted node.
2603 *
2604 * cursor->ip is NULL when called from the pruning, mirroring,
2605	 * and PFS code.  If non-NULL, propagation will be conditionalized
2606 * on whether the PFS is in no-history mode or not.
2607 *
2608 * WARNING: cursor's leaf pointer may have changed after do_propagation
2609 * returns!
2610 */
2611 if (doprop) {
2612 if (cursor->ip)
2613 hammer_btree_do_propagation(cursor, cursor->ip->pfsm, leaf);
2614 else
2615 hammer_btree_do_propagation(cursor, NULL, leaf);
2616 }
2617 hammer_sync_unlock(trans);
2618 return (error);
2619}
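
/*
 * Illustrative sketch (not compiled): un-deleting an element as
 * described above, by passing a delete_tid of 0 with
 * HAMMER_DELETE_ADJUST.  Positioning the cursor on the target element
 * is elided.
 */
#if 0
	error = hammer_delete_at_cursor(cursor,
					HAMMER_DELETE_ADJUST,
					0,	/* delete_tid 0 = undelete */
					0,	/* delete_ts */
					1,	/* track vol0 inode count */
					NULL);
#endif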
2620
2621/*
2622 * Determine whether we can remove a directory. This routine checks whether
2623 * a directory is empty or not and enforces flush connectivity.
2624 *
2625 * Flush connectivity requires that we block if the target directory is
2626 * currently flushing, otherwise it may not end up in the same flush group.
2627 *
2628 * Returns 0 on success, ENOTEMPTY or EDEADLK (or other errors) on failure.
2629 */
2630int
2631hammer_ip_check_directory_empty(hammer_transaction_t trans, hammer_inode_t ip)
2632{
2633 struct hammer_cursor cursor;
2634 int error;
2635
2636 /*
2637 * Check directory empty
2638 */
2639 hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
2640
2641 cursor.key_beg.localization = ip->obj_localization +
2642 hammer_dir_localization(ip);
2643 cursor.key_beg.obj_id = ip->obj_id;
2644 cursor.key_beg.create_tid = 0;
2645 cursor.key_beg.delete_tid = 0;
2646 cursor.key_beg.obj_type = 0;
2647 cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
2648 cursor.key_beg.key = HAMMER_MIN_KEY;
2649
2650 cursor.key_end = cursor.key_beg;
2651 cursor.key_end.rec_type = 0xFFFF;
2652 cursor.key_end.key = HAMMER_MAX_KEY;
2653
2654 cursor.asof = ip->obj_asof;
2655 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
2656
2657 error = hammer_ip_first(&cursor);
2658 if (error == ENOENT)
2659 error = 0;
2660 else if (error == 0)
2661 error = ENOTEMPTY;
2662 hammer_done_cursor(&cursor);
2663 return(error);
2664}
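
/*
 * Illustrative sketch (not compiled): an rmdir-style caller gating the
 * actual removal on the emptiness check above.  remove_directory() is
 * a hypothetical stand-in for the VNOPS-side removal; ENOTEMPTY
 * propagates to the caller and EDEADLK forces a retry.
 */
#if 0
	error = hammer_ip_check_directory_empty(trans, ip);
	if (error == 0)
		error = remove_directory(trans, dip, ip);	/* hypothetical */
#endif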
2665
2666/*
2667 * Localize the data payload. Directory entries may need their
2668 * localization adjusted.
2669 */
2670static
2671int
2672hammer_cursor_localize_data(hammer_data_ondisk_t data,
2673 hammer_btree_leaf_elm_t leaf)
2674{
2675 u_int32_t localization;
2676
2677 if (leaf->base.rec_type == HAMMER_RECTYPE_DIRENTRY) {
2678 localization = leaf->base.localization &
2679 HAMMER_LOCALIZE_PSEUDOFS_MASK;
2680 if (data->entry.localization != localization) {
2681 data->entry.localization = localization;
2682 hammer_crc_set_leaf(data, leaf);
2683 }
2684 }
2685 return(0);
2686}