/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_object.c,v 1.97 2008/09/23 22:28:56 dillon Exp $
 */

#include "hammer.h"
static int hammer_mem_lookup(hammer_cursor_t cursor);
static void hammer_mem_first(hammer_cursor_t cursor);
static int hammer_frontend_trunc_callback(hammer_record_t record,
                                void *data __unused);
static int hammer_bulk_scan_callback(hammer_record_t record, void *data);
static int hammer_record_needs_overwrite_delete(hammer_record_t record);
static int hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
                      hammer_btree_leaf_elm_t leaf);

struct rec_trunc_info {
        u_int16_t       rec_type;
        int64_t         trunc_off;
};

struct hammer_bulk_info {
        hammer_record_t record;
        struct hammer_btree_leaf_elm leaf;
};
/*
 * Red-black tree support.  Comparison code for insertion.
 */
static int
hammer_rec_rb_compare(hammer_record_t rec1, hammer_record_t rec2)
{
        if (rec1->leaf.base.rec_type < rec2->leaf.base.rec_type)
                return(-1);
        if (rec1->leaf.base.rec_type > rec2->leaf.base.rec_type)
                return(1);

        if (rec1->leaf.base.key < rec2->leaf.base.key)
                return(-1);
        if (rec1->leaf.base.key > rec2->leaf.base.key)
                return(1);

        /*
         * For search & insertion purposes, records deleted by the
         * frontend or deleted/committed by the backend are silently
         * ignored.  Otherwise pipelined insertions will get messed
         * up.
         *
         * rec1 is greater than rec2 if rec1 is marked deleted.
         * rec1 is less than rec2 if rec2 is marked deleted.
         *
         * Multiple deleted records may be present, do not return 0
         * if both are marked deleted.
         */
        if (rec1->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
                           HAMMER_RECF_COMMITTED)) {
                return(1);
        }
        if (rec2->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
                           HAMMER_RECF_COMMITTED)) {
                return(-1);
        }

        return(0);
}
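/*
 * Added note (not part of the original source): if two deleted records
 * happened to share the same {rec_type, key} and this comparator returned
 * 0 for the pair, RB_INSERT() would reject the second record.  Because a
 * deleted rec1 always compares "greater" and a deleted rec2 always
 * compares "lesser", a pipelined re-insertion over a not-yet-reaped
 * deleted record still finds a unique slot in the tree.
 */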
/*
 * Basic record comparison code similar to hammer_btree_cmp().
 */
static int
hammer_rec_cmp(hammer_base_elm_t elm, hammer_record_t rec)
{
        if (elm->rec_type < rec->leaf.base.rec_type)
                return(-3);
        if (elm->rec_type > rec->leaf.base.rec_type)
                return(3);

        if (elm->key < rec->leaf.base.key)
                return(-2);
        if (elm->key > rec->leaf.base.key)
                return(2);

        /*
         * Never match against an item deleted by the frontend
         * or backend, or committed by the backend.
         *
         * elm is less than rec if rec is marked deleted.
         */
        if (rec->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
                          HAMMER_RECF_COMMITTED)) {
                return(-1);
        }
        return(0);
}
/*
 * Ranged scan to locate overlapping record(s).  This is used by
 * hammer_ip_get_bulk() to locate an overlapping record.  We have
 * to use a ranged scan because the keys for data records with the
 * same file base offset can be different due to differing data_len's.
 *
 * NOTE: The base file offset of a data record is (key - data_len), not (key).
 */
static int
hammer_rec_overlap_cmp(hammer_record_t rec, void *data)
{
        struct hammer_bulk_info *info = data;
        hammer_btree_leaf_elm_t leaf = &info->leaf;

        if (rec->leaf.base.rec_type < leaf->base.rec_type)
                return(-3);
        if (rec->leaf.base.rec_type > leaf->base.rec_type)
                return(3);

        /*
         * Overlap compare
         */
        if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
                /* rec_beg >= leaf_end */
                if (rec->leaf.base.key - rec->leaf.data_len >= leaf->base.key)
                        return(2);
                /* rec_end <= leaf_beg */
                if (rec->leaf.base.key <= leaf->base.key - leaf->data_len)
                        return(-2);
        } else {
                if (rec->leaf.base.key < leaf->base.key)
                        return(-2);
                if (rec->leaf.base.key > leaf->base.key)
                        return(2);
        }

        /*
         * We have to return 0 at this point, even if DELETED_FE is set,
         * because returning anything else will cause the scan to ignore
         * one of the branches when we really want it to check both.
         */
        return(0);
}
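/*
 * Worked example with hypothetical values (not part of the original
 * source): a data record covering file bytes 0x10000-0x13fff has
 * data_len 0x4000 and is keyed at 0x14000 (key = base offset + length).
 * A bulk lookup for bytes 0x12000-0x12fff builds info->leaf with key
 * 0x13000 and data_len 0x1000.  Neither "rec_beg >= leaf_end"
 * (0x10000 >= 0x13000) nor "rec_end <= leaf_beg" (0x14000 <= 0x12000)
 * holds, so the compare returns 0 and the record is reported as
 * overlapping.
 */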
/*
 * RB_SCAN comparison code for hammer_mem_first().  The argument order
 * is reversed so the comparison result has to be negated.  key_beg and
 * key_end are both range-inclusive.
 *
 * Localized deletions are not cached in-memory.
 */
static
int
hammer_rec_scan_cmp(hammer_record_t rec, void *data)
{
        hammer_cursor_t cursor = data;
        int r;

        r = hammer_rec_cmp(&cursor->key_beg, rec);
        if (r > 1)
                return(-1);
        r = hammer_rec_cmp(&cursor->key_end, rec);
        if (r < -1)
                return(1);
        return(0);
}
/*
 * This compare function is used when simply looking up key_beg.
 */
static
int
hammer_rec_find_cmp(hammer_record_t rec, void *data)
{
        hammer_cursor_t cursor = data;
        int r;

        r = hammer_rec_cmp(&cursor->key_beg, rec);
        if (r > 1)
                return(-1);
        if (r < -1)
                return(1);
        return(0);
}
/*
 * Locate blocks within the truncation range.  Partial blocks do not count.
 */
static
int
hammer_rec_trunc_cmp(hammer_record_t rec, void *data)
{
        struct rec_trunc_info *info = data;

        if (rec->leaf.base.rec_type < info->rec_type)
                return(-1);
        if (rec->leaf.base.rec_type > info->rec_type)
                return(1);

        switch(rec->leaf.base.rec_type) {
        case HAMMER_RECTYPE_DB:
                /*
                 * DB record key is not beyond the truncation point, retain.
                 */
                if (rec->leaf.base.key < info->trunc_off)
                        return(-1);
                break;
        case HAMMER_RECTYPE_DATA:
                /*
                 * DATA record offset start is not beyond the truncation point,
                 * retain.
                 */
                if (rec->leaf.base.key - rec->leaf.data_len < info->trunc_off)
                        return(-1);
                break;
        default:
                panic("hammer_rec_trunc_cmp: unexpected record type");
        }

        /*
         * The record start is >= the truncation point, return match,
         * the record should be destroyed.
         */
        return(0);
}
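/*
 * Worked example with hypothetical values (not part of the original
 * source): truncating a regular file to 0x8000 sets info->trunc_off to
 * 0x8000.  A DATA record keyed at 0x8000 with data_len 0x4000 starts at
 * 0x4000, below the truncation point, so it compares -1 and is retained
 * (partial blocks are not deleted here).  A DATA record keyed at 0xc000
 * with data_len 0x4000 starts exactly at 0x8000, compares 0, and is
 * passed to the truncation callback for destruction.
 */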
RB_GENERATE(hammer_rec_rb_tree, hammer_record, rb_node, hammer_rec_rb_compare);
/*
 * Allocate a record for the caller to finish filling in.  The record is
 * returned referenced.
 */
hammer_record_t
hammer_alloc_mem_record(hammer_inode_t ip, int data_len)
{
        hammer_record_t record;
        hammer_mount_t hmp;

        hmp = ip->hmp;
        ++hammer_count_records;
        record = kmalloc(sizeof(*record), hmp->m_misc,
                         M_WAITOK | M_ZERO | M_USE_RESERVE);
        record->flush_state = HAMMER_FST_IDLE;
        record->ip = ip;
        record->leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
        record->leaf.data_len = data_len;
        hammer_ref(&record->lock);

        if (data_len) {
                record->data = kmalloc(data_len, hmp->m_misc,
                                       M_WAITOK | M_ZERO);
                record->flags |= HAMMER_RECF_ALLOCDATA;
                ++hammer_count_record_datas;
        }
        return(record);
}
void
hammer_wait_mem_record_ident(hammer_record_t record, const char *ident)
{
        while (record->flush_state == HAMMER_FST_FLUSH) {
                record->flags |= HAMMER_RECF_WANTED;
                tsleep(record, 0, ident, 0);
        }
}
/*
 * Called from the backend, hammer_inode.c, after a record has been
 * flushed to disk.  The record has been exclusively locked by the
 * caller and interlocked with BE.
 *
 * We clean up the state, unlock, and release the record (the record
 * was referenced by the fact that it was in the HAMMER_FST_FLUSH state).
 */
void
hammer_flush_record_done(hammer_record_t record, int error)
{
        hammer_inode_t target_ip;

        KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
        KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);

        /*
         * If an error occurred, the backend was unable to sync the
         * record to its media.  Leave the record intact.
         */
        if (error) {
                hammer_critical_error(record->ip->hmp, record->ip, error,
                                      "while flushing record");
        }

        --record->flush_group->refs;
        record->flush_group = NULL;

        /*
         * Adjust the flush state and dependency based on success or
         * failure.
         */
        if (record->flags & (HAMMER_RECF_DELETED_BE | HAMMER_RECF_COMMITTED)) {
                if ((target_ip = record->target_ip) != NULL) {
                        TAILQ_REMOVE(&target_ip->target_list, record,
                                     target_entry);
                        record->target_ip = NULL;
                        hammer_test_inode(target_ip);
                }
                record->flush_state = HAMMER_FST_IDLE;
        } else {
                if (record->target_ip) {
                        record->flush_state = HAMMER_FST_SETUP;
                        hammer_test_inode(record->ip);
                        hammer_test_inode(record->target_ip);
                } else {
                        record->flush_state = HAMMER_FST_IDLE;
                }
        }
        record->flags &= ~HAMMER_RECF_INTERLOCK_BE;

        if (record->flags & HAMMER_RECF_WANTED) {
                record->flags &= ~HAMMER_RECF_WANTED;
                wakeup(record);
        }
        hammer_rel_mem_record(record);
}
/*
 * Release a memory record.  Records marked for deletion are immediately
 * removed from the RB-Tree but otherwise left intact until the last ref
 * goes away.
 */
void
hammer_rel_mem_record(struct hammer_record *record)
{
        hammer_mount_t hmp;
        hammer_reserve_t resv;
        hammer_inode_t ip;
        hammer_inode_t target_ip;

        hammer_unref(&record->lock);

        if (record->lock.refs == 0) {
                /*
                 * Upon release of the last reference wakeup any waiters.
                 * The record structure may get destroyed so callers will
                 * loop up and do a relookup.
                 *
                 * WARNING!  Record must be removed from RB-TREE before we
                 * might possibly block.  hammer_test_inode() can block!
                 */
                ip = record->ip;
                hmp = ip->hmp;

                /*
                 * Upon release of the last reference a record marked deleted
                 * by the front or backend, or committed by the backend,
                 * is destroyed.
                 */
                if (record->flags & (HAMMER_RECF_DELETED_FE |
                                     HAMMER_RECF_DELETED_BE |
                                     HAMMER_RECF_COMMITTED)) {
                        KKASSERT(ip->lock.refs > 0);
                        KKASSERT(record->flush_state != HAMMER_FST_FLUSH);

                        /*
                         * target_ip may have zero refs, we have to ref it
                         * to prevent it from being ripped out from under
                         * us.
                         */
                        if ((target_ip = record->target_ip) != NULL) {
                                TAILQ_REMOVE(&target_ip->target_list,
                                             record, target_entry);
                                record->target_ip = NULL;
                                hammer_ref(&target_ip->lock);
                        }

                        if (record->flags & HAMMER_RECF_ONRBTREE) {
                                RB_REMOVE(hammer_rec_rb_tree,
                                          &record->ip->rec_tree,
                                          record);
                                KKASSERT(ip->rsv_recs > 0);
                                --hmp->rsv_recs;
                                --ip->rsv_recs;
                                hmp->rsv_databytes -= record->leaf.data_len;
                                record->flags &= ~HAMMER_RECF_ONRBTREE;

                                if (RB_EMPTY(&record->ip->rec_tree)) {
                                        record->ip->flags &= ~HAMMER_INODE_XDIRTY;
                                        record->ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
                                        hammer_test_inode(record->ip);
                                }
                        }

                        /*
                         * We must wait for any direct-IO to complete before
                         * we can destroy the record because the bio may
                         * have a reference to it.
                         */
                        if (record->flags &
                           (HAMMER_RECF_DIRECT_IO | HAMMER_RECF_DIRECT_INVAL)) {
                                hammer_io_direct_wait(record);
                        }

                        /*
                         * Do this test after removing record from the B-Tree.
                         */
                        if (target_ip) {
                                hammer_test_inode(target_ip);
                                hammer_rel_inode(target_ip, 0);
                        }

                        if (record->flags & HAMMER_RECF_ALLOCDATA) {
                                --hammer_count_record_datas;
                                kfree(record->data, hmp->m_misc);
                                record->flags &= ~HAMMER_RECF_ALLOCDATA;
                        }

                        /*
                         * Release the reservation.
                         *
                         * If the record was not committed we can theoretically
                         * undo the reservation.  However, doing so might
                         * create weird edge cases with the ordering of
                         * direct writes because the related buffer cache
                         * elements are per-vnode.  So we don't try.
                         */
                        if ((resv = record->resv) != NULL) {
                                /* XXX undo leaf.data_offset,leaf.data_len */
                                hammer_blockmap_reserve_complete(hmp, resv);
                                record->resv = NULL;
                        }
                        record->data = NULL;
                        --hammer_count_records;
                        kfree(record, hmp->m_misc);
                }
        }
}
/*
 * Record visibility depends on whether the record is being accessed by
 * the backend or the frontend.  Backend tests ignore the frontend delete
 * flag.  Frontend tests do NOT ignore the backend delete/commit flags and
 * must also check for commit races.
 *
 * Return non-zero if the record is visible, zero if it isn't or if it is
 * deleted.  Returns 0 if the record has been committed (unless the special
 * delete-visibility flag is set).  A committed record must be located
 * via the media B-Tree.  Returns non-zero if the record is good.
 *
 * If HAMMER_CURSOR_DELETE_VISIBILITY is set we allow deleted memory
 * records to be returned.  This is so pending deletions are detected
 * when using an iterator to locate an unused hash key, or when we need
 * to locate historical records on-disk to destroy.
 */
static
int
hammer_ip_iterate_mem_good(hammer_cursor_t cursor, hammer_record_t record)
{
        if (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY)
                return(1);
        if (cursor->flags & HAMMER_CURSOR_BACKEND) {
                if (record->flags & (HAMMER_RECF_DELETED_BE |
                                     HAMMER_RECF_COMMITTED)) {
                        return(0);
                }
        } else {
                if (record->flags & (HAMMER_RECF_DELETED_FE |
                                     HAMMER_RECF_DELETED_BE |
                                     HAMMER_RECF_COMMITTED)) {
                        return(0);
                }
        }
        return(1);
}
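/*
 * Summary table (added commentary, not part of the original source):
 *
 *      record flags                    frontend        backend
 *      (none)                          visible         visible
 *      DELETED_FE only                 hidden          visible
 *      DELETED_BE or COMMITTED         hidden          hidden
 *
 * With HAMMER_CURSOR_DELETE_VISIBILITY set in the cursor everything is
 * reported as visible regardless of the record flags.
 */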
/*
 * This callback is used as part of the RB_SCAN function for in-memory
 * records.  We terminate it (return -1) as soon as we get a match.
 *
 * This routine is used by frontend code.
 *
 * The primary compare code does not account for ASOF lookups.  This
 * code handles that case as well as a few others.
 */
static
int
hammer_rec_scan_callback(hammer_record_t rec, void *data)
{
        hammer_cursor_t cursor = data;

        /*
         * We terminate on success, so this should be NULL on entry.
         */
        KKASSERT(cursor->iprec == NULL);

        /*
         * Skip if the record was marked deleted or committed.
         */
        if (hammer_ip_iterate_mem_good(cursor, rec) == 0)
                return(0);

        /*
         * Skip if not visible due to our as-of TID
         */
        if (cursor->flags & HAMMER_CURSOR_ASOF) {
                if (cursor->asof < rec->leaf.base.create_tid)
                        return(0);
                if (rec->leaf.base.delete_tid &&
                    cursor->asof >= rec->leaf.base.delete_tid) {
                        return(0);
                }
        }

        /*
         * ref the record.  The record is protected from backend B-Tree
         * interactions by virtue of the cursor's IP lock.
         */
        hammer_ref(&rec->lock);

        /*
         * The record may have been deleted or committed while we
         * were blocked.  XXX remove?
         */
        if (hammer_ip_iterate_mem_good(cursor, rec) == 0) {
                hammer_rel_mem_record(rec);
                return(0);
        }

        /*
         * Set the matching record and stop the scan.
         */
        cursor->iprec = rec;
        return(-1);
}
/*
 * Lookup an in-memory record given the key specified in the cursor.  Works
 * just like hammer_btree_lookup() but operates on an inode's in-memory
 * record list.
 *
 * The lookup must fail if the record is marked for deferred deletion.
 *
 * The API for mem/btree_lookup() does not mess with the ATE/EOF bits.
 */
static
int
hammer_mem_lookup(hammer_cursor_t cursor)
{
        KKASSERT(cursor->ip);
        if (cursor->iprec) {
                hammer_rel_mem_record(cursor->iprec);
                cursor->iprec = NULL;
        }
        hammer_rec_rb_tree_RB_SCAN(&cursor->ip->rec_tree, hammer_rec_find_cmp,
                                   hammer_rec_scan_callback, cursor);

        return(cursor->iprec ? 0 : ENOENT);
}
/*
 * hammer_mem_first() - locate the first in-memory record matching the
 * cursor within the bounds of the key range.
 *
 * WARNING!  API is slightly different from btree_first().  hammer_mem_first()
 * will set ATEMEM the same as MEMEOF, and does not return any error.
 */
static
void
hammer_mem_first(hammer_cursor_t cursor)
{
        hammer_inode_t ip;

        ip = cursor->ip;
        KKASSERT(ip != NULL);

        if (cursor->iprec) {
                hammer_rel_mem_record(cursor->iprec);
                cursor->iprec = NULL;
        }
        hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
                                   hammer_rec_scan_callback, cursor);

        if (cursor->iprec)
                cursor->flags &= ~(HAMMER_CURSOR_MEMEOF | HAMMER_CURSOR_ATEMEM);
        else
                cursor->flags |= HAMMER_CURSOR_MEMEOF | HAMMER_CURSOR_ATEMEM;
}
/************************************************************************
 *                   HAMMER IN-MEMORY RECORD FUNCTIONS                  *
 ************************************************************************
 *
 * These functions manipulate in-memory records.  Such records typically
 * exist prior to being committed to disk or indexed via the on-disk B-Tree.
 */
/*
 * Add a directory entry (dip,ncp) which references inode (ip).
 *
 * Note that the low 32 bits of the namekey are set temporarily to create
 * a unique in-memory record, and may be modified a second time when the
 * record is synchronized to disk.  In particular, the low 32 bits cannot be
 * all 0's when synching to disk, which is not handled here.
 *
 * NOTE: bytes does not include any terminating \0 on name, and name might
 * not be terminated.
 */
int
hammer_ip_add_directory(struct hammer_transaction *trans,
                     struct hammer_inode *dip, const char *name, int bytes,
                     struct hammer_inode *ip)
{
        struct hammer_cursor cursor;
        hammer_record_t record;
        int error;
        u_int32_t max_iterations;

        record = hammer_alloc_mem_record(dip, HAMMER_ENTRY_SIZE(bytes));

        record->type = HAMMER_MEM_RECORD_ADD;
        record->leaf.base.localization = dip->obj_localization +
                                         HAMMER_LOCALIZE_MISC;
        record->leaf.base.obj_id = dip->obj_id;
        record->leaf.base.key = hammer_directory_namekey(dip, name, bytes,
                                                         &max_iterations);
        record->leaf.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
        record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
        record->data->entry.obj_id = ip->obj_id;
        record->data->entry.localization = ip->obj_localization;
        bcopy(name, record->data->entry.name, bytes);

        ++ip->ino_data.nlinks;
        ip->ino_data.ctime = trans->time;
        hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);

        /*
         * Find an unused namekey.  Both the in-memory record tree and
         * the B-Tree are checked.  We do not want historically deleted
         * names to create a collision as our iteration space may be limited,
         * and since create_tid wouldn't match anyway an ASOF search
         * must be used to locate collisions.
         *
         * delete-visibility is set so pending deletions do not give us
         * a false-negative on our ability to use an iterator.
         *
         * The iterator must not rollover the key.  Directory keys only
         * use the positive key space.
         */
        hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
        cursor.key_beg = record->leaf.base;
        cursor.flags |= HAMMER_CURSOR_ASOF;
        cursor.flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
        cursor.asof = ip->obj_asof;

        while (hammer_ip_lookup(&cursor) == 0) {
                ++record->leaf.base.key;
                KKASSERT(record->leaf.base.key > 0);
                cursor.key_beg.key = record->leaf.base.key;
                if (--max_iterations == 0) {
                        hammer_rel_mem_record(record);
                        error = ENOSPC;
                        goto failed;
                }
        }

        /*
         * The target inode and the directory entry are bound together.
         */
        record->target_ip = ip;
        record->flush_state = HAMMER_FST_SETUP;
        TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);

        /*
         * The inode now has a dependency and must be taken out of the idle
         * state.  An inode not in an idle state is given an extra reference.
         *
         * When transitioning to a SETUP state flag for an automatic reflush
         * when the dependencies are disposed of if someone is waiting on
         * the inode.
         */
        if (ip->flush_state == HAMMER_FST_IDLE) {
                hammer_ref(&ip->lock);
                ip->flush_state = HAMMER_FST_SETUP;
                if (ip->flags & HAMMER_INODE_FLUSHW)
                        ip->flags |= HAMMER_INODE_REFLUSH;
        }
        error = hammer_mem_add(record);
        if (error == 0) {
                dip->ino_data.mtime = trans->time;
                hammer_modify_inode(dip, HAMMER_INODE_MTIME);
        }
failed:
        hammer_done_cursor(&cursor);
        return(error);
}
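/*
 * Illustrative example with hypothetical values (not part of the
 * original source): if hammer_directory_namekey() hashes a name to
 * 0x1234567800000000 and that key is already in use (in-memory or
 * on-media), the iteration loop above probes 0x1234567800000001,
 * 0x1234567800000002, and so on until hammer_ip_lookup() returns
 * ENOENT, or fails the add with ENOSPC after max_iterations probes.
 */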
/*
 * Delete the directory entry and update the inode link count.  The
 * cursor must be seeked to the directory entry record being deleted.
 *
 * The related inode should be share-locked by the caller.  The caller is
 * on the frontend.  The inode may also be NULL, indicating that the
 * directory entry being removed has no related inode.
 *
 * This function can return EDEADLK requiring the caller to terminate
 * the cursor, any locks, wait on the returned record, and retry.
 */
int
hammer_ip_del_directory(struct hammer_transaction *trans,
                     hammer_cursor_t cursor, struct hammer_inode *dip,
                     struct hammer_inode *ip)
{
        hammer_record_t record;
        int error;

        if (hammer_cursor_inmem(cursor)) {
                /*
                 * In-memory (unsynchronized) records can simply be freed.
                 *
                 * Even though the HAMMER_RECF_DELETED_FE flag is ignored
                 * by the backend, we must still avoid races against the
                 * backend potentially syncing the record to the media.
                 *
                 * We cannot call hammer_ip_delete_record(), that routine may
                 * only be called from the backend.
                 */
                record = cursor->iprec;
                if (record->flags & (HAMMER_RECF_INTERLOCK_BE |
                                     HAMMER_RECF_DELETED_BE |
                                     HAMMER_RECF_COMMITTED)) {
                        KKASSERT(cursor->deadlk_rec == NULL);
                        hammer_ref(&record->lock);
                        cursor->deadlk_rec = record;
                        error = EDEADLK;
                } else {
                        KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
                        record->flags |= HAMMER_RECF_DELETED_FE;
                        error = 0;
                }
        } else {
                /*
                 * If the record is on-disk we have to queue the deletion by
                 * the record's key.  This also causes lookups to skip the
                 * record.
                 */
                KKASSERT(dip->flags &
                         (HAMMER_INODE_ONDISK | HAMMER_INODE_DONDISK));
                record = hammer_alloc_mem_record(dip, 0);
                record->type = HAMMER_MEM_RECORD_DEL;
                record->leaf.base = cursor->leaf->base;

                /*
                 * ip may be NULL, indicating the deletion of a directory
                 * entry which has no related inode.
                 */
                record->target_ip = ip;
                if (ip) {
                        record->flush_state = HAMMER_FST_SETUP;
                        TAILQ_INSERT_TAIL(&ip->target_list, record,
                                          target_entry);
                } else {
                        record->flush_state = HAMMER_FST_IDLE;
                }

                /*
                 * The inode now has a dependency and must be taken out of
                 * the idle state.  An inode not in an idle state is given
                 * an extra reference.
                 *
                 * When transitioning to a SETUP state flag for an automatic
                 * reflush when the dependencies are disposed of if someone
                 * is waiting on the inode.
                 */
                if (ip && ip->flush_state == HAMMER_FST_IDLE) {
                        hammer_ref(&ip->lock);
                        ip->flush_state = HAMMER_FST_SETUP;
                        if (ip->flags & HAMMER_INODE_FLUSHW)
                                ip->flags |= HAMMER_INODE_REFLUSH;
                }

                error = hammer_mem_add(record);
        }

        /*
         * One less link.  The file may still be open in the OS even after
         * all links have gone away.
         *
         * We have to terminate the cursor before syncing the inode to
         * avoid deadlocking against ourselves.  XXX this may no longer
         * be true.
         *
         * If nlinks drops to zero and the vnode is inactive (or there is
         * no vnode), call hammer_inode_unloadable_check() to zonk the
         * inode.  If we don't do this here the inode will not be destroyed
         * on-media until we unmount.
         */
        if (error == 0) {
                if (ip) {
                        --ip->ino_data.nlinks;  /* do before we might block */
                        ip->ino_data.ctime = trans->time;
                }
                dip->ino_data.mtime = trans->time;
                hammer_modify_inode(dip, HAMMER_INODE_MTIME);
                if (ip) {
                        hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
                        if (ip->ino_data.nlinks == 0 &&
                            (ip->vp == NULL || (ip->vp->v_flag & VINACTIVE))) {
                                hammer_done_cursor(cursor);
                                hammer_inode_unloadable_check(ip, 1);
                                hammer_flush_inode(ip, 0);
                        }
                }
        }
        return(error);
}
/*
 * Add a record to an inode.
 *
 * The caller must allocate the record with hammer_alloc_mem_record(ip) and
 * initialize the following additional fields:
 *
 * The related inode should be share-locked by the caller.  The caller is
 * on the frontend.
 *
 * record->rec.entry.base.base.key
 * record->rec.entry.base.base.rec_type
 * record->rec.entry.base.base.data_len
 * record->data         (a copy will be kmalloc'd if it cannot be embedded)
 */
int
hammer_ip_add_record(struct hammer_transaction *trans, hammer_record_t record)
{
        hammer_inode_t ip = record->ip;
        int error;

        KKASSERT(record->leaf.base.localization != 0);
        record->leaf.base.obj_id = ip->obj_id;
        record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
        error = hammer_mem_add(record);
        return(error);
}
/*
 * Locate a bulk record in-memory.  Bulk records allow disk space to be
 * reserved so the front-end can flush large data writes without having
 * to queue the BIO to the flusher.  Only the related record gets queued
 * to the flusher.
 */
static hammer_record_t
hammer_ip_get_bulk(hammer_inode_t ip, off_t file_offset, int bytes)
{
        struct hammer_bulk_info info;

        bzero(&info, sizeof(info));
        info.leaf.base.obj_id = ip->obj_id;
        info.leaf.base.key = file_offset + bytes;
        info.leaf.base.create_tid = 0;
        info.leaf.base.delete_tid = 0;
        info.leaf.base.rec_type = HAMMER_RECTYPE_DATA;
        info.leaf.base.obj_type = 0;                            /* unused */
        info.leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;        /* unused */
        info.leaf.base.localization = ip->obj_localization +    /* unused */
                                      HAMMER_LOCALIZE_MISC;
        info.leaf.data_len = bytes;

        hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_overlap_cmp,
                                   hammer_bulk_scan_callback, &info);

        return(info.record);    /* may be NULL */
}
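/*
 * Usage sketch (illustrative only, not part of the original source):
 * locate any in-memory record overlapping a 16KB write at file offset
 * 0x10000.  The scan is keyed on (file_offset + bytes) because that is
 * how data records are keyed.
 */
#if 0
        hammer_record_t conflict;

        conflict = hammer_ip_get_bulk(ip, 0x10000, 16384);
        if (conflict) {
                /* record is returned referenced; release when done */
                hammer_rel_mem_record(conflict);
        }
#endif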
/*
 * Take records vetted by overlap_cmp.  The first non-deleted record
 * (if any) stops the scan.
 */
static int
hammer_bulk_scan_callback(hammer_record_t record, void *data)
{
        struct hammer_bulk_info *info = data;

        if (record->flags & (HAMMER_RECF_DELETED_FE | HAMMER_RECF_DELETED_BE |
                             HAMMER_RECF_COMMITTED)) {
                return(0);
        }
        hammer_ref(&record->lock);
        info->record = record;
        return(-1);                     /* stop scan */
}
/*
 * Reserve blockmap space placemarked with an in-memory record.
 *
 * This routine is called by the frontend in order to be able to directly
 * flush a buffer cache buffer.  The frontend has locked the related buffer
 * cache buffers and we should be able to manipulate any overlapping
 * in-memory records.
 *
 * The caller is responsible for adding the returned record.
 */
hammer_record_t
hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, void *data, int bytes,
                   int *errorp)
{
        hammer_record_t record;
        hammer_record_t conflict;
        int zone;

        /*
         * Deal with conflicting in-memory records.  We cannot have multiple
         * in-memory records for the same base offset without seriously
         * confusing the backend, including but not limited to the backend
         * issuing delete-create-delete or create-delete-create sequences
         * and asserting on the delete_tid being the same as the create_tid.
         *
         * If we encounter a record with the backend interlock set we cannot
         * immediately delete it without confusing the backend.
         */
        while ((conflict = hammer_ip_get_bulk(ip, file_offset, bytes)) != NULL) {
                if (conflict->flags & HAMMER_RECF_INTERLOCK_BE) {
                        conflict->flags |= HAMMER_RECF_WANTED;
                        tsleep(conflict, 0, "hmrrc3", 0);
                } else {
                        conflict->flags |= HAMMER_RECF_DELETED_FE;
                }
                hammer_rel_mem_record(conflict);
        }

        /*
         * Create a record to cover the direct write.  This is called with
         * the related BIO locked so there should be no possible conflict.
         *
         * The backend is responsible for finalizing the space reserved in
         * this record.
         *
         * XXX bytes not aligned, depend on the reservation code to
         * align the reservation.
         */
        record = hammer_alloc_mem_record(ip, 0);
        zone = (bytes >= HAMMER_BUFSIZE) ? HAMMER_ZONE_LARGE_DATA_INDEX :
                                           HAMMER_ZONE_SMALL_DATA_INDEX;
        record->resv = hammer_blockmap_reserve(ip->hmp, zone, bytes,
                                               &record->leaf.data_offset,
                                               errorp);
        if (record->resv == NULL) {
                kprintf("hammer_ip_add_bulk: reservation failed\n");
                hammer_rel_mem_record(record);
                return(NULL);
        }
        record->type = HAMMER_MEM_RECORD_DATA;
        record->leaf.base.rec_type = HAMMER_RECTYPE_DATA;
        record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
        record->leaf.base.obj_id = ip->obj_id;
        record->leaf.base.key = file_offset + bytes;
        record->leaf.base.localization = ip->obj_localization +
                                         HAMMER_LOCALIZE_MISC;
        record->leaf.data_len = bytes;
        hammer_crc_set_leaf(data, &record->leaf);
        KKASSERT(*errorp == 0);
        return(record);
}
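/*
 * Usage sketch (illustrative only, not part of the original source):
 * the strategy-write path reserves space for a buffer it is about to
 * write directly, so only the placemarker record goes through the
 * flusher.
 */
#if 0
        int error;
        hammer_record_t record;

        record = hammer_ip_add_bulk(ip, file_offset, data, bytes, &error);
        if (record == NULL)
                return(error);
        /* ... issue the direct write into the reserved blockmap space ... */
        /* ... then add the returned record (see comment above) ... */
#endif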
/*
 * Frontend truncation code.  Scan in-memory records only.  On-disk records
 * and records in a flushing state are handled by the backend.  The vnops
 * setattr code will handle the block containing the truncation point.
 *
 * Partial blocks are not deleted.
 */
int
hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size)
{
        struct rec_trunc_info info;

        switch(ip->ino_data.obj_type) {
        case HAMMER_OBJTYPE_REGFILE:
                info.rec_type = HAMMER_RECTYPE_DATA;
                break;
        case HAMMER_OBJTYPE_DBFILE:
                info.rec_type = HAMMER_RECTYPE_DB;
                break;
        default:
                return(EINVAL);
        }
        info.trunc_off = file_size;
        hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_trunc_cmp,
                                   hammer_frontend_trunc_callback, &info);
        return(0);
}

static int
hammer_frontend_trunc_callback(hammer_record_t record, void *data __unused)
{
        if (record->flags & HAMMER_RECF_DELETED_FE)
                return(0);
        if (record->flush_state == HAMMER_FST_FLUSH)
                return(0);
        KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
        hammer_ref(&record->lock);
        record->flags |= HAMMER_RECF_DELETED_FE;
        hammer_rel_mem_record(record);
        return(0);
}
/*
 * Return 1 if the caller must check for and delete existing records
 * before writing out a new data record.
 *
 * Return 0 if the caller can just insert the record into the B-Tree without
 * deleting anything.
 */
static int
hammer_record_needs_overwrite_delete(hammer_record_t record)
{
        hammer_inode_t ip = record->ip;
        int64_t file_offset;
        int r;

        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE)
                file_offset = record->leaf.base.key;
        else
                file_offset = record->leaf.base.key - record->leaf.data_len;
        r = (file_offset < ip->save_trunc_off);
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
                if (ip->save_trunc_off <= record->leaf.base.key)
                        ip->save_trunc_off = record->leaf.base.key + 1;
        } else {
                if (ip->save_trunc_off < record->leaf.base.key)
                        ip->save_trunc_off = record->leaf.base.key;
        }
        return(r);
}
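/*
 * Worked example with hypothetical values (not part of the original
 * source): assume ip->save_trunc_off is 0x20000 and we are syncing a
 * REGFILE data record keyed at 0x18000 with data_len 0x8000.  Its base
 * offset is 0x10000, which is below 0x20000, so the function returns 1
 * and the caller must delete any overlapping on-disk records first.
 * save_trunc_off remains 0x20000 because the record's key does not
 * extend past it.
 */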
/*
 * Backend code.  Sync a record to the media.
 */
int
hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t record)
{
        hammer_transaction_t trans = cursor->trans;
        int64_t file_offset;
        int bytes;
        void *bdata;
        int error;
        int doprop;

        KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
        KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
        KKASSERT(record->leaf.base.localization != 0);

        /*
         * Any direct-write related to the record must complete before we
         * can sync the record to the on-disk media.
         */
        if (record->flags & (HAMMER_RECF_DIRECT_IO | HAMMER_RECF_DIRECT_INVAL))
                hammer_io_direct_wait(record);

        /*
         * If this is a bulk-data record placemarker there may be an existing
         * record on-disk, indicating a data overwrite.  If there is, the
         * on-disk record must be deleted before we can insert our new record.
         *
         * We've synthesized this record and do not know what the create_tid
         * on-disk is, nor how much data it represents.
         *
         * Keep in mind that (key) for data records is (base_offset + len),
         * not (base_offset).  Also, we only want to get rid of on-disk
         * records since we are trying to sync our in-memory record, call
         * hammer_ip_delete_range() with truncating set to 1 to make sure
         * it skips in-memory records.
         *
         * It is ok for the lookup to return ENOENT.
         *
         * NOTE OPTIMIZATION: sync_trunc_off is used to determine if we have
         * to call hammer_ip_delete_range() or not.  This also means we must
         * update sync_trunc_off() as we write.
         */
        if (record->type == HAMMER_MEM_RECORD_DATA &&
            hammer_record_needs_overwrite_delete(record)) {
                file_offset = record->leaf.base.key - record->leaf.data_len;
                bytes = (record->leaf.data_len + HAMMER_BUFMASK) &
                        ~HAMMER_BUFMASK;
                KKASSERT((file_offset & HAMMER_BUFMASK) == 0);
                error = hammer_ip_delete_range(
                                cursor, record->ip,
                                file_offset, file_offset + bytes - 1,
                                1);
                if (error && error != ENOENT)
                        goto done;
        }

        /*
         * If this is a general record there may be an on-disk version
         * that must be deleted before we can insert the new record.
         */
        if (record->type == HAMMER_MEM_RECORD_GENERAL) {
                error = hammer_delete_general(cursor, record->ip,
                                              &record->leaf);
                if (error && error != ENOENT)
                        goto done;
        }

        hammer_normalize_cursor(cursor);
        cursor->key_beg = record->leaf.base;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_BACKEND;
        cursor->flags &= ~HAMMER_CURSOR_INSERT;

        /*
         * Records can wind up on-media before the inode itself is on-media.
         * Flag the case.
         */
        record->ip->flags |= HAMMER_INODE_DONDISK;

        /*
         * If we are deleting a directory entry an exact match must be
         * found on-disk.
         */
        if (record->type == HAMMER_MEM_RECORD_DEL) {
                error = hammer_btree_lookup(cursor);
                if (error == 0) {
                        KKASSERT(cursor->iprec == NULL);
                        error = hammer_ip_delete_record(cursor, record->ip,
                                                        trans->tid);
                        if (error == 0) {
                                record->flags |= HAMMER_RECF_DELETED_BE |
                                                 HAMMER_RECF_COMMITTED;
                                ++record->ip->rec_generation;
                        }
                }
                goto done;
        }

        /*
         * We are inserting.
         *
         * Issue a lookup to position the cursor and locate the insertion
         * point.  The target key should not exist.  If we are creating a
         * directory entry we may have to iterate the low 32 bits of the
         * key to find an unused key.
         */
        hammer_sync_lock_sh(trans);
        cursor->flags |= HAMMER_CURSOR_INSERT;
        error = hammer_btree_lookup(cursor);
        if (hammer_debug_inode)
                kprintf("DOINSERT LOOKUP %d\n", error);
        if (error == 0) {
                kprintf("hammer_ip_sync_record: duplicate rec "
                        "at (%016llx)\n", record->leaf.base.key);
                Debugger("duplicate record1");
                error = EIO;
        }
#if 0
        if (record->type == HAMMER_MEM_RECORD_DATA)
                kprintf("sync_record %016llx ---------------- %016llx %d\n",
                        record->leaf.base.key - record->leaf.data_len,
                        record->leaf.data_offset, error);
#endif

        if (error != ENOENT)
                goto done_unlock;

        /*
         * Allocate the record and data.  The result buffers will be
         * marked as being modified and further calls to
         * hammer_modify_buffer() will result in unneeded UNDO records.
         *
         * Support zero-fill records (data == NULL and data_len != 0)
         */
        if (record->type == HAMMER_MEM_RECORD_DATA) {
                /*
                 * The data portion of a bulk-data record has already been
                 * committed to disk, we need only adjust the layer2
                 * statistics in the same transaction as our B-Tree insert.
                 */
                KKASSERT(record->leaf.data_offset != 0);
                error = hammer_blockmap_finalize(trans,
                                                 record->resv,
                                                 record->leaf.data_offset,
                                                 record->leaf.data_len);
        } else if (record->data && record->leaf.data_len) {
                /*
                 * Wholly cached record, with data.  Allocate the data.
                 */
                bdata = hammer_alloc_data(trans, record->leaf.data_len,
                                          record->leaf.base.rec_type,
                                          &record->leaf.data_offset,
                                          &cursor->data_buffer,
                                          &error);
                if (bdata == NULL)
                        goto done_unlock;
                hammer_crc_set_leaf(record->data, &record->leaf);
                hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);
                bcopy(record->data, bdata, record->leaf.data_len);
                hammer_modify_buffer_done(cursor->data_buffer);
        } else {
                /*
                 * Wholly cached record, without data.
                 */
                record->leaf.data_offset = 0;
                record->leaf.data_crc = 0;
        }

        error = hammer_btree_insert(cursor, &record->leaf, &doprop);
        if (hammer_debug_inode && error)
                kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n",
                        error, cursor->node->node_offset, cursor->index,
                        record->leaf.base.key);

        /*
         * Our record is on-disk and we normally mark the in-memory version
         * as having been committed (and not BE-deleted).
         *
         * If the record represented a directory deletion but we had to
         * sync a valid directory entry to disk due to dependencies,
         * we must convert the record to a covering delete so the
         * frontend does not have visibility on the synced entry.
         */
        if (error == 0) {
                if (doprop) {
                        hammer_btree_do_propagation(cursor,
                                                    record->ip->pfsm,
                                                    &record->leaf);
                }
                if (record->flags & HAMMER_RECF_CONVERT_DELETE) {
                        /*
                         * Must convert deleted directory entry add
                         * to a directory entry delete.
                         */
                        KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
                        record->flags &= ~HAMMER_RECF_DELETED_FE;
                        record->type = HAMMER_MEM_RECORD_DEL;
                        KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
                        record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
                        KKASSERT((record->flags & (HAMMER_RECF_COMMITTED |
                                                   HAMMER_RECF_DELETED_BE)) == 0);
                        /* converted record is not yet committed */
                        /* hammer_flush_record_done takes care of the rest */
                } else {
                        /*
                         * Everything went fine and we are now done with
                         * this record.
                         */
                        record->flags |= HAMMER_RECF_COMMITTED;
                        ++record->ip->rec_generation;
                }
        } else {
                if (record->leaf.data_offset) {
                        hammer_blockmap_free(trans, record->leaf.data_offset,
                                             record->leaf.data_len);
                }
        }
done_unlock:
        hammer_sync_unlock(trans);
done:
        return(error);
}
/*
 * Add the record to the inode's rec_tree.  The low 32 bits of a directory
 * entry's key is used to deal with hash collisions in the upper 32 bits.
 * A unique 64 bit key is generated in-memory and may be regenerated a
 * second time when the directory record is flushed to the on-disk B-Tree.
 *
 * A referenced record is passed to this function.  This function
 * eats the reference.  If an error occurs the record will be deleted.
 *
 * A copy of the temporary record->data pointer provided by the caller
 * will be made.
 */
int
hammer_mem_add(hammer_record_t record)
{
        hammer_mount_t hmp = record->ip->hmp;

        /*
         * Make a private copy of record->data
         */
        if (record->data)
                KKASSERT(record->flags & HAMMER_RECF_ALLOCDATA);

        /*
         * Insert into the RB tree.  A unique key should have already
         * been selected if this is a directory entry.
         */
        if (RB_INSERT(hammer_rec_rb_tree, &record->ip->rec_tree, record)) {
                record->flags |= HAMMER_RECF_DELETED_FE;
                hammer_rel_mem_record(record);
                return(EEXIST);
        }
        ++hmp->count_newrecords;
        ++hmp->rsv_recs;
        ++record->ip->rsv_recs;
        record->ip->hmp->rsv_databytes += record->leaf.data_len;
        record->flags |= HAMMER_RECF_ONRBTREE;
        hammer_modify_inode(record->ip, HAMMER_INODE_XDIRTY);
        hammer_rel_mem_record(record);
        return(0);
}
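/*
 * Added note (not part of the original source): both paths above consume
 * the caller's reference via hammer_rel_mem_record().  On the EEXIST path
 * the record is flagged DELETED_FE first, so dropping the last reference
 * also destroys it.  On success the record survives on the RB tree
 * without a reference, so the caller must not touch the record after
 * hammer_mem_add() returns.
 */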
/************************************************************************
 *                 HAMMER INODE MERGED-RECORD FUNCTIONS                 *
 ************************************************************************
 *
 * These functions augment the B-Tree scanning functions in hammer_btree.c
 * by merging in-memory records with on-disk records.
 */
/*
 * Locate a particular record either in-memory or on-disk.
 *
 * NOTE: This is basically a standalone routine, hammer_ip_next() may
 * NOT be called to iterate results.
 */
int
hammer_ip_lookup(hammer_cursor_t cursor)
{
        int error;

        /*
         * If the element is in-memory return it without searching the
         * on-disk B-Tree
         */
        KKASSERT(cursor->ip);
        error = hammer_mem_lookup(cursor);
        if (error == 0) {
                cursor->leaf = &cursor->iprec->leaf;
                return(error);
        }
        if (error != ENOENT)
                return(error);

        /*
         * If the inode has on-disk components search the on-disk B-Tree.
         */
        if ((cursor->ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) == 0)
                return(error);
        error = hammer_btree_lookup(cursor);
        if (error == 0)
                error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
        return(error);
}
/*
 * Helper for hammer_ip_first()/hammer_ip_next()
 *
 * NOTE: Both ATEDISK and DISKEOF will be set the same.  This sets up
 * hammer_ip_first() for calling hammer_ip_next(), and sets up the re-seek
 * state if hammer_ip_next() needs to re-seek.
 */
static __inline
int
_hammer_ip_seek_btree(hammer_cursor_t cursor)
{
        hammer_inode_t ip = cursor->ip;
        int error;

        if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
                error = hammer_btree_lookup(cursor);
                if (error == ENOENT || error == EDEADLK) {
                        if (hammer_debug_general & 0x2000)
                                kprintf("error %d node %p %016llx index %d\n",
                                        error, cursor->node,
                                        cursor->node->node_offset,
                                        cursor->index);
                        cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
                        error = hammer_btree_iterate(cursor);
                }
                if (error == 0) {
                        cursor->flags &= ~(HAMMER_CURSOR_DISKEOF |
                                           HAMMER_CURSOR_ATEDISK);
                } else {
                        cursor->flags |= HAMMER_CURSOR_DISKEOF |
                                         HAMMER_CURSOR_ATEDISK;
                        if (error == ENOENT)
                                error = 0;
                }
        } else {
                cursor->flags |= HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_ATEDISK;
                error = 0;
        }
        return(error);
}
/*
 * Helper for hammer_ip_next()
 *
 * The caller has determined that the media cursor is further along than the
 * memory cursor and must be reseeked after a generation number change.
 */
static
int
_hammer_ip_reseek(hammer_cursor_t cursor)
{
        struct hammer_base_elm save;
        hammer_btree_elm_t elm;
        int error;
        int r;
        int again = 0;

        /*
         * Do the re-seek.
         */
        kprintf("HAMMER: Debug: re-seeked during scan @ino=%016llx\n",
                (long long)cursor->ip->obj_id);
        save = cursor->key_beg;
        cursor->key_beg = cursor->iprec->leaf.base;
        error = _hammer_ip_seek_btree(cursor);
        KKASSERT(error == 0);
        cursor->key_beg = save;

        /*
         * If the memory record was previously returned to
         * the caller and the media record matches
         * (-1/+1: only create_tid differs), then iterate
         * the media record to avoid a double result.
         */
        if ((cursor->flags & HAMMER_CURSOR_ATEDISK) == 0 &&
            (cursor->flags & HAMMER_CURSOR_LASTWASMEM)) {
                elm = &cursor->node->ondisk->elms[cursor->index];
                r = hammer_btree_cmp(&elm->base,
                                     &cursor->iprec->leaf.base);
                if (cursor->flags & HAMMER_CURSOR_ASOF) {
                        if (r >= -1 && r <= 1) {
                                kprintf("HAMMER: Debug: iterated after "
                                        "re-seek (asof r=%d)\n", r);
                                cursor->flags |= HAMMER_CURSOR_ATEDISK;
                                again = 1;
                        }
                } else {
                        if (r == 0) {
                                kprintf("HAMMER: Debug: iterated after "
                                        "re-seek\n");
                                cursor->flags |= HAMMER_CURSOR_ATEDISK;
                                again = 1;
                        }
                }
        }
        return(again);
}
/*
 * Locate the first record within the cursor's key_beg/key_end range,
 * restricted to a particular inode.  0 is returned on success, ENOENT
 * if no records matched the requested range, or some other error.
 *
 * When 0 is returned hammer_ip_next() may be used to iterate additional
 * records within the requested range.
 *
 * This function can return EDEADLK, requiring the caller to terminate
 * the cursor and try again.
 */
int
hammer_ip_first(hammer_cursor_t cursor)
{
        hammer_inode_t ip = cursor->ip;
        int error;

        KKASSERT(ip != NULL);

        /*
         * Clean up fields and set up for merged scan
         */
        cursor->flags &= ~HAMMER_CURSOR_RETEST;

        /*
         * Search the in-memory record list (Red-Black tree).  Unlike the
         * B-Tree search, mem_first checks for records in the range.
         *
         * This function will set up both ATEMEM and MEMEOF properly for
         * the ip iteration.  ATEMEM will be set if MEMEOF is set.
         */
        hammer_mem_first(cursor);

        /*
         * Detect generation changes during blockages, including
         * blockages which occur on the initial btree search.
         */
        cursor->rec_generation = cursor->ip->rec_generation;

        /*
         * Initial search and result
         */
        error = _hammer_ip_seek_btree(cursor);
        if (error == 0)
                error = hammer_ip_next(cursor);

        return(error);
}
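/*
 * Usage sketch (illustrative only, not part of the original source):
 * a merged scan over an inode's key range follows the first/next
 * protocol.
 */
#if 0
        error = hammer_ip_first(&cursor);
        while (error == 0) {
                /* cursor.leaf points at the current memory/media record */
                error = hammer_ip_resolve_data(&cursor);
                if (error)
                        break;
                /* ... process cursor.leaf and cursor.data ... */
                error = hammer_ip_next(&cursor);
        }
        if (error == ENOENT)
                error = 0;      /* clean end of range */
#endif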
/*
 * Retrieve the next record in a merged iteration within the bounds of the
 * cursor.  This call may be made multiple times after the cursor has been
 * initially searched with hammer_ip_first().
 *
 * There are numerous special cases in this code to deal with races between
 * in-memory records and on-media records.
 *
 * 0 is returned on success, ENOENT if no further records match the
 * requested range, or some other error code is returned.
 */
int
hammer_ip_next(hammer_cursor_t cursor)
{
        hammer_btree_elm_t elm;
        hammer_record_t rec;
        hammer_record_t tmprec;
        int error;
        int r;

again:
        /*
         * Get the next on-disk record
         *
         * NOTE: If we deleted the last on-disk record we had scanned
         *       ATEDISK will be clear and RETEST will be set, forcing
         *       a call to iterate.  The fact that ATEDISK is clear causes
         *       iterate to re-test the 'current' element.  If ATEDISK is
         *       set, iterate will skip the 'current' element.
         */
        error = 0;
        if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
                if (cursor->flags & (HAMMER_CURSOR_ATEDISK |
                                     HAMMER_CURSOR_RETEST)) {
                        error = hammer_btree_iterate(cursor);
                        cursor->flags &= ~HAMMER_CURSOR_RETEST;
                        if (error == 0) {
                                cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
                                hammer_cache_node(&cursor->ip->cache[1],
                                                  cursor->node);
                        } else if (error == ENOENT) {
                                cursor->flags |= HAMMER_CURSOR_DISKEOF |
                                                 HAMMER_CURSOR_ATEDISK;
                                error = 0;
                        }
                }
        }

        /*
         * If the generation changed the backend has deleted or committed
         * one or more memory records since our last check.
         *
         * When this case occurs if the disk cursor is > current memory record
         * or the disk cursor is at EOF, we must re-seek the disk-cursor.
         * Since the cursor is ahead it must have not yet been eaten (if
         * not at eof anyway). (XXX data offset case?)
         *
         * NOTE: we are not doing a full check here.  That will be handled
         *       later on.
         *
         * If we have exhausted all memory records we do not have to do any
         * further seeks.
         */
        while (cursor->rec_generation != cursor->ip->rec_generation &&
               error == 0) {
                kprintf("HAMMER: Debug: generation changed during scan "
                        "@ino=%016llx\n", (long long)cursor->ip->obj_id);
                cursor->rec_generation = cursor->ip->rec_generation;
                if (cursor->flags & HAMMER_CURSOR_MEMEOF)
                        break;
                if (cursor->flags & HAMMER_CURSOR_DISKEOF) {
                        r = 1;
                } else {
                        KKASSERT((cursor->flags & HAMMER_CURSOR_ATEDISK) == 0);
                        elm = &cursor->node->ondisk->elms[cursor->index];
                        r = hammer_btree_cmp(&elm->base,
                                             &cursor->iprec->leaf.base);
                }

                /*
                 * Do we re-seek the media cursor?
                 */
                if (r > 0) {
                        if (_hammer_ip_reseek(cursor))
                                goto again;
                }
                break;
        }

        /*
         * We can now safely get the next in-memory record.  We cannot
         * block here.
         *
         * hammer_rec_scan_cmp:  Is the record still in our general range,
         *                       (non-inclusive of snapshot exclusions)?
         * hammer_rec_scan_callback: Is the record in our snapshot?
         */
        tmprec = NULL;
        if ((cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
                /*
                 * If the current memory record was eaten then get the next
                 * one.  Stale records are skipped.
                 */
                if (cursor->flags & HAMMER_CURSOR_ATEMEM) {
                        tmprec = cursor->iprec;
                        cursor->iprec = NULL;
                        rec = hammer_rec_rb_tree_RB_NEXT(tmprec);
                        while (rec) {
                                if (hammer_rec_scan_cmp(rec, cursor) != 0)
                                        break;
                                if (hammer_rec_scan_callback(rec, cursor) != 0)
                                        break;
                                rec = hammer_rec_rb_tree_RB_NEXT(rec);
                        }
                        if (cursor->iprec) {
                                KKASSERT(cursor->iprec == rec);
                                cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
                        } else {
                                cursor->flags |= HAMMER_CURSOR_MEMEOF;
                        }
                        cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
                }
        }

        /*
         * MEMORY RECORD VALIDITY TEST
         *
         * (We still can't block, which is why tmprec is being held so
         * long).
         *
         * If the memory record is no longer valid we skip it.  It may
         * have been deleted by the frontend.  If it was deleted or
         * committed by the backend the generation change re-seeked the
         * disk cursor and the record will be present there.
         */
        if (error == 0 && (cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
                KKASSERT(cursor->iprec);
                KKASSERT((cursor->flags & HAMMER_CURSOR_ATEMEM) == 0);
                if (!hammer_ip_iterate_mem_good(cursor, cursor->iprec)) {
                        cursor->flags |= HAMMER_CURSOR_ATEMEM;
                        if (tmprec)
                                hammer_rel_mem_record(tmprec);
                        goto again;
                }
        }
        if (tmprec)
                hammer_rel_mem_record(tmprec);

        /*
         * Extract either the disk or memory record depending on their
         * relative position.
         */
        error = 0;
        switch(cursor->flags & (HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM)) {
        case 0:
                /*
                 * Both entries valid.  Compare the entries and nominally
                 * return the first one in the sort order.  Numerous cases
                 * require special attention, however.
                 */
                elm = &cursor->node->ondisk->elms[cursor->index];
                r = hammer_btree_cmp(&elm->base, &cursor->iprec->leaf.base);

                /*
                 * If the two entries differ only by their key (-2/2) or
                 * create_tid (-1/1), and are DATA records, we may have a
                 * nominal match.  We have to calculate the base file
                 * offset of the data.
                 */
                if (r <= 2 && r >= -2 && r != 0 &&
                    cursor->ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE &&
                    cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
                        int64_t base1 = elm->leaf.base.key - elm->leaf.data_len;
                        int64_t base2 = cursor->iprec->leaf.base.key -
                                        cursor->iprec->leaf.data_len;
                        if (base1 == base2)
                                r = 0;
                }

                if (r < 0) {
                        error = hammer_btree_extract(cursor,
                                                     HAMMER_CURSOR_GET_LEAF);
                        cursor->flags |= HAMMER_CURSOR_ATEDISK;
                        cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
                        break;
                }

                /*
                 * If the entries match exactly the memory entry is either
                 * an on-disk directory entry deletion or a bulk data
                 * overwrite.  If it is a directory entry deletion we eat
                 * both entries.
                 *
                 * For the bulk-data overwrite case it is possible to have
                 * visibility into both, which simply means the syncer
                 * hasn't gotten around to doing the delete+insert sequence
                 * on the B-Tree.  Use the memory entry and throw away the
                 * on-disk entry.
                 *
                 * If the in-memory record is not either of these we
                 * probably caught the syncer while it was syncing it to
                 * the media.  Since we hold a shared lock on the cursor,
                 * the in-memory record had better be marked deleted at
                 * this point.
                 */
                if (r == 0) {
                        if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL) {
                                if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
                                        cursor->flags |= HAMMER_CURSOR_ATEDISK;
                                        cursor->flags |= HAMMER_CURSOR_ATEMEM;
                                        goto again;
                                }
                        } else if (cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
                                if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
                                        cursor->flags |= HAMMER_CURSOR_ATEDISK;
                                }
                                /* fall through to memory entry */
                        } else {
                                panic("hammer_ip_next: duplicate mem/b-tree "
                                      "entry %p %d %08x", cursor->iprec,
                                      cursor->iprec->type,
                                      cursor->iprec->flags);
                                cursor->flags |= HAMMER_CURSOR_ATEMEM;
                                goto again;
                        }
                }
                /* fall through to the memory entry */
        case HAMMER_CURSOR_ATEDISK:
                /*
                 * Only the memory entry is valid.
                 */
                cursor->leaf = &cursor->iprec->leaf;
                cursor->flags |= HAMMER_CURSOR_ATEMEM;
                cursor->flags |= HAMMER_CURSOR_LASTWASMEM;

                /*
                 * If the memory entry is an on-disk deletion we should have
                 * also have found a B-Tree record.  If the backend beat us
                 * to it, it would have interlocked the cursor and we should
                 * have seen the in-memory record marked DELETED_FE.
                 */
                if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL &&
                    (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
                        panic("hammer_ip_next: del-on-disk with no b-tree "
                              "entry iprec %p flags %08x",
                              cursor->iprec, cursor->iprec->flags);
                }
                break;
        case HAMMER_CURSOR_ATEMEM:
                /*
                 * Only the disk entry is valid
                 */
                error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
                cursor->flags |= HAMMER_CURSOR_ATEDISK;
                cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
                break;
        default:
                /*
                 * Neither entry is valid
                 *
                 * XXX error not set properly
                 */
                cursor->flags &= ~HAMMER_CURSOR_LASTWASMEM;
                cursor->leaf = NULL;
                error = ENOENT;
                break;
        }
        return(error);
}
/*
 * Resolve the cursor->data pointer for the current cursor position in
 * a merged iteration.
 */
int
hammer_ip_resolve_data(hammer_cursor_t cursor)
{
        hammer_record_t record;
        int error;

        if (hammer_cursor_inmem(cursor)) {
                /*
                 * The data associated with an in-memory record is usually
                 * kmalloced, but reserve-ahead data records will have an
                 * on-disk reference.
                 *
                 * NOTE: Reserve-ahead data records must be handled in the
                 * context of the related high level buffer cache buffer
                 * to interlock against async writes.
                 */
                record = cursor->iprec;
                cursor->data = record->data;
                error = 0;
                if (cursor->data == NULL) {
                        KKASSERT(record->leaf.base.rec_type ==
                                 HAMMER_RECTYPE_DATA);
                        cursor->data = hammer_bread_ext(cursor->trans->hmp,
                                                record->leaf.data_offset,
                                                record->leaf.data_len,
                                                &error,
                                                &cursor->data_buffer);
                }
        } else {
                cursor->leaf = &cursor->node->ondisk->elms[cursor->index].leaf;
                error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
        }
        return(error);
}
/*
 * Backend truncation / record replacement - delete records in range.
 *
 * Delete all records within the specified range for inode ip.  In-memory
 * records still associated with the frontend are ignored.
 *
 * If truncating is non-zero in-memory records associated with the back-end
 * are ignored.  If truncating is > 1 we can return EWOULDBLOCK.
 *
 * NOTES:
 *
 *      * An unaligned range will cause new records to be added to cover
 *        the edge cases. (XXX not implemented yet).
 *
 *      * Replacement via reservations (see hammer_ip_sync_record_cursor())
 *        also do not deal with unaligned ranges.
 *
 *      * ran_end is inclusive (e.g. 0,1023 instead of 0,1024).
 *
 *      * Record keys for regular file data have to be special-cased since
 *        they indicate the end of the range (key = base + bytes).
 *
 *      * This function may be asked to delete ridiculously huge ranges, for
 *        example if someone truncates or removes a 1TB regular file.  We
 *        must be very careful on restarts and we may have to stop w/
 *        EWOULDBLOCK to avoid blowing out the buffer cache.
 */
int
hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
                       int64_t ran_beg, int64_t ran_end, int truncating)
{
        hammer_transaction_t trans = cursor->trans;
        hammer_btree_leaf_elm_t leaf;
        int error;
        int64_t off;
        int64_t tmp64;

#if 0
        kprintf("delete_range %p %016llx-%016llx\n", ip, ran_beg, ran_end);
#endif

        KKASSERT(trans->type == HAMMER_TRANS_FLS);
retry:
        hammer_normalize_cursor(cursor);
        cursor->key_beg.localization = ip->obj_localization +
                                       HAMMER_LOCALIZE_MISC;
        cursor->key_beg.obj_id = ip->obj_id;
        cursor->key_beg.create_tid = 0;
        cursor->key_beg.delete_tid = 0;
        cursor->key_beg.obj_type = 0;

        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
                cursor->key_beg.key = ran_beg;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_DB;
        } else {
                /*
                 * The key in the B-Tree is (base+bytes), so the first possible
                 * matching key is ran_beg + 1.
                 */
                cursor->key_beg.key = ran_beg + 1;
                cursor->key_beg.rec_type = HAMMER_RECTYPE_DATA;
        }

        cursor->key_end = cursor->key_beg;
        if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
                cursor->key_end.key = ran_end;
        } else {
                tmp64 = ran_end + MAXPHYS + 1;  /* work around GCC-4 bug */
                if (tmp64 < ran_end)
                        cursor->key_end.key = 0x7FFFFFFFFFFFFFFFLL;
                else
                        cursor->key_end.key = ran_end + MAXPHYS + 1;
        }

        cursor->asof = ip->obj_asof;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_ASOF;
        cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
        cursor->flags |= HAMMER_CURSOR_BACKEND;
        cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE;

        error = hammer_ip_first(cursor);

        /*
         * Iterate through matching records and mark them as deleted.
         */
        while (error == 0) {
                leaf = cursor->leaf;

                KKASSERT(leaf->base.delete_tid == 0);
                KKASSERT(leaf->base.obj_id == ip->obj_id);

                /*
                 * There may be overlap cases for regular file data.  Also
                 * remember the key for a regular file record is (base + len),
                 * NOT (base).
                 *
                 * Note that due to duplicates (mem & media) allowed by
                 * DELETE_VISIBILITY, off can wind up less than ran_beg.
                 */
                if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
                        off = leaf->base.key - leaf->data_len;
                        /*
                         * Check the left edge case.  We currently do not
                         * split existing records.
                         */
                        if (off < ran_beg && leaf->base.key > ran_beg) {
                                panic("hammer left edge case %016llx %d\n",
                                      leaf->base.key, leaf->data_len);
                        }

                        /*
                         * Check the right edge case.  Note that the
                         * record can be completely out of bounds, which
                         * terminates the search.
                         *
                         * base->key is exclusive of the right edge while
                         * ran_end is inclusive of the right edge.  The
                         * (key - data_len) left boundary is inclusive.
                         *
                         * XXX theory-check this test at some point, are
                         * we missing a + 1 somewhere?  Note that ran_end
                         * could overflow.
                         */
                        if (leaf->base.key - 1 > ran_end) {
                                if (leaf->base.key - leaf->data_len > ran_end)
                                        break;
                                panic("hammer right edge case\n");
                        }
                } else {
                        off = leaf->base.key;
                }

                /*
                 * Delete the record.  When truncating we do not delete
                 * in-memory (data) records because they represent data
                 * written after the truncation.
                 *
                 * This will also physically destroy the B-Tree entry and
                 * data if the retention policy dictates.  The function
                 * will set HAMMER_CURSOR_RETEST to cause hammer_ip_next()
                 * to retest the new 'current' element.
                 */
                if (truncating == 0 || hammer_cursor_ondisk(cursor)) {
                        error = hammer_ip_delete_record(cursor, ip, trans->tid);
                        /*
                         * If we have built up too many meta-buffers we risk
                         * deadlocking the kernel and must stop.  This can
                         * occur when deleting ridiculously huge files.
                         * sync_trunc_off is updated so the next cycle does
                         * not re-iterate records we have already deleted.
                         *
                         * This is only done with formal truncations.
                         */
                        if (truncating > 1 && error == 0 &&
                            hammer_flusher_meta_limit(ip->hmp)) {
                                ip->sync_trunc_off = off;
                                error = EWOULDBLOCK;
                        }
                }
                if (error)
                        break;
                ran_beg = off;  /* for restart */
                error = hammer_ip_next(cursor);
        }
        if (cursor->node)
                hammer_cache_node(&ip->cache[1], cursor->node);

        if (error == EDEADLK) {
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
                if (error == 0)
                        goto retry;
        }
        if (error == ENOENT)
                error = 0;
        return(error);
}
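/*
 * Usage sketch (illustrative only, not part of the original source): a
 * formal truncation passes truncating > 1 and must be prepared to loop
 * when the flusher's meta-data limit forces an early EWOULDBLOCK return.
 * ip->sync_trunc_off records where to resume.
 */
#if 0
        while ((error = hammer_ip_delete_range(cursor, ip, aligned_beg,
                                               0x7FFFFFFFFFFFFFFFLL,
                                               2)) == EWOULDBLOCK) {
                /* let the flusher drain accumulated meta-data buffers,
                 * then restart from ip->sync_trunc_off */
        }
#endif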
/*
 * This backend function deletes the specified record on-disk, similar to
 * delete_range but for a specific record.  Unlike the exact deletions
 * used when deleting a directory entry this function uses an ASOF search
 * like delete_range.
 *
 * This function may be called with ip->obj_asof set for a slave snapshot,
 * so don't use it.  We always delete non-historical records only.
 */
static int
hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
                      hammer_btree_leaf_elm_t leaf)
{
        hammer_transaction_t trans = cursor->trans;
        int error;

        KKASSERT(trans->type == HAMMER_TRANS_FLS);
retry:
        hammer_normalize_cursor(cursor);
        cursor->key_beg = leaf->base;
        cursor->asof = HAMMER_MAX_TID;
        cursor->flags &= ~HAMMER_CURSOR_INITMASK;
        cursor->flags |= HAMMER_CURSOR_ASOF;
        cursor->flags |= HAMMER_CURSOR_BACKEND;
        cursor->flags &= ~HAMMER_CURSOR_INSERT;

        error = hammer_btree_lookup(cursor);
        if (error == 0) {
                error = hammer_ip_delete_record(cursor, ip, trans->tid);
        }
        if (error == EDEADLK) {
                hammer_done_cursor(cursor);
                error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
                if (error == 0)
                        goto retry;
        }
        return(error);
}

/*
 * This function deletes remaining auxiliary records when an inode is
 * being deleted.  This function explicitly does not delete the
 * inode record, directory entry, data, or db records.  Those must be
 * properly disposed of prior to this call.
 */
int
hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip, int *countp)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_btree_leaf_elm_t leaf;
	int error;

	KKASSERT(trans->type == HAMMER_TRANS_FLS);
retry:
	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_MISC;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.obj_type = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_CLEAN_START;
	cursor->key_beg.key = HAMMER_MIN_KEY;

	cursor->key_end = cursor->key_beg;
	cursor->key_end.rec_type = HAMMER_RECTYPE_MAX;
	cursor->key_end.key = HAMMER_MAX_KEY;

	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_ip_first(cursor);

	/*
	 * Iterate through matching records and mark them as deleted.
	 */
	while (error == 0) {
		leaf = cursor->leaf;

		KKASSERT(leaf->base.delete_tid == 0);

		/*
		 * Mark the record and B-Tree entry as deleted.  This will
		 * also physically delete the B-Tree entry, record, and
		 * data if the retention policy dictates.  The function
		 * will set HAMMER_CURSOR_RETEST to cause hammer_ip_next()
		 * to retest the new 'current' element.
		 *
		 * Directory entries (and delete-on-disk directory entries)
		 * must be synced and cannot be deleted.
		 */
		error = hammer_ip_delete_record(cursor, ip, trans->tid);
		++*countp;
		if (error)
			break;
		error = hammer_ip_next(cursor);
	}
	if (cursor->node)
		hammer_cache_node(&ip->cache[1], cursor->node);
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
		if (error == 0)
			goto retry;
	}
	if (error == ENOENT)
		error = 0;
	return(error);
}
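
/*
 * Example: a minimal sketch reusing the CLEAN_START..RECTYPE_MAX
 * key-range setup above to merely count an inode's remaining auxiliary
 * records instead of deleting them.  The example_ name is hypothetical
 * and the EDEADLK retry idiom is omitted for brevity.
 */
static int
example_count_aux_records(hammer_cursor_t cursor, hammer_inode_t ip,
			  int *countp)
{
	int error;

	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_MISC;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.obj_type = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_CLEAN_START;
	cursor->key_beg.key = HAMMER_MIN_KEY;

	cursor->key_end = cursor->key_beg;
	cursor->key_end.rec_type = HAMMER_RECTYPE_MAX;
	cursor->key_end.key = HAMMER_MAX_KEY;

	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	*countp = 0;
	error = hammer_ip_first(cursor);
	while (error == 0) {
		++*countp;
		error = hammer_ip_next(cursor);
	}
	return(error == ENOENT ? 0 : error);
}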

/*
 * Delete the record at the current cursor.  On success the cursor will
 * be positioned appropriately for an iteration but may no longer be at
 * a leaf node.
 *
 * This routine is only called from the backend.
 *
 * NOTE: This can return EDEADLK, requiring the caller to terminate
 * the cursor and retry.
 */
int
hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
			hammer_tid_t tid)
{
	hammer_record_t iprec;
	hammer_mount_t hmp;
	int error;

	KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
	KKASSERT(tid != 0);
	hmp = cursor->node->hmp;

	/*
	 * In-memory (unsynchronized) records can simply be freed.  This
	 * only occurs in range iterations since all other records are
	 * individually synchronized.  Thus there should be no confusion
	 * with the interlock.
	 *
	 * An in-memory record may be deleted before being committed to disk,
	 * but could have been accessed in the meantime.  The reservation
	 * code will deal with the case.
	 */
	if (hammer_cursor_inmem(cursor)) {
		iprec = cursor->iprec;
		KKASSERT((iprec->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
		iprec->flags |= HAMMER_RECF_DELETED_FE;
		iprec->flags |= HAMMER_RECF_DELETED_BE;
		KKASSERT(iprec->ip == ip);
		++ip->rec_generation;
		return(0);
	}

	/*
	 * On-disk records are marked as deleted by updating their delete_tid.
	 * This does not affect their position in the B-Tree (which is based
	 * on their create_tid).
	 *
	 * Frontend B-Tree operations track inodes so we tell
	 * hammer_delete_at_cursor() not to.
	 */
	error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);

	if (error == 0) {
		error = hammer_delete_at_cursor(
				cursor,
				HAMMER_DELETE_ADJUST | hammer_nohistory(ip),
				cursor->trans->tid,
				cursor->trans->time32,
				0, NULL);
	}
	return(error);
}
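
/*
 * Example: a minimal sketch of the canonical backend deletion loop
 * built on hammer_ip_delete_record(), assuming the caller has already
 * positioned key_beg/key_end on the cursor.  Each successful deletion
 * sets HAMMER_CURSOR_RETEST, so hammer_ip_next() re-tests rather than
 * skips the element that slides into the current position.  The
 * example_ name is hypothetical.
 */
static int
example_delete_all_in_range(hammer_cursor_t cursor, hammer_inode_t ip,
			    hammer_tid_t tid)
{
	int error;

	error = hammer_ip_first(cursor);
	while (error == 0) {
		error = hammer_ip_delete_record(cursor, ip, tid);
		if (error == 0)
			error = hammer_ip_next(cursor);
	}
	return(error == ENOENT ? 0 : error);
}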

/*
 * Delete the B-Tree element at the current cursor and do any necessary
 * mirror propagation.
 *
 * The cursor must be properly positioned for an iteration on return but
 * may be pointing at an internal element.
 *
 * An element can be un-deleted by passing a delete_tid of 0 with
 * HAMMER_DELETE_ADJUST.
 */
int
hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags,
			hammer_tid_t delete_tid, u_int32_t delete_ts,
			int track, int64_t *stat_bytes)
{
	struct hammer_btree_leaf_elm save_leaf;
	hammer_transaction_t trans;
	hammer_btree_leaf_elm_t leaf;
	hammer_node_t node;
	hammer_btree_elm_t elm;
	hammer_off_t data_offset;
	int32_t data_len;
	u_int16_t rec_type;
	int error;
	int icount;
	int doprop;

	error = hammer_cursor_upgrade(cursor);
	if (error)
		return(error);

	trans = cursor->trans;
	node = cursor->node;
	elm = &node->ondisk->elms[cursor->index];
	leaf = &elm->leaf;
	KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);

	hammer_sync_lock_sh(trans);
	doprop = 0;
	icount = 0;

	/*
	 * Adjust the delete_tid.  Update the mirror_tid propagation field
	 * as well.  delete_tid can be 0 (undelete -- used by mirroring).
	 */
	if (delete_flags & HAMMER_DELETE_ADJUST) {
		if (elm->base.rec_type == HAMMER_RECTYPE_INODE) {
			if (elm->leaf.base.delete_tid == 0 && delete_tid)
				icount = -1;
			if (elm->leaf.base.delete_tid && delete_tid == 0)
				icount = 1;
		}

		hammer_modify_node(trans, node, elm, sizeof(*elm));
		elm->leaf.base.delete_tid = delete_tid;
		elm->leaf.delete_ts = delete_ts;
		hammer_modify_node_done(node);

		if (elm->leaf.base.delete_tid > node->ondisk->mirror_tid) {
			hammer_modify_node_field(trans, node, mirror_tid);
			node->ondisk->mirror_tid = elm->leaf.base.delete_tid;
			hammer_modify_node_done(node);
			doprop = 1;
			if (hammer_debug_general & 0x0002) {
				kprintf("delete_at_cursor: propagate %016llx"
					" @%016llx\n",
					elm->leaf.base.delete_tid,
					node->node_offset);
			}
		}

		/*
		 * Adjust for the iteration.  We have deleted the current
		 * element and want to clear ATEDISK so the iteration does
		 * not skip the element after, which now becomes the current
		 * element.  This element must be re-tested if doing an
		 * iteration, which is handled by the RETEST flag.
		 */
		if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
			cursor->flags |= HAMMER_CURSOR_RETEST;
			cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
		}

		/*
		 * An on-disk record cannot have the same delete_tid
		 * as its create_tid.  In a chain of record updates
		 * this could result in a duplicate record.
		 */
		KKASSERT(elm->leaf.base.delete_tid !=
			 elm->leaf.base.create_tid);
	}

	/*
	 * Destroy the B-Tree element if asked (typically if a nohistory
	 * file or mount, or when called by the pruning code).
	 *
	 * Adjust the ATEDISK flag to properly support iterations.
	 */
	if (delete_flags & HAMMER_DELETE_DESTROY) {
		data_offset = elm->leaf.data_offset;
		data_len = elm->leaf.data_len;
		rec_type = elm->leaf.base.rec_type;
		if (doprop) {
			save_leaf = elm->leaf;
			leaf = &save_leaf;
		}
		if (elm->base.rec_type == HAMMER_RECTYPE_INODE &&
		    elm->leaf.base.delete_tid == 0) {
			icount = -1;
		}

		error = hammer_btree_delete(cursor);
		if (error == 0) {
			/*
			 * The deletion moves the next element (if any) to
			 * the current element position.  We must clear
			 * ATEDISK so this element is not skipped and we
			 * must set RETEST to force any iteration to re-test
			 * the element.
			 */
			if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
				cursor->flags |= HAMMER_CURSOR_RETEST;
				cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
			}
		}
		if (error == 0) {
			switch(data_offset & HAMMER_OFF_ZONE_MASK) {
			case HAMMER_ZONE_LARGE_DATA:
			case HAMMER_ZONE_SMALL_DATA:
			case HAMMER_ZONE_META:
				hammer_blockmap_free(trans,
						     data_offset, data_len);
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Track inode count and next_tid.  This is used by the mirroring
	 * and PFS code.  icount can be negative, zero, or positive.
	 */
	if (error == 0 && track) {
		if (icount) {
			hammer_modify_volume_field(trans, trans->rootvol,
						   vol0_stat_inodes);
			trans->rootvol->ondisk->vol0_stat_inodes += icount;
			hammer_modify_volume_done(trans->rootvol);
		}
		if (trans->rootvol->ondisk->vol0_next_tid < delete_tid) {
			hammer_modify_volume(trans, trans->rootvol, NULL, 0);
			trans->rootvol->ondisk->vol0_next_tid = delete_tid;
			hammer_modify_volume_done(trans->rootvol);
		}
	}

	/*
	 * mirror_tid propagation occurs if the node's mirror_tid had to be
	 * updated while adjusting the delete_tid.
	 *
	 * This occurs when deleting even in nohistory mode, but does not
	 * occur when pruning an already-deleted node.
	 *
	 * cursor->ip is NULL when called from the pruning, mirroring,
	 * and pfs code.  If non-NULL propagation will be conditionalized
	 * on whether the PFS is in no-history mode or not.
	 */
	if (doprop) {
		if (cursor->ip)
			hammer_btree_do_propagation(cursor, cursor->ip->pfsm,
						    leaf);
		else
			hammer_btree_do_propagation(cursor, NULL, leaf);
	}
	hammer_sync_unlock(trans);
	return (error);
}
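
/*
 * Example: a minimal sketch of the undelete capability documented
 * above, as used by the mirroring code.  Passing delete_tid 0 with
 * HAMMER_DELETE_ADJUST clears the deletion; with track set, an inode
 * record being undeleted bumps vol0_stat_inodes back up.  The example_
 * name is hypothetical and the cursor is assumed to already be
 * positioned on the target leaf element.
 */
static int
example_undelete_at_cursor(hammer_cursor_t cursor)
{
	/*
	 * delete_tid = 0, delete_ts = 0: the record becomes visible
	 * again.  track = 1 keeps the root volume statistics
	 * consistent; no stat_bytes tracking is requested.
	 */
	return(hammer_delete_at_cursor(cursor, HAMMER_DELETE_ADJUST,
				       0, 0, 1, NULL));
}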

/*
 * Determine whether we can remove a directory.  This routine checks whether
 * a directory is empty or not and enforces flush connectivity.
 *
 * Flush connectivity requires that we block if the target directory is
 * currently flushing, otherwise it may not end up in the same flush group.
 *
 * Returns 0 on success, ENOTEMPTY or EDEADLK (or other errors) on failure.
 */
int
hammer_ip_check_directory_empty(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	int error;

	/*
	 * Check directory empty
	 */
	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);

	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
	cursor.key_beg.key = HAMMER_MIN_KEY;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.rec_type = 0xFFFF;
	cursor.key_end.key = HAMMER_MAX_KEY;

	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	error = hammer_ip_first(&cursor);
	if (error == ENOENT)
		error = 0;
	else if (error == 0)
		error = ENOTEMPTY;
	hammer_done_cursor(&cursor);
	return(error);
}
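
/*
 * Example: a minimal sketch of how an rmdir path might drive the check
 * above.  Because the routine builds and tears down its own cursor, an
 * EDEADLK return can simply be retried here; ENOTEMPTY propagates to
 * the caller (and ultimately userland).  The example_ name is
 * hypothetical.
 */
static int
example_rmdir_check(hammer_transaction_t trans, hammer_inode_t dip)
{
	int error;

	do {
		error = hammer_ip_check_directory_empty(trans, dip);
	} while (error == EDEADLK);
	return(error);
}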