/*
 * Copyright (c) 2007-2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_object.c,v 1.83 2008/07/07 22:42:35 dillon Exp $
 */
#include "hammer.h"

static int hammer_mem_add(hammer_record_t record);
static int hammer_mem_lookup(hammer_cursor_t cursor);
static int hammer_mem_first(hammer_cursor_t cursor);
static int hammer_frontend_trunc_callback(hammer_record_t record,
				void *data __unused);
static int hammer_record_needs_overwrite_delete(hammer_record_t record);
static int hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
				hammer_btree_leaf_elm_t leaf);

struct rec_trunc_info {
	u_int16_t	rec_type;
	int64_t		trunc_off;
};
/*
 * Red-black tree support.  Comparison code for insertion.
 */
static int
hammer_rec_rb_compare(hammer_record_t rec1, hammer_record_t rec2)
{
	if (rec1->leaf.base.rec_type < rec2->leaf.base.rec_type)
		return(-1);
	if (rec1->leaf.base.rec_type > rec2->leaf.base.rec_type)
		return(1);

	if (rec1->leaf.base.key < rec2->leaf.base.key)
		return(-1);
	if (rec1->leaf.base.key > rec2->leaf.base.key)
		return(1);

	/*
	 * Never match against an item deleted by the front-end.
	 *
	 * rec1 is greater than rec2 if rec1 is marked deleted.
	 * rec1 is less than rec2 if rec2 is marked deleted.
	 *
	 * Multiple deleted records may be present, do not return 0
	 * if both are marked deleted.
	 */
	if (rec1->flags & HAMMER_RECF_DELETED_FE)
		return(1);
	if (rec2->flags & HAMMER_RECF_DELETED_FE)
		return(-1);

	return(0);
}
/*
 * Basic record comparison code similar to hammer_btree_cmp().
 */
static int
hammer_rec_cmp(hammer_base_elm_t elm, hammer_record_t rec)
{
	if (elm->rec_type < rec->leaf.base.rec_type)
		return(-3);
	if (elm->rec_type > rec->leaf.base.rec_type)
		return(3);

	if (elm->key < rec->leaf.base.key)
		return(-2);
	if (elm->key > rec->leaf.base.key)
		return(2);

	/*
	 * Never match against an item deleted by the front-end.
	 * elm is less than rec if rec is marked deleted.
	 */
	if (rec->flags & HAMMER_RECF_DELETED_FE)
		return(-1);

	return(0);
}
/*
 * Special LOOKUP_INFO to locate an overlapping record.  This is used by
 * the reservation code to implement small-block records (whose keys will
 * be different depending on data_len, when representing the same base
 * offset).
 *
 * NOTE: The base file offset of a data record is (key - data_len), not (key).
 */
static int
hammer_rec_overlap_compare(hammer_btree_leaf_elm_t leaf, hammer_record_t rec)
{
	if (leaf->base.rec_type < rec->leaf.base.rec_type)
		return(-3);
	if (leaf->base.rec_type > rec->leaf.base.rec_type)
		return(3);

	if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
		/* leaf_end <= rec_beg */
		if (leaf->base.key <= rec->leaf.base.key - rec->leaf.data_len)
			return(-2);
		/* leaf_beg >= rec_end */
		if (leaf->base.key - leaf->data_len >= rec->leaf.base.key)
			return(2);
	} else {
		if (leaf->base.key < rec->leaf.base.key)
			return(-2);
		if (leaf->base.key > rec->leaf.base.key)
			return(2);
	}

	/*
	 * Never match against an item deleted by the front-end.
	 * leaf is less than rec if rec is marked deleted.
	 *
	 * We must still return the proper code for the scan to continue
	 * along the correct branches.
	 */
	if (rec->flags & HAMMER_RECF_DELETED_FE) {
		if (leaf->base.key < rec->leaf.base.key)
			return(-2);
		if (leaf->base.key > rec->leaf.base.key)
			return(2);
		return(-1);
	}
	return(0);
}
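/*
 * Illustrative example (editorial addition, not part of the original
 * source): a worked instance of the overlap test above, assuming 16KB
 * buffers.  A DATA record covering file bytes [base, base+len) is keyed
 * at base+len, so:
 *
 *	rec  covers [16384, 32768)  ->  key 32768, data_len 16384
 *	leaf covers [0, 16384)      ->  key 16384, data_len 16384
 *
 * Here leaf->base.key (16384) <= rec key - rec data_len (16384), so the
 * ranges do not overlap and the compare returns -2 (leaf sorts entirely
 * before rec).  A leaf covering [8192, 24576) would fail both boundary
 * tests and match.
 */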
/*
 * RB_SCAN comparison code for hammer_mem_first().  The argument order
 * is reversed so the comparison result has to be negated.  key_beg and
 * key_end are both range-inclusive.
 *
 * Localized deletions are not cached in-memory.
 */
static int
hammer_rec_scan_cmp(hammer_record_t rec, void *data)
{
	hammer_cursor_t cursor = data;
	int r;

	r = hammer_rec_cmp(&cursor->key_beg, rec);
	if (r > 1)
		return(-1);
	r = hammer_rec_cmp(&cursor->key_end, rec);
	if (r < -1)
		return(1);
	return(0);
}
/*
 * This compare function is used when simply looking up key_beg.
 */
static int
hammer_rec_find_cmp(hammer_record_t rec, void *data)
{
	hammer_cursor_t cursor = data;
	int r;

	r = hammer_rec_cmp(&cursor->key_beg, rec);
	if (r > 1)
		return(-1);
	if (r < -1)
		return(1);
	return(0);
}
/*
 * Locate blocks within the truncation range.  Partial blocks do not count.
 */
static int
hammer_rec_trunc_cmp(hammer_record_t rec, void *data)
{
	struct rec_trunc_info *info = data;

	if (rec->leaf.base.rec_type < info->rec_type)
		return(-1);
	if (rec->leaf.base.rec_type > info->rec_type)
		return(1);

	switch(rec->leaf.base.rec_type) {
	case HAMMER_RECTYPE_DB:
		/*
		 * DB record key is not beyond the truncation point, retain.
		 */
		if (rec->leaf.base.key < info->trunc_off)
			return(-1);
		break;
	case HAMMER_RECTYPE_DATA:
		/*
		 * DATA record offset start is not beyond the truncation point,
		 * retain.
		 */
		if (rec->leaf.base.key - rec->leaf.data_len < info->trunc_off)
			return(-1);
		break;
	default:
		panic("hammer_rec_trunc_cmp: unexpected record type");
	}

	/*
	 * The record start is >= the truncation point, return match,
	 * the record should be destroyed.
	 */
	return(0);
}
RB_GENERATE(hammer_rec_rb_tree, hammer_record, rb_node, hammer_rec_rb_compare);
RB_GENERATE_XLOOKUP(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
		    hammer_rec_overlap_compare, hammer_btree_leaf_elm_t);
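/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): the RB_GENERATE macros above expand into the tree functions
 * used throughout this file, e.g. hammer_rec_rb_tree_RB_SCAN() and the
 * INFO-keyed hammer_rec_rb_tree_RB_LOOKUP_INFO(), which searches by a
 * hammer_btree_leaf_elm_t via hammer_rec_overlap_compare() rather than
 * by an existing record.  Hedged usage, mirroring hammer_ip_get_bulk()
 * below:
 */
#if 0
	struct hammer_btree_leaf_elm leaf;	/* key filled in by caller */
	hammer_record_t record;

	record = hammer_rec_rb_tree_RB_LOOKUP_INFO(&ip->rec_tree, &leaf);
	if (record)
		hammer_ref(&record->lock);	/* returned referenced */
#endif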
/*
 * Allocate a record for the caller to finish filling in.  The record is
 * returned referenced.
 */
hammer_record_t
hammer_alloc_mem_record(hammer_inode_t ip, int data_len)
{
	hammer_record_t record;

	++hammer_count_records;
	record = kmalloc(sizeof(*record), M_HAMMER,
			 M_WAITOK | M_ZERO | M_USE_RESERVE);
	record->flush_state = HAMMER_FST_IDLE;
	record->ip = ip;
	record->leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	record->leaf.data_len = data_len;
	hammer_ref(&record->lock);

	if (data_len) {
		record->data = kmalloc(data_len, M_HAMMER, M_WAITOK | M_ZERO);
		record->flags |= HAMMER_RECF_ALLOCDATA;
		++hammer_count_record_datas;
	}

	return (record);
}
/*
 * Block until the record is no longer queued to the flusher; 'ident' is
 * the tsleep(9) wait message.
 */
void
hammer_wait_mem_record_ident(hammer_record_t record, const char *ident)
{
	while (record->flush_state == HAMMER_FST_FLUSH) {
		record->flags |= HAMMER_RECF_WANTED;
		tsleep(record, 0, ident, 0);
	}
}
/*
 * Called from the backend, hammer_inode.c, after a record has been
 * flushed to disk.  The record has been exclusively locked by the
 * caller and interlocked with BE.
 *
 * We clean up the state, unlock, and release the record (the record
 * was referenced by the fact that it was in the HAMMER_FST_FLUSH state).
 */
void
hammer_flush_record_done(hammer_record_t record, int error)
{
	hammer_inode_t target_ip;

	KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);

	if (error) {
		/*
		 * An error occurred, the backend was unable to sync the
		 * record to its media.  Leave the record intact.
		 */
		Debugger("flush_record_done error");
	}

	if (record->flags & HAMMER_RECF_DELETED_BE) {
		if ((target_ip = record->target_ip) != NULL) {
			TAILQ_REMOVE(&target_ip->target_list, record,
				     target_entry);
			record->target_ip = NULL;
			hammer_test_inode(target_ip);
		}
		record->flush_state = HAMMER_FST_IDLE;
	} else {
		if (record->target_ip) {
			record->flush_state = HAMMER_FST_SETUP;
			hammer_test_inode(record->ip);
			hammer_test_inode(record->target_ip);
		} else {
			record->flush_state = HAMMER_FST_IDLE;
		}
	}
	record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
	if (record->flags & HAMMER_RECF_WANTED) {
		record->flags &= ~HAMMER_RECF_WANTED;
		wakeup(record);
	}
	hammer_rel_mem_record(record);
}
/*
 * Release a memory record.  Records marked for deletion are immediately
 * removed from the RB-Tree but otherwise left intact until the last ref
 * goes away.
 */
void
hammer_rel_mem_record(struct hammer_record *record)
{
	hammer_inode_t ip, target_ip;

	hammer_unref(&record->lock);

	if (record->lock.refs == 0) {
		/*
		 * Upon release of the last reference wakeup any waiters.
		 * The record structure may get destroyed so callers will
		 * loop up and do a relookup.
		 *
		 * WARNING!  Record must be removed from RB-TREE before we
		 * might possibly block.  hammer_test_inode() can block!
		 */
		ip = record->ip;

		/*
		 * Upon release of the last reference a record marked deleted
		 * is destroyed.
		 */
		if (record->flags & HAMMER_RECF_DELETED_FE) {
			KKASSERT(ip->lock.refs > 0);
			KKASSERT(record->flush_state != HAMMER_FST_FLUSH);

			/*
			 * target_ip may have zero refs, we have to ref it
			 * to prevent it from being ripped out from under
			 * us.
			 */
			if ((target_ip = record->target_ip) != NULL) {
				TAILQ_REMOVE(&target_ip->target_list,
					     record, target_entry);
				record->target_ip = NULL;
				hammer_ref(&target_ip->lock);
			}

			if (record->flags & HAMMER_RECF_ONRBTREE) {
				RB_REMOVE(hammer_rec_rb_tree,
					  &record->ip->rec_tree,
					  record);
				KKASSERT(ip->rsv_recs > 0);
				--ip->hmp->rsv_recs;
				--ip->rsv_recs;
				ip->hmp->rsv_databytes -= record->leaf.data_len;
				record->flags &= ~HAMMER_RECF_ONRBTREE;

				if (RB_EMPTY(&record->ip->rec_tree)) {
					record->ip->flags &= ~HAMMER_INODE_XDIRTY;
					record->ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
					hammer_test_inode(record->ip);
				}
			}

			/*
			 * Do this test after removing record from the B-Tree.
			 */
			if (target_ip) {
				hammer_test_inode(target_ip);
				hammer_rel_inode(target_ip, 0);
			}

			if (record->flags & HAMMER_RECF_ALLOCDATA) {
				--hammer_count_record_datas;
				kfree(record->data, M_HAMMER);
				record->flags &= ~HAMMER_RECF_ALLOCDATA;
			}
			if (record->resv) {
				hammer_blockmap_reserve_complete(ip->hmp,
								 record->resv);
				record->resv = NULL;
			}
			record->data = NULL;
			--hammer_count_records;
			kfree(record, M_HAMMER);
		}
	}
}
/*
 * Record visibility depends on whether the record is being accessed by
 * the backend or the frontend.
 *
 * Return non-zero if the record is visible, zero if it isn't or if it is
 * deleted.
 */
static __inline
int
hammer_ip_iterate_mem_good(hammer_cursor_t cursor, hammer_record_t record)
{
	if (cursor->flags & HAMMER_CURSOR_BACKEND) {
		if (record->flags & HAMMER_RECF_DELETED_BE)
			return(0);
	} else {
		if (record->flags & HAMMER_RECF_DELETED_FE)
			return(0);
	}
	return(1);
}
/*
 * This callback is used as part of the RB_SCAN function for in-memory
 * records.  We terminate it (return -1) as soon as we get a match.
 *
 * This routine is used by frontend code.
 *
 * The primary compare code does not account for ASOF lookups.  This
 * code handles that case as well as a few others.
 */
static int
hammer_rec_scan_callback(hammer_record_t rec, void *data)
{
	hammer_cursor_t cursor = data;

	/*
	 * We terminate on success, so this should be NULL on entry.
	 */
	KKASSERT(cursor->iprec == NULL);

	/*
	 * Skip if the record was marked deleted.
	 */
	if (hammer_ip_iterate_mem_good(cursor, rec) == 0)
		return(0);

	/*
	 * Skip if not visible due to our as-of TID
	 */
	if (cursor->flags & HAMMER_CURSOR_ASOF) {
		if (cursor->asof < rec->leaf.base.create_tid)
			return(0);
		if (rec->leaf.base.delete_tid &&
		    cursor->asof >= rec->leaf.base.delete_tid) {
			return(0);
		}
	}

	/*
	 * If the record is queued to the flusher we have to block until
	 * it isn't.  Otherwise we may see duplication between our memory
	 * cache and the media.
	 */
	hammer_ref(&rec->lock);

#warning "This deadlocks"
#if 0
	if (rec->flush_state == HAMMER_FST_FLUSH)
		hammer_wait_mem_record(rec);
#endif

	/*
	 * The record may have been deleted while we were blocked.
	 */
	if (hammer_ip_iterate_mem_good(cursor, rec) == 0) {
		hammer_rel_mem_record(rec);
		return(0);
	}

	/*
	 * Set the matching record and stop the scan.
	 */
	cursor->iprec = rec;
	return(-1);
}
/*
 * Lookup an in-memory record given the key specified in the cursor.  Works
 * just like hammer_btree_lookup() but operates on an inode's in-memory
 * record list.
 *
 * The lookup must fail if the record is marked for deferred deletion.
 */
static
int
hammer_mem_lookup(hammer_cursor_t cursor)
{
	KKASSERT(cursor->ip);
	if (cursor->iprec) {
		hammer_rel_mem_record(cursor->iprec);
		cursor->iprec = NULL;
	}
	hammer_rec_rb_tree_RB_SCAN(&cursor->ip->rec_tree, hammer_rec_find_cmp,
				   hammer_rec_scan_callback, cursor);

	if (cursor->iprec == NULL)
		return(ENOENT);
	return(0);
}
/*
 * hammer_mem_first() - locate the first in-memory record matching the
 * cursor within the bounds of the key range.
 */
static
int
hammer_mem_first(hammer_cursor_t cursor)
{
	hammer_inode_t ip;

	ip = cursor->ip;
	KKASSERT(ip != NULL);

	if (cursor->iprec) {
		hammer_rel_mem_record(cursor->iprec);
		cursor->iprec = NULL;
	}

	hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
				   hammer_rec_scan_callback, cursor);

	/*
	 * Adjust scan.node and keep it linked into the RB-tree so we can
	 * hold the cursor through third party modifications of the RB-tree.
	 */
	if (cursor->iprec)
		return(0);
	return(ENOENT);
}
/************************************************************************
 *		     HAMMER IN-MEMORY RECORD FUNCTIONS			*
 ************************************************************************
 *
 * These functions manipulate in-memory records.  Such records typically
 * exist prior to being committed to disk or indexed via the on-disk B-Tree.
 */
/*
 * Add a directory entry (dip,ncp) which references inode (ip).
 *
 * Note that the low 32 bits of the namekey are set temporarily to create
 * a unique in-memory record, and may be modified a second time when the
 * record is synchronized to disk.  In particular, the low 32 bits cannot be
 * all 0's when synching to disk, which is not handled here.
 *
 * NOTE: bytes does not include any terminating \0 on name, and name might
 * not be terminated.
 */
int
hammer_ip_add_directory(struct hammer_transaction *trans,
		     struct hammer_inode *dip, const char *name, int bytes,
		     struct hammer_inode *ip)
{
	struct hammer_cursor cursor;
	hammer_record_t record;
	int error;
	int count;
	u_int32_t iterator;

	record = hammer_alloc_mem_record(dip, HAMMER_ENTRY_SIZE(bytes));
	if (++trans->hmp->namekey_iterator == 0)
		++trans->hmp->namekey_iterator;

	record->type = HAMMER_MEM_RECORD_ADD;
	record->leaf.base.localization = dip->obj_localization +
					 HAMMER_LOCALIZE_MISC;
	record->leaf.base.obj_id = dip->obj_id;
	record->leaf.base.key = hammer_directory_namekey(name, bytes);
	record->leaf.base.key += trans->hmp->namekey_iterator;
	record->leaf.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
	record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
	record->data->entry.obj_id = ip->obj_id;
	record->data->entry.localization = ip->obj_localization;
	bcopy(name, record->data->entry.name, bytes);

	++ip->ino_data.nlinks;
	hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);

	/*
	 * Find an unused namekey.  Both the in-memory record tree and
	 * the B-Tree are checked.  Exact matches also match create_tid
	 * so use an ASOF search to (mostly) ignore it.
	 *
	 * delete-visibility is set so pending deletions do not give us
	 * a false-negative on our ability to use an iterator.
	 */
	hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
	cursor.key_beg = record->leaf.base;
	cursor.flags |= HAMMER_CURSOR_ASOF;
	cursor.flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
	cursor.asof = ip->obj_asof;

	count = 0;
	while (hammer_ip_lookup(&cursor) == 0) {
		iterator = (u_int32_t)record->leaf.base.key + 1;
		if (iterator == 0)
			iterator = 1;
		record->leaf.base.key &= ~0xFFFFFFFFLL;
		record->leaf.base.key |= iterator;
		cursor.key_beg.key = record->leaf.base.key;
		if (++count == 1000000000) {
			hammer_rel_mem_record(record);
			error = ENOSPC;
			goto failed;
		}
	}

	/*
	 * The target inode and the directory entry are bound together.
	 */
	record->target_ip = ip;
	record->flush_state = HAMMER_FST_SETUP;
	TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);

	/*
	 * The inode now has a dependency and must be taken out of the idle
	 * state.  An inode not in an idle state is given an extra reference.
	 */
	if (ip->flush_state == HAMMER_FST_IDLE) {
		hammer_ref(&ip->lock);
		ip->flush_state = HAMMER_FST_SETUP;
	}
	error = hammer_mem_add(record);
failed:
	hammer_done_cursor(&cursor);
	return(error);
}
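/*
 * Illustrative example (editorial addition, not part of the original
 * source): the 64 bit directory key built above combines a name hash
 * with a per-mount iterator, and the collision loop only regenerates the
 * low 32 bits.  A hedged sketch of the layout:
 */
#if 0
	int64_t key;

	key = hammer_directory_namekey(name, bytes);	/* hash, low 32 bits 0 */
	key += trans->hmp->namekey_iterator;		/* uniquify low 32 bits */
	/* on collision: key = (key & ~0xFFFFFFFFLL) | (iterator + 1) */
#endif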
/*
 * Delete the directory entry and update the inode link count.  The
 * cursor must be seeked to the directory entry record being deleted.
 *
 * The related inode should be share-locked by the caller.  The caller is
 * on the frontend.
 *
 * This function can return EDEADLK requiring the caller to terminate
 * the cursor, any locks, wait on the returned record, and retry.
 */
int
hammer_ip_del_directory(struct hammer_transaction *trans,
		     hammer_cursor_t cursor, struct hammer_inode *dip,
		     struct hammer_inode *ip)
{
	hammer_record_t record;
	int error;

	if (hammer_cursor_inmem(cursor)) {
		/*
		 * In-memory (unsynchronized) records can simply be freed.
		 * Even though the HAMMER_RECF_DELETED_FE flag is ignored
		 * by the backend, we must still avoid races against the
		 * backend potentially syncing the record to the media.
		 *
		 * We cannot call hammer_ip_delete_record(), that routine may
		 * only be called from the backend.
		 */
		record = cursor->iprec;
		if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
			KKASSERT(cursor->deadlk_rec == NULL);
			hammer_ref(&record->lock);
			cursor->deadlk_rec = record;
			error = EDEADLK;
		} else {
			KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
			record->flags |= HAMMER_RECF_DELETED_FE;
			error = 0;
		}
	} else {
		/*
		 * If the record is on-disk we have to queue the deletion by
		 * the record's key.  This also causes lookups to skip the
		 * record.
		 */
		KKASSERT(dip->flags &
			 (HAMMER_INODE_ONDISK | HAMMER_INODE_DONDISK));
		record = hammer_alloc_mem_record(dip, 0);
		record->type = HAMMER_MEM_RECORD_DEL;
		record->leaf.base = cursor->leaf->base;

		record->target_ip = ip;
		record->flush_state = HAMMER_FST_SETUP;
		TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);

		/*
		 * The inode now has a dependency and must be taken out of
		 * the idle state.  An inode not in an idle state is given
		 * an extra reference.
		 */
		if (ip->flush_state == HAMMER_FST_IDLE) {
			hammer_ref(&ip->lock);
			ip->flush_state = HAMMER_FST_SETUP;
		}
		error = hammer_mem_add(record);
	}

	/*
	 * One less link.  The file may still be open in the OS even after
	 * all links have gone away.
	 *
	 * We have to terminate the cursor before syncing the inode to
	 * avoid deadlocking against ourselves.  XXX this may no longer
	 * be true.
	 *
	 * If nlinks drops to zero and the vnode is inactive (or there is
	 * no vnode), call hammer_inode_unloadable_check() to zonk the
	 * inode.  If we don't do this here the inode will not be destroyed
	 * on-media until we unmount.
	 */
	if (error == 0) {
		--ip->ino_data.nlinks;
		hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
		if (ip->ino_data.nlinks == 0 &&
		    (ip->vp == NULL || (ip->vp->v_flag & VINACTIVE))) {
			hammer_done_cursor(cursor);
			hammer_inode_unloadable_check(ip, 1);
			hammer_flush_inode(ip, 0);
		}
	}
	return(error);
}
/*
 * Add a record to an inode.
 *
 * The caller must allocate the record with hammer_alloc_mem_record(ip) and
 * initialize the following additional fields:
 *
 * record->rec.entry.base.base.key
 * record->rec.entry.base.base.rec_type
 * record->rec.entry.base.base.data_len
 * record->data		(a copy will be kmalloc'd if it cannot be embedded)
 *
 * The related inode should be share-locked by the caller.  The caller is
 * on the frontend.
 */
int
hammer_ip_add_record(struct hammer_transaction *trans, hammer_record_t record)
{
	hammer_inode_t ip = record->ip;
	int error;

	KKASSERT(record->leaf.base.localization != 0);
	record->leaf.base.obj_id = ip->obj_id;
	record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
	error = hammer_mem_add(record);
	return(error);
}
/*
 * Locate a bulk record in-memory.  Bulk records allow disk space to be
 * reserved so the front-end can flush large data writes without having
 * to queue the BIO to the flusher.  Only the related record gets queued
 * to the flusher.
 */
static hammer_record_t
hammer_ip_get_bulk(hammer_inode_t ip, off_t file_offset, int bytes)
{
	hammer_record_t record;
	struct hammer_btree_leaf_elm leaf;

	bzero(&leaf, sizeof(leaf));
	leaf.base.obj_id = ip->obj_id;
	leaf.base.key = file_offset + bytes;
	leaf.base.create_tid = 0;
	leaf.base.delete_tid = 0;
	leaf.base.rec_type = HAMMER_RECTYPE_DATA;
	leaf.base.obj_type = 0;				/* unused */
	leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;	/* unused */
	leaf.base.localization = ip->obj_localization + HAMMER_LOCALIZE_MISC;
	leaf.data_len = bytes;

	record = hammer_rec_rb_tree_RB_LOOKUP_INFO(&ip->rec_tree, &leaf);
	if (record)
		hammer_ref(&record->lock);
	return(record);
}
/*
 * Reserve blockmap space placemarked with an in-memory record.
 *
 * This routine is called by the frontend in order to be able to directly
 * flush a buffer cache buffer.  The frontend has locked the related buffer
 * cache buffers and we should be able to manipulate any overlapping
 * in-memory records.
 */
hammer_record_t
hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, void *data, int bytes,
		   int *errorp)
{
	hammer_record_t record;
	hammer_record_t conflict;
	int zone;
	int flags;

	/*
	 * Deal with conflicting in-memory records.  We cannot have multiple
	 * in-memory records for the same offset without seriously confusing
	 * the backend, including but not limited to the backend issuing
	 * delete-create-delete sequences and asserting on the delete_tid
	 * being the same as the create_tid.
	 *
	 * If we encounter a record with the backend interlock set we cannot
	 * immediately delete it without confusing the backend.
	 */
	while ((conflict = hammer_ip_get_bulk(ip, file_offset, bytes)) != NULL) {
		if (conflict->flags & HAMMER_RECF_INTERLOCK_BE) {
			conflict->flags |= HAMMER_RECF_WANTED;
			tsleep(conflict, 0, "hmrrc3", 0);
		} else {
			conflict->flags |= HAMMER_RECF_DELETED_FE;
		}
		hammer_rel_mem_record(conflict);
	}

	/*
	 * Create a record to cover the direct write.  This is called with
	 * the related BIO locked so there should be no possible conflict.
	 *
	 * The backend is responsible for finalizing the space reserved in
	 * this record.
	 *
	 * XXX bytes not aligned, depend on the reservation code to
	 * align the reservation.
	 */
	record = hammer_alloc_mem_record(ip, 0);
	zone = (bytes >= HAMMER_BUFSIZE) ? HAMMER_ZONE_LARGE_DATA_INDEX :
					   HAMMER_ZONE_SMALL_DATA_INDEX;
	record->resv = hammer_blockmap_reserve(ip->hmp, zone, bytes,
					       &record->leaf.data_offset,
					       errorp);
	if (record->resv == NULL) {
		kprintf("hammer_ip_add_bulk: reservation failed\n");
		hammer_rel_mem_record(record);
		return(NULL);
	}
	record->type = HAMMER_MEM_RECORD_DATA;
	record->leaf.base.rec_type = HAMMER_RECTYPE_DATA;
	record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
	record->leaf.base.obj_id = ip->obj_id;
	record->leaf.base.key = file_offset + bytes;
	record->leaf.base.localization = ip->obj_localization +
					 HAMMER_LOCALIZE_MISC;
	record->leaf.data_len = bytes;
	hammer_crc_set_leaf(data, &record->leaf);
	flags = record->flags;

	hammer_ref(&record->lock);	/* mem_add eats a reference */
	*errorp = hammer_mem_add(record);
	if (*errorp) {
		conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
		kprintf("hammer_ip_add_bulk: error %d conflict %p file_offset %lld bytes %d\n",
			*errorp, conflict, file_offset, bytes);
		if (conflict) {
			kprintf("conflict %lld %d\n", conflict->leaf.base.key,
				conflict->leaf.data_len);
			hammer_rel_mem_record(conflict);
		}
	}
	KKASSERT(*errorp == 0);
	conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
	if (conflict != record) {
		kprintf("conflict mismatch %p %p %08x\n", conflict, record,
			record->flags);
		if (conflict)
			kprintf("conflict mismatch %lld/%d %lld/%d\n",
				conflict->leaf.base.key,
				conflict->leaf.data_len,
				record->leaf.base.key,
				record->leaf.data_len);
	}
	KKASSERT(conflict == record);
	hammer_rel_mem_record(conflict);

	return(record);
}
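/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): a hedged outline of the front-end direct-write sequence this
 * function supports.  The surrounding setup is hypothetical; the real
 * caller lives in the vnops write path.
 */
#if 0
	hammer_record_t record;
	int error;

	record = hammer_ip_add_bulk(ip, file_offset, data, bytes, &error);
	if (record) {
		/*
		 * Space is reserved and placemarked; the caller may now
		 * issue the BIO directly against record->leaf.data_offset
		 * while only the record itself is queued to the flusher.
		 */
	}
#endif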
/*
 * Frontend truncation code.  Scan in-memory records only.  On-disk records
 * and records in a flushing state are handled by the backend.  The vnops
 * setattr code will handle the block containing the truncation point.
 *
 * Partial blocks are not deleted.
 */
int
hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size)
{
	struct rec_trunc_info info;

	switch(ip->ino_data.obj_type) {
	case HAMMER_OBJTYPE_REGFILE:
		info.rec_type = HAMMER_RECTYPE_DATA;
		break;
	case HAMMER_OBJTYPE_DBFILE:
		info.rec_type = HAMMER_RECTYPE_DB;
		break;
	default:
		return(EINVAL);
	}
	info.trunc_off = file_size;
	hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_trunc_cmp,
				   hammer_frontend_trunc_callback, &info);
	return(0);
}

static int
hammer_frontend_trunc_callback(hammer_record_t record, void *data __unused)
{
	if (record->flags & HAMMER_RECF_DELETED_FE)
		return(0);
	if (record->flush_state == HAMMER_FST_FLUSH)
		return(0);
	KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
	hammer_ref(&record->lock);
	record->flags |= HAMMER_RECF_DELETED_FE;
	hammer_rel_mem_record(record);
	return(0);
}
/*
 * Return 1 if the caller must check for and delete existing records
 * before writing out a new data record.
 *
 * Return 0 if the caller can just insert the record into the B-Tree without
 * deleting anything.
 */
static int
hammer_record_needs_overwrite_delete(hammer_record_t record)
{
	hammer_inode_t ip = record->ip;
	int64_t file_offset;
	int r;

	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE)
		file_offset = record->leaf.base.key;
	else
		file_offset = record->leaf.base.key - record->leaf.data_len;
	r = (file_offset < ip->save_trunc_off);
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
		if (ip->save_trunc_off <= record->leaf.base.key)
			ip->save_trunc_off = record->leaf.base.key + 1;
	} else {
		if (ip->save_trunc_off < record->leaf.base.key)
			ip->save_trunc_off = record->leaf.base.key;
	}
	return(r);
}
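/*
 * Illustrative example (editorial addition, not part of the original
 * source): assuming a regular file with save_trunc_off = 32768, a data
 * record with key 49152 and data_len 16384 has base offset 32768, which
 * is not below the truncation point, so no overwrite-delete is needed
 * (r = 0) and save_trunc_off is advanced to the record's key, 49152.
 */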
/*
 * Backend code.  Sync a record to the media.
 */
int
hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t record)
{
	hammer_transaction_t trans = cursor->trans;
	int64_t file_offset;
	int64_t bytes;
	void *bdata;
	int error;
	int doprop;

	KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
	KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
	KKASSERT(record->leaf.base.localization != 0);

	/*
	 * If this is a bulk-data record placemarker there may be an existing
	 * record on-disk, indicating a data overwrite.  If there is, the
	 * on-disk record must be deleted before we can insert our new record.
	 *
	 * We've synthesized this record and do not know what the create_tid
	 * on-disk is, nor how much data it represents.
	 *
	 * Keep in mind that (key) for data records is (base_offset + len),
	 * not (base_offset).  Also, we only want to get rid of on-disk
	 * records since we are trying to sync our in-memory record, call
	 * hammer_ip_delete_range() with truncating set to 1 to make sure
	 * it skips in-memory records.
	 *
	 * It is ok for the lookup to return ENOENT.
	 *
	 * NOTE OPTIMIZATION: sync_trunc_off is used to determine if we have
	 * to call hammer_ip_delete_range() or not.  This also means we must
	 * update sync_trunc_off() as we write.
	 */
	if (record->type == HAMMER_MEM_RECORD_DATA &&
	    hammer_record_needs_overwrite_delete(record)) {
		file_offset = record->leaf.base.key - record->leaf.data_len;
		bytes = (record->leaf.data_len + HAMMER_BUFMASK) &
			~HAMMER_BUFMASK;
		KKASSERT((file_offset & HAMMER_BUFMASK) == 0);
		error = hammer_ip_delete_range(
				cursor, record->ip,
				file_offset, file_offset + bytes - 1,
				1);
		if (error && error != ENOENT)
			goto done;
	}

	/*
	 * If this is a general record there may be an on-disk version
	 * that must be deleted before we can insert the new record.
	 */
	if (record->type == HAMMER_MEM_RECORD_GENERAL) {
		error = hammer_delete_general(cursor, record->ip,
					      &record->leaf);
		if (error && error != ENOENT)
			goto done;
	}

	hammer_normalize_cursor(cursor);
	cursor->key_beg = record->leaf.base;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_BACKEND;
	cursor->flags &= ~HAMMER_CURSOR_INSERT;

	/*
	 * Records can wind up on-media before the inode itself is on-media.
	 */
	record->ip->flags |= HAMMER_INODE_DONDISK;

	/*
	 * If we are deleting a directory entry an exact match must be
	 * found on-disk.
	 */
	if (record->type == HAMMER_MEM_RECORD_DEL) {
		error = hammer_btree_lookup(cursor);
		if (error == 0) {
			error = hammer_ip_delete_record(cursor, record->ip,
							trans->tid);
			if (error == 0) {
				record->flags |= HAMMER_RECF_DELETED_FE;
				record->flags |= HAMMER_RECF_DELETED_BE;
			}
		}
		goto done;
	}
	/*
	 * Issue a lookup to position the cursor and locate the cluster.  The
	 * target key should not exist.  If we are creating a directory entry
	 * we may have to iterate the low 32 bits of the key to find an unused
	 * key.
	 */
	cursor->flags |= HAMMER_CURSOR_INSERT;

	error = hammer_btree_lookup(cursor);
	if (hammer_debug_inode)
		kprintf("DOINSERT LOOKUP %d\n", error);
	if (error == 0) {
		kprintf("hammer_ip_sync_record: duplicate rec "
			"at (%016llx)\n", record->leaf.base.key);
		Debugger("duplicate record1");
		error = EIO;
	}
#if 0
	if (record->type == HAMMER_MEM_RECORD_DATA)
		kprintf("sync_record %016llx ---------------- %016llx %d\n",
			record->leaf.base.key - record->leaf.data_len,
			record->leaf.data_offset, error);
#endif

	if (error != ENOENT)
		goto done;
	/*
	 * Allocate the record and data.  The result buffers will be
	 * marked as being modified and further calls to
	 * hammer_modify_buffer() will result in unneeded UNDO records.
	 *
	 * Support zero-fill records (data == NULL and data_len != 0)
	 */
	if (record->type == HAMMER_MEM_RECORD_DATA) {
		/*
		 * The data portion of a bulk-data record has already been
		 * committed to disk, we need only adjust the layer2
		 * statistics in the same transaction as our B-Tree insert.
		 */
		KKASSERT(record->leaf.data_offset != 0);
		hammer_blockmap_finalize(trans, record->leaf.data_offset,
					 record->leaf.data_len);
		error = 0;
	} else if (record->data && record->leaf.data_len) {
		/*
		 * Wholly cached record, with data.  Allocate the data.
		 */
		bdata = hammer_alloc_data(trans, record->leaf.data_len,
					  record->leaf.base.rec_type,
					  &record->leaf.data_offset,
					  &cursor->data_buffer, &error);
		if (bdata == NULL)
			goto done;
		hammer_crc_set_leaf(record->data, &record->leaf);
		hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);
		bcopy(record->data, bdata, record->leaf.data_len);
		hammer_modify_buffer_done(cursor->data_buffer);
	} else {
		/*
		 * Wholly cached record, without data.
		 */
		record->leaf.data_offset = 0;
		record->leaf.data_crc = 0;
	}
	error = hammer_btree_insert(cursor, &record->leaf, &doprop);
	if (hammer_debug_inode && error)
		kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n",
			error, cursor->node->node_offset, cursor->index,
			record->leaf.base.key);

	/*
	 * Our record is on-disk, normally mark the in-memory version as
	 * deleted.  If the record represented a directory deletion but
	 * we had to sync a valid directory entry to disk we must convert
	 * the record to a covering delete so the frontend does not have
	 * visibility on the synced entry.
	 */
	if (error == 0) {
		if (doprop) {
			hammer_btree_do_propagation(cursor, record->ip,
						    &record->leaf);
		}
		if (record->flags & HAMMER_RECF_CONVERT_DELETE) {
			KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
			record->flags &= ~HAMMER_RECF_DELETED_FE;
			record->type = HAMMER_MEM_RECORD_DEL;
			KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
			record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
			/* hammer_flush_record_done takes care of the rest */
		} else {
			record->flags |= HAMMER_RECF_DELETED_FE;
			record->flags |= HAMMER_RECF_DELETED_BE;
		}
	} else {
		if (record->leaf.data_offset) {
			hammer_blockmap_free(trans, record->leaf.data_offset,
					     record->leaf.data_len);
		}
	}
done:
	return(error);
}
/*
 * Add the record to the inode's rec_tree.  The low 32 bits of a directory
 * entry's key is used to deal with hash collisions in the upper 32 bits.
 * A unique 64 bit key is generated in-memory and may be regenerated a
 * second time when the directory record is flushed to the on-disk B-Tree.
 *
 * A referenced record is passed to this function.  This function
 * eats the reference.  If an error occurs the record will be deleted.
 *
 * A copy of the temporary record->data pointer provided by the caller
 * will be made.
 */
static
int
hammer_mem_add(hammer_record_t record)
{
	hammer_mount_t hmp = record->ip->hmp;

	/*
	 * Make a private copy of record->data
	 */
	if (record->data)
		KKASSERT(record->flags & HAMMER_RECF_ALLOCDATA);

	/*
	 * Insert into the RB tree.  A unique key should have already
	 * been selected if this is a directory entry.
	 */
	if (RB_INSERT(hammer_rec_rb_tree, &record->ip->rec_tree, record)) {
		record->flags |= HAMMER_RECF_DELETED_FE;
		hammer_rel_mem_record(record);
		return (EEXIST);
	}
	++hmp->count_newrecords;
	++hmp->rsv_recs;
	++record->ip->rsv_recs;
	record->ip->hmp->rsv_databytes += record->leaf.data_len;
	record->flags |= HAMMER_RECF_ONRBTREE;
	hammer_modify_inode(record->ip, HAMMER_INODE_XDIRTY);
	hammer_rel_mem_record(record);
	return(0);
}
/************************************************************************
 *		   HAMMER INODE MERGED-RECORD FUNCTIONS		*
 ************************************************************************
 *
 * These functions augment the B-Tree scanning functions in hammer_btree.c
 * by merging in-memory records with on-disk records.
 */

/*
 * Locate a particular record either in-memory or on-disk.
 *
 * NOTE: This is basically a standalone routine, hammer_ip_next() may
 * NOT be called to iterate results.
 */
int
hammer_ip_lookup(hammer_cursor_t cursor)
{
	int error;

	/*
	 * If the element is in-memory return it without searching the
	 * on-disk B-Tree.
	 */
	KKASSERT(cursor->ip);
	error = hammer_mem_lookup(cursor);
	if (error == 0) {
		cursor->leaf = &cursor->iprec->leaf;
		return(error);
	}
	if (error != ENOENT)
		return(error);

	/*
	 * If the inode has on-disk components search the on-disk B-Tree.
	 */
	if ((cursor->ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) == 0)
		return(error);
	error = hammer_btree_lookup(cursor);
	if (error == 0)
		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
	return(error);
}
/*
 * Locate the first record within the cursor's key_beg/key_end range,
 * restricted to a particular inode.  0 is returned on success, ENOENT
 * if no records matched the requested range, or some other error.
 *
 * When 0 is returned hammer_ip_next() may be used to iterate additional
 * records within the requested range.
 *
 * This function can return EDEADLK, requiring the caller to terminate
 * the cursor and try again.
 */
int
hammer_ip_first(hammer_cursor_t cursor)
{
	hammer_inode_t ip = cursor->ip;
	int error;

	KKASSERT(ip != NULL);

	/*
	 * Clean up fields and setup for merged scan
	 */
	cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
	cursor->flags |= HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM;
	cursor->flags |= HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_MEMEOF;
	if (cursor->iprec) {
		hammer_rel_mem_record(cursor->iprec);
		cursor->iprec = NULL;
	}

	/*
	 * Search the on-disk B-Tree.  hammer_btree_lookup() only does an
	 * exact lookup so if we get ENOENT we have to call the iterate
	 * function to validate the first record after the begin key.
	 *
	 * The ATEDISK flag is used by hammer_btree_iterate to determine
	 * whether it must index forwards or not.  It is also used here
	 * to select the next record from in-memory or on-disk.
	 *
	 * EDEADLK can only occur if the lookup hit an empty internal
	 * element and couldn't delete it.  Since this could only occur
	 * in-range, we can just iterate from the failure point.
	 */
	if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
		error = hammer_btree_lookup(cursor);
		if (error == ENOENT || error == EDEADLK) {
			cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
			if (hammer_debug_general & 0x2000)
				kprintf("error %d node %p %016llx index %d\n",
					error, cursor->node,
					cursor->node->node_offset,
					cursor->index);
			error = hammer_btree_iterate(cursor);
		}
		if (error && error != ENOENT)
			return(error);
		if (error == 0) {
			cursor->flags &= ~HAMMER_CURSOR_DISKEOF;
			cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
		} else {
			cursor->flags |= HAMMER_CURSOR_ATEDISK;
		}
	}

	/*
	 * Search the in-memory record list (Red-Black tree).  Unlike the
	 * B-Tree search, mem_first checks for records in the range.
	 */
	error = hammer_mem_first(cursor);
	if (error && error != ENOENT)
		return(error);
	if (error == 0) {
		cursor->flags &= ~HAMMER_CURSOR_MEMEOF;
		cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
		if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0)
			cursor->flags |= HAMMER_CURSOR_ATEMEM;
	}

	/*
	 * This will return the first matching record.
	 */
	return(hammer_ip_next(cursor));
}
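/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): the merged-iteration calling convention, as used by the
 * range-deletion code later in this file.  Cursor setup is assumed to
 * have been done by the caller:
 */
#if 0
	error = hammer_ip_first(cursor);
	while (error == 0) {
		/* cursor->leaf is the current merged (mem or disk) record */
		error = hammer_ip_next(cursor);
	}
	if (error == ENOENT)
		error = 0;	/* ran off the end of the key range */
#endif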
/*
 * Retrieve the next record in a merged iteration within the bounds of the
 * cursor.  This call may be made multiple times after the cursor has been
 * initially searched with hammer_ip_first().
 *
 * 0 is returned on success, ENOENT if no further records match the
 * requested range, or some other error code on failure.
 */
int
hammer_ip_next(hammer_cursor_t cursor)
{
	hammer_btree_elm_t elm;
	hammer_record_t rec, save;
	int error;
	int r;

again:
	/*
	 * Load the current on-disk and in-memory record.  If we ate any
	 * records we have to get the next one.
	 *
	 * If we deleted the last on-disk record we had scanned ATEDISK will
	 * be clear and DELBTREE will be set, forcing a call to iterate.  The
	 * fact that ATEDISK is clear causes iterate to re-test the 'current'
	 * element.  If ATEDISK is set, iterate will skip the 'current'
	 * element.
	 *
	 * Get the next on-disk record
	 */
	if (cursor->flags & (HAMMER_CURSOR_ATEDISK|HAMMER_CURSOR_DELBTREE)) {
		if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
			error = hammer_btree_iterate(cursor);
			cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
			if (error == 0) {
				cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
				hammer_cache_node(&cursor->ip->cache[1],
						  cursor->node);
			} else {
				cursor->flags |= HAMMER_CURSOR_DISKEOF |
						 HAMMER_CURSOR_ATEDISK;
			}
		}
	}

	/*
	 * Get the next in-memory record.  The record can be ripped out
	 * of the RB tree so we maintain a scan_info structure to track
	 * the next record.
	 *
	 * hammer_rec_scan_cmp:  Is the record still in our general range,
	 *			 (non-inclusive of snapshot exclusions)?
	 * hammer_rec_scan_callback: Is the record in our snapshot?
	 */
	if (cursor->flags & HAMMER_CURSOR_ATEMEM) {
		if ((cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
			save = cursor->iprec;
			cursor->iprec = NULL;
			rec = save ? hammer_rec_rb_tree_RB_NEXT(save) : NULL;
			while (rec) {
				if (hammer_rec_scan_cmp(rec, cursor) != 0)
					break;
				if (hammer_rec_scan_callback(rec, cursor) != 0)
					break;
				rec = hammer_rec_rb_tree_RB_NEXT(rec);
			}
			if (save)
				hammer_rel_mem_record(save);
			if (cursor->iprec) {
				KKASSERT(cursor->iprec == rec);
				cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
			} else {
				cursor->flags |= HAMMER_CURSOR_MEMEOF;
			}
		}
	}

	/*
	 * The memory record may have become stale while being held in
	 * cursor->iprec.  We are interlocked against the backend with
	 * regard to B-Tree entries.
	 */
	if ((cursor->flags & HAMMER_CURSOR_ATEMEM) == 0) {
		if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0)
			cursor->flags |= HAMMER_CURSOR_ATEMEM;
	}

	/*
	 * Extract either the disk or memory record depending on their
	 * relative position.
	 */
	error = 0;
	switch(cursor->flags & (HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM)) {
	case 0:
		/*
		 * Both entries valid.   Compare the entries and nominally
		 * return the first one in the sort order.  Numerous cases
		 * require special attention, however.
		 */
		elm = &cursor->node->ondisk->elms[cursor->index];
		r = hammer_btree_cmp(&elm->base, &cursor->iprec->leaf.base);

		/*
		 * If the two entries differ only by their key (-2/2) or
		 * create_tid (-1/1), and are DATA records, we may have a
		 * nominal match.  We have to calculate the base file
		 * offset of the data.
		 */
		if (r <= 2 && r >= -2 && r != 0 &&
		    cursor->ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE &&
		    cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
			int64_t base1 = elm->leaf.base.key - elm->leaf.data_len;
			int64_t base2 = cursor->iprec->leaf.base.key -
					cursor->iprec->leaf.data_len;
			if (base1 == base2)
				r = 0;
		}

		if (r < 0) {
			error = hammer_btree_extract(cursor,
						     HAMMER_CURSOR_GET_LEAF);
			cursor->flags |= HAMMER_CURSOR_ATEDISK;
			break;
		}

		/*
		 * If the entries match exactly the memory entry is either
		 * an on-disk directory entry deletion or a bulk data
		 * overwrite.  If it is a directory entry deletion we eat
		 * both entries.
		 *
		 * For the bulk-data overwrite case it is possible to have
		 * visibility into both, which simply means the syncer
		 * hasn't gotten around to doing the delete+insert sequence
		 * on the B-Tree.  Use the memory entry and throw away the
		 * on-disk entry.
		 *
		 * If the in-memory record is not either of these we
		 * probably caught the syncer while it was syncing it to
		 * the media.  Since we hold a shared lock on the cursor,
		 * the in-memory record had better be marked deleted at
		 * this point.
		 */
		if (r == 0) {
			if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL) {
				if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
					cursor->flags |= HAMMER_CURSOR_ATEDISK;
					cursor->flags |= HAMMER_CURSOR_ATEMEM;
					goto again;
				}
			} else if (cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
				if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
					cursor->flags |= HAMMER_CURSOR_ATEDISK;
				}
				/* fall through to memory entry */
			} else {
				panic("hammer_ip_next: duplicate mem/b-tree entry");
				cursor->flags |= HAMMER_CURSOR_ATEMEM;
				goto again;
			}
		}
		/* fall through to the memory entry */
	case HAMMER_CURSOR_ATEDISK:
		/*
		 * Only the memory entry is valid.
		 */
		cursor->leaf = &cursor->iprec->leaf;
		cursor->flags |= HAMMER_CURSOR_ATEMEM;

		/*
		 * If the memory entry is an on-disk deletion we should also
		 * have found a B-Tree record.  If the backend beat us to it,
		 * it would have interlocked the cursor and we should have
		 * seen the in-memory record marked DELETED_FE.
		 */
		if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL &&
		    (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
			panic("hammer_ip_next: del-on-disk with no b-tree entry");
		}
		break;
	case HAMMER_CURSOR_ATEMEM:
		/*
		 * Only the disk entry is valid
		 */
		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
		cursor->flags |= HAMMER_CURSOR_ATEDISK;
		break;
	default:
		/*
		 * Neither entry is valid
		 *
		 * XXX error not set properly
		 */
		cursor->leaf = NULL;
		error = ENOENT;
		break;
	}
	return(error);
}
/*
 * Resolve the cursor->data pointer for the current cursor position in
 * a merged iteration.
 */
int
hammer_ip_resolve_data(hammer_cursor_t cursor)
{
	hammer_record_t record;
	int error;

	if (hammer_cursor_inmem(cursor)) {
		/*
		 * The data associated with an in-memory record is usually
		 * kmalloced, but reserve-ahead data records will have an
		 * on-disk reference.
		 *
		 * NOTE: Reserve-ahead data records must be handled in the
		 * context of the related high level buffer cache buffer
		 * to interlock against async writes.
		 */
		record = cursor->iprec;
		cursor->data = record->data;
		error = 0;
		if (cursor->data == NULL) {
			KKASSERT(record->leaf.base.rec_type ==
				 HAMMER_RECTYPE_DATA);
			cursor->data = hammer_bread_ext(cursor->trans->hmp,
						record->leaf.data_offset,
						record->leaf.data_len,
						&error,
						&cursor->data_buffer);
		}
	} else {
		cursor->leaf = &cursor->node->ondisk->elms[cursor->index].leaf;
		error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
	}
	return(error);
}
/*
 * Backend truncation / record replacement - delete records in range.
 *
 * Delete all records within the specified range for inode ip.  In-memory
 * records still associated with the frontend are ignored.
 *
 * If truncating is non-zero in-memory records associated with the back-end
 * are ignored.  If truncating is > 1 we can return EWOULDBLOCK.
 *
 * NOTES:
 *
 *	* An unaligned range will cause new records to be added to cover
 *	  the edge cases. (XXX not implemented yet).
 *
 *	* Replacement via reservations (see hammer_ip_sync_record_cursor())
 *	  also does not deal with unaligned ranges.
 *
 *	* ran_end is inclusive (e.g. 0,1023 instead of 0,1024).
 *
 *	* Record keys for regular file data have to be special-cased since
 *	  they indicate the end of the range (key = base + bytes).
 *
 *	* This function may be asked to delete ridiculously huge ranges, for
 *	  example if someone truncates or removes a 1TB regular file.  We
 *	  must be very careful on restarts and we may have to stop w/
 *	  EWOULDBLOCK to avoid blowing out the buffer cache.
 */
int
hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
		       int64_t ran_beg, int64_t ran_end, int truncating)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_btree_leaf_elm_t leaf;
	int error;
	int64_t off;
	int64_t tmp64;

#if 0
	kprintf("delete_range %p %016llx-%016llx\n", ip, ran_beg, ran_end);
#endif

	KKASSERT(trans->type == HAMMER_TRANS_FLS);
retry:
	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_MISC;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.obj_type = 0;

	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
		cursor->key_beg.key = ran_beg;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_DB;
	} else {
		/*
		 * The key in the B-Tree is (base+bytes), so the first possible
		 * matching key is ran_beg + 1.
		 */
		cursor->key_beg.key = ran_beg + 1;
		cursor->key_beg.rec_type = HAMMER_RECTYPE_DATA;
	}

	cursor->key_end = cursor->key_beg;
	if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
		cursor->key_end.key = ran_end;
	} else {
		tmp64 = ran_end + MAXPHYS + 1;	/* work around GCC-4 bug */
		if (tmp64 < ran_end)
			cursor->key_end.key = 0x7FFFFFFFFFFFFFFFLL;
		else
			cursor->key_end.key = ran_end + MAXPHYS + 1;
	}

	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
	cursor->flags |= HAMMER_CURSOR_BACKEND;
	cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE;

	error = hammer_ip_first(cursor);

	/*
	 * Iterate through matching records and mark them as deleted.
	 */
	while (error == 0) {
		leaf = cursor->leaf;

		KKASSERT(leaf->base.delete_tid == 0);
		KKASSERT(leaf->base.obj_id == ip->obj_id);

		/*
		 * There may be overlap cases for regular file data.  Also
		 * remember the key for a regular file record is (base + len),
		 * NOT (base).
		 *
		 * Note that due to duplicates (mem & media) allowed by
		 * DELETE_VISIBILITY, off can wind up less than ran_beg.
		 */
		if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
			off = leaf->base.key - leaf->data_len;
			/*
			 * Check the left edge case.  We currently do not
			 * split existing records.
			 */
			if (off < ran_beg && leaf->base.key > ran_beg) {
				panic("hammer left edge case %016llx %d\n",
					leaf->base.key, leaf->data_len);
			}

			/*
			 * Check the right edge case.  Note that the
			 * record can be completely out of bounds, which
			 * terminates the search.
			 *
			 * base->key is exclusive of the right edge while
			 * ran_end is inclusive of the right edge.  The
			 * (key - data_len) left boundary is inclusive.
			 *
			 * XXX theory-check this test at some point, are
			 * we missing a + 1 somewhere?  Note that ran_end
			 * could overflow.
			 */
			if (leaf->base.key - 1 > ran_end) {
				if (leaf->base.key - leaf->data_len > ran_end)
					break;
				panic("hammer right edge case\n");
			}
		} else {
			off = leaf->base.key;
		}

		/*
		 * Delete the record.  When truncating we do not delete
		 * in-memory (data) records because they represent data
		 * written after the truncation.
		 *
		 * This will also physically destroy the B-Tree entry and
		 * data if the retention policy dictates.  The function
		 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
		 * uses to perform a fixup.
		 */
		if (truncating == 0 || hammer_cursor_ondisk(cursor)) {
			error = hammer_ip_delete_record(cursor, ip, trans->tid);
			/*
			 * If we have built up too many meta-buffers we risk
			 * deadlocking the kernel and must stop.  This can
			 * occur when deleting ridiculously huge files.
			 * sync_trunc_off is updated so the next cycle does
			 * not re-iterate records we have already deleted.
			 *
			 * This is only done with formal truncations.
			 */
			if (truncating > 1 && error == 0 &&
			    hammer_flusher_meta_limit(ip->hmp)) {
				ip->sync_trunc_off = off;
				error = EWOULDBLOCK;
			}
		}
		if (error)
			break;
		ran_beg = off;	/* for restart */
		error = hammer_ip_next(cursor);
	}
	if (cursor->node)
		hammer_cache_node(&ip->cache[1], cursor->node);

	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
		if (error == 0)
			goto retry;
	}
	if (error == ENOENT)
		error = 0;
	return(error);
}
/*
 * This backend function deletes the specified record on-disk, similar to
 * delete_range but for a specific record.  Unlike the exact deletions
 * used when deleting a directory entry this function uses an ASOF search
 * like delete_range.
 *
 * This function may be called with ip->obj_asof set for a slave snapshot,
 * so don't use it.  We always delete non-historical records only.
 */
static int
hammer_delete_general(hammer_cursor_t cursor, hammer_inode_t ip,
		      hammer_btree_leaf_elm_t leaf)
{
	hammer_transaction_t trans = cursor->trans;
	int error;

	KKASSERT(trans->type == HAMMER_TRANS_FLS);
retry:
	hammer_normalize_cursor(cursor);
	cursor->key_beg = leaf->base;
	cursor->asof = HAMMER_MAX_TID;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_BACKEND;
	cursor->flags &= ~HAMMER_CURSOR_INSERT;

	error = hammer_btree_lookup(cursor);
	if (error == 0) {
		error = hammer_ip_delete_record(cursor, ip, trans->tid);
	}
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
		if (error == 0)
			goto retry;
	}
	return(error);
}
/*
 * This function deletes remaining auxiliary records when an inode is
 * being deleted.  This function explicitly does not delete the
 * inode record, directory entry, data, or db records.  Those must be
 * properly disposed of prior to this call.
 */
int
hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip, int *countp)
{
	hammer_transaction_t trans = cursor->trans;
	hammer_btree_leaf_elm_t leaf;
	int error;

	KKASSERT(trans->type == HAMMER_TRANS_FLS);
retry:
	hammer_normalize_cursor(cursor);
	cursor->key_beg.localization = ip->obj_localization +
				       HAMMER_LOCALIZE_MISC;
	cursor->key_beg.obj_id = ip->obj_id;
	cursor->key_beg.create_tid = 0;
	cursor->key_beg.delete_tid = 0;
	cursor->key_beg.obj_type = 0;
	cursor->key_beg.rec_type = HAMMER_RECTYPE_CLEAN_START;
	cursor->key_beg.key = HAMMER_MIN_KEY;

	cursor->key_end = cursor->key_beg;
	cursor->key_end.rec_type = HAMMER_RECTYPE_MAX;
	cursor->key_end.key = HAMMER_MAX_KEY;

	cursor->asof = ip->obj_asof;
	cursor->flags &= ~HAMMER_CURSOR_INITMASK;
	cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
	cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
	cursor->flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_ip_first(cursor);

	/*
	 * Iterate through matching records and mark them as deleted.
	 */
	while (error == 0) {
		leaf = cursor->leaf;

		KKASSERT(leaf->base.delete_tid == 0);

		/*
		 * Mark the record and B-Tree entry as deleted.  This will
		 * also physically delete the B-Tree entry, record, and
		 * data if the retention policy dictates.  The function
		 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
		 * uses to perform a fixup.
		 *
		 * Directory entries (and delete-on-disk directory entries)
		 * must be synced and cannot be deleted.
		 */
		error = hammer_ip_delete_record(cursor, ip, trans->tid);
		++*countp;
		if (error)
			break;
		error = hammer_ip_next(cursor);
	}
	if (cursor->node)
		hammer_cache_node(&ip->cache[1], cursor->node);
	if (error == EDEADLK) {
		hammer_done_cursor(cursor);
		error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
		if (error == 0)
			goto retry;
	}
	if (error == ENOENT)
		error = 0;
	return(error);
}
/*
 * Delete the record at the current cursor.  On success the cursor will
 * be positioned appropriately for an iteration but may no longer be at
 * a leaf node.
 *
 * This routine is only called from the backend.
 *
 * NOTE: This can return EDEADLK, requiring the caller to terminate the
 * cursor and retry.
 */
int
hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
			hammer_tid_t tid)
{
	hammer_off_t zone2_offset;
	hammer_record_t iprec;
	hammer_btree_elm_t elm;
	hammer_mount_t hmp;
	int error;

	KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
	hmp = cursor->node->hmp;

	/*
	 * In-memory (unsynchronized) records can simply be freed.  This
	 * only occurs in range iterations since all other records are
	 * individually synchronized.  Thus there should be no confusion with
	 * the interlock.
	 *
	 * An in-memory record may be deleted before being committed to disk,
	 * but could have been accessed in the mean time.  The backing store
	 * may never have been marked allocated and so hammer_blockmap_free()
	 * may never get called on it.  Because of this we have to make sure
	 * that we've gotten rid of any related hammer_buffer or buffer cache
	 * buffer.
	 */
	if (hammer_cursor_inmem(cursor)) {
		iprec = cursor->iprec;
		KKASSERT((iprec->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
		iprec->flags |= HAMMER_RECF_DELETED_FE;
		iprec->flags |= HAMMER_RECF_DELETED_BE;

		if (iprec->leaf.data_offset && iprec->leaf.data_len) {
			zone2_offset = hammer_blockmap_lookup(hmp, iprec->leaf.data_offset, &error);
			KKASSERT(error == 0);
			hammer_del_buffers(hmp,
					   iprec->leaf.data_offset,
					   zone2_offset,
					   iprec->leaf.data_len);
		}
		return(0);
	}

	/*
	 * On-disk records are marked as deleted by updating their delete_tid.
	 * This does not affect their position in the B-Tree (which is based
	 * on their create_tid).
	 */
	error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
	if (error == 0) {
		error = hammer_delete_at_cursor(
				cursor,
				HAMMER_DELETE_ADJUST | hammer_nohistory(ip),
				NULL);
	}
	return(error);
}
/*
 * Delete the B-Tree element at the current cursor and do any necessary
 * mirror propagation.
 *
 * The cursor must be properly positioned for an iteration on return but
 * may be pointing at an internal element.
 */
int
hammer_delete_at_cursor(hammer_cursor_t cursor, int delete_flags,
			int64_t *stat_bytes)
{
	struct hammer_btree_leaf_elm save_leaf;
	hammer_btree_leaf_elm_t leaf;
	hammer_node_t node;
	hammer_btree_elm_t elm;
	hammer_off_t data_offset;
	int32_t data_len;
	u_int16_t rec_type;
	int error;
	int doprop;

	error = hammer_cursor_upgrade(cursor);
	if (error)
		return(error);

	node = cursor->node;
	elm = &node->ondisk->elms[cursor->index];
	leaf = &elm->leaf;
	KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);

	doprop = 0;

	/*
	 * Adjust the delete_tid.  Update the mirror_tid propagation field
	 * as well.
	 */
	if (delete_flags & HAMMER_DELETE_ADJUST) {
		hammer_modify_node(cursor->trans, node, elm, sizeof(*elm));
		elm->leaf.base.delete_tid = cursor->trans->tid;
		elm->leaf.delete_ts = cursor->trans->time32;
		hammer_modify_node_done(node);

		if (elm->leaf.base.delete_tid > node->ondisk->mirror_tid) {
			hammer_modify_node_field(cursor->trans, node,
						 mirror_tid);
			node->ondisk->mirror_tid = elm->leaf.base.delete_tid;
			hammer_modify_node_done(node);
			doprop = 1;
		}

		/*
		 * Adjust for the iteration.  We have deleted the current
		 * element and want to clear ATEDISK so the iteration does
		 * not skip the element after, which now becomes the current
		 * element.
		 */
		if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
			cursor->flags |= HAMMER_CURSOR_DELBTREE;
			cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
		}

		/*
		 * An on-disk record cannot have the same delete_tid
		 * as its create_tid.  In a chain of record updates
		 * this could result in a duplicate record.
		 */
		KKASSERT(elm->leaf.base.delete_tid !=
			 elm->leaf.base.create_tid);
	}

	/*
	 * Destroy the B-Tree element if asked (typically if a nohistory
	 * file or mount, or when called by the pruning code).
	 *
	 * Adjust the ATEDISK flag to properly support iterations.
	 */
	if (delete_flags & HAMMER_DELETE_DESTROY) {
		data_offset = elm->leaf.data_offset;
		data_len = elm->leaf.data_len;
		rec_type = elm->leaf.base.rec_type;
		if (doprop) {
			save_leaf = elm->leaf;
			leaf = &save_leaf;
		}

		error = hammer_btree_delete(cursor);
		if (error == 0) {
			/*
			 * This forces a fixup for the iteration because
			 * the cursor is now either sitting at the 'next'
			 * element or sitting at the end of a leaf.
			 */
			if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
				cursor->flags |= HAMMER_CURSOR_DELBTREE;
				cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
			}
		}
		if (error == 0) {
			switch(data_offset & HAMMER_OFF_ZONE_MASK) {
			case HAMMER_ZONE_LARGE_DATA:
			case HAMMER_ZONE_SMALL_DATA:
			case HAMMER_ZONE_META:
				hammer_blockmap_free(cursor->trans,
						     data_offset, data_len);
				break;
			default:
				break;
			}
		}
	}

	/*
	 * mirror_tid propagation occurs if the node's mirror_tid had to be
	 * updated while adjusting the delete_tid.
	 *
	 * This occurs when deleting even in nohistory mode, but does not
	 * occur when pruning an already-deleted node.
	 */
	if (doprop) {
		KKASSERT(cursor->ip != NULL);
		hammer_btree_do_propagation(cursor, cursor->ip, leaf);
	}
	return (error);
}
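/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): the two delete_flags modes above.  HAMMER_DELETE_ADJUST
 * leaves the element in place and stamps delete_tid, preserving history
 * for as-of queries; HAMMER_DELETE_DESTROY physically removes the
 * element and frees its data, as used for nohistory operation and
 * pruning.  A hedged caller-side sketch:
 */
#if 0
	/* historical delete: record remains visible to as-of lookups */
	error = hammer_delete_at_cursor(cursor, HAMMER_DELETE_ADJUST, NULL);

	/* destroy: element and data are physically reclaimed */
	error = hammer_delete_at_cursor(cursor,
					HAMMER_DELETE_ADJUST |
					HAMMER_DELETE_DESTROY, NULL);
#endif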
/*
 * Determine whether we can remove a directory.  This routine checks whether
 * a directory is empty or not and enforces flush connectivity.
 *
 * Flush connectivity requires that we block if the target directory is
 * currently flushing, otherwise it may not end up in the same flush group.
 *
 * Returns 0 on success, ENOTEMPTY or EDEADLK (or other errors) on failure.
 */
int
hammer_ip_check_directory_empty(hammer_transaction_t trans, hammer_inode_t ip)
{
	struct hammer_cursor cursor;
	int error;

	/*
	 * Check directory empty
	 */
	hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);

	cursor.key_beg.localization = ip->obj_localization +
				      HAMMER_LOCALIZE_MISC;
	cursor.key_beg.obj_id = ip->obj_id;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
	cursor.key_beg.key = HAMMER_MIN_KEY;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.rec_type = 0xFFFF;
	cursor.key_end.key = HAMMER_MAX_KEY;

	cursor.asof = ip->obj_asof;
	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;

	error = hammer_ip_first(&cursor);
	if (error == ENOENT)
		error = 0;
	else if (error == 0)
		error = ENOTEMPTY;
	hammer_done_cursor(&cursor);
	return(error);
}