2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sys/vfs/hammer/hammer_object.c,v 1.80 2008/07/01 02:08:58 dillon Exp $
39 static int hammer_mem_add(hammer_record_t record);
40 static int hammer_mem_lookup(hammer_cursor_t cursor);
41 static int hammer_mem_first(hammer_cursor_t cursor);
static int hammer_frontend_trunc_callback(hammer_record_t record,
			void *data);
44 static int hammer_record_needs_overwrite_delete(hammer_record_t record);
struct rec_trunc_info {
	u_int16_t	rec_type;
	int64_t		trunc_off;
};
52 * Red-black tree support. Comparison code for insertion.
55 hammer_rec_rb_compare(hammer_record_t rec1, hammer_record_t rec2)
57 if (rec1->leaf.base.rec_type < rec2->leaf.base.rec_type)
59 if (rec1->leaf.base.rec_type > rec2->leaf.base.rec_type)
62 if (rec1->leaf.base.key < rec2->leaf.base.key)
64 if (rec1->leaf.base.key > rec2->leaf.base.key)
68 * Never match against an item deleted by the front-end.
 * rec1 is greater than rec2 if rec1 is marked deleted.
 * rec1 is less than rec2 if rec2 is marked deleted.
 *
 * Multiple deleted records may be present; do not return 0
 * if both are marked deleted.
	if (rec1->flags & HAMMER_RECF_DELETED_FE)
		return(1);
	if (rec2->flags & HAMMER_RECF_DELETED_FE)
		return(-1);
85 * Basic record comparison code similar to hammer_btree_cmp().
88 hammer_rec_cmp(hammer_base_elm_t elm, hammer_record_t rec)
90 if (elm->rec_type < rec->leaf.base.rec_type)
92 if (elm->rec_type > rec->leaf.base.rec_type)
95 if (elm->key < rec->leaf.base.key)
97 if (elm->key > rec->leaf.base.key)
101 * Never match against an item deleted by the front-end.
 * elm is less than rec if rec is marked deleted.
	if (rec->flags & HAMMER_RECF_DELETED_FE)
		return(-1);
 * Special LOOKUP_INFO to locate an overlapping record.  This is used by
 * the reservation code to implement small-block records (whose keys will
 * be different depending on data_len, when representing the same base
 * offset).
115 * NOTE: The base file offset of a data record is (key - data_len), not (key).
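 *
 * For example, a DATA record covering file offsets [0x0000, 0x4000)
 * has key 0x4000 and data_len 0x4000.  Two data records overlap when
 * neither (leaf_end <= rec_beg) nor (leaf_beg >= rec_end) holds, which
 * is exactly what the HAMMER_RECTYPE_DATA case below tests.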
118 hammer_rec_overlap_compare(hammer_btree_leaf_elm_t leaf, hammer_record_t rec)
120 if (leaf->base.rec_type < rec->leaf.base.rec_type)
122 if (leaf->base.rec_type > rec->leaf.base.rec_type)
128 if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
129 /* leaf_end <= rec_beg */
130 if (leaf->base.key <= rec->leaf.base.key - rec->leaf.data_len)
132 /* leaf_beg >= rec_end */
133 if (leaf->base.key - leaf->data_len >= rec->leaf.base.key)
136 if (leaf->base.key < rec->leaf.base.key)
138 if (leaf->base.key > rec->leaf.base.key)
143 * Never match against an item deleted by the front-end.
 * leaf is less than rec if rec is marked deleted.
146 * We must still return the proper code for the scan to continue
147 * along the correct branches.
149 if (rec->flags & HAMMER_RECF_DELETED_FE) {
150 if (leaf->base.key < rec->leaf.base.key)
152 if (leaf->base.key > rec->leaf.base.key)
160 * RB_SCAN comparison code for hammer_mem_first(). The argument order
161 * is reversed so the comparison result has to be negated. key_beg and
162 * key_end are both range-inclusive.
164 * Localized deletions are not cached in-memory.
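 *
 * Note that hammer_rec_cmp() is called with the cursor key as its
 * first argument, so a positive result against key_beg means the
 * record still lies before the range and a negative result against
 * key_end means it lies past the range.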
168 hammer_rec_scan_cmp(hammer_record_t rec, void *data)
170 hammer_cursor_t cursor = data;
173 r = hammer_rec_cmp(&cursor->key_beg, rec);
176 r = hammer_rec_cmp(&cursor->key_end, rec);
183 * This compare function is used when simply looking up key_beg.
187 hammer_rec_find_cmp(hammer_record_t rec, void *data)
189 hammer_cursor_t cursor = data;
192 r = hammer_rec_cmp(&cursor->key_beg, rec);
201 * Locate blocks within the truncation range. Partial blocks do not count.
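 *
 * For example, with a truncation offset of 0x11000 a DATA record with
 * key 0x14000 and data_len 0x4000 (covering [0x10000, 0x14000)) starts
 * below the truncation point and is retained; the straddled block is
 * dealt with by the vnops setattr code instead.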
205 hammer_rec_trunc_cmp(hammer_record_t rec, void *data)
207 struct rec_trunc_info *info = data;
209 if (rec->leaf.base.rec_type < info->rec_type)
211 if (rec->leaf.base.rec_type > info->rec_type)
214 switch(rec->leaf.base.rec_type) {
215 case HAMMER_RECTYPE_DB:
217 * DB record key is not beyond the truncation point, retain.
219 if (rec->leaf.base.key < info->trunc_off)
222 case HAMMER_RECTYPE_DATA:
224 * DATA record offset start is not beyond the truncation point,
227 if (rec->leaf.base.key - rec->leaf.data_len < info->trunc_off)
231 panic("hammer_rec_trunc_cmp: unexpected record type");
235 * The record start is >= the truncation point, return match,
236 * the record should be destroyed.
241 RB_GENERATE(hammer_rec_rb_tree, hammer_record, rb_node, hammer_rec_rb_compare);
242 RB_GENERATE_XLOOKUP(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
243 hammer_rec_overlap_compare, hammer_btree_leaf_elm_t);
246 * Allocate a record for the caller to finish filling in. The record is
247 * returned referenced.
250 hammer_alloc_mem_record(hammer_inode_t ip, int data_len)
252 hammer_record_t record;
254 ++hammer_count_records;
255 record = kmalloc(sizeof(*record), M_HAMMER,
256 M_WAITOK | M_ZERO | M_USE_RESERVE);
257 record->flush_state = HAMMER_FST_IDLE;
259 record->leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
260 record->leaf.data_len = data_len;
261 hammer_ref(&record->lock);
264 record->data = kmalloc(data_len, M_HAMMER, M_WAITOK | M_ZERO);
265 record->flags |= HAMMER_RECF_ALLOCDATA;
266 ++hammer_count_record_datas;
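
/*
 * Wait for a record to leave the HAMMER_FST_FLUSH state.  The waiter
 * sets HAMMER_RECF_WANTED and sleeps on the record; the backend wakes
 * it up once the flush completes (see hammer_flush_record_done()).
 */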
273 hammer_wait_mem_record_ident(hammer_record_t record, const char *ident)
275 while (record->flush_state == HAMMER_FST_FLUSH) {
276 record->flags |= HAMMER_RECF_WANTED;
277 tsleep(record, 0, ident, 0);
282 * Called from the backend, hammer_inode.c, after a record has been
283 * flushed to disk. The record has been exclusively locked by the
284 * caller and interlocked with BE.
286 * We clean up the state, unlock, and release the record (the record
287 * was referenced by the fact that it was in the HAMMER_FST_FLUSH state).
290 hammer_flush_record_done(hammer_record_t record, int error)
292 hammer_inode_t target_ip;
294 KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
295 KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
 * An error occurred; the backend was unable to sync the
300 * record to its media. Leave the record intact.
302 Debugger("flush_record_done error");
305 if (record->flags & HAMMER_RECF_DELETED_BE) {
306 if ((target_ip = record->target_ip) != NULL) {
307 TAILQ_REMOVE(&target_ip->target_list, record,
309 record->target_ip = NULL;
310 hammer_test_inode(target_ip);
312 record->flush_state = HAMMER_FST_IDLE;
314 if (record->target_ip) {
315 record->flush_state = HAMMER_FST_SETUP;
316 hammer_test_inode(record->ip);
317 hammer_test_inode(record->target_ip);
319 record->flush_state = HAMMER_FST_IDLE;
322 record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
323 if (record->flags & HAMMER_RECF_WANTED) {
324 record->flags &= ~HAMMER_RECF_WANTED;
327 hammer_rel_mem_record(record);
331 * Release a memory record. Records marked for deletion are immediately
 * removed from the RB-Tree but otherwise left intact until the last ref
 * goes away.
336 hammer_rel_mem_record(struct hammer_record *record)
338 hammer_inode_t ip, target_ip;
340 hammer_unref(&record->lock);
342 if (record->lock.refs == 0) {
344 * Upon release of the last reference wakeup any waiters.
345 * The record structure may get destroyed so callers will
346 * loop up and do a relookup.
348 * WARNING! Record must be removed from RB-TREE before we
349 * might possibly block. hammer_test_inode() can block!
 * Upon release of the last reference a record marked deleted
 * is destroyed.
357 if (record->flags & HAMMER_RECF_DELETED_FE) {
358 KKASSERT(ip->lock.refs > 0);
359 KKASSERT(record->flush_state != HAMMER_FST_FLUSH);
 * target_ip may have zero refs; we have to ref it
 * to prevent it from being ripped out from under us.
366 if ((target_ip = record->target_ip) != NULL) {
367 TAILQ_REMOVE(&target_ip->target_list,
368 record, target_entry);
369 record->target_ip = NULL;
370 hammer_ref(&target_ip->lock);
373 if (record->flags & HAMMER_RECF_ONRBTREE) {
374 RB_REMOVE(hammer_rec_rb_tree,
375 &record->ip->rec_tree,
377 KKASSERT(ip->rsv_recs > 0);
380 ip->hmp->rsv_databytes -= record->leaf.data_len;
381 record->flags &= ~HAMMER_RECF_ONRBTREE;
383 if (RB_EMPTY(&record->ip->rec_tree)) {
384 record->ip->flags &= ~HAMMER_INODE_XDIRTY;
385 record->ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
386 hammer_test_inode(record->ip);
391 * Do this test after removing record from the B-Tree.
394 hammer_test_inode(target_ip);
395 hammer_rel_inode(target_ip, 0);
398 if (record->flags & HAMMER_RECF_ALLOCDATA) {
399 --hammer_count_record_datas;
400 kfree(record->data, M_HAMMER);
401 record->flags &= ~HAMMER_RECF_ALLOCDATA;
404 hammer_blockmap_reserve_complete(ip->hmp,
409 --hammer_count_records;
410 kfree(record, M_HAMMER);
416 * Record visibility depends on whether the record is being accessed by
417 * the backend or the frontend.
 * Return non-zero if the record is visible, zero if it isn't or if it is
 * deleted.
424 hammer_ip_iterate_mem_good(hammer_cursor_t cursor, hammer_record_t record)
426 if (cursor->flags & HAMMER_CURSOR_BACKEND) {
427 if (record->flags & HAMMER_RECF_DELETED_BE)
430 if (record->flags & HAMMER_RECF_DELETED_FE)
437 * This callback is used as part of the RB_SCAN function for in-memory
438 * records. We terminate it (return -1) as soon as we get a match.
440 * This routine is used by frontend code.
442 * The primary compare code does not account for ASOF lookups. This
443 * code handles that case as well as a few others.
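 *
 * For an as-of cursor a record is only visible when
 * create_tid <= asof and (delete_tid == 0 or asof < delete_tid),
 * mirroring the visibility checks applied to B-Tree elements.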
447 hammer_rec_scan_callback(hammer_record_t rec, void *data)
449 hammer_cursor_t cursor = data;
452 * We terminate on success, so this should be NULL on entry.
454 KKASSERT(cursor->iprec == NULL);
457 * Skip if the record was marked deleted.
459 if (hammer_ip_iterate_mem_good(cursor, rec) == 0)
463 * Skip if not visible due to our as-of TID
465 if (cursor->flags & HAMMER_CURSOR_ASOF) {
466 if (cursor->asof < rec->leaf.base.create_tid)
468 if (rec->leaf.base.delete_tid &&
469 cursor->asof >= rec->leaf.base.delete_tid) {
475 * If the record is queued to the flusher we have to block until
476 * it isn't. Otherwise we may see duplication between our memory
477 * cache and the media.
479 hammer_ref(&rec->lock);
481 #warning "This deadlocks"
483 if (rec->flush_state == HAMMER_FST_FLUSH)
484 hammer_wait_mem_record(rec);
488 * The record may have been deleted while we were blocked.
490 if (hammer_ip_iterate_mem_good(cursor, rec) == 0) {
491 hammer_rel_mem_record(rec);
496 * Set the matching record and stop the scan.
504 * Lookup an in-memory record given the key specified in the cursor. Works
 * just like hammer_btree_lookup() but operates on an inode's in-memory
 * record list.
508 * The lookup must fail if the record is marked for deferred deletion.
512 hammer_mem_lookup(hammer_cursor_t cursor)
516 KKASSERT(cursor->ip);
518 hammer_rel_mem_record(cursor->iprec);
519 cursor->iprec = NULL;
521 hammer_rec_rb_tree_RB_SCAN(&cursor->ip->rec_tree, hammer_rec_find_cmp,
522 hammer_rec_scan_callback, cursor);
	if (cursor->iprec == NULL)
		return(ENOENT);
532 * hammer_mem_first() - locate the first in-memory record matching the
533 * cursor within the bounds of the key range.
537 hammer_mem_first(hammer_cursor_t cursor)
542 KKASSERT(ip != NULL);
545 hammer_rel_mem_record(cursor->iprec);
546 cursor->iprec = NULL;
549 hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
550 hammer_rec_scan_callback, cursor);
553 * Adjust scan.node and keep it linked into the RB-tree so we can
554 * hold the cursor through third party modifications of the RB-tree.
562 hammer_mem_done(hammer_cursor_t cursor)
565 hammer_rel_mem_record(cursor->iprec);
566 cursor->iprec = NULL;
570 /************************************************************************
571 * HAMMER IN-MEMORY RECORD FUNCTIONS *
572 ************************************************************************
574 * These functions manipulate in-memory records. Such records typically
575 * exist prior to being committed to disk or indexed via the on-disk B-Tree.
579 * Add a directory entry (dip,ncp) which references inode (ip).
581 * Note that the low 32 bits of the namekey are set temporarily to create
582 * a unique in-memory record, and may be modified a second time when the
583 * record is synchronized to disk. In particular, the low 32 bits cannot be
584 * all 0's when synching to disk, which is not handled here.
 * NOTE: bytes does not include any terminating \0 on name, and name might
 * not be NULL-terminated.
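 *
 * The upper 32 bits of the key come from hammer_directory_namekey()
 * (a hash of the name) while the low 32 bits come from the per-mount
 * namekey_iterator; if the combined key collides with an existing
 * entry the low 32 bits are re-iterated until an unused key is found
 * (see the lookup loop below).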
590 hammer_ip_add_directory(struct hammer_transaction *trans,
591 struct hammer_inode *dip, const char *name, int bytes,
592 struct hammer_inode *ip)
594 struct hammer_cursor cursor;
595 hammer_record_t record;
600 record = hammer_alloc_mem_record(dip, HAMMER_ENTRY_SIZE(bytes));
601 if (++trans->hmp->namekey_iterator == 0)
602 ++trans->hmp->namekey_iterator;
604 record->type = HAMMER_MEM_RECORD_ADD;
605 record->leaf.base.localization = dip->obj_localization +
606 HAMMER_LOCALIZE_MISC;
607 record->leaf.base.obj_id = dip->obj_id;
608 record->leaf.base.key = hammer_directory_namekey(name, bytes);
609 record->leaf.base.key += trans->hmp->namekey_iterator;
610 record->leaf.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
611 record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
612 record->data->entry.obj_id = ip->obj_id;
613 record->data->entry.localization = ip->obj_localization;
614 bcopy(name, record->data->entry.name, bytes);
616 ++ip->ino_data.nlinks;
617 hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
620 * Find an unused namekey. Both the in-memory record tree and
621 * the B-Tree are checked. Exact matches also match create_tid
622 * so use an ASOF search to (mostly) ignore it.
624 * delete-visibility is set so pending deletions do not give us
625 * a false-negative on our ability to use an iterator.
627 hammer_init_cursor(trans, &cursor, &dip->cache[1], dip);
628 cursor.key_beg = record->leaf.base;
629 cursor.flags |= HAMMER_CURSOR_ASOF;
630 cursor.flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
631 cursor.asof = ip->obj_asof;
634 while (hammer_ip_lookup(&cursor) == 0) {
635 iterator = (u_int32_t)record->leaf.base.key + 1;
638 record->leaf.base.key &= ~0xFFFFFFFFLL;
639 record->leaf.base.key |= iterator;
640 cursor.key_beg.key = record->leaf.base.key;
641 if (++count == 1000000000) {
642 hammer_rel_mem_record(record);
649 * The target inode and the directory entry are bound together.
651 record->target_ip = ip;
652 record->flush_state = HAMMER_FST_SETUP;
653 TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);
 * The inode now has a dependency and must be taken out of the idle
657 * state. An inode not in an idle state is given an extra reference.
659 if (ip->flush_state == HAMMER_FST_IDLE) {
660 hammer_ref(&ip->lock);
661 ip->flush_state = HAMMER_FST_SETUP;
663 error = hammer_mem_add(record);
665 hammer_done_cursor(&cursor);
670 * Delete the directory entry and update the inode link count. The
671 * cursor must be seeked to the directory entry record being deleted.
673 * The related inode should be share-locked by the caller. The caller is
676 * This function can return EDEADLK requiring the caller to terminate
677 * the cursor, any locks, wait on the returned record, and retry.
680 hammer_ip_del_directory(struct hammer_transaction *trans,
681 hammer_cursor_t cursor, struct hammer_inode *dip,
682 struct hammer_inode *ip)
684 hammer_record_t record;
687 if (hammer_cursor_inmem(cursor)) {
689 * In-memory (unsynchronized) records can simply be freed.
690 * Even though the HAMMER_RECF_DELETED_FE flag is ignored
691 * by the backend, we must still avoid races against the
692 * backend potentially syncing the record to the media.
694 * We cannot call hammer_ip_delete_record(), that routine may
695 * only be called from the backend.
697 record = cursor->iprec;
698 if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
699 KKASSERT(cursor->deadlk_rec == NULL);
700 hammer_ref(&record->lock);
701 cursor->deadlk_rec = record;
704 KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
705 record->flags |= HAMMER_RECF_DELETED_FE;
710 * If the record is on-disk we have to queue the deletion by
 * the record's key.  This also causes lookups to skip the
 * record.
714 KKASSERT(dip->flags &
715 (HAMMER_INODE_ONDISK | HAMMER_INODE_DONDISK));
716 record = hammer_alloc_mem_record(dip, 0);
717 record->type = HAMMER_MEM_RECORD_DEL;
718 record->leaf.base = cursor->leaf->base;
720 record->target_ip = ip;
721 record->flush_state = HAMMER_FST_SETUP;
722 TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);
 * The inode now has a dependency and must be taken out of
726 * the idle state. An inode not in an idle state is given
727 * an extra reference.
729 if (ip->flush_state == HAMMER_FST_IDLE) {
730 hammer_ref(&ip->lock);
731 ip->flush_state = HAMMER_FST_SETUP;
734 error = hammer_mem_add(record);
738 * One less link. The file may still be open in the OS even after
739 * all links have gone away.
741 * We have to terminate the cursor before syncing the inode to
 * avoid deadlocking against ourselves.  XXX this may no longer
 * be true.
 *
745 * If nlinks drops to zero and the vnode is inactive (or there is
746 * no vnode), call hammer_inode_unloadable_check() to zonk the
747 * inode. If we don't do this here the inode will not be destroyed
748 * on-media until we unmount.
751 --ip->ino_data.nlinks;
752 hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
753 if (ip->ino_data.nlinks == 0 &&
754 (ip->vp == NULL || (ip->vp->v_flag & VINACTIVE))) {
755 hammer_done_cursor(cursor);
756 hammer_inode_unloadable_check(ip, 1);
757 hammer_flush_inode(ip, 0);
765 * Add a record to an inode.
767 * The caller must allocate the record with hammer_alloc_mem_record(ip) and
768 * initialize the following additional fields:
770 * The related inode should be share-locked by the caller. The caller is
 * record->leaf.base.key
 * record->leaf.base.rec_type
 * record->leaf.data_len
776 * record->data (a copy will be kmalloc'd if it cannot be embedded)
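 *
 * A typical caller thus looks roughly like this (sketch only):
 *
 *	record = hammer_alloc_mem_record(ip, datalen);
 *	record->leaf.base.localization = ...;
 *	record->leaf.base.key = ...;
 *	record->leaf.base.rec_type = ...;
 *	bcopy(src, record->data, datalen);
 *	error = hammer_ip_add_record(trans, record);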
779 hammer_ip_add_record(struct hammer_transaction *trans, hammer_record_t record)
781 hammer_inode_t ip = record->ip;
784 KKASSERT(record->leaf.base.localization != 0);
785 record->leaf.base.obj_id = ip->obj_id;
786 record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
787 error = hammer_mem_add(record);
792 * Locate a bulk record in-memory. Bulk records allow disk space to be
793 * reserved so the front-end can flush large data writes without having
 * to queue the BIO to the flusher.  Only the related record gets queued
 * to the flusher.
797 static hammer_record_t
798 hammer_ip_get_bulk(hammer_inode_t ip, off_t file_offset, int bytes)
800 hammer_record_t record;
801 struct hammer_btree_leaf_elm leaf;
803 bzero(&leaf, sizeof(leaf));
804 leaf.base.obj_id = ip->obj_id;
805 leaf.base.key = file_offset + bytes;
806 leaf.base.create_tid = 0;
807 leaf.base.delete_tid = 0;
808 leaf.base.rec_type = HAMMER_RECTYPE_DATA;
809 leaf.base.obj_type = 0; /* unused */
810 leaf.base.btype = HAMMER_BTREE_TYPE_RECORD; /* unused */
811 leaf.base.localization = ip->obj_localization + HAMMER_LOCALIZE_MISC;
812 leaf.data_len = bytes;
814 record = hammer_rec_rb_tree_RB_LOOKUP_INFO(&ip->rec_tree, &leaf);
816 hammer_ref(&record->lock);
821 * Reserve blockmap space placemarked with an in-memory record.
823 * This routine is called by the frontend in order to be able to directly
824 * flush a buffer cache buffer. The frontend has locked the related buffer
 * cache buffers and we should be able to manipulate any overlapping
 * in-memory records.
829 hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, void *data, int bytes,
832 hammer_record_t record;
833 hammer_record_t conflict;
838 * Deal with conflicting in-memory records. We cannot have multiple
839 * in-memory records for the same offset without seriously confusing
840 * the backend, including but not limited to the backend issuing
841 * delete-create-delete sequences and asserting on the delete_tid
842 * being the same as the create_tid.
844 * If we encounter a record with the backend interlock set we cannot
845 * immediately delete it without confusing the backend.
	while ((conflict = hammer_ip_get_bulk(ip, file_offset, bytes)) != NULL) {
848 if (conflict->flags & HAMMER_RECF_INTERLOCK_BE) {
849 conflict->flags |= HAMMER_RECF_WANTED;
850 tsleep(conflict, 0, "hmrrc3", 0);
852 conflict->flags |= HAMMER_RECF_DELETED_FE;
854 hammer_rel_mem_record(conflict);
858 * Create a record to cover the direct write. This is called with
859 * the related BIO locked so there should be no possible conflict.
 * The backend is responsible for finalizing the space reserved in
 * this record.
864 * XXX bytes not aligned, depend on the reservation code to
865 * align the reservation.
867 record = hammer_alloc_mem_record(ip, 0);
868 zone = (bytes >= HAMMER_BUFSIZE) ? HAMMER_ZONE_LARGE_DATA_INDEX :
869 HAMMER_ZONE_SMALL_DATA_INDEX;
870 record->resv = hammer_blockmap_reserve(ip->hmp, zone, bytes,
871 &record->leaf.data_offset,
873 if (record->resv == NULL) {
874 kprintf("hammer_ip_add_bulk: reservation failed\n");
875 hammer_rel_mem_record(record);
878 record->type = HAMMER_MEM_RECORD_DATA;
879 record->leaf.base.rec_type = HAMMER_RECTYPE_DATA;
880 record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
881 record->leaf.base.obj_id = ip->obj_id;
882 record->leaf.base.key = file_offset + bytes;
883 record->leaf.base.localization = ip->obj_localization +
884 HAMMER_LOCALIZE_MISC;
885 record->leaf.data_len = bytes;
886 hammer_crc_set_leaf(data, &record->leaf);
887 flags = record->flags;
889 hammer_ref(&record->lock); /* mem_add eats a reference */
890 *errorp = hammer_mem_add(record);
892 conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
893 kprintf("hammer_ip_add_bulk: error %d conflict %p file_offset %lld bytes %d\n",
894 *errorp, conflict, file_offset, bytes);
896 kprintf("conflict %lld %d\n", conflict->leaf.base.key, conflict->leaf.data_len);
898 hammer_rel_mem_record(conflict);
900 KKASSERT(*errorp == 0);
901 conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
902 if (conflict != record) {
903 kprintf("conflict mismatch %p %p %08x\n", conflict, record, record->flags);
905 kprintf("conflict mismatch %lld/%d %lld/%d\n", conflict->leaf.base.key, conflict->leaf.data_len, record->leaf.base.key, record->leaf.data_len);
907 KKASSERT(conflict == record);
908 hammer_rel_mem_record(conflict);
914 * Frontend truncation code. Scan in-memory records only. On-disk records
915 * and records in a flushing state are handled by the backend. The vnops
916 * setattr code will handle the block containing the truncation point.
918 * Partial blocks are not deleted.
921 hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size)
923 struct rec_trunc_info info;
925 switch(ip->ino_data.obj_type) {
926 case HAMMER_OBJTYPE_REGFILE:
927 info.rec_type = HAMMER_RECTYPE_DATA;
929 case HAMMER_OBJTYPE_DBFILE:
930 info.rec_type = HAMMER_RECTYPE_DB;
935 info.trunc_off = file_size;
936 hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_trunc_cmp,
937 hammer_frontend_trunc_callback, &info);
942 hammer_frontend_trunc_callback(hammer_record_t record, void *data __unused)
944 if (record->flags & HAMMER_RECF_DELETED_FE)
946 if (record->flush_state == HAMMER_FST_FLUSH)
948 KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
949 hammer_ref(&record->lock);
950 record->flags |= HAMMER_RECF_DELETED_FE;
951 hammer_rel_mem_record(record);
956 * Return 1 if the caller must check for and delete existing records
957 * before writing out a new data record.
 * Return 0 if the caller can just insert the record into the B-Tree without
 * deleting anything.
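 *
 * The decision is based on ip->save_trunc_off: if the record's base
 * file offset lies below save_trunc_off older on-media data may still
 * exist underneath it and must be deleted first.  save_trunc_off is
 * then advanced past this record so later records need not repeat the
 * check.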
963 hammer_record_needs_overwrite_delete(hammer_record_t record)
965 hammer_inode_t ip = record->ip;
969 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE)
970 file_offset = record->leaf.base.key;
972 file_offset = record->leaf.base.key - record->leaf.data_len;
973 r = (file_offset < ip->save_trunc_off);
974 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
975 if (ip->save_trunc_off <= record->leaf.base.key)
976 ip->save_trunc_off = record->leaf.base.key + 1;
978 if (ip->save_trunc_off < record->leaf.base.key)
979 ip->save_trunc_off = record->leaf.base.key;
985 * Backend code. Sync a record to the media.
988 hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t record)
990 hammer_transaction_t trans = cursor->trans;
996 KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
997 KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
998 KKASSERT(record->leaf.base.localization != 0);
1001 * If this is a bulk-data record placemarker there may be an existing
1002 * record on-disk, indicating a data overwrite. If there is the
1003 * on-disk record must be deleted before we can insert our new record.
1005 * We've synthesized this record and do not know what the create_tid
1006 * on-disk is, nor how much data it represents.
1008 * Keep in mind that (key) for data records is (base_offset + len),
1009 * not (base_offset). Also, we only want to get rid of on-disk
 * records since we are trying to sync our in-memory record; call
1011 * hammer_ip_delete_range() with truncating set to 1 to make sure
1012 * it skips in-memory records.
1014 * It is ok for the lookup to return ENOENT.
1016 * NOTE OPTIMIZATION: sync_trunc_off is used to determine if we have
1017 * to call hammer_ip_delete_range() or not. This also means we must
 * update sync_trunc_off as we write.
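 *
 * For example, a 16KB bulk record at file offset 0x10000 carries key
 * 0x14000 and data_len 0x4000, so the delete pass below covers offsets
 * 0x10000 through 0x13FFF inclusive.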
1020 if (record->type == HAMMER_MEM_RECORD_DATA &&
1021 hammer_record_needs_overwrite_delete(record)) {
1022 file_offset = record->leaf.base.key - record->leaf.data_len;
1023 bytes = (record->leaf.data_len + HAMMER_BUFMASK) &
1025 KKASSERT((file_offset & HAMMER_BUFMASK) == 0);
1026 error = hammer_ip_delete_range(
1028 file_offset, file_offset + bytes - 1,
1030 if (error && error != ENOENT)
1037 hammer_normalize_cursor(cursor);
1038 cursor->key_beg = record->leaf.base;
1039 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1040 cursor->flags |= HAMMER_CURSOR_BACKEND;
1041 cursor->flags &= ~HAMMER_CURSOR_INSERT;
1044 * Records can wind up on-media before the inode itself is on-media.
1047 record->ip->flags |= HAMMER_INODE_DONDISK;
 * If we are deleting a directory entry an exact match must be
 * found on-disk.
1053 if (record->type == HAMMER_MEM_RECORD_DEL) {
1054 error = hammer_btree_lookup(cursor);
1056 error = hammer_ip_delete_record(cursor, record->ip,
1059 record->flags |= HAMMER_RECF_DELETED_FE;
1060 record->flags |= HAMMER_RECF_DELETED_BE;
1069 * Issue a lookup to position the cursor and locate the cluster. The
1070 * target key should not exist. If we are creating a directory entry
 * we may have to iterate the low 32 bits of the key to find an unused
 * key.
1074 cursor->flags |= HAMMER_CURSOR_INSERT;
1076 error = hammer_btree_lookup(cursor);
1077 if (hammer_debug_inode)
1078 kprintf("DOINSERT LOOKUP %d\n", error);
1080 kprintf("hammer_ip_sync_record: duplicate rec "
1081 "at (%016llx)\n", record->leaf.base.key);
1082 Debugger("duplicate record1");
1086 if (record->type == HAMMER_MEM_RECORD_DATA)
1087 kprintf("sync_record %016llx ---------------- %016llx %d\n",
1088 record->leaf.base.key - record->leaf.data_len,
1089 record->leaf.data_offset, error);
1092 if (error != ENOENT)
1096 * Allocate the record and data. The result buffers will be
1097 * marked as being modified and further calls to
1098 * hammer_modify_buffer() will result in unneeded UNDO records.
1100 * Support zero-fill records (data == NULL and data_len != 0)
1102 if (record->type == HAMMER_MEM_RECORD_DATA) {
1104 * The data portion of a bulk-data record has already been
1105 * committed to disk, we need only adjust the layer2
1106 * statistics in the same transaction as our B-Tree insert.
1108 KKASSERT(record->leaf.data_offset != 0);
1109 hammer_blockmap_finalize(trans, record->leaf.data_offset,
1110 record->leaf.data_len);
1112 } else if (record->data && record->leaf.data_len) {
 * Wholly cached record, with data.  Allocate the data.
1116 bdata = hammer_alloc_data(trans, record->leaf.data_len,
1117 record->leaf.base.rec_type,
1118 &record->leaf.data_offset,
1119 &cursor->data_buffer, &error);
1122 hammer_crc_set_leaf(record->data, &record->leaf);
1123 hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);
1124 bcopy(record->data, bdata, record->leaf.data_len);
1125 hammer_modify_buffer_done(cursor->data_buffer);
 * Wholly cached record, without data.
1130 record->leaf.data_offset = 0;
1131 record->leaf.data_crc = 0;
1134 error = hammer_btree_insert(cursor, &record->leaf);
1135 if (hammer_debug_inode && error)
1136 kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n", error, cursor->node->node_offset, cursor->index, record->leaf.base.key);
1139 * Our record is on-disk, normally mark the in-memory version as
1140 * deleted. If the record represented a directory deletion but
1141 * we had to sync a valid directory entry to disk we must convert
1142 * the record to a covering delete so the frontend does not have
1143 * visibility on the synced entry.
1146 if (record->flags & HAMMER_RECF_CONVERT_DELETE) {
1147 KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
1148 record->flags &= ~HAMMER_RECF_DELETED_FE;
1149 record->type = HAMMER_MEM_RECORD_DEL;
1150 KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
1151 record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
1152 /* hammer_flush_record_done takes care of the rest */
1154 record->flags |= HAMMER_RECF_DELETED_FE;
1155 record->flags |= HAMMER_RECF_DELETED_BE;
1158 if (record->leaf.data_offset) {
1159 hammer_blockmap_free(trans, record->leaf.data_offset,
1160 record->leaf.data_len);
1169 * Add the record to the inode's rec_tree. The low 32 bits of a directory
1170 * entry's key is used to deal with hash collisions in the upper 32 bits.
1171 * A unique 64 bit key is generated in-memory and may be regenerated a
1172 * second time when the directory record is flushed to the on-disk B-Tree.
1174 * A referenced record is passed to this function. This function
1175 * eats the reference. If an error occurs the record will be deleted.
 * A copy of the temporary record->data pointer provided by the caller
 * will be made.
1182 hammer_mem_add(hammer_record_t record)
1184 hammer_mount_t hmp = record->ip->hmp;
1187 * Make a private copy of record->data
1190 KKASSERT(record->flags & HAMMER_RECF_ALLOCDATA);
1193 * Insert into the RB tree. A unique key should have already
1194 * been selected if this is a directory entry.
1196 if (RB_INSERT(hammer_rec_rb_tree, &record->ip->rec_tree, record)) {
1197 record->flags |= HAMMER_RECF_DELETED_FE;
1198 hammer_rel_mem_record(record);
1201 ++hmp->count_newrecords;
1203 ++record->ip->rsv_recs;
1204 record->ip->hmp->rsv_databytes += record->leaf.data_len;
1205 record->flags |= HAMMER_RECF_ONRBTREE;
1206 hammer_modify_inode(record->ip, HAMMER_INODE_XDIRTY);
1207 hammer_rel_mem_record(record);
1211 /************************************************************************
1212 * HAMMER INODE MERGED-RECORD FUNCTIONS *
1213 ************************************************************************
1215 * These functions augment the B-Tree scanning functions in hammer_btree.c
1216 * by merging in-memory records with on-disk records.
1220 * Locate a particular record either in-memory or on-disk.
1222 * NOTE: This is basically a standalone routine, hammer_ip_next() may
1223 * NOT be called to iterate results.
1226 hammer_ip_lookup(hammer_cursor_t cursor)
 * If the element is in-memory return it without searching the
 * on-disk B-Tree.
1234 KKASSERT(cursor->ip);
1235 error = hammer_mem_lookup(cursor);
1237 cursor->leaf = &cursor->iprec->leaf;
1240 if (error != ENOENT)
1244 * If the inode has on-disk components search the on-disk B-Tree.
1246 if ((cursor->ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) == 0)
1248 error = hammer_btree_lookup(cursor);
1250 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1255 * Locate the first record within the cursor's key_beg/key_end range,
1256 * restricted to a particular inode. 0 is returned on success, ENOENT
1257 * if no records matched the requested range, or some other error.
1259 * When 0 is returned hammer_ip_next() may be used to iterate additional
1260 * records within the requested range.
1262 * This function can return EDEADLK, requiring the caller to terminate
1263 * the cursor and try again.
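 *
 * Callers typically iterate as follows (sketch):
 *
 *	error = hammer_ip_first(cursor);
 *	while (error == 0) {
 *		... examine cursor->leaf ...
 *		error = hammer_ip_next(cursor);
 *	}
 *	if (error == ENOENT)
 *		error = 0;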
1266 hammer_ip_first(hammer_cursor_t cursor)
1268 hammer_inode_t ip = cursor->ip;
1271 KKASSERT(ip != NULL);
1274 * Clean up fields and setup for merged scan
1276 cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
1277 cursor->flags |= HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM;
1278 cursor->flags |= HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_MEMEOF;
1279 if (cursor->iprec) {
1280 hammer_rel_mem_record(cursor->iprec);
1281 cursor->iprec = NULL;
1285 * Search the on-disk B-Tree. hammer_btree_lookup() only does an
1286 * exact lookup so if we get ENOENT we have to call the iterate
1287 * function to validate the first record after the begin key.
1289 * The ATEDISK flag is used by hammer_btree_iterate to determine
1290 * whether it must index forwards or not. It is also used here
1291 * to select the next record from in-memory or on-disk.
1293 * EDEADLK can only occur if the lookup hit an empty internal
1294 * element and couldn't delete it. Since this could only occur
1295 * in-range, we can just iterate from the failure point.
1297 if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
1298 error = hammer_btree_lookup(cursor);
1299 if (error == ENOENT || error == EDEADLK) {
1300 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1301 if (hammer_debug_general & 0x2000)
1302 kprintf("error %d node %p %016llx index %d\n", error, cursor->node, cursor->node->node_offset, cursor->index);
1303 error = hammer_btree_iterate(cursor);
1305 if (error && error != ENOENT)
1308 cursor->flags &= ~HAMMER_CURSOR_DISKEOF;
1309 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1311 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1316 * Search the in-memory record list (Red-Black tree). Unlike the
1317 * B-Tree search, mem_first checks for records in the range.
1319 error = hammer_mem_first(cursor);
1320 if (error && error != ENOENT)
1323 cursor->flags &= ~HAMMER_CURSOR_MEMEOF;
1324 cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
1325 if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0)
1326 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1330 * This will return the first matching record.
1332 return(hammer_ip_next(cursor));
1336 * Retrieve the next record in a merged iteration within the bounds of the
1337 * cursor. This call may be made multiple times after the cursor has been
1338 * initially searched with hammer_ip_first().
1340 * 0 is returned on success, ENOENT if no further records match the
1341 * requested range, or some other error code is returned.
1344 hammer_ip_next(hammer_cursor_t cursor)
1346 hammer_btree_elm_t elm;
1347 hammer_record_t rec, save;
1353 * Load the current on-disk and in-memory record. If we ate any
1354 * records we have to get the next one.
1356 * If we deleted the last on-disk record we had scanned ATEDISK will
1357 * be clear and DELBTREE will be set, forcing a call to iterate. The
1358 * fact that ATEDISK is clear causes iterate to re-test the 'current'
 * element.  If ATEDISK is set, iterate will skip the 'current'
 * element.
1362 * Get the next on-disk record
1364 if (cursor->flags & (HAMMER_CURSOR_ATEDISK|HAMMER_CURSOR_DELBTREE)) {
1365 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
1366 error = hammer_btree_iterate(cursor);
1367 cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
1369 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1370 hammer_cache_node(&cursor->ip->cache[1],
1373 cursor->flags |= HAMMER_CURSOR_DISKEOF |
1374 HAMMER_CURSOR_ATEDISK;
1381 * Get the next in-memory record. The record can be ripped out
1382 * of the RB tree so we maintain a scan_info structure to track
1385 * hammer_rec_scan_cmp: Is the record still in our general range,
1386 * (non-inclusive of snapshot exclusions)?
1387 * hammer_rec_scan_callback: Is the record in our snapshot?
1389 if (cursor->flags & HAMMER_CURSOR_ATEMEM) {
1390 if ((cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
1391 save = cursor->iprec;
1392 cursor->iprec = NULL;
1393 rec = save ? hammer_rec_rb_tree_RB_NEXT(save) : NULL;
1395 if (hammer_rec_scan_cmp(rec, cursor) != 0)
1397 if (hammer_rec_scan_callback(rec, cursor) != 0)
1399 rec = hammer_rec_rb_tree_RB_NEXT(rec);
1402 hammer_rel_mem_record(save);
1403 if (cursor->iprec) {
1404 KKASSERT(cursor->iprec == rec);
1405 cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
1407 cursor->flags |= HAMMER_CURSOR_MEMEOF;
1413 * The memory record may have become stale while being held in
 * cursor->iprec.  We are interlocked against the backend
 * with regard to B-Tree entries.
1417 if ((cursor->flags & HAMMER_CURSOR_ATEMEM) == 0) {
1418 if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0) {
1419 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1425 * Extract either the disk or memory record depending on their
1426 * relative position.
1429 switch(cursor->flags & (HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM)) {
1432 * Both entries valid. Compare the entries and nominally
1433 * return the first one in the sort order. Numerous cases
1434 * require special attention, however.
1436 elm = &cursor->node->ondisk->elms[cursor->index];
1437 r = hammer_btree_cmp(&elm->base, &cursor->iprec->leaf.base);
1440 * If the two entries differ only by their key (-2/2) or
1441 * create_tid (-1/1), and are DATA records, we may have a
1442 * nominal match. We have to calculate the base file
1443 * offset of the data.
1445 if (r <= 2 && r >= -2 && r != 0 &&
1446 cursor->ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE &&
1447 cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
1448 int64_t base1 = elm->leaf.base.key - elm->leaf.data_len;
1449 int64_t base2 = cursor->iprec->leaf.base.key -
1450 cursor->iprec->leaf.data_len;
1456 error = hammer_btree_extract(cursor,
1457 HAMMER_CURSOR_GET_LEAF);
1458 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1463 * If the entries match exactly the memory entry is either
1464 * an on-disk directory entry deletion or a bulk data
 * overwrite.  If it is a directory entry deletion we eat
 * both entries.
1468 * For the bulk-data overwrite case it is possible to have
1469 * visibility into both, which simply means the syncer
1470 * hasn't gotten around to doing the delete+insert sequence
 * on the B-Tree.  Use the memory entry and throw away the
 * on-disk entry.
1474 * If the in-memory record is not either of these we
1475 * probably caught the syncer while it was syncing it to
1476 * the media. Since we hold a shared lock on the cursor,
1477 * the in-memory record had better be marked deleted at
1481 if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL) {
1482 if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1483 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1484 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1487 } else if (cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
1488 if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1489 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1491 /* fall through to memory entry */
1493 panic("hammer_ip_next: duplicate mem/b-tree entry");
1494 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1498 /* fall through to the memory entry */
1499 case HAMMER_CURSOR_ATEDISK:
1501 * Only the memory entry is valid.
1503 cursor->leaf = &cursor->iprec->leaf;
1504 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1507 * If the memory entry is an on-disk deletion we should have
 * also have found a B-Tree record.  If the backend beat us
 * to it, it would have interlocked the cursor and we should
1510 * have seen the in-memory record marked DELETED_FE.
1512 if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL &&
1513 (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1514 panic("hammer_ip_next: del-on-disk with no b-tree entry");
1517 case HAMMER_CURSOR_ATEMEM:
1519 * Only the disk entry is valid
1521 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1522 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1526 * Neither entry is valid
1528 * XXX error not set properly
1530 cursor->leaf = NULL;
1538 * Resolve the cursor->data pointer for the current cursor position in
1539 * a merged iteration.
1542 hammer_ip_resolve_data(hammer_cursor_t cursor)
1544 hammer_record_t record;
1547 if (hammer_cursor_inmem(cursor)) {
1549 * The data associated with an in-memory record is usually
1550 * kmalloced, but reserve-ahead data records will have an
1551 * on-disk reference.
1553 * NOTE: Reserve-ahead data records must be handled in the
1554 * context of the related high level buffer cache buffer
1555 * to interlock against async writes.
1557 record = cursor->iprec;
1558 cursor->data = record->data;
1560 if (cursor->data == NULL) {
1561 KKASSERT(record->leaf.base.rec_type ==
1562 HAMMER_RECTYPE_DATA);
1563 cursor->data = hammer_bread_ext(cursor->trans->hmp,
1564 record->leaf.data_offset,
1565 record->leaf.data_len,
1567 &cursor->data_buffer);
1570 cursor->leaf = &cursor->node->ondisk->elms[cursor->index].leaf;
1571 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
1577 * Backend truncation / record replacement - delete records in range.
1579 * Delete all records within the specified range for inode ip. In-memory
1580 * records still associated with the frontend are ignored.
1582 * If truncating is non-zero in-memory records associated with the back-end
1583 * are ignored. If truncating is > 1 we can return EWOULDBLOCK.
1587 * * An unaligned range will cause new records to be added to cover
1588 * the edge cases. (XXX not implemented yet).
1590 * * Replacement via reservations (see hammer_ip_sync_record_cursor())
1591 * also do not deal with unaligned ranges.
1593 * * ran_end is inclusive (e.g. 0,1023 instead of 0,1024).
1595 * * Record keys for regular file data have to be special-cased since
1596 * they indicate the end of the range (key = base + bytes).
1598 * * This function may be asked to delete ridiculously huge ranges, for
1599 * example if someone truncates or removes a 1TB regular file. We
1600 * must be very careful on restarts and we may have to stop w/
1601 * EWOULDBLOCK to avoid blowing out the buffer cache.
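 *
 * For regular file data the first possible matching key is therefore
 * ran_beg + 1, since a record whose data begins at ran_beg has a key
 * of (ran_beg + data_len).  The MAXPHYS slop added to key_end covers
 * records that begin inside the range but whose (base + len) key lands
 * beyond ran_end.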
1604 hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
1605 int64_t ran_beg, int64_t ran_end, int truncating)
1607 hammer_transaction_t trans = cursor->trans;
1608 hammer_btree_leaf_elm_t leaf;
1614 kprintf("delete_range %p %016llx-%016llx\n", ip, ran_beg, ran_end);
1617 KKASSERT(trans->type == HAMMER_TRANS_FLS);
1619 hammer_normalize_cursor(cursor);
1620 cursor->key_beg.localization = ip->obj_localization +
1621 HAMMER_LOCALIZE_MISC;
1622 cursor->key_beg.obj_id = ip->obj_id;
1623 cursor->key_beg.create_tid = 0;
1624 cursor->key_beg.delete_tid = 0;
1625 cursor->key_beg.obj_type = 0;
1627 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
1628 cursor->key_beg.key = ran_beg;
1629 cursor->key_beg.rec_type = HAMMER_RECTYPE_DB;
1632 * The key in the B-Tree is (base+bytes), so the first possible
1633 * matching key is ran_beg + 1.
1635 cursor->key_beg.key = ran_beg + 1;
1636 cursor->key_beg.rec_type = HAMMER_RECTYPE_DATA;
1639 cursor->key_end = cursor->key_beg;
1640 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
1641 cursor->key_end.key = ran_end;
1643 tmp64 = ran_end + MAXPHYS + 1; /* work around GCC-4 bug */
1644 if (tmp64 < ran_end)
1645 cursor->key_end.key = 0x7FFFFFFFFFFFFFFFLL;
1647 cursor->key_end.key = ran_end + MAXPHYS + 1;
1650 cursor->asof = ip->obj_asof;
1651 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1652 cursor->flags |= HAMMER_CURSOR_ASOF;
1653 cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
1654 cursor->flags |= HAMMER_CURSOR_BACKEND;
1655 cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE;
1657 error = hammer_ip_first(cursor);
1660 * Iterate through matching records and mark them as deleted.
1662 while (error == 0) {
1663 leaf = cursor->leaf;
1665 KKASSERT(leaf->base.delete_tid == 0);
1668 * There may be overlap cases for regular file data. Also
 * remember the key for a regular file record is (base + len),
 * not (base).
1672 if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
1673 off = leaf->base.key - leaf->data_len;
1675 * Check the left edge case. We currently do not
1676 * split existing records.
1678 if (off < ran_beg) {
1679 panic("hammer left edge case %016llx %d\n",
1680 leaf->base.key, leaf->data_len);
1684 * Check the right edge case. Note that the
1685 * record can be completely out of bounds, which
1686 * terminates the search.
1688 * base->key is exclusive of the right edge while
1689 * ran_end is inclusive of the right edge. The
1690 * (key - data_len) left boundary is inclusive.
1692 * XXX theory-check this test at some point, are
 * we missing a + 1 somewhere?  Note that ran_end
 * is inclusive.
1696 if (leaf->base.key - 1 > ran_end) {
1697 if (leaf->base.key - leaf->data_len > ran_end)
1699 panic("hammer right edge case\n");
1702 off = leaf->base.key;
1706 * Delete the record. When truncating we do not delete
1707 * in-memory (data) records because they represent data
1708 * written after the truncation.
1710 * This will also physically destroy the B-Tree entry and
1711 * data if the retention policy dictates. The function
1712 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
1713 * uses to perform a fixup.
1715 if (truncating == 0 || hammer_cursor_ondisk(cursor)) {
1716 error = hammer_ip_delete_record(cursor, ip, trans->tid);
1718 * If we have built up too many meta-buffers we risk
1719 * deadlocking the kernel and must stop. This can
1720 * occur when deleting ridiculously huge files.
1721 * sync_trunc_off is updated so the next cycle does
1722 * not re-iterate records we have already deleted.
1724 * This is only done with formal truncations.
1726 if (truncating > 1 && error == 0 &&
1727 hammer_flusher_meta_limit(ip->hmp)) {
1728 ip->sync_trunc_off = off;
1729 error = EWOULDBLOCK;
1734 ran_beg = off; /* for restart */
1735 error = hammer_ip_next(cursor);
1738 hammer_cache_node(&ip->cache[1], cursor->node);
1740 if (error == EDEADLK) {
1741 hammer_done_cursor(cursor);
1742 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
	if (error == ENOENT)
		error = 0;
 * This function deletes remaining auxiliary records when an inode is
1753 * being deleted. This function explicitly does not delete the
1754 * inode record, directory entry, data, or db records. Those must be
1755 * properly disposed of prior to this call.
1758 hammer_ip_delete_clean(hammer_cursor_t cursor, hammer_inode_t ip, int *countp)
1760 hammer_transaction_t trans = cursor->trans;
1761 hammer_btree_leaf_elm_t leaf;
1764 KKASSERT(trans->type == HAMMER_TRANS_FLS);
1766 hammer_normalize_cursor(cursor);
1767 cursor->key_beg.localization = ip->obj_localization +
1768 HAMMER_LOCALIZE_MISC;
1769 cursor->key_beg.obj_id = ip->obj_id;
1770 cursor->key_beg.create_tid = 0;
1771 cursor->key_beg.delete_tid = 0;
1772 cursor->key_beg.obj_type = 0;
1773 cursor->key_beg.rec_type = HAMMER_RECTYPE_CLEAN_START;
1774 cursor->key_beg.key = HAMMER_MIN_KEY;
1776 cursor->key_end = cursor->key_beg;
1777 cursor->key_end.rec_type = HAMMER_RECTYPE_MAX;
1778 cursor->key_end.key = HAMMER_MAX_KEY;
1780 cursor->asof = ip->obj_asof;
1781 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1782 cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1783 cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
1784 cursor->flags |= HAMMER_CURSOR_BACKEND;
1786 error = hammer_ip_first(cursor);
1789 * Iterate through matching records and mark them as deleted.
1791 while (error == 0) {
1792 leaf = cursor->leaf;
1794 KKASSERT(leaf->base.delete_tid == 0);
1797 * Mark the record and B-Tree entry as deleted. This will
1798 * also physically delete the B-Tree entry, record, and
1799 * data if the retention policy dictates. The function
1800 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
1801 * uses to perform a fixup.
1803 * Directory entries (and delete-on-disk directory entries)
1804 * must be synced and cannot be deleted.
1806 error = hammer_ip_delete_record(cursor, ip, trans->tid);
1810 error = hammer_ip_next(cursor);
1813 hammer_cache_node(&ip->cache[1], cursor->node);
1814 if (error == EDEADLK) {
1815 hammer_done_cursor(cursor);
1816 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
	if (error == ENOENT)
		error = 0;
1826 * Delete the record at the current cursor. On success the cursor will
1827 * be positioned appropriately for an iteration but may no longer be at
1830 * This routine is only called from the backend.
 * NOTE: This can return EDEADLK, requiring the caller to terminate the
 * cursor and retry.
1836 hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
1839 hammer_off_t zone2_offset;
1840 hammer_record_t iprec;
1841 hammer_btree_elm_t elm;
1846 KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
1848 hmp = cursor->node->hmp;
1851 * In-memory (unsynchronized) records can simply be freed. This
1852 * only occurs in range iterations since all other records are
1853 * individually synchronized. Thus there should be no confusion with
1856 * An in-memory record may be deleted before being committed to disk,
 * but could have been accessed in the meantime.  The backing store
 * may never have been marked allocated and so hammer_blockmap_free() may
1859 * never get called on it. Because of this we have to make sure that
 * we've gotten rid of any related hammer_buffer or buffer cache
 * buffer.
1863 if (hammer_cursor_inmem(cursor)) {
1864 iprec = cursor->iprec;
		KKASSERT((iprec->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
1866 iprec->flags |= HAMMER_RECF_DELETED_FE;
1867 iprec->flags |= HAMMER_RECF_DELETED_BE;
1869 if (iprec->leaf.data_offset && iprec->leaf.data_len) {
1870 zone2_offset = hammer_blockmap_lookup(hmp, iprec->leaf.data_offset, &error);
1871 KKASSERT(error == 0);
1872 hammer_del_buffers(hmp,
1873 iprec->leaf.data_offset,
1875 iprec->leaf.data_len);
1881 * On-disk records are marked as deleted by updating their delete_tid.
 * This does not affect their position in the B-Tree (which is based
1883 * on their create_tid).
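 *
 * For example, a record created at tid T1 and deleted here at tid T2
 * remains visible to historical as-of lookups with T1 <= asof < T2,
 * while normal (current) lookups no longer see it.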
1885 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1889 * If we were mounted with the nohistory option, we physically
1890 * delete the record.
1892 dodelete = hammer_nohistory(ip);
1895 error = hammer_cursor_upgrade(cursor);
1897 elm = &cursor->node->ondisk->elms[cursor->index];
1898 hammer_modify_node(cursor->trans, cursor->node,
1900 elm->leaf.base.delete_tid = tid;
1901 elm->leaf.delete_ts = cursor->trans->time32;
1902 hammer_modify_node_done(cursor->node);
1905 * An on-disk record cannot have the same delete_tid
1906 * as its create_tid. In a chain of record updates
1907 * this could result in a duplicate record.
1909 KKASSERT(elm->leaf.base.delete_tid != elm->leaf.base.create_tid);
1913 if (error == 0 && dodelete) {
1914 error = hammer_delete_at_cursor(cursor, NULL);
1916 panic("hammer_ip_delete_record: unable to physically delete the record!\n");
1924 hammer_delete_at_cursor(hammer_cursor_t cursor, int64_t *stat_bytes)
1926 hammer_btree_elm_t elm;
1927 hammer_off_t data_offset;
1932 elm = &cursor->node->ondisk->elms[cursor->index];
1933 KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);
1935 data_offset = elm->leaf.data_offset;
1936 data_len = elm->leaf.data_len;
1937 rec_type = elm->leaf.base.rec_type;
1939 error = hammer_btree_delete(cursor);
1942 * This forces a fixup for the iteration because
1943 * the cursor is now either sitting at the 'next'
1944 * element or sitting at the end of a leaf.
1946 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
1947 cursor->flags |= HAMMER_CURSOR_DELBTREE;
1948 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1952 switch(data_offset & HAMMER_OFF_ZONE_MASK) {
1953 case HAMMER_ZONE_LARGE_DATA:
1954 case HAMMER_ZONE_SMALL_DATA:
1955 case HAMMER_ZONE_META:
1956 hammer_blockmap_free(cursor->trans,
1957 data_offset, data_len);
1967 * Determine whether we can remove a directory. This routine checks whether
1968 * a directory is empty or not and enforces flush connectivity.
1970 * Flush connectivity requires that we block if the target directory is
1971 * currently flushing, otherwise it may not end up in the same flush group.
1973 * Returns 0 on success, ENOTEMPTY or EDEADLK (or other errors) on failure.
1976 hammer_ip_check_directory_empty(hammer_transaction_t trans, hammer_inode_t ip)
1978 struct hammer_cursor cursor;
1982 * Check directory empty
1984 hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
1986 cursor.key_beg.localization = ip->obj_localization +
1987 HAMMER_LOCALIZE_MISC;
1988 cursor.key_beg.obj_id = ip->obj_id;
1989 cursor.key_beg.create_tid = 0;
1990 cursor.key_beg.delete_tid = 0;
1991 cursor.key_beg.obj_type = 0;
1992 cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
1993 cursor.key_beg.key = HAMMER_MIN_KEY;
1995 cursor.key_end = cursor.key_beg;
1996 cursor.key_end.rec_type = 0xFFFF;
1997 cursor.key_end.key = HAMMER_MAX_KEY;
1999 cursor.asof = ip->obj_asof;
2000 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
2002 error = hammer_ip_first(&cursor);
	if (error == ENOENT)
		error = 0;
	else if (error == 0)
		error = ENOTEMPTY;
	hammer_done_cursor(&cursor);