2 * Copyright (c) 2007-2008 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sys/vfs/hammer/hammer_object.c,v 1.72 2008/06/20 21:24:53 dillon Exp $
39 static int hammer_mem_add(hammer_record_t record);
40 static int hammer_mem_lookup(hammer_cursor_t cursor);
41 static int hammer_mem_first(hammer_cursor_t cursor);
42 static int hammer_rec_trunc_callback(hammer_record_t record, void *data __unused);
44 static int hammer_record_needs_overwrite_delete(hammer_record_t record);
46 struct rec_trunc_info {
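/*
 * NOTE: The field list below is a sketch inferred from the uses in
 * hammer_rec_trunc_cmp() and hammer_ip_frontend_trunc(); the names come
 * from those uses and the types are assumed.
 */
	u_int16_t	rec_type;
	int64_t		trunc_off;
};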
52 * Red-black tree support. Comparison code for insertion.
55 hammer_rec_rb_compare(hammer_record_t rec1, hammer_record_t rec2)
57 if (rec1->leaf.base.rec_type < rec2->leaf.base.rec_type)
59 if (rec1->leaf.base.rec_type > rec2->leaf.base.rec_type)
62 if (rec1->leaf.base.key < rec2->leaf.base.key)
64 if (rec1->leaf.base.key > rec2->leaf.base.key)
68 * Never match against an item deleted by the front-end.
70 * rec1 is greater than rec2 if rec1 is marked deleted.
71 * rec1 is less than rec2 if rec2 is marked deleted.
73 * Multiple deleted records may be present, do not return 0
74 * if both are marked deleted.
76 if (rec1->flags & HAMMER_RECF_DELETED_FE)
78 if (rec2->flags & HAMMER_RECF_DELETED_FE)
85 * Basic record comparison code similar to hammer_btree_cmp().
88 hammer_rec_cmp(hammer_base_elm_t elm, hammer_record_t rec)
90 if (elm->rec_type < rec->leaf.base.rec_type)
92 if (elm->rec_type > rec->leaf.base.rec_type)
95 if (elm->key < rec->leaf.base.key)
97 if (elm->key > rec->leaf.base.key)
101 * Never match against an item deleted by the front-end.
102 * elm is less than rec if rec is marked deleted.
104 if (rec->flags & HAMMER_RECF_DELETED_FE)
110 * Special LOOKUP_INFO to locate an overlapping record. This is used by
111 * the reservation code to implement small-block records (whose keys will
112 * be different depending on data_len, when representing the same base offset).
115 * NOTE: The base file offset of a data record is (key - data_len), not (key).
118 hammer_rec_overlap_compare(hammer_btree_leaf_elm_t leaf, hammer_record_t rec)
120 if (leaf->base.rec_type < rec->leaf.base.rec_type)
122 if (leaf->base.rec_type > rec->leaf.base.rec_type)
128 if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
129 /* leaf_end <= rec_beg */
130 if (leaf->base.key <= rec->leaf.base.key - rec->leaf.data_len)
132 /* leaf_beg >= rec_end */
133 if (leaf->base.key - leaf->data_len >= rec->leaf.base.key)
136 if (leaf->base.key < rec->leaf.base.key)
138 if (leaf->base.key > rec->leaf.base.key)
143 * Never match against an item deleted by the front-end.
144 * leaf is less than rec if rec is marked deleted.
146 * We must still return the proper code for the scan to continue
147 * along the correct branches.
149 if (rec->flags & HAMMER_RECF_DELETED_FE) {
150 if (leaf->base.key < rec->leaf.base.key)
152 if (leaf->base.key > rec->leaf.base.key)
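/*
 * Illustration of the overlap test above, using made-up numbers: a bulk
 * DATA record with key 49152 and data_len 16384 covers file offsets
 * [32768, 49152).  A leaf with key 40960 and data_len 8192 covers
 * [32768, 40960); neither exclusion test fires, so the two overlap and
 * (barring a deleted record) compare as a match.
 */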
160 * RB_SCAN comparison code for hammer_mem_first(). The argument order
161 * is reversed so the comparison result has to be negated. key_beg and
162 * key_end are both range-inclusive.
164 * Localized deletions are not cached in-memory.
168 hammer_rec_scan_cmp(hammer_record_t rec, void *data)
170 hammer_cursor_t cursor = data;
173 r = hammer_rec_cmp(&cursor->key_beg, rec);
176 r = hammer_rec_cmp(&cursor->key_end, rec);
183 * This compare function is used when simply looking up key_beg.
187 hammer_rec_find_cmp(hammer_record_t rec, void *data)
189 hammer_cursor_t cursor = data;
192 r = hammer_rec_cmp(&cursor->key_beg, rec);
201 * Locate blocks within the truncation range. Partial blocks do not count.
205 hammer_rec_trunc_cmp(hammer_record_t rec, void *data)
207 struct rec_trunc_info *info = data;
209 if (rec->leaf.base.rec_type < info->rec_type)
211 if (rec->leaf.base.rec_type > info->rec_type)
214 switch(rec->leaf.base.rec_type) {
215 case HAMMER_RECTYPE_DB:
217 * DB record key is not beyond the truncation point, retain.
219 if (rec->leaf.base.key < info->trunc_off)
222 case HAMMER_RECTYPE_DATA:
224 * DATA record offset start is not beyond the truncation point, retain.
227 if (rec->leaf.base.key - rec->leaf.data_len < info->trunc_off)
231 panic("hammer_rec_trunc_cmp: unexpected record type");
235 * The record start is >= the truncation point, return match,
236 * the record should be destroyed.
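/*
 * Example with made-up numbers: truncating a regular file at trunc_off
 * 65536, a DATA record with key 65536 (data_len 16384, covering
 * [49152, 65536)) starts before the truncation point and is retained,
 * while a record with key 98304 (data_len 16384, covering
 * [81920, 98304)) matches and is destroyed.
 */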
241 RB_GENERATE(hammer_rec_rb_tree, hammer_record, rb_node, hammer_rec_rb_compare);
242 RB_GENERATE_XLOOKUP(hammer_rec_rb_tree, INFO, hammer_record, rb_node,
243 hammer_rec_overlap_compare, hammer_btree_leaf_elm_t);
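/*
 * The INFO lookup generated above (hammer_rec_rb_tree_RB_LOOKUP_INFO)
 * is what hammer_ip_get_bulk() below uses, via hammer_rec_overlap_compare(),
 * to find an in-memory record overlapping a given file offset range.
 */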
246 * Allocate a record for the caller to finish filling in. The record is
247 * returned referenced.
250 hammer_alloc_mem_record(hammer_inode_t ip, int data_len)
252 hammer_record_t record;
254 ++hammer_count_records;
255 record = kmalloc(sizeof(*record), M_HAMMER, M_WAITOK | M_ZERO);
256 record->flush_state = HAMMER_FST_IDLE;
258 record->leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
259 record->leaf.data_len = data_len;
260 hammer_ref(&record->lock);
263 record->data = kmalloc(data_len, M_HAMMER, M_WAITOK | M_ZERO);
264 record->flags |= HAMMER_RECF_ALLOCDATA;
265 ++hammer_count_record_datas;
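/*
 * Typical use (see hammer_ip_add_directory() and hammer_ip_add_bulk()
 * below): the caller fills in record->type, the record->leaf.base fields
 * and record->data, then hands the record to hammer_mem_add(), which
 * consumes the reference obtained here.
 */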
272 hammer_wait_mem_record_ident(hammer_record_t record, const char *ident)
274 while (record->flush_state == HAMMER_FST_FLUSH) {
275 record->flags |= HAMMER_RECF_WANTED;
276 tsleep(record, 0, ident, 0);
281 * Called from the backend, hammer_inode.c, after a record has been
282 * flushed to disk. The record has been exclusively locked by the
283 * caller and interlocked with BE.
285 * We clean up the state, unlock, and release the record (the record
286 * was referenced by the fact that it was in the HAMMER_FST_FLUSH state).
289 hammer_flush_record_done(hammer_record_t record, int error)
291 hammer_inode_t target_ip;
293 KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
294 KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
298 * An error occurred: the backend was unable to sync the
299 * record to its media. Leave the record intact.
301 Debugger("flush_record_done error");
304 if (record->flags & HAMMER_RECF_DELETED_BE) {
305 if ((target_ip = record->target_ip) != NULL) {
306 TAILQ_REMOVE(&target_ip->target_list, record,
308 record->target_ip = NULL;
309 hammer_test_inode(target_ip);
311 record->flush_state = HAMMER_FST_IDLE;
313 if (record->target_ip) {
314 record->flush_state = HAMMER_FST_SETUP;
315 hammer_test_inode(record->ip);
316 hammer_test_inode(record->target_ip);
318 record->flush_state = HAMMER_FST_IDLE;
321 record->flags &= ~HAMMER_RECF_INTERLOCK_BE;
322 if (record->flags & HAMMER_RECF_WANTED) {
323 record->flags &= ~HAMMER_RECF_WANTED;
326 hammer_rel_mem_record(record);
330 * Release a memory record. Records marked for deletion are immediately
331 * removed from the RB-Tree but otherwise left intact until the last ref goes away.
335 hammer_rel_mem_record(struct hammer_record *record)
337 hammer_inode_t ip, target_ip;
339 hammer_unref(&record->lock);
341 if (record->lock.refs == 0) {
343 * Upon release of the last reference wakeup any waiters.
344 * The record structure may get destroyed so callers will
345 * loop up and do a relookup.
347 * WARNING! Record must be removed from RB-TREE before we
348 * might possibly block. hammer_test_inode() can block!
353 * Upon release of the last reference a record marked deleted
356 if (record->flags & HAMMER_RECF_DELETED_FE) {
357 KKASSERT(ip->lock.refs > 0);
358 KKASSERT(record->flush_state != HAMMER_FST_FLUSH);
361 * target_ip may have zero refs, we have to ref it
362 * to prevent it from being ripped out from under us.
365 if ((target_ip = record->target_ip) != NULL) {
366 TAILQ_REMOVE(&target_ip->target_list,
367 record, target_entry);
368 record->target_ip = NULL;
369 hammer_ref(&target_ip->lock);
372 if (record->flags & HAMMER_RECF_ONRBTREE) {
373 RB_REMOVE(hammer_rec_rb_tree,
374 &record->ip->rec_tree,
376 KKASSERT(ip->rsv_recs > 0);
379 ip->hmp->rsv_databytes -= record->leaf.data_len;
380 record->flags &= ~HAMMER_RECF_ONRBTREE;
382 if (RB_EMPTY(&record->ip->rec_tree)) {
383 record->ip->flags &= ~HAMMER_INODE_XDIRTY;
384 record->ip->sync_flags &= ~HAMMER_INODE_XDIRTY;
385 hammer_test_inode(record->ip);
390 * Do this test after removing the record from the in-memory RB-Tree.
393 hammer_test_inode(target_ip);
394 hammer_rel_inode(target_ip, 0);
397 if (record->flags & HAMMER_RECF_ALLOCDATA) {
398 --hammer_count_record_datas;
399 kfree(record->data, M_HAMMER);
400 record->flags &= ~HAMMER_RECF_ALLOCDATA;
403 hammer_blockmap_reserve_complete(ip->hmp,
408 --hammer_count_records;
409 kfree(record, M_HAMMER);
415 * Record visibility depends on whether the record is being accessed by
416 * the backend or the frontend.
418 * Return non-zero if the record is visible, zero if it isn't or if it is deleted.
423 hammer_ip_iterate_mem_good(hammer_cursor_t cursor, hammer_record_t record)
425 if (cursor->flags & HAMMER_CURSOR_BACKEND) {
426 if (record->flags & HAMMER_RECF_DELETED_BE)
429 if (record->flags & HAMMER_RECF_DELETED_FE)
436 * This callback is used as part of the RB_SCAN function for in-memory
437 * records. We terminate it (return -1) as soon as we get a match.
439 * This routine is used by frontend code.
441 * The primary compare code does not account for ASOF lookups. This
442 * code handles that case as well as a few others.
446 hammer_rec_scan_callback(hammer_record_t rec, void *data)
448 hammer_cursor_t cursor = data;
451 * We terminate on success, so this should be NULL on entry.
453 KKASSERT(cursor->iprec == NULL);
456 * Skip if the record was marked deleted.
458 if (hammer_ip_iterate_mem_good(cursor, rec) == 0)
462 * Skip if not visible due to our as-of TID
464 if (cursor->flags & HAMMER_CURSOR_ASOF) {
465 if (cursor->asof < rec->leaf.base.create_tid)
467 if (rec->leaf.base.delete_tid &&
468 cursor->asof >= rec->leaf.base.delete_tid) {
474 * If the record is queued to the flusher we have to block until
475 * it isn't. Otherwise we may see duplication between our memory
476 * cache and the media.
478 hammer_ref(&rec->lock);
480 #warning "This deadlocks"
482 if (rec->flush_state == HAMMER_FST_FLUSH)
483 hammer_wait_mem_record(rec);
487 * The record may have been deleted while we were blocked.
489 if (hammer_ip_iterate_mem_good(cursor, rec) == 0) {
490 hammer_rel_mem_record(rec);
495 * Set the matching record and stop the scan.
503 * Lookup an in-memory record given the key specified in the cursor. Works
504 * just like hammer_btree_lookup() but operates on an inode's in-memory records.
507 * The lookup must fail if the record is marked for deferred deletion.
511 hammer_mem_lookup(hammer_cursor_t cursor)
515 KKASSERT(cursor->ip);
517 hammer_rel_mem_record(cursor->iprec);
518 cursor->iprec = NULL;
520 hammer_rec_rb_tree_RB_SCAN(&cursor->ip->rec_tree, hammer_rec_find_cmp,
521 hammer_rec_scan_callback, cursor);
523 if (cursor->iprec == NULL)
531 * hammer_mem_first() - locate the first in-memory record matching the
532 * cursor within the bounds of the key range.
536 hammer_mem_first(hammer_cursor_t cursor)
541 KKASSERT(ip != NULL);
544 hammer_rel_mem_record(cursor->iprec);
545 cursor->iprec = NULL;
548 hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_scan_cmp,
549 hammer_rec_scan_callback, cursor);
552 * Adjust scan.node and keep it linked into the RB-tree so we can
553 * hold the cursor through third party modifications of the RB-tree.
561 hammer_mem_done(hammer_cursor_t cursor)
564 hammer_rel_mem_record(cursor->iprec);
565 cursor->iprec = NULL;
569 /************************************************************************
570 * HAMMER IN-MEMORY RECORD FUNCTIONS *
571 ************************************************************************
573 * These functions manipulate in-memory records. Such records typically
574 * exist prior to being committed to disk or indexed via the on-disk B-Tree.
578 * Add a directory entry (dip,ncp) which references inode (ip).
580 * Note that the low 32 bits of the namekey are set temporarily to create
581 * a unique in-memory record, and may be modified a second time when the
582 * record is synchronized to disk. In particular, the low 32 bits cannot be
583 * all 0's when synching to disk, which is not handled here.
586 hammer_ip_add_directory(struct hammer_transaction *trans,
587 struct hammer_inode *dip, struct namecache *ncp,
588 struct hammer_inode *ip)
590 hammer_record_t record;
594 bytes = ncp->nc_nlen; /* NOTE: terminating \0 is NOT included */
595 record = hammer_alloc_mem_record(dip, HAMMER_ENTRY_SIZE(bytes));
596 if (++trans->hmp->namekey_iterator == 0)
597 ++trans->hmp->namekey_iterator;
599 record->type = HAMMER_MEM_RECORD_ADD;
600 record->leaf.base.localization = HAMMER_LOCALIZE_MISC;
601 record->leaf.base.obj_id = dip->obj_id;
602 record->leaf.base.key = hammer_directory_namekey(ncp->nc_name, bytes);
603 record->leaf.base.key += trans->hmp->namekey_iterator;
604 record->leaf.base.rec_type = HAMMER_RECTYPE_DIRENTRY;
605 record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
606 record->data->entry.obj_id = ip->obj_id;
607 bcopy(ncp->nc_name, record->data->entry.name, bytes);
609 ++ip->ino_data.nlinks;
610 hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
613 * The target inode and the directory entry are bound together.
615 record->target_ip = ip;
616 record->flush_state = HAMMER_FST_SETUP;
617 TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);
620 * The inode now has a dependency and must be taken out of the idle
621 * state. An inode not in an idle state is given an extra reference.
623 if (ip->flush_state == HAMMER_FST_IDLE) {
624 hammer_ref(&ip->lock);
625 ip->flush_state = HAMMER_FST_SETUP;
627 error = hammer_mem_add(record);
632 * Delete the directory entry and update the inode link count. The
633 * cursor must be seeked to the directory entry record being deleted.
635 * The related inode should be share-locked by the caller. The caller is on the frontend.
638 * This function can return EDEADLK requiring the caller to terminate
639 * the cursor, any locks, wait on the returned record, and retry.
642 hammer_ip_del_directory(struct hammer_transaction *trans,
643 hammer_cursor_t cursor, struct hammer_inode *dip,
644 struct hammer_inode *ip)
646 hammer_record_t record;
649 if (hammer_cursor_inmem(cursor)) {
651 * In-memory (unsynchronized) records can simply be freed.
652 * Even though the HAMMER_RECF_DELETED_FE flag is ignored
653 * by the backend, we must still avoid races against the
654 * backend potentially syncing the record to the media.
656 * We cannot call hammer_ip_delete_record(), that routine may
657 * only be called from the backend.
659 record = cursor->iprec;
660 if (record->flags & HAMMER_RECF_INTERLOCK_BE) {
661 KKASSERT(cursor->deadlk_rec == NULL);
662 hammer_ref(&record->lock);
663 cursor->deadlk_rec = record;
666 KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
667 record->flags |= HAMMER_RECF_DELETED_FE;
672 * If the record is on-disk we have to queue the deletion by
673 * the record's key. This also causes lookups to skip the record.
676 KKASSERT(dip->flags &
677 (HAMMER_INODE_ONDISK | HAMMER_INODE_DONDISK));
678 record = hammer_alloc_mem_record(dip, 0);
679 record->type = HAMMER_MEM_RECORD_DEL;
680 record->leaf.base = cursor->leaf->base;
682 record->target_ip = ip;
683 record->flush_state = HAMMER_FST_SETUP;
684 TAILQ_INSERT_TAIL(&ip->target_list, record, target_entry);
687 * The inode now has a dependancy and must be taken out of
688 * the idle state. An inode not in an idle state is given
689 * an extra reference.
691 if (ip->flush_state == HAMMER_FST_IDLE) {
692 hammer_ref(&ip->lock);
693 ip->flush_state = HAMMER_FST_SETUP;
696 error = hammer_mem_add(record);
700 * One less link. The file may still be open in the OS even after
701 * all links have gone away.
703 * We have to terminate the cursor before syncing the inode to
704 * avoid deadlocking against ourselves. XXX this may no longer
707 * If nlinks drops to zero and the vnode is inactive (or there is
708 * no vnode), call hammer_inode_unloadable_check() to zonk the
709 * inode. If we don't do this here the inode will not be destroyed
710 * on-media until we unmount.
713 --ip->ino_data.nlinks;
714 hammer_modify_inode(ip, HAMMER_INODE_DDIRTY);
715 if (ip->ino_data.nlinks == 0 &&
716 (ip->vp == NULL || (ip->vp->v_flag & VINACTIVE))) {
717 hammer_done_cursor(cursor);
718 hammer_inode_unloadable_check(ip, 1);
719 hammer_flush_inode(ip, 0);
727 * Add a record to an inode.
729 * The caller must allocate the record with hammer_alloc_mem_record(ip) and
730 * initialize the following additional fields:
732 * The related inode should be share-locked by the caller. The caller is on the frontend.
735 * record->rec.entry.base.base.key
736 * record->rec.entry.base.base.rec_type
737 * record->rec.entry.base.base.data_len
738 * record->data (a copy will be kmalloc'd if it cannot be embedded)
741 hammer_ip_add_record(struct hammer_transaction *trans, hammer_record_t record)
743 hammer_inode_t ip = record->ip;
746 KKASSERT(record->leaf.base.localization != 0);
747 record->leaf.base.obj_id = ip->obj_id;
748 record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
749 error = hammer_mem_add(record);
754 * Locate a bulk record in-memory. Bulk records allow disk space to be
755 * reserved so the front-end can flush large data writes without having
756 * to queue the BIO to the flusher. Only the related record gets queued to the flusher.
759 static hammer_record_t
760 hammer_ip_get_bulk(hammer_inode_t ip, off_t file_offset, int bytes)
762 hammer_record_t record;
763 struct hammer_btree_leaf_elm leaf;
765 bzero(&leaf, sizeof(leaf));
766 leaf.base.obj_id = ip->obj_id;
767 leaf.base.key = file_offset + bytes;
768 leaf.base.create_tid = 0;
769 leaf.base.delete_tid = 0;
770 leaf.base.rec_type = HAMMER_RECTYPE_DATA;
771 leaf.base.obj_type = 0; /* unused */
772 leaf.base.btype = HAMMER_BTREE_TYPE_RECORD; /* unused */
773 leaf.base.localization = HAMMER_LOCALIZE_MISC;
774 leaf.data_len = bytes;
776 record = hammer_rec_rb_tree_RB_LOOKUP_INFO(&ip->rec_tree, &leaf);
778 hammer_ref(&record->lock);
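/*
 * The record, if found, is returned with an extra reference; the caller
 * is expected to drop it with hammer_rel_mem_record(), as the conflict
 * checks in hammer_ip_add_bulk() below do.
 */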
783 * Reserve blockmap space placemarked with an in-memory record.
785 * This routine is called by the frontend in order to be able to directly
786 * flush a buffer cache buffer. The frontend has locked the related buffer
787 * cache buffers and we should be able to manipulate any overlapping in-memory records.
791 hammer_ip_add_bulk(hammer_inode_t ip, off_t file_offset, void *data, int bytes,
794 hammer_record_t record;
795 hammer_record_t conflict;
800 * Deal with conflicting in-memory records. We cannot have multiple
801 * in-memory records for the same offset without seriously confusing
802 * the backend, including but not limited to the backend issuing
803 * delete-create-delete sequences and asserting on the delete_tid
804 * being the same as the create_tid.
806 * If we encounter a record with the backend interlock set we cannot
807 * immediately delete it without confusing the backend.
809 while ((conflict = hammer_ip_get_bulk(ip, file_offset, bytes)) != NULL) {
810 if (conflict->flags & HAMMER_RECF_INTERLOCK_BE) {
811 conflict->flags |= HAMMER_RECF_WANTED;
812 tsleep(conflict, 0, "hmrrc3", 0);
814 conflict->flags |= HAMMER_RECF_DELETED_FE;
816 hammer_rel_mem_record(conflict);
820 * Create a record to cover the direct write. This is called with
821 * the related BIO locked so there should be no possible conflict.
823 * The backend is responsible for finalizing the space reserved in this record.
826 * XXX bytes not aligned, depend on the reservation code to
827 * align the reservation.
829 record = hammer_alloc_mem_record(ip, 0);
830 zone = (bytes >= HAMMER_BUFSIZE) ? HAMMER_ZONE_LARGE_DATA_INDEX :
831 HAMMER_ZONE_SMALL_DATA_INDEX;
832 record->resv = hammer_blockmap_reserve(ip->hmp, zone, bytes,
833 &record->leaf.data_offset,
835 if (record->resv == NULL) {
836 kprintf("hammer_ip_add_bulk: reservation failed\n");
837 hammer_rel_mem_record(record);
840 record->type = HAMMER_MEM_RECORD_DATA;
841 record->leaf.base.rec_type = HAMMER_RECTYPE_DATA;
842 record->leaf.base.obj_type = ip->ino_leaf.base.obj_type;
843 record->leaf.base.obj_id = ip->obj_id;
844 record->leaf.base.key = file_offset + bytes;
845 record->leaf.base.localization = HAMMER_LOCALIZE_MISC;
846 record->leaf.data_len = bytes;
847 hammer_crc_set_leaf(data, &record->leaf);
848 flags = record->flags;
850 hammer_ref(&record->lock); /* mem_add eats a reference */
851 *errorp = hammer_mem_add(record);
853 conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
854 kprintf("hammer_ip_add_bulk: error %d conflict %p file_offset %lld bytes %d\n",
855 *errorp, conflict, file_offset, bytes);
857 kprintf("conflict %lld %d\n", conflict->leaf.base.key, conflict->leaf.data_len);
859 hammer_rel_mem_record(conflict);
861 KKASSERT(*errorp == 0);
862 conflict = hammer_ip_get_bulk(ip, file_offset, bytes);
863 if (conflict != record) {
864 kprintf("conflict mismatch %p %p %08x\n", conflict, record, record->flags);
866 kprintf("conflict mismatch %lld/%d %lld/%d\n", conflict->leaf.base.key, conflict->leaf.data_len, record->leaf.base.key, record->leaf.data_len);
868 KKASSERT(conflict == record);
869 hammer_rel_mem_record(conflict);
875 * Frontend truncation code. Scan in-memory records only. On-disk records
876 * and records in a flushing state are handled by the backend. The vnops
877 * setattr code will handle the block containing the truncation point.
879 * Partial blocks are not deleted.
882 hammer_ip_frontend_trunc(struct hammer_inode *ip, off_t file_size)
884 struct rec_trunc_info info;
886 switch(ip->ino_data.obj_type) {
887 case HAMMER_OBJTYPE_REGFILE:
888 info.rec_type = HAMMER_RECTYPE_DATA;
890 case HAMMER_OBJTYPE_DBFILE:
891 info.rec_type = HAMMER_RECTYPE_DB;
896 info.trunc_off = file_size;
897 hammer_rec_rb_tree_RB_SCAN(&ip->rec_tree, hammer_rec_trunc_cmp,
898 hammer_rec_trunc_callback, &info);
903 hammer_rec_trunc_callback(hammer_record_t record, void *data __unused)
905 if (record->flags & HAMMER_RECF_DELETED_FE)
907 if (record->flush_state == HAMMER_FST_FLUSH)
909 KKASSERT((record->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
910 hammer_ref(&record->lock);
911 record->flags |= HAMMER_RECF_DELETED_FE;
912 hammer_rel_mem_record(record);
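/*
 * Marking the record HAMMER_RECF_DELETED_FE and then dropping the extra
 * reference lets hammer_rel_mem_record() pull it out of the RB tree and
 * destroy it once the last reference goes away.
 */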
917 * Return 1 if the caller must check for and delete existing records
918 * before writing out a new data record.
920 * Return 0 if the caller can just insert the record into the B-Tree without such a check.
924 hammer_record_needs_overwrite_delete(hammer_record_t record)
926 hammer_inode_t ip = record->ip;
930 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE)
931 file_offset = record->leaf.base.key;
933 file_offset = record->leaf.base.key - record->leaf.data_len;
934 r = (file_offset < ip->sync_trunc_off);
935 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
936 if (ip->sync_trunc_off <= record->leaf.base.key)
937 ip->sync_trunc_off = record->leaf.base.key + 1;
939 if (ip->sync_trunc_off < record->leaf.base.key)
940 ip->sync_trunc_off = record->leaf.base.key;
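/*
 * Example with made-up numbers, for a regular file: if sync_trunc_off is
 * 65536 and the record has key 81920 with data_len 16384, the record's
 * base offset is 65536, which is not below sync_trunc_off, so no
 * overwrite delete is needed (r == 0) and sync_trunc_off advances to
 * 81920.
 */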
946 * Backend code. Sync a record to the media.
949 hammer_ip_sync_record_cursor(hammer_cursor_t cursor, hammer_record_t record)
951 hammer_transaction_t trans = cursor->trans;
957 KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
958 KKASSERT(record->flags & HAMMER_RECF_INTERLOCK_BE);
959 KKASSERT(record->leaf.base.localization != 0);
962 * If this is a bulk-data record placemarker there may be an existing
963 * record on-disk, indicating a data overwrite. If there is, the
964 * on-disk record must be deleted before we can insert our new record.
966 * We've synthesized this record and do not know what the create_tid
967 * on-disk is, nor how much data it represents.
969 * Keep in mind that (key) for data records is (base_offset + len),
970 * not (base_offset). Also, we only want to get rid of on-disk
971 * records since we are trying to sync our in-memory record, call
972 * hammer_ip_delete_range() with truncating set to 1 to make sure
973 * it skips in-memory records.
975 * It is ok for the lookup to return ENOENT.
977 * NOTE OPTIMIZATION: sync_trunc_off is used to determine if we have
978 * to call hammer_ip_delete_range() or not. This also means we must
979 * update sync_trunc_off as we write.
981 if (record->type == HAMMER_MEM_RECORD_DATA &&
982 hammer_record_needs_overwrite_delete(record)) {
983 file_offset = record->leaf.base.key - record->leaf.data_len;
984 bytes = (record->leaf.data_len + HAMMER_BUFMASK) &
986 KKASSERT((file_offset & HAMMER_BUFMASK) == 0);
987 error = hammer_ip_delete_range(
989 file_offset, file_offset + bytes - 1,
991 if (error && error != ENOENT)
998 hammer_normalize_cursor(cursor);
999 cursor->key_beg = record->leaf.base;
1000 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1001 cursor->flags |= HAMMER_CURSOR_BACKEND;
1002 cursor->flags &= ~HAMMER_CURSOR_INSERT;
1005 * Records can wind up on-media before the inode itself is on-media.
1008 record->ip->flags |= HAMMER_INODE_DONDISK;
1011 * If we are deleting a directory entry an exact match must be found.
1014 if (record->type == HAMMER_MEM_RECORD_DEL) {
1015 error = hammer_btree_lookup(cursor);
1017 error = hammer_ip_delete_record(cursor, record->ip,
1020 record->flags |= HAMMER_RECF_DELETED_FE;
1021 record->flags |= HAMMER_RECF_DELETED_BE;
1030 * Issue a lookup to position the cursor and locate the cluster. The
1031 * target key should not exist. If we are creating a directory entry
1032 * we may have to iterate the low 32 bits of the key to find an unused key.
1035 cursor->flags |= HAMMER_CURSOR_INSERT;
1038 error = hammer_btree_lookup(cursor);
1039 if (hammer_debug_inode)
1040 kprintf("DOINSERT LOOKUP %d\n", error);
1043 if (record->leaf.base.rec_type != HAMMER_RECTYPE_DIRENTRY) {
1044 kprintf("hammer_ip_sync_record: duplicate rec "
1045 "at (%016llx)\n", record->leaf.base.key);
1046 Debugger("duplicate record1");
1050 if (++trans->hmp->namekey_iterator == 0)
1051 ++trans->hmp->namekey_iterator;
1052 record->leaf.base.key &= ~(0xFFFFFFFFLL);
1053 record->leaf.base.key |= trans->hmp->namekey_iterator;
1054 cursor->key_beg.key = record->leaf.base.key;
1057 if (record->type == HAMMER_MEM_RECORD_DATA)
1058 kprintf("sync_record %016llx ---------------- %016llx %d\n",
1059 record->leaf.base.key - record->leaf.data_len,
1060 record->leaf.data_offset, error);
1064 if (error != ENOENT)
1068 * Allocate the record and data. The result buffers will be
1069 * marked as being modified and further calls to
1070 * hammer_modify_buffer() will result in unneeded UNDO records.
1072 * Support zero-fill records (data == NULL and data_len != 0)
1074 if (record->type == HAMMER_MEM_RECORD_DATA) {
1076 * The data portion of a bulk-data record has already been
1077 * committed to disk; we need only adjust the layer2
1078 * statistics in the same transaction as our B-Tree insert.
1080 KKASSERT(record->leaf.data_offset != 0);
1081 hammer_blockmap_finalize(trans, record->leaf.data_offset,
1082 record->leaf.data_len);
1084 } else if (record->data && record->leaf.data_len) {
1086 * Wholly cached record, with data. Allocate the data.
1088 bdata = hammer_alloc_data(trans, record->leaf.data_len,
1089 record->leaf.base.rec_type,
1090 &record->leaf.data_offset,
1091 &cursor->data_buffer, &error);
1094 hammer_crc_set_leaf(record->data, &record->leaf);
1095 hammer_modify_buffer(trans, cursor->data_buffer, NULL, 0);
1096 bcopy(record->data, bdata, record->leaf.data_len);
1097 hammer_modify_buffer_done(cursor->data_buffer);
1100 * Wholly cached record, without data.
1102 record->leaf.data_offset = 0;
1103 record->leaf.data_crc = 0;
1106 error = hammer_btree_insert(cursor, &record->leaf);
1107 if (hammer_debug_inode && error)
1108 kprintf("BTREE INSERT error %d @ %016llx:%d key %016llx\n", error, cursor->node->node_offset, cursor->index, record->leaf.base.key);
1111 * Our record is on-disk; normally we mark the in-memory version as
1112 * deleted. If the record represented a directory deletion but
1113 * we had to sync a valid directory entry to disk we must convert
1114 * the record to a covering delete so the frontend does not have
1115 * visibility on the synced entry.
1118 if (record->flags & HAMMER_RECF_CONVERT_DELETE) {
1119 KKASSERT(record->type == HAMMER_MEM_RECORD_ADD);
1120 record->flags &= ~HAMMER_RECF_DELETED_FE;
1121 record->type = HAMMER_MEM_RECORD_DEL;
1122 KKASSERT(record->flush_state == HAMMER_FST_FLUSH);
1123 record->flags &= ~HAMMER_RECF_CONVERT_DELETE;
1124 /* hammer_flush_record_done takes care of the rest */
1126 record->flags |= HAMMER_RECF_DELETED_FE;
1127 record->flags |= HAMMER_RECF_DELETED_BE;
1130 if (record->leaf.data_offset) {
1131 hammer_blockmap_free(trans, record->leaf.data_offset,
1132 record->leaf.data_len);
1141 * Add the record to the inode's rec_tree. The low 32 bits of a directory
1142 * entry's key are used to deal with hash collisions in the upper 32 bits.
1143 * A unique 64 bit key is generated in-memory and may be regenerated a
1144 * second time when the directory record is flushed to the on-disk B-Tree.
1146 * A referenced record is passed to this function. This function
1147 * eats the reference. If an error occurs the record will be deleted.
1149 * A copy of the temporary record->data pointer provided by the caller will be made.
1154 hammer_mem_add(hammer_record_t record)
1156 hammer_mount_t hmp = record->ip->hmp;
1159 * Make a private copy of record->data
1162 KKASSERT(record->flags & HAMMER_RECF_ALLOCDATA);
1165 * Insert into the RB tree, find an unused iterator if this is
1166 * a directory entry.
1168 while (RB_INSERT(hammer_rec_rb_tree, &record->ip->rec_tree, record)) {
1169 if (record->leaf.base.rec_type != HAMMER_RECTYPE_DIRENTRY){
1170 record->flags |= HAMMER_RECF_DELETED_FE;
1171 hammer_rel_mem_record(record);
1174 if (++hmp->namekey_iterator == 0)
1175 ++hmp->namekey_iterator;
1176 record->leaf.base.key &= ~(0xFFFFFFFFLL);
1177 record->leaf.base.key |= hmp->namekey_iterator;
1179 ++hmp->count_newrecords;
1181 ++record->ip->rsv_recs;
1182 record->ip->hmp->rsv_databytes += record->leaf.data_len;
1183 record->flags |= HAMMER_RECF_ONRBTREE;
1184 hammer_modify_inode(record->ip, HAMMER_INODE_XDIRTY);
1185 hammer_rel_mem_record(record);
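/*
 * Directory key layout, as used above: the upper 32 bits come from the
 * name hash (hammer_directory_namekey()) and the low 32 bits are
 * rewritten from namekey_iterator each time RB_INSERT reports a
 * collision, so two names hashing to the same upper bits simply receive
 * successive iterator values in the low bits.
 */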
1189 /************************************************************************
1190 * HAMMER INODE MERGED-RECORD FUNCTIONS *
1191 ************************************************************************
1193 * These functions augment the B-Tree scanning functions in hammer_btree.c
1194 * by merging in-memory records with on-disk records.
1198 * Locate a particular record either in-memory or on-disk.
1200 * NOTE: This is basically a standalone routine, hammer_ip_next() may
1201 * NOT be called to iterate results.
1204 hammer_ip_lookup(hammer_cursor_t cursor)
1209 * If the element is in-memory return it without searching the on-disk B-Tree.
1212 KKASSERT(cursor->ip);
1213 error = hammer_mem_lookup(cursor);
1215 cursor->leaf = &cursor->iprec->leaf;
1218 if (error != ENOENT)
1222 * If the inode has on-disk components search the on-disk B-Tree.
1224 if ((cursor->ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) == 0)
1226 error = hammer_btree_lookup(cursor);
1228 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1233 * Locate the first record within the cursor's key_beg/key_end range,
1234 * restricted to a particular inode. 0 is returned on success, ENOENT
1235 * if no records matched the requested range, or some other error.
1237 * When 0 is returned hammer_ip_next() may be used to iterate additional
1238 * records within the requested range.
1240 * This function can return EDEADLK, requiring the caller to terminate
1241 * the cursor and try again.
1244 hammer_ip_first(hammer_cursor_t cursor)
1246 hammer_inode_t ip = cursor->ip;
1249 KKASSERT(ip != NULL);
1252 * Clean up fields and set up for the merged scan
1254 cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
1255 cursor->flags |= HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM;
1256 cursor->flags |= HAMMER_CURSOR_DISKEOF | HAMMER_CURSOR_MEMEOF;
1257 if (cursor->iprec) {
1258 hammer_rel_mem_record(cursor->iprec);
1259 cursor->iprec = NULL;
1263 * Search the on-disk B-Tree. hammer_btree_lookup() only does an
1264 * exact lookup so if we get ENOENT we have to call the iterate
1265 * function to validate the first record after the begin key.
1267 * The ATEDISK flag is used by hammer_btree_iterate to determine
1268 * whether it must index forwards or not. It is also used here
1269 * to select the next record from in-memory or on-disk.
1271 * EDEADLK can only occur if the lookup hit an empty internal
1272 * element and couldn't delete it. Since this could only occur
1273 * in-range, we can just iterate from the failure point.
1275 if (ip->flags & (HAMMER_INODE_ONDISK|HAMMER_INODE_DONDISK)) {
1276 error = hammer_btree_lookup(cursor);
1277 if (error == ENOENT || error == EDEADLK) {
1278 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1279 if (hammer_debug_general & 0x2000)
1280 kprintf("error %d node %p %016llx index %d\n", error, cursor->node, cursor->node->node_offset, cursor->index);
1281 error = hammer_btree_iterate(cursor);
1283 if (error && error != ENOENT)
1286 cursor->flags &= ~HAMMER_CURSOR_DISKEOF;
1287 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1289 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1294 * Search the in-memory record list (Red-Black tree). Unlike the
1295 * B-Tree search, mem_first checks for records in the range.
1297 error = hammer_mem_first(cursor);
1298 if (error && error != ENOENT)
1301 cursor->flags &= ~HAMMER_CURSOR_MEMEOF;
1302 cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
1303 if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0)
1304 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1308 * This will return the first matching record.
1310 return(hammer_ip_next(cursor));
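/*
 * Typical merged-iteration loop, as used by hammer_ip_delete_range()
 * and hammer_ip_delete_range_all() below:
 *
 *	error = hammer_ip_first(cursor);
 *	while (error == 0) {
 *		(operate on cursor->leaf / hammer_ip_resolve_data())
 *		error = hammer_ip_next(cursor);
 *	}
 *	if (error == ENOENT)
 *		error = 0;
 */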
1314 * Retrieve the next record in a merged iteration within the bounds of the
1315 * cursor. This call may be made multiple times after the cursor has been
1316 * initially searched with hammer_ip_first().
1318 * 0 is returned on success, ENOENT if no further records match the
1319 * requested range, or some other error code is returned.
1322 hammer_ip_next(hammer_cursor_t cursor)
1324 hammer_btree_elm_t elm;
1325 hammer_record_t rec, save;
1331 * Load the current on-disk and in-memory record. If we ate any
1332 * records we have to get the next one.
1334 * If we deleted the last on-disk record we had scanned ATEDISK will
1335 * be clear and DELBTREE will be set, forcing a call to iterate. The
1336 * fact that ATEDISK is clear causes iterate to re-test the 'current'
1337 * element. If ATEDISK is set, iterate will skip the 'current' element.
1340 * Get the next on-disk record
1342 if (cursor->flags & (HAMMER_CURSOR_ATEDISK|HAMMER_CURSOR_DELBTREE)) {
1343 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
1344 error = hammer_btree_iterate(cursor);
1345 cursor->flags &= ~HAMMER_CURSOR_DELBTREE;
1347 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1348 hammer_cache_node(&cursor->ip->cache[1],
1351 cursor->flags |= HAMMER_CURSOR_DISKEOF |
1352 HAMMER_CURSOR_ATEDISK;
1359 * Get the next in-memory record. The record can be ripped out
1360 * of the RB tree so we maintain a scan_info structure to track
1363 * hammer_rec_scan_cmp: Is the record still in our general range,
1364 * (non-inclusive of snapshot exclusions)?
1365 * hammer_rec_scan_callback: Is the record in our snapshot?
1367 if (cursor->flags & HAMMER_CURSOR_ATEMEM) {
1368 if ((cursor->flags & HAMMER_CURSOR_MEMEOF) == 0) {
1369 save = cursor->iprec;
1370 cursor->iprec = NULL;
1371 rec = save ? hammer_rec_rb_tree_RB_NEXT(save) : NULL;
1373 if (hammer_rec_scan_cmp(rec, cursor) != 0)
1375 if (hammer_rec_scan_callback(rec, cursor) != 0)
1377 rec = hammer_rec_rb_tree_RB_NEXT(rec);
1380 hammer_rel_mem_record(save);
1381 if (cursor->iprec) {
1382 KKASSERT(cursor->iprec == rec);
1383 cursor->flags &= ~HAMMER_CURSOR_ATEMEM;
1385 cursor->flags |= HAMMER_CURSOR_MEMEOF;
1391 * The memory record may have become stale while being held in
1392 * cursor->iprec. We are interlocked against the backend
1393 * with regard to B-Tree entries.
1395 if ((cursor->flags & HAMMER_CURSOR_ATEMEM) == 0) {
1396 if (hammer_ip_iterate_mem_good(cursor, cursor->iprec) == 0) {
1397 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1403 * Extract either the disk or memory record depending on their
1404 * relative position.
1407 switch(cursor->flags & (HAMMER_CURSOR_ATEDISK | HAMMER_CURSOR_ATEMEM)) {
1410 * Both entries valid. Compare the entries and nominally
1411 * return the first one in the sort order. Numerous cases
1412 * require special attention, however.
1414 elm = &cursor->node->ondisk->elms[cursor->index];
1415 r = hammer_btree_cmp(&elm->base, &cursor->iprec->leaf.base);
1418 * If the two entries differ only by their key (-2/2) or
1419 * create_tid (-1/1), and are DATA records, we may have a
1420 * nominal match. We have to calculate the base file
1421 * offset of the data.
1423 if (r <= 2 && r >= -2 && r != 0 &&
1424 cursor->ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE &&
1425 cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
1426 int64_t base1 = elm->leaf.base.key - elm->leaf.data_len;
1427 int64_t base2 = cursor->iprec->leaf.base.key -
1428 cursor->iprec->leaf.data_len;
1434 error = hammer_btree_extract(cursor,
1435 HAMMER_CURSOR_GET_LEAF);
1436 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1441 * If the entries match exactly the memory entry is either
1442 * an on-disk directory entry deletion or a bulk data
1443 * overwrite. If it is a directory entry deletion we eat both entries.
1446 * For the bulk-data overwrite case it is possible to have
1447 * visibility into both, which simply means the syncer
1448 * hasn't gotten around to doing the delete+insert sequence
1449 * on the B-Tree. Use the memory entry and throw away the
1452 * If the in-memory record is not either of these we
1453 * probably caught the syncer while it was syncing it to
1454 * the media. Since we hold a shared lock on the cursor,
1455 * the in-memory record had better be marked deleted at this point.
1459 if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL) {
1460 if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1461 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1462 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1465 } else if (cursor->iprec->type == HAMMER_MEM_RECORD_DATA) {
1466 if ((cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1467 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1469 /* fall through to memory entry */
1471 panic("hammer_ip_next: duplicate mem/b-tree entry");
1472 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1476 /* fall through to the memory entry */
1477 case HAMMER_CURSOR_ATEDISK:
1479 * Only the memory entry is valid.
1481 cursor->leaf = &cursor->iprec->leaf;
1482 cursor->flags |= HAMMER_CURSOR_ATEMEM;
1485 * If the memory entry is an on-disk deletion we should have
1486 * also have found a B-Tree record. If the backend beat us
1487 * to it, it would have interlocked the cursor and we should
1488 * have seen the in-memory record marked DELETED_FE.
1490 if (cursor->iprec->type == HAMMER_MEM_RECORD_DEL &&
1491 (cursor->flags & HAMMER_CURSOR_DELETE_VISIBILITY) == 0) {
1492 panic("hammer_ip_next: del-on-disk with no b-tree entry");
1495 case HAMMER_CURSOR_ATEMEM:
1497 * Only the disk entry is valid
1499 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1500 cursor->flags |= HAMMER_CURSOR_ATEDISK;
1504 * Neither entry is valid
1506 * XXX error not set properly
1508 cursor->leaf = NULL;
1516 * Resolve the cursor->data pointer for the current cursor position in
1517 * a merged iteration.
1520 hammer_ip_resolve_data(hammer_cursor_t cursor)
1522 hammer_record_t record;
1525 if (hammer_cursor_inmem(cursor)) {
1527 * The data associated with an in-memory record is usually
1528 * kmalloced, but reserve-ahead data records will have an
1529 * on-disk reference.
1531 * NOTE: Reserve-ahead data records must be handled in the
1532 * context of the related high level buffer cache buffer
1533 * to interlock against async writes.
1535 record = cursor->iprec;
1536 cursor->data = record->data;
1538 if (cursor->data == NULL) {
1539 KKASSERT(record->leaf.base.rec_type ==
1540 HAMMER_RECTYPE_DATA);
1541 cursor->data = hammer_bread_ext(cursor->trans->hmp,
1542 record->leaf.data_offset,
1543 record->leaf.data_len,
1545 &cursor->data_buffer);
1548 cursor->leaf = &cursor->node->ondisk->elms[cursor->index].leaf;
1549 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA);
1555 * Backend truncation / record replacement - delete records in range.
1557 * Delete all records within the specified range for inode ip. In-memory
1558 * records still associated with the frontend are ignored.
1560 * NOTE: An unaligned range will cause new records to be added to cover
1561 * the edge cases. (XXX not implemented yet).
1563 * NOTE: Replacement via reservations (see hammer_ip_sync_record_cursor())
1564 * also does not deal with unaligned ranges.
1566 * NOTE: ran_end is inclusive (e.g. 0,1023 instead of 0,1024).
1568 * NOTE: Record keys for regular file data have to be special-cased since
1569 * they indicate the end of the range (key = base + bytes).
1572 hammer_ip_delete_range(hammer_cursor_t cursor, hammer_inode_t ip,
1573 int64_t ran_beg, int64_t ran_end, int truncating)
1575 hammer_transaction_t trans = cursor->trans;
1576 hammer_btree_leaf_elm_t leaf;
1581 kprintf("delete_range %p %016llx-%016llx\n", ip, ran_beg, ran_end);
1584 KKASSERT(trans->type == HAMMER_TRANS_FLS);
1586 hammer_normalize_cursor(cursor);
1587 cursor->key_beg.localization = HAMMER_LOCALIZE_MISC;
1588 cursor->key_beg.obj_id = ip->obj_id;
1589 cursor->key_beg.create_tid = 0;
1590 cursor->key_beg.delete_tid = 0;
1591 cursor->key_beg.obj_type = 0;
1592 cursor->asof = ip->obj_asof;
1593 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1594 cursor->flags |= HAMMER_CURSOR_ASOF;
1595 cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
1596 cursor->flags |= HAMMER_CURSOR_BACKEND;
1598 cursor->key_end = cursor->key_beg;
1599 if (ip->ino_data.obj_type == HAMMER_OBJTYPE_DBFILE) {
1600 cursor->key_beg.key = ran_beg;
1601 cursor->key_beg.rec_type = HAMMER_RECTYPE_DB;
1602 cursor->key_end.rec_type = HAMMER_RECTYPE_DB;
1603 cursor->key_end.key = ran_end;
1606 * The key in the B-Tree is (base+bytes), so the first possible
1607 * matching key is ran_beg + 1.
1611 cursor->key_beg.key = ran_beg + 1;
1612 cursor->key_beg.rec_type = HAMMER_RECTYPE_DATA;
1613 cursor->key_end.rec_type = HAMMER_RECTYPE_DATA;
1615 tmp64 = ran_end + MAXPHYS + 1; /* work around GCC-4 bug */
1616 if (tmp64 < ran_end)
1617 cursor->key_end.key = 0x7FFFFFFFFFFFFFFFLL;
1619 cursor->key_end.key = ran_end + MAXPHYS + 1;
1621 cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE;
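/*
 * The end key is padded out by MAXPHYS because a data record's key is
 * its ending offset: a record that begins inside the range can end up to
 * one maximal record length (assumed here to be bounded by MAXPHYS) past
 * ran_end and must still be visited so the right-edge checks in the loop
 * below can accept or reject it.
 */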
1623 error = hammer_ip_first(cursor);
1626 * Iterate through matching records and mark them as deleted.
1628 while (error == 0) {
1629 leaf = cursor->leaf;
1631 KKASSERT(leaf->base.delete_tid == 0);
1634 * There may be overlap cases for regular file data. Also
1635 * remember the key for a regular file record is (base + len), not (base).
1638 if (leaf->base.rec_type == HAMMER_RECTYPE_DATA) {
1639 off = leaf->base.key - leaf->data_len;
1641 * Check the left edge case. We currently do not
1642 * split existing records.
1644 if (off < ran_beg) {
1645 panic("hammer left edge case %016llx %d\n",
1646 leaf->base.key, leaf->data_len);
1650 * Check the right edge case. Note that the
1651 * record can be completely out of bounds, which
1652 * terminates the search.
1654 * base->key is exclusive of the right edge while
1655 * ran_end is inclusive of the right edge. The
1656 * (key - data_len) left boundary is inclusive.
1658 * XXX theory-check this test at some point, are
1659 * we missing a + 1 somewhere? Note that ran_end
1662 if (leaf->base.key - 1 > ran_end) {
1663 if (leaf->base.key - leaf->data_len > ran_end)
1665 panic("hammer right edge case\n");
1670 * Delete the record. When truncating we do not delete
1671 * in-memory (data) records because they represent data
1672 * written after the truncation.
1674 * This will also physically destroy the B-Tree entry and
1675 * data if the retention policy dictates. The function
1676 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
1677 * uses to perform a fixup.
1679 if (truncating == 0 || hammer_cursor_ondisk(cursor))
1680 error = hammer_ip_delete_record(cursor, ip, trans->tid);
1683 error = hammer_ip_next(cursor);
1686 hammer_cache_node(&ip->cache[1], cursor->node);
1688 if (error == EDEADLK) {
1689 hammer_done_cursor(cursor);
1690 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
1694 if (error == ENOENT)
1700 * Backend truncation - delete all records.
1702 * Delete all user records associated with an inode except the inode record
1703 * itself. Directory entries are not deleted (they must be properly disposed
1704 * of or nlinks would get upset).
1707 hammer_ip_delete_range_all(hammer_cursor_t cursor, hammer_inode_t ip,
1710 hammer_transaction_t trans = cursor->trans;
1711 hammer_btree_leaf_elm_t leaf;
1714 KKASSERT(trans->type == HAMMER_TRANS_FLS);
1716 hammer_normalize_cursor(cursor);
1717 cursor->key_beg.localization = HAMMER_LOCALIZE_MISC;
1718 cursor->key_beg.obj_id = ip->obj_id;
1719 cursor->key_beg.create_tid = 0;
1720 cursor->key_beg.delete_tid = 0;
1721 cursor->key_beg.obj_type = 0;
1722 cursor->key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
1723 cursor->key_beg.key = HAMMER_MIN_KEY;
1725 cursor->key_end = cursor->key_beg;
1726 cursor->key_end.rec_type = 0xFFFF;
1727 cursor->key_end.key = HAMMER_MAX_KEY;
1729 cursor->asof = ip->obj_asof;
1730 cursor->flags &= ~HAMMER_CURSOR_INITMASK;
1731 cursor->flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1732 cursor->flags |= HAMMER_CURSOR_DELETE_VISIBILITY;
1733 cursor->flags |= HAMMER_CURSOR_BACKEND;
1735 error = hammer_ip_first(cursor);
1738 * Iterate through matching records and mark them as deleted.
1740 while (error == 0) {
1741 leaf = cursor->leaf;
1743 KKASSERT(leaf->base.delete_tid == 0);
1746 * Mark the record and B-Tree entry as deleted. This will
1747 * also physically delete the B-Tree entry, record, and
1748 * data if the retention policy dictates. The function
1749 * will set HAMMER_CURSOR_DELBTREE which hammer_ip_next()
1750 * uses to perform a fixup.
1752 * Directory entries (and delete-on-disk directory entries)
1753 * must be synced and cannot be deleted.
1755 if (leaf->base.rec_type != HAMMER_RECTYPE_DIRENTRY) {
1756 error = hammer_ip_delete_record(cursor, ip, trans->tid);
1761 error = hammer_ip_next(cursor);
1764 hammer_cache_node(&ip->cache[1], cursor->node);
1765 if (error == EDEADLK) {
1766 hammer_done_cursor(cursor);
1767 error = hammer_init_cursor(trans, cursor, &ip->cache[1], ip);
1771 if (error == ENOENT)
1777 * Delete the record at the current cursor. On success the cursor will
1778 * be positioned appropriately for an iteration but may no longer be at the deleted element.
1781 * This routine is only called from the backend.
1783 * NOTE: This can return EDEADLK, requiring the caller to terminate the cursor and retry.
1787 hammer_ip_delete_record(hammer_cursor_t cursor, hammer_inode_t ip,
1790 hammer_off_t zone2_offset;
1791 hammer_record_t iprec;
1792 hammer_btree_elm_t elm;
1797 KKASSERT(cursor->flags & HAMMER_CURSOR_BACKEND);
1799 hmp = cursor->node->hmp;
1802 * In-memory (unsynchronized) records can simply be freed. This
1803 * only occurs in range iterations since all other records are
1804 * individually synchronized. Thus there should be no confusion with
1807 * An in-memory record may be deleted before being committed to disk,
1808 * but could have been accessed in the meantime. The backing store
1809 * may never have been marked allocated and so hammer_blockmap_free() may
1810 * never get called on it. Because of this we have to make sure that
1811 * we've gotten rid of any related hammer_buffer or buffer cache buffers.
1814 if (hammer_cursor_inmem(cursor)) {
1815 iprec = cursor->iprec;
1816 KKASSERT((iprec->flags & HAMMER_RECF_INTERLOCK_BE) == 0);
1817 iprec->flags |= HAMMER_RECF_DELETED_FE;
1818 iprec->flags |= HAMMER_RECF_DELETED_BE;
1820 if (iprec->leaf.data_offset && iprec->leaf.data_len) {
1821 zone2_offset = hammer_blockmap_lookup(hmp, iprec->leaf.data_offset, &error);
1822 KKASSERT(error == 0);
1823 hammer_del_buffers(hmp,
1824 iprec->leaf.data_offset,
1826 iprec->leaf.data_len);
1832 * On-disk records are marked as deleted by updating their delete_tid.
1833 * This does not affect their position in the B-Tree (which is based
1834 * on their create_tid).
1836 error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_LEAF);
1840 * If we were mounted with the nohistory option, we physically
1841 * delete the record.
1843 dodelete = hammer_nohistory(ip);
1846 error = hammer_cursor_upgrade(cursor);
1848 elm = &cursor->node->ondisk->elms[cursor->index];
1849 hammer_modify_node(cursor->trans, cursor->node,
1850 &elm->leaf.base.delete_tid,
1851 sizeof(elm->leaf.base.delete_tid));
1852 elm->leaf.base.delete_tid = tid;
1853 hammer_modify_node_done(cursor->node);
1856 * An on-disk record cannot have the same delete_tid
1857 * as its create_tid. In a chain of record updates
1858 * this could result in a duplicate record.
1860 KKASSERT(elm->leaf.base.delete_tid != elm->leaf.base.create_tid);
1864 if (error == 0 && dodelete) {
1865 error = hammer_delete_at_cursor(cursor, NULL);
1867 panic("hammer_ip_delete_record: unable to physically delete the record!\n");
1875 hammer_delete_at_cursor(hammer_cursor_t cursor, int64_t *stat_bytes)
1877 hammer_btree_elm_t elm;
1878 hammer_off_t data_offset;
1883 elm = &cursor->node->ondisk->elms[cursor->index];
1884 KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);
1886 data_offset = elm->leaf.data_offset;
1887 data_len = elm->leaf.data_len;
1888 rec_type = elm->leaf.base.rec_type;
1890 error = hammer_btree_delete(cursor);
1893 * This forces a fixup for the iteration because
1894 * the cursor is now either sitting at the 'next'
1895 * element or sitting at the end of a leaf.
1897 if ((cursor->flags & HAMMER_CURSOR_DISKEOF) == 0) {
1898 cursor->flags |= HAMMER_CURSOR_DELBTREE;
1899 cursor->flags &= ~HAMMER_CURSOR_ATEDISK;
1903 switch(data_offset & HAMMER_OFF_ZONE_MASK) {
1904 case HAMMER_ZONE_LARGE_DATA:
1905 case HAMMER_ZONE_SMALL_DATA:
1906 case HAMMER_ZONE_META:
1907 hammer_blockmap_free(cursor->trans,
1908 data_offset, data_len);
1918 * Determine whether we can remove a directory. This routine checks whether
1919 * a directory is empty or not and enforces flush connectivity.
1921 * Flush connectivity requires that we block if the target directory is
1922 * currently flushing; otherwise it may not end up in the same flush group.
1924 * Returns 0 on success, ENOTEMPTY or EDEADLK (or other errors) on failure.
1927 hammer_ip_check_directory_empty(hammer_transaction_t trans, hammer_inode_t ip)
1929 struct hammer_cursor cursor;
1933 * Check directory empty
1935 hammer_init_cursor(trans, &cursor, &ip->cache[1], ip);
1937 cursor.key_beg.localization = HAMMER_LOCALIZE_MISC;
1938 cursor.key_beg.obj_id = ip->obj_id;
1939 cursor.key_beg.create_tid = 0;
1940 cursor.key_beg.delete_tid = 0;
1941 cursor.key_beg.obj_type = 0;
1942 cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE + 1;
1943 cursor.key_beg.key = HAMMER_MIN_KEY;
1945 cursor.key_end = cursor.key_beg;
1946 cursor.key_end.rec_type = 0xFFFF;
1947 cursor.key_end.key = HAMMER_MAX_KEY;
1949 cursor.asof = ip->obj_asof;
1950 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE | HAMMER_CURSOR_ASOF;
1952 error = hammer_ip_first(&cursor);
1953 if (error == ENOENT)
	error = 0;
1955 else if (error == 0)
	error = ENOTEMPTY;
1957 hammer_done_cursor(&cursor);