2 * Copyright (c) 2008 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sys/vfs/hammer/hammer_ioctl.c,v 1.32 2008/11/13 02:23:29 dillon Exp $
39 static int hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
40 struct hammer_ioc_history *hist);
41 static int hammer_ioc_synctid(hammer_transaction_t trans, hammer_inode_t ip,
42 struct hammer_ioc_synctid *std);
43 static int hammer_ioc_get_version(hammer_transaction_t trans,
45 struct hammer_ioc_version *ver);
46 static int hammer_ioc_set_version(hammer_transaction_t trans,
48 struct hammer_ioc_version *ver);
49 static int hammer_ioc_get_info(hammer_transaction_t trans,
50 struct hammer_ioc_info *info);
51 static int hammer_ioc_add_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
52 struct hammer_ioc_snapshot *snap);
53 static int hammer_ioc_del_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
54 struct hammer_ioc_snapshot *snap);
55 static int hammer_ioc_get_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
56 struct hammer_ioc_snapshot *snap);
57 static int hammer_ioc_get_config(hammer_transaction_t trans, hammer_inode_t ip,
58 struct hammer_ioc_config *snap);
59 static int hammer_ioc_set_config(hammer_transaction_t trans, hammer_inode_t ip,
60 struct hammer_ioc_config *snap);
61 static int hammer_ioc_get_data(hammer_transaction_t trans, hammer_inode_t ip,
62 struct hammer_ioc_data *data);
/*
 * Top-level dispatcher for HAMMER filesystem ioctls.
 *
 * Verifies the caller holds PRIV_HAMMER_IOCTL, wraps the whole
 * operation in a HAMMER transaction, then switches on the ioctl
 * command 'com' to the per-command handler.  'data' is the raw
 * ioctl argument buffer and is cast to the command-specific
 * structure by each case.
 *
 * NOTE(review): this view of the file is missing lines (case labels,
 * break statements, braces were dropped by extraction); the comments
 * below describe only what the visible lines establish.
 */
65 hammer_ioctl(hammer_inode_t ip, u_long com, caddr_t data, int fflag,
68 struct hammer_transaction trans;
/* Gate all HAMMER ioctls behind the HAMMER ioctl privilege. */
71 error = priv_check_cred(cred, PRIV_HAMMER_IOCTL, 0);
/* All commands below run inside a single HAMMER transaction. */
73 hammer_start_transaction(&trans, ip->hmp);
78 error = hammer_ioc_prune(&trans, ip,
79 (struct hammer_ioc_prune *)data);
82 case HAMMERIOC_GETHISTORY:
83 error = hammer_ioc_gethistory(&trans, ip,
84 (struct hammer_ioc_history *)data);
86 case HAMMERIOC_REBLOCK:
88 error = hammer_ioc_reblock(&trans, ip,
89 (struct hammer_ioc_reblock *)data);
92 case HAMMERIOC_REBALANCE:
94 * Rebalancing needs to lock a lot of B-Tree nodes. The
95 * children and children's children. Systems with very
96 * little memory will not be able to do it.
/* Refuse to rebalance when the system buffer count is too small. */
98 if (error == 0 && nbuf < HAMMER_REBALANCE_MIN_BUFS) {
99 hkprintf("System has insufficient buffers "
100 "to rebalance the tree. nbuf < %d\n",
101 HAMMER_REBALANCE_MIN_BUFS);
105 error = hammer_ioc_rebalance(&trans, ip,
106 (struct hammer_ioc_rebalance *)data);
109 case HAMMERIOC_SYNCTID:
110 error = hammer_ioc_synctid(&trans, ip,
111 (struct hammer_ioc_synctid *)data);
/* Pseudo-filesystem (PFS) management commands. */
113 case HAMMERIOC_GET_PSEUDOFS:
114 error = hammer_ioc_get_pseudofs(&trans, ip,
115 (struct hammer_ioc_pseudofs_rw *)data);
117 case HAMMERIOC_SET_PSEUDOFS:
119 error = hammer_ioc_set_pseudofs(&trans, ip, cred,
120 (struct hammer_ioc_pseudofs_rw *)data);
123 case HAMMERIOC_UPG_PSEUDOFS:
125 error = hammer_ioc_upgrade_pseudofs(&trans, ip,
126 (struct hammer_ioc_pseudofs_rw *)data);
129 case HAMMERIOC_DGD_PSEUDOFS:
131 error = hammer_ioc_downgrade_pseudofs(&trans, ip,
132 (struct hammer_ioc_pseudofs_rw *)data);
135 case HAMMERIOC_RMR_PSEUDOFS:
137 error = hammer_ioc_destroy_pseudofs(&trans, ip,
138 (struct hammer_ioc_pseudofs_rw *)data);
141 case HAMMERIOC_WAI_PSEUDOFS:
143 error = hammer_ioc_wait_pseudofs(&trans, ip,
144 (struct hammer_ioc_pseudofs_rw *)data);
/* Mirroring stream read/write. */
147 case HAMMERIOC_MIRROR_READ:
149 error = hammer_ioc_mirror_read(&trans, ip,
150 (struct hammer_ioc_mirror_rw *)data);
153 case HAMMERIOC_MIRROR_WRITE:
155 error = hammer_ioc_mirror_write(&trans, ip,
156 (struct hammer_ioc_mirror_rw *)data);
159 case HAMMERIOC_GET_VERSION:
160 error = hammer_ioc_get_version(&trans, ip,
161 (struct hammer_ioc_version *)data);
163 case HAMMERIOC_GET_INFO:
164 error = hammer_ioc_get_info(&trans,
165 (struct hammer_ioc_info *)data);
167 case HAMMERIOC_SET_VERSION:
169 error = hammer_ioc_set_version(&trans, ip,
170 (struct hammer_ioc_version *)data);
/*
 * Volume add/del additionally require the separate
 * PRIV_HAMMER_VOLUME privilege.
 */
173 case HAMMERIOC_ADD_VOLUME:
175 error = priv_check_cred(cred, PRIV_HAMMER_VOLUME, 0);
177 error = hammer_ioc_volume_add(&trans, ip,
178 (struct hammer_ioc_volume *)data);
181 case HAMMERIOC_DEL_VOLUME:
183 error = priv_check_cred(cred, PRIV_HAMMER_VOLUME, 0);
185 error = hammer_ioc_volume_del(&trans, ip,
186 (struct hammer_ioc_volume *)data);
189 case HAMMERIOC_LIST_VOLUMES:
190 error = hammer_ioc_volume_list(&trans, ip,
191 (struct hammer_ioc_volume_list *)data);
/* Snapshot and config record management (see helpers below). */
193 case HAMMERIOC_ADD_SNAPSHOT:
195 error = hammer_ioc_add_snapshot(
196 &trans, ip, (struct hammer_ioc_snapshot *)data);
199 case HAMMERIOC_DEL_SNAPSHOT:
201 error = hammer_ioc_del_snapshot(
202 &trans, ip, (struct hammer_ioc_snapshot *)data);
205 case HAMMERIOC_GET_SNAPSHOT:
206 error = hammer_ioc_get_snapshot(
207 &trans, ip, (struct hammer_ioc_snapshot *)data);
209 case HAMMERIOC_GET_CONFIG:
210 error = hammer_ioc_get_config(
211 &trans, ip, (struct hammer_ioc_config *)data);
213 case HAMMERIOC_SET_CONFIG:
215 error = hammer_ioc_set_config(
216 &trans, ip, (struct hammer_ioc_config *)data);
219 case HAMMERIOC_DEDUP:
221 error = hammer_ioc_dedup(
222 &trans, ip, (struct hammer_ioc_dedup *)data);
225 case HAMMERIOC_GET_DATA:
227 error = hammer_ioc_get_data(
228 &trans, ip, (struct hammer_ioc_data *)data);
231 case HAMMERIOC_SCAN_PSEUDOFS:
232 error = hammer_ioc_scan_pseudofs(
233 &trans, ip, (struct hammer_ioc_pseudofs_rw *)data);
/* Commit/close the transaction regardless of which command ran. */
239 hammer_done_transaction(&trans);
244 * Iterate through an object's inode or an object's records and record
247 static void add_history(hammer_inode_t ip, struct hammer_ioc_history *hist,
248 hammer_btree_elm_t elm);
/*
 * Collect the modification history (create/delete TIDs) of an inode,
 * or of a key range within it, into the caller-supplied 'hist'
 * structure.
 *
 * With HAMMER_IOC_HISTORY_ATKEY set, the scan is restricted to the
 * file-relative key range [hist->key, hist->nxt_key); otherwise the
 * inode record itself is scanned.  Each matching B-Tree element is
 * fed to add_history(), which fills hist->hist_ary and raises
 * NEXT_TID / NEXT_KEY / EOF flags to drive iterative calls.
 */
252 hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
253 struct hammer_ioc_history *hist)
255 struct hammer_cursor cursor;
256 hammer_btree_elm_t elm;
260 * Validate the structure and initialize for return.
/* Reject inverted TID range and (in ATKEY mode) inverted key range. */
262 if (hist->beg_tid > hist->end_tid)
264 if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
265 if (hist->key > hist->nxt_key)
269 hist->obj_id = ip->obj_id;
271 hist->nxt_tid = hist->end_tid;
/* Clear all continuation/status flags before the scan. */
272 hist->head.flags &= ~HAMMER_IOC_HISTORY_NEXT_TID;
273 hist->head.flags &= ~HAMMER_IOC_HISTORY_NEXT_KEY;
274 hist->head.flags &= ~HAMMER_IOC_HISTORY_EOF;
275 hist->head.flags &= ~HAMMER_IOC_HISTORY_UNSYNCED;
/*
 * Flag the inode as UNSYNCED if it has pending modifications other
 * than atime/mtime — history on media may be behind memory state.
 */
276 if ((ip->flags & HAMMER_INODE_MODMASK) &
277 ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
278 hist->head.flags |= HAMMER_IOC_HISTORY_UNSYNCED;
282 * Setup the cursor. We can't handle undeletable records
283 * (create_tid of 0) at the moment. A create_tid of 0 has
284 * a special meaning and cannot be specified in the cursor.
286 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
288 hammer_done_cursor(&cursor);
292 cursor.key_beg.obj_id = hist->obj_id;
293 cursor.key_beg.create_tid = hist->beg_tid;
294 cursor.key_beg.delete_tid = 0;
295 cursor.key_beg.obj_type = 0;
/* create_tid 0 is reserved; bump HAMMER_MIN_TID to 1. */
296 if (cursor.key_beg.create_tid == HAMMER_MIN_TID)
297 cursor.key_beg.create_tid = 1;
299 cursor.key_end.obj_id = hist->obj_id;
300 cursor.key_end.create_tid = hist->end_tid;
301 cursor.key_end.delete_tid = 0;
302 cursor.key_end.obj_type = 0;
/* end_tid itself is excluded from the scan. */
304 cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE;
306 if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
308 * key-range within the file. For a regular file the
309 * on-disk key represents BASE+LEN, not BASE, so the
310 * first possible record containing the offset 'key'
311 * has an on-disk key of (key + 1).
313 cursor.key_beg.key = hist->key;
314 cursor.key_end.key = HAMMER_MAX_KEY;
315 cursor.key_beg.localization = ip->obj_localization |
316 HAMMER_LOCALIZE_MISC;
317 cursor.key_end.localization = ip->obj_localization |
318 HAMMER_LOCALIZE_MISC;
/* The record type (and localization) depend on the object type. */
320 switch(ip->ino_data.obj_type) {
321 case HAMMER_OBJTYPE_REGFILE:
322 ++cursor.key_beg.key;
323 cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
325 case HAMMER_OBJTYPE_DIRECTORY:
326 cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
327 cursor.key_beg.localization = ip->obj_localization |
328 hammer_dir_localization(ip);
329 cursor.key_end.localization = ip->obj_localization |
330 hammer_dir_localization(ip);
332 case HAMMER_OBJTYPE_DBFILE:
333 cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
339 cursor.key_end.rec_type = cursor.key_beg.rec_type;
/* No ATKEY: scan the inode record itself (key 0, RECTYPE_INODE). */
344 cursor.key_beg.key = 0;
345 cursor.key_end.key = 0;
346 cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
347 cursor.key_end.rec_type = HAMMER_RECTYPE_INODE;
348 cursor.key_beg.localization = ip->obj_localization |
349 HAMMER_LOCALIZE_INODE;
350 cursor.key_end.localization = ip->obj_localization |
351 HAMMER_LOCALIZE_INODE;
/* Iterate elements, accumulating history until a stop flag is set. */
354 error = hammer_btree_first(&cursor);
356 elm = &cursor.node->ondisk->elms[cursor.index];
358 add_history(ip, hist, elm);
/* Stop once the array saturated or the scan hit a boundary. */
359 if (hist->head.flags & (HAMMER_IOC_HISTORY_NEXT_TID |
360 HAMMER_IOC_HISTORY_NEXT_KEY |
361 HAMMER_IOC_HISTORY_EOF)) {
364 error = hammer_btree_iterate(&cursor);
/* ENOENT from the iteration means we cleanly reached the end. */
366 if (error == ENOENT) {
367 hist->head.flags |= HAMMER_IOC_HISTORY_EOF;
370 hammer_done_cursor(&cursor);
375 * Add the scanned element to the ioctl return structure. Some special
376 * casing is required for regular files to accomodate how data ranges are
/*
 * Appends elm's create_tid and/or delete_tid to hist->hist_ary when
 * they fall inside [hist->beg_tid, hist->end_tid).  Only leaf RECORD
 * elements are considered.  For regular files the on-disk key is the
 * END of the data range (BASE+LEN), so range checks subtract
 * data_len.  When the array fills, NEXT_TID is set and nxt_tid
 * records where a follow-up call should resume.
 */
380 add_history(hammer_inode_t ip, struct hammer_ioc_history *hist,
381 hammer_btree_elm_t elm)
/* Ignore anything that is not a leaf record element. */
385 if (elm->base.btype != HAMMER_BTREE_TYPE_RECORD)
387 if ((hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) &&
388 ip->ino_data.obj_type == HAMMER_OBJTYPE_REGFILE) {
/*
 * Regular file: the record covers
 * [key - data_len, key).  Pull nxt_key back to the start of
 * this record if it lies between hist->key and nxt_key.
 */
392 if (hist->nxt_key > elm->leaf.base.key - elm->leaf.data_len &&
393 hist->key < elm->leaf.base.key - elm->leaf.data_len) {
394 hist->nxt_key = elm->leaf.base.key - elm->leaf.data_len;
396 if (hist->nxt_key > elm->leaf.base.key)
397 hist->nxt_key = elm->leaf.base.key;
400 * Record is beyond MAXPHYS, there won't be any more records
401 * in the iteration covering the requested offset (key).
403 if (elm->leaf.base.key >= MAXPHYS &&
404 elm->leaf.base.key - MAXPHYS > hist->key) {
405 hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_KEY;
409 * Data-range of record does not cover the key.
411 if (elm->leaf.base.key - elm->leaf.data_len > hist->key)
/* Non-regular-file ATKEY: keys are exact, no data_len adjustment. */
414 } else if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
418 if (hist->nxt_key > elm->leaf.base.key &&
419 hist->key < elm->leaf.base.key) {
420 hist->nxt_key = elm->leaf.base.key;
424 * Record is beyond the requested key.
426 if (elm->leaf.base.key > hist->key)
427 hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_KEY;
431 * Add create_tid if it is in-bounds.
/* Skip duplicates of the previously recorded tid (i - 1 check). */
435 elm->leaf.base.create_tid != hist->hist_ary[i - 1].tid) &&
436 elm->leaf.base.create_tid >= hist->beg_tid &&
437 elm->leaf.base.create_tid < hist->end_tid) {
/* Array full: remember where to resume and flag NEXT_TID. */
438 if (hist->count == HAMMER_MAX_HISTORY_ELMS) {
439 hist->nxt_tid = elm->leaf.base.create_tid;
440 hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_TID;
443 hist->hist_ary[i].tid = elm->leaf.base.create_tid;
444 hist->hist_ary[i].time32 = elm->leaf.create_ts;
449 * Add delete_tid if it is in-bounds. Note that different portions
450 * of the history may have overlapping data ranges with different
451 * delete_tid's. If this case occurs the delete_tid may match the
452 * create_tid of a following record. XXX
/* delete_tid of 0 means the record was never deleted — skip it. */
458 if (elm->leaf.base.delete_tid &&
459 elm->leaf.base.delete_tid >= hist->beg_tid &&
460 elm->leaf.base.delete_tid < hist->end_tid) {
461 if (i == HAMMER_MAX_HISTORY_ELMS) {
462 hist->nxt_tid = elm->leaf.base.delete_tid;
463 hist->head.flags |= HAMMER_IOC_HISTORY_NEXT_TID;
466 hist->hist_ary[i].tid = elm->leaf.base.delete_tid;
467 hist->hist_ary[i].time32 = elm->leaf.delete_ts;
473 * Acquire synchronization TID
/*
 * Returns a synchronization TID in std->tid according to the
 * requested mode, flushing the filesystem to varying degrees:
 *   NONE  - report the flusher's current tid without flushing
 *           (explicitly inaccurate).
 *   ASYNC - queue dirty inodes and kick an async flush; tid is
 *           still inaccurate.
 *   SYNC1 - queue and synchronously flush; tid is accurate.
 *   SYNC2 - as SYNC1 plus one extra sync pass after sampling tid
 *           (presumably to push the media fully up to date —
 *           TODO confirm against flusher semantics).
 */
477 hammer_ioc_synctid(hammer_transaction_t trans, hammer_inode_t ip,
478 struct hammer_ioc_synctid *std)
480 hammer_mount_t hmp = ip->hmp;
484 case HAMMER_SYNCTID_NONE:
485 std->tid = hmp->flusher.tid; /* inaccurate */
487 case HAMMER_SYNCTID_ASYNC:
488 hammer_queue_inodes_flusher(hmp, MNT_NOWAIT);
489 hammer_flusher_async(hmp, NULL);
490 std->tid = hmp->flusher.tid; /* inaccurate */
492 case HAMMER_SYNCTID_SYNC1:
493 hammer_queue_inodes_flusher(hmp, MNT_WAIT);
494 hammer_flusher_sync(hmp);
495 std->tid = hmp->flusher.tid;
497 case HAMMER_SYNCTID_SYNC2:
498 hammer_queue_inodes_flusher(hmp, MNT_WAIT);
499 hammer_flusher_sync(hmp);
500 std->tid = hmp->flusher.tid;
/* Second sync pass after sampling the tid. */
501 hammer_flusher_sync(hmp);
511 * Retrieve version info.
513 * Load min_version, wip_version, and max_versino. If cur_version is passed
514 * as 0 then load the current version into cur_version. Load the description
515 * for cur_version into the description array.
517 * Returns 0 on success, EINVAL if cur_version is non-zero and set to an
/*
 * Report HAMMER volume-format version information.
 *
 * Fills ver->min_version / wip_version / max_version from compile-time
 * constants.  If the caller passes cur_version == 0 the mount's current
 * version is substituted.  A human-readable description of cur_version
 * is then formatted into ver->description.
 */
522 hammer_ioc_get_version(hammer_transaction_t trans, hammer_inode_t ip,
523 struct hammer_ioc_version *ver)
527 ver->min_version = HAMMER_VOL_VERSION_MIN;
528 ver->wip_version = HAMMER_VOL_VERSION_WIP;
529 ver->max_version = HAMMER_VOL_VERSION_MAX;
/* cur_version == 0 means "tell me the mounted version". */
530 if (ver->cur_version == 0)
531 ver->cur_version = trans->hmp->version;
532 switch(ver->cur_version) {
534 ksnprintf(ver->description, sizeof(ver->description),
535 "First HAMMER release (DragonFly 2.0+)");
538 ksnprintf(ver->description, sizeof(ver->description),
539 "New directory entry layout (DragonFly 2.3+)");
542 ksnprintf(ver->description, sizeof(ver->description),
543 "New snapshot management (DragonFly 2.5+)");
546 ksnprintf(ver->description, sizeof(ver->description),
547 "New undo/flush, faster flush/sync (DragonFly 2.5+)");
550 ksnprintf(ver->description, sizeof(ver->description),
551 "Adjustments for dedup support (DragonFly 2.9+)");
554 ksnprintf(ver->description, sizeof(ver->description),
555 "Directory Hash ALG1 (tmp/rename resistance)");
558 ksnprintf(ver->description, sizeof(ver->description),
/*
 * Change the volume-format version of a mounted HAMMER filesystem.
 *
 * Upgrades are generally allowed up to HAMMER_VOL_VERSION_MAX;
 * the only permitted downgrade is 4 -> 3.  The change is made under
 * the flusher finalize lock and the sync lock, updating both the
 * cached hmp->version and the on-media root volume header.  Crossing
 * into version >= 4 also reinitializes the UNDO FIFO.
 */
571 hammer_ioc_set_version(hammer_transaction_t trans, hammer_inode_t ip,
572 struct hammer_ioc_version *ver)
574 hammer_mount_t hmp = trans->hmp;
575 struct hammer_cursor cursor;
576 hammer_volume_t volume;
/* Remember the version we are migrating away from. */
578 int over = hmp->version;
581 * Generally do not allow downgrades. However, version 4 can
582 * be downgraded to version 3.
584 if (ver->cur_version < hmp->version) {
585 if (!(ver->cur_version == 3 && hmp->version == 4))
/* Setting the same version is a no-op; above MAX is rejected. */
588 if (ver->cur_version == hmp->version)
590 if (ver->cur_version > HAMMER_VOL_VERSION_MAX)
596 * Update the root volume header and the version cached in
597 * the hammer_mount structure.
599 error = hammer_init_cursor(trans, &cursor, NULL, NULL);
/* Serialize against the flusher and any concurrent sync. */
602 hammer_lock_ex(&hmp->flusher.finalize_lock);
603 hammer_sync_lock_ex(trans);
604 hmp->version = ver->cur_version;
607 * If upgrading from version < 4 to version >= 4 the UNDO FIFO
608 * must be reinitialized.
610 if (over < HAMMER_VOL_VERSION_FOUR &&
611 ver->cur_version >= HAMMER_VOL_VERSION_FOUR) {
612 hkprintf("upgrade undo to version 4\n");
613 error = hammer_upgrade_undo_4(trans);
619 * Adjust the version in the volume header
621 volume = hammer_get_root_volume(hmp, &error);
622 KKASSERT(error == 0);
623 hammer_modify_volume_field(cursor.trans, volume, vol_version);
624 volume->ondisk->vol_version = ver->cur_version;
625 hammer_modify_volume_done(volume);
626 hammer_rel_volume(volume, 0);
628 hammer_sync_unlock(trans);
629 hammer_unlock(&hmp->flusher.finalize_lock);
/* Mirror the result into the ioctl header for the caller. */
631 ver->head.error = error;
632 hammer_done_cursor(&cursor);
/*
 * Fill a hammer_ioc_info structure with mount-wide statistics taken
 * from the root volume's on-disk header and the hammer_mount: label,
 * fsid, fstype, version, inode/big-block counts and volume count.
 * rsvbigblocks is derived from the write-reservation check, converted
 * from bytes to big-blocks.
 */
641 hammer_ioc_get_info(hammer_transaction_t trans, struct hammer_ioc_info *info)
643 hammer_volume_ondisk_t ondisk = trans->hmp->rootvol->ondisk;
644 hammer_mount_t hmp = trans->hmp;
646 /* Fill the structure with the necessary information */
647 _hammer_checkspace(hmp, HAMMER_CHKSPC_WRITE, &info->rsvbigblocks);
/* Convert the reservation from bytes to big-block units. */
648 info->rsvbigblocks = info->rsvbigblocks >> HAMMER_BIGBLOCK_BITS;
649 strlcpy(info->vol_label, ondisk->vol_label, sizeof(ondisk->vol_label));
651 info->vol_fsid = hmp->fsid;
652 info->vol_fstype = ondisk->vol_fstype;
653 info->version = hmp->version;
655 info->inodes = ondisk->vol0_stat_inodes;
656 info->bigblocks = ondisk->vol0_stat_bigblocks;
657 info->freebigblocks = ondisk->vol0_stat_freebigblocks;
658 info->nvolumes = hmp->nvolumes;
659 info->rootvol = ondisk->vol_rootvol;
665 * Add a snapshot transaction id(s) to the list of snapshots.
667 * NOTE: Records are created with an allocated TID. If a flush cycle
668 * is in progress the record may be synced in the current flush
669 * cycle and the volume header will reflect the allocation of the
670 * TID, but the synchronization point may not catch up to the
671 * TID until the next flush cycle.
/*
 * For each entry in snap->snaps[snap->index .. snap->count-1] this
 * creates a HAMMER_RECTYPE_SNAPSHOT meta record under the root
 * object, keyed by the snapshot's tid.  Runs under hmp->snapshot_lock.
 * EDEADLK from record creation tears down the cursor and (presumably)
 * retries — the retry loop itself is not visible in this view.
 */
675 hammer_ioc_add_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
676 struct hammer_ioc_snapshot *snap)
678 hammer_mount_t hmp = ip->hmp;
679 struct hammer_btree_leaf_elm leaf;
680 struct hammer_cursor cursor;
/* Sanity-check the caller-supplied count/index pair. */
686 if (snap->count > HAMMER_SNAPS_PER_IOCTL)
688 if (snap->index >= snap->count)
691 hammer_lock_ex(&hmp->snapshot_lock);
694 * Look for keys starting after the previous iteration, or at
695 * the beginning if snap->count is 0.
697 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
699 hammer_done_cursor(&cursor);
703 cursor.asof = HAMMER_MAX_TID;
704 cursor.flags |= HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF;
/* Template leaf shared by every snapshot record we create. */
706 bzero(&leaf, sizeof(leaf));
707 leaf.base.obj_id = HAMMER_OBJID_ROOT;
708 leaf.base.rec_type = HAMMER_RECTYPE_SNAPSHOT;
709 leaf.base.create_tid = hammer_alloc_tid(hmp, 1);
710 leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
711 leaf.base.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
712 leaf.data_len = sizeof(struct hammer_snapshot_data);
714 while (snap->index < snap->count) {
/* The snapshot's tid is the B-Tree key for the record. */
715 leaf.base.key = (int64_t)snap->snaps[snap->index].tid;
716 cursor.key_beg = leaf.base;
717 error = hammer_btree_lookup(&cursor);
724 * NOTE: Must reload key_beg after an ASOF search because
725 * the create_tid may have been modified during the
728 cursor.flags &= ~HAMMER_CURSOR_ASOF;
729 cursor.key_beg = leaf.base;
730 error = hammer_create_at_cursor(&cursor, &leaf,
731 &snap->snaps[snap->index],
732 HAMMER_CREATE_MODE_SYS);
/* EDEADLK: drop the cursor and restart this entry. */
733 if (error == EDEADLK) {
734 hammer_done_cursor(&cursor);
737 cursor.flags |= HAMMER_CURSOR_ASOF;
/* Report the final status back through the ioctl header. */
742 snap->head.error = error;
743 hammer_done_cursor(&cursor);
744 hammer_unlock(&hmp->snapshot_lock);
749 * Delete snapshot transaction id(s) from the list of snapshots.
/*
 * Counterpart of hammer_ioc_add_snapshot(): for each tid in
 * snap->snaps[snap->index .. snap->count-1], look up the matching
 * HAMMER_RECTYPE_SNAPSHOT record under the root object and destroy
 * it.  Runs under hmp->snapshot_lock; EDEADLK tears down the cursor
 * (the restart path is not visible in this view).
 */
753 hammer_ioc_del_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
754 struct hammer_ioc_snapshot *snap)
756 hammer_mount_t hmp = ip->hmp;
757 struct hammer_cursor cursor;
/* Sanity-check the caller-supplied count/index pair. */
763 if (snap->count > HAMMER_SNAPS_PER_IOCTL)
765 if (snap->index >= snap->count)
768 hammer_lock_ex(&hmp->snapshot_lock);
771 * Look for keys starting after the previous iteration, or at
772 * the beginning if snap->count is 0.
774 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
776 hammer_done_cursor(&cursor);
/* Snapshot records live under the root object, RECTYPE_SNAPSHOT. */
780 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
781 cursor.key_beg.create_tid = 0;
782 cursor.key_beg.delete_tid = 0;
783 cursor.key_beg.obj_type = 0;
784 cursor.key_beg.rec_type = HAMMER_RECTYPE_SNAPSHOT;
785 cursor.key_beg.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
786 cursor.asof = HAMMER_MAX_TID;
787 cursor.flags |= HAMMER_CURSOR_ASOF;
789 while (snap->index < snap->count) {
790 cursor.key_beg.key = (int64_t)snap->snaps[snap->index].tid;
791 error = hammer_btree_lookup(&cursor);
794 error = hammer_btree_extract_leaf(&cursor);
/* Physically destroy the record rather than marking it deleted. */
797 error = hammer_delete_at_cursor(&cursor, HAMMER_DELETE_DESTROY,
799 if (error == EDEADLK) {
800 hammer_done_cursor(&cursor);
807 snap->head.error = error;
808 hammer_done_cursor(&cursor);
809 hammer_unlock(&hmp->snapshot_lock);
814 * Retrieve as many snapshot ids as possible or until the array is
815 * full, starting after the last transaction id passed in. If count
816 * is 0 we retrieve starting at the beginning.
818 * NOTE: Because the b-tree key field is signed but transaction ids
819 * are unsigned the returned list will be signed-sorted instead
820 * of unsigned sorted. The Caller must still sort the aggregate
/*
 * Iterates HAMMER_RECTYPE_SNAPSHOT records under the root object,
 * appending each record's snapshot data to snap->snaps[] until the
 * per-ioctl limit is hit or the B-Tree range is exhausted (ENOENT,
 * reported as HAMMER_IOC_SNAPSHOT_EOF).  Also repairs records whose
 * embedded tid disagrees with the B-Tree key (leftovers from the
 * original v3 conversion).
 */
825 hammer_ioc_get_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
826 struct hammer_ioc_snapshot *snap)
828 struct hammer_cursor cursor;
/* Caller must not pass a partially-consumed structure (index != 0). */
834 if (snap->index != 0)
836 if (snap->count > HAMMER_SNAPS_PER_IOCTL)
840 * Look for keys starting after the previous iteration, or at
841 * the beginning if snap->count is 0.
843 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
845 hammer_done_cursor(&cursor);
849 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
850 cursor.key_beg.create_tid = 0;
851 cursor.key_beg.delete_tid = 0;
852 cursor.key_beg.obj_type = 0;
853 cursor.key_beg.rec_type = HAMMER_RECTYPE_SNAPSHOT;
854 cursor.key_beg.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
/* Resume just past the last tid returned by a previous call. */
855 if (snap->count == 0)
856 cursor.key_beg.key = HAMMER_MIN_KEY;
858 cursor.key_beg.key = (int64_t)snap->snaps[snap->count - 1].tid + 1;
860 cursor.key_end = cursor.key_beg;
861 cursor.key_end.key = HAMMER_MAX_KEY;
862 cursor.asof = HAMMER_MAX_TID;
863 cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE | HAMMER_CURSOR_ASOF;
867 error = hammer_btree_first(&cursor);
868 while (error == 0 && snap->count < HAMMER_SNAPS_PER_IOCTL) {
869 error = hammer_btree_extract_leaf(&cursor);
/* Only SNAPSHOT records are copied out; others are skipped. */
872 if (cursor.leaf->base.rec_type == HAMMER_RECTYPE_SNAPSHOT) {
873 error = hammer_btree_extract_data(&cursor);
874 snap->snaps[snap->count] = cursor.data->snap;
877 * The snap data tid should match the key but might
878 * not due to a bug in the HAMMER v3 conversion code.
880 * This error will work itself out over time but we
881 * have to force a match or the snapshot will not
/* Force the returned tid to the authoritative B-Tree key. */
884 if (cursor.data->snap.tid !=
885 (hammer_tid_t)cursor.leaf->base.key) {
886 hkprintf("lo=%08x snapshot key "
887 "0x%016jx data mismatch 0x%016jx\n",
888 cursor.key_beg.localization,
889 (uintmax_t)cursor.data->snap.tid,
890 cursor.leaf->base.key);
891 hkprintf("Probably left over from the "
892 "original v3 conversion, hammer "
893 "cleanup should get it eventually\n");
894 snap->snaps[snap->count].tid =
895 cursor.leaf->base.key;
899 error = hammer_btree_iterate(&cursor);
/* ENOENT means the range is exhausted — flag EOF for the caller. */
902 if (error == ENOENT) {
903 snap->head.flags |= HAMMER_IOC_SNAPSHOT_EOF;
906 snap->head.error = error;
907 hammer_done_cursor(&cursor);
912 * Retrieve the PFS hammer cleanup utility config record. This is
913 * different (newer than) the PFS config.
/*
 * Looks up the single HAMMER_RECTYPE_CONFIG record (key 0, "config
 * space page 0") under the root object and copies its data into
 * config->config.  ENOENT is not fatal — it is passed back through
 * config->head.error so userland can detect "no config present".
 */
917 hammer_ioc_get_config(hammer_transaction_t trans, hammer_inode_t ip,
918 struct hammer_ioc_config *config)
920 struct hammer_cursor cursor;
923 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
925 hammer_done_cursor(&cursor);
929 cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
930 cursor.key_beg.create_tid = 0;
931 cursor.key_beg.delete_tid = 0;
932 cursor.key_beg.obj_type = 0;
933 cursor.key_beg.rec_type = HAMMER_RECTYPE_CONFIG;
934 cursor.key_beg.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
935 cursor.key_beg.key = 0; /* config space page 0 */
/* As-of MAX_TID: always see the most recent version of the record. */
937 cursor.asof = HAMMER_MAX_TID;
938 cursor.flags |= HAMMER_CURSOR_ASOF;
940 error = hammer_btree_lookup(&cursor);
942 error = hammer_btree_extract_data(&cursor);
944 config->config = cursor.data->config;
946 /* error can be ENOENT */
947 config->head.error = error;
948 hammer_done_cursor(&cursor);
953 * Retrieve the PFS hammer cleanup utility config record. This is
954 * different (newer than) the PFS config.
956 * This is kinda a hack.
/*
 * Replaces the HAMMER_RECTYPE_CONFIG record (key 0) under the root
 * object: any existing record is destroyed, then a fresh record
 * carrying config->config is created with a newly allocated TID.
 * EDEADLK from either the delete or the create tears down the cursor
 * (the restart path is not visible in this view).
 */
960 hammer_ioc_set_config(hammer_transaction_t trans, hammer_inode_t ip,
961 struct hammer_ioc_config *config)
963 struct hammer_btree_leaf_elm leaf;
964 struct hammer_cursor cursor;
965 hammer_mount_t hmp = ip->hmp;
969 error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
971 hammer_done_cursor(&cursor);
/* Build the leaf describing the replacement CONFIG record. */
975 bzero(&leaf, sizeof(leaf));
976 leaf.base.obj_id = HAMMER_OBJID_ROOT;
977 leaf.base.rec_type = HAMMER_RECTYPE_CONFIG;
978 leaf.base.create_tid = hammer_alloc_tid(hmp, 1);
979 leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
980 leaf.base.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
981 leaf.base.key = 0; /* page 0 */
982 leaf.data_len = sizeof(struct hammer_config_data);
984 cursor.key_beg = leaf.base;
986 cursor.asof = HAMMER_MAX_TID;
987 cursor.flags |= HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF;
/* Destroy the previous config record if one exists. */
989 error = hammer_btree_lookup(&cursor);
991 error = hammer_btree_extract_data(&cursor);
992 error = hammer_delete_at_cursor(&cursor, HAMMER_DELETE_DESTROY,
994 if (error == EDEADLK) {
995 hammer_done_cursor(&cursor);
1003 * NOTE: Must reload key_beg after an ASOF search because
1004 * the create_tid may have been modified during the
1007 cursor.flags &= ~HAMMER_CURSOR_ASOF;
1008 cursor.key_beg = leaf.base;
1009 error = hammer_create_at_cursor(&cursor, &leaf,
1011 HAMMER_CREATE_MODE_SYS);
1012 if (error == EDEADLK) {
1013 hammer_done_cursor(&cursor);
1017 config->head.error = error;
1018 hammer_done_cursor(&cursor);
/*
 * Fetch the raw data payload of an arbitrary B-Tree element.
 *
 * Looks up the exact element described by data->elm, copies its leaf
 * into data->leaf, and copyout()s up to data->size bytes of the
 * record's data into the user buffer data->ubuf.
 * NOTE(review): the function continues past the visible end of this
 * chunk (return/cleanup path not shown here).
 */
1024 hammer_ioc_get_data(hammer_transaction_t trans, hammer_inode_t ip,
1025 struct hammer_ioc_data *data)
1027 struct hammer_cursor cursor;
1031 /* XXX cached inode ? */
1032 error = hammer_init_cursor(trans, &cursor, NULL, NULL);
1036 cursor.key_beg = data->elm;
1037 cursor.flags |= HAMMER_CURSOR_BACKEND;
1039 error = hammer_btree_lookup(&cursor);
1041 error = hammer_btree_extract_data(&cursor);
1043 data->leaf = *cursor.leaf;
/* Clamp the copy to the user-supplied buffer size. */
1044 bytes = cursor.leaf->data_len;
1045 if (bytes > data->size)
1047 error = copyout(cursor.data, data->ubuf, bytes);
1052 hammer_done_cursor(&cursor);