2 * Copyright (c) 2008 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * $DragonFly: src/sys/vfs/hammer/hammer_mirror.c,v 1.3 2008/06/27 20:56:59 dillon Exp $
37 * HAMMER mirroring ioctls - serialize and deserialize modifications made
/*
 * Forward declarations for the static helpers defined later in this file.
 *
 * NOTE(review): this excerpt is missing interior lines (the original file's
 * line numbers skip); in particular the third parameter line of the
 * hammer_mirror_write() prototype is absent here — confirm against the
 * full source.
 */
43 static int hammer_mirror_check(hammer_cursor_t cursor,
44 struct hammer_ioc_mrecord *mrec);
45 static int hammer_mirror_update(hammer_cursor_t cursor,
46 struct hammer_ioc_mrecord *mrec);
47 static int hammer_mirror_write(hammer_cursor_t cursor,
48 struct hammer_ioc_mrecord *mrec,
50 static int hammer_mirror_localize_data(hammer_data_ondisk_t data,
51 hammer_btree_leaf_elm_t leaf);
54 * All B-Tree records within the specified key range which also conform
55 * to the transaction id range are returned. Mirroring code keeps track
56 * of the last transaction id fully scanned and can efficiently pick up
57 * where it left off if interrupted.
/*
 * Mirror-read ioctl backend: scan the B-Tree over the caller-supplied key
 * range and copy out, into mirror->ubuf, an mrecord for every leaf element
 * whose create_tid or delete_tid falls within [tid_beg, tid_end).
 * mirror->key_cur tracks progress so an interrupted scan can resume.
 *
 * NOTE(review): this excerpt drops many interior lines (error returns,
 * closing braces, local declarations such as error/data_len/bytes/uptr) —
 * the original line numbers fused to each line below visibly skip.
 * Nothing structural should be inferred from the gaps; verify against the
 * complete hammer_mirror.c.
 */
60 hammer_ioc_mirror_read(hammer_transaction_t trans, hammer_inode_t ip,
61 struct hammer_ioc_mirror_rw *mirror)
63 struct hammer_cursor cursor;
64 struct hammer_ioc_mrecord mrec;
65 hammer_btree_leaf_elm_t elm;
66 const int head_size = HAMMER_MREC_HEADSIZE;
67 const int crc_start = HAMMER_MREC_CRCOFF;
/*
 * Reject keys that carry pseudofs localization bits; the caller supplies
 * object-space keys and we add ip->obj_localization ourselves below.
 */
73 if ((mirror->key_beg.localization | mirror->key_end.localization) &
74 HAMMER_LOCALIZE_PSEUDOFS_MASK) {
/* An inverted key range is a caller error (error path elided here). */
77 if (hammer_btree_cmp(&mirror->key_beg, &mirror->key_end) > 0)
80 mirror->key_cur = mirror->key_beg;
81 mirror->key_cur.localization += ip->obj_localization;
82 bzero(&mrec, sizeof(mrec));
85 error = hammer_init_cursor(trans, &cursor, NULL, NULL);
87 hammer_done_cursor(&cursor);
/* (Re)position the cursor at the current resume point. */
90 cursor.key_beg = mirror->key_cur;
91 cursor.key_end = mirror->key_end;
92 cursor.key_end.localization += ip->obj_localization;
94 cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
95 cursor.flags |= HAMMER_CURSOR_BACKEND;
/*
98 * This flag filters the search to only return elements whose create
99 * or delete TID is >= mirror_tid. The B-Tree uses the mirror_tid
100 * field stored with internal and leaf nodes to shortcut the scan.
 */
102 cursor.flags |= HAMMER_CURSOR_MIRROR_FILTERED;
103 cursor.mirror_tid = mirror->tid_beg;
105 error = hammer_btree_first(&cursor);
/*
108 * Leaf node. Only return elements modified in the range
109 * requested by userland.
 */
111 KKASSERT(cursor.node->ondisk->type == HAMMER_BTREE_TYPE_LEAF);
112 elm = &cursor.node->ondisk->elms[cursor.index].leaf;
/*
 * Skip elements whose create AND delete TIDs both fall outside
 * [tid_beg, tid_end) — the skip/continue statement is elided here.
 */
114 if (elm->base.create_tid < mirror->tid_beg ||
115 elm->base.create_tid >= mirror->tid_end) {
116 if (elm->base.delete_tid < mirror->tid_beg ||
117 elm->base.delete_tid >= mirror->tid_end) {
/* Remember the last element reached for interrupted-scan resume. */
122 mirror->key_cur = elm->base;
/*
125 * Yield to more important tasks
 */
127 if ((error = hammer_signal_check(trans->hmp)) != 0)
129 if (trans->hmp->sync_lock.wanted) {
130 tsleep(trans, 0, "hmrslo", hz / 10);
/*
 * Throttle: if too much dirty data is queued, kick the flusher and
 * sleep briefly rather than letting the mirror scan swamp the system.
 */
132 if (trans->hmp->locked_dirty_count +
133 trans->hmp->io_running_count > hammer_limit_dirtybufs) {
134 hammer_flusher_async(trans->hmp);
135 tsleep(trans, 0, "hmrslo", hz / 10);
/*
139 * The core code exports the data to userland.
 */
141 data_len = (elm->data_offset) ? elm->data_len : 0;
143 error = hammer_btree_extract(&cursor,
144 HAMMER_CURSOR_GET_DATA);
/*
 * Record size = header + payload, rounded up to the HAMMER head
 * alignment so successive records stay aligned in the user buffer.
 */
148 bytes = offsetof(struct hammer_ioc_mrecord, data[data_len]);
149 bytes = (bytes + HAMMER_HEAD_ALIGN_MASK) &
150 ~HAMMER_HEAD_ALIGN_MASK;
/* Stop when the user buffer is full (break statement elided here). */
151 if (mirror->count + bytes > mirror->size)
/*
155 * Construct the record for userland and copyout.
157 * The user is asking for a snapshot, if the record was
158 * deleted beyond the user-requested ending tid, the record
159 * is not considered deleted from the point of view of
160 * userland and delete_tid is cleared.
 */
162 mrec.signature = HAMMER_IOC_MIRROR_SIGNATURE;
163 mrec.rec_size = bytes;
165 if (elm->base.delete_tid >= mirror->tid_end)
166 mrec.leaf.base.delete_tid = 0;
/* CRC covers the header starting at rec_size (i.e. excludes rec_crc). */
167 mrec.rec_crc = crc32(&mrec.rec_size, head_size - crc_start);
168 uptr = (char *)mirror->ubuf + mirror->count;
169 error = copyout(&mrec, uptr, head_size);
170 if (data_len && error == 0) {
171 error = copyout(cursor.data, uptr + head_size,
175 mirror->count += bytes;
/* Advance past the current element and continue the scan. */
178 cursor.flags |= HAMMER_CURSOR_ATEDISK;
179 error = hammer_btree_iterate(&cursor);
/* ENOENT from the iterator means a clean end-of-range, not an error. */
182 if (error == ENOENT) {
183 mirror->key_cur = mirror->key_end;
186 hammer_done_cursor(&cursor);
/* EDEADLK: cursor retry path (loop-back elided in this excerpt). */
187 if (error == EDEADLK)
189 if (error == EINTR) {
190 mirror->head.flags |= HAMMER_IOC_HEAD_INTR;
/* Strip the obj_localization we added before returning keys to userland. */
194 mirror->key_cur.localization &= HAMMER_LOCALIZE_MASK;
199 * Copy records from userland to the target mirror. Records which already
200 * exist may only have their delete_tid updated.
/*
 * Mirror-write ioctl backend: consume mrecords from the userland buffer
 * mirror->ubuf and apply them to the target filesystem. Each record is
 * validated (signature, CRC, size bounds) before use; records that already
 * exist on the target may only have their delete_tid updated, records that
 * do not exist are inserted via hammer_mirror_write().
 *
 * NOTE(review): interior lines (error returns, break statements, local
 * declarations for error/uptr/rec_crc, the final argument of the
 * hammer_mirror_write() call) are missing from this excerpt — the fused
 * original line numbers skip. Verify against the complete source.
 */
203 hammer_ioc_mirror_write(hammer_transaction_t trans, hammer_inode_t ip,
204 struct hammer_ioc_mirror_rw *mirror)
206 struct hammer_cursor cursor;
207 struct hammer_ioc_mrecord mrec;
208 const int head_size = HAMMER_MREC_HEADSIZE;
209 const int crc_start = HAMMER_MREC_CRCOFF;
/* Sanity-bound the user buffer size before trusting mirror->size. */
214 if (mirror->size < 0 || mirror->size > 0x70000000)
217 error = hammer_init_cursor(trans, &cursor, NULL, NULL);
219 hammer_normalize_cursor(&cursor);
/* Process records while at least a full header remains in the buffer. */
221 while (error == 0 && mirror->count + head_size <= mirror->size) {
/*
223 * Acquire and validate header
 */
225 uptr = (char *)mirror->ubuf + mirror->count;
226 error = copyin(uptr, &mrec, head_size);
/* Recompute the CRC over the same span mirror_read covered. */
229 rec_crc = crc32(&mrec.rec_size, head_size - crc_start);
230 if (mrec.signature != HAMMER_IOC_MIRROR_SIGNATURE) {
234 if (rec_crc != mrec.rec_crc) {
/* rec_size must cover at least the header and fit inside the buffer. */
238 if (mrec.rec_size < head_size ||
239 mrec.rec_size > head_size + HAMMER_XBUFSIZE + 16 ||
240 mirror->count + mrec.rec_size > mirror->size) {
/* data_len must be sane and the payload must fit within rec_size. */
244 if (mrec.leaf.data_len < 0 ||
245 mrec.leaf.data_len > HAMMER_XBUFSIZE ||
246 offsetof(struct hammer_ioc_mrecord, data[mrec.leaf.data_len]) > mrec.rec_size) {
/*
251 * Re-localize for target. relocalization of data is handled
252 * by hammer_mirror_write().
 */
254 mrec.leaf.base.localization &= HAMMER_LOCALIZE_MASK;
255 mrec.leaf.base.localization += ip->obj_localization;
/*
260 * If the record exists only the delete_tid may be updated.
262 * If the record does not exist we create it. For now we
263 * ignore records with a non-zero delete_tid. Note that
264 * mirror operations are effectively an as-of operation and
265 * delete_tid can be 0 for mirroring purposes even if it is
266 * not actually 0 at the originator.
 */
268 hammer_normalize_cursor(&cursor);
269 cursor.key_beg = mrec.leaf.base;
270 cursor.flags |= HAMMER_CURSOR_BACKEND;
271 cursor.flags &= ~HAMMER_CURSOR_INSERT;
272 error = hammer_btree_lookup(&cursor);
/* Existing record: update delete_tid in place (under the sync lock). */
274 if (error == 0 && hammer_mirror_check(&cursor, &mrec)) {
275 hammer_sync_lock_sh(trans);
276 error = hammer_mirror_update(&cursor, &mrec);
277 hammer_sync_unlock(trans);
/* Missing record that is not deleted at the source: insert it. */
278 } else if (error == ENOENT && mrec.leaf.base.delete_tid == 0) {
279 hammer_sync_lock_sh(trans);
280 error = hammer_mirror_write(&cursor, &mrec,
282 hammer_sync_unlock(trans);
/* EDEADLK from the B-Tree: rebuild the cursor and retry the record. */
288 if (error == EDEADLK) {
289 hammer_done_cursor(&cursor);
290 error = hammer_init_cursor(trans, &cursor, NULL, NULL);
294 mirror->count += mrec.rec_size;
297 hammer_done_cursor(&cursor);
302 * Check whether an update is needed in the case where a match already
303 * exists on the target. The only type of update allowed in this case
304 * is an update of the delete_tid.
306 * Return non-zero if the update should proceed.
/*
 * Decide whether an existing target record needs updating: the only legal
 * in-place change is setting a previously-zero delete_tid to the source's
 * delete_tid.
 *
 * Returns non-zero if the update should proceed, 0 otherwise.
 *
 * NOTE(review): the return statements are missing from this excerpt
 * (original line numbers skip) — from the visible logic, the update is
 * wanted only when the delete_tids differ AND the target's delete_tid is
 * still 0; confirm against the full source.
 */
310 hammer_mirror_check(hammer_cursor_t cursor, struct hammer_ioc_mrecord *mrec)
312 hammer_btree_leaf_elm_t leaf = cursor->leaf;
314 if (leaf->base.delete_tid != mrec->leaf.base.delete_tid) {
315 if (leaf->base.delete_tid != 0)
322 * Update a record in-place. Only the delete_tid can change.
/*
 * Apply a delete_tid/delete_ts update to an existing B-Tree leaf element
 * in place. Called with the sync lock held (shared) by the mirror-write
 * loop; this is the only kind of in-place modification mirroring allows.
 *
 * NOTE(review): the line initializing 'elm' (presumably from
 * cursor->leaf or the cursor's node/index, as in hammer_mirror_check)
 * is missing from this excerpt — the kprintf below reads elm before any
 * visible assignment. Verify against the full source.
 */
326 hammer_mirror_update(hammer_cursor_t cursor, struct hammer_ioc_mrecord *mrec)
328 hammer_btree_leaf_elm_t elm;
/*
 * A zero source delete_tid here means the source still considers the
 * record live while the target has it deleted — report and bail
 * (return elided in this excerpt).
 */
332 if (mrec->leaf.base.delete_tid == 0) {
333 kprintf("mirror_write: object %016llx:%016llx deleted on "
334 "target, not deleted on source\n",
335 elm->base.obj_id, elm->base.key);
/* delete_tid must postdate creation or the element would be malformed. */
339 KKASSERT(elm->base.create_tid < mrec->leaf.base.delete_tid);
/* Bracket the on-disk change with modify/modify_done for UNDO logging. */
340 hammer_modify_node(cursor->trans, cursor->node, elm, sizeof(*elm));
341 elm->base.delete_tid = mrec->leaf.base.delete_tid;
342 elm->delete_ts = mrec->leaf.delete_ts;
343 hammer_modify_node_done(cursor->node);
348 * Write out a new record.
/*
 * Insert a brand-new record (and its data payload, if any) into the
 * target B-Tree. The payload is copied in from userland, CRC-verified,
 * re-localized for the target, and the leaf is inserted; on failure the
 * freshly allocated data blockmap space is released.
 *
 * NOTE(review): this excerpt is missing interior lines — including the
 * third parameter (apparently 'udata', the userland payload pointer read
 * by copyin below), several error-path returns/gotos, and local
 * declarations such as 'ndata' and 'error'. Verify against the full
 * source.
 */
354 hammer_mirror_write(hammer_cursor_t cursor, struct hammer_ioc_mrecord *mrec,
357 hammer_buffer_t data_buffer = NULL;
358 hammer_off_t ndata_offset;
/* Allocate on-media space for the payload only if the record has one. */
363 if (mrec->leaf.data_len && mrec->leaf.data_offset) {
364 ndata = hammer_alloc_data(cursor->trans, mrec->leaf.data_len,
365 mrec->leaf.base.rec_type,
366 &ndata_offset, &data_buffer, &error);
/* Point the leaf at the newly allocated data on the target. */
369 mrec->leaf.data_offset = ndata_offset;
370 hammer_modify_buffer(cursor->trans, data_buffer, NULL, 0);
371 error = copyin(udata, ndata, mrec->leaf.data_len);
/* Verify the payload survived the trip from the source intact. */
373 if (hammer_crc_test_leaf(ndata, &mrec->leaf) == 0) {
374 kprintf("data crc mismatch on pipe\n");
/* Adjust localization embedded in the data (e.g. directory entries). */
377 error = hammer_mirror_localize_data(
383 hammer_modify_buffer_done(data_buffer);
/* No payload: make sure the leaf does not point at stale data. */
385 mrec->leaf.data_offset = 0;
/*
 * Position the cursor for insertion; the lookup is expected to fail
 * with ENOENT (any other result is handled in elided lines).
 */
391 cursor->flags |= HAMMER_CURSOR_INSERT;
392 error = hammer_btree_lookup(cursor);
393 if (error != ENOENT) {
403 error = hammer_btree_insert(cursor, &mrec->leaf);
/*
 * On failure, free the blockmap space allocated above so it is not
 * leaked.
 */
409 if (error && mrec->leaf.data_offset) {
410 hammer_blockmap_free(cursor->trans,
411 mrec->leaf.data_offset,
412 mrec->leaf.data_len);
415 hammer_rel_buffer(data_buffer, 0);
422 * Localize the data payload. Directory entries may need their
423 * localization adjusted.
425 * Pseudo-fs directory entries must be skipped entirely (EBADF).
427 * The root inode must be skipped, it will exist on the target with a
428 * different create_tid so updating it would result in a duplicate. This
429 * also prevents inode updates on the root directory (aka mtime, ctime, etc)
430 * from mirroring, which is ok.
432 * XXX Root directory inode updates - parent_obj_localization is broken.
/*
 * Fix up localization fields embedded in a record's data payload for the
 * target filesystem. Directory entries carry a localization of their own;
 * pseudo-fs entries and root-inode records are skipped (per the comment
 * block above this function in the original source).
 *
 * NOTE(review): the return statements and the modified-flag handling are
 * missing from this excerpt (original line numbers skip) — confirm the
 * EBADF / skip semantics against the full source.
 */
436 hammer_mirror_localize_data(hammer_data_ondisk_t data,
437 hammer_btree_leaf_elm_t leaf)
441 u_int32_t localization;
/* Directory entries: rewrite the embedded localization if it differs. */
443 if (leaf->base.rec_type == HAMMER_RECTYPE_DIRENTRY) {
444 localization = leaf->base.localization &
445 HAMMER_LOCALIZE_PSEUDOFS_MASK;
446 if (data->entry.localization != localization) {
447 data->entry.localization = localization;
/* Entries referencing the root object are skipped (see header comment). */
450 if (data->entry.obj_id == 1)
/* Root inode records must not be mirrored (duplicate create_tid risk). */
453 if (leaf->base.rec_type == HAMMER_RECTYPE_INODE) {
454 if (leaf->base.obj_id == HAMMER_OBJID_ROOT)
/* Re-CRC the data after modification so the leaf CRC stays valid. */
458 hammer_crc_set_leaf(data, leaf);