4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
25 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
26 * Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
27 * Copyright 2014 HybridCluster. All rights reserved.
28 * Copyright 2016 RackTop Systems.
29 * Copyright (c) 2014 Integros [integros.com]
30 * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
34 #include <sys/dmu_impl.h>
35 #include <sys/dmu_tx.h>
37 #include <sys/dnode.h>
38 #include <sys/zfs_context.h>
39 #include <sys/dmu_objset.h>
40 #include <sys/dmu_traverse.h>
41 #include <sys/dsl_dataset.h>
42 #include <sys/dsl_dir.h>
43 #include <sys/dsl_prop.h>
44 #include <sys/dsl_pool.h>
45 #include <sys/dsl_synctask.h>
46 #include <sys/zfs_ioctl.h>
48 #include <sys/zio_checksum.h>
49 #include <sys/zfs_znode.h>
50 #include <zfs_fletcher.h>
53 #include <sys/zfs_onexit.h>
54 #include <sys/dmu_send.h>
55 #include <sys/dsl_destroy.h>
56 #include <sys/blkptr.h>
57 #include <sys/dsl_bookmark.h>
58 #include <sys/zfeature.h>
59 #include <sys/bqueue.h>
66 #define dump_write dmu_dump_write
69 /* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
70 int zfs_send_corrupt_data = B_FALSE;
71 int zfs_send_queue_length = 16 * 1024 * 1024;
72 int zfs_recv_queue_length = 16 * 1024 * 1024;
73 /* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
74 int zfs_send_set_freerecords_bit = B_TRUE;
77 TUNABLE_INT("vfs.zfs.send_set_freerecords_bit", &zfs_send_set_freerecords_bit);
80 static char *dmu_recv_tag = "dmu_recv_tag";
81 const char *recv_clone_name = "%recv";
84 * Use this to override the recordsize calculation for fast zfs send estimates.
86 uint64_t zfs_override_estimate_recordsize = 0;
88 #define BP_SPAN(datablkszsec, indblkshift, level) \
89 (((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \
90 (level) * (indblkshift - SPA_BLKPTRSHIFT)))
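/*
 * Illustrative note (editorial addition, assuming the standard constants
 * SPA_MINBLOCKSHIFT == 9 and SPA_BLKPTRSHIFT == 7): with 128K data blocks
 * (datablkszsec == 256) and 128K indirect blocks (indblkshift == 17), a
 * level-0 pointer spans BP_SPAN(256, 17, 0) == 256 << 9 == 128K, and a
 * level-1 pointer spans 256 << 19 == 128 MiB, since one 128K indirect
 * block holds 1024 128-byte block pointers.
 */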
92 static void byteswap_record(dmu_replay_record_t *drr);
94 struct send_thread_arg {
96 dsl_dataset_t *ds; /* Dataset to traverse */
97 uint64_t fromtxg; /* Traverse from this txg */
98 int flags; /* flags to pass to traverse_dataset */
101 zbookmark_phys_t resume;
104 struct send_block_record {
105 boolean_t eos_marker; /* Marks the end of the stream */
109 uint16_t datablkszsec;
114 dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
116 dsl_dataset_t *ds = dmu_objset_ds(dsp->dsa_os);
121 * The code does not rely on this (len being a multiple of 8). We keep
122 * this assertion because of the corresponding assertion in
123 * receive_read(). Keeping this assertion ensures that we do not
124 * inadvertently break backwards compatibility (causing the assertion
125 * in receive_read() to trigger on old software).
127 * Removing the assertions could be rolled into a new feature that uses
128 * data that isn't 8-byte aligned; if the assertions were removed, a
129 * feature flag would have to be added.
136 auio.uio_iov = &aiov;
138 auio.uio_resid = len;
139 auio.uio_segflg = UIO_SYSSPACE;
140 auio.uio_rw = UIO_WRITE;
141 auio.uio_offset = (off_t)-1;
142 auio.uio_td = dsp->dsa_td;
144 if (dsp->dsa_fp->f_type == DTYPE_VNODE)
146 dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0,
149 fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
150 dsp->dsa_err = EOPNOTSUPP;
152 mutex_enter(&ds->ds_sendstream_lock);
153 *dsp->dsa_off += len;
154 mutex_exit(&ds->ds_sendstream_lock);
156 return (dsp->dsa_err);
160 * For all record types except BEGIN, fill in the checksum (overlaid in
161 * drr_u.drr_checksum.drr_checksum). The checksum verifies everything
162 * up to the start of the checksum itself.
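/*
 * Layout sketch (editorial addition, derived from the assertion below):
 * the checksum field occupies the final sizeof (zio_cksum_t) bytes of the
 * fixed-size replay record, so the running fletcher-4 state in dsa_zc
 * covers every byte of the stream up to, but not including, that trailing
 * field of the record being emitted.
 */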
165 dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
167 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
168 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
169 (void) fletcher_4_incremental_native(dsp->dsa_drr,
170 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
172 if (dsp->dsa_drr->drr_type == DRR_BEGIN) {
173 dsp->dsa_sent_begin = B_TRUE;
175 ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
176 drr_checksum.drr_checksum));
177 dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
179 if (dsp->dsa_drr->drr_type == DRR_END) {
180 dsp->dsa_sent_end = B_TRUE;
182 (void) fletcher_4_incremental_native(&dsp->dsa_drr->
183 drr_u.drr_checksum.drr_checksum,
184 sizeof (zio_cksum_t), &dsp->dsa_zc);
185 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
186 return (SET_ERROR(EINTR));
187 if (payload_len != 0) {
188 (void) fletcher_4_incremental_native(payload, payload_len,
190 if (dump_bytes(dsp, payload, payload_len) != 0)
191 return (SET_ERROR(EINTR));
197 * Fill in the drr_free struct, or perform aggregation if the previous record is
198 * also a free record, and the two are adjacent.
200 * Note that we send free records even for a full send, because we want to be
201 * able to receive a full send as a clone, which requires a list of all the free
202 * and freeobject records that were generated on the source.
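/*
 * Hypothetical example of the aggregation performed below: a pending
 * DRR_FREE for (object 5, offset 0, length 128K) followed by a call for
 * (object 5, offset 128K, length 128K) is coalesced into a single
 * DRR_FREE record covering (object 5, offset 0, length 256K).
 */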
205 dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
208 struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);
211 * When we receive a free record, dbuf_free_range() assumes
212 * that the receiving system doesn't have any dbufs in the range
213 * being freed. This is always true because there is a one-record
214 * constraint: we only send one WRITE record for any given
215 * object,offset. We know that the one-record constraint is
216 * true because we always send data in increasing order by object,offset.
219 * If the increasing-order constraint ever changes, we should find
220 * another way to assert that the one-record constraint is still satisfied.
223 ASSERT(object > dsp->dsa_last_data_object ||
224 (object == dsp->dsa_last_data_object &&
225 offset > dsp->dsa_last_data_offset));
227 if (length != -1ULL && offset + length < offset)
231 * If there is a pending op, but it's not PENDING_FREE, push it out,
232 * since free block aggregation can only be done for blocks of the
233 * same type (i.e., DRR_FREE records can only be aggregated with
234 * other DRR_FREE records. DRR_FREEOBJECTS records can only be
235 * aggregated with other DRR_FREEOBJECTS records).
237 if (dsp->dsa_pending_op != PENDING_NONE &&
238 dsp->dsa_pending_op != PENDING_FREE) {
239 if (dump_record(dsp, NULL, 0) != 0)
240 return (SET_ERROR(EINTR));
241 dsp->dsa_pending_op = PENDING_NONE;
244 if (dsp->dsa_pending_op == PENDING_FREE) {
246 * There should never be a PENDING_FREE if length is -1
247 * (because dump_dnode is the only place where this
248 * function is called with a -1, and only after flushing
249 * any pending record).
251 ASSERT(length != -1ULL);
253 * Check to see whether this free block can be aggregated with the pending one.
256 if (drrf->drr_object == object && drrf->drr_offset +
257 drrf->drr_length == offset) {
258 drrf->drr_length += length;
261 /* not a continuation. Push out pending record */
262 if (dump_record(dsp, NULL, 0) != 0)
263 return (SET_ERROR(EINTR));
264 dsp->dsa_pending_op = PENDING_NONE;
267 /* create a FREE record and make it pending */
268 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
269 dsp->dsa_drr->drr_type = DRR_FREE;
270 drrf->drr_object = object;
271 drrf->drr_offset = offset;
272 drrf->drr_length = length;
273 drrf->drr_toguid = dsp->dsa_toguid;
274 if (length == -1ULL) {
275 if (dump_record(dsp, NULL, 0) != 0)
276 return (SET_ERROR(EINTR));
278 dsp->dsa_pending_op = PENDING_FREE;
285 dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
286 uint64_t object, uint64_t offset, int lsize, int psize, const blkptr_t *bp,
289 uint64_t payload_size;
290 struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);
293 * We send data in increasing object, offset order.
294 * See comment in dump_free() for details.
296 ASSERT(object > dsp->dsa_last_data_object ||
297 (object == dsp->dsa_last_data_object &&
298 offset > dsp->dsa_last_data_offset));
299 dsp->dsa_last_data_object = object;
300 dsp->dsa_last_data_offset = offset + lsize - 1;
303 * If there is any kind of pending aggregation (currently either
304 * a grouping of free objects or free blocks), push it out to
305 * the stream, since aggregation can't be done across operations
306 * of different types.
308 if (dsp->dsa_pending_op != PENDING_NONE) {
309 if (dump_record(dsp, NULL, 0) != 0)
310 return (SET_ERROR(EINTR));
311 dsp->dsa_pending_op = PENDING_NONE;
313 /* write a WRITE record */
314 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
315 dsp->dsa_drr->drr_type = DRR_WRITE;
316 drrw->drr_object = object;
317 drrw->drr_type = type;
318 drrw->drr_offset = offset;
319 drrw->drr_toguid = dsp->dsa_toguid;
320 drrw->drr_logical_size = lsize;
322 /* only set the compression fields if the buf is compressed */
323 if (lsize != psize) {
324 ASSERT(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_COMPRESSED);
325 ASSERT(!BP_IS_EMBEDDED(bp));
326 ASSERT(!BP_SHOULD_BYTESWAP(bp));
327 ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)));
328 ASSERT3U(BP_GET_COMPRESS(bp), !=, ZIO_COMPRESS_OFF);
329 ASSERT3S(psize, >, 0);
330 ASSERT3S(lsize, >=, psize);
332 drrw->drr_compressiontype = BP_GET_COMPRESS(bp);
333 drrw->drr_compressed_size = psize;
334 payload_size = drrw->drr_compressed_size;
336 payload_size = drrw->drr_logical_size;
339 if (bp == NULL || BP_IS_EMBEDDED(bp)) {
341 * There's no pre-computed checksum for partial-block
342 * writes or embedded BP's, so (like
343 * fletcher4-checksummed blocks) userland will have to
344 * compute a dedup-capable checksum itself.
346 drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
348 drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
349 if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
350 ZCHECKSUM_FLAG_DEDUP)
351 drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
352 DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
353 DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
354 DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
355 drrw->drr_key.ddk_cksum = bp->blk_cksum;
358 if (dump_record(dsp, data, payload_size) != 0)
359 return (SET_ERROR(EINTR));
364 dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
365 int blksz, const blkptr_t *bp)
367 char buf[BPE_PAYLOAD_SIZE];
368 struct drr_write_embedded *drrw =
369 &(dsp->dsa_drr->drr_u.drr_write_embedded);
371 if (dsp->dsa_pending_op != PENDING_NONE) {
372 if (dump_record(dsp, NULL, 0) != 0)
374 dsp->dsa_pending_op = PENDING_NONE;
377 ASSERT(BP_IS_EMBEDDED(bp));
379 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
380 dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
381 drrw->drr_object = object;
382 drrw->drr_offset = offset;
383 drrw->drr_length = blksz;
384 drrw->drr_toguid = dsp->dsa_toguid;
385 drrw->drr_compression = BP_GET_COMPRESS(bp);
386 drrw->drr_etype = BPE_GET_ETYPE(bp);
387 drrw->drr_lsize = BPE_GET_LSIZE(bp);
388 drrw->drr_psize = BPE_GET_PSIZE(bp);
390 decode_embedded_bp_compressed(bp, buf);
392 if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
398 dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
400 struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);
402 if (dsp->dsa_pending_op != PENDING_NONE) {
403 if (dump_record(dsp, NULL, 0) != 0)
404 return (SET_ERROR(EINTR));
405 dsp->dsa_pending_op = PENDING_NONE;
408 /* write a SPILL record */
409 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
410 dsp->dsa_drr->drr_type = DRR_SPILL;
411 drrs->drr_object = object;
412 drrs->drr_length = blksz;
413 drrs->drr_toguid = dsp->dsa_toguid;
415 if (dump_record(dsp, data, blksz) != 0)
416 return (SET_ERROR(EINTR));
421 dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
423 struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);
426 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
427 * push it out, since free block aggregation can only be done for
428 * blocks of the same type (i.e., DRR_FREE records can only be
429 * aggregated with other DRR_FREE records. DRR_FREEOBJECTS records
430 * can only be aggregated with other DRR_FREEOBJECTS records).
432 if (dsp->dsa_pending_op != PENDING_NONE &&
433 dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
434 if (dump_record(dsp, NULL, 0) != 0)
435 return (SET_ERROR(EINTR));
436 dsp->dsa_pending_op = PENDING_NONE;
438 if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
440 * See whether this free object array can be aggregated with the pending one.
443 if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
444 drrfo->drr_numobjs += numobjs;
447 /* can't be aggregated. Push out pending record */
448 if (dump_record(dsp, NULL, 0) != 0)
449 return (SET_ERROR(EINTR));
450 dsp->dsa_pending_op = PENDING_NONE;
454 /* write a FREEOBJECTS record */
455 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
456 dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
457 drrfo->drr_firstobj = firstobj;
458 drrfo->drr_numobjs = numobjs;
459 drrfo->drr_toguid = dsp->dsa_toguid;
461 dsp->dsa_pending_op = PENDING_FREEOBJECTS;
467 dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
469 struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);
471 if (object < dsp->dsa_resume_object) {
473 * Note: when resuming, we will visit all the dnodes in
474 * the block of dnodes that we are resuming from. In
475 * this case it's unnecessary to send the dnodes prior to
476 * the one we are resuming from. We should be at most one
477 * block's worth of dnodes behind the resume point.
479 ASSERT3U(dsp->dsa_resume_object - object, <,
480 1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
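/*
 * Geometry note (editorial addition, assuming the standard constants
 * DNODE_BLOCK_SHIFT == 14 and DNODE_SHIFT == 9): a 16K dnode block holds
 * 1 << (14 - 9) == 32 legacy dnode slots, so the assertion above bounds
 * how far behind the resume point this dnode may be.
 */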
484 if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
485 return (dump_freeobjects(dsp, object, 1));
487 if (dsp->dsa_pending_op != PENDING_NONE) {
488 if (dump_record(dsp, NULL, 0) != 0)
489 return (SET_ERROR(EINTR));
490 dsp->dsa_pending_op = PENDING_NONE;
493 /* write an OBJECT record */
494 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
495 dsp->dsa_drr->drr_type = DRR_OBJECT;
496 drro->drr_object = object;
497 drro->drr_type = dnp->dn_type;
498 drro->drr_bonustype = dnp->dn_bonustype;
499 drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
500 drro->drr_bonuslen = dnp->dn_bonuslen;
501 drro->drr_dn_slots = dnp->dn_extra_slots + 1;
502 drro->drr_checksumtype = dnp->dn_checksum;
503 drro->drr_compress = dnp->dn_compress;
504 drro->drr_toguid = dsp->dsa_toguid;
506 if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
507 drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
508 drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;
510 if (dump_record(dsp, DN_BONUS(dnp),
511 P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) {
512 return (SET_ERROR(EINTR));
515 /* Free anything past the end of the file. */
516 if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
517 (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
518 return (SET_ERROR(EINTR));
519 if (dsp->dsa_err != 0)
520 return (SET_ERROR(EINTR));
525 backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
527 if (!BP_IS_EMBEDDED(bp))
531 * Compression function must be legacy, or explicitly enabled.
533 if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
534 !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LZ4)))
538 * Embed type must be explicitly enabled.
540 switch (BPE_GET_ETYPE(bp)) {
541 case BP_EMBEDDED_TYPE_DATA:
542 if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
552 * This is the callback function to traverse_dataset that acts as the worker
553 * thread for dmu_send_impl.
557 send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
558 const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
560 struct send_thread_arg *sta = arg;
561 struct send_block_record *record;
562 uint64_t record_size;
565 ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
566 zb->zb_object >= sta->resume.zb_object);
569 return (SET_ERROR(EINTR));
572 ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
574 } else if (zb->zb_level < 0) {
578 record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP);
579 record->eos_marker = B_FALSE;
582 record->indblkshift = dnp->dn_indblkshift;
583 record->datablkszsec = dnp->dn_datablkszsec;
584 record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
585 bqueue_enqueue(&sta->q, record, record_size);
591 * This function kicks off the traverse_dataset. It also handles setting the
592 * error code of the thread in case something goes wrong, and pushes the End of
593 * Stream record when the traverse_dataset call has finished. If there is no
594 * dataset to traverse, the thread immediately pushes an End of Stream marker.
597 send_traverse_thread(void *arg)
599 struct send_thread_arg *st_arg = arg;
601 struct send_block_record *data;
603 if (st_arg->ds != NULL) {
604 err = traverse_dataset_resume(st_arg->ds,
605 st_arg->fromtxg, &st_arg->resume,
606 st_arg->flags, send_cb, st_arg);
609 st_arg->error_code = err;
611 data = kmem_zalloc(sizeof (*data), KM_SLEEP);
612 data->eos_marker = B_TRUE;
613 bqueue_enqueue(&st_arg->q, data, 1);
618 * This function actually handles figuring out what kind of record needs to be
619 * dumped, reading the data (which has hopefully been prefetched), and calling
620 * the appropriate helper function.
623 do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
625 dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os);
626 const blkptr_t *bp = &data->bp;
627 const zbookmark_phys_t *zb = &data->zb;
628 uint8_t indblkshift = data->indblkshift;
629 uint16_t dblkszsec = data->datablkszsec;
630 spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
631 dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
634 ASSERT3U(zb->zb_level, >=, 0);
636 ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
637 zb->zb_object >= dsa->dsa_resume_object);
639 if (zb->zb_object != DMU_META_DNODE_OBJECT &&
640 DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
642 } else if (BP_IS_HOLE(bp) &&
643 zb->zb_object == DMU_META_DNODE_OBJECT) {
644 uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
645 uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
646 err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT);
647 } else if (BP_IS_HOLE(bp)) {
648 uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
649 uint64_t offset = zb->zb_blkid * span;
650 err = dump_free(dsa, zb->zb_object, offset, span);
651 } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
653 } else if (type == DMU_OT_DNODE) {
654 int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
655 arc_flags_t aflags = ARC_FLAG_WAIT;
658 ASSERT0(zb->zb_level);
660 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
661 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
663 return (SET_ERROR(EIO));
665 dnode_phys_t *blk = abuf->b_data;
666 uint64_t dnobj = zb->zb_blkid * epb;
667 for (int i = 0; i < epb; i += blk[i].dn_extra_slots + 1) {
668 err = dump_dnode(dsa, dnobj + i, blk + i);
672 arc_buf_destroy(abuf, &abuf);
673 } else if (type == DMU_OT_SA) {
674 arc_flags_t aflags = ARC_FLAG_WAIT;
676 int blksz = BP_GET_LSIZE(bp);
678 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
679 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
681 return (SET_ERROR(EIO));
683 err = dump_spill(dsa, zb->zb_object, blksz, abuf->b_data);
684 arc_buf_destroy(abuf, &abuf);
685 } else if (backup_do_embed(dsa, bp)) {
686 /* it's an embedded level-0 block of a regular object */
687 int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
688 ASSERT0(zb->zb_level);
689 err = dump_write_embedded(dsa, zb->zb_object,
690 zb->zb_blkid * blksz, blksz, bp);
692 /* it's a level-0 block of a regular object */
693 arc_flags_t aflags = ARC_FLAG_WAIT;
695 int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
699 * If we have large blocks stored on disk but the send flags
700 * don't allow us to send large blocks, we split the data from
701 * the arc buf into chunks.
703 boolean_t split_large_blocks = blksz > SPA_OLD_MAXBLOCKSIZE &&
704 !(dsa->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);
706 * We should only request compressed data from the ARC if all
707 * the following are true:
708 * - stream compression was requested
709 * - we aren't splitting large blocks into smaller chunks
710 * - the data won't need to be byteswapped before sending
711 * - this isn't an embedded block
712 * - this isn't metadata (if receiving on a different endian
713 * system it can be byteswapped more easily)
715 boolean_t request_compressed =
716 (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_COMPRESSED) &&
717 !split_large_blocks && !BP_SHOULD_BYTESWAP(bp) &&
718 !BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp));
720 ASSERT0(zb->zb_level);
721 ASSERT(zb->zb_object > dsa->dsa_resume_object ||
722 (zb->zb_object == dsa->dsa_resume_object &&
723 zb->zb_blkid * blksz >= dsa->dsa_resume_offset));
730 ASSERT3U(blksz, ==, BP_GET_LSIZE(bp));
732 enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
733 if (request_compressed)
734 zioflags |= ZIO_FLAG_RAW;
735 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
736 ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0) {
737 if (zfs_send_corrupt_data) {
738 /* Send a block filled with 0x"zfs badd bloc" */
739 abuf = arc_alloc_buf(spa, &abuf, ARC_BUFC_DATA,
742 for (ptr = abuf->b_data;
743 (char *)ptr < (char *)abuf->b_data + blksz;
745 *ptr = 0x2f5baddb10cULL;
747 return (SET_ERROR(EIO));
751 offset = zb->zb_blkid * blksz;
753 if (split_large_blocks) {
754 ASSERT3U(arc_get_compression(abuf), ==,
756 char *buf = abuf->b_data;
757 while (blksz > 0 && err == 0) {
758 int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
759 err = dump_write(dsa, type, zb->zb_object,
760 offset, n, n, NULL, buf);
766 err = dump_write(dsa, type, zb->zb_object, offset,
767 blksz, arc_buf_size(abuf), bp, abuf->b_data);
769 arc_buf_destroy(abuf, &abuf);
772 ASSERT(err == 0 || err == EINTR);
777 * Pop the new data off the queue, and free the old data.
779 static struct send_block_record *
780 get_next_record(bqueue_t *bq, struct send_block_record *data)
782 struct send_block_record *tmp = bqueue_dequeue(bq);
783 kmem_free(data, sizeof (*data));
788 * Actually do the bulk of the work in a zfs send.
790 * Note: Releases dp using the specified tag.
793 dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds,
794 zfs_bookmark_phys_t *ancestor_zb, boolean_t is_clone,
795 boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
796 int outfd, uint64_t resumeobj, uint64_t resumeoff,
798 vnode_t *vp, offset_t *off)
800 struct file *fp, offset_t *off)
804 dmu_replay_record_t *drr;
807 uint64_t fromtxg = 0;
808 uint64_t featureflags = 0;
809 struct send_thread_arg to_arg = { 0 };
811 err = dmu_objset_from_ds(to_ds, &os);
813 dsl_pool_rele(dp, tag);
817 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
818 drr->drr_type = DRR_BEGIN;
819 drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
820 DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
824 if (dmu_objset_type(os) == DMU_OST_ZFS) {
826 if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
827 kmem_free(drr, sizeof (dmu_replay_record_t));
828 dsl_pool_rele(dp, tag);
829 return (SET_ERROR(EINVAL));
831 if (version >= ZPL_VERSION_SA) {
832 featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
837 if (large_block_ok && to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS])
838 featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
839 if (to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_DNODE])
840 featureflags |= DMU_BACKUP_FEATURE_LARGE_DNODE;
842 spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
843 featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
844 if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
845 featureflags |= DMU_BACKUP_FEATURE_LZ4;
848 featureflags |= DMU_BACKUP_FEATURE_COMPRESSED;
851 (DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED)) !=
852 0 && spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) {
853 featureflags |= DMU_BACKUP_FEATURE_LZ4;
856 if (resumeobj != 0 || resumeoff != 0) {
857 featureflags |= DMU_BACKUP_FEATURE_RESUMING;
860 DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
863 drr->drr_u.drr_begin.drr_creation_time =
864 dsl_dataset_phys(to_ds)->ds_creation_time;
865 drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
867 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
868 drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
869 if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET)
870 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
871 if (zfs_send_set_freerecords_bit)
872 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_FREERECORDS;
874 if (ancestor_zb != NULL) {
875 drr->drr_u.drr_begin.drr_fromguid =
876 ancestor_zb->zbm_guid;
877 fromtxg = ancestor_zb->zbm_creation_txg;
879 dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname);
880 if (!to_ds->ds_is_snapshot) {
881 (void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
882 sizeof (drr->drr_u.drr_begin.drr_toname));
885 dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);
888 dsp->dsa_outfd = outfd;
889 dsp->dsa_proc = curproc;
890 dsp->dsa_td = curthread;
894 dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid;
895 dsp->dsa_pending_op = PENDING_NONE;
896 dsp->dsa_featureflags = featureflags;
897 dsp->dsa_resume_object = resumeobj;
898 dsp->dsa_resume_offset = resumeoff;
900 mutex_enter(&to_ds->ds_sendstream_lock);
901 list_insert_head(&to_ds->ds_sendstreams, dsp);
902 mutex_exit(&to_ds->ds_sendstream_lock);
904 dsl_dataset_long_hold(to_ds, FTAG);
905 dsl_pool_rele(dp, tag);
907 void *payload = NULL;
908 size_t payload_len = 0;
909 if (resumeobj != 0 || resumeoff != 0) {
910 dmu_object_info_t to_doi;
911 err = dmu_object_info(os, resumeobj, &to_doi);
914 SET_BOOKMARK(&to_arg.resume, to_ds->ds_object, resumeobj, 0,
915 resumeoff / to_doi.doi_data_block_size);
917 nvlist_t *nvl = fnvlist_alloc();
918 fnvlist_add_uint64(nvl, "resume_object", resumeobj);
919 fnvlist_add_uint64(nvl, "resume_offset", resumeoff);
920 payload = fnvlist_pack(nvl, &payload_len);
921 drr->drr_payloadlen = payload_len;
925 err = dump_record(dsp, payload, payload_len);
926 fnvlist_pack_free(payload, payload_len);
932 err = bqueue_init(&to_arg.q, zfs_send_queue_length,
933 offsetof(struct send_block_record, ln));
934 to_arg.error_code = 0;
935 to_arg.cancel = B_FALSE;
937 to_arg.fromtxg = fromtxg;
938 to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH;
939 (void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, &p0,
940 TS_RUN, minclsyspri);
942 struct send_block_record *to_data;
943 to_data = bqueue_dequeue(&to_arg.q);
945 while (!to_data->eos_marker && err == 0) {
946 err = do_dump(dsp, to_data);
947 to_data = get_next_record(&to_arg.q, to_data);
948 if (issig(JUSTLOOKING) && issig(FORREAL))
953 to_arg.cancel = B_TRUE;
954 while (!to_data->eos_marker) {
955 to_data = get_next_record(&to_arg.q, to_data);
958 kmem_free(to_data, sizeof (*to_data));
960 bqueue_destroy(&to_arg.q);
962 if (err == 0 && to_arg.error_code != 0)
963 err = to_arg.error_code;
968 if (dsp->dsa_pending_op != PENDING_NONE)
969 if (dump_record(dsp, NULL, 0) != 0)
970 err = SET_ERROR(EINTR);
973 if (err == EINTR && dsp->dsa_err != 0)
978 bzero(drr, sizeof (dmu_replay_record_t));
979 drr->drr_type = DRR_END;
980 drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
981 drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;
983 if (dump_record(dsp, NULL, 0) != 0)
987 mutex_enter(&to_ds->ds_sendstream_lock);
988 list_remove(&to_ds->ds_sendstreams, dsp);
989 mutex_exit(&to_ds->ds_sendstream_lock);
991 VERIFY(err != 0 || (dsp->dsa_sent_begin && dsp->dsa_sent_end));
993 kmem_free(drr, sizeof (dmu_replay_record_t));
994 kmem_free(dsp, sizeof (dmu_sendarg_t));
996 dsl_dataset_long_rele(to_ds, FTAG);
1002 dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
1003 boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
1005 int outfd, vnode_t *vp, offset_t *off)
1007 int outfd, struct file *fp, offset_t *off)
1012 dsl_dataset_t *fromds = NULL;
1015 err = dsl_pool_hold(pool, FTAG, &dp);
1019 err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
1021 dsl_pool_rele(dp, FTAG);
1025 if (fromsnap != 0) {
1026 zfs_bookmark_phys_t zb;
1029 err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
1031 dsl_dataset_rele(ds, FTAG);
1032 dsl_pool_rele(dp, FTAG);
1035 if (!dsl_dataset_is_before(ds, fromds, 0))
1036 err = SET_ERROR(EXDEV);
1037 zb.zbm_creation_time =
1038 dsl_dataset_phys(fromds)->ds_creation_time;
1039 zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
1040 zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
1041 is_clone = (fromds->ds_dir != ds->ds_dir);
1042 dsl_dataset_rele(fromds, FTAG);
1043 err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
1044 embedok, large_block_ok, compressok, outfd, 0, 0, fp, off);
1046 err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
1047 embedok, large_block_ok, compressok, outfd, 0, 0, fp, off);
1049 dsl_dataset_rele(ds, FTAG);
1054 dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
1055 boolean_t large_block_ok, boolean_t compressok, int outfd,
1056 uint64_t resumeobj, uint64_t resumeoff,
1058 vnode_t *vp, offset_t *off)
1060 struct file *fp, offset_t *off)
1066 boolean_t owned = B_FALSE;
1068 if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
1069 return (SET_ERROR(EINVAL));
1071 err = dsl_pool_hold(tosnap, FTAG, &dp);
1075 if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
1077 * We are sending a filesystem or volume. Ensure
1078 * that it doesn't change by owning the dataset.
1080 err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
1083 err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
1086 dsl_pool_rele(dp, FTAG);
1090 if (fromsnap != NULL) {
1091 zfs_bookmark_phys_t zb;
1092 boolean_t is_clone = B_FALSE;
1093 int fsnamelen = strchr(tosnap, '@') - tosnap;
1096 * If the fromsnap is in a different filesystem, then
1097 * mark the send stream as a clone.
1099 if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
1100 (fromsnap[fsnamelen] != '@' &&
1101 fromsnap[fsnamelen] != '#')) {
1105 if (strchr(fromsnap, '@')) {
1106 dsl_dataset_t *fromds;
1107 err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
1109 if (!dsl_dataset_is_before(ds, fromds, 0))
1110 err = SET_ERROR(EXDEV);
1111 zb.zbm_creation_time =
1112 dsl_dataset_phys(fromds)->ds_creation_time;
1113 zb.zbm_creation_txg =
1114 dsl_dataset_phys(fromds)->ds_creation_txg;
1115 zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
1116 is_clone = (ds->ds_dir != fromds->ds_dir);
1117 dsl_dataset_rele(fromds, FTAG);
1120 err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
1123 dsl_dataset_rele(ds, FTAG);
1124 dsl_pool_rele(dp, FTAG);
1127 err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
1128 embedok, large_block_ok, compressok,
1129 outfd, resumeobj, resumeoff, fp, off);
1131 err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
1132 embedok, large_block_ok, compressok,
1133 outfd, resumeobj, resumeoff, fp, off);
1136 dsl_dataset_disown(ds, FTAG);
1138 dsl_dataset_rele(ds, FTAG);
1143 dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t uncompressed,
1144 uint64_t compressed, boolean_t stream_compressed, uint64_t *sizep)
1149 * Assume that space (both on-disk and in-stream) is dominated by
1150 * data. We will adjust for indirect blocks and the copies property,
1151 * but ignore per-object space used (e.g., dnodes and DRR_OBJECT records).
1153 uint64_t recordsize;
1154 uint64_t record_count;
1156 VERIFY0(dmu_objset_from_ds(ds, &os));
1158 /* Assume all (uncompressed) blocks are recordsize. */
1159 if (zfs_override_estimate_recordsize != 0) {
1160 recordsize = zfs_override_estimate_recordsize;
1161 } else if (os->os_phys->os_type == DMU_OST_ZVOL) {
1162 err = dsl_prop_get_int_ds(ds,
1163 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize);
1165 err = dsl_prop_get_int_ds(ds,
1166 zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize);
1170 record_count = uncompressed / recordsize;
1173 * If we're estimating a send size for a compressed stream, use the
1174 * compressed data size to estimate the stream size. Otherwise, use the
1175 * uncompressed data size.
1177 size = stream_compressed ? compressed : uncompressed;
1180 * Subtract out approximate space used by indirect blocks.
1181 * Assume most space is used by data blocks (non-indirect, non-dnode).
1182 * Assume no ditto blocks or internal fragmentation.
1184 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
1187 size -= record_count * sizeof (blkptr_t);
1189 /* Add in the space for the record associated with each block. */
1190 size += record_count * sizeof (dmu_replay_record_t);
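/*
 * Worked example (editorial addition, assuming 128-byte block pointers):
 * sending 1 GiB of data at recordsize 128K gives record_count == 8192,
 * so roughly 1 MiB (8192 * sizeof (blkptr_t)) is subtracted for indirect
 * blocks and 8192 * sizeof (dmu_replay_record_t) is added for the
 * per-block DRR_WRITE headers.
 */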
1198 dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds,
1199 boolean_t stream_compressed, uint64_t *sizep)
1201 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1203 uint64_t uncomp, comp;
1205 ASSERT(dsl_pool_config_held(dp));
1207 /* tosnap must be a snapshot */
1208 if (!ds->ds_is_snapshot)
1209 return (SET_ERROR(EINVAL));
1211 /* fromsnap, if provided, must be a snapshot */
1212 if (fromds != NULL && !fromds->ds_is_snapshot)
1213 return (SET_ERROR(EINVAL));
1216 * fromsnap must be an earlier snapshot from the same fs as tosnap,
1217 * or the origin's fs.
1219 if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
1220 return (SET_ERROR(EXDEV));
1222 /* Get compressed and uncompressed size estimates of changed data. */
1223 if (fromds == NULL) {
1224 uncomp = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
1225 comp = dsl_dataset_phys(ds)->ds_compressed_bytes;
1228 err = dsl_dataset_space_written(fromds, ds,
1229 &used, &comp, &uncomp);
1234 err = dmu_adjust_send_estimate_for_indirects(ds, uncomp, comp,
1235 stream_compressed, sizep);
1237 * Add the size of the BEGIN and END records to the estimate.
1239 *sizep += 2 * sizeof (dmu_replay_record_t);
1243 struct calculate_send_arg {
1244 uint64_t uncompressed;
1245 uint64_t compressed;
1249 * Simple callback used to traverse the blocks of a snapshot and sum their
1250 * uncompressed and compressed sizes.
1254 dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
1255 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
1257 struct calculate_send_arg *space = arg;
1258 if (bp != NULL && !BP_IS_HOLE(bp)) {
1259 space->uncompressed += BP_GET_UCSIZE(bp);
1260 space->compressed += BP_GET_PSIZE(bp);
1266 * Given a destination snapshot and a TXG, calculate the approximate size of a
1267 * send stream sent from that TXG. from_txg may be zero, indicating that the
1268 * whole snapshot will be sent.
1271 dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg,
1272 boolean_t stream_compressed, uint64_t *sizep)
1274 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1276 struct calculate_send_arg size = { 0 };
1278 ASSERT(dsl_pool_config_held(dp));
1280 /* tosnap must be a snapshot */
1281 if (!ds->ds_is_snapshot)
1282 return (SET_ERROR(EINVAL));
1284 /* verify that from_txg is before the provided snapshot was taken */
1285 if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) {
1286 return (SET_ERROR(EXDEV));
1290 * traverse the blocks of the snapshot with birth times after
1291 * from_txg, summing their uncompressed size
1293 err = traverse_dataset(ds, from_txg, TRAVERSE_POST,
1294 dmu_calculate_send_traversal, &size);
1298 err = dmu_adjust_send_estimate_for_indirects(ds, size.uncompressed,
1299 size.compressed, stream_compressed, sizep);
1303 typedef struct dmu_recv_begin_arg {
1304 const char *drba_origin;
1305 dmu_recv_cookie_t *drba_cookie;
1307 uint64_t drba_snapobj;
1308 } dmu_recv_begin_arg_t;
1311 recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
1317 dsl_pool_t *dp = ds->ds_dir->dd_pool;
1319 /* Temporary clone name must not exist. */
1320 error = zap_lookup(dp->dp_meta_objset,
1321 dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
1323 if (error != ENOENT)
1324 return (error == 0 ? SET_ERROR(EBUSY) : error);
1326 /* Resume state must not be set. */
1327 if (dsl_dataset_has_resume_receive_state(ds))
1328 return (SET_ERROR(EBUSY));
1330 /* New snapshot name must not exist. */
1331 error = zap_lookup(dp->dp_meta_objset,
1332 dsl_dataset_phys(ds)->ds_snapnames_zapobj,
1333 drba->drba_cookie->drc_tosnap, 8, 1, &val);
1334 if (error != ENOENT)
1335 return (error == 0 ? SET_ERROR(EEXIST) : error);
1337 /* must not have children if receiving a ZVOL */
1338 error = zap_count(dp->dp_meta_objset,
1339 dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &children);
1342 if (drba->drba_cookie->drc_drrb->drr_type != DMU_OST_ZFS &&
1344 return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
1347 * Check snapshot limit before receiving. We'll recheck again at the
1348 * end, but might as well abort before receiving if we're already over the limit.
1351 * Note that we do not check the file system limit with
1352 * dsl_dir_fscount_check because the temporary %clones don't count
1353 * against that limit.
1355 error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
1356 NULL, drba->drba_cred);
1360 if (fromguid != 0) {
1361 dsl_dataset_t *snap;
1362 uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
1364 /* Find snapshot in this dir that matches fromguid. */
1366 error = dsl_dataset_hold_obj(dp, obj, FTAG,
1369 return (SET_ERROR(ENODEV));
1370 if (snap->ds_dir != ds->ds_dir) {
1371 dsl_dataset_rele(snap, FTAG);
1372 return (SET_ERROR(ENODEV));
1374 if (dsl_dataset_phys(snap)->ds_guid == fromguid)
1376 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
1377 dsl_dataset_rele(snap, FTAG);
1380 return (SET_ERROR(ENODEV));
1382 if (drba->drba_cookie->drc_force) {
1383 drba->drba_snapobj = obj;
1386 * If we are not forcing, there must be no
1387 * changes since fromsnap.
1389 if (dsl_dataset_modified_since_snap(ds, snap)) {
1390 dsl_dataset_rele(snap, FTAG);
1391 return (SET_ERROR(ETXTBSY));
1393 drba->drba_snapobj = ds->ds_prev->ds_object;
1396 dsl_dataset_rele(snap, FTAG);
1398 /* if full, then must be forced */
1399 if (!drba->drba_cookie->drc_force)
1400 return (SET_ERROR(EEXIST));
1401 /* start from $ORIGIN@$ORIGIN, if supported */
1402 drba->drba_snapobj = dp->dp_origin_snap != NULL ?
1403 dp->dp_origin_snap->ds_object : 0;
1411 dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
1413 dmu_recv_begin_arg_t *drba = arg;
1414 dsl_pool_t *dp = dmu_tx_pool(tx);
1415 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1416 uint64_t fromguid = drrb->drr_fromguid;
1417 int flags = drrb->drr_flags;
1419 uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
1421 const char *tofs = drba->drba_cookie->drc_tofs;
1423 /* already checked */
1424 ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
1425 ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));
1427 if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
1428 DMU_COMPOUNDSTREAM ||
1429 drrb->drr_type >= DMU_OST_NUMTYPES ||
1430 ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
1431 return (SET_ERROR(EINVAL));
1433 /* Verify pool version supports SA if SA_SPILL feature set */
1434 if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
1435 spa_version(dp->dp_spa) < SPA_VERSION_SA)
1436 return (SET_ERROR(ENOTSUP));
1438 if (drba->drba_cookie->drc_resumable &&
1439 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
1440 return (SET_ERROR(ENOTSUP));
1443 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
1444 * record to a plain WRITE record, so the pool must have the
1445 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
1446 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
1448 if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
1449 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
1450 return (SET_ERROR(ENOTSUP));
1451 if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
1452 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
1453 return (SET_ERROR(ENOTSUP));
1456 * The receiving code doesn't know how to translate large blocks
1457 * to smaller ones, so the pool must have the LARGE_BLOCKS
1458 * feature enabled if the stream has LARGE_BLOCKS. Same with large dnodes.
1461 if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
1462 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
1463 return (SET_ERROR(ENOTSUP));
1464 if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
1465 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE))
1466 return (SET_ERROR(ENOTSUP));
1468 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1470 /* target fs already exists; recv into temp clone */
1472 /* Can't recv a clone into an existing fs */
1473 if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
1474 dsl_dataset_rele(ds, FTAG);
1475 return (SET_ERROR(EINVAL));
1478 error = recv_begin_check_existing_impl(drba, ds, fromguid);
1479 dsl_dataset_rele(ds, FTAG);
1480 } else if (error == ENOENT) {
1481 /* target fs does not exist; must be a full backup or clone */
1482 char buf[ZFS_MAX_DATASET_NAME_LEN];
1486 * If it's a non-clone incremental, we are missing the
1487 * target fs, so fail the recv.
1489 if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
1491 return (SET_ERROR(ENOENT));
1494 * If we're receiving a full send as a clone, and it doesn't
1495 * contain all the necessary free records and freeobject
1496 * records, reject it.
1498 if (fromguid == 0 && drba->drba_origin &&
1499 !(flags & DRR_FLAG_FREERECORDS))
1500 return (SET_ERROR(EINVAL));
1502 /* Open the parent of tofs */
1503 ASSERT3U(strlen(tofs), <, sizeof (buf));
1504 (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
1505 error = dsl_dataset_hold(dp, buf, FTAG, &ds);
1510 * Check filesystem and snapshot limits before receiving. We'll
1511 * recheck snapshot limits again at the end (we create the
1512 * filesystems and increment those counts during begin_sync).
1514 error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
1515 ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
1517 dsl_dataset_rele(ds, FTAG);
1521 error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
1522 ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
1524 dsl_dataset_rele(ds, FTAG);
1528 /* can't recv below anything but filesystems (e.g. no ZVOLs) */
1529 error = dmu_objset_from_ds(ds, &os);
1531 dsl_dataset_rele(ds, FTAG);
1534 if (dmu_objset_type(os) != DMU_OST_ZFS) {
1535 dsl_dataset_rele(ds, FTAG);
1536 return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
1539 if (drba->drba_origin != NULL) {
1540 dsl_dataset_t *origin;
1541 error = dsl_dataset_hold(dp, drba->drba_origin,
1544 dsl_dataset_rele(ds, FTAG);
1547 if (!origin->ds_is_snapshot) {
1548 dsl_dataset_rele(origin, FTAG);
1549 dsl_dataset_rele(ds, FTAG);
1550 return (SET_ERROR(EINVAL));
1552 if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
1554 dsl_dataset_rele(origin, FTAG);
1555 dsl_dataset_rele(ds, FTAG);
1556 return (SET_ERROR(ENODEV));
1558 dsl_dataset_rele(origin, FTAG);
1561 dsl_dataset_rele(ds, FTAG);
1568 dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
1570 dmu_recv_begin_arg_t *drba = arg;
1571 dsl_pool_t *dp = dmu_tx_pool(tx);
1572 objset_t *mos = dp->dp_meta_objset;
1573 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1574 const char *tofs = drba->drba_cookie->drc_tofs;
1575 dsl_dataset_t *ds, *newds;
1578 uint64_t crflags = 0;
1580 if (drrb->drr_flags & DRR_FLAG_CI_DATA)
1581 crflags |= DS_FLAG_CI_DATASET;
1583 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1585 /* create temporary clone */
1586 dsl_dataset_t *snap = NULL;
1587 if (drba->drba_snapobj != 0) {
1588 VERIFY0(dsl_dataset_hold_obj(dp,
1589 drba->drba_snapobj, FTAG, &snap));
1591 dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
1592 snap, crflags, drba->drba_cred, tx);
1593 if (drba->drba_snapobj != 0)
1594 dsl_dataset_rele(snap, FTAG);
1595 dsl_dataset_rele(ds, FTAG);
1599 dsl_dataset_t *origin = NULL;
1601 VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
1603 if (drba->drba_origin != NULL) {
1604 VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
1608 /* Create new dataset. */
1609 dsobj = dsl_dataset_create_sync(dd,
1610 strrchr(tofs, '/') + 1,
1611 origin, crflags, drba->drba_cred, tx);
1613 dsl_dataset_rele(origin, FTAG);
1614 dsl_dir_rele(dd, FTAG);
1615 drba->drba_cookie->drc_newfs = B_TRUE;
1617 VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));
1619 if (drba->drba_cookie->drc_resumable) {
1620 dsl_dataset_zapify(newds, tx);
1621 if (drrb->drr_fromguid != 0) {
1622 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
1623 8, 1, &drrb->drr_fromguid, tx));
1625 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
1626 8, 1, &drrb->drr_toguid, tx));
1627 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
1628 1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
1631 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
1633 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
1635 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
1637 if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
1638 DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
1639 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
1642 if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
1643 DMU_BACKUP_FEATURE_EMBED_DATA) {
1644 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
1647 if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
1648 DMU_BACKUP_FEATURE_COMPRESSED) {
1649 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
1654 dmu_buf_will_dirty(newds->ds_dbuf, tx);
1655 dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;
1658 * If we actually created a non-clone, we need to create the
1659 * objset in our new dataset.
1661 rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
1662 if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
1663 (void) dmu_objset_create_impl(dp->dp_spa,
1664 newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
1666 rrw_exit(&newds->ds_bp_rwlock, FTAG);
1668 drba->drba_cookie->drc_ds = newds;
1670 spa_history_log_internal_ds(newds, "receive", tx, "");
1674 dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
1676 dmu_recv_begin_arg_t *drba = arg;
1677 dsl_pool_t *dp = dmu_tx_pool(tx);
1678 struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
1680 uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
1682 const char *tofs = drba->drba_cookie->drc_tofs;
1684 /* 6 extra bytes for /%recv */
1685 char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
1687 /* already checked */
1688 ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
1689 ASSERT(featureflags & DMU_BACKUP_FEATURE_RESUMING);
1691 if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
1692 DMU_COMPOUNDSTREAM ||
1693 drrb->drr_type >= DMU_OST_NUMTYPES)
1694 return (SET_ERROR(EINVAL));
1696 /* Verify pool version supports SA if SA_SPILL feature set */
1697 if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
1698 spa_version(dp->dp_spa) < SPA_VERSION_SA)
1699 return (SET_ERROR(ENOTSUP));
1702 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
1703 * record to a plain WRITE record, so the pool must have the
1704 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
1705 * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
1707 if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
1708 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
1709 return (SET_ERROR(ENOTSUP));
1710 if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
1711 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
1712 return (SET_ERROR(ENOTSUP));
1715 * The receiving code doesn't know how to translate large blocks
1716 * to smaller ones, so the pool must have the LARGE_BLOCKS
1717 * feature enabled if the stream has LARGE_BLOCKS. Same with large dnodes.
1720 if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
1721 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
1722 return (SET_ERROR(ENOTSUP));
1723 if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
1724 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE))
1725 return (SET_ERROR(ENOTSUP));
1727 (void) snprintf(recvname, sizeof (recvname), "%s/%s",
1728 tofs, recv_clone_name);
1730 if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
1731 /* %recv does not exist; continue in tofs */
1732 error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
1737 /* check that ds is marked inconsistent */
1738 if (!DS_IS_INCONSISTENT(ds)) {
1739 dsl_dataset_rele(ds, FTAG);
1740 return (SET_ERROR(EINVAL));
1743 /* check that there is resuming data, and that the toguid matches */
1744 if (!dsl_dataset_is_zapified(ds)) {
1745 dsl_dataset_rele(ds, FTAG);
1746 return (SET_ERROR(EINVAL));
1749 error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
1750 DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
1751 if (error != 0 || drrb->drr_toguid != val) {
1752 dsl_dataset_rele(ds, FTAG);
1753 return (SET_ERROR(EINVAL));
1757 * Check if the receive is still running. If so, it will be owned.
1758 * Note that nothing else can own the dataset (e.g. after the receive
1759 * fails) because it will be marked inconsistent.
1761 if (dsl_dataset_has_owner(ds)) {
1762 dsl_dataset_rele(ds, FTAG);
1763 return (SET_ERROR(EBUSY));
1766 /* There should not be any snapshots of this fs yet. */
1767 if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
1768 dsl_dataset_rele(ds, FTAG);
1769 return (SET_ERROR(EINVAL));
1773 * Note: resume point will be checked when we process the first WRITE record.
1777 /* check that the origin matches */
1779 (void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
1780 DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
1781 if (drrb->drr_fromguid != val) {
1782 dsl_dataset_rele(ds, FTAG);
1783 return (SET_ERROR(EINVAL));
1786 dsl_dataset_rele(ds, FTAG);
1791 dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
1793 dmu_recv_begin_arg_t *drba = arg;
1794 dsl_pool_t *dp = dmu_tx_pool(tx);
1795 const char *tofs = drba->drba_cookie->drc_tofs;
1798 /* 6 extra bytes for /%recv */
1799 char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
1801 (void) snprintf(recvname, sizeof (recvname), "%s/%s",
1802 tofs, recv_clone_name);
1804 if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
1805 /* %recv does not exist; continue in tofs */
1806 VERIFY0(dsl_dataset_hold(dp, tofs, FTAG, &ds));
1807 drba->drba_cookie->drc_newfs = B_TRUE;
1810 /* clear the inconsistent flag so that we can own it */
1811 ASSERT(DS_IS_INCONSISTENT(ds));
1812 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1813 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
1814 dsobj = ds->ds_object;
1815 dsl_dataset_rele(ds, FTAG);
1817 VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &ds));
1819 dmu_buf_will_dirty(ds->ds_dbuf, tx);
1820 dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;
1822 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
1823 ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)));
1824 rrw_exit(&ds->ds_bp_rwlock, FTAG);
1826 drba->drba_cookie->drc_ds = ds;
1828 spa_history_log_internal_ds(ds, "resume receive", tx, "");
1832 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
1833 * succeeds; otherwise we will leak the holds on the datasets.
1836 dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
1837 boolean_t force, boolean_t resumable, char *origin, dmu_recv_cookie_t *drc)
1839 dmu_recv_begin_arg_t drba = { 0 };
1841 bzero(drc, sizeof (dmu_recv_cookie_t));
1842 drc->drc_drr_begin = drr_begin;
1843 drc->drc_drrb = &drr_begin->drr_u.drr_begin;
1844 drc->drc_tosnap = tosnap;
1845 drc->drc_tofs = tofs;
1846 drc->drc_force = force;
1847 drc->drc_resumable = resumable;
1848 drc->drc_cred = CRED();
1849 drc->drc_clone = (origin != NULL);
1851 if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
1852 drc->drc_byteswap = B_TRUE;
1853 (void) fletcher_4_incremental_byteswap(drr_begin,
1854 sizeof (dmu_replay_record_t), &drc->drc_cksum);
1855 byteswap_record(drr_begin);
1856 } else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
1857 (void) fletcher_4_incremental_native(drr_begin,
1858 sizeof (dmu_replay_record_t), &drc->drc_cksum);
1860 return (SET_ERROR(EINVAL));
1863 drba.drba_origin = origin;
1864 drba.drba_cookie = drc;
1865 drba.drba_cred = CRED();
1867 if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
1868 DMU_BACKUP_FEATURE_RESUMING) {
1869 return (dsl_sync_task(tofs,
1870 dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
1871 &drba, 5, ZFS_SPACE_CHECK_NORMAL));
1873 return (dsl_sync_task(tofs,
1874 dmu_recv_begin_check, dmu_recv_begin_sync,
1875 &drba, 5, ZFS_SPACE_CHECK_NORMAL));
1879 struct receive_record_arg {
1880 dmu_replay_record_t header;
1881 void *payload; /* Pointer to a buffer containing the payload */
1883 * If the record is a write, pointer to the arc_buf_t containing the data.
1886 arc_buf_t *write_buf;
1888 uint64_t bytes_read; /* bytes read from stream when record created */
1889 boolean_t eos_marker; /* Marks the end of the stream */
1893 struct receive_writer_arg {
1899 * These three args are used to signal to the main thread that we're done.
1907 /* A map from guid to dataset to help handle dedup'd streams. */
1908 avl_tree_t *guid_to_ds_map;
1909 boolean_t resumable;
1910 uint64_t last_object;
1911 uint64_t last_offset;
1912 uint64_t max_object; /* highest object ID referenced in stream */
1913 uint64_t bytes_read; /* bytes read when current record created */
1917 list_t list; /* List of struct receive_objnode. */
1919 * Last object looked up. Used to assert that objects are being looked
1920 * up in ascending order.
1922 uint64_t last_lookup;
1925 struct receive_objnode {
1930 struct receive_arg {
1934 uint64_t voff; /* The current offset in the stream */
1935 uint64_t bytes_read;
1937 * A record that has had its payload read in, but hasn't yet been handed
1938 * off to the worker thread.
1940 struct receive_record_arg *rrd;
1941 /* A record that has had its header read in, but not its payload. */
1942 struct receive_record_arg *next_rrd;
1944 zio_cksum_t prev_cksum;
1947 /* Sorted list of objects not to issue prefetches for. */
1948 struct objlist ignore_objlist;
1951 typedef struct guid_map_entry {
1953 dsl_dataset_t *gme_ds;
1958 guid_compare(const void *arg1, const void *arg2)
1960 const guid_map_entry_t *gmep1 = (const guid_map_entry_t *)arg1;
1961 const guid_map_entry_t *gmep2 = (const guid_map_entry_t *)arg2;
1963 return (AVL_CMP(gmep1->guid, gmep2->guid));
1967 free_guid_map_onexit(void *arg)
1969 avl_tree_t *ca = arg;
1970 void *cookie = NULL;
1971 guid_map_entry_t *gmep;
1973 while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
1974 dsl_dataset_long_rele(gmep->gme_ds, gmep);
1975 dsl_dataset_rele(gmep->gme_ds, gmep);
1976 kmem_free(gmep, sizeof (guid_map_entry_t));
1979 kmem_free(ca, sizeof (avl_tree_t));
1983 restore_bytes(struct receive_arg *ra, void *buf, int len, off_t off, ssize_t *resid)
1989 aiov.iov_base = buf;
1991 auio.uio_iov = &aiov;
1992 auio.uio_iovcnt = 1;
1993 auio.uio_resid = len;
1994 auio.uio_segflg = UIO_SYSSPACE;
1995 auio.uio_rw = UIO_READ;
1996 auio.uio_offset = off;
1997 auio.uio_td = ra->td;
1999 error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td);
2001 fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
2004 *resid = auio.uio_resid;
2009 receive_read(struct receive_arg *ra, int len, void *buf)
2014 * The code doesn't rely on this (lengths being multiples of 8). See
2015 * comment in dump_bytes.
2019 while (done < len) {
2022 ra->err = restore_bytes(ra, buf + done,
2023 len - done, ra->voff, &resid);
2025 if (resid == len - done) {
2027 * Note: ECKSUM indicates that the receive
2028 * was interrupted and can potentially be resumed.
2030 ra->err = SET_ERROR(ECKSUM);
2032 ra->voff += len - done - resid;
2038 ra->bytes_read += len;
2040 ASSERT3U(done, ==, len);
2044 noinline static void
2045 byteswap_record(dmu_replay_record_t *drr)
2047 #define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
2048 #define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
2049 drr->drr_type = BSWAP_32(drr->drr_type);
2050 drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
2052 switch (drr->drr_type) {
2054 DO64(drr_begin.drr_magic);
2055 DO64(drr_begin.drr_versioninfo);
2056 DO64(drr_begin.drr_creation_time);
2057 DO32(drr_begin.drr_type);
2058 DO32(drr_begin.drr_flags);
2059 DO64(drr_begin.drr_toguid);
2060 DO64(drr_begin.drr_fromguid);
2063 DO64(drr_object.drr_object);
2064 DO32(drr_object.drr_type);
2065 DO32(drr_object.drr_bonustype);
2066 DO32(drr_object.drr_blksz);
2067 DO32(drr_object.drr_bonuslen);
2068 DO64(drr_object.drr_toguid);
2070 case DRR_FREEOBJECTS:
2071 DO64(drr_freeobjects.drr_firstobj);
2072 DO64(drr_freeobjects.drr_numobjs);
2073 DO64(drr_freeobjects.drr_toguid);
2076 DO64(drr_write.drr_object);
2077 DO32(drr_write.drr_type);
2078 DO64(drr_write.drr_offset);
2079 DO64(drr_write.drr_logical_size);
2080 DO64(drr_write.drr_toguid);
2081 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
2082 DO64(drr_write.drr_key.ddk_prop);
2083 DO64(drr_write.drr_compressed_size);
2085 case DRR_WRITE_BYREF:
2086 DO64(drr_write_byref.drr_object);
2087 DO64(drr_write_byref.drr_offset);
2088 DO64(drr_write_byref.drr_length);
2089 DO64(drr_write_byref.drr_toguid);
2090 DO64(drr_write_byref.drr_refguid);
2091 DO64(drr_write_byref.drr_refobject);
2092 DO64(drr_write_byref.drr_refoffset);
2093 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
2094 drr_key.ddk_cksum);
2095 DO64(drr_write_byref.drr_key.ddk_prop);
2097 case DRR_WRITE_EMBEDDED:
2098 DO64(drr_write_embedded.drr_object);
2099 DO64(drr_write_embedded.drr_offset);
2100 DO64(drr_write_embedded.drr_length);
2101 DO64(drr_write_embedded.drr_toguid);
2102 DO32(drr_write_embedded.drr_lsize);
2103 DO32(drr_write_embedded.drr_psize);
2106 DO64(drr_free.drr_object);
2107 DO64(drr_free.drr_offset);
2108 DO64(drr_free.drr_length);
2109 DO64(drr_free.drr_toguid);
2112 DO64(drr_spill.drr_object);
2113 DO64(drr_spill.drr_length);
2114 DO64(drr_spill.drr_toguid);
2117 DO64(drr_end.drr_toguid);
2118 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
2122 if (drr->drr_type != DRR_BEGIN) {
2123 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
2130 static inline uint8_t
2131 deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
2133 if (bonus_type == DMU_OT_SA) {
2134 return (1);
2135 } else {
2136 return (1 +
2137 ((DN_OLD_MAX_BONUSLEN -
2138 MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
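* A worked example of the arithmetic above, assuming the usual constants
* (DN_OLD_MAX_BONUSLEN == 320 and SPA_BLKPTRSHIFT == 7, i.e. 128-byte
* block pointers):
*
*	bonus_size == 320  ->  1 + ((320 - 320) >> 7) == 1 block pointer
*	bonus_size ==  64  ->  1 + ((320 -  64) >> 7) == 3 block pointers
*	bonus_size ==   0  ->  1 + ((320 -   0) >> 7) == 3 block pointers
*
* A DMU_OT_SA bonus is special-cased to a single block pointer because SA
* data that outgrows the bonus area moves to a spill block instead.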
2143 save_resume_state(struct receive_writer_arg *rwa,
2144 uint64_t object, uint64_t offset, dmu_tx_t *tx)
2146 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
2148 if (!rwa->resumable)
2152 * We use ds_resume_bytes[] != 0 to indicate that we need to
2153 * update this on disk, so it must not be 0.
2155 ASSERT(rwa->bytes_read != 0);
2158 * We only resume from write records, which have a valid
2159 * (non-meta-dnode) object number.
2161 ASSERT(object != 0);
2164 * For resuming to work correctly, we must receive records in order,
2165 * sorted by object,offset. This is checked by the callers, but
2166 * assert it here for good measure.
2168 ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
2169 ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
2170 offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
2171 ASSERT3U(rwa->bytes_read, >=,
2172 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);
2174 rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
2175 rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
2176 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
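* The values stashed here are later written out in sync context to the
* dataset's ZAP as the DS_FIELD_RESUME_OBJECT/OFFSET/BYTES entries that
* dmu_recv_stream() and resume_check() read back below; that on-disk state
* is what a subsequent "zfs send -t <receive_resume_token>" stream is
* validated against when an interrupted receive is resumed.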
2180 receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
2183 dmu_object_info_t doi;
2187 uint8_t dn_slots = drro->drr_dn_slots != 0 ?
2188 drro->drr_dn_slots : DNODE_MIN_SLOTS;
2190 if (drro->drr_type == DMU_OT_NONE ||
2191 !DMU_OT_IS_VALID(drro->drr_type) ||
2192 !DMU_OT_IS_VALID(drro->drr_bonustype) ||
2193 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
2194 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
2195 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
2196 drro->drr_blksz < SPA_MINBLOCKSIZE ||
2197 drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
2198 drro->drr_bonuslen >
2199 DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
2200 dn_slots >
2201 (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
2202 return (SET_ERROR(EINVAL));
2205 err = dmu_object_info(rwa->os, drro->drr_object, &doi);
2207 if (err != 0 && err != ENOENT && err != EEXIST)
2208 return (SET_ERROR(EINVAL));
2210 if (drro->drr_object > rwa->max_object)
2211 rwa->max_object = drro->drr_object;
2214 * If we are losing blkptrs or changing the block size this must
2215 * be a new file instance. We must clear out the previous file
2216 * contents before we can change this type of metadata in the dnode.
2221 object = drro->drr_object;
2223 nblkptr = deduce_nblkptr(drro->drr_bonustype,
2224 drro->drr_bonuslen);
2226 if (drro->drr_blksz != doi.doi_data_block_size ||
2227 nblkptr < doi.doi_nblkptr ||
2228 dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
2229 err = dmu_free_long_range(rwa->os, drro->drr_object,
2230 0, DMU_OBJECT_END);
2231 if (err != 0)
2232 return (SET_ERROR(EINVAL));
2234 } else if (err == EEXIST) {
2236 * The object requested is currently an interior slot of a
2237 * multi-slot dnode. This will be resolved when the next txg
2238 * is synced out, since the send stream will have told us
2239 * to free this slot when we freed the associated dnode
2240 * earlier in the stream.
2242 txg_wait_synced(dmu_objset_pool(rwa->os), 0);
2243 object = drro->drr_object;
2245 /* object is free and we are about to allocate a new one */
2246 object = DMU_NEW_OBJECT;
2250 * If this is a multi-slot dnode there is a chance that this
2251 * object will expand into a slot that is already used by
2252 * another object from the previous snapshot. We must free
2253 * these objects before we attempt to allocate the new dnode.
2256 boolean_t need_sync = B_FALSE;
2258 for (uint64_t slot = drro->drr_object + 1;
2259 slot < drro->drr_object + dn_slots;
2261 dmu_object_info_t slot_doi;
2263 err = dmu_object_info(rwa->os, slot, &slot_doi);
2264 if (err == ENOENT || err == EEXIST)
2269 err = dmu_free_long_object(rwa->os, slot);
2278 txg_wait_synced(dmu_objset_pool(rwa->os), 0);
2281 tx = dmu_tx_create(rwa->os);
2282 dmu_tx_hold_bonus(tx, object);
2283 err = dmu_tx_assign(tx, TXG_WAIT);
2289 if (object == DMU_NEW_OBJECT) {
2290 /* currently free, want to be allocated */
2291 err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
2292 drro->drr_type, drro->drr_blksz,
2293 drro->drr_bonustype, drro->drr_bonuslen,
2294 dn_slots << DNODE_SHIFT, tx);
2295 } else if (drro->drr_type != doi.doi_type ||
2296 drro->drr_blksz != doi.doi_data_block_size ||
2297 drro->drr_bonustype != doi.doi_bonus_type ||
2298 drro->drr_bonuslen != doi.doi_bonus_size ||
2299 drro->drr_dn_slots != (doi.doi_dnodesize >> DNODE_SHIFT)) {
2300 /* currently allocated, but with different properties */
2301 err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
2302 drro->drr_type, drro->drr_blksz,
2303 drro->drr_bonustype, drro->drr_bonuslen,
2304 drro->drr_dn_slots << DNODE_SHIFT, tx);
2308 return (SET_ERROR(EINVAL));
2311 dmu_object_set_checksum(rwa->os, drro->drr_object,
2312 drro->drr_checksumtype, tx);
2313 dmu_object_set_compress(rwa->os, drro->drr_object,
2314 drro->drr_compress, tx);
2319 VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG, &db));
2320 dmu_buf_will_dirty(db, tx);
2322 ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
2323 bcopy(data, db->db_data, drro->drr_bonuslen);
2324 if (rwa->byteswap) {
2325 dmu_object_byteswap_t byteswap =
2326 DMU_OT_BYTESWAP(drro->drr_bonustype);
2327 dmu_ot_byteswap[byteswap].ob_func(db->db_data,
2328 drro->drr_bonuslen);
2330 dmu_buf_rele(db, FTAG);
2339 receive_freeobjects(struct receive_writer_arg *rwa,
2340 struct drr_freeobjects *drrfo)
2345 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
2346 return (SET_ERROR(EINVAL));
2348 for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
2349 obj < drrfo->drr_firstobj + drrfo->drr_numobjs && next_err == 0;
2350 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
2351 dmu_object_info_t doi;
2354 err = dmu_object_info(rwa->os, obj, NULL);
2360 err = dmu_free_long_object(rwa->os, obj);
2364 if (obj > rwa->max_object)
2365 rwa->max_object = obj;
2367 if (next_err != ESRCH)
2373 receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
2379 if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
2380 !DMU_OT_IS_VALID(drrw->drr_type))
2381 return (SET_ERROR(EINVAL));
2384 * For resuming to work, records must be in increasing order
2385 * by (object, offset).
2387 if (drrw->drr_object < rwa->last_object ||
2388 (drrw->drr_object == rwa->last_object &&
2389 drrw->drr_offset < rwa->last_offset)) {
2390 return (SET_ERROR(EINVAL));
2392 rwa->last_object = drrw->drr_object;
2393 rwa->last_offset = drrw->drr_offset;
2395 if (rwa->last_object > rwa->max_object)
2396 rwa->max_object = rwa->last_object;
2398 if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
2399 return (SET_ERROR(EINVAL));
2401 tx = dmu_tx_create(rwa->os);
2402 dmu_tx_hold_write(tx, drrw->drr_object,
2403 drrw->drr_offset, drrw->drr_logical_size);
2404 err = dmu_tx_assign(tx, TXG_WAIT);
2409 if (rwa->byteswap) {
2410 dmu_object_byteswap_t byteswap =
2411 DMU_OT_BYTESWAP(drrw->drr_type);
2412 dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
2413 DRR_WRITE_PAYLOAD_SIZE(drrw));
2416 /* use the bonus buf to look up the dnode in dmu_assign_arcbuf */
2418 if (dmu_bonus_hold(rwa->os, drrw->drr_object, FTAG, &bonus) != 0)
2419 return (SET_ERROR(EINVAL));
2420 dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);
2423 * Note: If the receive fails, we want the resume stream to start
2424 * with the same record that we last successfully received (as opposed
2425 * to the next record), so that we can verify that we are
2426 * resuming from the correct location.
2428 save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
2430 dmu_buf_rele(bonus, FTAG);
2436 * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed
2437 * streams to refer to a copy of the data that is already on the
2438 * system because it came in earlier in the stream. This function
2439 * finds the earlier copy of the data, and uses that copy instead of
2440 * data from the stream to fulfill this write.
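* For example, a dedup'd stream that has already sent a block as part of an
* earlier dataset (toguid G) may later emit a WRITE_BYREF record whose
* drr_refguid/drr_refobject/drr_refoffset name that copy; we look G up in
* rwa->guid_to_ds_map, read the block from the already-received dataset, and
* dmu_write() it at the new (drr_object, drr_offset).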
2443 receive_write_byref(struct receive_writer_arg *rwa,
2444 struct drr_write_byref *drrwbr)
2448 guid_map_entry_t gmesrch;
2449 guid_map_entry_t *gmep;
2451 objset_t *ref_os = NULL;
2454 if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
2455 return (SET_ERROR(EINVAL));
2458 * If the GUID of the referenced dataset is different from the
2459 * GUID of the target dataset, find the referenced dataset.
2461 if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
2462 gmesrch.guid = drrwbr->drr_refguid;
2463 if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
2465 return (SET_ERROR(EINVAL));
2467 if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
2468 return (SET_ERROR(EINVAL));
2473 if (drrwbr->drr_object > rwa->max_object)
2474 rwa->max_object = drrwbr->drr_object;
2476 err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
2477 drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
2481 tx = dmu_tx_create(rwa->os);
2483 dmu_tx_hold_write(tx, drrwbr->drr_object,
2484 drrwbr->drr_offset, drrwbr->drr_length);
2485 err = dmu_tx_assign(tx, TXG_WAIT);
2490 dmu_write(rwa->os, drrwbr->drr_object,
2491 drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
2492 dmu_buf_rele(dbp, FTAG);
2494 /* See comment in receive_write. */
2495 save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx);
2501 receive_write_embedded(struct receive_writer_arg *rwa,
2502 struct drr_write_embedded *drrwe, void *data)
2507 if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
2510 if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
2513 if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
2515 if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
2518 if (drrwe->drr_object > rwa->max_object)
2519 rwa->max_object = drrwe->drr_object;
2521 tx = dmu_tx_create(rwa->os);
2523 dmu_tx_hold_write(tx, drrwe->drr_object,
2524 drrwe->drr_offset, drrwe->drr_length);
2525 err = dmu_tx_assign(tx, TXG_WAIT);
2531 dmu_write_embedded(rwa->os, drrwe->drr_object,
2532 drrwe->drr_offset, data, drrwe->drr_etype,
2533 drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
2534 rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
2536 /* See comment in receive_write. */
2537 save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
2543 receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
2547 dmu_buf_t *db, *db_spill;
2550 if (drrs->drr_length < SPA_MINBLOCKSIZE ||
2551 drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
2552 return (SET_ERROR(EINVAL));
2554 if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
2555 return (SET_ERROR(EINVAL));
2557 if (drrs->drr_object > rwa->max_object)
2558 rwa->max_object = drrs->drr_object;
2560 VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
2561 if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
2562 dmu_buf_rele(db, FTAG);
2566 tx = dmu_tx_create(rwa->os);
2568 dmu_tx_hold_spill(tx, db->db_object);
2570 err = dmu_tx_assign(tx, TXG_WAIT);
2572 dmu_buf_rele(db, FTAG);
2573 dmu_buf_rele(db_spill, FTAG);
2577 dmu_buf_will_dirty(db_spill, tx);
2579 if (db_spill->db_size < drrs->drr_length)
2580 VERIFY(0 == dbuf_spill_set_blksz(db_spill,
2581 drrs->drr_length, tx));
2582 bcopy(data, db_spill->db_data, drrs->drr_length);
2584 dmu_buf_rele(db, FTAG);
2585 dmu_buf_rele(db_spill, FTAG);
2593 receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
2597 if (drrf->drr_length != -1ULL &&
2598 drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
2599 return (SET_ERROR(EINVAL));
2601 if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
2602 return (SET_ERROR(EINVAL));
2604 if (drrf->drr_object > rwa->max_object)
2605 rwa->max_object = drrf->drr_object;
2607 err = dmu_free_long_range(rwa->os, drrf->drr_object,
2608 drrf->drr_offset, drrf->drr_length);
2613 /* used to destroy the drc_ds on error */
2615 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
2617 if (drc->drc_resumable) {
2618 /* wait for our resume state to be written to disk */
2619 txg_wait_synced(drc->drc_ds->ds_dir->dd_pool, 0);
2620 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2622 char name[ZFS_MAX_DATASET_NAME_LEN];
2623 dsl_dataset_name(drc->drc_ds, name);
2624 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2625 (void) dsl_destroy_head(name);
2630 receive_cksum(struct receive_arg *ra, int len, void *buf)
2633 (void) fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
2635 (void) fletcher_4_incremental_native(buf, len, &ra->cksum);
2640 * Read the payload into a buffer of size len, and update the current record's
2641 * payload field.
2642 * Allocate ra->next_rrd and read the next record's header into
2643 * ra->next_rrd->header.
2644 * Verify checksum of payload and next record.
2647 receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf)
2652 ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
2653 err = receive_read(ra, len, buf);
2656 receive_cksum(ra, len, buf);
2658 /* note: rrd is NULL when reading the begin record's payload */
2659 if (ra->rrd != NULL) {
2660 ra->rrd->payload = buf;
2661 ra->rrd->payload_size = len;
2662 ra->rrd->bytes_read = ra->bytes_read;
2666 ra->prev_cksum = ra->cksum;
2668 ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
2669 err = receive_read(ra, sizeof (ra->next_rrd->header),
2670 &ra->next_rrd->header);
2671 ra->next_rrd->bytes_read = ra->bytes_read;
2673 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2674 ra->next_rrd = NULL;
2677 if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
2678 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2679 ra->next_rrd = NULL;
2680 return (SET_ERROR(EINVAL));
2684 * Note: checksum is of everything up to but not including the
2685 * checksum itself.
2687 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
2688 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
2689 receive_cksum(ra,
2690 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
2691 &ra->next_rrd->header);
2693 zio_cksum_t cksum_orig =
2694 ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
2695 zio_cksum_t *cksump =
2696 &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
2699 byteswap_record(&ra->next_rrd->header);
2701 if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
2702 !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
2703 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2704 ra->next_rrd = NULL;
2705 return (SET_ERROR(ECKSUM));
2708 receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);
2714 objlist_create(struct objlist *list)
2716 list_create(&list->list, sizeof (struct receive_objnode),
2717 offsetof(struct receive_objnode, node));
2718 list->last_lookup = 0;
2722 objlist_destroy(struct objlist *list)
2724 for (struct receive_objnode *n = list_remove_head(&list->list);
2725 n != NULL; n = list_remove_head(&list->list)) {
2726 kmem_free(n, sizeof (*n));
2728 list_destroy(&list->list);
2732 * This function looks through the objlist to see if the specified object number
2733 * is contained in the objlist. In the process, it will remove all object
2734 * numbers in the list that are smaller than the specified object number. Thus,
2735 * any lookup of an object number smaller than a previously looked up object
2736 * number will always return false; therefore, all lookups should be done in
2737 * ascending order.
2740 objlist_exists(struct objlist *list, uint64_t object)
2742 struct receive_objnode *node = list_head(&list->list);
2743 ASSERT3U(object, >=, list->last_lookup);
2744 list->last_lookup = object;
2745 while (node != NULL && node->object < object) {
2746 VERIFY3P(node, ==, list_remove_head(&list->list));
2747 kmem_free(node, sizeof (*node));
2748 node = list_head(&list->list);
2750 return (node != NULL && node->object == object);
2754 * The objlist is a list of object numbers stored in ascending order. However,
2755 * the insertion of new object numbers does not seek out the correct location to
2756 * store a new object number; instead, it appends it to the list for simplicity.
2757 * Thus, any users must take care to only insert new object numbers in ascending
2758 * order.
2761 objlist_insert(struct objlist *list, uint64_t object)
2763 struct receive_objnode *node = kmem_zalloc(sizeof (*node), KM_SLEEP);
2764 node->object = object;
2766 struct receive_objnode *last_object = list_tail(&list->list);
2767 uint64_t last_objnum = (last_object != NULL ? last_object->object : 0);
2768 ASSERT3U(node->object, >, last_objnum);
2770 list_insert_tail(&list->list, node);
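* A minimal sketch of how the objlist helpers above are used (object
* numbers here are hypothetical; both inserts and lookups must be in
* ascending order):
*
*	struct objlist ol;
*	objlist_create(&ol);
*	objlist_insert(&ol, 5);
*	objlist_insert(&ol, 93);
*	objlist_exists(&ol, 5);		returns B_TRUE; 5 stays at the head
*	objlist_exists(&ol, 70);	returns B_FALSE; 5 is trimmed
*	objlist_exists(&ol, 93);	returns B_TRUE
*	objlist_destroy(&ol);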
2774 * Issue the prefetch reads for any necessary indirect blocks.
2776 * We use the object ignore list to tell us whether or not to issue prefetches
2777 * for a given object. We do this for both correctness (in case the blocksize
2778 * of an object has changed) and performance (if the object doesn't exist, don't
2779 * needlessly try to issue prefetches). We also trim the list as we go through
2780 * the stream to prevent it from growing to an unbounded size.
2782 * The object numbers within will always be in sorted order, and any write
2783 * records we see will also be in sorted order, but they're not sorted with
2784 * respect to each other (i.e. we can get several object records before
2785 * receiving each object's write records). As a result, once we've reached a
2786 * given object number, we can safely remove any reference to lower object
2787 * numbers in the ignore list. In practice, we receive up to 32 object records
2788 * before receiving write records, so the list can have up to 32 nodes in it.
2792 receive_read_prefetch(struct receive_arg *ra,
2793 uint64_t object, uint64_t offset, uint64_t length)
2795 if (!objlist_exists(&ra->ignore_objlist, object)) {
2796 dmu_prefetch(ra->os, object, 1, offset, length,
2797 ZIO_PRIORITY_SYNC_READ);
2802 * Read records off the stream, issuing any necessary prefetches.
2805 receive_read_record(struct receive_arg *ra)
2809 switch (ra->rrd->header.drr_type) {
2812 struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
2813 uint32_t size = P2ROUNDUP(drro->drr_bonuslen, 8);
2814 void *buf = kmem_zalloc(size, KM_SLEEP);
2815 dmu_object_info_t doi;
2816 err = receive_read_payload_and_next_header(ra, size, buf);
2818 kmem_free(buf, size);
2821 err = dmu_object_info(ra->os, drro->drr_object, &doi);
2823 * See receive_read_prefetch for an explanation why we're
2824 * storing this object in the ignore_objlist.
2826 if (err == ENOENT ||
2827 (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
2828 objlist_insert(&ra->ignore_objlist, drro->drr_object);
2833 case DRR_FREEOBJECTS:
2835 err = receive_read_payload_and_next_header(ra, 0, NULL);
2840 struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
2842 boolean_t is_meta = DMU_OT_IS_METADATA(drrw->drr_type);
2843 if (DRR_WRITE_COMPRESSED(drrw)) {
2844 ASSERT3U(drrw->drr_compressed_size, >, 0);
2845 ASSERT3U(drrw->drr_logical_size, >=,
2846 drrw->drr_compressed_size);
2848 abuf = arc_loan_compressed_buf(
2849 dmu_objset_spa(ra->os),
2850 drrw->drr_compressed_size, drrw->drr_logical_size,
2851 drrw->drr_compressiontype);
2853 abuf = arc_loan_buf(dmu_objset_spa(ra->os),
2854 is_meta, drrw->drr_logical_size);
2857 err = receive_read_payload_and_next_header(ra,
2858 DRR_WRITE_PAYLOAD_SIZE(drrw), abuf->b_data);
2860 dmu_return_arcbuf(abuf);
2863 ra->rrd->write_buf = abuf;
2864 receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
2865 drrw->drr_logical_size);
2868 case DRR_WRITE_BYREF:
2870 struct drr_write_byref *drrwb =
2871 &ra->rrd->header.drr_u.drr_write_byref;
2872 err = receive_read_payload_and_next_header(ra, 0, NULL);
2873 receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
2874 drrwb->drr_length);
2877 case DRR_WRITE_EMBEDDED:
2879 struct drr_write_embedded *drrwe =
2880 &ra->rrd->header.drr_u.drr_write_embedded;
2881 uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
2882 void *buf = kmem_zalloc(size, KM_SLEEP);
2884 err = receive_read_payload_and_next_header(ra, size, buf);
2886 kmem_free(buf, size);
2890 receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
2891 drrwe->drr_length);
2897 * It might be beneficial to prefetch indirect blocks here, but
2898 * we don't really have the data to decide for sure.
2900 err = receive_read_payload_and_next_header(ra, 0, NULL);
2905 struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
2906 if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
2907 return (SET_ERROR(ECKSUM));
2912 struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
2913 void *buf = kmem_zalloc(drrs->drr_length, KM_SLEEP);
2914 err = receive_read_payload_and_next_header(ra, drrs->drr_length,
2917 kmem_free(buf, drrs->drr_length);
2921 return (SET_ERROR(EINVAL));
2926 * Commit the records to the pool.
2929 receive_process_record(struct receive_writer_arg *rwa,
2930 struct receive_record_arg *rrd)
2934 /* Processing in order, therefore bytes_read should be increasing. */
2935 ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
2936 rwa->bytes_read = rrd->bytes_read;
2938 switch (rrd->header.drr_type) {
2941 struct drr_object *drro = &rrd->header.drr_u.drr_object;
2942 err = receive_object(rwa, drro, rrd->payload);
2943 kmem_free(rrd->payload, rrd->payload_size);
2944 rrd->payload = NULL;
2947 case DRR_FREEOBJECTS:
2949 struct drr_freeobjects *drrfo =
2950 &rrd->header.drr_u.drr_freeobjects;
2951 return (receive_freeobjects(rwa, drrfo));
2955 struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2956 err = receive_write(rwa, drrw, rrd->write_buf);
2957 /* if receive_write() is successful, it consumes the arc_buf */
2959 dmu_return_arcbuf(rrd->write_buf);
2960 rrd->write_buf = NULL;
2961 rrd->payload = NULL;
2964 case DRR_WRITE_BYREF:
2966 struct drr_write_byref *drrwbr =
2967 &rrd->header.drr_u.drr_write_byref;
2968 return (receive_write_byref(rwa, drrwbr));
2970 case DRR_WRITE_EMBEDDED:
2972 struct drr_write_embedded *drrwe =
2973 &rrd->header.drr_u.drr_write_embedded;
2974 err = receive_write_embedded(rwa, drrwe, rrd->payload);
2975 kmem_free(rrd->payload, rrd->payload_size);
2976 rrd->payload = NULL;
2981 struct drr_free *drrf = &rrd->header.drr_u.drr_free;
2982 return (receive_free(rwa, drrf));
2986 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
2987 err = receive_spill(rwa, drrs, rrd->payload);
2988 kmem_free(rrd->payload, rrd->payload_size);
2989 rrd->payload = NULL;
2993 return (SET_ERROR(EINVAL));
2998 * dmu_recv_stream's worker thread; pull records off the queue, and then call
2999 * receive_process_record().  When we're done, signal the main thread and exit.
3002 receive_writer_thread(void *arg)
3004 struct receive_writer_arg *rwa = arg;
3005 struct receive_record_arg *rrd;
3006 for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
3007 rrd = bqueue_dequeue(&rwa->q)) {
3009 * If there's an error, the main thread will stop putting things
3010 * on the queue, but we need to clear everything in it before we
3011 * can exit.
3013 if (rwa->err == 0) {
3014 rwa->err = receive_process_record(rwa, rrd);
3015 } else if (rrd->write_buf != NULL) {
3016 dmu_return_arcbuf(rrd->write_buf);
3017 rrd->write_buf = NULL;
3018 rrd->payload = NULL;
3019 } else if (rrd->payload != NULL) {
3020 kmem_free(rrd->payload, rrd->payload_size);
3021 rrd->payload = NULL;
3023 kmem_free(rrd, sizeof (*rrd));
3025 kmem_free(rrd, sizeof (*rrd));
3026 mutex_enter(&rwa->mutex);
3028 cv_signal(&rwa->cv);
3029 mutex_exit(&rwa->mutex);
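* resume_check() below validates a resuming receive: the DRR_BEGIN payload
* of a resumed stream carries an nvlist built from the receive token, e.g.
* (values hypothetical) resume_object = 7, resume_offset = 0x20000, which
* must match the DS_FIELD_RESUME_* values this dataset recorded when the
* earlier receive was interrupted.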
3034 resume_check(struct receive_arg *ra, nvlist_t *begin_nvl)
3037 objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset;
3038 uint64_t dsobj = dmu_objset_id(ra->os);
3039 uint64_t resume_obj, resume_off;
3041 if (nvlist_lookup_uint64(begin_nvl,
3042 "resume_object", &resume_obj) != 0 ||
3043 nvlist_lookup_uint64(begin_nvl,
3044 "resume_offset", &resume_off) != 0) {
3045 return (SET_ERROR(EINVAL));
3047 VERIFY0(zap_lookup(mos, dsobj,
3048 DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
3049 if (resume_obj != val)
3050 return (SET_ERROR(EINVAL));
3051 VERIFY0(zap_lookup(mos, dsobj,
3052 DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
3053 if (resume_off != val)
3054 return (SET_ERROR(EINVAL));
3060 * Read in the stream's records, one by one, and apply them to the pool. There
3061 * are two threads involved; the thread that calls this function will spin up a
3062 * worker thread, read the records off the stream one by one, and issue
3063 * prefetches for any necessary indirect blocks. It will then push the records
3064 * onto an internal blocking queue. The worker thread will pull the records off
3065 * the queue, and actually write the data into the DMU. This way, the worker
3066 * thread doesn't have to wait for reads to complete, since everything it needs
3067 * (the indirect blocks) will be prefetched.
3069 * NB: callers *must* call dmu_recv_end() if this succeeds.
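* A sketch of the expected calling sequence (the in-tree caller is the
* zfs receive ioctl path; locals and argument lists here are illustrative,
* not exact):
*
*	dmu_recv_cookie_t drc;
*	err = dmu_recv_begin(tofs, tosnap, drr_begin, force, resumable,
*	    origin, &drc);
*	if (err == 0)
*		err = dmu_recv_stream(&drc, fp, &voff, cleanup_fd, &handle);
*	if (err == 0)
*		err = dmu_recv_end(&drc, owner);
*
* On failure of dmu_recv_stream() the partially received dataset is cleaned
* up internally (see dmu_recv_cleanup_ds()), so dmu_recv_end() is only
* called on success.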
3072 dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
3073 int cleanup_fd, uint64_t *action_handlep)
3076 struct receive_arg ra = { 0 };
3077 struct receive_writer_arg rwa = { 0 };
3079 nvlist_t *begin_nvl = NULL;
3081 ra.byteswap = drc->drc_byteswap;
3082 ra.cksum = drc->drc_cksum;
3087 if (dsl_dataset_is_zapified(drc->drc_ds)) {
3088 (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
3089 drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
3090 sizeof (ra.bytes_read), 1, &ra.bytes_read);
3093 objlist_create(&ra.ignore_objlist);
3095 /* these were verified in dmu_recv_begin */
3096 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
3098 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
3101 * Open the objset we are modifying.
3103 VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra.os));
3105 ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
3107 featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
3109 /* if this stream is dedup'ed, set up the avl tree for guid mapping */
3110 if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
3113 if (cleanup_fd == -1) {
3114 ra.err = SET_ERROR(EBADF);
3117 ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
3123 if (*action_handlep == 0) {
3124 rwa.guid_to_ds_map =
3125 kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
3126 avl_create(rwa.guid_to_ds_map, guid_compare,
3127 sizeof (guid_map_entry_t),
3128 offsetof(guid_map_entry_t, avlnode));
3129 err = zfs_onexit_add_cb(minor,
3130 free_guid_map_onexit, rwa.guid_to_ds_map,
3135 err = zfs_onexit_cb_data(minor, *action_handlep,
3136 (void **)&rwa.guid_to_ds_map);
3141 drc->drc_guid_to_ds_map = rwa.guid_to_ds_map;
3144 uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
3145 void *payload = NULL;
3146 if (payloadlen != 0)
3147 payload = kmem_alloc(payloadlen, KM_SLEEP);
3149 err = receive_read_payload_and_next_header(&ra, payloadlen, payload);
3151 if (payloadlen != 0)
3152 kmem_free(payload, payloadlen);
3155 if (payloadlen != 0) {
3156 err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP);
3157 kmem_free(payload, payloadlen);
3162 if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
3163 err = resume_check(&ra, begin_nvl);
3168 (void) bqueue_init(&rwa.q, zfs_recv_queue_length,
3169 offsetof(struct receive_record_arg, node));
3170 cv_init(&rwa.cv, NULL, CV_DEFAULT, NULL);
3171 mutex_init(&rwa.mutex, NULL, MUTEX_DEFAULT, NULL);
3173 rwa.byteswap = drc->drc_byteswap;
3174 rwa.resumable = drc->drc_resumable;
3176 (void) thread_create(NULL, 0, receive_writer_thread, &rwa, 0, &p0,
3177 TS_RUN, minclsyspri);
3179 * We're reading rwa.err without locks, which is safe since we are the
3180 * only reader, and the worker thread is the only writer. It's ok if we
3181 * miss a write for an iteration or two of the loop, since the writer
3182 * thread will keep freeing records we send it until we send it an eos
3183 * marker.
3185 * We can leave this loop in 3 ways: First, if rwa.err is
3186 * non-zero. In that case, the writer thread will free the rrd we just
3187 * pushed. Second, if we're interrupted; in that case, either it's the
3188 * first loop and ra.rrd was never allocated, or it's later, and ra.rrd
3189 * has been handed off to the writer thread who will free it. Finally,
3190 * if receive_read_record fails or we're at the end of the stream, then
3191 * we free ra.rrd and exit.
3193 while (rwa.err == 0) {
3194 if (issig(JUSTLOOKING) && issig(FORREAL)) {
3195 err = SET_ERROR(EINTR);
3199 ASSERT3P(ra.rrd, ==, NULL);
3200 ra.rrd = ra.next_rrd;
3202 /* Allocates and loads header into ra.next_rrd */
3203 err = receive_read_record(&ra);
3205 if (ra.rrd->header.drr_type == DRR_END || err != 0) {
3206 kmem_free(ra.rrd, sizeof (*ra.rrd));
3211 bqueue_enqueue(&rwa.q, ra.rrd,
3212 sizeof (struct receive_record_arg) + ra.rrd->payload_size);
3215 if (ra.next_rrd == NULL)
3216 ra.next_rrd = kmem_zalloc(sizeof (*ra.next_rrd), KM_SLEEP);
3217 ra.next_rrd->eos_marker = B_TRUE;
3218 bqueue_enqueue(&rwa.q, ra.next_rrd, 1);
3220 mutex_enter(&rwa.mutex);
3222 cv_wait(&rwa.cv, &rwa.mutex);
3224 mutex_exit(&rwa.mutex);
3227 * If we are receiving a full stream as a clone, all object IDs which
3228 * are greater than the maximum ID referenced in the stream are
3229 * by definition unused and must be freed. Note that it's possible that
3230 * we've resumed this send and the first record we received was the END
3231 * record. In that case, max_object would be 0, but we shouldn't start
3232 * freeing all objects from there; instead we should start from the
3233 * resumeobj.
3235 if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) {
3237 if (nvlist_lookup_uint64(begin_nvl, "resume_object", &obj) != 0)
3239 if (rwa.max_object > obj)
3240 obj = rwa.max_object;
3245 while (next_err == 0) {
3246 free_err = dmu_free_long_object(rwa.os, obj);
3247 if (free_err != 0 && free_err != ENOENT)
3250 next_err = dmu_object_next(rwa.os, &obj, FALSE, 0);
3254 if (free_err != 0 && free_err != ENOENT)
3256 else if (next_err != ESRCH)
3261 cv_destroy(&rwa.cv);
3262 mutex_destroy(&rwa.mutex);
3263 bqueue_destroy(&rwa.q);
3268 nvlist_free(begin_nvl);
3269 if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
3270 zfs_onexit_fd_rele(cleanup_fd);
3274 * Clean up references. If receive is not resumable,
3275 * destroy what we created, so we don't leave it in
3276 * the inconsistent state.
3278 dmu_recv_cleanup_ds(drc);
3282 objlist_destroy(&ra.ignore_objlist);
3287 dmu_recv_end_check(void *arg, dmu_tx_t *tx)
3289 dmu_recv_cookie_t *drc = arg;
3290 dsl_pool_t *dp = dmu_tx_pool(tx);
3293 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
3295 if (!drc->drc_newfs) {
3296 dsl_dataset_t *origin_head;
3298 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
3301 if (drc->drc_force) {
3303 * We will destroy any snapshots in tofs (i.e. before
3304 * origin_head) that are after the origin (which is
3305 * the snap before drc_ds, because drc_ds cannot
3306 * have any snaps of its own).
3310 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
3312 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
3313 dsl_dataset_t *snap;
3314 error = dsl_dataset_hold_obj(dp, obj, FTAG,
3318 if (snap->ds_dir != origin_head->ds_dir)
3319 error = SET_ERROR(EINVAL);
3321 error = dsl_destroy_snapshot_check_impl(
3324 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
3325 dsl_dataset_rele(snap, FTAG);
3330 dsl_dataset_rele(origin_head, FTAG);
3334 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
3335 origin_head, drc->drc_force, drc->drc_owner, tx);
3337 dsl_dataset_rele(origin_head, FTAG);
3340 error = dsl_dataset_snapshot_check_impl(origin_head,
3341 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
3342 dsl_dataset_rele(origin_head, FTAG);
3346 error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
3348 error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
3349 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
3355 dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
3357 dmu_recv_cookie_t *drc = arg;
3358 dsl_pool_t *dp = dmu_tx_pool(tx);
3360 spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
3361 tx, "snap=%s", drc->drc_tosnap);
3363 if (!drc->drc_newfs) {
3364 dsl_dataset_t *origin_head;
3366 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
3369 if (drc->drc_force) {
3371 * Destroy any snapshots of drc_tofs (origin_head)
3372 * after the origin (the snap before drc_ds).
3376 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
3378 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
3379 dsl_dataset_t *snap;
3380 VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
3382 ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
3383 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
3384 dsl_destroy_snapshot_sync_impl(snap,
3386 dsl_dataset_rele(snap, FTAG);
3389 VERIFY3P(drc->drc_ds->ds_prev, ==,
3390 origin_head->ds_prev);
3392 dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
3394 dsl_dataset_snapshot_sync_impl(origin_head,
3395 drc->drc_tosnap, tx);
3397 /* set snapshot's creation time and guid */
3398 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
3399 dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
3400 drc->drc_drrb->drr_creation_time;
3401 dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
3402 drc->drc_drrb->drr_toguid;
3403 dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
3404 ~DS_FLAG_INCONSISTENT;
3406 dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
3407 dsl_dataset_phys(origin_head)->ds_flags &=
3408 ~DS_FLAG_INCONSISTENT;
3410 drc->drc_newsnapobj =
3411 dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
3413 dsl_dataset_rele(origin_head, FTAG);
3414 dsl_destroy_head_sync_impl(drc->drc_ds, tx);
3416 if (drc->drc_owner != NULL)
3417 VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
3419 dsl_dataset_t *ds = drc->drc_ds;
3421 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
3423 /* set snapshot's creation time and guid */
3424 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
3425 dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
3426 drc->drc_drrb->drr_creation_time;
3427 dsl_dataset_phys(ds->ds_prev)->ds_guid =
3428 drc->drc_drrb->drr_toguid;
3429 dsl_dataset_phys(ds->ds_prev)->ds_flags &=
3430 ~DS_FLAG_INCONSISTENT;
3432 dmu_buf_will_dirty(ds->ds_dbuf, tx);
3433 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
3434 if (dsl_dataset_has_resume_receive_state(ds)) {
3435 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3436 DS_FIELD_RESUME_FROMGUID, tx);
3437 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3438 DS_FIELD_RESUME_OBJECT, tx);
3439 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3440 DS_FIELD_RESUME_OFFSET, tx);
3441 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3442 DS_FIELD_RESUME_BYTES, tx);
3443 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3444 DS_FIELD_RESUME_TOGUID, tx);
3445 (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3446 DS_FIELD_RESUME_TONAME, tx);
3448 drc->drc_newsnapobj =
3449 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
3452 #if defined(__FreeBSD__) && defined(_KERNEL)
3453 zvol_create_minors(dp->dp_spa, drc->drc_tofs);
3457 * Release the hold from dmu_recv_begin. This must be done before
3458 * we return to open context, so that when we free the dataset's dnode,
3459 * we can evict its bonus buffer.
3461 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
3466 add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
3469 dsl_dataset_t *snapds;
3470 guid_map_entry_t *gmep;
3473 ASSERT(guid_map != NULL);
3475 err = dsl_pool_hold(name, FTAG, &dp);
3478 gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
3479 err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
3481 gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
3482 gmep->gme_ds = snapds;
3483 avl_add(guid_map, gmep);
3484 dsl_dataset_long_hold(snapds, gmep);
3486 kmem_free(gmep, sizeof (*gmep));
3488 dsl_pool_rele(dp, FTAG);
3492 static int dmu_recv_end_modified_blocks = 3;
3495 dmu_recv_existing_end(dmu_recv_cookie_t *drc)
3499 * We will be destroying the ds; make sure its origin is unmounted if
3500 * necessary.
3502 char name[ZFS_MAX_DATASET_NAME_LEN];
3503 dsl_dataset_name(drc->drc_ds, name);
3504 zfs_destroy_unmount_origin(name);
3507 return (dsl_sync_task(drc->drc_tofs,
3508 dmu_recv_end_check, dmu_recv_end_sync, drc,
3509 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
3513 dmu_recv_new_end(dmu_recv_cookie_t *drc)
3515 return (dsl_sync_task(drc->drc_tofs,
3516 dmu_recv_end_check, dmu_recv_end_sync, drc,
3517 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
3521 dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
3525 drc->drc_owner = owner;
3528 error = dmu_recv_new_end(drc);
3530 error = dmu_recv_existing_end(drc);
3533 dmu_recv_cleanup_ds(drc);
3534 } else if (drc->drc_guid_to_ds_map != NULL) {
3535 (void) add_ds_to_guidmap(drc->drc_tofs,
3536 drc->drc_guid_to_ds_map,
3537 drc->drc_newsnapobj);
3543 * Return TRUE if this objset is currently being received into.
3546 dmu_objset_is_receiving(objset_t *os)
3548 return (os->os_dsl_dataset != NULL &&
3549 os->os_dsl_dataset->ds_owner == dmu_recv_tag);