/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2016, 2019 by Delphix. All rights reserved.
 */

#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/txg.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_initialize.h>

/*
 * Value that is written to disk during initialization.
 */
#ifdef _ILP32
unsigned long zfs_initialize_value = 0xdeadbeefUL;
#else
unsigned long zfs_initialize_value = 0xdeadbeefdeadbeeeULL;
#endif

/* maximum number of I/Os outstanding per leaf vdev */
int zfs_initialize_limit = 1;

/* size of initializing writes; default 1MiB, see zfs_remove_max_segment */
unsigned long zfs_initialize_chunk_size = 1024 * 1024;
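
/*
 * Returns B_TRUE when the initialize thread should stop issuing writes:
 * an exit was requested, the vdev is no longer writeable, or it is being
 * detached or removed.
 */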
static boolean_t
vdev_initialize_should_stop(vdev_t *vd)
{
	return (vd->vdev_initialize_exit_wanted || !vdev_writeable(vd) ||
	    vd->vdev_detached || vd->vdev_top->vdev_removing);
}
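
/*
 * Sync task that persists the initialize progress (last offset, action
 * time) and state in the vdev's leaf ZAP.
 */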
static void
vdev_initialize_zap_update_sync(void *arg, dmu_tx_t *tx)
{
	/*
	 * We pass in the guid instead of the vdev_t since the vdev may
	 * have been freed prior to the sync task being processed. This
	 * happens when a vdev is detached as we call spa_config_vdev_exit(),
	 * stop the initializing thread, schedule the sync task, and free
	 * the vdev. Later when the scheduled sync task is invoked, it would
	 * find that the vdev has been freed.
	 */
	uint64_t guid = *(uint64_t *)arg;
	uint64_t txg = dmu_tx_get_txg(tx);
	kmem_free(arg, sizeof (uint64_t));

	vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
	if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
		return;

	uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK];
	vd->vdev_initialize_offset[txg & TXG_MASK] = 0;

	VERIFY(vd->vdev_leaf_zap != 0);

	objset_t *mos = vd->vdev_spa->spa_meta_objset;

	if (last_offset > 0) {
		vd->vdev_initialize_last_offset = last_offset;
		VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
		    VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
		    sizeof (last_offset), 1, &last_offset, tx));
	}

	if (vd->vdev_initialize_action_time > 0) {
		uint64_t val = (uint64_t)vd->vdev_initialize_action_time;
		VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
		    VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, sizeof (val),
		    1, &val, tx));
	}

	uint64_t initialize_state = vd->vdev_initialize_state;
	VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
	    VDEV_LEAF_ZAP_INITIALIZE_STATE, sizeof (initialize_state), 1,
	    &initialize_state, tx));
}
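
/*
 * Transition the in-core initializing state, schedule a sync task to
 * persist it, and log the change to the pool history.
 */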
static void
vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	spa_t *spa = vd->vdev_spa;

	if (new_state == vd->vdev_initialize_state)
		return;

	/*
	 * Copy the vd's guid, this will be freed by the sync task.
	 */
	uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
	*guid = vd->vdev_guid;

	/*
	 * If we're suspending, then preserve the original start time.
	 */
	if (vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED) {
		vd->vdev_initialize_action_time = gethrestime_sec();
	}
	vd->vdev_initialize_state = new_state;

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	dsl_sync_task_nowait(spa_get_dsl(spa), vdev_initialize_zap_update_sync,
	    guid, tx);

	switch (new_state) {
	case VDEV_INITIALIZE_ACTIVE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s activated", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_SUSPENDED:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s suspended", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_CANCELED:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s canceled", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_COMPLETE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s complete", vd->vdev_path);
		break;
	default:
		panic("invalid state %llu", (unsigned long long)new_state);
	}

	dmu_tx_commit(tx);

	if (new_state != VDEV_INITIALIZE_ACTIVE)
		spa_notify_waiters(spa);
}
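
/*
 * Completion callback for each initializing write zio: updates progress
 * and error counters, then wakes any thread waiting on the inflight limit.
 */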
static void
vdev_initialize_cb(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	mutex_enter(&vd->vdev_initialize_io_lock);
	if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
		/*
		 * The I/O failed because the vdev was unavailable; roll the
		 * last offset back. (This works because spa_sync waits on
		 * spa_txg_zio before it runs sync tasks.)
		 */
		uint64_t *off =
		    &vd->vdev_initialize_offset[zio->io_txg & TXG_MASK];
		*off = MIN(*off, zio->io_offset);
	} else {
		/*
		 * Since initializing is best-effort, we ignore I/O errors and
		 * rely on vdev_probe to determine if the errors are more
		 * critical.
		 */
		if (zio->io_error != 0)
			vd->vdev_stat.vs_initialize_errors++;

		vd->vdev_initialize_bytes_done += zio->io_orig_size;
	}

	ASSERT3U(vd->vdev_initialize_inflight, >, 0);
	vd->vdev_initialize_inflight--;
	cv_broadcast(&vd->vdev_initialize_io_cv);
	mutex_exit(&vd->vdev_initialize_io_lock);

	spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}

/* Takes care of physical writing and limiting # of concurrent ZIOs. */
static int
vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
{
	spa_t *spa = vd->vdev_spa;

	/* Limit inflight initializing I/Os */
	mutex_enter(&vd->vdev_initialize_io_lock);
	while (vd->vdev_initialize_inflight >= zfs_initialize_limit) {
		cv_wait(&vd->vdev_initialize_io_cv,
		    &vd->vdev_initialize_io_lock);
	}
	vd->vdev_initialize_inflight++;
	mutex_exit(&vd->vdev_initialize_io_lock);

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	uint64_t txg = dmu_tx_get_txg(tx);

	spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
	mutex_enter(&vd->vdev_initialize_lock);

	if (vd->vdev_initialize_offset[txg & TXG_MASK] == 0) {
		uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
		*guid = vd->vdev_guid;

		/* This is the first write of this txg. */
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_initialize_zap_update_sync, guid, tx);
	}

	/*
	 * We know the vdev struct will still be around since all
	 * consumers of vdev_free must stop the initialization first.
	 */
	if (vdev_initialize_should_stop(vd)) {
		mutex_enter(&vd->vdev_initialize_io_lock);
		ASSERT3U(vd->vdev_initialize_inflight, >, 0);
		vd->vdev_initialize_inflight--;
		mutex_exit(&vd->vdev_initialize_io_lock);
		spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
		mutex_exit(&vd->vdev_initialize_lock);
		dmu_tx_commit(tx);
		return (SET_ERROR(EINTR));
	}
	mutex_exit(&vd->vdev_initialize_lock);

	vd->vdev_initialize_offset[txg & TXG_MASK] = start + size;
	zio_nowait(zio_write_phys(spa->spa_txg_zio[txg & TXG_MASK], vd, start,
	    size, data, ZIO_CHECKSUM_OFF, vdev_initialize_cb, NULL,
	    ZIO_PRIORITY_INITIALIZING, ZIO_FLAG_CANFAIL, B_FALSE));
	/* vdev_initialize_cb releases SCL_STATE_ALL */

	dmu_tx_commit(tx);

	return (0);
}

/*
 * Callback to fill each ABD chunk with zfs_initialize_value. len must be
 * divisible by sizeof (uint64_t), and buf must be 8-byte aligned. The ABD
 * allocation will guarantee these for us.
 */
static int
vdev_initialize_block_fill(void *buf, size_t len, void *unused)
{
	ASSERT0(len % sizeof (uint64_t));
#ifdef _ILP32
	for (uint64_t i = 0; i < len; i += sizeof (uint32_t)) {
		*(uint32_t *)((char *)(buf) + i) = zfs_initialize_value;
	}
#else
	for (uint64_t i = 0; i < len; i += sizeof (uint64_t)) {
		*(uint64_t *)((char *)(buf) + i) = zfs_initialize_value;
	}
#endif
	return (0);
}
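
/*
 * Allocate a chunk-sized ABD and fill it with the initialize pattern;
 * the caller releases it with vdev_initialize_block_free().
 */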
static abd_t *
vdev_initialize_block_alloc(void)
{
	/* Allocate ABD for filler data */
	abd_t *data = abd_alloc_for_io(zfs_initialize_chunk_size, B_FALSE);

	ASSERT0(zfs_initialize_chunk_size % sizeof (uint64_t));
	(void) abd_iterate_func(data, 0, zfs_initialize_chunk_size,
	    vdev_initialize_block_fill, NULL);

	return (data);
}

static void
vdev_initialize_block_free(abd_t *data)
{
	abd_free(data);
}
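
/*
 * Issue pattern writes for every segment in the vdev's initialize tree,
 * splitting each segment into zfs_initialize_chunk_size pieces. Returns
 * nonzero (e.g. EINTR) if the initialization is interrupted.
 */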
static int
vdev_initialize_ranges(vdev_t *vd, abd_t *data)
{
	range_tree_t *rt = vd->vdev_initialize_tree;
	zfs_btree_t *bt = &rt->rt_root;
	zfs_btree_index_t where;

	for (range_seg_t *rs = zfs_btree_first(bt, &where); rs != NULL;
	    rs = zfs_btree_next(bt, &where, &where)) {
		uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);

		/* Split range into legally-sized physical chunks */
		uint64_t writes_required =
		    ((size - 1) / zfs_initialize_chunk_size) + 1;

		for (uint64_t w = 0; w < writes_required; w++) {
			int error;

			error = vdev_initialize_write(vd,
			    VDEV_LABEL_START_SIZE + rs_get_start(rs, rt) +
			    (w * zfs_initialize_chunk_size),
			    MIN(size - (w * zfs_initialize_chunk_size),
			    zfs_initialize_chunk_size), data);
			if (error != 0)
				return (error);
		}
	}
	return (0);
}
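
/*
 * Estimate how many bytes this initialization will touch (bytes_est) and
 * how many have already been written (bytes_done) by comparing the saved
 * last offset against the physical range of each metaslab.
 */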
static void
vdev_initialize_calculate_progress(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
	    spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
	ASSERT(vd->vdev_leaf_zap != 0);

	vd->vdev_initialize_bytes_est = 0;
	vd->vdev_initialize_bytes_done = 0;

	for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_top->vdev_ms[i];
		mutex_enter(&msp->ms_lock);

		uint64_t ms_free = msp->ms_size -
		    metaslab_allocated_space(msp);

		if (vd->vdev_top->vdev_ops == &vdev_raidz_ops)
			ms_free /= vd->vdev_top->vdev_children;

		/*
		 * Convert the metaslab range to a physical range
		 * on our vdev. We use this to determine if we are
		 * in the middle of this metaslab range.
		 */
		range_seg64_t logical_rs, physical_rs;
		logical_rs.rs_start = msp->ms_start;
		logical_rs.rs_end = msp->ms_start + msp->ms_size;
		vdev_xlate(vd, &logical_rs, &physical_rs);

		if (vd->vdev_initialize_last_offset <= physical_rs.rs_start) {
			vd->vdev_initialize_bytes_est += ms_free;
			mutex_exit(&msp->ms_lock);
			continue;
		} else if (vd->vdev_initialize_last_offset >
		    physical_rs.rs_end) {
			vd->vdev_initialize_bytes_done += ms_free;
			vd->vdev_initialize_bytes_est += ms_free;
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/*
		 * If we get here, we're in the middle of initializing this
		 * metaslab. Load it and walk the free tree for more accurate
		 * progress estimation.
		 */
		VERIFY0(metaslab_load(msp));

		zfs_btree_index_t where;
		range_tree_t *rt = msp->ms_allocatable;
		for (range_seg_t *rs =
		    zfs_btree_first(&rt->rt_root, &where); rs;
		    rs = zfs_btree_next(&rt->rt_root, &where,
		    &where)) {
			logical_rs.rs_start = rs_get_start(rs, rt);
			logical_rs.rs_end = rs_get_end(rs, rt);
			vdev_xlate(vd, &logical_rs, &physical_rs);

			uint64_t size = physical_rs.rs_end -
			    physical_rs.rs_start;
			vd->vdev_initialize_bytes_est += size;
			if (vd->vdev_initialize_last_offset >
			    physical_rs.rs_end) {
				vd->vdev_initialize_bytes_done += size;
			} else if (vd->vdev_initialize_last_offset >
			    physical_rs.rs_start &&
			    vd->vdev_initialize_last_offset <
			    physical_rs.rs_end) {
				vd->vdev_initialize_bytes_done +=
				    vd->vdev_initialize_last_offset -
				    physical_rs.rs_start;
			}
		}
		mutex_exit(&msp->ms_lock);
	}
}
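
/*
 * When resuming an active or suspended initialization, read the last
 * offset written from the leaf ZAP and recompute progress.
 */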
static int
vdev_initialize_load(vdev_t *vd)
{
	int err = 0;
	ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
	    spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
	ASSERT(vd->vdev_leaf_zap != 0);

	if (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE ||
	    vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED) {
		err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
		    sizeof (vd->vdev_initialize_last_offset), 1,
		    &vd->vdev_initialize_last_offset);
		if (err == ENOENT) {
			vd->vdev_initialize_last_offset = 0;
			err = 0;
		}
	}

	vdev_initialize_calculate_progress(vd);
	return (err);
}

/*
 * Convert the logical range into a physical range and add it to our
 * vdev's initialize tree.
 */
static void
vdev_initialize_range_add(void *arg, uint64_t start, uint64_t size)
{
	vdev_t *vd = arg;
	range_seg64_t logical_rs, physical_rs;
	logical_rs.rs_start = start;
	logical_rs.rs_end = start + size;

	ASSERT(vd->vdev_ops->vdev_op_leaf);
	vdev_xlate(vd, &logical_rs, &physical_rs);

	IMPLY(vd->vdev_top == vd,
	    logical_rs.rs_start == physical_rs.rs_start);
	IMPLY(vd->vdev_top == vd,
	    logical_rs.rs_end == physical_rs.rs_end);

	/* Only add segments that we have not visited yet */
	if (physical_rs.rs_end <= vd->vdev_initialize_last_offset)
		return;

	/* Pick up where we left off mid-range. */
	if (vd->vdev_initialize_last_offset > physical_rs.rs_start) {
		zfs_dbgmsg("range write: vd %s changed (%llu, %llu) to "
		    "(%llu, %llu)", vd->vdev_path,
		    (u_longlong_t)physical_rs.rs_start,
		    (u_longlong_t)physical_rs.rs_end,
		    (u_longlong_t)vd->vdev_initialize_last_offset,
		    (u_longlong_t)physical_rs.rs_end);
		ASSERT3U(physical_rs.rs_end, >,
		    vd->vdev_initialize_last_offset);
		physical_rs.rs_start = vd->vdev_initialize_last_offset;
	}
	ASSERT3U(physical_rs.rs_end, >=, physical_rs.rs_start);

	/*
	 * With raidz, it's possible that the logical range does not live on
	 * this leaf vdev. We only add the physical range to this vdev's tree
	 * if it has a length greater than 0.
	 */
	if (physical_rs.rs_end > physical_rs.rs_start) {
		range_tree_add(vd->vdev_initialize_tree, physical_rs.rs_start,
		    physical_rs.rs_end - physical_rs.rs_start);
	} else {
		ASSERT3U(physical_rs.rs_end, ==, physical_rs.rs_start);
	}
}
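
/*
 * Body of the per-vdev initialize thread: for each metaslab of the
 * top-level vdev, collect the free ranges that map to this leaf into
 * vdev_initialize_tree and overwrite them with the initialize pattern.
 */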
static void
vdev_initialize_thread(void *arg)
{
	vdev_t *vd = arg;
	spa_t *spa = vd->vdev_spa;
	int error = 0;
	uint64_t ms_count = 0;

	ASSERT(vdev_is_concrete(vd));
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	vd->vdev_initialize_last_offset = 0;
	VERIFY0(vdev_initialize_load(vd));

	abd_t *deadbeef = vdev_initialize_block_alloc();

	vd->vdev_initialize_tree = range_tree_create(NULL, RANGE_SEG64, NULL,
	    0, 0);

	for (uint64_t i = 0; !vd->vdev_detached &&
	    i < vd->vdev_top->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_top->vdev_ms[i];
		boolean_t unload_when_done = B_FALSE;

		/*
		 * If we've expanded the top-level vdev or it's our
		 * first pass, calculate our progress.
		 */
		if (vd->vdev_top->vdev_ms_count != ms_count) {
			vdev_initialize_calculate_progress(vd);
			ms_count = vd->vdev_top->vdev_ms_count;
		}

		spa_config_exit(spa, SCL_CONFIG, FTAG);
		metaslab_disable(msp);
		mutex_enter(&msp->ms_lock);
		if (!msp->ms_loaded && !msp->ms_loading)
			unload_when_done = B_TRUE;
		VERIFY0(metaslab_load(msp));

		range_tree_walk(msp->ms_allocatable, vdev_initialize_range_add,
		    vd);
		mutex_exit(&msp->ms_lock);

		error = vdev_initialize_ranges(vd, deadbeef);
		metaslab_enable(msp, B_TRUE, unload_when_done);
		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

		range_tree_vacate(vd->vdev_initialize_tree, NULL, NULL);
		if (error != 0)
			break;
	}

	spa_config_exit(spa, SCL_CONFIG, FTAG);
	mutex_enter(&vd->vdev_initialize_io_lock);
	while (vd->vdev_initialize_inflight > 0) {
		cv_wait(&vd->vdev_initialize_io_cv,
		    &vd->vdev_initialize_io_lock);
	}
	mutex_exit(&vd->vdev_initialize_io_lock);

	range_tree_destroy(vd->vdev_initialize_tree);
	vdev_initialize_block_free(deadbeef);
	vd->vdev_initialize_tree = NULL;

	mutex_enter(&vd->vdev_initialize_lock);
	if (!vd->vdev_initialize_exit_wanted && vdev_writeable(vd)) {
		vdev_initialize_change_state(vd, VDEV_INITIALIZE_COMPLETE);
	}
	ASSERT(vd->vdev_initialize_thread != NULL ||
	    vd->vdev_initialize_inflight == 0);

	/*
	 * Drop the vdev_initialize_lock while we sync out the
	 * txg since it's possible that a device might be trying to
	 * come online and must check to see if it needs to restart an
	 * initialization. That thread will be holding the spa_config_lock
	 * which would prevent the txg_wait_synced from completing.
	 */
	mutex_exit(&vd->vdev_initialize_lock);
	txg_wait_synced(spa_get_dsl(spa), 0);
	mutex_enter(&vd->vdev_initialize_lock);

	vd->vdev_initialize_thread = NULL;
	cv_broadcast(&vd->vdev_initialize_cv);
	mutex_exit(&vd->vdev_initialize_lock);

	thread_exit();
}

/*
 * Initiates initialization of a device. Caller must hold
 * vdev_initialize_lock. Device must be a leaf and not already be
 * initializing.
 */
void
vdev_initialize(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));
	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	ASSERT(!vd->vdev_detached);
	ASSERT(!vd->vdev_initialize_exit_wanted);
	ASSERT(!vd->vdev_top->vdev_removing);

	vdev_initialize_change_state(vd, VDEV_INITIALIZE_ACTIVE);
	vd->vdev_initialize_thread = thread_create(NULL, 0,
	    vdev_initialize_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
}

/*
 * Wait for the initialize thread to be terminated (canceled or stopped).
 */
static void
vdev_initialize_stop_wait_impl(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));

	while (vd->vdev_initialize_thread != NULL)
		cv_wait(&vd->vdev_initialize_cv, &vd->vdev_initialize_lock);

	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	vd->vdev_initialize_exit_wanted = B_FALSE;
}

/*
 * Wait for vdev initialize threads which were listed to cleanly exit.
 */
void
vdev_initialize_stop_wait(spa_t *spa, list_t *vd_list)
{
	vdev_t *vd;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	while ((vd = list_remove_head(vd_list)) != NULL) {
		mutex_enter(&vd->vdev_initialize_lock);
		vdev_initialize_stop_wait_impl(vd);
		mutex_exit(&vd->vdev_initialize_lock);
	}
}

/*
 * Stop initializing a device, with the resultant initializing state being
 * tgt_state. For blocking behavior pass NULL for vd_list. Otherwise, when
 * a list_t is provided the stopping vdev is inserted into the list. Callers
 * are then required to call vdev_initialize_stop_wait() to block for all the
 * initialization threads to exit. The caller must hold vdev_initialize_lock
 * and must not be writing to the spa config, as the initializing thread may
 * try to enter the config as a reader before exiting.
 */
void
vdev_initialize_stop(vdev_t *vd, vdev_initializing_state_t tgt_state,
    list_t *vd_list)
{
	ASSERT(!spa_config_held(vd->vdev_spa, SCL_CONFIG|SCL_STATE, RW_WRITER));
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));

	/*
	 * Allow cancel requests to proceed even if the initialize thread
	 * has stopped.
	 */
	if (vd->vdev_initialize_thread == NULL &&
	    tgt_state != VDEV_INITIALIZE_CANCELED) {
		return;
	}

	vdev_initialize_change_state(vd, tgt_state);
	vd->vdev_initialize_exit_wanted = B_TRUE;

	if (vd_list == NULL) {
		vdev_initialize_stop_wait_impl(vd);
	} else {
		ASSERT(MUTEX_HELD(&spa_namespace_lock));
		list_insert_tail(vd_list, vd);
	}
}
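
/*
 * Recursively descend the vdev tree, stopping initialization on every
 * concrete leaf vdev and accumulating the stopped vdevs in vd_list.
 */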
static void
vdev_initialize_stop_all_impl(vdev_t *vd, vdev_initializing_state_t tgt_state,
    list_t *vd_list)
{
	if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
		mutex_enter(&vd->vdev_initialize_lock);
		vdev_initialize_stop(vd, tgt_state, vd_list);
		mutex_exit(&vd->vdev_initialize_lock);
		return;
	}

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		vdev_initialize_stop_all_impl(vd->vdev_child[i], tgt_state,
		    vd_list);
	}
}

/*
 * Convenience function to stop initializing of a vdev tree and set all
 * initialize thread pointers to NULL.
 */
void
vdev_initialize_stop_all(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
	spa_t *spa = vd->vdev_spa;
	list_t vd_list;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	list_create(&vd_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_initialize_node));

	vdev_initialize_stop_all_impl(vd, tgt_state, &vd_list);
	vdev_initialize_stop_wait(spa, &vd_list);

	if (vd->vdev_spa->spa_sync_on) {
		/* Make sure that our state has been synced to disk */
		txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
	}

	list_destroy(&vd_list);
}
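
/*
 * Restore the initializing state from the leaf ZAP (e.g. at pool import
 * or when a device comes back online) and resume the thread for any leaf
 * that was actively initializing.
 */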
void
vdev_initialize_restart(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

	if (vd->vdev_leaf_zap != 0) {
		mutex_enter(&vd->vdev_initialize_lock);
		uint64_t initialize_state = VDEV_INITIALIZE_NONE;
		int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_STATE,
		    sizeof (initialize_state), 1, &initialize_state);
		ASSERT(err == 0 || err == ENOENT);
		vd->vdev_initialize_state = initialize_state;

		uint64_t timestamp = 0;
		err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME,
		    sizeof (timestamp), 1, &timestamp);
		ASSERT(err == 0 || err == ENOENT);
		vd->vdev_initialize_action_time = timestamp;

		if (vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
		    vd->vdev_offline) {
			/* load progress for reporting, but don't resume */
			VERIFY0(vdev_initialize_load(vd));
		} else if (vd->vdev_initialize_state ==
		    VDEV_INITIALIZE_ACTIVE && vdev_writeable(vd) &&
		    !vd->vdev_top->vdev_removing &&
		    vd->vdev_initialize_thread == NULL) {
			vdev_initialize(vd);
		}

		mutex_exit(&vd->vdev_initialize_lock);
	}

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		vdev_initialize_restart(vd->vdev_child[i]);
	}
}

EXPORT_SYMBOL(vdev_initialize);
EXPORT_SYMBOL(vdev_initialize_stop);
EXPORT_SYMBOL(vdev_initialize_stop_all);
EXPORT_SYMBOL(vdev_initialize_stop_wait);
EXPORT_SYMBOL(vdev_initialize_restart);

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, initialize_value, ULONG, ZMOD_RW,
	"Value written during zpool initialize");

ZFS_MODULE_PARAM(zfs, zfs_, initialize_chunk_size, ULONG, ZMOD_RW,
	"Size in bytes of writes by zpool initialize");
/* END CSTYLED */