/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2017 by Lawrence Livermore National Security, LLC.
 */

#include <sys/abd.h>
#include <sys/mmp.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/time.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/zfs_context.h>
#include <sys/callb.h>

/*
 * Multi-Modifier Protection (MMP) attempts to prevent a user from importing
 * or opening a pool on more than one host at a time.  In particular, it
 * prevents "zpool import -f" on a host from succeeding while the pool is
 * already imported on another host.  There are many other ways in which a
 * device could be used by two hosts for different purposes at the same time,
 * resulting in pool damage.  This implementation does not attempt to detect
 * them.
 *
 * MMP operates by ensuring there are frequent visible changes on disk (a
 * "heartbeat") at all times, and by altering the import process to check
 * for these changes and fail the import when they are detected.  This
 * functionality is enabled by setting the 'multihost' pool property to on.
 *
 * Uberblocks written by the txg_sync thread always go into the first
 * (N-MMP_BLOCKS_PER_LABEL) slots; the remaining slots are reserved for MMP.
 * They are used to hold uberblocks which are exactly the same as the last
 * synced uberblock except that the ub_timestamp and mmp_config are frequently
 * updated.  Like all other uberblocks, the slot is written with an embedded
 * checksum, and slots with invalid checksums are ignored.  This provides the
 * "heartbeat", with no risk of overwriting good uberblocks that must be
 * preserved, e.g. previous txgs and associated block pointers.
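 *
 * For example, with MMP_BLOCKS_PER_LABEL == 1 and a label whose uberblock
 * ring holds 128 slots (an illustrative size; the actual count depends on
 * the label layout and ashift), txg_sync writes rotate through slots 0-126
 * while the MMP heartbeat rewrites the last slot of a randomly chosen label.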
 *
 * Three optional fields are added to the uberblock structure: ub_mmp_magic,
 * ub_mmp_config, and ub_mmp_delay.  The ub_mmp_magic value allows zfs to tell
 * whether the other ub_mmp_* fields are valid.  The ub_mmp_config field tells
 * the importing host the settings of zfs_multihost_interval and
 * zfs_multihost_fail_intervals on the host which last had (or currently has)
 * the pool imported.  These determine how long a host must wait to detect
 * activity in the pool before concluding the pool is not in use.  The
 * mmp_delay field is a decaying average of the amount of time between
 * completion of successive MMP writes, in nanoseconds.  It indicates whether
 * MMP is enabled.
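 *
 * The ub_mmp_config layout is, roughly (see the MMP_*() macros in
 * uberblock_impl.h for the authoritative bit assignments):
 *
 *   bits 63..48   fail_intervals
 *   bits 47..32   mmp sequence number
 *   bits 31..8    write interval (ms)
 *   bits  7..0    validity flags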
 *
 * During import an activity test may now be performed to determine if
 * the pool is in use.  The activity test is typically required if the
 * ZPOOL_CONFIG_HOSTID does not match the system hostid, the pool state is
 * POOL_STATE_ACTIVE, and the pool is not a root pool.
 *
 * The activity test finds the "best" uberblock (highest txg, timestamp, and, if
 * ub_mmp_magic is valid, sequence number from ub_mmp_config).  It then waits
 * some time, and finds the "best" uberblock again.  If any of the mentioned
 * fields have different values in the newly read uberblock, the pool is in use
 * by another host and the import fails.  To ensure the accuracy of the
 * activity test, the default values result in an activity test duration of 20x
 * the mmp write interval.
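 *
 * In outline (a simplified sketch; the authoritative logic is the import
 * path in spa.c, which must also handle I/O errors and config changes):
 *
 *   best = best uberblock on any label (txg, timestamp, mmp_seq)
 *   wait for the import_delay computed from cases 1-4 below
 *   again = best uberblock on any label
 *   if (best.txg != again.txg || best.timestamp != again.timestamp ||
 *       best.mmp_seq != again.mmp_seq)
 *           fail the import: the pool is active on another host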
 *
 * The duration of the "zpool import" activity test depends on the information
 * available in the "best" uberblock:
 *
 * 1) If uberblock was written by zfs-0.8 or newer and fail_intervals > 0:
 *    ub_mmp_config.fail_intervals * ub_mmp_config.multihost_interval * 2
 *
 *    In this case, a weak guarantee is provided.  Since the host which last had
 *    the pool imported will suspend the pool if no mmp writes land within
 *    fail_intervals * multihost_interval ms, the absence of writes during that
 *    time means either the pool is not imported, or it is imported but the pool
 *    is suspended and no further writes will occur.
 *
 *    Note that resuming the suspended pool on the remote host would invalidate
 *    this guarantee, and so it is not allowed.
 *
 *    The factor of 2 provides a conservative safety factor and derives from
 *    MMP_IMPORT_SAFETY_FACTOR.
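 *
 *    For example, with the default values recorded by the writer
 *    (fail_intervals = 10, multihost_interval = 1000 ms), the test lasts
 *    10 * 1000 ms * 2 = 20 s.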
 *
 * 2) If uberblock was written by zfs-0.8 or newer and fail_intervals == 0:
 *    (ub_mmp_config.multihost_interval + ub_mmp_delay) *
 *    zfs_multihost_import_intervals
 *
 *    In this case no guarantee can be provided.  However, as long as some
 *    devices are healthy and connected, it is likely that at least one write
 *    will land within (multihost_interval + mmp_delay) because
 *    multihost_interval is enough time for a write to be attempted to each
 *    leaf vdev, and mmp_delay is enough for one to land, based on past delays.
 *    Multiplying by zfs_multihost_import_intervals provides a conservative
 *    safety factor.
 *
 * 3) If uberblock was written by zfs-0.7:
 *    (zfs_multihost_interval + ub_mmp_delay) * zfs_multihost_import_intervals
 *
 *    The same logic as case #2 applies, but we do not know the remote tunables.
 *
 *    We use the local value for zfs_multihost_interval because the original MMP
 *    did not record this value in the uberblock.
 *
 *    ub_mmp_delay >= (zfs_multihost_interval / leaves), so if the other host
 *    has a much larger zfs_multihost_interval set, ub_mmp_delay will reflect
 *    that.  We will have waited enough time for zfs_multihost_import_intervals
 *    writes to be issued and all but one to land.
 *
 *    single device pool example delays
 *
 *      import_delay = (1 + 1) * 20   =  40s #defaults, no I/O delay
 *      import_delay = (1 + 10) * 20  = 220s #defaults, 10s I/O delay
 *      import_delay = (10 + 10) * 20 = 400s #10s multihost_interval,
 *                                            no I/O delay
 *
 *    100 device pool example delays
 *
 *      import_delay = (1 + .01) * 20 =  20s #defaults, no I/O delay
 *      import_delay = (1 + 10) * 20  = 220s #defaults, 10s I/O delay
 *      import_delay = (10 + .1) * 20 = 202s #10s multihost_interval,
 *                                            no I/O delay
 *
 * 4) Otherwise, this uberblock was written by a pre-MMP zfs:
 *    zfs_multihost_import_intervals * zfs_multihost_interval
 *
 *    In this case local tunables are used.  By default this product = 20s
 *    (20 intervals of 1000 ms), long enough for a pool with any activity at
 *    all to write at least one uberblock.  No guarantee can be provided.
 *
 * Additionally, the duration is then extended by a random 25% to attempt to
 * detect simultaneous imports, e.g. if both partner hosts are rebooted at
 * the same time and automatically attempt to import the pool.
 */
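
/*
 * Illustrative sketch only, not part of the driver: one way an importing
 * host could derive the activity test duration from the "best" uberblock,
 * following cases 1-4 above.  The helper name and its placement here are
 * assumptions; the authoritative logic lives in the import path in spa.c.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
static hrtime_t
example_activity_check_duration(const uberblock_t *ub)
{
	uint64_t intervals = MAX(zfs_multihost_import_intervals, 1);

	if (ub->ub_mmp_magic == MMP_MAGIC && MMP_FAIL_INT_VALID(ub) &&
	    MMP_FAIL_INT(ub) > 0)	/* case 1: zfs-0.8+, fail_intervals>0 */
		return (MSEC2NSEC(MMP_FAIL_INT(ub) * MMP_INTERVAL(ub)) * 2);

	if (ub->ub_mmp_magic == MMP_MAGIC && MMP_INTERVAL_VALID(ub))
		/* case 2: zfs-0.8+, fail_intervals == 0 */
		return ((MSEC2NSEC(MMP_INTERVAL(ub)) + ub->ub_mmp_delay) *
		    intervals);

	if (ub->ub_mmp_magic == MMP_MAGIC)
		/* case 3: zfs-0.7 writer; only ub_mmp_delay is usable */
		return ((MSEC2NSEC(zfs_multihost_interval) +
		    ub->ub_mmp_delay) * intervals);

	/* case 4: pre-MMP writer; only local tunables are available */
	return (MSEC2NSEC(intervals * zfs_multihost_interval));
}
#endif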

/*
 * Used to control the frequency of mmp writes which are performed when the
 * 'multihost' pool property is on.  This is one factor used to determine the
 * length of the activity check during import.
 *
 * On average an mmp write will be issued for each leaf vdev every
 * zfs_multihost_interval milliseconds.  In practice, the observed period can
 * vary with the I/O load; this observed value is the ub_mmp_delay, which is
 * stored in the uberblock.  The minimum allowed value is 100 ms.
 */
ulong_t zfs_multihost_interval = MMP_DEFAULT_INTERVAL;
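
/*
 * For example, on Linux the interval can be adjusted at runtime through the
 * module parameter (sysfs path shown as a typical example):
 *
 *   echo 500 > /sys/module/zfs/parameters/zfs_multihost_interval
 */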

/*
 * Used to control the duration of the activity test on import.  Smaller values
 * of zfs_multihost_import_intervals will reduce the import time but increase
 * the risk of failing to detect an active pool.  The total activity check time
 * is never allowed to drop below one second.  A value of 0 is ignored and
 * treated as if it were set to 1.
 */
uint_t zfs_multihost_import_intervals = MMP_DEFAULT_IMPORT_INTERVALS;

/*
 * Controls the behavior of the pool when mmp write failures or delays are
 * detected.
 *
 * When zfs_multihost_fail_intervals = 0, mmp write failures or delays are
 * ignored.  The failures will still be reported to the ZED which, depending on
 * its configuration, may take action such as suspending the pool or taking a
 * device offline.
 *
 * When zfs_multihost_fail_intervals > 0, the pool will be suspended if
 * zfs_multihost_fail_intervals * zfs_multihost_interval milliseconds pass
 * without a successful mmp write.  This guarantees the activity test will see
 * mmp writes if the pool is imported.  A value of 1 is ignored and treated as
 * if it were set to 2, because a single leaf vdev pool will issue a write once
 * per multihost_interval and thus any variation in latency would cause the
 * pool to be suspended.
 */
uint_t zfs_multihost_fail_intervals = MMP_DEFAULT_FAIL_INTERVALS;
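
/*
 * For example, with zfs_multihost_interval = 1000 ms and
 * zfs_multihost_fail_intervals = 10, the pool is suspended once
 * 10 * 1000 ms = 10 s pass without a successful mmp write.
 */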

char *mmp_tag = "mmp_write_uberblock";
static void mmp_thread(void *arg);

void
mmp_init(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_init(&mmp->mmp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mmp->mmp_thread_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&mmp->mmp_io_lock, NULL, MUTEX_DEFAULT, NULL);
	mmp->mmp_kstat_id = 1;
}

void
mmp_fini(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_destroy(&mmp->mmp_thread_lock);
	cv_destroy(&mmp->mmp_thread_cv);
	mutex_destroy(&mmp->mmp_io_lock);
}

static void
mmp_thread_enter(mmp_thread_t *mmp, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &mmp->mmp_thread_lock, callb_generic_cpr, FTAG);
	mutex_enter(&mmp->mmp_thread_lock);
}

static void
mmp_thread_exit(mmp_thread_t *mmp, kthread_t **mpp, callb_cpr_t *cpr)
{
	ASSERT(*mpp != NULL);
	*mpp = NULL;
	cv_broadcast(&mmp->mmp_thread_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &mmp->mmp_thread_lock */
	thread_exit();
}

void
mmp_thread_start(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	if (spa_writeable(spa)) {
		mutex_enter(&mmp->mmp_thread_lock);
		if (!mmp->mmp_thread) {
			mmp->mmp_thread = thread_create(NULL, 0, mmp_thread,
			    spa, 0, &p0, TS_RUN, defclsyspri);
			zfs_dbgmsg("MMP thread started pool '%s' "
			    "gethrtime %llu", spa_name(spa), gethrtime());
		}
		mutex_exit(&mmp->mmp_thread_lock);
	}
}

void
mmp_thread_stop(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_thread_lock);
	mmp->mmp_thread_exiting = 1;
	cv_broadcast(&mmp->mmp_thread_cv);

	while (mmp->mmp_thread) {
		cv_wait(&mmp->mmp_thread_cv, &mmp->mmp_thread_lock);
	}
	mutex_exit(&mmp->mmp_thread_lock);
	zfs_dbgmsg("MMP thread stopped pool '%s' gethrtime %llu",
	    spa_name(spa), gethrtime());

	ASSERT(mmp->mmp_thread == NULL);
	mmp->mmp_thread_exiting = 0;
}

typedef enum mmp_vdev_state_flag {
	MMP_FAIL_NOT_WRITABLE	= (1 << 0),
	MMP_FAIL_WRITE_PENDING	= (1 << 1),
} mmp_vdev_state_flag_t;

/*
 * Find a leaf vdev to write an MMP block to.  It must not have an outstanding
 * mmp write (if so, a new write will also likely block).  If there is no usable
 * leaf, a nonzero error value is returned.  The error value returned is a bit
 * field.
 *
 * MMP_FAIL_WRITE_PENDING   One or more leaf vdevs are writeable, but have an
 *                          outstanding MMP write.
 * MMP_FAIL_NOT_WRITABLE    One or more leaf vdevs are not writeable.
 */
static int
mmp_next_leaf(spa_t *spa)
{
	vdev_t *leaf;
	vdev_t *starting_leaf;
	int fail_mask = 0;

	ASSERT(MUTEX_HELD(&spa->spa_mmp.mmp_io_lock));
	ASSERT(spa_config_held(spa, SCL_STATE, RW_READER));
	ASSERT(list_link_active(&spa->spa_leaf_list.list_head) == B_TRUE);
	ASSERT(!list_is_empty(&spa->spa_leaf_list));

	if (spa->spa_mmp.mmp_leaf_last_gen != spa->spa_leaf_list_gen) {
		spa->spa_mmp.mmp_last_leaf = list_head(&spa->spa_leaf_list);
		spa->spa_mmp.mmp_leaf_last_gen = spa->spa_leaf_list_gen;
	}

	leaf = spa->spa_mmp.mmp_last_leaf;
	if (leaf == NULL)
		leaf = list_head(&spa->spa_leaf_list);
	starting_leaf = leaf;

	do {
		leaf = list_next(&spa->spa_leaf_list, leaf);
		if (leaf == NULL)
			leaf = list_head(&spa->spa_leaf_list);

		if (!vdev_writeable(leaf)) {
			fail_mask |= MMP_FAIL_NOT_WRITABLE;
		} else if (leaf->vdev_mmp_pending != 0) {
			fail_mask |= MMP_FAIL_WRITE_PENDING;
		} else {
			spa->spa_mmp.mmp_last_leaf = leaf;
			return (0);
		}
	} while (leaf != starting_leaf);

	ASSERT(fail_mask);

	return (fail_mask);
}

/*
 * MMP writes are issued on a fixed schedule, but may complete at variable,
 * much longer, intervals.  The mmp_delay captures long periods between
 * successful writes for any reason, including disk latency, scheduling delays,
 * etc.
 *
 * The mmp_delay is usually calculated as a decaying average, but if the latest
 * delay is higher we do not average it, so that we do not hide sudden spikes
 * which the importing host must wait for.
 *
 * If writes are occurring frequently, such as due to a high rate of txg syncs,
 * the mmp_delay could become very small.  Since those short delays depend on
 * activity we cannot count on, we never allow mmp_delay to get lower than the
 * rate expected if only mmp_thread writes occur.
 *
 * If an mmp write was skipped or fails, and we have already waited longer than
 * mmp_delay, we need to update it so the next write reflects the longer delay.
 *
 * Do not set mmp_delay if the multihost property is not on, so as not to
 * trigger an activity check on import.
 */
static void
mmp_delay_update(spa_t *spa, boolean_t write_completed)
{
	mmp_thread_t *mts = &spa->spa_mmp;
	hrtime_t delay = gethrtime() - mts->mmp_last_write;

	ASSERT(MUTEX_HELD(&mts->mmp_io_lock));

	if (spa_multihost(spa) == B_FALSE) {
		mts->mmp_delay = 0;
		return;
	}

	if (delay > mts->mmp_delay)
		mts->mmp_delay = delay;

	if (write_completed == B_FALSE)
		return;

	mts->mmp_last_write = gethrtime();

	/*
	 * strictly less than, in case delay was changed above.
	 */
	if (delay < mts->mmp_delay) {
		hrtime_t min_delay =
		    MSEC2NSEC(MMP_INTERVAL_OK(zfs_multihost_interval)) /
		    MAX(1, vdev_count_leaves(spa));
		mts->mmp_delay = MAX(((delay + mts->mmp_delay * 127) / 128),
		    min_delay);
	}
}
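
/*
 * Worked example of the decay above (numbers illustrative): with a current
 * mmp_delay of 128 ms and a new 64 ms delay, the average becomes
 * (64 + 128 * 127) / 128 = 127.5 ms, subject to the min_delay floor.
 */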

static void
mmp_write_done(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	mmp_thread_t *mts = zio->io_private;

	mutex_enter(&mts->mmp_io_lock);
	uint64_t mmp_kstat_id = vd->vdev_mmp_kstat_id;
	hrtime_t mmp_write_duration = gethrtime() - vd->vdev_mmp_pending;

	mmp_delay_update(spa, (zio->io_error == 0));

	vd->vdev_mmp_pending = 0;
	vd->vdev_mmp_kstat_id = 0;

	mutex_exit(&mts->mmp_io_lock);
	spa_config_exit(spa, SCL_STATE, mmp_tag);

	spa_mmp_history_set(spa, mmp_kstat_id, zio->io_error,
	    mmp_write_duration);

	abd_free(zio->io_abd);
}

/*
 * When the uberblock on-disk is updated by a spa_sync,
 * creating a new "best" uberblock, update the one stored
 * in the mmp thread state, used for mmp writes.
 */
void
mmp_update_uberblock(spa_t *spa, uberblock_t *ub)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_io_lock);
	mmp->mmp_ub = *ub;
	mmp->mmp_seq = 1;
	mmp->mmp_ub.ub_timestamp = gethrestime_sec();
	mmp_delay_update(spa, B_TRUE);
	mutex_exit(&mmp->mmp_io_lock);
}

/*
 * Choose a random vdev, label, and MMP block, and write over it
 * with a copy of the last-synced uberblock, whose timestamp
 * has been updated to reflect that the pool is in use.
 */
static void
mmp_write_uberblock(spa_t *spa)
{
	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
	mmp_thread_t *mmp = &spa->spa_mmp;
	uberblock_t *ub;
	vdev_t *vd = NULL;
	int label, error;
	uint64_t offset;

	hrtime_t lock_acquire_time = gethrtime();
	spa_config_enter(spa, SCL_STATE, mmp_tag, RW_READER);
	lock_acquire_time = gethrtime() - lock_acquire_time;
	if (lock_acquire_time > (MSEC2NSEC(MMP_MIN_INTERVAL) / 10))
		zfs_dbgmsg("MMP SCL_STATE acquisition pool '%s' took %llu ns "
		    "gethrtime %llu", spa_name(spa), lock_acquire_time,
		    gethrtime());

	mutex_enter(&mmp->mmp_io_lock);

	error = mmp_next_leaf(spa);

	/*
	 * spa_mmp_history has two types of entries:
	 * Issued MMP write: records time issued, error status, etc.
	 * Skipped MMP write: an MMP write could not be issued because no
	 * suitable leaf vdev was available.  See comment above struct
	 * spa_mmp_history for details.
	 */

	if (error) {
		mmp_delay_update(spa, B_FALSE);
		if (mmp->mmp_skip_error == error) {
			spa_mmp_history_set_skip(spa, mmp->mmp_kstat_id - 1);
		} else {
			mmp->mmp_skip_error = error;
			spa_mmp_history_add(spa, mmp->mmp_ub.ub_txg,
			    gethrestime_sec(), mmp->mmp_delay, NULL, 0,
			    mmp->mmp_kstat_id++, error);
			zfs_dbgmsg("MMP error choosing leaf pool '%s' "
			    "gethrtime %llu fail_mask %#x", spa_name(spa),
			    gethrtime(), error);
		}
		mutex_exit(&mmp->mmp_io_lock);
		spa_config_exit(spa, SCL_STATE, mmp_tag);
		return;
	}

	vd = spa->spa_mmp.mmp_last_leaf;
	if (mmp->mmp_skip_error != 0) {
		mmp->mmp_skip_error = 0;
		zfs_dbgmsg("MMP write after skipping due to unavailable "
		    "leaves, pool '%s' gethrtime %llu leaf %#llu",
		    spa_name(spa), gethrtime(), vd->vdev_guid);
	}

	if (mmp->mmp_zio_root == NULL)
		mmp->mmp_zio_root = zio_root(spa, NULL, NULL,
		    flags | ZIO_FLAG_GODFATHER);

	if (mmp->mmp_ub.ub_timestamp != gethrestime_sec()) {
		/*
		 * Want to reset mmp_seq when timestamp advances because after
		 * an mmp_seq wrap new values will not be chosen by
		 * uberblock_compare() as the "best".
		 */
		mmp->mmp_ub.ub_timestamp = gethrestime_sec();
		mmp->mmp_seq = 1;
	}

	ub = &mmp->mmp_ub;
	ub->ub_mmp_magic = MMP_MAGIC;
	ub->ub_mmp_delay = mmp->mmp_delay;
	ub->ub_mmp_config = MMP_SEQ_SET(mmp->mmp_seq) |
	    MMP_INTERVAL_SET(MMP_INTERVAL_OK(zfs_multihost_interval)) |
	    MMP_FAIL_INT_SET(MMP_FAIL_INTVS_OK(
	    zfs_multihost_fail_intervals));
	vd->vdev_mmp_pending = gethrtime();
	vd->vdev_mmp_kstat_id = mmp->mmp_kstat_id;

	zio_t *zio = zio_null(mmp->mmp_zio_root, spa, NULL, NULL, NULL, flags);
	abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
	abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
	abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));

	mmp->mmp_seq++;
	mmp->mmp_kstat_id++;

	mutex_exit(&mmp->mmp_io_lock);

	offset = VDEV_UBERBLOCK_OFFSET(vd, VDEV_UBERBLOCK_COUNT(vd) -
	    MMP_BLOCKS_PER_LABEL + spa_get_random(MMP_BLOCKS_PER_LABEL));

	label = spa_get_random(VDEV_LABELS);
	vdev_label_write(zio, vd, label, ub_abd, offset,
	    VDEV_UBERBLOCK_SIZE(vd), mmp_write_done, mmp,
	    flags | ZIO_FLAG_DONT_PROPAGATE);

	(void) spa_mmp_history_add(spa, ub->ub_txg, ub->ub_timestamp,
	    ub->ub_mmp_delay, vd, label, vd->vdev_mmp_kstat_id, 0);

	zio_nowait(zio);
}

static void
mmp_thread(void *arg)
{
	spa_t *spa = (spa_t *)arg;
	mmp_thread_t *mmp = &spa->spa_mmp;
	boolean_t suspended = spa_suspended(spa);
	boolean_t multihost = spa_multihost(spa);
	uint64_t mmp_interval = MSEC2NSEC(MMP_INTERVAL_OK(
	    zfs_multihost_interval));
	uint32_t mmp_fail_intervals = MMP_FAIL_INTVS_OK(
	    zfs_multihost_fail_intervals);
	hrtime_t mmp_fail_ns = mmp_fail_intervals * mmp_interval;
	boolean_t last_spa_suspended = suspended;
	boolean_t last_spa_multihost = multihost;
	uint64_t last_mmp_interval = mmp_interval;
	uint32_t last_mmp_fail_intervals = mmp_fail_intervals;
	hrtime_t last_mmp_fail_ns = mmp_fail_ns;
	callb_cpr_t cpr;
	int skip_wait = 0;

	mmp_thread_enter(mmp, &cpr);

	/*
	 * There have been no MMP writes yet.  Setting mmp_last_write here gives
	 * us one mmp_fail_ns period, which is consistent with the activity
	 * check duration, to try to land an MMP write before MMP suspends the
	 * pool (if so configured).
	 */
	mutex_enter(&mmp->mmp_io_lock);
	mmp->mmp_last_write = gethrtime();
	mmp->mmp_delay = MSEC2NSEC(MMP_INTERVAL_OK(zfs_multihost_interval));
	mutex_exit(&mmp->mmp_io_lock);

	while (!mmp->mmp_thread_exiting) {
		hrtime_t next_time = gethrtime() +
		    MSEC2NSEC(MMP_DEFAULT_INTERVAL);
		int leaves = MAX(vdev_count_leaves(spa), 1);

		/* Detect changes in tunables or state */

		last_spa_suspended = suspended;
		last_spa_multihost = multihost;
		suspended = spa_suspended(spa);
		multihost = spa_multihost(spa);

		last_mmp_interval = mmp_interval;
		last_mmp_fail_intervals = mmp_fail_intervals;
		last_mmp_fail_ns = mmp_fail_ns;
		mmp_interval = MSEC2NSEC(MMP_INTERVAL_OK(
		    zfs_multihost_interval));
		mmp_fail_intervals = MMP_FAIL_INTVS_OK(
		    zfs_multihost_fail_intervals);

		/* Smooth so pool is not suspended when reducing tunables */
		if (mmp_fail_intervals * mmp_interval < mmp_fail_ns) {
			mmp_fail_ns = (mmp_fail_ns * 31 +
			    mmp_fail_intervals * mmp_interval) / 32;
		} else {
			mmp_fail_ns = mmp_fail_intervals *
			    mmp_interval;
		}

		if (mmp_interval != last_mmp_interval ||
		    mmp_fail_intervals != last_mmp_fail_intervals) {
			/*
			 * We want other hosts to see new tunables as quickly as
			 * possible.  Write out at higher frequency than usual.
			 */
			skip_wait += leaves;
		}

		if (multihost)
			next_time = gethrtime() + mmp_interval / leaves;

		if (mmp_fail_ns != last_mmp_fail_ns) {
			zfs_dbgmsg("MMP interval change pool '%s' "
			    "gethrtime %llu last_mmp_interval %llu "
			    "mmp_interval %llu last_mmp_fail_intervals %u "
			    "mmp_fail_intervals %u mmp_fail_ns %llu "
			    "skip_wait %d leaves %d next_time %llu",
			    spa_name(spa), gethrtime(), last_mmp_interval,
			    mmp_interval, last_mmp_fail_intervals,
			    mmp_fail_intervals, mmp_fail_ns, skip_wait, leaves,
			    next_time);
		}

		/*
		 * MMP off => on, or suspended => !suspended:
		 * No writes occurred recently.  Update mmp_last_write to give
		 * us some time to try.
		 */
		if ((!last_spa_multihost && multihost) ||
		    (last_spa_suspended && !suspended)) {
			zfs_dbgmsg("MMP state change pool '%s': gethrtime %llu "
			    "last_spa_multihost %u multihost %u "
			    "last_spa_suspended %u suspended %u",
			    spa_name(spa), gethrtime(),
			    last_spa_multihost, multihost,
			    last_spa_suspended, suspended);
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_last_write = gethrtime();
			mmp->mmp_delay = mmp_interval;
			mutex_exit(&mmp->mmp_io_lock);
		}

		/*
		 * MMP on => off:
		 * mmp_delay == 0 tells importing node to skip activity check.
		 */
		if (last_spa_multihost && !multihost) {
			mutex_enter(&mmp->mmp_io_lock);
			mmp->mmp_delay = 0;
			mutex_exit(&mmp->mmp_io_lock);
		}

		/*
		 * Suspend the pool if no MMP write has succeeded in over
		 * mmp_interval * mmp_fail_intervals nanoseconds.
		 */
		if (multihost && !suspended && mmp_fail_intervals &&
		    (gethrtime() - mmp->mmp_last_write) > mmp_fail_ns) {
			zfs_dbgmsg("MMP suspending pool '%s': gethrtime %llu "
			    "mmp_last_write %llu mmp_interval %llu "
			    "mmp_fail_intervals %llu mmp_fail_ns %llu",
			    spa_name(spa), (u_longlong_t)gethrtime(),
			    (u_longlong_t)mmp->mmp_last_write,
			    (u_longlong_t)mmp_interval,
			    (u_longlong_t)mmp_fail_intervals,
			    (u_longlong_t)mmp_fail_ns);
			cmn_err(CE_WARN, "MMP writes to pool '%s' have not "
			    "succeeded in over %llu ms; suspending pool. "
			    "Hrtime %llu",
			    spa_name(spa),
			    NSEC2MSEC(gethrtime() - mmp->mmp_last_write),
			    gethrtime());
			zio_suspend(spa, NULL, ZIO_SUSPEND_MMP);
		}

		if (multihost && !suspended)
			mmp_write_uberblock(spa);

		if (skip_wait > 0) {
			next_time = gethrtime() + MSEC2NSEC(MMP_MIN_INTERVAL) /
			    leaves;
			skip_wait--;
		}

		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait_idle_hires(&mmp->mmp_thread_cv,
		    &mmp->mmp_thread_lock, next_time, USEC2NSEC(100),
		    CALLOUT_FLAG_ABSOLUTE);
		CALLB_CPR_SAFE_END(&cpr, &mmp->mmp_thread_lock);
	}

	/* Outstanding writes are allowed to complete. */
	zio_wait(mmp->mmp_zio_root);

	mmp->mmp_zio_root = NULL;
	mmp_thread_exit(mmp, &mmp->mmp_thread, &cpr);
}

/*
 * Signal the MMP thread to wake it, when it is sleeping on
 * its cv.  Used when some module parameter has changed and
 * we want the thread to know about it.
 * Only signal if the pool is active and the mmp thread is
 * running, otherwise there is no thread to wake.
 */
static void
mmp_signal_thread(spa_t *spa)
{
	mmp_thread_t *mmp = &spa->spa_mmp;

	mutex_enter(&mmp->mmp_thread_lock);
	if (mmp->mmp_thread)
		cv_broadcast(&mmp->mmp_thread_cv);
	mutex_exit(&mmp->mmp_thread_lock);
}

void
mmp_signal_all_threads(void)
{
	spa_t *spa = NULL;

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa))) {
		if (spa->spa_state == POOL_STATE_ACTIVE)
			mmp_signal_thread(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_multihost, zfs_multihost_, interval,
	param_set_multihost_interval, param_get_ulong, ZMOD_RW,
	"Milliseconds between mmp writes to each leaf");
/* END CSTYLED */

ZFS_MODULE_PARAM(zfs_multihost, zfs_multihost_, fail_intervals, UINT, ZMOD_RW,
	"Max allowed period without a successful mmp write");

ZFS_MODULE_PARAM(zfs_multihost, zfs_multihost_, import_intervals, UINT, ZMOD_RW,
	"Number of zfs_multihost_interval periods to wait for activity");