/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Portions Copyright 2011 Martin Matuska
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/spa_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/zil.h>
#include <sys/callb.h>
#include <sys/trace_zfs.h>

/*
 * ZFS Transaction Groups
 * ----------------------
 *
 * ZFS transaction groups are, as the name implies, groups of transactions
 * that act on persistent state. ZFS asserts consistency at the granularity of
 * these transaction groups. Each successive transaction group (txg) is
 * assigned a 64-bit consecutive identifier. There are three active
 * transaction group states: open, quiescing, or syncing. At any given time,
 * there may be an active txg associated with each state; each active txg may
 * either be processing, or blocked waiting to enter the next state. There may
 * be up to three active txgs, and there is always a txg in the open state
 * (though it may be blocked waiting to enter the quiescing state). In broad
 * strokes, transactions -- operations that change in-memory structures -- are
 * accepted into the txg in the open state, and are completed while the txg is
 * in the open or quiescing states. The accumulated changes are written to
 * disk in the syncing state.
 *
 * Open
 * ----
 *
 * When a new txg becomes active, it first enters the open state. New
 * transactions -- updates to in-memory structures -- are assigned to the
 * currently open txg. There is always a txg in the open state so that ZFS can
 * accept new changes (though the txg may refuse new changes if it has hit
 * some limit). ZFS advances the open txg to the next state for a variety of
 * reasons, such as the txg hitting a time or size threshold, or the execution
 * of an administrative action that must be completed in the syncing state.
 *
 * Quiescing
 * ---------
 *
 * After a txg exits the open state, it enters the quiescing state. The
 * quiescing state is intended to provide a buffer between accepting new
 * transactions in the open state and writing them out to stable storage in
 * the syncing state. While quiescing, transactions can continue their
 * operation without delaying either of the other states. Typically, a txg is
 * in the quiescing state very briefly since the operations are bounded by
 * software latencies rather than, say, slower I/O latencies. After all
 * transactions complete, the txg is ready to enter the next state.
 *
 * Syncing
 * -------
 *
 * In the syncing state, the in-memory state built up during the open and (to
 * a lesser degree) the quiescing states is written to stable storage. The
 * process of writing out modified data can, in turn, modify more data. For
 * example, when we write new blocks, we need to allocate space for them; those
 * allocations modify metadata (space maps)... which themselves must be
 * written to stable storage. During the sync state, ZFS iterates, writing out
 * data until it converges and all in-memory changes have been written out.
 * The first such pass is the largest as it encompasses all the modified user
 * data (as opposed to filesystem metadata). Subsequent passes typically have
 * far less data to write as they consist exclusively of filesystem metadata.
 *
 * To ensure convergence, after a certain number of passes ZFS begins
 * overwriting locations on stable storage that had been allocated earlier in
 * the syncing state (and subsequently freed). ZFS usually allocates new
 * blocks to optimize for large, continuous writes. For the syncing state to
 * converge, however, it must complete a pass where no new blocks are allocated
 * since each allocation requires a modification of persistent metadata.
 * Further, to hasten convergence, after a prescribed number of passes, ZFS
 * also defers frees, and stops compressing.
 *
 * In addition to writing out user data, we must also execute synctasks during
 * the syncing context. A synctask is the mechanism by which some
 * administrative activities work such as creating and destroying snapshots or
 * datasets. Note that when a synctask is initiated it enters the open txg,
 * and ZFS then pushes that txg as quickly as possible to completion of the
 * syncing state in order to reduce the latency of the administrative
 * activity. To complete the syncing state, ZFS writes out a new uberblock,
 * the root of the tree of blocks that comprise all state stored on the ZFS
 * pool. Finally, if there is a quiesced txg waiting, we signal that it can
 * now transition to the syncing state.
 */

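/*
 * Illustrative only: from a DMU consumer's point of view, the machinery
 * above is driven roughly as follows. dmu_tx_create(), dmu_tx_hold_write(),
 * dmu_tx_assign(), dmu_tx_get_txg(), dmu_tx_commit(), dmu_write(), and
 * txg_wait_synced() are the real entry points; the variables and error
 * handling are assumptions for this sketch, not a prescription.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	error = dmu_tx_assign(tx, TXG_WAIT);	// join the open txg
 *	if (error) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	dmu_write(os, object, offset, size, buf, tx);
 *	txg = dmu_tx_get_txg(tx);
 *	dmu_tx_commit(tx);	// release the hold; the txg may now quiesce
 *
 *	txg_wait_synced(dmu_objset_pool(os), txg);	// on stable storage
 */
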
static void txg_sync_thread(void *arg);
static void txg_quiesce_thread(void *arg);

int zfs_txg_timeout = 5;	/* max seconds worth of delta per txg */

/*
 * Prepare the txg subsystem.
 */
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	bzero(tx, sizeof (tx_state_t));

	tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
		mutex_init(&tx->tx_cpu[c].tc_open_lock, NULL, MUTEX_NOLOCKDEP,
		    NULL);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
			    NULL);
			list_create(&tx->tx_cpu[c].tc_callbacks[i],
			    sizeof (dmu_tx_callback_t),
			    offsetof(dmu_tx_callback_t, dcb_node));
		}
	}

	mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);

	tx->tx_open_txg = txg;
}

/*
 * Close down the txg subsystem.
 */
void
txg_fini(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	int c;

	ASSERT0(tx->tx_threads);

	mutex_destroy(&tx->tx_sync_lock);

	cv_destroy(&tx->tx_sync_more_cv);
	cv_destroy(&tx->tx_sync_done_cv);
	cv_destroy(&tx->tx_quiesce_more_cv);
	cv_destroy(&tx->tx_quiesce_done_cv);
	cv_destroy(&tx->tx_exit_cv);

	for (c = 0; c < max_ncpus; c++) {
		int i;

		mutex_destroy(&tx->tx_cpu[c].tc_open_lock);
		mutex_destroy(&tx->tx_cpu[c].tc_lock);
		for (i = 0; i < TXG_SIZE; i++) {
			cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
			list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
		}
	}

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_destroy(tx->tx_commit_cb_taskq);

	vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));

	bzero(tx, sizeof (tx_state_t));
}

/*
 * Start syncing transaction groups.
 */
void
txg_sync_start(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	mutex_enter(&tx->tx_sync_lock);

	dprintf("pool %p\n", dp);

	ASSERT0(tx->tx_threads);

	tx->tx_threads = 2;

	tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
	    dp, 0, &p0, TS_RUN, defclsyspri);

	/*
	 * The sync thread can need a larger-than-default stack size on
	 * 32-bit x86.  This is due in part to nested pools and
	 * scrub_visitbp() recursion.
	 */
	tx->tx_sync_thread = thread_create(NULL, 0, txg_sync_thread,
	    dp, 0, &p0, TS_RUN, defclsyspri);

	mutex_exit(&tx->tx_sync_lock);
}

static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
	CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
	mutex_enter(&tx->tx_sync_lock);
}

static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
	ASSERT(*tpp != NULL);
	*tpp = NULL;
	tx->tx_threads--;
	cv_broadcast(&tx->tx_exit_cv);
	CALLB_CPR_EXIT(cpr);		/* drops &tx->tx_sync_lock */
	thread_exit();
}

static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, clock_t time)
{
	CALLB_CPR_SAFE_BEGIN(cpr);

	if (time) {
		(void) cv_timedwait_idle(cv, &tx->tx_sync_lock,
		    ddi_get_lbolt() + time);
	} else {
		cv_wait_idle(cv, &tx->tx_sync_lock);
	}

	CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}

/*
 * Stop syncing transaction groups.
 */
void
txg_sync_stop(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	dprintf("pool %p\n", dp);

	/*
	 * Finish off any work in progress.
	 */
	ASSERT3U(tx->tx_threads, ==, 2);

	/*
	 * We need to ensure that we've vacated the deferred metaslab trees.
	 */
	txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);

	/*
	 * Wake all sync threads and wait for them to die.
	 */
	mutex_enter(&tx->tx_sync_lock);

	ASSERT3U(tx->tx_threads, ==, 2);

	tx->tx_exiting = 1;

	cv_broadcast(&tx->tx_quiesce_more_cv);
	cv_broadcast(&tx->tx_quiesce_done_cv);
	cv_broadcast(&tx->tx_sync_more_cv);

	while (tx->tx_threads != 0)
		cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);

	tx->tx_exiting = 0;

	mutex_exit(&tx->tx_sync_lock);
}

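/*
 * Illustrative only: a minimal sketch of how the pool code is expected to
 * drive this subsystem (the call sites live in dsl_pool.c and spa.c; the
 * exact sequencing here is an assumption, not part of this file).
 *
 *	txg_init(dp, txg);	// allocate per-cpu txg state
 *	txg_sync_start(dp);	// spawn the quiesce and sync threads
 *	...			// pool is live; txgs cycle open->quiesce->sync
 *	txg_sync_stop(dp);	// sync remaining dirty data, reap both threads
 *	txg_fini(dp);		// tear down locks, cvs, and the callback taskq
 */
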
uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
	tx_state_t *tx = &dp->dp_tx;
	tx_cpu_t *tc;
	uint64_t txg;

	/*
	 * It appears the processor id is simply used as a "random"
	 * number to index into the array, and there isn't any other
	 * significance to the chosen tx_cpu.  So why not use the
	 * current cpu to index into the array?
	 */
	tc = &tx->tx_cpu[CPU_SEQID];

	mutex_enter(&tc->tc_open_lock);
	txg = tx->tx_open_txg;

	mutex_enter(&tc->tc_lock);
	tc->tc_count[txg & TXG_MASK]++;
	mutex_exit(&tc->tc_lock);

	th->th_cpu = tc;
	th->th_txg = txg;

	return (txg);
}

void
txg_rele_to_quiesce(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;

	ASSERT(!MUTEX_HELD(&tc->tc_lock));
	mutex_exit(&tc->tc_open_lock);
}

void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
	mutex_exit(&tc->tc_lock);
}

void
txg_rele_to_sync(txg_handle_t *th)
{
	tx_cpu_t *tc = th->th_cpu;
	int g = th->th_txg & TXG_MASK;

	mutex_enter(&tc->tc_lock);
	ASSERT(tc->tc_count[g] != 0);
	if (--tc->tc_count[g] == 0)
		cv_broadcast(&tc->tc_cv[g]);
	mutex_exit(&tc->tc_lock);

	th->th_cpu = NULL;	/* defensive */
}

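/*
 * Illustrative only: the hold/release pairing as driven by the DMU
 * (dmu_tx_assign() and dmu_tx_commit() are the expected callers; this
 * sketch of their behavior is an assumption, not part of this file).
 *
 *	txg_handle_t th;
 *	uint64_t txg = txg_hold_open(dp, &th);	// join the open txg
 *	...					// set up in-memory changes
 *	txg_rele_to_quiesce(&th);		// drop tc_open_lock
 *	...					// finish the changes
 *	txg_rele_to_sync(&th);			// allow the txg to quiesce
 */
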
/*
 * Blocks until all transactions in the group are committed.
 *
 * On return, the transaction group has reached a stable state in which it can
 * then be passed off to the syncing context.
 */
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
	tx_state_t *tx = &dp->dp_tx;
	uint64_t tx_open_time;
	int g = txg & TXG_MASK;
	int c;

	/*
	 * Grab all tc_open_locks so nobody else can get into this txg.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_enter(&tx->tx_cpu[c].tc_open_lock);

	ASSERT(txg == tx->tx_open_txg);
	tx->tx_open_txg++;
	tx->tx_open_time = tx_open_time = gethrtime();

	DTRACE_PROBE2(txg__quiescing, dsl_pool_t *, dp, uint64_t, txg);
	DTRACE_PROBE2(txg__opened, dsl_pool_t *, dp, uint64_t, tx->tx_open_txg);

	/*
	 * Now that we've incremented tx_open_txg, we can let threads
	 * enter the next transaction group.
	 */
	for (c = 0; c < max_ncpus; c++)
		mutex_exit(&tx->tx_cpu[c].tc_open_lock);

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_OPEN, tx_open_time);
	spa_txg_history_add(dp->dp_spa, txg + 1, tx_open_time);

	/*
	 * Quiesce the transaction group by waiting for everyone to
	 * call txg_rele_to_sync().
	 */
	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		mutex_enter(&tc->tc_lock);
		while (tc->tc_count[g] != 0)
			cv_wait(&tc->tc_cv[g], &tc->tc_lock);
		mutex_exit(&tc->tc_lock);
	}

	spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_QUIESCED, gethrtime());
}

static void
txg_do_callbacks(list_t *cb_list)
{
	dmu_tx_do_callbacks(cb_list, 0);

	list_destroy(cb_list);

	kmem_free(cb_list, sizeof (list_t));
}

/*
 * Dispatch the commit callbacks registered on this txg to worker threads.
 *
 * If no callbacks are registered for a given TXG, nothing happens.
 * This function creates a taskq for the associated pool, if needed.
 */
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
	int c;
	tx_state_t *tx = &dp->dp_tx;
	list_t *cb_list;

	for (c = 0; c < max_ncpus; c++) {
		tx_cpu_t *tc = &tx->tx_cpu[c];
		/*
		 * No need to lock tx_cpu_t at this point, since this can
		 * only be called once a txg has been synced.
		 */

		int g = txg & TXG_MASK;

		if (list_is_empty(&tc->tc_callbacks[g]))
			continue;

		if (tx->tx_commit_cb_taskq == NULL) {
			/*
			 * Commit callback taskq hasn't been created yet.
			 */
			tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
			    boot_ncpus, defclsyspri, boot_ncpus, boot_ncpus * 2,
			    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
		}

		cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
		list_create(cb_list, sizeof (dmu_tx_callback_t),
		    offsetof(dmu_tx_callback_t, dcb_node));

		list_move_tail(cb_list, &tc->tc_callbacks[g]);

		(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
		    txg_do_callbacks, cb_list, TQ_SLEEP);
	}
}

/*
 * Wait for pending commit callbacks of already-synced transactions to finish
 * processing.
 * Calling this function from within a commit callback will deadlock.
 */
void
txg_wait_callbacks(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	if (tx->tx_commit_cb_taskq != NULL)
		taskq_wait_outstanding(tx->tx_commit_cb_taskq, 0);
}

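/*
 * Illustrative only: how a consumer attaches a commit callback that the
 * routines above later dispatch.  dmu_tx_callback_register() is the real
 * DMU entry point; my_cb and my_data are hypothetical names.
 *
 *	static void
 *	my_cb(void *my_data, int error)	// runs in the tx_commit_cb taskq
 *	{
 *		// error is nonzero if the transaction was aborted
 *	}
 *
 *	dmu_tx_callback_register(tx, my_cb, my_data);
 *	dmu_tx_commit(tx);	// my_cb fires after the txg syncs (or aborts)
 */
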
static boolean_t
txg_is_syncing(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
	return (tx->tx_syncing_txg != 0);
}

static boolean_t
txg_is_quiescing(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
	return (tx->tx_quiescing_txg != 0);
}

static boolean_t
txg_has_quiesced_to_sync(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	ASSERT(MUTEX_HELD(&tx->tx_sync_lock));
	return (tx->tx_quiesced_txg != 0);
}

static void
txg_sync_thread(void *arg)
{
	dsl_pool_t *dp = arg;
	spa_t *spa = dp->dp_spa;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;
	clock_t start, delta;

	(void) spl_fstrans_mark();
	txg_thread_enter(tx, &cpr);

	start = delta = 0;
	for (;;) {
		clock_t timeout = zfs_txg_timeout * hz;
		clock_t timer;
		uint64_t txg;
		uint64_t dirty_min_bytes =
		    zfs_dirty_data_max * zfs_dirty_data_sync_percent / 100;

		/*
		 * We sync when we're scanning, there's someone waiting
		 * on us, or the quiesce thread has handed off a txg to
		 * us, or we have reached our timeout.
		 */
		timer = (delta >= timeout ? 0 : timeout - delta);
		while (!dsl_scan_active(dp->dp_scan) &&
		    !tx->tx_exiting && timer > 0 &&
		    tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
		    !txg_has_quiesced_to_sync(dp) &&
		    dp->dp_dirty_total < dirty_min_bytes) {
			dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
			    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
			txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
			delta = ddi_get_lbolt() - start;
			timer = (delta > timeout ? 0 : timeout - delta);
		}

		/*
		 * Wait until the quiesce thread hands off a txg to us,
		 * prompting it to do so if necessary.
		 */
		while (!tx->tx_exiting && !txg_has_quiesced_to_sync(dp)) {
			if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
				tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
			cv_broadcast(&tx->tx_quiesce_more_cv);
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
		}

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);

		/*
		 * Consume the quiesced txg which has been handed off to
		 * us.  This may cause the quiescing thread to now be
		 * able to quiesce another txg, so we must signal it.
		 */
		ASSERT(tx->tx_quiesced_txg != 0);
		txg = tx->tx_quiesced_txg;
		tx->tx_quiesced_txg = 0;
		tx->tx_syncing_txg = txg;
		DTRACE_PROBE2(txg__syncing, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_quiesce_more_cv);

		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
		mutex_exit(&tx->tx_sync_lock);

		txg_stat_t *ts = spa_txg_history_init_io(spa, txg, dp);
		start = ddi_get_lbolt();
		spa_sync(spa, txg);
		delta = ddi_get_lbolt() - start;
		spa_txg_history_fini_io(spa, ts);

		mutex_enter(&tx->tx_sync_lock);
		tx->tx_synced_txg = txg;
		tx->tx_syncing_txg = 0;
		DTRACE_PROBE2(txg__synced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_done_cv);

		/*
		 * Dispatch commit callbacks to worker threads.
		 */
		txg_dispatch_callbacks(dp, txg);
	}
}

static void
txg_quiesce_thread(void *arg)
{
	dsl_pool_t *dp = arg;
	tx_state_t *tx = &dp->dp_tx;
	callb_cpr_t cpr;

	txg_thread_enter(tx, &cpr);

	for (;;) {
		uint64_t txg;

		/*
		 * We quiesce when there's someone waiting on us.
		 * However, we can only have one txg in "quiescing" or
		 * "quiesced, waiting to sync" state.  So we wait until
		 * the "quiesced, waiting to sync" txg has been consumed
		 * by the sync thread.
		 */
		while (!tx->tx_exiting &&
		    (tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
		    txg_has_quiesced_to_sync(dp)))
			txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);

		if (tx->tx_exiting)
			txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);

		txg = tx->tx_open_txg;
		dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
		    txg, tx->tx_quiesce_txg_waiting,
		    tx->tx_sync_txg_waiting);
		tx->tx_quiescing_txg = txg;

		mutex_exit(&tx->tx_sync_lock);
		txg_quiesce(dp, txg);
		mutex_enter(&tx->tx_sync_lock);

		/*
		 * Hand this txg off to the sync thread.
		 */
		dprintf("quiesce done, handing off txg %llu\n", txg);
		tx->tx_quiescing_txg = 0;
		tx->tx_quiesced_txg = txg;
		DTRACE_PROBE2(txg__quiesced, dsl_pool_t *, dp, uint64_t, txg);
		cv_broadcast(&tx->tx_sync_more_cv);
		cv_broadcast(&tx->tx_quiesce_done_cv);
	}
}

/*
 * Delay this thread by delay nanoseconds if we are still in the open
 * transaction group and there is already a waiting txg quiescing or quiesced.
 * Abort the delay if this txg stalls or enters the quiescing state.
 */
void
txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution)
{
	tx_state_t *tx = &dp->dp_tx;
	hrtime_t start = gethrtime();

	/* don't delay if this txg could transition to quiescing immediately */
	if (tx->tx_open_txg > txg ||
	    tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
		return;

	mutex_enter(&tx->tx_sync_lock);
	if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
		mutex_exit(&tx->tx_sync_lock);
		return;
	}

	while (gethrtime() - start < delay &&
	    tx->tx_syncing_txg < txg-1 && !txg_stalled(dp)) {
		(void) cv_timedwait_hires(&tx->tx_quiesce_more_cv,
		    &tx->tx_sync_lock, delay, resolution, 0);
	}

	DMU_TX_STAT_BUMP(dmu_tx_delay);

	mutex_exit(&tx->tx_sync_lock);
}

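/*
 * Illustrative only: the DMU write throttle (dmu_tx_delay() in dmu_tx.c)
 * is the expected caller.  The wakeup resolution below is an assumption
 * for the sketch, not a value taken from this file.
 *
 *	// Throttle a writer that dirtied data in 'txg' by up to 'ns' nsec.
 *	txg_delay(dp, txg, ns, MSEC2NSEC(10));
 */
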
static boolean_t
txg_wait_synced_impl(dsl_pool_t *dp, uint64_t txg, boolean_t wait_sig)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT3U(tx->tx_threads, ==, 2);
	if (txg == 0)
		txg = tx->tx_open_txg + TXG_DEFER_SIZE;
	if (tx->tx_sync_txg_waiting < txg)
		tx->tx_sync_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_synced_txg < txg) {
		dprintf("broadcasting sync more "
		    "tx_synced=%llu waiting=%llu dp=%px\n",
		    tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
		cv_broadcast(&tx->tx_sync_more_cv);
		if (wait_sig) {
			/*
			 * Condition wait here but stop if the thread
			 * receives a signal.  The caller may call
			 * txg_wait_synced*() again to resume waiting
			 * for this txg.
			 */
			if (cv_wait_io_sig(&tx->tx_sync_done_cv,
			    &tx->tx_sync_lock) == 0) {
				mutex_exit(&tx->tx_sync_lock);
				return (B_TRUE);
			}
		} else {
			cv_wait_io(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
		}
	}
	mutex_exit(&tx->tx_sync_lock);
	return (B_FALSE);
}

void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
	VERIFY0(txg_wait_synced_impl(dp, txg, B_FALSE));
}

/*
 * Similar to txg_wait_synced(), but it can be interrupted by a signal.
 * Returns B_TRUE if the thread was signaled while waiting.
 */
boolean_t
txg_wait_synced_sig(dsl_pool_t *dp, uint64_t txg)
{
	return (txg_wait_synced_impl(dp, txg, B_TRUE));
}

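/*
 * Illustrative only: a caller that wants an interruptible wait might map
 * the signaled case to EINTR and let userspace retry (this error mapping
 * is an assumption about the caller, not something this file prescribes).
 *
 *	if (txg_wait_synced_sig(dp, txg))
 *		return (SET_ERROR(EINTR));	// signaled; caller may retry
 */
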
/*
 * Wait for the specified open transaction group.  Set should_quiesce
 * when the current open txg should be quiesced immediately.
 */
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg, boolean_t should_quiesce)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	ASSERT3U(tx->tx_threads, ==, 2);
	if (txg == 0)
		txg = tx->tx_open_txg + 1;
	if (tx->tx_quiesce_txg_waiting < txg && should_quiesce)
		tx->tx_quiesce_txg_waiting = txg;
	dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
	    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
	while (tx->tx_open_txg < txg) {
		cv_broadcast(&tx->tx_quiesce_more_cv);
		/*
		 * Callers setting should_quiesce will use cv_wait_io() and
		 * be accounted for as iowait time.  Otherwise, the caller is
		 * understood to be idle and cv_wait_idle() is used to prevent
		 * incorrectly inflating the system load average.
		 */
		if (should_quiesce == B_TRUE) {
			cv_wait_io(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
		} else {
			cv_wait_idle(&tx->tx_quiesce_done_cv,
			    &tx->tx_sync_lock);
		}
	}
	mutex_exit(&tx->tx_sync_lock);
}

/*
 * If there isn't a txg syncing or in the pipeline, push another txg through
 * the pipeline by quiescing the open txg.
 */
void
txg_kick(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	ASSERT(!dsl_pool_config_held(dp));

	mutex_enter(&tx->tx_sync_lock);
	if (!txg_is_syncing(dp) &&
	    !txg_is_quiescing(dp) &&
	    tx->tx_quiesce_txg_waiting <= tx->tx_open_txg &&
	    tx->tx_sync_txg_waiting <= tx->tx_synced_txg &&
	    tx->tx_quiesced_txg <= tx->tx_synced_txg) {
		tx->tx_quiesce_txg_waiting = tx->tx_open_txg + 1;
		cv_broadcast(&tx->tx_quiesce_more_cv);
	}
	mutex_exit(&tx->tx_sync_lock);
}

boolean_t
txg_stalled(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;
	return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}

boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
	tx_state_t *tx = &dp->dp_tx;

	return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
	    tx->tx_quiesced_txg != 0);
}

/*
 * Verify that this txg is active (open, quiescing, syncing).  Non-active
 * txgs should not be manipulated.
 */
#ifdef ZFS_DEBUG
void
txg_verify(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp __maybe_unused = spa_get_dsl(spa);
	if (txg <= TXG_INITIAL || txg == ZILTEST_TXG)
		return;
	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	ASSERT3U(txg, >=, dp->dp_tx.tx_synced_txg);
	ASSERT3U(txg, >=, dp->dp_tx.tx_open_txg - TXG_CONCURRENT_STATES);
}
#endif

/*
 * Per-txg object lists.
 */
void
txg_list_create(txg_list_t *tl, spa_t *spa, size_t offset)
{
	int t;

	mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);

	tl->tl_offset = offset;
	tl->tl_spa = spa;

	for (t = 0; t < TXG_SIZE; t++)
		tl->tl_head[t] = NULL;
}

static boolean_t
txg_list_empty_impl(txg_list_t *tl, uint64_t txg)
{
	ASSERT(MUTEX_HELD(&tl->tl_lock));
	TXG_VERIFY(tl->tl_spa, txg);
	return (tl->tl_head[txg & TXG_MASK] == NULL);
}

boolean_t
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
	mutex_enter(&tl->tl_lock);
	boolean_t ret = txg_list_empty_impl(tl, txg);
	mutex_exit(&tl->tl_lock);

	return (ret);
}

void
txg_list_destroy(txg_list_t *tl)
{
	int t;

	mutex_enter(&tl->tl_lock);
	for (t = 0; t < TXG_SIZE; t++)
		ASSERT(txg_list_empty_impl(tl, t));
	mutex_exit(&tl->tl_lock);

	mutex_destroy(&tl->tl_lock);
}

/*
 * Returns true if all txg lists are empty.
 *
 * Warning: this is inherently racy (an item could be added immediately
 * after this function returns).
 */
boolean_t
txg_all_lists_empty(txg_list_t *tl)
{
	mutex_enter(&tl->tl_lock);
	for (int i = 0; i < TXG_SIZE; i++) {
		if (!txg_list_empty_impl(tl, i)) {
			mutex_exit(&tl->tl_lock);
			return (B_FALSE);
		}
	}
	mutex_exit(&tl->tl_lock);

	return (B_TRUE);
}

/*
 * Add an entry to the list (unless it's already on the list).
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		tn->tn_member[t] = 1;
		tn->tn_next[t] = tl->tl_head[t];
		tl->tl_head[t] = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}

/*
 * Add an entry to the end of the list, unless it's already on the list.
 * (walks list to find end)
 * Returns B_TRUE if it was actually added.
 */
boolean_t
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
	boolean_t add;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	add = (tn->tn_member[t] == 0);
	if (add) {
		txg_node_t **tp;

		for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
			continue;

		tn->tn_member[t] = 1;
		tn->tn_next[t] = NULL;
		*tp = tn;
	}
	mutex_exit(&tl->tl_lock);

	return (add);
}

/*
 * Remove the head of the list and return it.
 */
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;
	void *p = NULL;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);
	if ((tn = tl->tl_head[t]) != NULL) {
		ASSERT(tn->tn_member[t]);
		ASSERT(tn->tn_next[t] == NULL || tn->tn_next[t]->tn_member[t]);
		p = (char *)tn - tl->tl_offset;
		tl->tl_head[t] = tn->tn_next[t];
		tn->tn_next[t] = NULL;
		tn->tn_member[t] = 0;
	}
	mutex_exit(&tl->tl_lock);

	return (p);
}

/*
 * Remove a specific item from the list and return it.
 */
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn, **tp;

	TXG_VERIFY(tl->tl_spa, txg);
	mutex_enter(&tl->tl_lock);

	for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
		if ((char *)tn - tl->tl_offset == p) {
			*tp = tn->tn_next[t];
			tn->tn_next[t] = NULL;
			tn->tn_member[t] = 0;
			mutex_exit(&tl->tl_lock);
			return (p);
		}
	}

	mutex_exit(&tl->tl_lock);

	return (NULL);
}

boolean_t
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	TXG_VERIFY(tl->tl_spa, txg);
	return (tn->tn_member[t] != 0);
}

/*
 * Walk a txg list.
 */
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn;

	mutex_enter(&tl->tl_lock);
	tn = tl->tl_head[t];
	mutex_exit(&tl->tl_lock);

	TXG_VERIFY(tl->tl_spa, txg);
	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
	int t = txg & TXG_MASK;
	txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);

	TXG_VERIFY(tl->tl_spa, txg);

	mutex_enter(&tl->tl_lock);
	tn = tn->tn_next[t];
	mutex_exit(&tl->tl_lock);

	return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}

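/*
 * Illustrative only: the canonical consumer of these lists is a
 * sync-context drain loop, e.g. over dp_dirty_datasets in dsl_pool_sync()
 * (the surrounding zio/tx setup is assumed, not shown).
 *
 *	dsl_dataset_t *ds;
 *
 *	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL)
 *		dsl_dataset_sync(ds, zio, tx);	// item is off-list once removed
 */
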
EXPORT_SYMBOL(txg_init);
EXPORT_SYMBOL(txg_fini);
EXPORT_SYMBOL(txg_sync_start);
EXPORT_SYMBOL(txg_sync_stop);
EXPORT_SYMBOL(txg_hold_open);
EXPORT_SYMBOL(txg_rele_to_quiesce);
EXPORT_SYMBOL(txg_rele_to_sync);
EXPORT_SYMBOL(txg_register_callbacks);
EXPORT_SYMBOL(txg_delay);
EXPORT_SYMBOL(txg_wait_synced);
EXPORT_SYMBOL(txg_wait_open);
EXPORT_SYMBOL(txg_wait_callbacks);
EXPORT_SYMBOL(txg_stalled);
EXPORT_SYMBOL(txg_sync_waiting);

ZFS_MODULE_PARAM(zfs_txg, zfs_txg_, timeout, INT, ZMOD_RW,
	"Max seconds worth of delta per txg");