/*
 * Copyright (c) 2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements the hammer2 helper thread API, including
 * the frontend/backend XOP API.
 */
#include "hammer2.h"
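
/*
 * Typical frontend (VOP) usage, as a rough sketch only -- the xop type
 * and the backend function named here are illustrative placeholders,
 * not taken from a real caller:
 *
 *	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
 *	hammer2_xop_setname(&xop->head, name, name_len);
 *	hammer2_xop_start(&xop->head, hammer2_xop_example_backend);
 *	error = hammer2_xop_collect(&xop->head, 0);
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */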
/*
 * Initialize the supplied thread structure, starting the specified
 * thread.
 */
void
hammer2_thr_create(hammer2_thread_t *thr, hammer2_pfs_t *pmp,
		   const char *id, int clindex, int repidx,
		   void (*func)(void *arg))
{
	lockinit(&thr->lk, "h2thr", 0, 0);
	thr->pmp = pmp;
	thr->xopq = &pmp->xopq[clindex];
	thr->clindex = clindex;
	thr->repidx = repidx;
	if (repidx >= 0) {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1,
			    "%s-%s.%02d", id, pmp->pfs_names[clindex], repidx);
	} else {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1,
			    "%s-%s", id, pmp->pfs_names[clindex]);
	}
}
/*
 * Terminate a thread.  This function will silently return if the thread
 * was never initialized or has already been deleted.
 *
 * This is accomplished by setting the STOP flag and waiting for the td
 * structure to become NULL.
 */
void
hammer2_thr_delete(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	lockmgr(&thr->lk, LK_EXCLUSIVE);
	atomic_set_int(&thr->flags, HAMMER2_THREAD_STOP);
	wakeup(thr->xopq);
	while (thr->td) {
		lksleep(thr, &thr->lk, 0, "h2thr", hz);
	}
	lockmgr(&thr->lk, LK_RELEASE);
}
/*
 * Asynchronous remaster request.  Ask the synchronization thread to
 * start over soon (as if it were frozen and unfrozen, but without waiting).
 * The thread always recalculates mastership relationships when restarting.
 */
void
hammer2_thr_remaster(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	lockmgr(&thr->lk, LK_EXCLUSIVE);
	atomic_set_int(&thr->flags, HAMMER2_THREAD_REMASTER);
	wakeup(thr->xopq);
	lockmgr(&thr->lk, LK_RELEASE);
}
void
hammer2_thr_freeze_async(hammer2_thread_t *thr)
{
	atomic_set_int(&thr->flags, HAMMER2_THREAD_FREEZE);
	wakeup(thr->xopq);
}
void
hammer2_thr_freeze(hammer2_thread_t *thr)
{
	lockmgr(&thr->lk, LK_EXCLUSIVE);
	atomic_set_int(&thr->flags, HAMMER2_THREAD_FREEZE);
	wakeup(thr->xopq);
	while ((thr->flags & HAMMER2_THREAD_FROZEN) == 0) {
		lksleep(thr, &thr->lk, 0, "h2frz", hz);
	}
	lockmgr(&thr->lk, LK_RELEASE);
}
void
hammer2_thr_unfreeze(hammer2_thread_t *thr)
{
	lockmgr(&thr->lk, LK_EXCLUSIVE);
	atomic_clear_int(&thr->flags, HAMMER2_THREAD_FROZEN);
	wakeup(thr->xopq);
	lockmgr(&thr->lk, LK_RELEASE);
}
int
hammer2_thr_break(hammer2_thread_t *thr)
{
	if (thr->flags & (HAMMER2_THREAD_STOP |
			  HAMMER2_THREAD_REMASTER |
			  HAMMER2_THREAD_FREEZE)) {
		return 1;
	}
	return 0;
}
/****************************************************************************
 *			    HAMMER2 XOPS API				    *
 ****************************************************************************/
void
hammer2_xop_group_init(hammer2_pfs_t *pmp, hammer2_xop_group_t *xgrp)
{
	/* no extra fields in structure at the moment */
}
/*
 * Allocate an XOP request.
 *
 * Once allocated, an XOP request can be started, collected, and retired,
 * and can be retired early if desired.
 *
 * NOTE: Fifo indices might not be zero but ri == wi on objcache_get().
 */
void *
hammer2_xop_alloc(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_t *xop;
	xop = objcache_get(cache_xops, M_WAITOK);
	KKASSERT(xop->head.cluster.array[0].chain == NULL);

	xop->head.ip1 = ip;
	xop->head.func = NULL;
	xop->head.collect_key = 0;
	if (flags & HAMMER2_XOP_MODIFYING)
		xop->head.mtid = hammer2_trans_sub(ip->pmp);
	else
		xop->head.mtid = 0;

	xop->head.cluster.nchains = ip->cluster.nchains;
	xop->head.cluster.pmp = ip->pmp;
	xop->head.cluster.flags = HAMMER2_CLUSTER_LOCKED;
	/*
	 * run_mask - Active thread (or frontend) associated with XOP
	 */
	xop->head.run_mask = HAMMER2_XOPMASK_VOP;

	hammer2_inode_ref(ip);
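	/*
	 * NOTE: The inode reference taken here is inherited by xop->ip1
	 *	 and is not released until hammer2_xop_retire() drops
	 *	 the ips.
	 */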
	return xop;
}

void
hammer2_xop_setname(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
	xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name1_len = name_len;
	bcopy(name, xop->name1, name_len);
}
void
hammer2_xop_setname2(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
	xop->name2 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name2_len = name_len;
	bcopy(name, xop->name2, name_len);
}
void
hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2)
{
	xop->ip2 = ip2;
	hammer2_inode_ref(ip2);
}
void
hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3)
{
	xop->ip3 = ip3;
	hammer2_inode_ref(ip3);
}
void
hammer2_xop_reinit(hammer2_xop_head_t *xop)
{
	xop->collect_key = 0;
	xop->run_mask = HAMMER2_XOPMASK_VOP;
}
/*
 * A mounted PFS needs XOP helper threads to support frontend operations.
 */
void
hammer2_xop_helper_create(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	lockmgr(&pmp->lock, LK_EXCLUSIVE);
	pmp->has_xop_threads = 1;

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				continue;
			hammer2_thr_create(&pmp->xop_groups[j].thrs[i], pmp,
					   "h2xop", i, j,
					   hammer2_primary_xops_thread);
		}
	}
	lockmgr(&pmp->lock, LK_RELEASE);
}
void
hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	for (i = 0; i < pmp->pfs_nmasters; ++i) {
		for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]);
		}
	}
}
/*
 * Start an XOP request, queueing it to all nodes in the cluster to
 * execute the cluster op.
 *
 * XXX optimize single-target case.
 */
void
hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_func_t func,
			 int notidx)
{
	hammer2_xop_group_t *xgrp;
	hammer2_thread_t *thr;
	hammer2_pfs_t *pmp;
	int g;
	int i;
	int nchains;
	pmp = xop->ip1->pmp;
	if (pmp->has_xop_threads == 0)
		hammer2_xop_helper_create(pmp);

	xop->func = func;
	g = pmp->xop_iterator++;
	g = g & HAMMER2_XOPGROUPS_MASK;
	xgrp = &pmp->xop_groups[g];
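	/*
	 * NOTE: xop_iterator is bumped without a lock; it is only a
	 *	 round-robin distribution hint, so a race here merely
	 *	 perturbs the distribution across groups.
	 */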
	/*
	 * The XOP sequencer is based on ip1, ip2, and ip3.  Because ops can
	 * finish early and unlock the related inodes, some targets may get
	 * behind.  The sequencer ensures that ops on the same inode execute
	 * in the same order.
	 *
	 * The instant the xop is queued another thread can pick it off.  In
	 * the case of asynchronous ops, another thread might even finish and
	 * deallocate it before we are done queueing, so the xop must not be
	 * referenced once the spinlock is released.
	 */
	hammer2_spin_ex(&pmp->xop_spin);
	nchains = xop->ip1->cluster.nchains;
	for (i = 0; i < nchains; ++i) {
		if (i != notidx) {
			atomic_set_int(&xop->run_mask, 1U << i);
			atomic_set_int(&xop->chk_mask, 1U << i);
			TAILQ_INSERT_TAIL(&pmp->xopq[i], xop, collect[i].entry);
		}
	}
	hammer2_spin_unex(&pmp->xop_spin);
	/* xop can become invalid at this point */
	/*
	 * Try to wakeup just one xop thread for each cluster node.
	 */
	for (i = 0; i < nchains; ++i) {
		if (i != notidx)
			wakeup_one(&pmp->xopq[i]);
	}
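	/*
	 * NOTE: A missed wakeup here is not fatal; idle helper threads
	 *	 also poll their queue on a 30-second interval (see the
	 *	 lksleep in hammer2_primary_xops_thread()).
	 */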
}

void
hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_func_t func)
{
	hammer2_xop_start_except(xop, func, -1);
}
/*
 * Retire an XOP.  Used by both the VOP frontend and by the XOP backend.
 */
void
hammer2_xop_retire(hammer2_xop_head_t *xop, uint32_t mask)
{
	hammer2_chain_t *chain;
	int i;
	/*
	 * Remove the frontend collector or remove a backend feeder.
	 * When removing the frontend we must wake up any backend feeders
	 * that are waiting for FIFO space.
	 *
	 * XXX optimize wakeup.
	 */
	KKASSERT(xop->run_mask & mask);
	if (atomic_fetchadd_int(&xop->run_mask, -mask) != mask) {
		if (mask == HAMMER2_XOPMASK_VOP)
			wakeup(xop);
		return;
	}
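	/*
	 * NOTE: atomic_fetchadd_int() returns the pre-decrement value, so
	 *	 equality with our own mask means no other feeders or
	 *	 collectors remain.
	 */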
	/*
	 * All collectors are gone, we can cleanup and dispose of the XOP.
	 * Note that this can wind up being a frontend OR a backend.
	 * Pending chains are locked shared and not owned by any thread.
	 *
	 * Cleanup the collection cluster.
	 */
	for (i = 0; i < xop->cluster.nchains; ++i) {
		xop->cluster.array[i].flags = 0;
		chain = xop->cluster.array[i].chain;
		if (chain) {
			xop->cluster.array[i].chain = NULL;
			hammer2_chain_pull_shared_lock(chain);
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
	}
	/*
	 * Cleanup the fifos, using chk_mask to skip FIFOs which never
	 * had anything queued to them.
	 */
	mask = xop->chk_mask;
	for (i = 0; mask && i < HAMMER2_MAXCLUSTER; ++i) {
		hammer2_xop_fifo_t *fifo = &xop->collect[i];
		while (fifo->ri != fifo->wi) {
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			if (chain) {
				hammer2_chain_pull_shared_lock(chain);
				hammer2_chain_unlock(chain);
				hammer2_chain_drop(chain);
			}
			++fifo->ri;
			if (fifo->wi - fifo->ri < HAMMER2_XOPFIFO / 2)
				wakeup(xop);	/* XXX optimize */
		}
		mask &= ~(1U << i);
	}
	/*
	 * The inodes are only held at this point, simply drop them.
	 */
	if (xop->ip1) {
		hammer2_inode_drop(xop->ip1);
		xop->ip1 = NULL;
	}
	if (xop->ip2) {
		hammer2_inode_drop(xop->ip2);
		xop->ip2 = NULL;
	}
	if (xop->ip3) {
		hammer2_inode_drop(xop->ip3);
		xop->ip3 = NULL;
	}
	if (xop->name1) {
		kfree(xop->name1, M_HAMMER2);
		xop->name1 = NULL;
		xop->name1_len = 0;
	}
	if (xop->name2) {
		kfree(xop->name2, M_HAMMER2);
		xop->name2 = NULL;
		xop->name2_len = 0;
	}

	objcache_put(cache_xops, xop);
}
/*
 * (Backend) Returns non-zero if the frontend is still attached.
 */
int
hammer2_xop_active(hammer2_xop_head_t *xop)
{
	if (xop->run_mask & HAMMER2_XOPMASK_VOP)
		return 1;
	else
		return 0;
}
/*
 * (Backend) Feed chain data through the cluster validator and back to
 * the frontend.  Chains are fed from multiple nodes concurrently
 * and pipelined via per-node FIFOs in the XOP.
 *
 * The chain must be locked shared.  This function adds an additional
 * shared lock and ref to the chain for the frontend to collect; the
 * caller is still responsible for its own unlock/drop of the chain.
 *
 * No xop lock is needed because we are only manipulating fields under
 * our direct control.
 *
 * Returns 0 on success, or a hammer error code if synchronization is
 * permanently lost.  On error the caller retains its ref on the chain
 * but loses the lock (we unlock it here).
 *
 * WARNING! The chain is moving between two different threads; it must
 *	    be locked SHARED to retain its data mapping, not exclusive.
 *	    When multiple operations are in progress at once, chains fed
 *	    back to the frontend for collection can wind up being locked
 *	    in different orders; only a shared lock can prevent a deadlock.
 *
 *	    Exclusive locks may only be used by an XOP backend node thread
 *	    temporarily, with no direct or indirect dependencies (aka
 *	    blocking/waiting) on other nodes.
 */
int
hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
		 int clindex, int error)
{
	hammer2_xop_fifo_t *fifo;

	/*
	 * Multi-threaded entry into the XOP collector.  We own the
	 * fifo->wi for our clindex.
	 */
	fifo = &xop->collect[clindex];
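	/*
	 * The FIFO indices are free-running and masked on array access,
	 * so the ring is full when ri and wi are exactly HAMMER2_XOPFIFO
	 * apart.  Sleep until the frontend consumes entries and makes
	 * room.
	 */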
	while (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
		tsleep_interlock(xop, 0);
		if (hammer2_xop_active(xop) == 0) {
			error = EINTR;
			goto done;
		}
		if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
			tsleep(xop, PINTERLOCKED, "h2feed", hz*60);
		}
	}
	if (chain) {
		hammer2_chain_ref(chain);
		hammer2_chain_push_shared_lock(chain);
	}
	if (error == 0 && chain)
		error = chain->error;
	fifo->errors[fifo->wi & HAMMER2_XOPFIFO_MASK] = error;
	fifo->array[fifo->wi & HAMMER2_XOPFIFO_MASK] = chain;
	cpu_sfence();
	++fifo->wi;
	atomic_add_int(&xop->check_counter, 1);
	wakeup(&xop->check_counter);	/* XXX optimize */
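	/*
	 * NOTE: The frontend tsleeps on xop->check_counter in
	 *	 hammer2_xop_collect(), so the counter must be bumped
	 *	 before the wakeup for the new entry to be noticed.
	 */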
	/*
	 * Cleanup.  If an error occurred we eat the lock.  If no error
	 * occurred the fifo inherits the lock and gains an additional ref.
	 *
	 * The caller's ref remains in both cases.
	 */
done:
	return error;
}
/*
 * (Frontend) Collect a response from a running cluster op.
 *
 * Responses are fed from all appropriate nodes concurrently
 * and collected into a cohesive response >= collect_key.
 *
 * The collector will return the instant quorum or other requirements
 * are met, even if some nodes get behind or become non-responsive.
 *
 * HAMMER2_XOP_COLLECT_NOWAIT	- Used to 'poll' a completed collection,
 *				  usually called synchronously from the
 *				  node XOPs for the strategy code to
 *				  fake the frontend collection and complete
 *				  the BIO as soon as possible.
 *
 * HAMMER2_XOP_SYNCHRONIZER	- Request synchronization with a particular
 *				  cluster index, prevents looping when that
 *				  index is out of sync so caller can act on
 *				  the out of sync element.  ESRCH and EDEADLK
 *				  can be returned if this flag is specified.
 *
 * Returns 0 on success plus a filled out xop->cluster structure.
 * Return ENOENT on normal termination.
 * Otherwise return an error.
 */
int
hammer2_xop_collect(hammer2_xop_head_t *xop, int flags)
{
	hammer2_xop_fifo_t *fifo;
	hammer2_chain_t *chain;
	hammer2_key_t lokey;
	hammer2_key_t keynull;
	int error;
	int adv;		/* advance the element */
	int i;
	uint32_t check_counter;
loop:
	/*
	 * First loop tries to advance pieces of the cluster which
	 * are out of sync.
	 */
	lokey = HAMMER2_KEY_MAX;
	keynull = HAMMER2_CHECK_NULL;
	check_counter = xop->check_counter;

	for (i = 0; i < xop->cluster.nchains; ++i) {
		chain = xop->cluster.array[i].chain;
		if (chain == NULL) {
			adv = 1;
		} else if (chain->bref.key < xop->collect_key) {
			adv = 1;
		} else {
			keynull &= ~HAMMER2_CHECK_NULL;
			if (lokey > chain->bref.key)
				lokey = chain->bref.key;
			adv = 0;
		}
		if (adv == 0)
			continue;
		/*
		 * Advance element if possible, advanced element may be NULL.
		 */
		if (chain) {
			hammer2_chain_unlock(chain);
			hammer2_chain_drop(chain);
		}
		fifo = &xop->collect[i];
		if (fifo->ri != fifo->wi) {
			cpu_lfence();
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			error = fifo->errors[fifo->ri & HAMMER2_XOPFIFO_MASK];
			++fifo->ri;
			xop->cluster.array[i].chain = chain;
			xop->cluster.array[i].error = error;
			if (chain == NULL) {
				xop->cluster.array[i].flags |=
							HAMMER2_CITEM_NULL;
			}
			if (fifo->wi - fifo->ri < HAMMER2_XOPFIFO / 2)
				wakeup(xop);	/* XXX optimize */
			--i;		/* loop on same index */
		} else {
			/*
			 * Retain CITEM_NULL flag.  If set just repeat EOF.
			 * If not, the NULL,0 combination indicates an
			 * operation in-progress.
			 */
			xop->cluster.array[i].chain = NULL;
			/* retain any CITEM_NULL setting */
		}
	}
	/*
	 * Determine whether the lowest collected key meets clustering
	 * requirements.  Returns:
	 *
	 * 0	       - key valid, cluster can be returned.
	 *
	 * ENOENT      - normal end of scan, return ENOENT.
	 *
	 * ESRCH       - sufficient elements collected, quorum agreement
	 *		 that lokey is not a valid element and should be
	 *		 skipped.
	 *
	 * EDEADLK     - sufficient elements collected, no quorum agreement
	 *		 (and no agreement possible).  In this situation a
	 *		 repair is needed, for now we loop.
	 *
	 * EINPROGRESS - insufficient elements collected to resolve, wait
	 *		 for event and loop.
	 */
	if ((flags & HAMMER2_XOP_COLLECT_WAITALL) &&
	    xop->run_mask != HAMMER2_XOPMASK_VOP) {
		error = EINPROGRESS;
	} else {
		error = hammer2_cluster_check(&xop->cluster, lokey, keynull);
	}
	if (error == EINPROGRESS) {
		if (xop->check_counter == check_counter) {
			if (flags & HAMMER2_XOP_COLLECT_NOWAIT)
				goto done;
			tsleep_interlock(&xop->check_counter, 0);
			cpu_lfence();
			if (xop->check_counter == check_counter) {
				tsleep(&xop->check_counter, PINTERLOCKED,
				       "h2coll", hz*60);
			}
		}
		goto loop;
	}
	if (error == ESRCH) {
		if (lokey != HAMMER2_KEY_MAX) {
			xop->collect_key = lokey + 1;
			goto loop;
		}
		error = ENOENT;
	}
	if (error == EDEADLK) {
		kprintf("hammer2: no quorum possible lokey %016jx\n",
			lokey);
		if (lokey != HAMMER2_KEY_MAX) {
			xop->collect_key = lokey + 1;
			goto loop;
		}
		error = ENOENT;
	}
	if (lokey == HAMMER2_KEY_MAX)
		xop->collect_key = lokey;
	else
		xop->collect_key = lokey + 1;
done:
	return error;
}
/*
 * N x M processing threads are available to handle XOPs, N per cluster
 * index x M cluster nodes.  All the threads for any given cluster index
 * share and pull from the same xopq.
 *
 * Locate and return the next runnable xop, or NULL if no xops are
 * present or none of the xops are currently runnable (for various reasons).
 * The xop is left on the queue and serves to block other dependent xops
 * from being run.
 *
 * Dependent xops will not be returned.
 *
 * Sets HAMMER2_XOP_FIFO_RUN on the returned xop or returns NULL.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
#define XOP_HASH_SIZE	16
#define XOP_HASH_MASK	(XOP_HASH_SIZE - 1)
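
/*
 * The hash below is a small one-bit-per-entry filter over the inodes of
 * xops already visited on a queue scan.  A false positive merely delays
 * an xop until a later pass; it can never run a dependent xop early.
 */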
static __inline
int
xop_testhash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
	uint32_t mask;
	int hv;

	hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
	mask = 1U << (hv & 31);
	hv >>= 5;

	return ((int)(hash[hv & XOP_HASH_MASK] & mask));
}
static __inline
void
xop_sethash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
	uint32_t mask;
	int hv;

	hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
	mask = 1U << (hv & 31);
	hv >>= 5;

	hash[hv & XOP_HASH_MASK] |= mask;
}
static
hammer2_xop_head_t *
hammer2_xop_next(hammer2_thread_t *thr)
{
	hammer2_pfs_t *pmp = thr->pmp;
	int clindex = thr->clindex;
	uint32_t hash[XOP_HASH_SIZE] = { 0 };
	hammer2_xop_head_t *xop;

	hammer2_spin_ex(&pmp->xop_spin);
	TAILQ_FOREACH(xop, thr->xopq, collect[clindex].entry) {
		/*
		 * Check dependency
		 */
		if (xop_testhash(thr, xop->ip1, hash) ||
		    (xop->ip2 && xop_testhash(thr, xop->ip2, hash)) ||
		    (xop->ip3 && xop_testhash(thr, xop->ip3, hash))) {
			continue;
		}
		xop_sethash(thr, xop->ip1, hash);
		if (xop->ip2)
			xop_sethash(thr, xop->ip2, hash);
		if (xop->ip3)
			xop_sethash(thr, xop->ip3, hash);
		/*
		 * Check already running
		 */
		if (xop->collect[clindex].flags & HAMMER2_XOP_FIFO_RUN)
			continue;

		/*
		 * Found a good one, return it.
		 */
		atomic_set_int(&xop->collect[clindex].flags,
			       HAMMER2_XOP_FIFO_RUN);
		break;
	}
	hammer2_spin_unex(&pmp->xop_spin);

	return xop;
}
/*
 * Remove the completed XOP from the queue, clear HAMMER2_XOP_FIFO_RUN.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
static
void
hammer2_xop_dequeue(hammer2_thread_t *thr, hammer2_xop_head_t *xop)
{
	hammer2_pfs_t *pmp = thr->pmp;
	int clindex = thr->clindex;

	hammer2_spin_ex(&pmp->xop_spin);
	TAILQ_REMOVE(thr->xopq, xop, collect[clindex].entry);
	atomic_clear_int(&xop->collect[clindex].flags,
			 HAMMER2_XOP_FIFO_RUN);
	hammer2_spin_unex(&pmp->xop_spin);
}
/*
 * Primary management thread for xops support.  Each node has several such
 * threads which replicate front-end operations on cluster nodes.
 *
 * XOPs are distributed to the node threads, allowing each invocation to
 * focus on a single node in the cluster after validating the operation
 * with the cluster.  This is primarily what prevents dead or stalled
 * nodes from stalling the frontend.
 */
void
hammer2_primary_xops_thread(void *arg)
{
	hammer2_thread_t *thr = arg;
	hammer2_pfs_t *pmp;
	hammer2_xop_head_t *xop;
	uint32_t mask;
	hammer2_xop_func_t last_func = NULL;

	pmp = thr->pmp;
	/*xgrp = &pmp->xop_groups[thr->repidx]; not needed */
	mask = 1U << thr->clindex;
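	/*
	 * mask is this node's bit in xop->run_mask / xop->chk_mask and
	 * is handed to hammer2_xop_retire() when this thread is done
	 * with an xop.
	 */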
	lockmgr(&thr->lk, LK_EXCLUSIVE);
	while ((thr->flags & HAMMER2_THREAD_STOP) == 0) {
		/*
		 * Handle freeze request
		 */
		if (thr->flags & HAMMER2_THREAD_FREEZE) {
			atomic_set_int(&thr->flags, HAMMER2_THREAD_FROZEN);
			atomic_clear_int(&thr->flags, HAMMER2_THREAD_FREEZE);
		}

		/*
		 * Force idle if frozen until unfrozen or stopped.
		 */
		if (thr->flags & HAMMER2_THREAD_FROZEN) {
			lksleep(thr->xopq, &thr->lk, 0, "frozen", 0);
			continue;
		}
		/*
		 * Reset state on REMASTER request
		 */
		if (thr->flags & HAMMER2_THREAD_REMASTER) {
			atomic_clear_int(&thr->flags, HAMMER2_THREAD_REMASTER);
			/* reset state */
		}
		/*
		 * Process requests.  Each request can be multi-queued.
		 *
		 * If we get behind and the frontend VOP is no longer active,
		 * we retire the request without processing it.  The callback
		 * may also abort processing if the frontend VOP becomes
		 * inactive.
		 */
		tsleep_interlock(thr->xopq, 0);
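		/*
		 * The interlock is armed before the queue scan so that a
		 * wakeup arriving between the scan and the lksleep below
		 * is not lost.
		 */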
		while ((xop = hammer2_xop_next(thr)) != NULL) {
			if (hammer2_xop_active(xop)) {
				lockmgr(&thr->lk, LK_RELEASE);
				last_func = xop->func;
				xop->func((hammer2_xop_t *)xop, thr->clindex);
				hammer2_xop_dequeue(thr, xop);
				hammer2_xop_retire(xop, mask);
				lockmgr(&thr->lk, LK_EXCLUSIVE);
			} else {
				last_func = xop->func;
				hammer2_xop_feed(xop, NULL, thr->clindex,
						 ECONNABORTED);
				hammer2_xop_dequeue(thr, xop);
				hammer2_xop_retire(xop, mask);
			}
		}
		/*
		 * Wait for event.  The xopq is not interlocked by thr->lk;
		 * use the tsleep interlock sequence.
		 *
		 * For robustness, poll on a 30-second interval, but nominally
		 * we expect to be woken up.
		 */
		lksleep(thr->xopq, &thr->lk, PINTERLOCKED, "h2idle", hz*30);
	}

	/*
	 * Cleanup / termination
	 */
	while ((xop = TAILQ_FIRST(thr->xopq)) != NULL) {
		kprintf("hammer2_thread: aborting xop %p\n", xop->func);
		TAILQ_REMOVE(thr->xopq, xop,
			     collect[thr->clindex].entry);
		hammer2_xop_retire(xop, mask);
	}
	thr->td = NULL;
	wakeup(thr);
	lockmgr(&thr->lk, LK_RELEASE);
	/* thr structure can go invalid after this point */
	lwkt_exit();
}