 * Copyright (c) 2015 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements the hammer2 helper thread API, including
 * the frontend/backend XOP API.
 */
/*
 * Set flags and wakeup any waiters.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_signal(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		nflags = (oflags | flags) & ~HAMMER2_THREAD_WAITING;

		if (oflags & HAMMER2_THREAD_WAITING) {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
				wakeup(&thr->flags);
				break;
			}
		} else {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags))
				break;
		}
	}
}

/*
 * Set and clear flags and wakeup any waiters.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_signal2(hammer2_thread_t *thr, uint32_t posflags, uint32_t negflags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		nflags = (oflags | posflags) &
			~(negflags | HAMMER2_THREAD_WAITING);
		if (oflags & HAMMER2_THREAD_WAITING) {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
				wakeup(&thr->flags);
				break;
			}
		} else {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags))
				break;
		}
	}
}

/*
 * Wait until all the bits in flags are set.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_wait(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if ((oflags & flags) == flags)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60);
		}
	}
}
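
/*
 * Commentary on the wait/signal handshake above: the waiter arms the
 * tsleep interlock first, then advertises itself by setting
 * HAMMER2_THREAD_WAITING with a cmpset.  If the cmpset fails, another
 * thread raced an update of ->flags and the loop simply re-tests.
 * The signal side clears WAITING and calls wakeup() only when it
 * observed WAITING set, so a wakeup cannot be lost between the flag
 * test and the tsleep.  A minimal sketch of the same pattern against
 * a hypothetical flag word 'word' and condition COND():
 *
 *	for (;;) {
 *		uint32_t w = word;
 *		cpu_ccfence();
 *		if (COND(w))
 *			break;
 *		tsleep_interlock(&word, 0);
 *		if (atomic_cmpset_int(&word, w, w | WAITING))
 *			tsleep(&word, PINTERLOCKED, "wait", hz);
 *	}
 */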
/*
 * Wait until any of the bits in flags are set, with timeout.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
int
hammer2_thr_wait_any(hammer2_thread_t *thr, uint32_t flags, int timo)
{
	uint32_t oflags;
	uint32_t nflags;
	int error;

	error = 0;
	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if (oflags & flags)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			error = tsleep(&thr->flags, PINTERLOCKED,
				       "h2twait", timo);
		}
		if (error == ETIMEDOUT) {
			error = HAMMER2_ERROR_ETIMEDOUT;
			break;
		}
	}
	return error;
}

/*
 * Wait until the bits in flags are clear.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_wait_neg(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if ((oflags & flags) == 0)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60);
		}
	}
}

/*
 * Initialize the supplied thread structure, starting the specified
 * thread.
 *
 * NOTE: thr structure can be retained across mounts and unmounts for this
 *	 pmp, so make sure the flags are in a sane state.
 */
void
hammer2_thr_create(hammer2_thread_t *thr, hammer2_pfs_t *pmp,
		   hammer2_dev_t *hmp,
		   const char *id, int clindex, int repidx,
		   void (*func)(void *arg))
{
	thr->pmp = pmp;		/* xop helpers */
	thr->hmp = hmp;		/* bulkfree */
	thr->clindex = clindex;
	thr->repidx = repidx;
	TAILQ_INIT(&thr->xopq);
	atomic_clear_int(&thr->flags, HAMMER2_THREAD_STOP |
				      HAMMER2_THREAD_STOPPED |
				      HAMMER2_THREAD_FREEZE |
				      HAMMER2_THREAD_FROZEN);
	if (thr->scratch == NULL)
		thr->scratch = kmalloc(MAXPHYS, M_HAMMER2, M_WAITOK | M_ZERO);
	if (repidx >= 0) {
		lwkt_create(func, thr, &thr->td, NULL, 0, repidx % ncpus,
			    "%s-%s.%02d", id, pmp->pfs_names[clindex], repidx);
	} else if (pmp) {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1,
			    "%s-%s", id, pmp->pfs_names[clindex]);
	} else {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1, "%s", id);
	}
}
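
/*
 * Commentary: for replicated xop workers (repidx >= 0) lwkt_create()
 * is handed cpu (repidx % ncpus), spreading the xop groups round-robin
 * across all cpus, with the group index encoded in the thread name as
 * "<id>-<pfsname>.<repidx>".  The -1 passed in the other two cases is
 * taken here to mean no specific cpu binding, leaving placement to the
 * scheduler.
 */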
/*
 * Terminate a thread.  This function will silently return if the thread
 * was never initialized or has already been deleted.
 *
 * This is accomplished by setting the STOP flag and waiting for the td
 * structure to become NULL.
 */
void
hammer2_thr_delete(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_STOP);
	hammer2_thr_wait(thr, HAMMER2_THREAD_STOPPED);
	thr->pmp = NULL;
	if (thr->scratch) {
		kfree(thr->scratch, M_HAMMER2);
		thr->scratch = NULL;
	}
	KKASSERT(TAILQ_EMPTY(&thr->xopq));
}

/*
 * Asynchronous remaster request.  Ask the synchronization thread to
 * start over soon (as if it were frozen and unfrozen, but without waiting).
 * The thread always recalculates mastership relationships when restarting.
 */
void
hammer2_thr_remaster(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_REMASTER);
}

void
hammer2_thr_freeze_async(hammer2_thread_t *thr)
{
	hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
}

void
hammer2_thr_freeze(hammer2_thread_t *thr)
{
	hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
	hammer2_thr_wait(thr, HAMMER2_THREAD_FROZEN);
}

void
hammer2_thr_unfreeze(hammer2_thread_t *thr)
{
	hammer2_thr_signal(thr, HAMMER2_THREAD_UNFREEZE);
	hammer2_thr_wait_neg(thr, HAMMER2_THREAD_FROZEN);
}

int
hammer2_thr_break(hammer2_thread_t *thr)
{
	if (thr->flags & (HAMMER2_THREAD_STOP |
			  HAMMER2_THREAD_REMASTER |
			  HAMMER2_THREAD_FREEZE)) {
		return 1;
	}
	return 0;
}
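
/*
 * Commentary: hammer2_thr_break() is the polling side of the control
 * flags set by the functions above.  Long-running helpers call it
 * periodically and unwind when a STOP, REMASTER, or FREEZE request is
 * pending, so the thread's main loop (see hammer2_primary_xops_thread()
 * below) can act on the request.
 */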
/****************************************************************************
 *			    HAMMER2 XOPS API				    *
 ****************************************************************************/
void
hammer2_xop_group_init(hammer2_pfs_t *pmp, hammer2_xop_group_t *xgrp)
{
	/* no extra fields in structure at the moment */
}

/*
 * Allocate a XOP request.
 *
 * Once allocated a XOP request can be started, collected, and retired,
 * and can be retired early if desired.
 *
 * NOTE: Fifo indices might not be zero but ri == wi on objcache_get().
 */
hammer2_xop_t *
hammer2_xop_alloc(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_t *xop;

	xop = objcache_get(cache_xops, M_WAITOK);
	KKASSERT(xop->head.cluster.array[0].chain == NULL);

	xop->head.ip1 = ip;
	xop->head.func = NULL;
	xop->head.flags = flags;
	xop->head.state = 0;
	xop->head.error = 0;
	xop->head.collect_key = 0;
	xop->head.check_counter = 0;
	if (flags & HAMMER2_XOP_MODIFYING)
		xop->head.mtid = hammer2_trans_sub(ip->pmp);
	else
		xop->head.mtid = 0;

	xop->head.cluster.nchains = ip->cluster.nchains;
	xop->head.cluster.pmp = ip->pmp;
	xop->head.cluster.flags = HAMMER2_CLUSTER_LOCKED;

	/*
	 * run_mask - Active thread (or frontend) associated with XOP
	 */
	xop->head.run_mask = HAMMER2_XOPMASK_VOP;

	hammer2_inode_ref(ip);

	return xop;
}
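
/*
 * Commentary - typical frontend usage pattern, sketched from the API in
 * this file (the backend function name is a placeholder for one of the
 * hammer2_xop_*() backends implemented elsewhere, not a specific
 * consumer):
 *
 *	hammer2_xop_t *xop;
 *	int error;
 *
 *	xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
 *	hammer2_xop_setname(&xop->head, name, name_len);
 *	hammer2_xop_start(&xop->head, hammer2_xop_somefunc);
 *	for (;;) {
 *		error = hammer2_xop_collect(&xop->head, 0);
 *		if (error)
 *			break;
 *		... consume xop->head.cluster ...
 *	}
 *	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */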
void
hammer2_xop_setname(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
	xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name1_len = name_len;
	bcopy(name, xop->name1, name_len);
}

void
hammer2_xop_setname2(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
	xop->name2 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name2_len = name_len;
	bcopy(name, xop->name2, name_len);
}

void
hammer2_xop_setname_inum(hammer2_xop_head_t *xop, hammer2_key_t inum)
{
	const size_t name_len = 18;

	xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name1_len = name_len;
	ksnprintf(xop->name1, name_len + 1, "0x%016jx", (intmax_t)inum);
}
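
/*
 * Commentary: name_len is exactly 18 because the generated name is
 * "0x" (2 chars) followed by 16 zero-padded hex digits of the 64-bit
 * inum; the +1 passed to kmalloc()/ksnprintf() covers the NUL.
 */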
void
hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2)
{
	xop->ip2 = ip2;
	hammer2_inode_ref(ip2);
}

void
hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3)
{
	xop->ip3 = ip3;
	hammer2_inode_ref(ip3);
}

void
hammer2_xop_reinit(hammer2_xop_head_t *xop)
{
	xop->state = 0;
	xop->error = 0;
	xop->collect_key = 0;
	xop->run_mask = HAMMER2_XOPMASK_VOP;
}

/*
 * A mounted PFS needs Xops threads to support frontend operations.
 */
void
hammer2_xop_helper_create(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	lockmgr(&pmp->lock, LK_EXCLUSIVE);
	pmp->has_xop_threads = 1;

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				continue;
			hammer2_thr_create(&pmp->xop_groups[j].thrs[i],
					   pmp, NULL,
					   "h2xop", i, j,
					   hammer2_primary_xops_thread);
		}
	}
	lockmgr(&pmp->lock, LK_RELEASE);
}

void
hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	for (i = 0; i < pmp->pfs_nmasters; ++i) {
		for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]);
		}
	}
	pmp->has_xop_threads = 0;
}

/*
 * Start a XOP request, queueing it to all nodes in the cluster to
 * execute the cluster op.
 *
 * XXX optimize single-target case.
 */
void
hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_func_t func,
			 int notidx)
{
	hammer2_inode_t *ip1;
	hammer2_pfs_t *pmp;
	hammer2_thread_t *thr;
	int i;
	int ng;
	int nchains;

	ip1 = xop->ip1;
	pmp = ip1->pmp;
	xop->func = func;
	if (pmp->has_xop_threads == 0)
		hammer2_xop_helper_create(pmp);
	/*
	 * The intent of the XOP sequencer is to ensure that ops on the same
	 * inode execute in the same order.  This is necessary when issuing
	 * modifying operations to multiple targets because some targets might
	 * get behind and the frontend is allowed to complete the moment a
	 * quorum of targets succeed.
	 *
	 * Strategy operations must be segregated from non-strategy operations
	 * to avoid a deadlock.  For example, if a vfsync and a bread/bwrite
	 * were queued to the same worker thread, the locked buffer in the
	 * strategy operation can deadlock the vfsync's buffer list scan.
	 *
	 * TODO - RENAME fails here because it is potentially modifying
	 *	  three different inodes.
	 */
	if (xop->flags & HAMMER2_XOP_STRATEGY) {
		hammer2_xop_strategy_t *xopst;

		xopst = &((hammer2_xop_t *)xop)->xop_strategy;
		ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)) ^
			   hammer2_icrc32(&xopst->lbase, sizeof(xopst->lbase)));
		ng = ng & (HAMMER2_XOPGROUPS_MASK >> 1);
		ng += HAMMER2_XOPGROUPS / 2;
	} else {
		ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)));
		ng = ng & (HAMMER2_XOPGROUPS_MASK >> 1);
	}
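
	/*
	 * Commentary: the hash above partitions the xop groups in half.
	 * Non-strategy ops select a group in the lower half (the mask is
	 * HAMMER2_XOPGROUPS_MASK >> 1), while strategy ops hash into the
	 * same range and are then offset by HAMMER2_XOPGROUPS / 2 into
	 * the upper half.  A strategy op therefore never shares a worker
	 * queue with a non-strategy op, which is what prevents the
	 * vfsync-vs-strategy deadlock described above.  Strategy ops also
	 * mix lbase into the hash so I/O to different file offsets can
	 * spread across groups instead of serializing on the inode.
	 */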
	/*
	 * The instant xop is queued another thread can pick it off.  In the
	 * case of asynchronous ops, another thread might even finish and
	 * deallocate it.
	 */
	hammer2_spin_ex(&pmp->xop_spin);
	nchains = ip1->cluster.nchains;
	for (i = 0; i < nchains; ++i) {
		/*
		 * XXX ip1->cluster.array* not stable here.  This temporary
		 *     hack fixes basic issues in target XOPs which need to
		 *     obtain a starting chain from the inode but does not
		 *     address possible races against inode updates which
		 *     might NULL-out a chain.
		 */
		if (i != notidx && ip1->cluster.array[i].chain) {
			thr = &pmp->xop_groups[ng].thrs[i];
			atomic_set_int(&xop->run_mask, 1U << i);
			atomic_set_int(&xop->chk_mask, 1U << i);
			xop->collect[i].thr = thr;
			TAILQ_INSERT_TAIL(&thr->xopq, xop, collect[i].entry);
		}
	}
	hammer2_spin_unex(&pmp->xop_spin);
	/* xop can become invalid at this point */
	/*
	 * Each thread has its own xopq
	 */
	for (i = 0; i < nchains; ++i) {
		if (i != notidx) {
			thr = &pmp->xop_groups[ng].thrs[i];
			hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
		}
	}
}
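
/*
 * Commentary: the XOPQ signal is deliberately issued after xop_spin is
 * released and without touching the xop again.  A worker may dequeue,
 * execute, and retire the xop the instant it becomes visible on the
 * queue, so from this point only the per-thread wakeup is safe; a
 * spurious HAMMER2_THREAD_XOPQ signal merely costs an extra scan of
 * the thread's queue.
 */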
void
hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_func_t func)
{
	hammer2_xop_start_except(xop, func, -1);
}

/*
 * Retire a XOP.  Used by both the VOP frontend and by the XOP backend.
 */
void
hammer2_xop_retire(hammer2_xop_head_t *xop, uint32_t mask)
{
	hammer2_chain_t *chain;
	uint32_t nmask;
	int i;

	/*
	 * Remove the frontend collector or remove a backend feeder.
	 * When removing the frontend we must wakeup any backend feeders
	 * who are waiting for FIFO space.
	 *
	 * XXX optimize wakeup.
	 */
	KKASSERT(xop->run_mask & mask);
	nmask = atomic_fetchadd_int(&xop->run_mask, -mask);
	if ((nmask & ~HAMMER2_XOPMASK_FIFOW) != mask) {
		if (mask == HAMMER2_XOPMASK_VOP) {
			if (nmask & HAMMER2_XOPMASK_FIFOW)
				wakeup(xop);
		}
		return;
	}
	/* else nobody else left, we can ignore FIFOW */

	/*
	 * All collectors are gone, we can cleanup and dispose of the XOP.
	 * Note that this can wind up being a frontend OR a backend.
	 * Pending chains are locked shared and not owned by any thread.
	 *
	 * Cleanup the collection cluster.
	 */
	for (i = 0; i < xop->cluster.nchains; ++i) {
		xop->cluster.array[i].flags = 0;
		chain = xop->cluster.array[i].chain;
		if (chain) {
			xop->cluster.array[i].chain = NULL;
			hammer2_chain_drop_unhold(chain);
		}
	}

	/*
	 * Cleanup the fifos, use check_counter to optimize the loop.
	 * Since we are the only entity left on this xop we don't have
	 * to worry about fifo flow control, and one lfence() will do the
	 * job.
	 */
	cpu_lfence();
	mask = xop->chk_mask;
	for (i = 0; mask && i < HAMMER2_MAXCLUSTER; ++i) {
		hammer2_xop_fifo_t *fifo = &xop->collect[i];
		while (fifo->ri != fifo->wi) {
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			if (chain)
				hammer2_chain_drop_unhold(chain);
			++fifo->ri;
		}
		mask &= ~(1U << i);
	}

	/*
	 * The inode is only held at this point, simply drop it.
	 */
	if (xop->ip1) {
		hammer2_inode_drop(xop->ip1);
		xop->ip1 = NULL;
	}
	if (xop->ip2) {
		hammer2_inode_drop(xop->ip2);
		xop->ip2 = NULL;
	}
	if (xop->ip3) {
		hammer2_inode_drop(xop->ip3);
		xop->ip3 = NULL;
	}
	if (xop->name1) {
		kfree(xop->name1, M_HAMMER2);
		xop->name1 = NULL;
		xop->name1_len = 0;
	}
	if (xop->name2) {
		kfree(xop->name2, M_HAMMER2);
		xop->name2 = NULL;
		xop->name2_len = 0;
	}

	objcache_put(cache_xops, xop);
}

/*
 * (Backend) Returns non-zero if the frontend is still attached.
 */
int
hammer2_xop_active(hammer2_xop_head_t *xop)
{
	if (xop->run_mask & HAMMER2_XOPMASK_VOP)
		return 1;
	else
		return 0;
}

/*
 * (Backend) Feed chain data through the cluster validator and back to
 * the frontend.  Chains are fed from multiple nodes concurrently
 * and pipelined via per-node FIFOs in the XOP.
 *
 * The chain must be locked (either shared or exclusive).  The caller may
 * unlock and drop the chain on return.  This function will add an extra
 * ref and hold the chain's data for the pass-back.
 *
 * No xop lock is needed because we are only manipulating fields under
 * our direct control.
 *
 * Returns 0 on success and a hammer error code if sync is permanently
 * lost.  The caller retains a ref on the chain but by convention
 * the lock is typically inherited by the xop (caller loses lock).
 *
 * Returns non-zero on error.  In this situation the caller retains a
 * ref on the chain but loses the lock (we unlock here).
 */
int
hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
		 int clindex, int error)
{
	hammer2_xop_fifo_t *fifo;
	uint32_t mask;

	/*
	 * Early termination (typically of xop_readdir)
	 */
	if (hammer2_xop_active(xop) == 0) {
		error = HAMMER2_ERROR_ABORTED;
		goto done;
	}

	/*
	 * Multi-threaded entry into the XOP collector.  We own the
	 * fifo->wi for our clindex.
	 */
	fifo = &xop->collect[clindex];
	if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO)
		lwkt_yield();
	while (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
		atomic_set_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
		mask = xop->run_mask;
		if ((mask & HAMMER2_XOPMASK_VOP) == 0) {
			error = HAMMER2_ERROR_ABORTED;
			goto done;
		}
		tsleep_interlock(xop, 0);
		if (atomic_cmpset_int(&xop->run_mask, mask,
				      mask | HAMMER2_XOPMASK_FIFOW)) {
			if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO)
				tsleep(xop, PINTERLOCKED, "h2feed", hz*60);
		}
		/* retry */
	}
	atomic_clear_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
	if (chain)
		hammer2_chain_ref_hold(chain);
	if (error == 0 && chain)
		error = chain->error;
	fifo->errors[fifo->wi & HAMMER2_XOPFIFO_MASK] = error;
	fifo->array[fifo->wi & HAMMER2_XOPFIFO_MASK] = chain;
	cpu_sfence();
	++fifo->wi;

	if (atomic_fetchadd_int(&xop->check_counter, HAMMER2_XOP_CHKINC) &
	    HAMMER2_XOP_CHKWAIT) {
		atomic_clear_int(&xop->check_counter, HAMMER2_XOP_CHKWAIT);
		wakeup(&xop->check_counter);
	}
	error = 0;
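
	/*
	 * Commentary on the check_counter handshake: every queued element
	 * adds HAMMER2_XOP_CHKINC, so the counter value changes whenever
	 * new data arrives.  A collector with nothing to do arms its
	 * tsleep interlock and then cmpsets CHKWAIT against the counter
	 * value it sampled; if a feed slipped in, the cmpset fails and
	 * the collector loops instead of sleeping.  The fetchadd above
	 * returns the prior value, so a feeder that observes CHKWAIT
	 * clears it and issues the wakeup.
	 */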
	/*
	 * Cleanup.  If an error occurred we eat the lock.  If no error
	 * occurred the fifo inherits the lock and gains an additional ref.
	 *
	 * The caller's ref remains in both cases.
	 */
done:
	return error;
}

/*
 * (Frontend) collect a response from a running cluster op.
 *
 * Responses are fed from all appropriate nodes concurrently
 * and collected into a cohesive response >= collect_key.
 *
 * The collector will return the instant quorum or other requirements
 * are met, even if some nodes get behind or become non-responsive.
 *
 * HAMMER2_XOP_COLLECT_NOWAIT	- Used to 'poll' a completed collection,
 *				  usually called synchronously from the
 *				  node XOPs for the strategy code to
 *				  fake the frontend collection and complete
 *				  the BIO as soon as possible.
 *
 * HAMMER2_XOP_SYNCHRONIZER	- Request synchronization with a particular
 *				  cluster index, prevents looping when that
 *				  index is out of sync so caller can act on
 *				  the out of sync element.  ESRCH and EDEADLK
 *				  can be returned if this flag is specified.
 *
 * Returns 0 on success plus a filled out xop->cluster structure.
 * Return ENOENT on normal termination.
 * Otherwise return an error.
 */
int
hammer2_xop_collect(hammer2_xop_head_t *xop, int flags)
{
	hammer2_xop_fifo_t *fifo;
	hammer2_chain_t *chain;
	hammer2_key_t lokey;
	int error;
	int keynull;
	int adv;		/* advance the element */
	int i;
	uint32_t check_counter;

loop:
	/*
	 * First loop tries to advance pieces of the cluster which
	 * are out of sync.
	 */
	lokey = HAMMER2_KEY_MAX;
	keynull = HAMMER2_CHECK_NULL;
	check_counter = xop->check_counter;
	cpu_lfence();

	for (i = 0; i < xop->cluster.nchains; ++i) {
		chain = xop->cluster.array[i].chain;
		if (chain == NULL) {
			adv = 1;
		} else if (chain->bref.key < xop->collect_key) {
			adv = 1;
		} else {
			keynull &= ~HAMMER2_CHECK_NULL;
			if (lokey > chain->bref.key)
				lokey = chain->bref.key;
			adv = 0;
		}
		if (adv == 0)
			continue;

		/*
		 * Advance element if possible, advanced element may be NULL.
		 */
		if (chain)
			hammer2_chain_drop_unhold(chain);

		fifo = &xop->collect[i];
		if (fifo->ri != fifo->wi) {
			cpu_lfence();
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			error = fifo->errors[fifo->ri & HAMMER2_XOPFIFO_MASK];
			++fifo->ri;
			xop->cluster.array[i].chain = chain;
			xop->cluster.array[i].error = error;
			if (chain == NULL) {
				xop->cluster.array[i].flags |=
							HAMMER2_CITEM_NULL;
			}
			if (fifo->wi - fifo->ri <= HAMMER2_XOPFIFO / 2) {
				if (fifo->flags & HAMMER2_XOP_FIFO_STALL) {
					atomic_clear_int(&fifo->flags,
						    HAMMER2_XOP_FIFO_STALL);
					wakeup(xop);
				}
			}
			--i;		/* loop on same index */
		} else {
			/*
			 * Retain CITEM_NULL flag.  If set just repeat EOF.
			 * If not, the NULL,0 combination indicates an
			 * operation in-progress.
			 */
			xop->cluster.array[i].chain = NULL;
			/* retain any CITEM_NULL setting */
		}
	}

	/*
	 * Determine whether the lowest collected key meets clustering
	 * requirements.  Returns:
	 *
	 * 0	       - key valid, cluster can be returned.
	 *
	 * ENOENT      - normal end of scan, return ENOENT.
	 *
	 * ESRCH       - sufficient elements collected, quorum agreement
	 *		 that lokey is not a valid element and should be
	 *		 skipped.
	 *
	 * EDEADLK     - sufficient elements collected, no quorum agreement
	 *		 (and no agreement possible).  In this situation a
	 *		 repair is needed, for now we loop.
	 *
	 * EINPROGRESS - insufficient elements collected to resolve, wait
	 *		 for event and loop.
	 */
	if ((flags & HAMMER2_XOP_COLLECT_WAITALL) &&
	    xop->run_mask != HAMMER2_XOPMASK_VOP) {
		error = HAMMER2_ERROR_EINPROGRESS;
	} else {
		error = hammer2_cluster_check(&xop->cluster, lokey, keynull);
	}
	if (error == HAMMER2_ERROR_EINPROGRESS) {
		if ((flags & HAMMER2_XOP_COLLECT_NOWAIT) == 0)
			tsleep_interlock(&xop->check_counter, 0);
		if (atomic_cmpset_int(&xop->check_counter,
				      check_counter,
				      check_counter | HAMMER2_XOP_CHKWAIT)) {
			if (flags & HAMMER2_XOP_COLLECT_NOWAIT)
				goto done;
			tsleep(&xop->check_counter, PINTERLOCKED, "h2coll", hz*60);
		}
		goto loop;
	}
	if (error == HAMMER2_ERROR_ESRCH) {
		if (lokey != HAMMER2_KEY_MAX) {
			xop->collect_key = lokey + 1;
			goto loop;
		}
		error = HAMMER2_ERROR_ENOENT;
	}
	if (error == HAMMER2_ERROR_EDEADLK) {
		kprintf("hammer2: no quorum possible lokey %016jx\n",
			(intmax_t)lokey);
		if (lokey != HAMMER2_KEY_MAX) {
			xop->collect_key = lokey + 1;
			goto loop;
		}
		error = HAMMER2_ERROR_ENOENT;
	}
	if (lokey == HAMMER2_KEY_MAX)
		xop->collect_key = lokey;
	else
		xop->collect_key = lokey + 1;
done:
	return error;
}

/*
 * N x M processing threads are available to handle XOPs, N per cluster
 * index x M cluster nodes.
 *
 * Locate and return the next runnable xop, or NULL if no xops are
 * present or none of the xops are currently runnable (for various reasons).
 * The xop is left on the queue and serves to block other dependent xops
 * from being run.
 *
 * Dependent xops will not be returned.
 *
 * Sets HAMMER2_XOP_FIFO_RUN on the returned xop or returns NULL.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
#define XOP_HASH_SIZE	16
#define XOP_HASH_MASK	(XOP_HASH_SIZE - 1)
static __inline
int
xop_testhash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
	uint32_t mask;
	int hv;

	hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
	mask = 1U << (hv & 31);
	hv >>= 5;

	return ((int)(hash[hv & XOP_HASH_MASK] & mask));
}

static __inline
void
xop_sethash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
	uint32_t mask;
	int hv;

	hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
	mask = 1U << (hv & 31);
	hv >>= 5;

	hash[hv & XOP_HASH_MASK] |= mask;
}
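
/*
 * Commentary: hash[] (XOP_HASH_SIZE words of 32 bits, 512 bits total)
 * is a small Bloom-style filter over the inodes seen while scanning
 * the queue in hammer2_xop_next() below.  Walking the queue in order,
 * the scan records the inodes of each xop that passes the dependency
 * test (including the one finally returned), so any later xop touching
 * one of those inodes tests as dependent and stays queued.  False
 * positives (distinct inodes mapping to the same bit) only delay an
 * xop; they can never reorder dependent operations.
 */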
static
hammer2_xop_head_t *
hammer2_xop_next(hammer2_thread_t *thr)
{
	hammer2_pfs_t *pmp = thr->pmp;
	int clindex = thr->clindex;
	uint32_t hash[XOP_HASH_SIZE] = { 0 };
	hammer2_xop_head_t *xop;

	hammer2_spin_ex(&pmp->xop_spin);
	TAILQ_FOREACH(xop, &thr->xopq, collect[clindex].entry) {
		/*
		 * Check dependency
		 */
		if (xop_testhash(thr, xop->ip1, hash) ||
		    (xop->ip2 && xop_testhash(thr, xop->ip2, hash)) ||
		    (xop->ip3 && xop_testhash(thr, xop->ip3, hash))) {
			continue;
		}
		xop_sethash(thr, xop->ip1, hash);
		if (xop->ip2)
			xop_sethash(thr, xop->ip2, hash);
		if (xop->ip3)
			xop_sethash(thr, xop->ip3, hash);

		/*
		 * Check already running
		 */
		if (xop->collect[clindex].flags & HAMMER2_XOP_FIFO_RUN)
			continue;

		/*
		 * Found a good one, return it.
		 */
		atomic_set_int(&xop->collect[clindex].flags,
			       HAMMER2_XOP_FIFO_RUN);
		break;
	}
	hammer2_spin_unex(&pmp->xop_spin);

	return xop;
}

/*
 * Remove the completed XOP from the queue, clear HAMMER2_XOP_FIFO_RUN.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
static
void
hammer2_xop_dequeue(hammer2_thread_t *thr, hammer2_xop_head_t *xop)
{
	hammer2_pfs_t *pmp = thr->pmp;
	int clindex = thr->clindex;

	hammer2_spin_ex(&pmp->xop_spin);
	TAILQ_REMOVE(&thr->xopq, xop, collect[clindex].entry);
	atomic_clear_int(&xop->collect[clindex].flags,
			 HAMMER2_XOP_FIFO_RUN);
	hammer2_spin_unex(&pmp->xop_spin);
	if (TAILQ_FIRST(&thr->xopq))
		hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
}
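
/*
 * Commentary: the trailing re-signal matters because the worker clears
 * HAMMER2_THREAD_XOPQ before draining its queue (see the main loop
 * below).  Clearing FIFO_RUN here may have just unblocked an xop that
 * hammer2_xop_next() previously skipped, so if anything remains queued
 * the thread re-arms its own XOPQ flag rather than risk sleeping with
 * runnable work present.
 */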
/*
 * Primary management thread for xops support.  Each node has several such
 * threads which replicate front-end operations on cluster nodes.
 *
 * XOPS threads run node operations, allowing the backend function to focus
 * on a single node in the cluster after validating the operation with the
 * cluster.  This is primarily what prevents dead or stalled nodes from
 * stalling the frontend.
 */
void
hammer2_primary_xops_thread(void *arg)
{
	hammer2_thread_t *thr = arg;
	hammer2_xop_head_t *xop;
	uint32_t mask;
	uint32_t flags;
	uint32_t nflags;
	hammer2_xop_func_t last_func = NULL;

	/*xgrp = &pmp->xop_groups[thr->repidx]; not needed */
	mask = 1U << thr->clindex;

	for (;;) {
		flags = thr->flags;

		/*
		 * Handle stop request
		 */
		if (flags & HAMMER2_THREAD_STOP)
			break;

		/*
		 * Handle freeze request
		 */
		if (flags & HAMMER2_THREAD_FREEZE) {
			hammer2_thr_signal2(thr, HAMMER2_THREAD_FROZEN,
						 HAMMER2_THREAD_FREEZE);
			continue;
		}

		if (flags & HAMMER2_THREAD_UNFREEZE) {
			hammer2_thr_signal2(thr, 0,
						 HAMMER2_THREAD_FROZEN |
						 HAMMER2_THREAD_UNFREEZE);
			continue;
		}

		/*
		 * Force idle if frozen until unfrozen or stopped.
		 */
		if (flags & HAMMER2_THREAD_FROZEN) {
			hammer2_thr_wait_any(thr,
					     HAMMER2_THREAD_UNFREEZE |
					     HAMMER2_THREAD_STOP,
					     0);
			continue;
		}

		/*
		 * Reset state on REMASTER request
		 */
		if (flags & HAMMER2_THREAD_REMASTER) {
			hammer2_thr_signal2(thr, 0, HAMMER2_THREAD_REMASTER);
			/* reset state here */
			continue;
		}

		/*
		 * Process requests.  Each request can be multi-queued.
		 *
		 * If we get behind and the frontend VOP is no longer active,
		 * we retire the request without processing it.  The callback
		 * may also abort processing if the frontend VOP becomes
		 * inactive.
		 */
		if (flags & HAMMER2_THREAD_XOPQ) {
			nflags = flags & ~HAMMER2_THREAD_XOPQ;
			if (!atomic_cmpset_int(&thr->flags, flags, nflags))
				continue;
			flags = nflags;
			/* fall through */
		}
		while ((xop = hammer2_xop_next(thr)) != NULL) {
			if (hammer2_xop_active(xop)) {
				last_func = xop->func;
				xop->func(thr, (hammer2_xop_t *)xop);
				hammer2_xop_dequeue(thr, xop);
				hammer2_xop_retire(xop, mask);
			} else {
				last_func = xop->func;
				hammer2_xop_feed(xop, NULL, thr->clindex,
						 HAMMER2_ERROR_ABORTED);
				hammer2_xop_dequeue(thr, xop);
				hammer2_xop_retire(xop, mask);
			}
		}

		/*
		 * Wait for event, interlock using THREAD_WAITING and
		 * THREAD_SIGNAL.
		 *
		 * For robustness poll on a 30-second interval, but nominally
		 * expect to be woken up.
		 */
		nflags = flags | HAMMER2_THREAD_WAITING;

		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, flags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2idle", hz*30);
		}
	}

	/*
	 * Cleanup / termination
	 */
	while ((xop = TAILQ_FIRST(&thr->xopq)) != NULL) {
		kprintf("hammer2_thread: aborting xop %p\n", xop->func);
		TAILQ_REMOVE(&thr->xopq, xop,
			     collect[thr->clindex].entry);
		hammer2_xop_retire(xop, mask);
	}

	hammer2_thr_signal(thr, HAMMER2_THREAD_STOPPED);
	/* thr structure can go invalid after this point */
}