/*
 * Copyright (c) 2015-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements the hammer2 helper thread API, including
 * the frontend/backend XOP API.
 */
#include "hammer2.h"
#define H2XOPDESCRIPTOR(label)						\
	hammer2_xop_desc_t hammer2_##label##_desc = {			\
		.storage_func = hammer2_xop_##label,			\
		.id = #label						\
	}

H2XOPDESCRIPTOR(ipcluster);
H2XOPDESCRIPTOR(readdir);
H2XOPDESCRIPTOR(nresolve);
H2XOPDESCRIPTOR(unlink);
H2XOPDESCRIPTOR(nrename);
H2XOPDESCRIPTOR(scanlhc);
H2XOPDESCRIPTOR(scanall);
H2XOPDESCRIPTOR(lookup);
H2XOPDESCRIPTOR(delete);
H2XOPDESCRIPTOR(inode_mkdirent);
H2XOPDESCRIPTOR(inode_create);
H2XOPDESCRIPTOR(inode_create_det);
H2XOPDESCRIPTOR(inode_create_ins);
H2XOPDESCRIPTOR(inode_destroy);
H2XOPDESCRIPTOR(inode_chain_sync);
H2XOPDESCRIPTOR(inode_unlinkall);
H2XOPDESCRIPTOR(inode_connect);
H2XOPDESCRIPTOR(inode_flush);
H2XOPDESCRIPTOR(strategy_read);
H2XOPDESCRIPTOR(strategy_write);
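
/*
 * Editorial note (illustrative, not part of the build): given the macro
 * above, H2XOPDESCRIPTOR(readdir) expands to roughly
 *
 *	hammer2_xop_desc_t hammer2_readdir_desc = {
 *		.storage_func = hammer2_xop_readdir,
 *		.id = "readdir"
 *	};
 *
 * so every backend function gets a named descriptor that frontends pass
 * to hammer2_xop_start().
 */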

/*
 * Set flags and wakeup any waiters.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_signal(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		nflags = (oflags | flags) & ~HAMMER2_THREAD_WAITING;

		if (oflags & HAMMER2_THREAD_WAITING) {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
				wakeup(&thr->flags);
				break;
			}
		} else {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags))
				break;
		}
	}
}

/*
 * Set and clear flags and wakeup any waiters.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_signal2(hammer2_thread_t *thr, uint32_t posflags, uint32_t negflags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		nflags = (oflags | posflags) &
			~(negflags | HAMMER2_THREAD_WAITING);
		if (oflags & HAMMER2_THREAD_WAITING) {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
				wakeup(&thr->flags);
				break;
			}
		} else {
			if (atomic_cmpset_int(&thr->flags, oflags, nflags))
				break;
		}
	}
}

/*
 * Wait until all the bits in flags are set.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_wait(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if ((oflags & flags) == flags)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60);
		}
	}
}

/*
 * Wait until any of the bits in flags are set, with timeout.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
int
hammer2_thr_wait_any(hammer2_thread_t *thr, uint32_t flags, int timo)
{
	uint32_t oflags;
	uint32_t nflags;
	int error;

	error = 0;
	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if (oflags & flags)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			error = tsleep(&thr->flags, PINTERLOCKED,
				       "h2twait", timo);
		}
		if (error == ETIMEDOUT) {
			error = HAMMER2_ERROR_ETIMEDOUT;
			break;
		}
	}
	return error;
}

/*
 * Wait until the bits in flags are clear.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *	    succeeds.
 */
void
hammer2_thr_wait_neg(hammer2_thread_t *thr, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = thr->flags;
		cpu_ccfence();
		if ((oflags & flags) == 0)
			break;
		nflags = oflags | HAMMER2_THREAD_WAITING;
		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60);
		}
	}
}
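
/*
 * Editorial note on the signal/wait pattern above: a waiter publishes
 * HAMMER2_THREAD_WAITING via cmpset only after arming the sleep with
 * tsleep_interlock(), while the signal side clears WAITING in the same
 * cmpset that sets the new flags, before issuing the wakeup().  Any
 * intervening flag change makes the cmpset fail and the loop re-test,
 * so wakeups cannot be lost; the 60-second tsleep timeout is only a
 * robustness backstop.
 */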

/*
 * Initialize the supplied thread structure, starting the specified
 * thread.
 *
 * NOTE: thr structure can be retained across mounts and unmounts for this
 *	 pmp, so make sure the flags are in a sane state.
 */
void
hammer2_thr_create(hammer2_thread_t *thr, hammer2_pfs_t *pmp,
		   hammer2_dev_t *hmp,
		   const char *id, int clindex, int repidx,
		   void (*func)(void *arg))
{
	thr->pmp = pmp;		/* xop helpers */
	thr->hmp = hmp;		/* bulkfree */
	thr->clindex = clindex;
	thr->repidx = repidx;
	TAILQ_INIT(&thr->xopq);
	atomic_clear_int(&thr->flags, HAMMER2_THREAD_STOP |
				      HAMMER2_THREAD_STOPPED |
				      HAMMER2_THREAD_FREEZE |
				      HAMMER2_THREAD_FROZEN);
	if (thr->scratch == NULL)
		thr->scratch = kmalloc(MAXPHYS, M_HAMMER2, M_WAITOK | M_ZERO);
	if (repidx >= 0) {
		lwkt_create(func, thr, &thr->td, NULL, 0, repidx % ncpus,
			    "%s-%s.%02d", id, pmp->pfs_names[clindex], repidx);
	} else if (pmp) {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1,
			    "%s-%s", id, pmp->pfs_names[clindex]);
	} else {
		lwkt_create(func, thr, &thr->td, NULL, 0, -1, "%s", id);
	}
}

/*
 * Terminate a thread.  This function will silently return if the thread
 * was never initialized or has already been deleted.
 *
 * This is accomplished by setting the STOP flag and waiting for the td
 * structure to become NULL.
 */
void
hammer2_thr_delete(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_STOP);
	hammer2_thr_wait(thr, HAMMER2_THREAD_STOPPED);
	thr->pmp = NULL;
	if (thr->scratch) {
		kfree(thr->scratch, M_HAMMER2);
		thr->scratch = NULL;
	}
	KKASSERT(TAILQ_EMPTY(&thr->xopq));
}

/*
 * Asynchronous remaster request.  Ask the synchronization thread to
 * start over soon (as if it were frozen and unfrozen, but without waiting).
 * The thread always recalculates mastership relationships when restarting.
 */
void
hammer2_thr_remaster(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_REMASTER);
}

void
hammer2_thr_freeze_async(hammer2_thread_t *thr)
{
	hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
}

void
hammer2_thr_freeze(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
	hammer2_thr_wait(thr, HAMMER2_THREAD_FROZEN);
}

void
hammer2_thr_unfreeze(hammer2_thread_t *thr)
{
	if (thr->td == NULL)
		return;
	hammer2_thr_signal(thr, HAMMER2_THREAD_UNFREEZE);
	hammer2_thr_wait_neg(thr, HAMMER2_THREAD_FROZEN);
}

int
hammer2_thr_break(hammer2_thread_t *thr)
{
	if (thr->flags & (HAMMER2_THREAD_STOP |
			  HAMMER2_THREAD_REMASTER |
			  HAMMER2_THREAD_FREEZE)) {
		return 1;
	}
	return 0;
}
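
/*
 * Illustrative sketch (not part of this file): a long-running helper,
 * such as the bulkfree scan, is expected to poll hammer2_thr_break()
 * so STOP/FREEZE/REMASTER requests are honored promptly.  The loop
 * body below is hypothetical.
 */
#if 0
	while (work_remaining) {		/* hypothetical work loop */
		if (hammer2_thr_break(thr))
			break;
		do_one_unit_of_work();		/* hypothetical helper */
	}
#endif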

/****************************************************************************
 *			    HAMMER2 XOPS API				    *
 ****************************************************************************/

void
hammer2_xop_group_init(hammer2_pfs_t *pmp, hammer2_xop_group_t *xgrp)
{
	/* no extra fields in structure at the moment */
}

/*
 * Allocate a XOP request.
 *
 * Once allocated a XOP request can be started, collected, and retired,
 * and can be retired early if desired.
 *
 * NOTE: Fifo indices might not be zero but ri == wi on objcache_get().
 */
void *
hammer2_xop_alloc(hammer2_inode_t *ip, int flags)
{
	hammer2_xop_t *xop;

	xop = objcache_get(cache_xops, M_WAITOK);
	KKASSERT(xop->head.cluster.array[0].chain == NULL);

	xop->head.ip1 = ip;
	xop->head.desc = NULL;
	xop->head.flags = flags;
	xop->head.state = 0;
	xop->head.error = 0;
	xop->head.collect_key = 0;
	xop->head.focus_dio = NULL;

	if (flags & HAMMER2_XOP_MODIFYING)
		xop->head.mtid = hammer2_trans_sub(ip->pmp);
	else
		xop->head.mtid = 0;

	xop->head.cluster.nchains = ip->cluster.nchains;
	xop->head.cluster.pmp = ip->pmp;
	xop->head.cluster.flags = HAMMER2_CLUSTER_LOCKED;

	/*
	 * run_mask - Active thread (or frontend) associated with XOP
	 */
	xop->head.run_mask = HAMMER2_XOPMASK_VOP;

	hammer2_inode_ref(ip);

	return xop;
}
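
/*
 * Illustrative sketch (not part of this file): typical frontend use of
 * the XOP lifecycle, modeled on the namei-style XOPs.  Error handling
 * is abbreviated.
 */
#if 0
	hammer2_xop_nresolve_t *xop;

	xop = hammer2_xop_alloc(dip, 0);
	hammer2_xop_setname(&xop->head, name, name_len);
	hammer2_xop_start(&xop->head, &hammer2_nresolve_desc);
	error = hammer2_xop_collect(&xop->head, 0);
	/* ... consume xop->head.cluster.focus ... */
	hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
#endif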

void
hammer2_xop_setname(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
	xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name1_len = name_len;
	bcopy(name, xop->name1, name_len);
}

void
hammer2_xop_setname2(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
	xop->name2 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name2_len = name_len;
	bcopy(name, xop->name2, name_len);
}

size_t
hammer2_xop_setname_inum(hammer2_xop_head_t *xop, hammer2_key_t inum)
{
	const size_t name_len = 18;

	xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
	xop->name1_len = name_len;
	ksnprintf(xop->name1, name_len + 1, "0x%016jx", (intmax_t)inum);

	return name_len;
}

void
hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2)
{
	xop->ip2 = ip2;
	hammer2_inode_ref(ip2);
}

void
hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3)
{
	xop->ip3 = ip3;
	hammer2_inode_ref(ip3);
}

void
hammer2_xop_reinit(hammer2_xop_head_t *xop)
{
	xop->state = 0;
	xop->error = 0;
	xop->collect_key = 0;
	xop->run_mask = HAMMER2_XOPMASK_VOP;
}

/*
 * A mounted PFS needs Xops threads to support frontend operations.
 */
void
hammer2_xop_helper_create(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	lockmgr(&pmp->lock, LK_EXCLUSIVE);
	pmp->has_xop_threads = 1;

	for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
		for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				continue;
			hammer2_thr_create(&pmp->xop_groups[j].thrs[i],
					   pmp, NULL,
					   "h2xop", i, j,
					   hammer2_primary_xops_thread);
		}
	}
	lockmgr(&pmp->lock, LK_RELEASE);
}

void
hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp)
{
	int i;
	int j;

	for (i = 0; i < pmp->pfs_nmasters; ++i) {
		for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
			if (pmp->xop_groups[j].thrs[i].td)
				hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]);
		}
	}
	pmp->has_xop_threads = 0;
}

/*
 * Start a XOP request, queueing it to all nodes in the cluster to
 * execute the cluster op.
 *
 * XXX optimize single-target case.
 */
void
hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_desc_t *desc,
			 int notidx)
{
	hammer2_inode_t *ip1;
	hammer2_pfs_t *pmp;
	hammer2_thread_t *thr;
	int i;
	int ng;
	int nchains;

	ip1 = xop->ip1;
	pmp = ip1->pmp;
	if (pmp->has_xop_threads == 0)
		hammer2_xop_helper_create(pmp);

	/*
	 * The intent of the XOP sequencer is to ensure that ops on the same
	 * inode execute in the same order.  This is necessary when issuing
	 * modifying operations to multiple targets because some targets might
	 * get behind and the frontend is allowed to complete the moment a
	 * quorum of targets succeed.
	 *
	 * Strategy operations:
	 *
	 *	(1) Must be segregated from non-strategy operations to
	 *	    avoid a deadlock.  A vfsync and a bread/bwrite can
	 *	    deadlock the vfsync's buffer list scan.
	 *
	 *	(2) Reads are separated from writes to avoid write stalls
	 *	    from excessively interfering with reads.  Reads are allowed
	 *	    to wander across multiple worker threads for potential
	 *	    single-file concurrency improvements.
	 *
	 *	(3) Writes are serialized to a single worker thread (for any
	 *	    given inode) in order to try to improve block allocation
	 *	    sequentiality and to reduce lock contention.
	 *
	 * TODO - RENAME fails here because it is potentially modifying
	 *	  three different inodes, but we triple-lock the inodes
	 *	  involved so it shouldn't create a sequencing schism.
	 */
	if (xop->flags & HAMMER2_XOP_STRATEGY) {
		hammer2_xop_strategy_t *xopst;
		hammer2_off_t off;
		int cdr;

		xopst = &((hammer2_xop_t *)xop)->xop_strategy;
		ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)));
		if (desc == &hammer2_strategy_read_desc) {
			off = xopst->lbase / HAMMER2_PBUFSIZE;
			cdr = hammer2_cluster_data_read;
			/* sysctl race, load into var */
			cpu_ccfence();
			if (cdr)
				off /= cdr;
			ng ^= hammer2_icrc32(&off, sizeof(off)) &
			      (hammer2_worker_rmask << 1);
			ng |= 1;
		} else {
#if 0
			off = xopst->lbase >> 21;
			ng ^= hammer2_icrc32(&off, sizeof(off)) & 3;
#endif
			ng &= ~1;
		}
		ng = ng & (HAMMER2_XOPGROUPS_MASK >> 1);
		ng += HAMMER2_XOPGROUPS / 2;
	} else {
		ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)));
		ng = ng & (HAMMER2_XOPGROUPS_MASK >> 1);
	}
	xop->desc = desc;
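
	/*
	 * Editorial note: with the masking above, non-strategy ops hash
	 * into the low half of the xop groups and strategy ops into the
	 * high half, implementing the segregation described in (1); the
	 * low ng bit further separates strategy reads from writes per
	 * (2) and (3).
	 */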

	/*
	 * The instant xop is queued another thread can pick it off.  In the
	 * case of asynchronous ops, another thread might even finish and
	 * deallocate it.
	 */
	hammer2_spin_ex(&pmp->xop_spin);
	nchains = ip1->cluster.nchains;
	for (i = 0; i < nchains; ++i) {
		/*
		 * XXX ip1->cluster.array* not stable here.  This temporary
		 *     hack fixes basic issues in target XOPs which need to
		 *     obtain a starting chain from the inode but does not
		 *     address possible races against inode updates which
		 *     might NULL-out a chain.
		 */
		if (i != notidx && ip1->cluster.array[i].chain) {
			thr = &pmp->xop_groups[ng].thrs[i];
			atomic_set_64(&xop->run_mask, 1LLU << i);
			atomic_set_64(&xop->chk_mask, 1LLU << i);
			xop->collect[i].thr = thr;
			TAILQ_INSERT_TAIL(&thr->xopq, xop, collect[i].entry);
		}
	}
	hammer2_spin_unex(&pmp->xop_spin);
	/* xop can become invalid at this point */

	/*
	 * Each thread has its own xopq
	 */
	for (i = 0; i < nchains; ++i) {
		if (i != notidx) {
			thr = &pmp->xop_groups[ng].thrs[i];
			hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
		}
	}
}

void
hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_desc_t *desc)
{
	hammer2_xop_start_except(xop, desc, -1);
}

/*
 * Retire a XOP.  Used by both the VOP frontend and by the XOP backend.
 */
void
hammer2_xop_retire(hammer2_xop_head_t *xop, uint64_t mask)
{
	hammer2_chain_t *chain;
	uint64_t nmask;
	int i;

	/*
	 * Remove the frontend collector or remove a backend feeder.
	 *
	 * When removing the frontend we must wakeup any backend feeders
	 * who are waiting for FIFO space.
	 *
	 * When removing the last backend feeder we must wakeup any waiting
	 * frontend.
	 */
	KKASSERT(xop->run_mask & mask);
	nmask = atomic_fetchadd_64(&xop->run_mask,
				   -mask + HAMMER2_XOPMASK_FEED);

	/*
	 * More than one entity left
	 */
	if ((nmask & HAMMER2_XOPMASK_ALLDONE) != mask) {
		/*
		 * Frontend terminating, wakeup any backends waiting on
		 * fifo full.
		 *
		 * NOTE!!! The xop can get ripped out from under us at
		 *	   this point, so do not reference it again.
		 *	   The wakeup(xop) doesn't touch the xop and
		 *	   is ok.
		 */
		if (mask == HAMMER2_XOPMASK_VOP) {
			if (nmask & HAMMER2_XOPMASK_FIFOW)
				wakeup(xop);
		}

		/*
		 * Wakeup frontend if the last backend is terminating.
		 */
		nmask -= mask;
		if ((nmask & HAMMER2_XOPMASK_ALLDONE) == HAMMER2_XOPMASK_VOP) {
			if (nmask & HAMMER2_XOPMASK_WAIT)
				wakeup(xop);
		}

		return;
	}
	/* else nobody else left, we can ignore FIFOW */

	/*
	 * All collectors are gone, we can cleanup and dispose of the XOP.
	 * Note that this can wind up being a frontend OR a backend.
	 * Pending chains are locked shared and not owned by any thread.
	 *
	 * Cleanup the collection cluster.
	 */
	for (i = 0; i < xop->cluster.nchains; ++i) {
		xop->cluster.array[i].flags = 0;
		chain = xop->cluster.array[i].chain;
		if (chain) {
			xop->cluster.array[i].chain = NULL;
			hammer2_chain_drop_unhold(chain);
		}
	}

	/*
	 * Cleanup the fifos.  Since we are the only entity left on this
	 * xop we don't have to worry about fifo flow control, and one
	 * lfence() will do the job.
	 */
	cpu_lfence();
	mask = xop->chk_mask;
	for (i = 0; mask && i < HAMMER2_MAXCLUSTER; ++i) {
		hammer2_xop_fifo_t *fifo = &xop->collect[i];
		while (fifo->ri != fifo->wi) {
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			if (chain)
				hammer2_chain_drop_unhold(chain);
			++fifo->ri;
		}
		mask &= ~(1U << i);
	}

	/*
	 * The inode is only held at this point, simply drop it.
	 */
	if (xop->ip1) {
		hammer2_inode_drop(xop->ip1);
		xop->ip1 = NULL;
	}
	if (xop->ip2) {
		hammer2_inode_drop(xop->ip2);
		xop->ip2 = NULL;
	}
	if (xop->ip3) {
		hammer2_inode_drop(xop->ip3);
		xop->ip3 = NULL;
	}
	if (xop->name1) {
		kfree(xop->name1, M_HAMMER2);
		xop->name1 = NULL;
		xop->name1_len = 0;
	}
	if (xop->name2) {
		kfree(xop->name2, M_HAMMER2);
		xop->name2 = NULL;
		xop->name2_len = 0;
	}

	objcache_put(cache_xops, xop);
}
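
/*
 * Editorial note on the run_mask arithmetic above: each backend feeder
 * owns one low bit and the frontend owns HAMMER2_XOPMASK_VOP.  The
 * single atomic_fetchadd_64() of (-mask + HAMMER2_XOPMASK_FEED) both
 * removes the caller's participation bit and bumps the FEED counter in
 * the high bits; the counter guarantees that a sleeper's cmpset on
 * run_mask in hammer2_xop_feed()/hammer2_xop_collect() fails and
 * re-polls if anything changed while it was deciding to sleep.
 */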

/*
 * (Backend) Returns non-zero if the frontend is still attached.
 */
int
hammer2_xop_active(hammer2_xop_head_t *xop)
{
	if (xop->run_mask & HAMMER2_XOPMASK_VOP)
		return 1;
	else
		return 0;
}

/*
 * (Backend) Feed chain data through the cluster validator and back to
 * the frontend.  Chains are fed from multiple nodes concurrently
 * and pipelined via per-node FIFOs in the XOP.
 *
 * The chain must be locked (either shared or exclusive).  The caller may
 * unlock and drop the chain on return.  This function will add an extra
 * ref and hold the chain's data for the pass-back.
 *
 * No xop lock is needed because we are only manipulating fields under
 * our direct control.
 *
 * Returns 0 on success and a hammer2 error code if sync is permanently
 * lost.  The caller retains a ref on the chain but by convention
 * the lock is typically inherited by the xop (caller loses lock).
 *
 * Returns non-zero on error.  In this situation the caller retains a
 * ref on the chain but loses the lock (we unlock here).
 */
int
hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
		 int clindex, int error)
{
	hammer2_xop_fifo_t *fifo;
	uint64_t mask;

	/*
	 * Early termination (typically of xop_readdir)
	 */
	if (hammer2_xop_active(xop) == 0) {
		error = HAMMER2_ERROR_ABORTED;
		goto done;
	}

	/*
	 * Multi-threaded entry into the XOP collector.  We own the
	 * fifo->wi for our clindex.
	 */
	fifo = &xop->collect[clindex];
	if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO)
		lwkt_yield();
	while (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
		atomic_set_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
		mask = xop->run_mask;
		if ((mask & HAMMER2_XOPMASK_VOP) == 0) {
			error = HAMMER2_ERROR_ABORTED;
			goto done;
		}
		tsleep_interlock(xop, 0);
		if (atomic_cmpset_64(&xop->run_mask, mask,
				     mask | HAMMER2_XOPMASK_FIFOW)) {
			if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
				tsleep(xop, PINTERLOCKED, "h2feed", hz*60);
			}
		}
	}
	atomic_clear_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
	if (chain)
		hammer2_chain_ref_hold(chain);
	if (error == 0 && chain)
		error = chain->error;
	fifo->errors[fifo->wi & HAMMER2_XOPFIFO_MASK] = error;
	fifo->array[fifo->wi & HAMMER2_XOPFIFO_MASK] = chain;
	cpu_sfence();
	++fifo->wi;

	mask = atomic_fetchadd_64(&xop->run_mask, HAMMER2_XOPMASK_FEED);
	if (mask & HAMMER2_XOPMASK_WAIT) {
		atomic_clear_64(&xop->run_mask, HAMMER2_XOPMASK_WAIT);
		wakeup(xop);
	}
	error = 0;

	/*
	 * Cleanup.  If an error occurred we eat the lock.  If no error
	 * occurred the fifo inherits the lock and gains an additional ref.
	 *
	 * The caller's ref remains in both cases.
	 */
done:
	return error;
}
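
/*
 * Illustrative sketch (not part of this file): a backend storage_func
 * typically scans chains and feeds each result, then feeds a final
 * NULL chain carrying the terminal error code.  next_chain() is a
 * hypothetical stand-in for the usual chain lookup/next loop.
 */
#if 0
	while (chain) {
		error = hammer2_xop_feed(&xop->head, chain, thr->clindex, 0);
		if (error)
			break;
		chain = next_chain(chain);	/* hypothetical iterator */
	}
	hammer2_xop_feed(&xop->head, NULL, thr->clindex, error);
#endif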

/*
 * (Frontend) collect a response from a running cluster op.
 *
 * Responses are fed from all appropriate nodes concurrently
 * and collected into a cohesive response >= collect_key.
 *
 * The collector will return the instant quorum or other requirements
 * are met, even if some nodes get behind or become non-responsive.
 *
 * HAMMER2_XOP_COLLECT_NOWAIT	- Used to 'poll' a completed collection,
 *				  usually called synchronously from the
 *				  node XOPs for the strategy code to
 *				  fake the frontend collection and complete
 *				  the BIO as soon as possible.
 *
 * HAMMER2_XOP_SYNCHRONIZER	- Request synchronization with a particular
 *				  cluster index, prevents looping when that
 *				  index is out of sync so caller can act on
 *				  the out of sync element.  ESRCH and EDEADLK
 *				  can be returned if this flag is specified.
 *
 * Returns 0 on success plus a filled out xop->cluster structure.
 * Return ENOENT on normal termination.
 * Otherwise return an error.
 *
 * WARNING! If the xop returns a cluster with a non-NULL focus, note that
 *	    none of the chains in the cluster (or the focus) are either
 *	    locked or I/O synchronized with the cpu.  hammer2_xop_gdata()
 *	    and hammer2_xop_pdata() must be used to safely access the focus
 *	    chain's content.
 *
 *	    The frontend can make certain assumptions based on higher-level
 *	    locking done by the frontend, but data integrity absolutely
 *	    requires using the gdata/pdata API.
 */
int
hammer2_xop_collect(hammer2_xop_head_t *xop, int flags)
{
	hammer2_xop_fifo_t *fifo;
	hammer2_chain_t *chain;
	hammer2_key_t lokey;
	uint64_t mask;
	int error;
	int keynull;
	int adv;		/* advance the element */
	int i;

loop:
	/*
	 * First loop tries to advance pieces of the cluster which
	 * are out of sync.
	 */
	lokey = HAMMER2_KEY_MAX;
	keynull = HAMMER2_CHECK_NULL;
	mask = xop->run_mask;
	cpu_lfence();

	for (i = 0; i < xop->cluster.nchains; ++i) {
		chain = xop->cluster.array[i].chain;
		if (chain == NULL) {
			adv = 1;
		} else if (chain->bref.key < xop->collect_key) {
			adv = 1;
		} else {
			keynull &= ~HAMMER2_CHECK_NULL;
			if (lokey > chain->bref.key)
				lokey = chain->bref.key;
			adv = 0;
		}
		if (adv == 0)
			continue;

		/*
		 * Advance element if possible, advanced element may be NULL.
		 */
		if (chain)
			hammer2_chain_drop_unhold(chain);

		fifo = &xop->collect[i];
		if (fifo->ri != fifo->wi) {
			cpu_lfence();
			chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
			error = fifo->errors[fifo->ri & HAMMER2_XOPFIFO_MASK];
			++fifo->ri;
			xop->cluster.array[i].chain = chain;
			xop->cluster.array[i].error = error;
			if (chain == NULL) {
				xop->cluster.array[i].flags |=
							HAMMER2_CITEM_NULL;
			}
			if (fifo->wi - fifo->ri <= HAMMER2_XOPFIFO / 2) {
				if (fifo->flags & HAMMER2_XOP_FIFO_STALL) {
					atomic_clear_int(&fifo->flags,
						    HAMMER2_XOP_FIFO_STALL);
					wakeup(xop);
					lwkt_yield();
				}
			}
			--i;		/* loop on same index */
		} else {
			/*
			 * Retain CITEM_NULL flag.  If set just repeat EOF.
			 * If not, the NULL,0 combination indicates an
			 * operation in-progress.
			 */
			xop->cluster.array[i].chain = NULL;
			/* retain any CITEM_NULL setting */
		}
	}

	/*
	 * Determine whether the lowest collected key meets clustering
	 * requirements.  Returns:
	 *
	 * 0		- key valid, cluster can be returned.
	 *
	 * ENOENT	- normal end of scan, return ENOENT.
	 *
	 * ESRCH	- sufficient elements collected, quorum agreement
	 *		  that lokey is not a valid element and should be
	 *		  skipped.
	 *
	 * EDEADLK	- sufficient elements collected, no quorum agreement
	 *		  (and no agreement possible).  In this situation a
	 *		  repair is needed, for now we loop.
	 *
	 * EINPROGRESS	- insufficient elements collected to resolve, wait
	 *		  for event and loop.
	 */
	if ((flags & HAMMER2_XOP_COLLECT_WAITALL) &&
	    (mask & HAMMER2_XOPMASK_ALLDONE) != HAMMER2_XOPMASK_VOP) {
		error = HAMMER2_ERROR_EINPROGRESS;
	} else {
		error = hammer2_cluster_check(&xop->cluster, lokey, keynull);
	}
	if (error == HAMMER2_ERROR_EINPROGRESS) {
		if (flags & HAMMER2_XOP_COLLECT_NOWAIT)
			goto done;
		tsleep_interlock(xop, 0);
		if (atomic_cmpset_64(&xop->run_mask,
				     mask, mask | HAMMER2_XOPMASK_WAIT)) {
			tsleep(xop, PINTERLOCKED, "h2coll", hz*60);
		}
		goto loop;
	}
	if (error == HAMMER2_ERROR_ESRCH) {
		if (lokey != HAMMER2_KEY_MAX) {
			xop->collect_key = lokey + 1;
			goto loop;
		}
		error = HAMMER2_ERROR_ENOENT;
	}
	if (error == HAMMER2_ERROR_EDEADLK) {
		kprintf("hammer2: no quorum possible lokey %016jx\n",
			(intmax_t)lokey);
		if (lokey != HAMMER2_KEY_MAX) {
			xop->collect_key = lokey + 1;
			goto loop;
		}
		error = HAMMER2_ERROR_ENOENT;
	}
	if (lokey == HAMMER2_KEY_MAX)
		xop->collect_key = lokey;
	else
		xop->collect_key = lokey + 1;
done:
	return error;
}
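
/*
 * Illustrative sketch (not part of this file): a scan-style frontend,
 * such as readdir, loops on the collector until ENOENT.  Abbreviated.
 */
#if 0
	for (;;) {
		error = hammer2_xop_collect(&xop->head, 0);
		if (error)	/* HAMMER2_ERROR_ENOENT on normal EOF */
			break;
		/* access focus data via hammer2_xop_gdata()/_pdata() */
	}
#endif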

/*
 * N x M processing threads are available to handle XOPs, N per cluster
 * index x M cluster nodes.
 *
 * Locate and return the next runnable xop, or NULL if no xops are
 * present or none of the xops are currently runnable (for various reasons).
 * The xop is left on the queue and serves to block other dependent xops
 * from being run.
 *
 * Dependent xops will not be returned.
 *
 * Sets HAMMER2_XOP_FIFO_RUN on the returned xop or returns NULL.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
#define XOP_HASH_SIZE	16
#define XOP_HASH_MASK	(XOP_HASH_SIZE - 1)

static __inline
int
xop_testhash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
	uint32_t mask;
	int hv;

	hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
	mask = 1U << (hv & 31);
	hv >>= 5;

	return ((int)(hash[hv & XOP_HASH_MASK] & mask));
}

static __inline
void
xop_sethash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
	uint32_t mask;
	int hv;

	hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
	mask = 1U << (hv & 31);
	hv >>= 5;

	hash[hv & XOP_HASH_MASK] |= mask;
}
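
/*
 * Editorial note: the hash is a conservative dependency filter.  A
 * collision can needlessly defer an unrelated xop until a later queue
 * scan, but it can never run a dependent xop early, which is the
 * property the sequencing depends on.
 */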

static
hammer2_xop_head_t *
hammer2_xop_next(hammer2_thread_t *thr)
{
	hammer2_pfs_t *pmp = thr->pmp;
	int clindex = thr->clindex;
	uint32_t hash[XOP_HASH_SIZE] = { 0 };
	hammer2_xop_head_t *xop;

	hammer2_spin_ex(&pmp->xop_spin);
	TAILQ_FOREACH(xop, &thr->xopq, collect[clindex].entry) {
		/*
		 * Check dependency
		 */
		if (xop_testhash(thr, xop->ip1, hash) ||
		    (xop->ip2 && xop_testhash(thr, xop->ip2, hash)) ||
		    (xop->ip3 && xop_testhash(thr, xop->ip3, hash))) {
			continue;
		}
		xop_sethash(thr, xop->ip1, hash);
		if (xop->ip2)
			xop_sethash(thr, xop->ip2, hash);
		if (xop->ip3)
			xop_sethash(thr, xop->ip3, hash);

		/*
		 * Check already running
		 */
		if (xop->collect[clindex].flags & HAMMER2_XOP_FIFO_RUN)
			continue;

		/*
		 * Found a good one, return it.
		 */
		atomic_set_int(&xop->collect[clindex].flags,
			       HAMMER2_XOP_FIFO_RUN);
		break;
	}
	hammer2_spin_unex(&pmp->xop_spin);

	return xop;
}

/*
 * Remove the completed XOP from the queue, clear HAMMER2_XOP_FIFO_RUN.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
static
void
hammer2_xop_dequeue(hammer2_thread_t *thr, hammer2_xop_head_t *xop)
{
	hammer2_pfs_t *pmp = thr->pmp;
	int clindex = thr->clindex;

	hammer2_spin_ex(&pmp->xop_spin);
	TAILQ_REMOVE(&thr->xopq, xop, collect[clindex].entry);
	atomic_clear_int(&xop->collect[clindex].flags,
			 HAMMER2_XOP_FIFO_RUN);
	hammer2_spin_unex(&pmp->xop_spin);
	if (TAILQ_FIRST(&thr->xopq))
		hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
}

/*
 * Primary management thread for xops support.  Each node has several such
 * threads which replicate front-end operations on cluster nodes.
 *
 * XOPS thread node operations, allowing the function to focus on a single
 * node in the cluster after validating the operation with the cluster.
 * This is primarily what prevents dead or stalled nodes from stalling
 * the frontend.
 */
void
hammer2_primary_xops_thread(void *arg)
{
	hammer2_thread_t *thr = arg;
	hammer2_pfs_t *pmp;
	hammer2_xop_head_t *xop;
	uint64_t mask;
	uint32_t flags;
	uint32_t nflags;
	hammer2_xop_desc_t *last_desc = NULL;

	pmp = thr->pmp;
	/*xgrp = &pmp->xop_groups[thr->repidx]; not needed */
	mask = 1LLU << thr->clindex;

	for (;;) {
		flags = thr->flags;

		/*
		 * Handle stop request
		 */
		if (flags & HAMMER2_THREAD_STOP)
			break;

		/*
		 * Handle freeze request
		 */
		if (flags & HAMMER2_THREAD_FREEZE) {
			hammer2_thr_signal2(thr, HAMMER2_THREAD_FROZEN,
						 HAMMER2_THREAD_FREEZE);
			continue;
		}

		if (flags & HAMMER2_THREAD_UNFREEZE) {
			hammer2_thr_signal2(thr, 0,
						 HAMMER2_THREAD_FROZEN |
						 HAMMER2_THREAD_UNFREEZE);
			continue;
		}

		/*
		 * Force idle if frozen until unfrozen or stopped.
		 */
		if (flags & HAMMER2_THREAD_FROZEN) {
			hammer2_thr_wait_any(thr,
					     HAMMER2_THREAD_UNFREEZE |
					     HAMMER2_THREAD_STOP,
					     0);
			continue;
		}

		/*
		 * Reset state on REMASTER request
		 */
		if (flags & HAMMER2_THREAD_REMASTER) {
			hammer2_thr_signal2(thr, 0, HAMMER2_THREAD_REMASTER);
			/* reset state here */
			continue;
		}

		/*
		 * Process requests.  Each request can be multi-queued.
		 *
		 * If we get behind and the frontend VOP is no longer active,
		 * we retire the request without processing it.  The callback
		 * may also abort processing if the frontend VOP becomes
		 * inactive.
		 */
		if (flags & HAMMER2_THREAD_XOPQ) {
			nflags = flags & ~HAMMER2_THREAD_XOPQ;
			if (!atomic_cmpset_int(&thr->flags, flags, nflags))
				continue;
			flags = nflags;
			/* fall through */
		}
		while ((xop = hammer2_xop_next(thr)) != NULL) {
			if (hammer2_xop_active(xop)) {
				last_desc = xop->desc;
				xop->desc->storage_func((hammer2_xop_t *)xop,
							thr->scratch,
							thr->clindex);
				hammer2_xop_dequeue(thr, xop);
				hammer2_xop_retire(xop, mask);
			} else {
				last_desc = xop->desc;
				hammer2_xop_feed(xop, NULL, thr->clindex,
						 ECONNABORTED);
				hammer2_xop_dequeue(thr, xop);
				hammer2_xop_retire(xop, mask);
			}
		}

		/*
		 * Wait for event, interlock using THREAD_WAITING and
		 * THREAD_SIGNAL.
		 *
		 * For robustness poll on a 30-second interval, but nominally
		 * expect to be woken up.
		 */
		nflags = flags | HAMMER2_THREAD_WAITING;

		tsleep_interlock(&thr->flags, 0);
		if (atomic_cmpset_int(&thr->flags, flags, nflags)) {
			tsleep(&thr->flags, PINTERLOCKED, "h2idle", hz*30);
		}
	}

	/*
	 * Cleanup / termination
	 */
	while ((xop = TAILQ_FIRST(&thr->xopq)) != NULL) {
		kprintf("hammer2_thread: aborting xop %s\n", xop->desc->id);
		TAILQ_REMOVE(&thr->xopq, xop,
			     collect[thr->clindex].entry);
		hammer2_xop_retire(xop, mask);
	}

	thr->td = NULL;
	hammer2_thr_signal(thr, HAMMER2_THREAD_STOPPED);
	/* thr structure can go invalid after this point */
	lwkt_exit();
}