sys/vfs/hammer2/hammer2_thread.c
1 /*
2  * Copyright (c) 2015 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 /*
35  * This module implements the hammer2 helper thread API, including
36  * the frontend/backend XOP API.
37  */
38 #include "hammer2.h"
39
40 /*
41  * Initialize the supplied thread structure, starting the specified
42  * thread.
43  */
44 void
45 hammer2_thr_create(hammer2_thread_t *thr, hammer2_pfs_t *pmp,
46                    const char *id, int clindex, int repidx,
47                    void (*func)(void *arg))
48 {
49         lockinit(&thr->lk, "h2thr", 0, 0);
50         thr->pmp = pmp;
51         thr->xopq = &pmp->xopq[clindex];
52         thr->clindex = clindex;
53         thr->repidx = repidx;
54         if (repidx >= 0) {
55                 lwkt_create(func, thr, &thr->td, NULL, 0, -1,
56                             "%s-%s.%02d", id, pmp->pfs_names[clindex], repidx);
57         } else {
58                 lwkt_create(func, thr, &thr->td, NULL, 0, -1,
59                             "%s-%s", id, pmp->pfs_names[clindex]);
60         }
61 }
62
63 /*
64  * Terminate a thread.  This function will silently return if the thread
65  * was never initialized or has already been deleted.
66  *
67  * This is accomplished by setting the STOP flag and waiting for the td
68  * structure to become NULL.
69  */
70 void
71 hammer2_thr_delete(hammer2_thread_t *thr)
72 {
73         if (thr->td == NULL)
74                 return;
75         lockmgr(&thr->lk, LK_EXCLUSIVE);
76         atomic_set_int(&thr->flags, HAMMER2_THREAD_STOP);
77         wakeup(thr->xopq);
78         while (thr->td) {
79                 lksleep(thr, &thr->lk, 0, "h2thr", hz);
80         }
81         lockmgr(&thr->lk, LK_RELEASE);
82         thr->pmp = NULL;
83         thr->xopq = NULL;
84         lockuninit(&thr->lk);
85 }
86
87 /*
88  * Asynchronous remaster request.  Ask the synchronization thread to
89  * start over soon (as if it were frozen and unfrozen, but without waiting).
90  * The thread always recalculates mastership relationships when restarting.
91  */
92 void
93 hammer2_thr_remaster(hammer2_thread_t *thr)
94 {
95         if (thr->td == NULL)
96                 return;
97         lockmgr(&thr->lk, LK_EXCLUSIVE);
98         atomic_set_int(&thr->flags, HAMMER2_THREAD_REMASTER);
99         wakeup(thr->xopq);
100         lockmgr(&thr->lk, LK_RELEASE);
101 }
102
103 void
104 hammer2_thr_freeze_async(hammer2_thread_t *thr)
105 {
106         atomic_set_int(&thr->flags, HAMMER2_THREAD_FREEZE);
107         wakeup(thr->xopq);
108 }
109
110 void
111 hammer2_thr_freeze(hammer2_thread_t *thr)
112 {
113         if (thr->td == NULL)
114                 return;
115         lockmgr(&thr->lk, LK_EXCLUSIVE);
116         atomic_set_int(&thr->flags, HAMMER2_THREAD_FREEZE);
117         wakeup(thr->xopq);
118         while ((thr->flags & HAMMER2_THREAD_FROZEN) == 0) {
119                 lksleep(thr, &thr->lk, 0, "h2frz", hz);
120         }
121         lockmgr(&thr->lk, LK_RELEASE);
122 }
123
124 void
125 hammer2_thr_unfreeze(hammer2_thread_t *thr)
126 {
127         if (thr->td == NULL)
128                 return;
129         lockmgr(&thr->lk, LK_EXCLUSIVE);
130         atomic_clear_int(&thr->flags, HAMMER2_THREAD_FROZEN);
131         wakeup(thr->xopq);
132         lockmgr(&thr->lk, LK_RELEASE);
133 }
134
135 int
136 hammer2_thr_break(hammer2_thread_t *thr)
137 {
138         if (thr->flags & (HAMMER2_THREAD_STOP |
139                           HAMMER2_THREAD_REMASTER |
140                           HAMMER2_THREAD_FREEZE)) {
141                 return 1;
142         }
143         return 0;
144 }
145
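/*
 * Illustrative sketch (not part of the driver): the minimal shape of a
 * worker function suitable for hammer2_thr_create(), mirroring the
 * protocol used by hammer2_primary_xops_thread() below.  The worker must
 * honor the STOP/FREEZE/REMASTER flags and, on exit, clear thr->td and
 * wakeup(thr) so hammer2_thr_delete() can complete.  The function name
 * is hypothetical.
 */
#if 0
static void
example_worker(void *arg)
{
        hammer2_thread_t *thr = arg;

        lockmgr(&thr->lk, LK_EXCLUSIVE);
        while ((thr->flags & HAMMER2_THREAD_STOP) == 0) {
                /* Acknowledge a freeze request, then idle while frozen */
                if (thr->flags & HAMMER2_THREAD_FREEZE) {
                        atomic_set_int(&thr->flags, HAMMER2_THREAD_FROZEN);
                        atomic_clear_int(&thr->flags, HAMMER2_THREAD_FREEZE);
                }
                if (thr->flags & HAMMER2_THREAD_FROZEN) {
                        lksleep(thr->xopq, &thr->lk, 0, "frozen", 0);
                        continue;
                }
                if (thr->flags & HAMMER2_THREAD_REMASTER) {
                        atomic_clear_int(&thr->flags, HAMMER2_THREAD_REMASTER);
                        /* reset per-pass state here */
                }

                /* ... perform one unit of work, polling hammer2_thr_break() ... */

                lksleep(thr->xopq, &thr->lk, 0, "h2idle", hz * 30);
        }

        /* Exit protocol expected by hammer2_thr_delete() */
        thr->td = NULL;
        wakeup(thr);
        lockmgr(&thr->lk, LK_RELEASE);
        /* thr can become invalid after this point */
}
#endif
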
146 /****************************************************************************
147  *                          HAMMER2 XOPS API                                *
148  ****************************************************************************/
149
150 void
151 hammer2_xop_group_init(hammer2_pfs_t *pmp, hammer2_xop_group_t *xgrp)
152 {
153         /* no extra fields in structure at the moment */
154 }
155
156 /*
157  * Allocate a XOP request.
158  *
159  * Once allocated a XOP request can be started, collected, and retired,
160  * and can be retired early if desired.
161  *
162  * NOTE: Fifo indices might not be zero but ri == wi on objcache_get().
163  */
164 void *
165 hammer2_xop_alloc(hammer2_inode_t *ip, int flags)
166 {
167         hammer2_xop_t *xop;
168
169         xop = objcache_get(cache_xops, M_WAITOK);
170         KKASSERT(xop->head.cluster.array[0].chain == NULL);
171         xop->head.ip1 = ip;
172         xop->head.func = NULL;
173         xop->head.state = 0;
174         xop->head.error = 0;
175         xop->head.collect_key = 0;
176         if (flags & HAMMER2_XOP_MODIFYING)
177                 xop->head.mtid = hammer2_trans_sub(ip->pmp);
178         else
179                 xop->head.mtid = 0;
180
181         xop->head.cluster.nchains = ip->cluster.nchains;
182         xop->head.cluster.pmp = ip->pmp;
183         xop->head.cluster.flags = HAMMER2_CLUSTER_LOCKED;
184
185         /*
186          * run_mask - Active thread (or frontend) associated with XOP
187          */
188         xop->head.run_mask = HAMMER2_XOPMASK_VOP;
189
190         hammer2_inode_ref(ip);
191
192         return xop;
193 }
194
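/*
 * Illustrative sketch (not part of the driver): a typical single-shot
 * frontend sequence using the allocator above.  The backend function
 * name and the local variables are hypothetical placeholders; real
 * callers live in the VOP frontend code.
 */
#if 0
        hammer2_xop_t *xop;
        int error;

        xop = hammer2_xop_alloc(ip, HAMMER2_XOP_MODIFYING);
        hammer2_xop_setname(&xop->head, name, name_len);
        hammer2_xop_start(&xop->head, example_backend_func);
        error = hammer2_xop_collect(&xop->head, 0);
        if (error == 0) {
                /* ... consume the validated result in xop->cluster ... */
        }
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
#endif
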
195 void
196 hammer2_xop_setname(hammer2_xop_head_t *xop, const char *name, size_t name_len)
197 {
198         xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
199         xop->name1_len = name_len;
200         bcopy(name, xop->name1, name_len);
201 }
202
203 void
204 hammer2_xop_setname2(hammer2_xop_head_t *xop, const char *name, size_t name_len)
205 {
206         xop->name2 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
207         xop->name2_len = name_len;
208         bcopy(name, xop->name2, name_len);
209 }
210
211
212 void
213 hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2)
214 {
215         xop->ip2 = ip2;
216         hammer2_inode_ref(ip2);
217 }
218
219 void
220 hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3)
221 {
222         xop->ip3 = ip3;
223         hammer2_inode_ref(ip3);
224 }
225
226 void
227 hammer2_xop_reinit(hammer2_xop_head_t *xop)
228 {
229         xop->state = 0;
230         xop->error = 0;
231         xop->collect_key = 0;
232         xop->run_mask = HAMMER2_XOPMASK_VOP;
233 }
234
235 /*
236  * A mounted PFS needs Xops threads to support frontend operations.
237  */
238 void
239 hammer2_xop_helper_create(hammer2_pfs_t *pmp)
240 {
241         int i;
242         int j;
243
244         lockmgr(&pmp->lock, LK_EXCLUSIVE);
245         pmp->has_xop_threads = 1;
246
247         for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
248                 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
249                         if (pmp->xop_groups[j].thrs[i].td)
250                                 continue;
251                         hammer2_thr_create(&pmp->xop_groups[j].thrs[i], pmp,
252                                            "h2xop", i, j,
253                                            hammer2_primary_xops_thread);
254                 }
255         }
256         lockmgr(&pmp->lock, LK_RELEASE);
257 }
258
259 void
260 hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp)
261 {
262         int i;
263         int j;
264
265         for (i = 0; i < pmp->pfs_nmasters; ++i) {
266                 for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
267                         if (pmp->xop_groups[j].thrs[i].td)
268                                 hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]);
269                 }
270         }
271 }
272
273 /*
274  * Start a XOP request, queueing it to all nodes in the cluster to
275  * execute the cluster op.
276  *
277  * XXX optimize single-target case.
278  */
279 void
280 hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_func_t func,
281                          int notidx)
282 {
283 #if 0
284         hammer2_xop_group_t *xgrp;
285         hammer2_thread_t *thr;
286 #endif
287         hammer2_pfs_t *pmp;
288 #if 0
289         int g;
290 #endif
291         int i;
292         int nchains;
293
294         pmp = xop->ip1->pmp;
295         if (pmp->has_xop_threads == 0)
296                 hammer2_xop_helper_create(pmp);
297
298 #if 0
299         g = pmp->xop_iterator++;
300         g = g & HAMMER2_XOPGROUPS_MASK;
301         xgrp = &pmp->xop_groups[g];
302         xop->xgrp = xgrp;
303 #endif
304         xop->func = func;
305
306         /*
307          * The XOP sequencer is based on ip1, ip2, and ip3.  Because ops can
308          * finish early and unlock the related inodes, some targets may get
309          * behind.  The sequencer ensures that ops on the same inode execute
310          * in the same order.
311          *
312          * The instant the xop is queued, another thread can pick it off.  In the
313          * case of asynchronous ops, another thread might even finish and
314          * deallocate it.
315          */
316         hammer2_spin_ex(&pmp->xop_spin);
317         nchains = xop->ip1->cluster.nchains;
318         for (i = 0; i < nchains; ++i) {
319                 if (i != notidx) {
320                         atomic_set_int(&xop->run_mask, 1U << i);
321                         atomic_set_int(&xop->chk_mask, 1U << i);
322                         TAILQ_INSERT_TAIL(&pmp->xopq[i], xop, collect[i].entry);
323                 }
324         }
325         hammer2_spin_unex(&pmp->xop_spin);
326         /* xop can become invalid at this point */
327
328         /*
329          * Try to wakeup just one xop thread for each cluster node.
330          */
331         for (i = 0; i < nchains; ++i) {
332                 if (i != notidx)
333                         wakeup_one(&pmp->xopq[i]);
334         }
335 }
336
337 void
338 hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_func_t func)
339 {
340         hammer2_xop_start_except(xop, func, -1);
341 }
342
343 /*
344  * Retire a XOP.  Used by both the VOP frontend and by the XOP backend.
345  */
346 void
347 hammer2_xop_retire(hammer2_xop_head_t *xop, uint32_t mask)
348 {
349         hammer2_chain_t *chain;
350         int i;
351
352         /*
353          * Remove the frontend collector or remove a backend feeder.
354          * When removing the frontend we must wakeup any backend feeders
355          * who are waiting for FIFO space.
356          *
357          * XXX optimize wakeup.
358          */
359         KKASSERT(xop->run_mask & mask);
360         if (atomic_fetchadd_int(&xop->run_mask, -mask) != mask) {
361                 if (mask == HAMMER2_XOPMASK_VOP)
362                         wakeup(xop);
363                 return;
364         }
365
366         /*
367          * All collectors are gone, we can cleanup and dispose of the XOP.
368          * Note that this can wind up being a frontend OR a backend.
369          * Pending chains are locked shared and not owned by any thread.
370          *
371          * Cleanup the collection cluster.
372          */
373         for (i = 0; i < xop->cluster.nchains; ++i) {
374                 xop->cluster.array[i].flags = 0;
375                 chain = xop->cluster.array[i].chain;
376                 if (chain) {
377                         xop->cluster.array[i].chain = NULL;
378                         hammer2_chain_pull_shared_lock(chain);
379                         hammer2_chain_unlock(chain);
380                         hammer2_chain_drop(chain);
381                 }
382         }
383
384         /*
385          * Cleanup the fifos; use chk_mask to limit the scan to queued indices.
386          */
387         mask = xop->chk_mask;
388         for (i = 0; mask && i < HAMMER2_MAXCLUSTER; ++i) {
389                 hammer2_xop_fifo_t *fifo = &xop->collect[i];
390                 while (fifo->ri != fifo->wi) {
391                         cpu_lfence();
392                         chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
393                         if (chain) {
394                                 hammer2_chain_pull_shared_lock(chain);
395                                 hammer2_chain_unlock(chain);
396                                 hammer2_chain_drop(chain);
397                         }
398                         ++fifo->ri;
399                         if (fifo->wi - fifo->ri < HAMMER2_XOPFIFO / 2)
400                                 wakeup(xop);    /* XXX optimize */
401                 }
402                 mask &= ~(1U << i);
403         }
404
405         /*
406          * The inodes are only held at this point, simply drop them.
407          */
408         if (xop->ip1) {
409                 hammer2_inode_drop(xop->ip1);
410                 xop->ip1 = NULL;
411         }
412         if (xop->ip2) {
413                 hammer2_inode_drop(xop->ip2);
414                 xop->ip2 = NULL;
415         }
416         if (xop->ip3) {
417                 hammer2_inode_drop(xop->ip3);
418                 xop->ip3 = NULL;
419         }
420         if (xop->name1) {
421                 kfree(xop->name1, M_HAMMER2);
422                 xop->name1 = NULL;
423                 xop->name1_len = 0;
424         }
425         if (xop->name2) {
426                 kfree(xop->name2, M_HAMMER2);
427                 xop->name2 = NULL;
428                 xop->name2_len = 0;
429         }
430
431         objcache_put(cache_xops, xop);
432 }
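
/*
 * Retire masks: the frontend retires its interest with HAMMER2_XOPMASK_VOP
 * (the bit installed by hammer2_xop_alloc()), while each backend thread
 * retires with (1U << clindex), the bit set for it by
 * hammer2_xop_start_except().  Whichever caller clears the last bit of
 * run_mask performs the cleanup above.
 */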
433
434 /*
435  * (Backend) Returns non-zero if the frontend is still attached.
436  */
437 int
438 hammer2_xop_active(hammer2_xop_head_t *xop)
439 {
440         if (xop->run_mask & HAMMER2_XOPMASK_VOP)
441                 return 1;
442         else
443                 return 0;
444 }
445
446 /*
447  * (Backend) Feed chain data through the cluster validator and back to
448  * the frontend.  Chains are fed from multiple nodes concurrently
449  * and pipelined via per-node FIFOs in the XOP.
450  *
451  * The chain must be locked shared.  This function adds an additional
452  * shared-lock and ref to the chain for the frontend to collect.  Caller
453  * must still unlock/drop the chain.
454  *
455  * No xop lock is needed because we are only manipulating fields under
456  * our direct control.
457  *
458  * Returns 0 on success, in which case the fifo holds the additional
459  * ref and shared lock described above and the caller retains its own
460  * ref and lock on the chain.
461  *
462  * Returns non-zero (EINTR) if the frontend has detached.  Nothing is
463  * queued in this case and the caller also retains its ref and lock.
464  *
465  * WARNING!  The chain is moving between two different threads, it must
466  *           be locked SHARED to retain its data mapping, not exclusive.
467  *           When multiple operations are in progress at once, chains fed
468  *           back to the frontend for collection can wind up being locked
469  *           in different orders, only a shared lock can prevent a deadlock.
470  *
471  *           Exclusive locks may only be used by a XOP backend node thread
472  *           temporarily, with no direct or indirect dependencies (aka
473  *           blocking/waiting) on other nodes.
474  */
475 int
476 hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
477                  int clindex, int error)
478 {
479         hammer2_xop_fifo_t *fifo;
480
481         /*
482          * Multi-threaded entry into the XOP collector.  We own the
483          * fifo->wi for our clindex.
484          */
485         fifo = &xop->collect[clindex];
486
487         while (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
488                 tsleep_interlock(xop, 0);
489                 if (hammer2_xop_active(xop) == 0) {
490                         error = EINTR;
491                         goto done;
492                 }
493                 if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
494                         tsleep(xop, PINTERLOCKED, "h2feed", hz*60);
495                 }
496         }
497         if (chain) {
498                 hammer2_chain_ref(chain);
499                 hammer2_chain_push_shared_lock(chain);
500         }
501         if (error == 0 && chain)
502                 error = chain->error;
503         fifo->errors[fifo->wi & HAMMER2_XOPFIFO_MASK] = error;
504         fifo->array[fifo->wi & HAMMER2_XOPFIFO_MASK] = chain;
505         cpu_sfence();
506         ++fifo->wi;
507         atomic_add_int(&xop->check_counter, 1);
508         wakeup(&xop->check_counter);    /* XXX optimize */
509         error = 0;
510
511         /*
512          * Cleanup.  On success the fifo holds an additional ref and
513          * shared lock on the chain; on error nothing was queued.
514          *
515          * The caller's own ref and lock remain in both cases.
516          */
517 done:
518         return error;
519 }
520
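/*
 * Illustrative sketch (not part of the driver): the usual shape of a
 * backend XOP function feeding results through hammer2_xop_feed().  The
 * lookup/iteration details are elided and the names are hypothetical.
 * The terminal NULL feed reports end-of-scan (or the scan error) to the
 * collector, matching what the dispatcher does for an inactive frontend.
 */
#if 0
static void
example_backend_func(hammer2_xop_t *arg, int clindex)
{
        hammer2_xop_head_t *xop = &arg->head;
        hammer2_chain_t *chain;
        int error = 0;

        for (;;) {
                chain = NULL;   /* ... next shared-locked chain, if any ... */
                if (chain == NULL)
                        break;
                /* feed picks up chain->error itself when error == 0 */
                error = hammer2_xop_feed(xop, chain, clindex, 0);
                hammer2_chain_unlock(chain);    /* release our own lock ... */
                hammer2_chain_drop(chain);      /* ... and our own ref */
                if (error)
                        break;
        }
        hammer2_xop_feed(xop, NULL, clindex, error);
}
#endif
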
521 /*
522  * (Frontend) collect a response from a running cluster op.
523  *
524  * Responses are fed from all appropriate nodes concurrently
525  * and collected into a cohesive response >= collect_key.
526  *
527  * The collector will return the instant quorum or other requirements
528  * are met, even if some nodes get behind or become non-responsive.
529  *
530  * HAMMER2_XOP_COLLECT_NOWAIT   - Used to 'poll' a completed collection,
531  *                                usually called synchronously from the
532  *                                node XOPs for the strategy code to
533  *                                fake the frontend collection and complete
534  *                                the BIO as soon as possible.
535  *
536  * HAMMER2_XOP_SYNCHRONIZER     - Request synchronization with a particular
537  *                                cluster index, prevents looping when that
538  *                                index is out of sync so caller can act on
539  *                                the out of sync element.  ESRCH and EDEADLK
540  *                                can be returned if this flag is specified.
541  *
542  * Returns 0 on success plus a filled out xop->cluster structure.
543  * Return ENOENT on normal termination.
544  * Otherwise return an error.
545  */
546 int
547 hammer2_xop_collect(hammer2_xop_head_t *xop, int flags)
548 {
549         hammer2_xop_fifo_t *fifo;
550         hammer2_chain_t *chain;
551         hammer2_key_t lokey;
552         int error;
553         int keynull;
554         int adv;                /* advance the element */
555         int i;
556         uint32_t check_counter;
557
558 loop:
559         /*
560          * First loop tries to advance pieces of the cluster which
561          * are out of sync.
562          */
563         lokey = HAMMER2_KEY_MAX;
564         keynull = HAMMER2_CHECK_NULL;
565         check_counter = xop->check_counter;
566         cpu_lfence();
567
568         for (i = 0; i < xop->cluster.nchains; ++i) {
569                 chain = xop->cluster.array[i].chain;
570                 if (chain == NULL) {
571                         adv = 1;
572                 } else if (chain->bref.key < xop->collect_key) {
573                         adv = 1;
574                 } else {
575                         keynull &= ~HAMMER2_CHECK_NULL;
576                         if (lokey > chain->bref.key)
577                                 lokey = chain->bref.key;
578                         adv = 0;
579                 }
580                 if (adv == 0)
581                         continue;
582
583                 /*
584                  * Advance element if possible, advanced element may be NULL.
585                  */
586                 if (chain) {
587                         hammer2_chain_unlock(chain);
588                         hammer2_chain_drop(chain);
589                 }
590                 fifo = &xop->collect[i];
591                 if (fifo->ri != fifo->wi) {
592                         cpu_lfence();
593                         chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
594                         error = fifo->errors[fifo->ri & HAMMER2_XOPFIFO_MASK];
595                         ++fifo->ri;
596                         xop->cluster.array[i].chain = chain;
597                         xop->cluster.array[i].error = error;
598                         if (chain == NULL) {
599                                 /* XXX */
600                                 xop->cluster.array[i].flags |=
601                                                         HAMMER2_CITEM_NULL;
602                         }
603                         if (fifo->wi - fifo->ri < HAMMER2_XOPFIFO / 2)
604                                 wakeup(xop);    /* XXX optimize */
605                         --i;            /* loop on same index */
606                 } else {
607                         /*
608                          * Retain CITEM_NULL flag.  If set just repeat EOF.
609                          * If not, the NULL,0 combination indicates an
610                          * operation in-progress.
611                          */
612                         xop->cluster.array[i].chain = NULL;
613                         /* retain any CITEM_NULL setting */
614                 }
615         }
616
617         /*
618          * Determine whether the lowest collected key meets clustering
619          * requirements.  Returns:
620          *
621          * 0             - key valid, cluster can be returned.
622          *
623          * ENOENT        - normal end of scan, return ENOENT.
624          *
625          * ESRCH         - sufficient elements collected, quorum agreement
626          *                 that lokey is not a valid element and should be
627          *                 skipped.
628          *
629          * EDEADLK       - sufficient elements collected, no quorum agreement
630          *                 (and no agreement possible).  In this situation a
631          *                 repair is needed, for now we loop.
632          *
633          * EINPROGRESS   - insufficient elements collected to resolve, wait
634          *                 for event and loop.
635          */
636         if ((flags & HAMMER2_XOP_COLLECT_WAITALL) &&
637             xop->run_mask != HAMMER2_XOPMASK_VOP) {
638                 error = EINPROGRESS;
639         } else {
640                 error = hammer2_cluster_check(&xop->cluster, lokey, keynull);
641         }
642         if (error == EINPROGRESS) {
643                 if (xop->check_counter == check_counter) {
644                         if (flags & HAMMER2_XOP_COLLECT_NOWAIT)
645                                 goto done;
646                         tsleep_interlock(&xop->check_counter, 0);
647                         cpu_lfence();
648                         if (xop->check_counter == check_counter) {
649                                 tsleep(&xop->check_counter, PINTERLOCKED,
650                                         "h2coll", hz*60);
651                         }
652                 }
653                 goto loop;
654         }
655         if (error == ESRCH) {
656                 if (lokey != HAMMER2_KEY_MAX) {
657                         xop->collect_key = lokey + 1;
658                         goto loop;
659                 }
660                 error = ENOENT;
661         }
662         if (error == EDEADLK) {
663                 kprintf("hammer2: no quorum possible lokey %016jx\n",
664                         lokey);
665                 if (lokey != HAMMER2_KEY_MAX) {
666                         xop->collect_key = lokey + 1;
667                         goto loop;
668                 }
669                 error = ENOENT;
670         }
671         if (lokey == HAMMER2_KEY_MAX)
672                 xop->collect_key = lokey;
673         else
674                 xop->collect_key = lokey + 1;
675 done:
676         return error;
677 }
678
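/*
 * Illustrative sketch (not part of the driver): an iterating frontend
 * (readdir/scan style) calls the collector repeatedly.  Each successful
 * return presents the next cluster-validated element at or above
 * collect_key in xop->cluster; ENOENT marks normal termination.  The
 * names are hypothetical.
 */
#if 0
        hammer2_xop_start(&xop->head, example_backend_func);
        for (;;) {
                error = hammer2_xop_collect(&xop->head, 0);
                if (error)
                        break;
                /* ... consume the element presented by xop->cluster ... */
        }
        hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
        if (error == ENOENT)
                error = 0;      /* normal end of scan */
#endif
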
679 /*
680  * N x M processing threads are available to handle XOPs, N per cluster
681  * index x M cluster nodes.  All the threads for any given cluster index
682  * share and pull from the same xopq.
683  *
684  * Locate and return the next runnable xop, or NULL if no xops are
685  * present or none of the xops are currently runnable (for various reasons).
686  * The xop is left on the queue and serves to block other dependent xops
687  * from being run.
688  *
689  * Dependent xops will not be returned.
690  *
691  * Sets HAMMER2_XOP_FIFO_RUN on the returned xop or returns NULL.
692  *
693  * NOTE! Xops run concurrently for each cluster index.
694  */
695 #define XOP_HASH_SIZE   16
696 #define XOP_HASH_MASK   (XOP_HASH_SIZE - 1)
697
698 static __inline
699 int
700 xop_testhash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
701 {
702         uint32_t mask;
703         int hv;
704
705         hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
706         mask = 1U << (hv & 31);
707         hv >>= 5;
708
709         return ((int)(hash[hv & XOP_HASH_MASK] & mask));
710 }
711
712 static __inline
713 void
714 xop_sethash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
715 {
716         uint32_t mask;
717         int hv;
718
719         hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
720         mask = 1U << (hv & 31);
721         hv >>= 5;
722
723         hash[hv & XOP_HASH_MASK] |= mask;
724 }
725
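/*
 * The hash built by xop_sethash() is a conservative per-scan filter:
 * each xop earlier in the queue marks the bits for its inodes, so a
 * later xop touching the same inode (or merely colliding with it) is
 * skipped until the earlier xop has been dequeued.  Collisions only
 * delay execution; per-inode ordering is preserved.
 */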
726 static
727 hammer2_xop_head_t *
728 hammer2_xop_next(hammer2_thread_t *thr)
729 {
730         hammer2_pfs_t *pmp = thr->pmp;
731         int clindex = thr->clindex;
732         uint32_t hash[XOP_HASH_SIZE] = { 0 };
733         hammer2_xop_head_t *xop;
734
735         hammer2_spin_ex(&pmp->xop_spin);
736         TAILQ_FOREACH(xop, thr->xopq, collect[clindex].entry) {
737                 /*
738                  * Check dependency
739                  */
740                 if (xop_testhash(thr, xop->ip1, hash) ||
741                     (xop->ip2 && xop_testhash(thr, xop->ip2, hash)) ||
742                     (xop->ip3 && xop_testhash(thr, xop->ip3, hash))) {
743                         continue;
744                 }
745                 xop_sethash(thr, xop->ip1, hash);
746                 if (xop->ip2)
747                         xop_sethash(thr, xop->ip2, hash);
748                 if (xop->ip3)
749                         xop_sethash(thr, xop->ip3, hash);
750
751                 /*
752                  * Check already running
753                  */
754                 if (xop->collect[clindex].flags & HAMMER2_XOP_FIFO_RUN)
755                         continue;
756
757                 /*
758                  * Found a good one, return it.
759                  */
760                 atomic_set_int(&xop->collect[clindex].flags,
761                                HAMMER2_XOP_FIFO_RUN);
762                 break;
763         }
764         hammer2_spin_unex(&pmp->xop_spin);
765
766         return xop;
767 }
768
769 /*
770  * Remove the completed XOP from the queue, clear HAMMER2_XOP_FIFO_RUN.
771  *
772  * NOTE! Xops run concurrently for each cluster index.
773  */
774 static
775 void
776 hammer2_xop_dequeue(hammer2_thread_t *thr, hammer2_xop_head_t *xop)
777 {
778         hammer2_pfs_t *pmp = thr->pmp;
779         int clindex = thr->clindex;
780
781         hammer2_spin_ex(&pmp->xop_spin);
782         TAILQ_REMOVE(thr->xopq, xop, collect[clindex].entry);
783         atomic_clear_int(&xop->collect[clindex].flags,
784                          HAMMER2_XOP_FIFO_RUN);
785         hammer2_spin_unex(&pmp->xop_spin);
786 }
787
788 /*
789  * Primary management thread for xops support.  Each node has several such
790  * threads which replicate front-end operations on cluster nodes.
791  *
792  * XOPS threads replicate node operations, allowing each function to focus
793  * on a single node once the operation has been validated cluster-wide.
794  * This is primarily what prevents dead or stalled nodes from stalling
795  * the front-end.
796  */
797 void
798 hammer2_primary_xops_thread(void *arg)
799 {
800         hammer2_thread_t *thr = arg;
801         hammer2_pfs_t *pmp;
802         hammer2_xop_head_t *xop;
803         uint32_t mask;
804         hammer2_xop_func_t last_func = NULL;
805
806         pmp = thr->pmp;
807         /*xgrp = &pmp->xop_groups[thr->repidx]; not needed */
808         mask = 1U << thr->clindex;
809
810         lockmgr(&thr->lk, LK_EXCLUSIVE);
811         while ((thr->flags & HAMMER2_THREAD_STOP) == 0) {
812                 /*
813                  * Handle freeze request
814                  */
815                 if (thr->flags & HAMMER2_THREAD_FREEZE) {
816                         atomic_set_int(&thr->flags, HAMMER2_THREAD_FROZEN);
817                         atomic_clear_int(&thr->flags, HAMMER2_THREAD_FREEZE);
818                 }
819
820                 /*
821                  * Force idle if frozen until unfrozen or stopped.
822                  */
823                 if (thr->flags & HAMMER2_THREAD_FROZEN) {
824                         lksleep(thr->xopq, &thr->lk, 0, "frozen", 0);
825                         continue;
826                 }
827
828                 /*
829                  * Reset state on REMASTER request
830                  */
831                 if (thr->flags & HAMMER2_THREAD_REMASTER) {
832                         atomic_clear_int(&thr->flags, HAMMER2_THREAD_REMASTER);
833                         /* reset state */
834                 }
835
836                 /*
837                  * Process requests.  Each request can be multi-queued.
838                  *
839                  * If we get behind and the frontend VOP is no longer active,
840                  * we retire the request without processing it.  The callback
841                  * may also abort processing if the frontend VOP becomes
842                  * inactive.
843                  */
844                 tsleep_interlock(thr->xopq, 0);
845                 while ((xop = hammer2_xop_next(thr)) != NULL) {
846                         if (hammer2_xop_active(xop)) {
847                                 lockmgr(&thr->lk, LK_RELEASE);
848                                 last_func = xop->func;
849                                 xop->func((hammer2_xop_t *)xop, thr->clindex);
850                                 hammer2_xop_dequeue(thr, xop);
851                                 hammer2_xop_retire(xop, mask);
852                                 lockmgr(&thr->lk, LK_EXCLUSIVE);
853                         } else {
854                                 last_func = xop->func;
855                                 hammer2_xop_feed(xop, NULL, thr->clindex,
856                                                  ECONNABORTED);
857                                 hammer2_xop_dequeue(thr, xop);
858                                 hammer2_xop_retire(xop, mask);
859                         }
860                 }
861
862                 /*
863                  * Wait for event.  The xopq is not interlocked by thr->lk,
864                  * use the tsleep interlock sequence.
865                  *
866                  * For robustness poll on a 30-second interval, but nominally
867                  * expect to be woken up.
868                  */
869                 lksleep(thr->xopq, &thr->lk, PINTERLOCKED, "h2idle", hz*30);
870         }
871
872 #if 0
873         /*
874          * Cleanup / termination
875          */
876         while ((xop = TAILQ_FIRST(&thr->xopq)) != NULL) {
877                 kprintf("hammer2_thread: aborting xop %p\n", xop->func);
878                 TAILQ_REMOVE(&thr->xopq, xop,
879                              collect[thr->clindex].entry);
880                 hammer2_xop_retire(xop, mask);
881         }
882 #endif
883
884         thr->td = NULL;
885         wakeup(thr);
886         lockmgr(&thr->lk, LK_RELEASE);
887         /* thr structure can go invalid after this point */
888 }