sys/vfs/hammer2/hammer2_admin.c
/*
 * Copyright (c) 2015-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements the hammer2 helper thread API, including
 * the frontend/backend XOP API.
 */
#include "hammer2.h"

#define H2XOPDESCRIPTOR(label)                                  \
        hammer2_xop_desc_t hammer2_##label##_desc = {           \
                .storage_func = hammer2_xop_##label,            \
                .id = #label                                    \
        }

H2XOPDESCRIPTOR(ipcluster);
H2XOPDESCRIPTOR(readdir);
H2XOPDESCRIPTOR(nresolve);
H2XOPDESCRIPTOR(unlink);
H2XOPDESCRIPTOR(nrename);
H2XOPDESCRIPTOR(scanlhc);
H2XOPDESCRIPTOR(scanall);
H2XOPDESCRIPTOR(lookup);
H2XOPDESCRIPTOR(delete);
H2XOPDESCRIPTOR(inode_mkdirent);
H2XOPDESCRIPTOR(inode_create);
H2XOPDESCRIPTOR(inode_create_det);
H2XOPDESCRIPTOR(inode_create_ins);
H2XOPDESCRIPTOR(inode_destroy);
H2XOPDESCRIPTOR(inode_chain_sync);
H2XOPDESCRIPTOR(inode_unlinkall);
H2XOPDESCRIPTOR(inode_connect);
H2XOPDESCRIPTOR(inode_flush);
H2XOPDESCRIPTOR(strategy_read);
H2XOPDESCRIPTOR(strategy_write);

/*
 * Set flags and wakeup any waiters.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *          succeeds.
 */
void
hammer2_thr_signal(hammer2_thread_t *thr, uint32_t flags)
{
        uint32_t oflags;
        uint32_t nflags;

        for (;;) {
                oflags = thr->flags;
                cpu_ccfence();
                nflags = (oflags | flags) & ~HAMMER2_THREAD_WAITING;

                if (oflags & HAMMER2_THREAD_WAITING) {
                        if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
                                wakeup(&thr->flags);
                                break;
                        }
                } else {
                        if (atomic_cmpset_int(&thr->flags, oflags, nflags))
                                break;
                }
        }
}

/*
 * Set and clear flags and wakeup any waiters.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *          succeeds.
 */
void
hammer2_thr_signal2(hammer2_thread_t *thr, uint32_t posflags, uint32_t negflags)
{
        uint32_t oflags;
        uint32_t nflags;

        for (;;) {
                oflags = thr->flags;
                cpu_ccfence();
                nflags = (oflags | posflags) &
                        ~(negflags | HAMMER2_THREAD_WAITING);
                if (oflags & HAMMER2_THREAD_WAITING) {
                        if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
                                wakeup(&thr->flags);
                                break;
                        }
                } else {
                        if (atomic_cmpset_int(&thr->flags, oflags, nflags))
                                break;
                }
        }
}

/*
 * Wait until all the bits in flags are set.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *          succeeds.
 */
void
hammer2_thr_wait(hammer2_thread_t *thr, uint32_t flags)
{
        uint32_t oflags;
        uint32_t nflags;

        for (;;) {
                oflags = thr->flags;
                cpu_ccfence();
                if ((oflags & flags) == flags)
                        break;
                nflags = oflags | HAMMER2_THREAD_WAITING;
                tsleep_interlock(&thr->flags, 0);
                if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
                        tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60);
                }
        }
}

/*
 * Wait until any of the bits in flags are set, with timeout.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *          succeeds.
 */
int
hammer2_thr_wait_any(hammer2_thread_t *thr, uint32_t flags, int timo)
{
        uint32_t oflags;
        uint32_t nflags;
        int error;

        error = 0;
        for (;;) {
                oflags = thr->flags;
                cpu_ccfence();
                if (oflags & flags)
                        break;
                nflags = oflags | HAMMER2_THREAD_WAITING;
                tsleep_interlock(&thr->flags, 0);
                if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
                        error = tsleep(&thr->flags, PINTERLOCKED,
                                       "h2twait", timo);
                }
                if (error == ETIMEDOUT) {
                        error = HAMMER2_ERROR_ETIMEDOUT;
                        break;
                }
        }
        return error;
}

/*
 * Wait until the bits in flags are clear.
 *
 * WARNING! During teardown (thr) can disappear the instant our cmpset
 *          succeeds.
 */
void
hammer2_thr_wait_neg(hammer2_thread_t *thr, uint32_t flags)
{
        uint32_t oflags;
        uint32_t nflags;

        for (;;) {
                oflags = thr->flags;
                cpu_ccfence();
                if ((oflags & flags) == 0)
                        break;
                nflags = oflags | HAMMER2_THREAD_WAITING;
                tsleep_interlock(&thr->flags, 0);
                if (atomic_cmpset_int(&thr->flags, oflags, nflags)) {
                        tsleep(&thr->flags, PINTERLOCKED, "h2twait", hz*60);
                }
        }
}

/*
 * Initialize the supplied thread structure, starting the specified
 * thread.
 *
 * NOTE: thr structure can be retained across mounts and unmounts for this
 *       pmp, so make sure the flags are in a sane state.
 */
void
hammer2_thr_create(hammer2_thread_t *thr, hammer2_pfs_t *pmp,
                   hammer2_dev_t *hmp,
                   const char *id, int clindex, int repidx,
                   void (*func)(void *arg))
{
        thr->pmp = pmp;         /* xop helpers */
        thr->hmp = hmp;         /* bulkfree */
        thr->clindex = clindex;
        thr->repidx = repidx;
        TAILQ_INIT(&thr->xopq);
        atomic_clear_int(&thr->flags, HAMMER2_THREAD_STOP |
                                      HAMMER2_THREAD_STOPPED |
                                      HAMMER2_THREAD_FREEZE |
                                      HAMMER2_THREAD_FROZEN);
        if (thr->scratch == NULL)
                thr->scratch = kmalloc(MAXPHYS, M_HAMMER2, M_WAITOK | M_ZERO);
        if (repidx >= 0) {
                lwkt_create(func, thr, &thr->td, NULL, 0, repidx % ncpus,
                            "%s-%s.%02d", id, pmp->pfs_names[clindex], repidx);
        } else if (pmp) {
                lwkt_create(func, thr, &thr->td, NULL, 0, -1,
                            "%s-%s", id, pmp->pfs_names[clindex]);
        } else {
                lwkt_create(func, thr, &thr->td, NULL, 0, -1, "%s", id);
        }
}

/*
 * Terminate a thread.  This function will silently return if the thread
 * was never initialized or has already been deleted.
 *
 * This is accomplished by setting the STOP flag and waiting for the td
 * structure to become NULL.
 */
void
hammer2_thr_delete(hammer2_thread_t *thr)
{
        if (thr->td == NULL)
                return;
        hammer2_thr_signal(thr, HAMMER2_THREAD_STOP);
        hammer2_thr_wait(thr, HAMMER2_THREAD_STOPPED);
        thr->pmp = NULL;
        if (thr->scratch) {
                kfree(thr->scratch, M_HAMMER2);
                thr->scratch = NULL;
        }
        KKASSERT(TAILQ_EMPTY(&thr->xopq));
}

/*
 * Asynchronous remaster request.  Ask the synchronization thread to
 * start over soon (as if it were frozen and unfrozen, but without waiting).
 * The thread always recalculates mastership relationships when restarting.
 */
void
hammer2_thr_remaster(hammer2_thread_t *thr)
{
        if (thr->td == NULL)
                return;
        hammer2_thr_signal(thr, HAMMER2_THREAD_REMASTER);
}

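/*
 * Asynchronous freeze request.  Sets FREEZE and returns immediately; the
 * target thread acknowledges by setting FROZEN on its next pass.
 */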
void
hammer2_thr_freeze_async(hammer2_thread_t *thr)
{
        hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
}

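/*
 * Synchronous freeze.  Request the freeze and wait for the thread to set
 * FROZEN.  Silently returns if the thread was never initialized.
 */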
void
hammer2_thr_freeze(hammer2_thread_t *thr)
{
        if (thr->td == NULL)
                return;
        hammer2_thr_signal(thr, HAMMER2_THREAD_FREEZE);
        hammer2_thr_wait(thr, HAMMER2_THREAD_FROZEN);
}

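/*
 * Synchronous unfreeze.  Request the unfreeze and wait for the thread to
 * clear FROZEN.  Silently returns if the thread was never initialized.
 */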
void
hammer2_thr_unfreeze(hammer2_thread_t *thr)
{
        if (thr->td == NULL)
                return;
        hammer2_thr_signal(thr, HAMMER2_THREAD_UNFREEZE);
        hammer2_thr_wait_neg(thr, HAMMER2_THREAD_FROZEN);
}

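/*
 * Returns non-zero if the thread should break out of its current loop,
 * i.e. if a stop, remaster, or freeze request is pending.
 */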
int
hammer2_thr_break(hammer2_thread_t *thr)
{
        if (thr->flags & (HAMMER2_THREAD_STOP |
                          HAMMER2_THREAD_REMASTER |
                          HAMMER2_THREAD_FREEZE)) {
                return 1;
        }
        return 0;
}

/****************************************************************************
 *                          HAMMER2 XOPS API                                *
 ****************************************************************************/

void
hammer2_xop_group_init(hammer2_pfs_t *pmp, hammer2_xop_group_t *xgrp)
{
        /* no extra fields in structure at the moment */
}

/*
 * Allocate a XOP request.
 *
 * Once allocated a XOP request can be started, collected, and retired,
 * and can be retired early if desired.
 *
 * NOTE: Fifo indices might not be zero but ri == wi on objcache_get().
 */
void *
hammer2_xop_alloc(hammer2_inode_t *ip, int flags)
{
        hammer2_xop_t *xop;

        xop = objcache_get(cache_xops, M_WAITOK);
        KKASSERT(xop->head.cluster.array[0].chain == NULL);

        xop->head.ip1 = ip;
        xop->head.desc = NULL;
        xop->head.flags = flags;
        xop->head.state = 0;
        xop->head.error = 0;
        xop->head.collect_key = 0;
        xop->head.focus_dio = NULL;

        if (flags & HAMMER2_XOP_MODIFYING)
                xop->head.mtid = hammer2_trans_sub(ip->pmp);
        else
                xop->head.mtid = 0;

        xop->head.cluster.nchains = ip->cluster.nchains;
        xop->head.cluster.pmp = ip->pmp;
        xop->head.cluster.flags = HAMMER2_CLUSTER_LOCKED;

        /*
         * run_mask - Active thread (or frontend) associated with XOP
         */
        xop->head.run_mask = HAMMER2_XOPMASK_VOP;

        hammer2_inode_ref(ip);

        return xop;
}

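/*
 * Illustrative frontend sequence (sketch only, not taken from a specific
 * caller; see the VOP frontends for real usage):
 *
 *      xop = hammer2_xop_alloc(ip, 0);
 *      hammer2_xop_setname(&xop->head, name, name_len);
 *      hammer2_xop_start(&xop->head, &hammer2_nresolve_desc);
 *      error = hammer2_xop_collect(&xop->head, 0);
 *      (access the focus via hammer2_xop_gdata()/hammer2_xop_pdata())
 *      hammer2_xop_retire(&xop->head, HAMMER2_XOPMASK_VOP);
 */

/*
 * Set the primary name argument (name1) for the XOP.  The name is copied
 * into a zero-terminated buffer allocated from M_HAMMER2 and is freed
 * when the XOP is retired.
 */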
void
hammer2_xop_setname(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
        xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
        xop->name1_len = name_len;
        bcopy(name, xop->name1, name_len);
}

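/*
 * Set the secondary name argument (name2) for operations which carry two
 * names (e.g. rename).  Freed when the XOP is retired.
 */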
void
hammer2_xop_setname2(hammer2_xop_head_t *xop, const char *name, size_t name_len)
{
        xop->name2 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
        xop->name2_len = name_len;
        bcopy(name, xop->name2, name_len);
}

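/*
 * Set name1 to the fixed-width hexadecimal representation of an inode
 * number and return the resulting name length.
 */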
size_t
hammer2_xop_setname_inum(hammer2_xop_head_t *xop, hammer2_key_t inum)
{
        const size_t name_len = 18;

        xop->name1 = kmalloc(name_len + 1, M_HAMMER2, M_WAITOK | M_ZERO);
        xop->name1_len = name_len;
        ksnprintf(xop->name1, name_len + 1, "0x%016jx", (intmax_t)inum);

        return name_len;
}


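/*
 * Attach additional inodes (ip2, ip3) to the XOP.  Each inode is
 * referenced here and the reference is dropped when the XOP is retired.
 */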
void
hammer2_xop_setip2(hammer2_xop_head_t *xop, hammer2_inode_t *ip2)
{
        xop->ip2 = ip2;
        hammer2_inode_ref(ip2);
}

void
hammer2_xop_setip3(hammer2_xop_head_t *xop, hammer2_inode_t *ip3)
{
        xop->ip3 = ip3;
        hammer2_inode_ref(ip3);
}

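/*
 * Reset the frontend collection state (state, error, collect_key,
 * run_mask) so the XOP can be reissued.
 */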
void
hammer2_xop_reinit(hammer2_xop_head_t *xop)
{
        xop->state = 0;
        xop->error = 0;
        xop->collect_key = 0;
        xop->run_mask = HAMMER2_XOPMASK_VOP;
}

/*
 * A mounted PFS needs Xops threads to support frontend operations.
 */
void
hammer2_xop_helper_create(hammer2_pfs_t *pmp)
{
        int i;
        int j;

        lockmgr(&pmp->lock, LK_EXCLUSIVE);
        pmp->has_xop_threads = 1;

        for (i = 0; i < pmp->iroot->cluster.nchains; ++i) {
                for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
                        if (pmp->xop_groups[j].thrs[i].td)
                                continue;
                        hammer2_thr_create(&pmp->xop_groups[j].thrs[i],
                                           pmp, NULL,
                                           "h2xop", i, j,
                                           hammer2_primary_xops_thread);
                }
        }
        lockmgr(&pmp->lock, LK_RELEASE);
}

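/*
 * Tear down the XOP helper threads for a PFS.  Threads are deleted
 * synchronously; hammer2_thr_delete() asserts that each xopq is empty.
 */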
void
hammer2_xop_helper_cleanup(hammer2_pfs_t *pmp)
{
        int i;
        int j;

        for (i = 0; i < pmp->pfs_nmasters; ++i) {
                for (j = 0; j < HAMMER2_XOPGROUPS; ++j) {
                        if (pmp->xop_groups[j].thrs[i].td)
                                hammer2_thr_delete(&pmp->xop_groups[j].thrs[i]);
                }
        }
        pmp->has_xop_threads = 0;
}

/*
 * Start a XOP request, queueing it to all nodes in the cluster to
 * execute the cluster op.
 *
 * XXX optimize single-target case.
 */
void
hammer2_xop_start_except(hammer2_xop_head_t *xop, hammer2_xop_desc_t *desc,
                         int notidx)
{
        hammer2_inode_t *ip1;
        hammer2_pfs_t *pmp;
        hammer2_thread_t *thr;
        int i;
        int ng;
        int nchains;

        ip1 = xop->ip1;
        pmp = ip1->pmp;
        if (pmp->has_xop_threads == 0)
                hammer2_xop_helper_create(pmp);

        /*
         * The intent of the XOP sequencer is to ensure that ops on the same
         * inode execute in the same order.  This is necessary when issuing
         * modifying operations to multiple targets because some targets might
         * get behind and the frontend is allowed to complete the moment a
         * quorum of targets succeed.
         *
         * Strategy operations:
         *
         *      (1) Must be segregated from non-strategy operations to
         *          avoid a deadlock.  A vfsync and a bread/bwrite can
         *          deadlock the vfsync's buffer list scan.
         *
         *      (2) Reads are separated from writes to avoid write stalls
         *          from excessively interfering with reads.  Reads are allowed
         *          to wander across multiple worker threads for potential
         *          single-file concurrency improvements.
         *
         *      (3) Writes are serialized to a single worker thread (for any
         *          given inode) in order to try to improve block allocation
         *          sequentiality and to reduce lock contention.
         *
         * TODO - RENAME fails here because it is potentially modifying
         *        three different inodes, but we triple-lock the inodes
         *        involved so it shouldn't create a sequencing schism.
         */
        if (xop->flags & HAMMER2_XOP_STRATEGY) {
                hammer2_xop_strategy_t *xopst;
                hammer2_off_t off;
                int cdr;

                xopst = &((hammer2_xop_t *)xop)->xop_strategy;
                ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)));
                if (desc == &hammer2_strategy_read_desc) {
                        off = xopst->lbase / HAMMER2_PBUFSIZE;
                        cdr = hammer2_cluster_data_read;
                        /* sysctl race, load into var */
                        cpu_ccfence();
                        if (cdr)
                                off /= cdr;
                        ng ^= hammer2_icrc32(&off, sizeof(off)) &
                              (hammer2_worker_rmask << 1);
                        ng |= 1;
                } else {
#if 0
                        off = xopst->lbase >> 21;
                        ng ^= hammer2_icrc32(&off, sizeof(off)) & 3;
#endif
                        ng &= ~1;
                }
                ng = ng & (HAMMER2_XOPGROUPS_MASK >> 1);
                ng += HAMMER2_XOPGROUPS / 2;
        } else {
                ng = (int)(hammer2_icrc32(&xop->ip1, sizeof(xop->ip1)));
                ng = ng & (HAMMER2_XOPGROUPS_MASK >> 1);
        }
        xop->desc = desc;

        /*
         * The instant xop is queued another thread can pick it off.  In the
         * case of asynchronous ops, another thread might even finish and
         * deallocate it.
         */
        hammer2_spin_ex(&pmp->xop_spin);
        nchains = ip1->cluster.nchains;
        for (i = 0; i < nchains; ++i) {
                /*
                 * XXX ip1->cluster.array* not stable here.  This temporary
                 *     hack fixes basic issues in target XOPs which need to
                 *     obtain a starting chain from the inode but does not
                 *     address possible races against inode updates which
                 *     might NULL-out a chain.
                 */
                if (i != notidx && ip1->cluster.array[i].chain) {
                        thr = &pmp->xop_groups[ng].thrs[i];
                        atomic_set_64(&xop->run_mask, 1LLU << i);
                        atomic_set_64(&xop->chk_mask, 1LLU << i);
                        xop->collect[i].thr = thr;
                        TAILQ_INSERT_TAIL(&thr->xopq, xop, collect[i].entry);
                }
        }
        hammer2_spin_unex(&pmp->xop_spin);
        /* xop can become invalid at this point */

        /*
         * Each thread has its own xopq
         */
        for (i = 0; i < nchains; ++i) {
                if (i != notidx) {
                        thr = &pmp->xop_groups[ng].thrs[i];
                        hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
                }
        }
}

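/*
 * Convenience wrapper: start the XOP on all nodes in the cluster with no
 * node excluded.
 */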
void
hammer2_xop_start(hammer2_xop_head_t *xop, hammer2_xop_desc_t *desc)
{
        hammer2_xop_start_except(xop, desc, -1);
}

/*
 * Retire a XOP.  Used by both the VOP frontend and by the XOP backend.
 */
void
hammer2_xop_retire(hammer2_xop_head_t *xop, uint64_t mask)
{
        hammer2_chain_t *chain;
        uint64_t nmask;
        int i;

        /*
         * Remove the frontend collector or remove a backend feeder.
         *
         * When removing the frontend we must wakeup any backend feeders
         * who are waiting for FIFO space.
         *
         * When removing the last backend feeder we must wakeup any waiting
         * frontend.
         */
        KKASSERT(xop->run_mask & mask);
        nmask = atomic_fetchadd_64(&xop->run_mask,
                                   -mask + HAMMER2_XOPMASK_FEED);

        /*
         * More than one entity left
         */
        if ((nmask & HAMMER2_XOPMASK_ALLDONE) != mask) {
                /*
                 * Frontend terminating, wakeup any backends waiting on
                 * fifo full.
                 *
                 * NOTE!!! The xop can get ripped out from under us at
                 *         this point, so do not reference it again.
                 *         The wakeup(xop) doesn't touch the xop and
                 *         is ok.
                 */
                if (mask == HAMMER2_XOPMASK_VOP) {
                        if (nmask & HAMMER2_XOPMASK_FIFOW)
                                wakeup(xop);
                }

                /*
                 * Wakeup frontend if the last backend is terminating.
                 */
                nmask -= mask;
                if ((nmask & HAMMER2_XOPMASK_ALLDONE) == HAMMER2_XOPMASK_VOP) {
                        if (nmask & HAMMER2_XOPMASK_WAIT)
                                wakeup(xop);
                }

                return;
        }
        /* else nobody else left, we can ignore FIFOW */

        /*
         * All collectors are gone, we can cleanup and dispose of the XOP.
         * Note that this can wind up being a frontend OR a backend.
         * Pending chains are locked shared and not owned by any thread.
         *
         * Cleanup the collection cluster.
         */
        for (i = 0; i < xop->cluster.nchains; ++i) {
                xop->cluster.array[i].flags = 0;
                chain = xop->cluster.array[i].chain;
                if (chain) {
                        xop->cluster.array[i].chain = NULL;
                        hammer2_chain_drop_unhold(chain);
                }
        }

        /*
         * Cleanup the fifos.  Since we are the only entity left on this
         * xop we don't have to worry about fifo flow control, and one
         * lfence() will do the job.
         */
        cpu_lfence();
        mask = xop->chk_mask;
        for (i = 0; mask && i < HAMMER2_MAXCLUSTER; ++i) {
                hammer2_xop_fifo_t *fifo = &xop->collect[i];
                while (fifo->ri != fifo->wi) {
                        chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
                        if (chain)
                                hammer2_chain_drop_unhold(chain);
                        ++fifo->ri;
                }
                mask &= ~(1U << i);
        }

        /*
         * The inode is only held at this point, simply drop it.
         */
        if (xop->ip1) {
                hammer2_inode_drop(xop->ip1);
                xop->ip1 = NULL;
        }
        if (xop->ip2) {
                hammer2_inode_drop(xop->ip2);
                xop->ip2 = NULL;
        }
        if (xop->ip3) {
                hammer2_inode_drop(xop->ip3);
                xop->ip3 = NULL;
        }
        if (xop->name1) {
                kfree(xop->name1, M_HAMMER2);
                xop->name1 = NULL;
                xop->name1_len = 0;
        }
        if (xop->name2) {
                kfree(xop->name2, M_HAMMER2);
                xop->name2 = NULL;
                xop->name2_len = 0;
        }

        objcache_put(cache_xops, xop);
}

/*
 * (Backend) Returns non-zero if the frontend is still attached.
 */
int
hammer2_xop_active(hammer2_xop_head_t *xop)
{
        if (xop->run_mask & HAMMER2_XOPMASK_VOP)
                return 1;
        else
                return 0;
}

/*
 * (Backend) Feed chain data through the cluster validator and back to
 * the frontend.  Chains are fed from multiple nodes concurrently
 * and pipelined via per-node FIFOs in the XOP.
 *
 * The chain must be locked (either shared or exclusive).  The caller may
 * unlock and drop the chain on return.  This function will add an extra
 * ref and hold the chain's data for the pass-back.
 *
 * No xop lock is needed because we are only manipulating fields under
 * our direct control.
 *
 * Returns 0 on success and a hammer2 error code if sync is permanently
 * lost.  The caller retains a ref on the chain but by convention
 * the lock is typically inherited by the xop (caller loses lock).
 *
 * Returns non-zero on error.  In this situation the caller retains a
 * ref on the chain but loses the lock (we unlock here).
 */
int
hammer2_xop_feed(hammer2_xop_head_t *xop, hammer2_chain_t *chain,
                 int clindex, int error)
{
        hammer2_xop_fifo_t *fifo;
        uint64_t mask;

        /*
         * Early termination (typically of xop_readdir)
         */
        if (hammer2_xop_active(xop) == 0) {
                error = HAMMER2_ERROR_ABORTED;
                goto done;
        }

        /*
         * Multi-threaded entry into the XOP collector.  We own the
         * fifo->wi for our clindex.
         */
        fifo = &xop->collect[clindex];

        if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO)
                lwkt_yield();
        while (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
                atomic_set_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
                mask = xop->run_mask;
                if ((mask & HAMMER2_XOPMASK_VOP) == 0) {
                        error = HAMMER2_ERROR_ABORTED;
                        goto done;
                }
                tsleep_interlock(xop, 0);
                if (atomic_cmpset_64(&xop->run_mask, mask,
                                     mask | HAMMER2_XOPMASK_FIFOW)) {
                        if (fifo->ri == fifo->wi - HAMMER2_XOPFIFO) {
                                tsleep(xop, PINTERLOCKED, "h2feed", hz*60);
                        }
                }
                /* retry */
        }
        atomic_clear_int(&fifo->flags, HAMMER2_XOP_FIFO_STALL);
        if (chain)
                hammer2_chain_ref_hold(chain);
        if (error == 0 && chain)
                error = chain->error;
        fifo->errors[fifo->wi & HAMMER2_XOPFIFO_MASK] = error;
        fifo->array[fifo->wi & HAMMER2_XOPFIFO_MASK] = chain;
        cpu_sfence();
        ++fifo->wi;

        mask = atomic_fetchadd_64(&xop->run_mask, HAMMER2_XOPMASK_FEED);
        if (mask & HAMMER2_XOPMASK_WAIT) {
                atomic_clear_64(&xop->run_mask, HAMMER2_XOPMASK_WAIT);
                wakeup(xop);
        }
        error = 0;

        /*
         * Cleanup.  If an error occurred we eat the lock.  If no error
         * occurred the fifo inherits the lock and gains an additional ref.
         *
         * The caller's ref remains in both cases.
         */
done:
        return error;
}

/*
 * (Frontend) collect a response from a running cluster op.
 *
 * Responses are fed from all appropriate nodes concurrently
 * and collected into a cohesive response >= collect_key.
 *
 * The collector will return the instant quorum or other requirements
 * are met, even if some nodes get behind or become non-responsive.
 *
 * HAMMER2_XOP_COLLECT_NOWAIT   - Used to 'poll' a completed collection,
 *                                usually called synchronously from the
 *                                node XOPs for the strategy code to
 *                                fake the frontend collection and complete
 *                                the BIO as soon as possible.
 *
 * HAMMER2_XOP_SYNCHRONIZER     - Request synchronization with a particular
 *                                cluster index, prevents looping when that
 *                                index is out of sync so caller can act on
 *                                the out of sync element.  ESRCH and EDEADLK
 *                                can be returned if this flag is specified.
 *
 * Returns 0 on success plus a filled out xop->cluster structure.
 * Return ENOENT on normal termination.
 * Otherwise return an error.
 *
 * WARNING! If the xop returns a cluster with a non-NULL focus, note that
 *          none of the chains in the cluster (or the focus) are either
 *          locked or I/O synchronized with the cpu.  hammer2_xop_gdata()
 *          and hammer2_xop_pdata() must be used to safely access the focus
 *          chain's content.
 *
 *          The frontend can make certain assumptions based on higher-level
 *          locking done by the frontend, but data integrity absolutely
 *          requires using the gdata/pdata API.
 */
int
hammer2_xop_collect(hammer2_xop_head_t *xop, int flags)
{
        hammer2_xop_fifo_t *fifo;
        hammer2_chain_t *chain;
        hammer2_key_t lokey;
        uint64_t mask;
        int error;
        int keynull;
        int adv;                /* advance the element */
        int i;

loop:
        /*
         * First loop tries to advance pieces of the cluster which
         * are out of sync.
         */
        lokey = HAMMER2_KEY_MAX;
        keynull = HAMMER2_CHECK_NULL;
        mask = xop->run_mask;
        cpu_lfence();

        for (i = 0; i < xop->cluster.nchains; ++i) {
                chain = xop->cluster.array[i].chain;
                if (chain == NULL) {
                        adv = 1;
                } else if (chain->bref.key < xop->collect_key) {
                        adv = 1;
                } else {
                        keynull &= ~HAMMER2_CHECK_NULL;
                        if (lokey > chain->bref.key)
                                lokey = chain->bref.key;
                        adv = 0;
                }
                if (adv == 0)
                        continue;

                /*
                 * Advance element if possible, advanced element may be NULL.
                 */
                if (chain)
                        hammer2_chain_drop_unhold(chain);

                fifo = &xop->collect[i];
                if (fifo->ri != fifo->wi) {
                        cpu_lfence();
                        chain = fifo->array[fifo->ri & HAMMER2_XOPFIFO_MASK];
                        error = fifo->errors[fifo->ri & HAMMER2_XOPFIFO_MASK];
                        ++fifo->ri;
                        xop->cluster.array[i].chain = chain;
                        xop->cluster.array[i].error = error;
                        if (chain == NULL) {
                                /* XXX */
                                xop->cluster.array[i].flags |=
                                                        HAMMER2_CITEM_NULL;
                        }
                        if (fifo->wi - fifo->ri <= HAMMER2_XOPFIFO / 2) {
                                if (fifo->flags & HAMMER2_XOP_FIFO_STALL) {
                                        atomic_clear_int(&fifo->flags,
                                                    HAMMER2_XOP_FIFO_STALL);
                                        wakeup(xop);
                                        lwkt_yield();
                                }
                        }
                        --i;            /* loop on same index */
                } else {
                        /*
                         * Retain CITEM_NULL flag.  If set just repeat EOF.
                         * If not, the NULL,0 combination indicates an
                         * operation in-progress.
                         */
                        xop->cluster.array[i].chain = NULL;
                        /* retain any CITEM_NULL setting */
                }
        }

        /*
         * Determine whether the lowest collected key meets clustering
         * requirements.  Returns:
         *
         * 0             - key valid, cluster can be returned.
         *
         * ENOENT        - normal end of scan, return ENOENT.
         *
         * ESRCH         - sufficient elements collected, quorum agreement
         *                 that lokey is not a valid element and should be
         *                 skipped.
         *
         * EDEADLK       - sufficient elements collected, no quorum agreement
         *                 (and no agreement possible).  In this situation a
         *                 repair is needed, for now we loop.
         *
         * EINPROGRESS   - insufficient elements collected to resolve, wait
         *                 for event and loop.
         */
        if ((flags & HAMMER2_XOP_COLLECT_WAITALL) &&
            (mask & HAMMER2_XOPMASK_ALLDONE) != HAMMER2_XOPMASK_VOP) {
                error = HAMMER2_ERROR_EINPROGRESS;
        } else {
                error = hammer2_cluster_check(&xop->cluster, lokey, keynull);
        }
        if (error == HAMMER2_ERROR_EINPROGRESS) {
                if (flags & HAMMER2_XOP_COLLECT_NOWAIT)
                        goto done;
                tsleep_interlock(xop, 0);
                if (atomic_cmpset_64(&xop->run_mask,
                                     mask, mask | HAMMER2_XOPMASK_WAIT)) {
                        tsleep(xop, PINTERLOCKED, "h2coll", hz*60);
                }
                goto loop;
        }
        if (error == HAMMER2_ERROR_ESRCH) {
                if (lokey != HAMMER2_KEY_MAX) {
                        xop->collect_key = lokey + 1;
                        goto loop;
                }
                error = HAMMER2_ERROR_ENOENT;
        }
        if (error == HAMMER2_ERROR_EDEADLK) {
                kprintf("hammer2: no quorum possible lokey %016jx\n",
                        lokey);
                if (lokey != HAMMER2_KEY_MAX) {
                        xop->collect_key = lokey + 1;
                        goto loop;
                }
                error = HAMMER2_ERROR_ENOENT;
        }
        if (lokey == HAMMER2_KEY_MAX)
                xop->collect_key = lokey;
        else
                xop->collect_key = lokey + 1;
done:
        return error;
}

/*
 * N x M processing threads are available to handle XOPs, N per cluster
 * index x M cluster nodes.
 *
 * Locate and return the next runnable xop, or NULL if no xops are
 * present or none of the xops are currently runnable (for various reasons).
 * The xop is left on the queue and serves to block other dependent xops
 * from being run.
 *
 * Dependent xops will not be returned.
 *
 * Sets HAMMER2_XOP_FIFO_RUN on the returned xop or returns NULL.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
#define XOP_HASH_SIZE   16
#define XOP_HASH_MASK   (XOP_HASH_SIZE - 1)

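/*
 * Small bitmap hash used by hammer2_xop_next() to detect inode
 * dependencies among XOPs queued to the same thread.  xop_testhash()
 * returns non-zero if the (ip, thr) bit is already set, xop_sethash()
 * sets it.  Hash collisions are possible and merely make the dependency
 * check conservative (an independent xop may be skipped for this pass).
 */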
static __inline
int
xop_testhash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
        uint32_t mask;
        int hv;

        hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
        mask = 1U << (hv & 31);
        hv >>= 5;

        return ((int)(hash[hv & XOP_HASH_MASK] & mask));
}

static __inline
void
xop_sethash(hammer2_thread_t *thr, hammer2_inode_t *ip, uint32_t *hash)
{
        uint32_t mask;
        int hv;

        hv = (int)((uintptr_t)ip + (uintptr_t)thr) / sizeof(hammer2_inode_t);
        mask = 1U << (hv & 31);
        hv >>= 5;

        hash[hv & XOP_HASH_MASK] |= mask;
}

static
hammer2_xop_head_t *
hammer2_xop_next(hammer2_thread_t *thr)
{
        hammer2_pfs_t *pmp = thr->pmp;
        int clindex = thr->clindex;
        uint32_t hash[XOP_HASH_SIZE] = { 0 };
        hammer2_xop_head_t *xop;

        hammer2_spin_ex(&pmp->xop_spin);
        TAILQ_FOREACH(xop, &thr->xopq, collect[clindex].entry) {
                /*
                 * Check dependency
                 */
                if (xop_testhash(thr, xop->ip1, hash) ||
                    (xop->ip2 && xop_testhash(thr, xop->ip2, hash)) ||
                    (xop->ip3 && xop_testhash(thr, xop->ip3, hash))) {
                        continue;
                }
                xop_sethash(thr, xop->ip1, hash);
                if (xop->ip2)
                        xop_sethash(thr, xop->ip2, hash);
                if (xop->ip3)
                        xop_sethash(thr, xop->ip3, hash);

                /*
                 * Check already running
                 */
                if (xop->collect[clindex].flags & HAMMER2_XOP_FIFO_RUN)
                        continue;

                /*
                 * Found a good one, return it.
                 */
                atomic_set_int(&xop->collect[clindex].flags,
                               HAMMER2_XOP_FIFO_RUN);
                break;
        }
        hammer2_spin_unex(&pmp->xop_spin);

        return xop;
}

/*
 * Remove the completed XOP from the queue, clear HAMMER2_XOP_FIFO_RUN.
 *
 * NOTE! Xops run concurrently for each cluster index.
 */
static
void
hammer2_xop_dequeue(hammer2_thread_t *thr, hammer2_xop_head_t *xop)
{
        hammer2_pfs_t *pmp = thr->pmp;
        int clindex = thr->clindex;

        hammer2_spin_ex(&pmp->xop_spin);
        TAILQ_REMOVE(&thr->xopq, xop, collect[clindex].entry);
        atomic_clear_int(&xop->collect[clindex].flags,
                         HAMMER2_XOP_FIFO_RUN);
        hammer2_spin_unex(&pmp->xop_spin);
        if (TAILQ_FIRST(&thr->xopq))
                hammer2_thr_signal(thr, HAMMER2_THREAD_XOPQ);
}

/*
 * Primary management thread for xops support.  Each node has several such
 * threads which replicate front-end operations on cluster nodes.
 *
 * XOPS threads carry out the per-node operations, allowing each function
 * to focus on a single node in the cluster after the operation has been
 * validated against the cluster.
 * This is primarily what prevents dead or stalled nodes from stalling
 * the front-end.
 */
void
hammer2_primary_xops_thread(void *arg)
{
        hammer2_thread_t *thr = arg;
        hammer2_pfs_t *pmp;
        hammer2_xop_head_t *xop;
        uint64_t mask;
        uint32_t flags;
        uint32_t nflags;
        hammer2_xop_desc_t *last_desc = NULL;

        pmp = thr->pmp;
        /*xgrp = &pmp->xop_groups[thr->repidx]; not needed */
        mask = 1LLU << thr->clindex;

        for (;;) {
                flags = thr->flags;

                /*
                 * Handle stop request
                 */
                if (flags & HAMMER2_THREAD_STOP)
                        break;

                /*
                 * Handle freeze request
                 */
                if (flags & HAMMER2_THREAD_FREEZE) {
                        hammer2_thr_signal2(thr, HAMMER2_THREAD_FROZEN,
                                                 HAMMER2_THREAD_FREEZE);
                        continue;
                }

                if (flags & HAMMER2_THREAD_UNFREEZE) {
                        hammer2_thr_signal2(thr, 0,
                                                 HAMMER2_THREAD_FROZEN |
                                                 HAMMER2_THREAD_UNFREEZE);
                        continue;
                }

                /*
                 * Force idle if frozen until unfrozen or stopped.
                 */
                if (flags & HAMMER2_THREAD_FROZEN) {
                        hammer2_thr_wait_any(thr,
                                             HAMMER2_THREAD_UNFREEZE |
                                             HAMMER2_THREAD_STOP,
                                             0);
                        continue;
                }

                /*
                 * Reset state on REMASTER request
                 */
                if (flags & HAMMER2_THREAD_REMASTER) {
                        hammer2_thr_signal2(thr, 0, HAMMER2_THREAD_REMASTER);
                        /* reset state here */
                        continue;
                }

                /*
                 * Process requests.  Each request can be multi-queued.
                 *
                 * If we get behind and the frontend VOP is no longer active,
                 * we retire the request without processing it.  The callback
                 * may also abort processing if the frontend VOP becomes
                 * inactive.
                 */
                if (flags & HAMMER2_THREAD_XOPQ) {
                        nflags = flags & ~HAMMER2_THREAD_XOPQ;
                        if (!atomic_cmpset_int(&thr->flags, flags, nflags))
                                continue;
                        flags = nflags;
                        /* fall through */
                }
                while ((xop = hammer2_xop_next(thr)) != NULL) {
                        if (hammer2_xop_active(xop)) {
                                last_desc = xop->desc;
                                xop->desc->storage_func((hammer2_xop_t *)xop,
                                                        thr->scratch,
                                                        thr->clindex);
                                hammer2_xop_dequeue(thr, xop);
                                hammer2_xop_retire(xop, mask);
                        } else {
                                last_desc = xop->desc;
                                hammer2_xop_feed(xop, NULL, thr->clindex,
                                                 ECONNABORTED);
                                hammer2_xop_dequeue(thr, xop);
                                hammer2_xop_retire(xop, mask);
                        }
                }

                /*
                 * Wait for event, interlock using THREAD_WAITING and
                 * THREAD_SIGNAL.
                 *
                 * For robustness poll on a 30-second interval, but nominally
                 * expect to be woken up.
                 */
                nflags = flags | HAMMER2_THREAD_WAITING;

                tsleep_interlock(&thr->flags, 0);
                if (atomic_cmpset_int(&thr->flags, flags, nflags)) {
                        tsleep(&thr->flags, PINTERLOCKED, "h2idle", hz*30);
                }
        }

#if 0
        /*
         * Cleanup / termination
         */
        while ((xop = TAILQ_FIRST(&thr->xopq)) != NULL) {
                kprintf("hammer2_thread: aborting xop %s\n", xop->desc->id);
                TAILQ_REMOVE(&thr->xopq, xop,
                             collect[thr->clindex].entry);
                hammer2_xop_retire(xop, mask);
        }
#endif
        thr->td = NULL;
        hammer2_thr_signal(thr, HAMMER2_THREAD_STOPPED);
        /* thr structure can go invalid after this point */
}