hammer2 - Misc cluster protocol work
lib/libdmsg/msg_lnk.c (dragonfly.git)
1 /*
2  * Copyright (c) 2012-2014 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 /*
35  * LNK_SPAN PROTOCOL SUPPORT FUNCTIONS - Please see sys/dmsg.h for an
36  * involved explanation of the protocol.
37  */
38
39 #include "dmsg_local.h"
40
41 /*
42  * Maximum spanning tree distance.  This has the practical effect of
43  * stopping tail-chasing closed loops when a feeder span is lost.
44  */
45 #define DMSG_SPAN_MAXDIST       16
46
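/*
 * Worked example (derived from the relay code below): dmsg_generate_relay()
 * re-transmits a received SPAN with dist+1 and dmsg_relay_scan_specific()
 * refuses to relay any received SPAN whose dist exceeds DMSG_SPAN_MAXDIST.
 * A SPAN received at dist=16 may still go back out at dist=17, but the next
 * hop will not relay it further, bounding the depth of any relay chain and
 * of any transient loop while the topology converges.
 */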
47 /*
48  * RED-BLACK TREE DEFINITIONS
49  *
50  * We need to track:
51  *
52  * (1) shared fsid's (a cluster).
53  * (2) unique fsid's (a node in a cluster) <--- LNK_SPAN transactions.
54  *
55  * We need to aggregate all active LNK_SPANs and create our own
56  * outgoing LNK_SPAN transactions on each of our connections representing
57  * the aggregated state.
58  *
59  * h2span_conn          - List of iocom connections that wish to receive SPAN
60  *                        propagation from other connections.  Might contain
61  *                        a filter string.  Only iocom's with an open
62  *                        LNK_CONN transaction are applicable for SPAN
63  *                        propagation.
64  *
65  * h2span_relay         - List of links relayed (via SPAN).  Essentially
66  *                        each relay structure represents a LNK_SPAN
67  *                        transaction that we initiated, versus h2span_link
68  *                        which is a LNK_SPAN transaction that we received.
69  *
70  * --
71  *
72  * h2span_cluster       - Organizes the shared fsid's.  One structure for
73  *                        each cluster.
74  *
75  * h2span_node          - Organizes the nodes in a cluster.  One structure
76  *                        for each unique {cluster,node}, aka {fsid, pfs_fsid}.
77  *
78  * h2span_link          - Organizes all incoming and outgoing LNK_SPAN message
79  *                        transactions related to a node.
80  *
81  *                        One h2span_link structure for each incoming LNK_SPAN
82  *                        transaction.  Links selected for propagation back
83  *                        out are also where the outgoing LNK_SPAN messages
84  *                        are indexed into (so we can propagate changes).
85  *
86  *                        The h2span_link's use a red-black tree sorted by the
87  *                        distance hop metric of the incoming LNK_SPAN.  We
88  *                        then select the top N for outgoing.  When the
89  *                        topology changes the top N may also change and cause
90  *                        new outgoing LNK_SPAN transactions to be opened
91  *                        and less desirable ones to be closed, causing
92  *                        transactional aborts within the message flow in
93  *                        the process.
94  *
95  * Also note            - All outgoing LNK_SPAN message transactions are also
96  *                        entered into a red-black tree for use by the routing
97  *                        function.  This is handled by msg.c in the state
98  *                        code, not here.
99  */
100
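/*
 * Illustrative example of the aggregation (a sketch, not exhaustive):
 *
 *      cluster_tree
 *        h2span_cluster (pfs_clid X, peer_type, cl_label)
 *          h2span_node (pfs_fsid A, fs_label)
 *            h2span_link  dist=1   <- LNK_SPAN received on iocom #1
 *            h2span_link  dist=2   <- LNK_SPAN received on iocom #2
 *          h2span_node (pfs_fsid B, fs_label)
 *            h2span_link  dist=1   <- LNK_SPAN received on iocom #2
 *
 * For each h2span_conn (an iocom with an open LNK_CONN) the best link(s)
 * per node are relayed back out as new LNK_SPAN transactions, each tracked
 * by an h2span_relay hung off both the conn's relay tree and the
 * originating link's relayq.
 */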
101 struct h2span_link;
102 struct h2span_relay;
103 TAILQ_HEAD(h2span_conn_queue, h2span_conn);
104 TAILQ_HEAD(h2span_relay_queue, h2span_relay);
105
106 RB_HEAD(h2span_cluster_tree, h2span_cluster);
107 RB_HEAD(h2span_node_tree, h2span_node);
108 RB_HEAD(h2span_link_tree, h2span_link);
109 RB_HEAD(h2span_relay_tree, h2span_relay);
110 uint32_t DMsgRNSS;
111
112 /*
113  * Received LNK_CONN transaction enables SPAN protocol over connection.
114  * (may contain filter).  Typically one for each mount and several may
115  * share the same media.
116  */
117 struct h2span_conn {
118         TAILQ_ENTRY(h2span_conn) entry;
119         struct h2span_relay_tree tree;
120         dmsg_state_t *state;
121         dmsg_lnk_conn_t lnk_conn;
122 };
123
124 /*
125  * All received LNK_SPANs are organized by cluster (pfs_clid),
126  * node (pfs_fsid), and link (received LNK_SPAN transaction).
127  */
128 struct h2span_cluster {
129         RB_ENTRY(h2span_cluster) rbnode;
130         struct h2span_node_tree tree;
131         uuid_t  pfs_clid;               /* shared fsid */
132         uint8_t peer_type;
133         char    cl_label[128];          /* cluster label (typ PEER_BLOCK) */
134         int     refs;                   /* prevents destruction */
135 };
136
137 struct h2span_node {
138         RB_ENTRY(h2span_node) rbnode;
139         struct h2span_link_tree tree;
140         struct h2span_cluster *cls;
141         uint8_t pfs_type;
142         uuid_t  pfs_fsid;               /* unique fsid */
143         char    fs_label[128];          /* fs label (typ PEER_HAMMER2) */
144         void    *opaque;
145 };
146
147 struct h2span_link {
148         RB_ENTRY(h2span_link) rbnode;
149         dmsg_state_t    *state;         /* state<->link */
150         struct h2span_node *node;       /* related node */
151         struct h2span_relay_queue relayq; /* relay out */
152         dmsg_lnk_span_t lnk_span;
153 };
154
155 /*
156  * Any LNK_SPAN transactions we receive which are relayed out over other
157  * connections use this structure to track the LNK_SPAN transactions
158  * we initiate (relay out) on other connections.  We only relay out
159  * LNK_SPANs on connections we have an open CONN transaction for.
160  *
161  * The relay structure points to the outgoing LNK_SPAN trans (out_state)
162  * and to the incoming LNK_SPAN transaction (in_state).  The relay
163  * structure holds refs on the related states.
164  *
165  * In many respects this is the core of the protocol... actually figuring
166  * out what LNK_SPANs to relay.  The spanid used for relaying is the
167  * address of the 'state' structure, which is why h2span_relay has to
168  * be entered into a RB-TREE based at h2span_conn (so we can look
169  * up the spanid to validate it).
170  */
171 struct h2span_relay {
172         TAILQ_ENTRY(h2span_relay) entry;        /* from link */
173         RB_ENTRY(h2span_relay) rbnode;          /* from h2span_conn */
174         struct h2span_conn      *conn;          /* related CONN transaction */
175         dmsg_state_t            *source_rt;     /* h2span_link state */
176         dmsg_state_t            *target_rt;     /* h2span_relay state */
177 };
178
179 typedef struct h2span_conn h2span_conn_t;
180 typedef struct h2span_cluster h2span_cluster_t;
181 typedef struct h2span_node h2span_node_t;
182 typedef struct h2span_link h2span_link_t;
183 typedef struct h2span_relay h2span_relay_t;
184
185 #define dmsg_termstr(array)     _dmsg_termstr((array), sizeof(array))
186
187 static h2span_relay_t *dmsg_generate_relay(h2span_conn_t *conn,
188                                         h2span_link_t *slink);
189 static uint32_t dmsg_rnss(void);
190
191 static __inline
192 void
193 _dmsg_termstr(char *base, size_t size)
194 {
195         base[size-1] = 0;
196 }
197
198 /*
199  * Cluster peer_type, uuid, AND label must match for a match
200  */
201 static
202 int
203 h2span_cluster_cmp(h2span_cluster_t *cls1, h2span_cluster_t *cls2)
204 {
205         int r;
206
207         if (cls1->peer_type < cls2->peer_type)
208                 return(-1);
209         if (cls1->peer_type > cls2->peer_type)
210                 return(1);
211         r = uuid_compare(&cls1->pfs_clid, &cls2->pfs_clid, NULL);
212         if (r == 0)
213                 r = strcmp(cls1->cl_label, cls2->cl_label);
214
215         return r;
216 }
217
218 /*
219  * Match against fs_label/pfs_fsid.  Together these two items represent a
220  * unique node.  In most cases the primary differentiator is pfs_fsid but
221  * we also string-match fs_label.
222  */
223 static
224 int
225 h2span_node_cmp(h2span_node_t *node1, h2span_node_t *node2)
226 {
227         int r;
228
229         r = strcmp(node1->fs_label, node2->fs_label);
230         if (r == 0)
231                 r = uuid_compare(&node1->pfs_fsid, &node2->pfs_fsid, NULL);
232         return (r);
233 }
234
235 /*
236  * Sort/subsort must match h2span_relay_cmp() under any given node
237  * to make the aggregation algorithm easier, so the best links are
238  * in the same sorted order as the best relays.
239  *
240  * NOTE: We cannot use link*->state->msgid because this msgid is created
241  *       by each remote host and thus might wind up being the same.
242  */
243 static
244 int
245 h2span_link_cmp(h2span_link_t *link1, h2span_link_t *link2)
246 {
247         if (link1->lnk_span.dist < link2->lnk_span.dist)
248                 return(-1);
249         if (link1->lnk_span.dist > link2->lnk_span.dist)
250                 return(1);
251         if (link1->lnk_span.rnss < link2->lnk_span.rnss)
252                 return(-1);
253         if (link1->lnk_span.rnss > link2->lnk_span.rnss)
254                 return(1);
255 #if 1
256         if ((uintptr_t)link1->state < (uintptr_t)link2->state)
257                 return(-1);
258         if ((uintptr_t)link1->state > (uintptr_t)link2->state)
259                 return(1);
260 #else
261         if (link1->state->msgid < link2->state->msgid)
262                 return(-1);
263         if (link1->state->msgid > link2->state->msgid)
264                 return(1);
265 #endif
266         return(0);
267 }
268
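/*
 * Example of the resulting order (a sketch): three incoming SPANs for the
 * same node with (dist, rnss) of (1,40), (2,10) and (2,95) sort in exactly
 * that sequence, so dmsg_relay_scan_specific() sees the lowest-dist link
 * first and ties on dist are broken deterministically by rnss (and finally
 * by state pointer).
 */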
269 /*
270  * Relay entries are sorted by node, subsorted by distance and link
271  * address (so we can match up the conn->tree relay topology with
272  * a node's link topology).
273  */
274 static
275 int
276 h2span_relay_cmp(h2span_relay_t *relay1, h2span_relay_t *relay2)
277 {
278         h2span_link_t *link1 = relay1->source_rt->any.link;
279         h2span_link_t *link2 = relay2->source_rt->any.link;
280
281         if ((intptr_t)link1->node < (intptr_t)link2->node)
282                 return(-1);
283         if ((intptr_t)link1->node > (intptr_t)link2->node)
284                 return(1);
285         if (link1->lnk_span.dist < link2->lnk_span.dist)
286                 return(-1);
287         if (link1->lnk_span.dist > link2->lnk_span.dist)
288                 return(1);
289         if (link1->lnk_span.rnss < link2->lnk_span.rnss)
290                 return(-1);
291         if (link1->lnk_span.rnss > link2->lnk_span.rnss)
292                 return(1);
293 #if 1
294         if ((uintptr_t)link1->state < (uintptr_t)link2->state)
295                 return(-1);
296         if ((uintptr_t)link1->state > (uintptr_t)link2->state)
297                 return(1);
298 #else
299         if (link1->state->msgid < link2->state->msgid)
300                 return(-1);
301         if (link1->state->msgid > link2->state->msgid)
302                 return(1);
303 #endif
304         return(0);
305 }
306
307 RB_PROTOTYPE_STATIC(h2span_cluster_tree, h2span_cluster,
308              rbnode, h2span_cluster_cmp);
309 RB_PROTOTYPE_STATIC(h2span_node_tree, h2span_node,
310              rbnode, h2span_node_cmp);
311 RB_PROTOTYPE_STATIC(h2span_link_tree, h2span_link,
312              rbnode, h2span_link_cmp);
313 RB_PROTOTYPE_STATIC(h2span_relay_tree, h2span_relay,
314              rbnode, h2span_relay_cmp);
315
316 RB_GENERATE_STATIC(h2span_cluster_tree, h2span_cluster,
317              rbnode, h2span_cluster_cmp);
318 RB_GENERATE_STATIC(h2span_node_tree, h2span_node,
319              rbnode, h2span_node_cmp);
320 RB_GENERATE_STATIC(h2span_link_tree, h2span_link,
321              rbnode, h2span_link_cmp);
322 RB_GENERATE_STATIC(h2span_relay_tree, h2span_relay,
323              rbnode, h2span_relay_cmp);
324
325 /*
326  * Global mutex protects cluster_tree lookups, connq, mediaq.
327  */
328 static pthread_mutex_t cluster_mtx;
329 static struct h2span_cluster_tree cluster_tree = RB_INITIALIZER(cluster_tree);
330 static struct h2span_conn_queue connq = TAILQ_HEAD_INITIALIZER(connq);
331 static struct dmsg_media_queue mediaq = TAILQ_HEAD_INITIALIZER(mediaq);
332
333 static void dmsg_lnk_span(dmsg_msg_t *msg);
334 static void dmsg_lnk_conn(dmsg_msg_t *msg);
335 static void dmsg_lnk_ping(dmsg_msg_t *msg);
336 static void dmsg_lnk_relay(dmsg_msg_t *msg);
337 static void dmsg_relay_scan(h2span_conn_t *conn, h2span_node_t *node);
338 static void dmsg_relay_delete(h2span_relay_t *relay);
339
340 void
341 dmsg_msg_lnk_signal(dmsg_iocom_t *iocom __unused)
342 {
343         pthread_mutex_lock(&cluster_mtx);
344         dmsg_relay_scan(NULL, NULL);
345         pthread_mutex_unlock(&cluster_mtx);
346 }
347
348 /*
349  * DMSG_PROTO_LNK - Generic DMSG_PROTO_LNK.
350  *            (incoming iocom lock not held)
351  *
352  * This function is typically called for one-way messages and transaction
353  * opens, since state->func is only assigned after the open.  It will also
354  * be called if no state->func was assigned at transaction-open.
355  */
356 void
357 dmsg_msg_lnk(dmsg_msg_t *msg)
358 {
359         dmsg_iocom_t *iocom = msg->state->iocom;
360
361         switch(msg->tcmd & DMSGF_BASECMDMASK) {
362         case DMSG_LNK_CONN:
363                 dmsg_lnk_conn(msg);
364                 break;
365         case DMSG_LNK_SPAN:
366                 dmsg_lnk_span(msg);
367                 break;
368         case DMSG_LNK_PING:
369                 dmsg_lnk_ping(msg);
370                 break;
371         default:
372                 iocom->usrmsg_callback(msg, 1);
373                 /* state invalid after reply */
374                 break;
375         }
376 }
377
378 /*
379  * LNK_CONN - iocom identify message reception.
380  *            (incoming iocom lock not held)
381  *
382  * Remote node identifies itself to us, sets up a SPAN filter, and gives us
383  * the ok to start transmitting SPANs.
384  */
385 void
386 dmsg_lnk_conn(dmsg_msg_t *msg)
387 {
388         dmsg_state_t *state = msg->state;
389         dmsg_iocom_t *iocom = state->iocom;
390         dmsg_media_t *media;
391         h2span_conn_t *conn;
392         h2span_relay_t *relay;
393         char *alloc = NULL;
394
395         pthread_mutex_lock(&cluster_mtx);
396
397         dmio_printf(iocom, 3,
398                 "dmsg_lnk_conn: msg %p cmd %08x state %p "
399                 "txcmd %08x rxcmd %08x\n",
400                 msg, msg->any.head.cmd, state,
401                 state->txcmd, state->rxcmd);
402
403         switch(msg->any.head.cmd & DMSGF_TRANSMASK) {
404         case DMSG_LNK_CONN | DMSGF_CREATE:
405         case DMSG_LNK_CONN | DMSGF_CREATE | DMSGF_DELETE:
406                 /*
407                  * On transaction start we allocate a new h2span_conn and
408                  * acknowledge the request, leaving the transaction open.
409                  * We then relay priority-selected SPANs.
410                  */
411                 dmio_printf(iocom, 3, "LNK_CONN(%08x): %s/%s/%s\n",
412                         (uint32_t)msg->any.head.msgid,
413                         dmsg_uuid_to_str(&msg->any.lnk_conn.pfs_clid,
414                                             &alloc),
415                         msg->any.lnk_conn.cl_label,
416                         msg->any.lnk_conn.fs_label);
417                 free(alloc);
418
419                 conn = dmsg_alloc(sizeof(*conn));
420                 assert(state->iocom->conn == NULL);
421
422                 RB_INIT(&conn->tree);
423                 state->iocom->conn = conn;      /* XXX only one */
424                 state->iocom->conn_msgid = state->msgid;
425                 dmsg_state_hold(state);
426                 conn->state = state;
427                 state->func = dmsg_lnk_conn;
428                 state->any.conn = conn;
429                 TAILQ_INSERT_TAIL(&connq, conn, entry);
430                 conn->lnk_conn = msg->any.lnk_conn;
431
432                 /*
433                  * Set up media
434                  */
435                 TAILQ_FOREACH(media, &mediaq, entry) {
436                         if (uuid_compare(&msg->any.lnk_conn.mediaid,
437                                          &media->mediaid, NULL) == 0) {
438                                 break;
439                         }
440                 }
441                 if (media == NULL) {
442                         media = dmsg_alloc(sizeof(*media));
443                         media->mediaid = msg->any.lnk_conn.mediaid;
444                         TAILQ_INSERT_TAIL(&mediaq, media, entry);
445                 }
446                 state->media = media;
447                 ++media->refs;
448
449                 if ((msg->any.head.cmd & DMSGF_DELETE) == 0) {
450                         iocom->usrmsg_callback(msg, 0);
451                         dmsg_msg_result(msg, 0);
452                         dmsg_iocom_signal(iocom);
453                         break;
454                 }
455                 /* FALL THROUGH */
456         case DMSG_LNK_CONN | DMSGF_DELETE:
457         case DMSG_LNK_ERROR | DMSGF_DELETE:
458                 /*
459                  * On transaction terminate we clean out our h2span_conn
460                  * and acknowledge the request, closing the transaction.
461                  */
462                 dmio_printf(iocom, 3, "%s\n", "LNK_CONN: Terminated");
463                 conn = state->any.conn;
464                 assert(conn);
465
466                 /*
467                  * Adjust media refs
468                  *
469                  * Callback will clean out media config / user-opaque state
470                  */
471                 media = state->media;
472                 --media->refs;
473                 if (media->refs == 0) {
474                         dmio_printf(iocom, 3, "%s\n", "Media shutdown");
475                         TAILQ_REMOVE(&mediaq, media, entry);
476                         pthread_mutex_unlock(&cluster_mtx);
477                         iocom->usrmsg_callback(msg, 0);
478                         pthread_mutex_lock(&cluster_mtx);
479                         dmsg_free(media);
480                 }
481                 state->media = NULL;
482
483                 /*
484                  * Clean out all relays.  This requires terminating each
485                  * relay transaction.
486                  */
487                 while ((relay = RB_ROOT(&conn->tree)) != NULL) {
488                         dmsg_relay_delete(relay);
489                 }
490
491                 /*
492                  * Clean out conn
493                  */
494                 conn->state = NULL;
495                 msg->state->any.conn = NULL;
496                 msg->state->iocom->conn = NULL;
497                 TAILQ_REMOVE(&connq, conn, entry);
498                 dmsg_free(conn);
499
500                 dmsg_msg_reply(msg, 0);
501                 dmsg_state_drop(state);
502                 /* state invalid after reply */
503                 break;
504         default:
505                 iocom->usrmsg_callback(msg, 1);
506 #if 0
507                 if (msg->any.head.cmd & DMSGF_DELETE)
508                         goto deleteconn;
509                 dmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
510 #endif
511                 break;
512         }
513         pthread_mutex_unlock(&cluster_mtx);
514 }
515
516 /*
517  * LNK_SPAN - Spanning tree protocol message reception
518  *            (incoming iocom lock not held)
519  *
520  * Receive a spanning tree transactional message, creating or destroying
521  * a SPAN and propagating it to other iocoms.
522  */
523 void
524 dmsg_lnk_span(dmsg_msg_t *msg)
525 {
526         dmsg_state_t *state = msg->state;
527         dmsg_iocom_t *iocom = state->iocom;
528         h2span_cluster_t dummy_cls;
529         h2span_node_t dummy_node;
530         h2span_cluster_t *cls;
531         h2span_node_t *node;
532         h2span_link_t *slink;
533         h2span_relay_t *relay;
534         char *alloc = NULL;
535
536         /*
537          * XXX
538          *
539          * Ignore reply to LNK_SPAN.  The reply is expected and allows commands
540          * to flow in both directions on the open transaction.  This will also
541          * ignore DMSGF_REPLY|DMSGF_DELETE messages.  Since we take no action
542          * if the other end unexpectedly closes their side of the transaction,
543          * we can ignore that too.
544          */
545         if (msg->any.head.cmd & DMSGF_REPLY) {
546                 dmio_printf(iocom, 2, "%s\n",
547                             "Ignore reply to LNK_SPAN");
548                 return;
549         }
550
551         pthread_mutex_lock(&cluster_mtx);
552
553         /*
554          * On transaction start we initialize the tracking infrastructure
555          */
556         if (msg->any.head.cmd & DMSGF_CREATE) {
557                 assert(state->func == NULL);
558                 state->func = dmsg_lnk_span;
559
560                 dmsg_termstr(msg->any.lnk_span.cl_label);
561                 dmsg_termstr(msg->any.lnk_span.fs_label);
562
563                 /*
564                  * Find the cluster
565                  */
566                 dummy_cls.pfs_clid = msg->any.lnk_span.pfs_clid;
567                 dummy_cls.peer_type = msg->any.lnk_span.peer_type;
568                 bcopy(msg->any.lnk_span.cl_label,
569                       dummy_cls.cl_label,
570                       sizeof(dummy_cls.cl_label));
571                 cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
572                 if (cls == NULL) {
573                         cls = dmsg_alloc(sizeof(*cls));
574                         cls->pfs_clid = msg->any.lnk_span.pfs_clid;
575                         cls->peer_type = msg->any.lnk_span.peer_type;
576                         bcopy(msg->any.lnk_span.cl_label,
577                               cls->cl_label,
578                               sizeof(cls->cl_label));
579                         RB_INIT(&cls->tree);
580                         RB_INSERT(h2span_cluster_tree, &cluster_tree, cls);
581                 }
582
583                 /*
584                  * Find the node
585                  */
586                 dummy_node.pfs_fsid = msg->any.lnk_span.pfs_fsid;
587                 bcopy(msg->any.lnk_span.fs_label, dummy_node.fs_label,
588                       sizeof(dummy_node.fs_label));
589                 node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
590                 if (node == NULL) {
591                         node = dmsg_alloc(sizeof(*node));
592                         node->pfs_fsid = msg->any.lnk_span.pfs_fsid;
593                         node->pfs_type = msg->any.lnk_span.pfs_type;
594                         bcopy(msg->any.lnk_span.fs_label,
595                               node->fs_label,
596                               sizeof(node->fs_label));
597                         node->cls = cls;
598                         RB_INIT(&node->tree);
599                         RB_INSERT(h2span_node_tree, &cls->tree, node);
600                 }
601
602                 /*
603                  * Create the link
604                  *
605                  * NOTE: Sub-transactions on the incoming SPAN can be used
606                  *       to talk to the originator.  We should not set-up
607                  *       state->relay for incoming SPANs since our sub-trans
608                  *       is running on the same interface (i.e. no actual
609                  *       relaying need be done).
610                  *
611                  * NOTE: Later on when we relay the SPAN out the outgoing
612                  *       SPAN state will be set up to relay back to this
613                  *       state.
614                  *
615                  * NOTE: It is possible for SPAN targets to send one-way
616                  *       messages to the originator but it is not possible
617                  *       for the originator to (currently) broadcast one-way
618                  *       messages to all of its SPAN targets.  The protocol
619                  *       allows such a feature to be added in the future.
620                  */
621                 assert(state->any.link == NULL);
622                 dmsg_state_hold(state);
623                 slink = dmsg_alloc(sizeof(*slink));
624                 TAILQ_INIT(&slink->relayq);
625                 slink->node = node;
626                 slink->state = state;
627                 state->any.link = slink;
628                 slink->lnk_span = msg->any.lnk_span;
629
630                 RB_INSERT(h2span_link_tree, &node->tree, slink);
631
632                 dmio_printf(iocom, 3,
633                             "LNK_SPAN(thr %p): %p %s cl=%s fs=%s dist=%d\n",
634                             iocom, slink,
635                             dmsg_uuid_to_str(&msg->any.lnk_span.pfs_clid,
636                                              &alloc),
637                             msg->any.lnk_span.cl_label,
638                             msg->any.lnk_span.fs_label,
639                             msg->any.lnk_span.dist);
640                 free(alloc);
641 #if 0
642                 dmsg_relay_scan(NULL, node);
643 #endif
644                 /*
645                  * Ack the open, which will issue a CREATE on our side, and
646                  * leave the transaction open.  Necessary to allow the
647                  * transaction to be used as a virtual circuit.
648                  */
649                 dmsg_state_result(state, 0);
650                 dmsg_iocom_signal(iocom);
651         }
652
653         /*
654          * On transaction terminate we remove the tracking infrastructure.
655          */
656         if (msg->any.head.cmd & DMSGF_DELETE) {
657                 slink = state->any.link;
658                 assert(slink != NULL);
659                 assert(slink->state == state);
660                 node = slink->node;
661                 cls = node->cls;
662
663                 dmio_printf(iocom, 3,
664                             "LNK_DELE(thr %p): %p %s cl=%s fs=%s\n",
665                             iocom, slink,
666                             dmsg_uuid_to_str(&cls->pfs_clid, &alloc),
667                             cls->cl_label,
668                             node->fs_label);
669                 free(alloc);
670
671                 /*
672                  * Clean out all relays.  This requires terminating each
673                  * relay transaction.
674                  */
675                 while ((relay = TAILQ_FIRST(&slink->relayq)) != NULL) {
676                         dmsg_relay_delete(relay);
677                 }
678
679                 /*
680                  * Clean out the topology
681                  */
682                 RB_REMOVE(h2span_link_tree, &node->tree, slink);
683                 if (RB_EMPTY(&node->tree)) {
684                         RB_REMOVE(h2span_node_tree, &cls->tree, node);
685                         if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
686                                 RB_REMOVE(h2span_cluster_tree,
687                                           &cluster_tree, cls);
688                                 dmsg_free(cls);
689                         }
690                         node->cls = NULL;
691                         dmsg_free(node);
692                         node = NULL;
693                 }
694                 state->any.link = NULL;
695                 slink->state = NULL;
696                 slink->node = NULL;
697                 dmsg_state_drop(state);
698                 dmsg_free(slink);
699
700                 /*
701                  * We have to terminate the transaction
702                  */
703                 dmsg_state_reply(state, 0);
704                 /* state invalid after reply */
705
706                 /*
707                  * If the node still exists issue any required updates.  If
708                  * it doesn't then all related relays have already been
709                  * removed and there's nothing left to do.
710                  */
711 #if 0
712                 if (node)
713                         dmsg_relay_scan(NULL, node);
714 #endif
715                 if (node)
716                         dmsg_iocom_signal(iocom);
717         }
718
719         pthread_mutex_unlock(&cluster_mtx);
720 }
721
722 /*
723  * Respond to a PING with a PING|REPLY and forward replies to the usrmsg
724  * callback.
725  */
726 static
727 void
728 dmsg_lnk_ping(dmsg_msg_t *msg)
729 {
730         dmsg_msg_t *rep;
731
732         if (msg->any.head.cmd & DMSGF_REPLY) {
733                 msg->state->iocom->usrmsg_callback(msg, 1);
734         } else {
735                 rep = dmsg_msg_alloc(msg->state, 0,
736                                      DMSG_LNK_PING | DMSGF_REPLY,
737                                      NULL, NULL);
738                 dmsg_msg_write(rep);
739         }
740 }
741
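/*
 * Illustrative sketch (not part of this file): originating a one-way
 * LNK_PING on an open circuit.  Assumes the caller already holds a
 * dmsg_state_t for the circuit; the PING|REPLY eventually arrives back
 * at dmsg_lnk_ping() above and is forwarded to the usrmsg callback.
 */
#if 0
static void
example_send_ping(dmsg_state_t *circuit)
{
        dmsg_msg_t *ping;

        ping = dmsg_msg_alloc(circuit, 0, DMSG_LNK_PING, NULL, NULL);
        dmsg_msg_write(ping);
}
#endif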
742 /*
743  * Update relay transactions for SPANs.
744  *
745  * Called with cluster_mtx held.
746  */
747 static void dmsg_relay_scan_specific(h2span_node_t *node,
748                                         h2span_conn_t *conn);
749
750 static void
751 dmsg_relay_scan(h2span_conn_t *conn, h2span_node_t *node)
752 {
753         h2span_cluster_t *cls;
754
755         if (node) {
756                 /*
757                  * Iterate specific node
758                  */
759                 TAILQ_FOREACH(conn, &connq, entry)
760                         dmsg_relay_scan_specific(node, conn);
761         } else {
762                 /*
763                  * Full iteration.
764                  *
765                  * Iterate cluster ids, nodes, and either a specific connection
766                  * or all connections.
767                  */
768                 RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
769                         /*
770                          * Iterate node ids
771                          */
772                         RB_FOREACH(node, h2span_node_tree, &cls->tree) {
773                                 /*
774                                  * Synchronize the node's link (received SPANs)
775                                  * with each connection's relays.
776                                  */
777                                 if (conn) {
778                                         dmsg_relay_scan_specific(node, conn);
779                                 } else {
780                                         TAILQ_FOREACH(conn, &connq, entry) {
781                                             dmsg_relay_scan_specific(node,
782                                                                         conn);
783                                         }
784                                         assert(conn == NULL);
785                                 }
786                         }
787                 }
788         }
789 }
790
791 /*
792  * Update the relayed SPANs for this (node, conn).
793  *
794  * Iterate links and adjust relays to match.  We only propagate the top link
795  * for now (XXX we want to propagate the top two).
796  *
797  * The dmsg_relay_scan_cmp() function locates the first relay element
798  * for any given node.  The relay elements will be sub-sorted by dist.
799  */
800 struct relay_scan_info {
801         h2span_node_t *node;
802         h2span_relay_t *relay;
803 };
804
805 static int
806 dmsg_relay_scan_cmp(h2span_relay_t *relay, void *arg)
807 {
808         struct relay_scan_info *info = arg;
809
810         if ((intptr_t)relay->source_rt->any.link->node < (intptr_t)info->node)
811                 return(-1);
812         if ((intptr_t)relay->source_rt->any.link->node > (intptr_t)info->node)
813                 return(1);
814         return(0);
815 }
816
817 static int
818 dmsg_relay_scan_callback(h2span_relay_t *relay, void *arg)
819 {
820         struct relay_scan_info *info = arg;
821
822         info->relay = relay;
823         return(-1);
824 }
825
826 static void
827 dmsg_relay_scan_specific(h2span_node_t *node, h2span_conn_t *conn)
828 {
829         struct relay_scan_info info;
830         h2span_relay_t *relay;
831         h2span_relay_t *next_relay;
832         h2span_link_t *slink;
833         dmsg_lnk_conn_t *lconn;
834         dmsg_lnk_span_t *lspan;
835         int count;
836         int maxcount = 2;
837 #ifdef REQUIRE_SYMMETRICAL
838         uint32_t lastdist = DMSG_SPAN_MAXDIST;
839         uint32_t lastrnss = 0;
840 #endif
841
842         info.node = node;
843         info.relay = NULL;
844
845         /*
846          * Locate the first related relay for the node on this connection.
847          * relay will be NULL if there were none.
848          */
849         RB_SCAN(h2span_relay_tree, &conn->tree,
850                 dmsg_relay_scan_cmp, dmsg_relay_scan_callback, &info);
851         relay = info.relay;
852         info.relay = NULL;
853         if (relay)
854                 assert(relay->source_rt->any.link->node == node);
855
856         dm_printf(9, "relay scan for connection %p\n", conn);
857
858         /*
859          * Iterate the node's links (received SPANs) in distance order,
860          * lowest (best) dist first.
861          *
862          * PROPAGATE THE BEST LINKS OVER THE SPECIFIED CONNECTION.
863          *
864          * Track relays while iterating the best links and construct
865          * missing relays when necessary.
866          *
867          * (If some prior better link was removed it would have also
868          *  removed the relay, so the relay can only match exactly or
869          *  be worse).
870          */
871         count = 0;
872         RB_FOREACH(slink, h2span_link_tree, &node->tree) {
873                 /*
874                  * Increment count of successful relays.  This isn't
875                  * quite accurate if we break out but nothing after
876                  * the loop uses (count).
877                  *
878                  * If count exceeds the maximum number of relays we desire
879                  * we normally want to break out.  However, in order to
880                  * guarantee a symmetric path we have to continue if both
881                  * (dist) and (rnss) continue to match.  Otherwise the SPAN
882                  * propagation in the reverse direction may choose different
883                  * routes and we will not have a symmetric path.
884                  *
885                  * NOTE: Spanning tree does not have to be symmetrical so
886                  *       this code is not currently enabled.
887                  */
888                 if (++count >= maxcount) {
889 #ifdef REQUIRE_SYMMETRICAL
890                         if (lastdist != slink->lnk_span.dist ||
891                             lastrnss != slink->lnk_span.rnss) {
892                                 break;
893                         }
894 #else
895                         break;
896 #endif
897                         /* go beyond the nominal maximum desired relays */
898                 }
899
900                 /*
901                  * Match, relay already in-place, get the next
902                  * relay to match against the next slink.
903                  */
904                 if (relay && relay->source_rt->any.link == slink) {
905                         relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
906                         continue;
907                 }
908
909                 /*
910                  * We might want this SLINK, if it passes our filters.
911                  *
912                  * The spanning tree can cause closed loops so we have
913                  * to limit slink->dist.
914                  */
915                 if (slink->lnk_span.dist > DMSG_SPAN_MAXDIST)
916                         break;
917
918                 /*
919                  * Don't bother transmitting a LNK_SPAN out the same
920                  * connection it came in on.  Trivial optimization.
921                  */
922                 if (slink->state->iocom == conn->state->iocom)
923                         break;
924
925                 /*
926                  * NOTE ON FILTERS: The protocol spec allows non-requested
927                  * SPANs to be transmitted; the other end is expected to
928                  * leave their transactions open but otherwise ignore them.
929                  *
930                  * Don't bother transmitting if the remote connection
931                  * is not accepting this SPAN's peer_type.
932                  *
933                  * pfs_mask is typically used so pure clients can filter
934                  * out receiving SPANs for other pure clients.
935                  */
936                 lspan = &slink->lnk_span;
937                 lconn = &conn->lnk_conn;
938                 if (((1LLU << lspan->peer_type) & lconn->peer_mask) == 0)
939                         break;
940                 if (((1LLU << lspan->pfs_type) & lconn->pfs_mask) == 0)
941                         break;
942
943                 /*
944                  * Do not give pure clients visibility to other pure clients
945                  */
946                 if (lconn->pfs_type == DMSG_PFSTYPE_CLIENT &&
947                     lspan->pfs_type == DMSG_PFSTYPE_CLIENT) {
948                         break;
949                 }
950
951                 /*
952                  * Connection filter, if cluster uuid is not NULL it must
953                  * match the span cluster uuid.  Only applies when the
954                  * peer_type matches.
955                  */
956                 if (lspan->peer_type == lconn->peer_type &&
957                     !uuid_is_nil(&lconn->pfs_clid, NULL) &&
958                     uuid_compare(&slink->node->cls->pfs_clid,
959                                  &lconn->pfs_clid, NULL)) {
960                         break;
961                 }
962
963                 /*
964                  * Connection filter, if cluster label is not empty it must
965                  * match the span cluster label.  Only applies when the
966                  * peer_type matches.
967                  */
968                 if (lspan->peer_type == lconn->peer_type &&
969                     lconn->cl_label[0] &&
970                     strcmp(lconn->cl_label, slink->node->cls->cl_label)) {
971                         break;
972                 }
973
974                 /*
975                  * NOTE! pfs_fsid differentiates nodes within the same cluster
976                  *       so we obviously don't want to match those.  Similarly
977                  *       for fs_label.
978                  */
979
980                 /*
981                  * Ok, we've accepted this SPAN for relaying.
982                  */
983                 assert(relay == NULL ||
984                        relay->source_rt->any.link->node != slink->node ||
985                        relay->source_rt->any.link->lnk_span.dist >=
986                         slink->lnk_span.dist);
987                 relay = dmsg_generate_relay(conn, slink);
988 #ifdef REQUIRE_SYMMETRICAL
989                 lastdist = slink->lnk_span.dist;
990                 lastrnss = slink->lnk_span.rnss;
991 #endif
992
993                 /*
994                  * Match (created new relay), get the next relay to
995                  * match against the next slink.
996                  */
997                 relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
998         }
999
1000         /*
1001          * Any remaining relays belonging to this connection which match
1002          * the node are in excess of the current aggregate spanning state
1003          * and should be removed.
1004          */
1005         while (relay && relay->source_rt->any.link->node == node) {
1006                 next_relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
1007                 dm_printf(9, "%s\n", "RELAY DELETE FROM EXTRAS");
1008                 dmsg_relay_delete(relay);
1009                 relay = next_relay;
1010         }
1011 }
1012
1013 /*
1014  * Find the slink associated with the msgid and return its state,
1015  * so the caller can issue a transaction.
1016  */
1017 dmsg_state_t *
1018 dmsg_findspan(const char *label)
1019 {
1020         dmsg_state_t *state;
1021         h2span_cluster_t *cls;
1022         h2span_node_t *node;
1023         h2span_link_t *slink;
1024         uint64_t msgid = strtoull(label, NULL, 16);
1025
1026         pthread_mutex_lock(&cluster_mtx);
1027
1028         state = NULL;
1029         RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
1030                 RB_FOREACH(node, h2span_node_tree, &cls->tree) {
1031                         RB_FOREACH(slink, h2span_link_tree, &node->tree) {
1032                                 if (slink->state->msgid == msgid) {
1033                                         state = slink->state;
1034                                         goto done;
1035                                 }
1036                         }
1037                 }
1038         }
1039 done:
1040         pthread_mutex_unlock(&cluster_mtx);
1041
1042         dm_printf(8, "findspan: %p\n", state);
1043
1044         return state;
1045 }
1046
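/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * state returned by dmsg_findspan() is the incoming LNK_SPAN circuit and
 * can be used to open a sub-transaction directed at the span originator.
 * 'cmd', 'func' and 'data' are caller-supplied placeholders; only
 * dmsg_findspan(), dmsg_msg_alloc(), dmsg_state_hold() and
 * dmsg_msg_write() are calls taken from this library.
 */
#if 0
static dmsg_state_t *
example_open_subtrans(const char *label, uint32_t cmd,
                      void (*func)(dmsg_msg_t *msg), void *data)
{
        dmsg_state_t *span;
        dmsg_state_t *sub;
        dmsg_msg_t *msg;

        span = dmsg_findspan(label);
        if (span == NULL)
                return (NULL);
        msg = dmsg_msg_alloc(span, 0, cmd | DMSGF_CREATE, func, data);
        dmsg_state_hold(msg->state);    /* as dmsg_generate_relay() does */
        sub = msg->state;
        dmsg_msg_write(msg);
        return (sub);
}
#endif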
1047
1048 /*
1049  * Helper function to generate missing relay on target connection.
1050  *
1051  * cluster_mtx must be held
1052  */
1053 static
1054 h2span_relay_t *
1055 dmsg_generate_relay(h2span_conn_t *conn, h2span_link_t *slink)
1056 {
1057         h2span_relay_t *relay;
1058         dmsg_msg_t *msg;
1059
1060         dmsg_state_hold(slink->state);
1061         relay = dmsg_alloc(sizeof(*relay));
1062         relay->conn = conn;
1063         relay->source_rt = slink->state;
1064         /* relay->source_rt->any.link = slink; */
1065
1066         /*
1067          * NOTE: relay->target_rt->any.relay set to relay by alloc.
1068          *
1069          * NOTE: LNK_SPAN is transmitted as a top-level transaction.
1070          */
1071         msg = dmsg_msg_alloc(&conn->state->iocom->state0,
1072                              0, DMSG_LNK_SPAN | DMSGF_CREATE,
1073                              dmsg_lnk_relay, relay);
1074         dmsg_state_hold(msg->state);
1075         relay->target_rt = msg->state;
1076
1077         msg->any.lnk_span = slink->lnk_span;
1078         msg->any.lnk_span.dist = slink->lnk_span.dist + 1;
1079         msg->any.lnk_span.rnss = slink->lnk_span.rnss + dmsg_rnss();
1080
1081         RB_INSERT(h2span_relay_tree, &conn->tree, relay);
1082         TAILQ_INSERT_TAIL(&slink->relayq, relay, entry);
1083
1084         /*
1085          * Seed the relay so new sub-transactions received on the outgoing
1086          * SPAN circuit are relayed back to the originator.
1087          */
1088         msg->state->relay = relay->source_rt;
1089         dmsg_state_hold(msg->state->relay);
1090
1091         dmsg_msg_write(msg);
1092
1093         return (relay);
1094 }
1095
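/*
 * Example of the header adjustment above: a SPAN received with dist=2 and
 * rnss=R is relayed back out with dist=3 and rnss=R+dmsg_rnss(), so each
 * additional hop increases dist by one and accumulates a per-host sub-sort
 * offset in rnss.
 */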
1096 /*
1097  * Messages received on relay SPANs.  These are open transactions so it is
1098  * in fact possible for the other end to close the transaction.
1099  *
1100  * XXX MPRACE on state structure
1101  */
1102 static void
1103 dmsg_lnk_relay(dmsg_msg_t *msg)
1104 {
1105         dmsg_state_t *state = msg->state;
1106         h2span_relay_t *relay;
1107
1108         assert(msg->any.head.cmd & DMSGF_REPLY);
1109
1110         if (msg->any.head.cmd & DMSGF_DELETE) {
1111                 pthread_mutex_lock(&cluster_mtx);
1112                 dm_printf(8, "%s\n", "RELAY DELETE FROM LNK_RELAY MSG");
1113                 if ((relay = state->any.relay) != NULL) {
1114                         dmsg_relay_delete(relay);
1115                 } else {
1116                         dmsg_state_reply(state, 0);
1117                 }
1118                 pthread_mutex_unlock(&cluster_mtx);
1119         }
1120 }
1121
1122 /*
1123  * cluster_mtx held by caller
1124  */
1125 static
1126 void
1127 dmsg_relay_delete(h2span_relay_t *relay)
1128 {
1129         dm_printf(8,
1130                   "RELAY DELETE %p RELAY %p ON CLS=%p NODE=%p "
1131                   "DIST=%d FD %d STATE %p\n",
1132                   relay->source_rt->any.link,
1133                   relay,
1134                   relay->source_rt->any.link->node->cls,
1135                   relay->source_rt->any.link->node,
1136                   relay->source_rt->any.link->lnk_span.dist,
1137                   relay->conn->state->iocom->sock_fd,
1138                   relay->target_rt);
1139
1140         RB_REMOVE(h2span_relay_tree, &relay->conn->tree, relay);
1141         TAILQ_REMOVE(&relay->source_rt->any.link->relayq, relay, entry);
1142
1143         if (relay->target_rt) {
1144                 relay->target_rt->any.relay = NULL;
1145                 dmsg_state_reply(relay->target_rt, 0);
1146                 dmsg_state_drop(relay->target_rt);
1147                 /* state invalid after reply */
1148                 relay->target_rt = NULL;
1149         }
1150
1151         /*
1152          * NOTE: relay->source_rt->refs is held by the relay SPAN
1153          *       state, not by this relay structure.
1154          */
1155         relay->conn = NULL;
1156         if (relay->source_rt) {
1157                 dmsg_state_drop(relay->source_rt);
1158                 relay->source_rt = NULL;
1159         }
1160         dmsg_free(relay);
1161 }
1162
1163 /************************************************************************
1164  *                      ROUTER AND MESSAGING HANDLES                    *
1165  ************************************************************************
1166  *
1167  * Basically the idea here is to provide a stable data structure which
1168  * can be localized to the caller for higher level protocols to work with.
1169  * Depending on the context, these dmsg_handle's can be pooled by use-case
1170  * and remain persistent through a client's (or mount point's) life.
1171  */
1172
1173 #if 0
1174 /*
1175  * Obtain a stable handle on a cluster given its uuid.  This ties directly
1176  * into the global cluster topology, creating the structure if necessary
1177  * (even if the uuid does not exist or does not exist yet), and preventing
1178  * the structure from getting ripped out from under us while we hold a
1179  * pointer to it.
1180  */
1181 h2span_cluster_t *
1182 dmsg_cluster_get(uuid_t *pfs_clid)
1183 {
1184         h2span_cluster_t dummy_cls;
1185         h2span_cluster_t *cls;
1186
1187         dummy_cls.pfs_clid = *pfs_clid;
1188         pthread_mutex_lock(&cluster_mtx);
1189         cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
1190         if (cls)
1191                 ++cls->refs;
1192         pthread_mutex_unlock(&cluster_mtx);
1193         return (cls);
1194 }
1195
1196 void
1197 dmsg_cluster_put(h2span_cluster_t *cls)
1198 {
1199         pthread_mutex_lock(&cluster_mtx);
1200         assert(cls->refs > 0);
1201         --cls->refs;
1202         if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
1203                 RB_REMOVE(h2span_cluster_tree,
1204                           &cluster_tree, cls);
1205                 dmsg_free(cls);
1206         }
1207         pthread_mutex_unlock(&cluster_mtx);
1208 }
1209
1210 /*
1211  * Obtain a stable handle to a specific cluster node given its uuid.
1212  * This handle does NOT lock in the route to the node and is typically
1213  * used as part of the dmsg_handle_*() API to obtain a set of
1214  * stable nodes.
1215  */
1216 h2span_node_t *
1217 dmsg_node_get(h2span_cluster_t *cls, uuid_t *pfs_fsid)
1218 {
1219 }
1220
1221 #endif
1222
1223 /*
1224  * Dumps the spanning tree
1225  *
1226  * DEBUG ONLY
1227  */
1228 void
1229 dmsg_shell_tree(dmsg_iocom_t *iocom, char *cmdbuf __unused)
1230 {
1231         h2span_cluster_t *cls;
1232         h2span_node_t *node;
1233         h2span_link_t *slink;
1234         h2span_relay_t *relay;
1235         char *uustr = NULL;
1236
1237         pthread_mutex_lock(&cluster_mtx);
1238         RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
1239                 dmsg_printf(iocom, "Cluster %s %s (%s)\n",
1240                                   dmsg_peer_type_to_str(cls->peer_type),
1241                                   dmsg_uuid_to_str(&cls->pfs_clid, &uustr),
1242                                   cls->cl_label);
1243                 RB_FOREACH(node, h2span_node_tree, &cls->tree) {
1244                         dmsg_printf(iocom, "    Node %02x %s (%s)\n",
1245                                 node->pfs_type,
1246                                 dmsg_uuid_to_str(&node->pfs_fsid, &uustr),
1247                                 node->fs_label);
1248                         RB_FOREACH(slink, h2span_link_tree, &node->tree) {
1249                                 dmsg_printf(iocom,
1250                                             "\tSLink msgid %016jx "
1251                                             "dist=%d via %d\n",
1252                                             (intmax_t)slink->state->msgid,
1253                                             slink->lnk_span.dist,
1254                                             slink->state->iocom->sock_fd);
1255                                 TAILQ_FOREACH(relay, &slink->relayq, entry) {
1256                                         dmsg_printf(iocom,
1257                                             "\t    Relay-out msgid %016jx "
1258                                             "via %d\n",
1259                                             (intmax_t)relay->target_rt->msgid,
1260                                             relay->target_rt->iocom->sock_fd);
1261                                 }
1262                         }
1263                 }
1264         }
1265         pthread_mutex_unlock(&cluster_mtx);
1266         if (uustr)
1267                 free(uustr);
1268 #if 0
1269         TAILQ_FOREACH(conn, &connq, entry) {
1270         }
1271 #endif
1272 }
1273
1274 /*
1275  * DEBUG ONLY
1276  *
1277  * Locate the state representing an incoming LNK_SPAN given its msgid.
1278  */
1279 int
1280 dmsg_debug_findspan(uint64_t msgid, dmsg_state_t **statep)
1281 {
1282         h2span_cluster_t *cls;
1283         h2span_node_t *node;
1284         h2span_link_t *slink;
1285
1286         pthread_mutex_lock(&cluster_mtx);
1287         RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
1288                 RB_FOREACH(node, h2span_node_tree, &cls->tree) {
1289                         RB_FOREACH(slink, h2span_link_tree, &node->tree) {
1290                                 if (slink->state->msgid == msgid) {
1291                                         *statep = slink->state;
1292                                         goto found;
1293                                 }
1294                         }
1295                 }
1296         }
1297         pthread_mutex_unlock(&cluster_mtx);
1298         *statep = NULL;
1299         return(ENOENT);
1300 found:
1301         pthread_mutex_unlock(&cluster_mtx);
1302         return(0);
1303 }
1304
1305 /*
1306  * Random number sub-sort value to add to SPAN rnss fields on relay.
1307  * This allows us to differentiate spans with the same <dist> field
1308  * for relaying purposes.  We must normally limit the number of relays
1309  * for any given SPAN origination but we must also guarantee that a
1310  * symmetric reverse path exists, so we use the rnss field as a sub-sort
1311  * (since there can be thousands or millions if we only match on <dist>),
1312  * and if there are STILL too many spans we go past the limit.
1313  */
1314 static
1315 uint32_t
1316 dmsg_rnss(void)
1317 {
1318         if (DMsgRNSS == 0) {
1319                 pthread_mutex_lock(&cluster_mtx);
1320                 while (DMsgRNSS == 0) {
1321                         srandomdev();
1322                         DMsgRNSS = random();
1323                 }
1324                 pthread_mutex_unlock(&cluster_mtx);
1325         }
1326         return(DMsgRNSS);
1327 }