/*
 * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * LNK_SPAN PROTOCOL SUPPORT FUNCTIONS - Please see sys/dmsg.h for an
 * involved explanation of the protocol.
 */

#include "dmsg_local.h"

void (*dmsg_node_handler)(void **opaquep, struct dmsg_msg *msg, int op);

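/*
 * Example (hypothetical, not part of this file): an application can
 * observe node arrival/departure by assigning dmsg_node_handler before
 * the service threads start.  The handler is called with cluster_mtx
 * held; *opaquep is per-node application storage (h2span_node->opaque)
 * and op is DMSG_NODEOP_ADD or DMSG_NODEOP_DEL.  Assumes strdup()/free()
 * are available via the usual headers.
 *
 *	static void
 *	my_node_handler(void **opaquep, struct dmsg_msg *msg, int op)
 *	{
 *		if (op == DMSG_NODEOP_ADD)
 *			*opaquep = strdup(msg->any.lnk_span.fs_label);
 *		else if (op == DMSG_NODEOP_DEL) {
 *			free(*opaquep);
 *			*opaquep = NULL;
 *		}
 *	}
 *
 *	dmsg_node_handler = my_node_handler;
 */
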
/*
 * Maximum spanning tree distance.  This has the practical effect of
 * stopping tail-chasing closed loops when a feeder span is lost.
 */
#define DMSG_SPAN_MAXDIST	16

/*
 * RED-BLACK TREE DEFINITIONS
 *
 * We need to track:
 *
 * (1) shared fsid's (a cluster).
 * (2) unique fsid's (a node in a cluster) <--- LNK_SPAN transactions.
 *
 * We need to aggregate all active LNK_SPANs and create our own
 * outgoing LNK_SPAN transactions on each of our connections representing
 * the aggregated state.
 *
 * h2span_conn          - List of iocom connections which wish to receive
 *                        SPAN propagation from other connections.  Might
 *                        contain a filter string.  Only iocoms with an open
 *                        LNK_CONN transaction are applicable for SPAN
 *                        propagation.
 *
 * h2span_relay         - List of links relayed (via SPAN).  Essentially
 *                        each relay structure represents a LNK_SPAN
 *                        transaction that we initiated, versus h2span_link
 *                        which is a LNK_SPAN transaction that we received.
 *
 * --
 *
 * h2span_cluster       - Organizes the shared fsid's.  One structure for
 *                        each cluster.
 *
 * h2span_node          - Organizes the nodes in a cluster.  One structure
 *                        for each unique {cluster,node}, aka {fsid, pfs_fsid}.
 *
 * h2span_link          - Organizes all incoming and outgoing LNK_SPAN message
 *                        transactions related to a node.
 *
 *                        One h2span_link structure for each incoming LNK_SPAN
 *                        transaction.  Links selected for propagation back
 *                        out are also where the outgoing LNK_SPAN messages
 *                        are indexed into (so we can propagate changes).
 *
 *                        The h2span_link's use a red-black tree to sort the
 *                        distance hop metric for the incoming LNK_SPAN.  We
 *                        then select the top N for outgoing.  When the
 *                        topology changes the top N may also change and cause
 *                        new outgoing LNK_SPAN transactions to be opened
 *                        and less desirable ones to be closed, causing
 *                        transactional aborts within the message flow in
 *                        the process.
 *
 * Also note            - All outgoing LNK_SPAN message transactions are also
 *                        entered into a red-black tree for use by the routing
 *                        function.  This is handled by msg.c in the state
 *                        code, not here.
 */

struct h2span_link;
struct h2span_relay;
TAILQ_HEAD(h2span_media_queue, h2span_media);
TAILQ_HEAD(h2span_conn_queue, h2span_conn);
TAILQ_HEAD(h2span_relay_queue, h2span_relay);

RB_HEAD(h2span_cluster_tree, h2span_cluster);
RB_HEAD(h2span_node_tree, h2span_node);
RB_HEAD(h2span_link_tree, h2span_link);
RB_HEAD(h2span_relay_tree, h2span_relay);

uint32_t DMsgRNSS;

/*
 * This represents a media instance (keyed by mediaid).
 */
struct h2span_media {
	TAILQ_ENTRY(h2span_media) entry;
	uuid_t	mediaid;
	int	refs;
	struct h2span_media_config {
		dmsg_vol_data_t		copy_run;
		dmsg_vol_data_t		copy_pend;
		pthread_t		thread;
		pthread_cond_t		cond;
		int			ctl;
		int			fd;
		dmsg_iocom_t		iocom;
		pthread_t		iocom_thread;
		enum { H2MC_STOPPED, H2MC_CONNECT, H2MC_RUNNING } state;
	} config[DMSG_COPYID_COUNT];
};

typedef struct h2span_media_config h2span_media_config_t;

#define H2CONFCTL_STOP		0x00000001
#define H2CONFCTL_UPDATE	0x00000002

/*
 * A received LNK_CONN transaction enables the SPAN protocol over the
 * connection (and may contain a filter).  Typically one for each mount;
 * several may share the same media.
 */
struct h2span_conn {
	TAILQ_ENTRY(h2span_conn) entry;
	struct h2span_relay_tree tree;
	struct h2span_media *media;
	dmsg_state_t *state;
};

/*
 * All received LNK_SPANs are organized by cluster (pfs_clid),
 * node (pfs_fsid), and link (received LNK_SPAN transaction).
 */
struct h2span_cluster {
	RB_ENTRY(h2span_cluster) rbnode;
	struct h2span_node_tree tree;
	uuid_t	pfs_clid;		/* shared fsid */
	uint8_t	peer_type;
	char	cl_label[128];		/* cluster label (typ PEER_BLOCK) */
	int	refs;			/* prevents destruction */
};

struct h2span_node {
	RB_ENTRY(h2span_node) rbnode;
	struct h2span_link_tree tree;
	struct h2span_cluster *cls;
	uint8_t	pfs_type;
	uuid_t	pfs_fsid;		/* unique fsid */
	char	fs_label[128];		/* fs label (typ PEER_HAMMER2) */
	void	*opaque;
};

struct h2span_link {
	RB_ENTRY(h2span_link) rbnode;
	dmsg_state_t	*state;		/* state<->link */
	struct h2span_node *node;	/* related node */
	uint32_t	dist;
	uint32_t	rnss;
	struct h2span_relay_queue relayq; /* relay out */
};

/*
 * Any LNK_SPAN transactions we receive which are relayed out other
 * connections utilize this structure to track the LNK_SPAN transactions
 * we initiate (relay out) on other connections.  We only relay out
 * LNK_SPANs on connections we have an open CONN transaction for.
 *
 * The relay structure points to the outgoing LNK_SPAN transaction
 * (target_rt) and to the incoming LNK_SPAN transaction (source_rt).
 * The relay structure holds refs on the related states.
 *
 * In many respects this is the core of the protocol... actually figuring
 * out what LNK_SPANs to relay.  The spanid used for relaying is the
 * address of the 'state' structure, which is why h2span_relay has to
 * be entered into a RB-TREE based at h2span_conn (so we can look
 * up the spanid to validate it).
 */
struct h2span_relay {
	TAILQ_ENTRY(h2span_relay) entry;	/* from link */
	RB_ENTRY(h2span_relay) rbnode;		/* from h2span_conn */
	struct h2span_conn	*conn;		/* related CONN transaction */
	dmsg_state_t		*source_rt;	/* h2span_link state */
	dmsg_state_t		*target_rt;	/* h2span_relay state */
};

typedef struct h2span_media h2span_media_t;
typedef struct h2span_conn h2span_conn_t;
typedef struct h2span_cluster h2span_cluster_t;
typedef struct h2span_node h2span_node_t;
typedef struct h2span_link h2span_link_t;
typedef struct h2span_relay h2span_relay_t;

#define dmsg_termstr(array)	_dmsg_termstr((array), sizeof(array))

static h2span_relay_t *dmsg_generate_relay(h2span_conn_t *conn,
					h2span_link_t *slink);
static uint32_t dmsg_rnss(void);

static __inline
void
_dmsg_termstr(char *base, size_t size)
{
	base[size-1] = 0;
}

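/*
 * Usage note (illustrative): dmsg_termstr() guarantees NUL termination
 * of a fixed-size label array copied from the wire before it is used
 * as a C string, e.g.:
 *
 *	bcopy(msg->any.lnk_span.cl_label, dummy_cls.cl_label,
 *	      sizeof(dummy_cls.cl_label));
 *	dmsg_termstr(dummy_cls.cl_label);
 *
 * The macro requires a true array argument; passing a char pointer
 * would make sizeof() yield the pointer size rather than the buffer
 * size.
 */
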
/*
 * Cluster peer_type, uuid, and label must all match for two clusters
 * to compare equal.
 */
static
int
h2span_cluster_cmp(h2span_cluster_t *cls1, h2span_cluster_t *cls2)
{
	int r;

	if (cls1->peer_type < cls2->peer_type)
		return(-1);
	if (cls1->peer_type > cls2->peer_type)
		return(1);
	r = uuid_compare(&cls1->pfs_clid, &cls2->pfs_clid, NULL);
	if (r == 0)
		r = strcmp(cls1->cl_label, cls2->cl_label);

	return r;
}

/*
 * Match against fs_label/pfs_fsid.  Together these two items represent a
 * unique node.  In most cases the primary differentiator is pfs_fsid but
 * we also string-match fs_label.
 */
static
int
h2span_node_cmp(h2span_node_t *node1, h2span_node_t *node2)
{
	int r;

	r = strcmp(node1->fs_label, node2->fs_label);
	if (r == 0)
		r = uuid_compare(&node1->pfs_fsid, &node2->pfs_fsid, NULL);
	return (r);
}

/*
 * Sort/subsort must match h2span_relay_cmp() under any given node
 * to make the aggregation algorithm easier, so the best links are
 * in the same sorted order as the best relays.
 *
 * NOTE: We cannot use link*->state->msgid because this msgid is created
 *       by each remote host and thus might wind up being the same.
 */
static
int
h2span_link_cmp(h2span_link_t *link1, h2span_link_t *link2)
{
	if (link1->dist < link2->dist)
		return(-1);
	if (link1->dist > link2->dist)
		return(1);
	if (link1->rnss < link2->rnss)
		return(-1);
	if (link1->rnss > link2->rnss)
		return(1);
#if 1
	if ((uintptr_t)link1->state < (uintptr_t)link2->state)
		return(-1);
	if ((uintptr_t)link1->state > (uintptr_t)link2->state)
		return(1);
#else
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
#endif
	return(0);
}

/*
 * Relay entries are sorted by node, subsorted by distance and link
 * address (so we can match up the conn->tree relay topology with
 * a node's link topology).
 */
static
int
h2span_relay_cmp(h2span_relay_t *relay1, h2span_relay_t *relay2)
{
	h2span_link_t *link1 = relay1->source_rt->any.link;
	h2span_link_t *link2 = relay2->source_rt->any.link;

	if ((intptr_t)link1->node < (intptr_t)link2->node)
		return(-1);
	if ((intptr_t)link1->node > (intptr_t)link2->node)
		return(1);
	if (link1->dist < link2->dist)
		return(-1);
	if (link1->dist > link2->dist)
		return(1);
	if (link1->rnss < link2->rnss)
		return(-1);
	if (link1->rnss > link2->rnss)
		return(1);
#if 1
	if ((uintptr_t)link1->state < (uintptr_t)link2->state)
		return(-1);
	if ((uintptr_t)link1->state > (uintptr_t)link2->state)
		return(1);
#else
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
#endif
	return(0);
}

RB_PROTOTYPE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_PROTOTYPE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_PROTOTYPE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_PROTOTYPE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);

RB_GENERATE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_GENERATE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_GENERATE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_GENERATE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);

/*
 * Global mutex protects cluster_tree lookups, connq, mediaq.
 */
static pthread_mutex_t cluster_mtx;
static struct h2span_cluster_tree cluster_tree = RB_INITIALIZER(cluster_tree);
static struct h2span_conn_queue connq = TAILQ_HEAD_INITIALIZER(connq);
static struct h2span_media_queue mediaq = TAILQ_HEAD_INITIALIZER(mediaq);

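/*
 * Illustrative sketch (not compiled): with cluster_mtx held, the
 * aggregated topology can be walked top-down through the RB trees
 * defined above.  dmsg_dump_topology() is a hypothetical debugging
 * helper, not part of the library API.
 */
#if 0
static void
dmsg_dump_topology(void)
{
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;

	pthread_mutex_lock(&cluster_mtx);
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		fprintf(stderr, "cluster %s (peer_type %d)\n",
			cls->cl_label, cls->peer_type);
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			fprintf(stderr, "  node %s\n", node->fs_label);
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				/* links sort best-first by (dist, rnss) */
				fprintf(stderr, "    link %p dist=%u\n",
					slink, slink->dist);
			}
		}
	}
	pthread_mutex_unlock(&cluster_mtx);
}
#endif
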
static void dmsg_lnk_span(dmsg_msg_t *msg);
static void dmsg_lnk_conn(dmsg_msg_t *msg);
static void dmsg_lnk_circ(dmsg_msg_t *msg);
static void dmsg_lnk_relay(dmsg_msg_t *msg);
static void dmsg_relay_scan(h2span_conn_t *conn, h2span_node_t *node);
static void dmsg_relay_delete(h2span_relay_t *relay);

static void *dmsg_volconf_thread(void *info);
static void dmsg_volconf_stop(h2span_media_config_t *conf);
static void dmsg_volconf_start(h2span_media_config_t *conf,
				const char *hostname);

void
dmsg_msg_lnk_signal(dmsg_iocom_t *iocom __unused)
{
	pthread_mutex_lock(&cluster_mtx);
	dmsg_relay_scan(NULL, NULL);
	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * DMSG_PROTO_LNK - Generic LNK protocol message dispatch.
 *            (incoming iocom lock not held)
 *
 * This function is typically called for one-way and opening-transactions
 * since state->func is assigned after that, but it will also be called
 * if no state->func is assigned on transaction-open.
 */
void
dmsg_msg_lnk(dmsg_msg_t *msg)
{
	uint32_t icmd = msg->state ? msg->state->icmd : msg->any.head.cmd;

	switch(icmd & DMSGF_BASECMDMASK) {
	case DMSG_LNK_CONN:
		dmsg_lnk_conn(msg);
		break;
	case DMSG_LNK_SPAN:
		dmsg_lnk_span(msg);
		break;
	case DMSG_LNK_CIRC:
		dmsg_lnk_circ(msg);
		break;
	default:
		fprintf(stderr,
			"MSG_PROTO_LNK: Unknown msg %08x\n", msg->any.head.cmd);
		dmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
		/* state invalid after reply */
		break;
	}
}

/*
 * LNK_CONN - iocom identify message reception.
 *            (incoming iocom lock not held)
 *
 * The remote node identifies itself to us, sets up a SPAN filter, and
 * gives us the ok to start transmitting SPANs.
 */
void
dmsg_lnk_conn(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	h2span_media_t *media;
	h2span_media_config_t *conf;
	h2span_conn_t *conn;
	h2span_relay_t *relay;
	char *alloc = NULL;
	int i;

	pthread_mutex_lock(&cluster_mtx);

	fprintf(stderr,
		"dmsg_lnk_conn: msg %p cmd %08x state %p txcmd %08x rxcmd %08x\n",
		msg, msg->any.head.cmd, state, state->txcmd, state->rxcmd);

	switch(msg->any.head.cmd & DMSGF_TRANSMASK) {
	case DMSG_LNK_CONN | DMSGF_CREATE:
	case DMSG_LNK_CONN | DMSGF_CREATE | DMSGF_DELETE:
		/*
		 * On transaction start we allocate a new h2span_conn and
		 * acknowledge the request, leaving the transaction open.
		 * We then relay priority-selected SPANs.
		 */
		fprintf(stderr, "LNK_CONN(%08x): %s/%s/%s\n",
			(uint32_t)msg->any.head.msgid,
			dmsg_uuid_to_str(&msg->any.lnk_conn.pfs_clid,
					    &alloc),
			msg->any.lnk_conn.cl_label,
			msg->any.lnk_conn.fs_label);
		free(alloc);

		conn = dmsg_alloc(sizeof(*conn));

		RB_INIT(&conn->tree);
		state->iocom->conn = conn;	/* XXX only one */
		conn->state = state;
		state->func = dmsg_lnk_conn;
		state->any.conn = conn;
		TAILQ_INSERT_TAIL(&connq, conn, entry);

		/*
		 * Set up media
		 */
		TAILQ_FOREACH(media, &mediaq, entry) {
			if (uuid_compare(&msg->any.lnk_conn.mediaid,
					 &media->mediaid, NULL) == 0) {
				break;
			}
		}
		if (media == NULL) {
			media = dmsg_alloc(sizeof(*media));
			media->mediaid = msg->any.lnk_conn.mediaid;
			TAILQ_INSERT_TAIL(&mediaq, media, entry);
		}
		conn->media = media;
		++media->refs;

		if ((msg->any.head.cmd & DMSGF_DELETE) == 0) {
			dmsg_msg_result(msg, 0);
			dmsg_iocom_signal(msg->iocom);
			break;
		}
		/* FALL THROUGH */
	case DMSG_LNK_CONN | DMSGF_DELETE:
	case DMSG_LNK_ERROR | DMSGF_DELETE:
deleteconn:
		/*
		 * On transaction terminate we clean out our h2span_conn
		 * and acknowledge the request, closing the transaction.
		 */
		fprintf(stderr, "LNK_CONN: Terminated\n");
		conn = state->any.conn;
		assert(conn);

		/*
		 * Clean out the media structure.  If the ref count drops
		 * to zero we also clean out the media config threads.
		 * These threads maintain span connections to other hammer2
		 * service daemons.
		 */
		media = conn->media;
		if (--media->refs == 0) {
			fprintf(stderr, "Shutting down media spans\n");
			for (i = 0; i < DMSG_COPYID_COUNT; ++i) {
				conf = &media->config[i];

				if (conf->thread == NULL)
					continue;
				conf->ctl = H2CONFCTL_STOP;
				pthread_cond_signal(&conf->cond);
			}
			for (i = 0; i < DMSG_COPYID_COUNT; ++i) {
				conf = &media->config[i];

				if (conf->thread == NULL)
					continue;
				pthread_mutex_unlock(&cluster_mtx);
				pthread_join(conf->thread, NULL);
				pthread_mutex_lock(&cluster_mtx);
				conf->thread = NULL;
				pthread_cond_destroy(&conf->cond);
			}
			fprintf(stderr, "Media shutdown complete\n");
			TAILQ_REMOVE(&mediaq, media, entry);
			dmsg_free(media);
		}

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay transaction.
		 */
		while ((relay = RB_ROOT(&conn->tree)) != NULL) {
			dmsg_relay_delete(relay);
		}

		/*
		 * Clean out conn
		 */
		conn->media = NULL;
		conn->state = NULL;
		msg->state->any.conn = NULL;
		msg->state->iocom->conn = NULL;
		TAILQ_REMOVE(&connq, conn, entry);
		dmsg_free(conn);

		dmsg_msg_reply(msg, 0);
		/* state invalid after reply */
		break;
	case DMSG_LNK_VOLCONF:
		/*
		 * One-way volume-configuration message transmitted over
		 * the open LNK_CONN transaction.
		 */
		fprintf(stderr, "RECEIVED VOLCONF\n");
		if (msg->any.lnk_volconf.index < 0 ||
		    msg->any.lnk_volconf.index >= DMSG_COPYID_COUNT) {
			fprintf(stderr, "VOLCONF: ILLEGAL INDEX %d\n",
				msg->any.lnk_volconf.index);
			break;
		}
		if (msg->any.lnk_volconf.copy.path[
		     sizeof(msg->any.lnk_volconf.copy.path) - 1] != 0 ||
		    msg->any.lnk_volconf.copy.path[0] == 0) {
			fprintf(stderr, "VOLCONF: ILLEGAL PATH %d\n",
				msg->any.lnk_volconf.index);
			break;
		}
		conn = msg->state->any.conn;
		if (conn == NULL) {
			fprintf(stderr, "VOLCONF: LNK_CONN is missing\n");
			break;
		}
		conf = &conn->media->config[msg->any.lnk_volconf.index];
		conf->copy_pend = msg->any.lnk_volconf.copy;
		conf->ctl |= H2CONFCTL_UPDATE;
		if (conf->thread == NULL) {
			fprintf(stderr, "VOLCONF THREAD STARTED\n");
			pthread_cond_init(&conf->cond, NULL);
			pthread_create(&conf->thread, NULL,
				       dmsg_volconf_thread, (void *)conf);
		}
		pthread_cond_signal(&conf->cond);
		break;
	default:
		/*
		 * Failsafe
		 */
		if (msg->any.head.cmd & DMSGF_DELETE)
			goto deleteconn;
		dmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
		break;
	}
	pthread_mutex_unlock(&cluster_mtx);
}

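/*
 * Sketch (hypothetical, field values illustrative): the initiating side
 * opens the LNK_CONN transaction that this function services.  Based on
 * the message allocation pattern used elsewhere in this file, the open
 * might look like the following; DMSG_PEER_HAMMER2 and the filter
 * values are placeholders, not a prescribed configuration.
 *
 *	dmsg_msg_t *m;
 *
 *	m = dmsg_msg_alloc(&iocom->circuit0, 0,
 *			   DMSG_LNK_CONN | DMSGF_CREATE,
 *			   my_conn_callback, my_arg);
 *	m->any.lnk_conn.peer_type = DMSG_PEER_HAMMER2;
 *	m->any.lnk_conn.peer_mask = (uint64_t)-1;  /* accept all types */
 *	snprintf(m->any.lnk_conn.cl_label,
 *		 sizeof(m->any.lnk_conn.cl_label), "mycluster");
 *	dmsg_msg_write(m);
 */
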
/*
 * LNK_SPAN - Spanning tree protocol message reception
 *            (incoming iocom lock not held)
 *
 * Receive a spanning tree transactional message, creating or destroying
 * a SPAN and propagating it to other iocoms.
 */
void
dmsg_lnk_span(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	h2span_cluster_t dummy_cls;
	h2span_node_t dummy_node;
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	h2span_relay_t *relay;
	char *alloc = NULL;

	assert((msg->any.head.cmd & DMSGF_REPLY) == 0);

	pthread_mutex_lock(&cluster_mtx);

	/*
	 * On transaction start we initialize the tracking infrastructure
	 */
	if (msg->any.head.cmd & DMSGF_CREATE) {
		assert(state->func == NULL);
		state->func = dmsg_lnk_span;

		dmsg_termstr(msg->any.lnk_span.cl_label);
		dmsg_termstr(msg->any.lnk_span.fs_label);

		/*
		 * Find the cluster
		 */
		dummy_cls.pfs_clid = msg->any.lnk_span.pfs_clid;
		dummy_cls.peer_type = msg->any.lnk_span.peer_type;
		bcopy(msg->any.lnk_span.cl_label,
		      dummy_cls.cl_label,
		      sizeof(dummy_cls.cl_label));
		cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
		if (cls == NULL) {
			cls = dmsg_alloc(sizeof(*cls));
			cls->pfs_clid = msg->any.lnk_span.pfs_clid;
			cls->peer_type = msg->any.lnk_span.peer_type;
			bcopy(msg->any.lnk_span.cl_label,
			      cls->cl_label,
			      sizeof(cls->cl_label));
			RB_INIT(&cls->tree);
			RB_INSERT(h2span_cluster_tree, &cluster_tree, cls);
		}

		/*
		 * Find the node
		 */
		dummy_node.pfs_fsid = msg->any.lnk_span.pfs_fsid;
		bcopy(msg->any.lnk_span.fs_label, dummy_node.fs_label,
		      sizeof(dummy_node.fs_label));
		node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
		if (node == NULL) {
			node = dmsg_alloc(sizeof(*node));
			node->pfs_fsid = msg->any.lnk_span.pfs_fsid;
			node->pfs_type = msg->any.lnk_span.pfs_type;
			bcopy(msg->any.lnk_span.fs_label,
			      node->fs_label,
			      sizeof(node->fs_label));
			node->cls = cls;
			RB_INIT(&node->tree);
			RB_INSERT(h2span_node_tree, &cls->tree, node);
			if (dmsg_node_handler) {
				dmsg_node_handler(&node->opaque, msg,
						  DMSG_NODEOP_ADD);
			}
		}

		/*
		 * Create the link
		 */
		assert(state->any.link == NULL);
		slink = dmsg_alloc(sizeof(*slink));
		TAILQ_INIT(&slink->relayq);
		slink->node = node;
		slink->dist = msg->any.lnk_span.dist;
		slink->rnss = msg->any.lnk_span.rnss;
		slink->state = state;
		state->any.link = slink;

		RB_INSERT(h2span_link_tree, &node->tree, slink);

		fprintf(stderr,
			"LNK_SPAN(thr %p): %p %s cl=%s fs=%s dist=%d\n",
			msg->iocom,
			slink,
			dmsg_uuid_to_str(&msg->any.lnk_span.pfs_clid, &alloc),
			msg->any.lnk_span.cl_label,
			msg->any.lnk_span.fs_label,
			msg->any.lnk_span.dist);
		free(alloc);
#if 0
		dmsg_relay_scan(NULL, node);
#endif
		dmsg_iocom_signal(msg->iocom);
	}

	/*
	 * On transaction terminate we remove the tracking infrastructure.
	 */
	if (msg->any.head.cmd & DMSGF_DELETE) {
		slink = state->any.link;
		assert(slink != NULL);
		node = slink->node;
		cls = node->cls;

		fprintf(stderr, "LNK_DELE(thr %p): %p %s cl=%s fs=%s dist=%d\n",
			msg->iocom,
			slink,
			dmsg_uuid_to_str(&cls->pfs_clid, &alloc),
			state->msg->any.lnk_span.cl_label,
			state->msg->any.lnk_span.fs_label,
			state->msg->any.lnk_span.dist);
		free(alloc);

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay transaction.
		 */
		while ((relay = TAILQ_FIRST(&slink->relayq)) != NULL) {
			dmsg_relay_delete(relay);
		}

		/*
		 * Clean out the topology
		 */
		RB_REMOVE(h2span_link_tree, &node->tree, slink);
		if (RB_EMPTY(&node->tree)) {
			RB_REMOVE(h2span_node_tree, &cls->tree, node);
			if (dmsg_node_handler) {
				dmsg_node_handler(&node->opaque, msg,
						  DMSG_NODEOP_DEL);
			}
			if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
				RB_REMOVE(h2span_cluster_tree,
					  &cluster_tree, cls);
				dmsg_free(cls);
			}
			node->cls = NULL;
			dmsg_free(node);
			node = NULL;
		}
		state->any.link = NULL;
		slink->state = NULL;
		slink->node = NULL;
		dmsg_free(slink);

		/*
		 * We have to terminate the transaction
		 */
		dmsg_state_reply(state, 0);
		/* state invalid after reply */

		/*
		 * If the node still exists issue any required updates.  If
		 * it doesn't then all related relays have already been
		 * removed and there's nothing left to do.
		 */
#if 0
		if (node)
			dmsg_relay_scan(NULL, node);
#endif
		if (node)
			dmsg_iocom_signal(msg->iocom);
	}

	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * LNK_CIRC - Virtual circuit protocol message reception
 *            (incoming iocom lock not held)
 *
 * Handles all cases.
 */
void
dmsg_lnk_circ(dmsg_msg_t *msg)
{
	dmsg_circuit_t *circA;
	dmsg_circuit_t *circB;
	dmsg_state_t *rx_state;
	dmsg_state_t *tx_state;
	dmsg_state_t *state;
	dmsg_state_t dummy;
	dmsg_msg_t *fwd_msg;
	dmsg_iocom_t *iocomA;
	dmsg_iocom_t *iocomB;
	int disconnect;

	/*pthread_mutex_lock(&cluster_mtx);*/

	switch (msg->any.head.cmd & (DMSGF_CREATE |
				     DMSGF_DELETE |
				     DMSGF_REPLY)) {
	case DMSGF_CREATE:
	case DMSGF_CREATE | DMSGF_DELETE:
		/*
		 * (A) wishes to establish a virtual circuit through us to (B).
		 * (B) is specified by lnk_circ.target (the message id for
		 * a LNK_SPAN that (A) received from us which represents (B)).
		 *
		 * Designate the originator of the circuit (the current
		 * remote end) as (A) and the other side as (B).
		 *
		 * Accept the VC but do not reply.  We will wait for the end-
		 * to-end reply to propagate back.
		 */
		iocomA = msg->iocom;

		/*
		 * Locate the open transaction state that the other end
		 * specified in <target>.  This will be an open SPAN
		 * transaction that we transmitted (h2span_relay) over
		 * the interface the LNK_CIRC is being received on.
		 *
		 * (all LNK_CIRC's that we transmit are on circuit0)
		 */
		pthread_mutex_lock(&iocomA->mtx);
		dummy.msgid = msg->any.lnk_circ.target;
		tx_state = RB_FIND(dmsg_state_tree,
				   &iocomA->circuit0.statewr_tree,
				   &dummy);
		pthread_mutex_unlock(&iocomA->mtx);
		if (tx_state == NULL) {
			fprintf(stderr, "dmsg_lnk_circ: no circuit\n");
			dmsg_msg_reply(msg, DMSG_ERR_CANTCIRC);
			break;
		}

		/* locate h2span_link */
		rx_state = tx_state->any.relay->source_rt;

		/*
		 * (A) wishes to establish a VC through us to the
		 * specified target.
		 *
		 * (A) sends us the msgid of an open SPAN transaction
		 * it received from us as <target>.
		 */
		circA = dmsg_alloc(sizeof(*circA));
		circA->iocom = iocomA;
		circA->state = msg->state;	/* LNK_CIRC state */
		circA->msgid = msg->state->msgid;
		circA->span_state = tx_state;	/* H2SPAN_RELAY state */
		circA->is_relay = 1;
		circA->refs = 2;		/* state and peer */
		msg->state->any.circ = circA;

		iocomB = rx_state->iocom;

		circB = dmsg_alloc(sizeof(*circB));

		/*
		 * Create a LNK_CIRC transaction on (B)
		 */
		fwd_msg = dmsg_msg_alloc(&iocomB->circuit0,
					 0, DMSG_LNK_CIRC | DMSGF_CREATE,
					 dmsg_lnk_circ, circB);
		fwd_msg->state->any.circ = circB;
		circB->iocom = iocomB;
		circB->state = fwd_msg->state;	/* LNK_CIRC state */
		circB->msgid = fwd_msg->any.head.msgid;
		circB->span_state = rx_state;	/* H2SPAN_LINK state */
		circB->is_relay = 0;
		circB->refs = 2;		/* state and peer */

		/*
		 * Link the two circuits together.
		 */
		circA->peer = circB;
		circB->peer = circA;

		if (RB_INSERT(dmsg_circuit_tree, &iocomA->circuit_tree, circA))
			assert(0);
		if (RB_INSERT(dmsg_circuit_tree, &iocomB->circuit_tree, circB))
			assert(0);

		dmsg_msg_write(fwd_msg);

		if ((msg->any.head.cmd & DMSGF_DELETE) == 0)
			break;
		/* FALL THROUGH TO DELETE */
	case DMSGF_DELETE:
		/*
		 * (A) is deleting the virtual circuit, propagate closure
		 * to (B).
		 */
		iocomA = msg->iocom;
		if (msg->state->any.circ == NULL) {
			/* already returned an error/deleted */
			break;
		}
		circA = msg->state->any.circ;
		circB = circA->peer;
		assert(msg->state == circA->state);

		/*
		 * We are closing (B)'s send side.  If (B)'s receive side is
		 * already closed we disconnect the circuit from (B)'s state.
		 */
		disconnect = 0;
		if (circB && (state = circB->state) != NULL) {
			if (state->rxcmd & DMSGF_DELETE) {
				circB->state = NULL;
				state->any.circ = NULL;
				dmsg_circuit_drop(circB);
			}
			dmsg_state_reply(state, msg->any.head.error);
			disconnect = 1;
		}

		/*
		 * We received a close on (A).  If (A)'s send side is already
		 * closed we disconnect the circuit from (A)'s state.
		 */
		if (circA && (state = circA->state) != NULL) {
			if (state->txcmd & DMSGF_DELETE) {
				circA->state = NULL;
				state->any.circ = NULL;
				dmsg_circuit_drop(circA);
			}
			disconnect = 1;
		}

		/*
		 * Disconnect the peer<->peer association
		 */
		if (disconnect) {
			if (circB) {
				circA->peer = NULL;
				circB->peer = NULL;
				dmsg_circuit_drop(circA);
				dmsg_circuit_drop(circB); /* XXX SMP */
			}
		}
		break;
	case DMSGF_REPLY | DMSGF_CREATE:
	case DMSGF_REPLY | DMSGF_CREATE | DMSGF_DELETE:
		/*
		 * (B) is acknowledging the creation of the virtual
		 * circuit.  This propagates all the way back to (A), though
		 * it should be noted that (A) can start issuing commands
		 * via the virtual circuit before seeing this reply.
		 */
		circB = msg->state->any.circ;
		assert(circB);
		circA = circB->peer;
		assert(msg->state == circB->state);
		assert(circA);
		if ((msg->any.head.cmd & DMSGF_DELETE) == 0) {
			dmsg_state_result(circA->state, msg->any.head.error);
			break;
		}
		/* FALL THROUGH TO DELETE */
	case DMSGF_REPLY | DMSGF_DELETE:
		/*
		 * (B) is deleting the virtual circuit or acknowledging
		 * our deletion of the virtual circuit, propagate closure
		 * to (A).
		 */
		iocomB = msg->iocom;
		circB = msg->state->any.circ;
		circA = circB->peer;
		assert(msg->state == circB->state);

		/*
		 * We received a close on (B), propagate to (A).  If we have
		 * already received the close from (A) we disconnect the state.
		 */
		disconnect = 0;
		if (circA && (state = circA->state) != NULL) {
			if (state->rxcmd & DMSGF_DELETE) {
				circA->state = NULL;
				state->any.circ = NULL;
				dmsg_circuit_drop(circA);
			}
			dmsg_state_reply(state, msg->any.head.error);
			disconnect = 1;
		}

		/*
		 * We received a close on (B).  If (B)'s send side is already
		 * closed we disconnect the state.
		 */
		if (circB && (state = circB->state) != NULL) {
			if (state->txcmd & DMSGF_DELETE) {
				circB->state = NULL;
				state->any.circ = NULL;
				dmsg_circuit_drop(circB);
			}
			disconnect = 1;
		}

		/*
		 * Disconnect the peer<->peer association
		 */
		if (disconnect) {
			if (circA) {
				circB->peer = NULL;
				circA->peer = NULL;
				dmsg_circuit_drop(circB);
				dmsg_circuit_drop(circA); /* XXX SMP */
			}
		}
		break;
	}

	/*pthread_mutex_unlock(&cluster_mtx);*/
}

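/*
 * Circuit pairing overview (derived from the code above): a LNK_CIRC
 * received from (A) creates two linked dmsg_circuit structures, one on
 * each iocom, plus a forwarded LNK_CIRC transaction to (B):
 *
 *	(A) --LNK_CIRC--> us --forwarded LNK_CIRC--> (B)
 *	      circA->peer == circB
 *	      circB->peer == circA
 *
 * circA hangs off the received LNK_CIRC state (is_relay = 1) and circB
 * off the forwarded LNK_CIRC state (is_relay = 0).  Closure from either
 * side propagates to the other, and the pair is dropped once both
 * directions have been closed.
 */
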
/*
 * Update relay transactions for SPANs.
 *
 * Called with cluster_mtx held.
 */
static void dmsg_relay_scan_specific(h2span_node_t *node,
					h2span_conn_t *conn);

static void
dmsg_relay_scan(h2span_conn_t *conn, h2span_node_t *node)
{
	h2span_cluster_t *cls;

	if (node) {
		/*
		 * Iterate specific node
		 */
		TAILQ_FOREACH(conn, &connq, entry)
			dmsg_relay_scan_specific(node, conn);
	} else {
		/*
		 * Full iteration.
		 *
		 * Iterate cluster ids, nodes, and either a specific connection
		 * or all connections.
		 */
		RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
			/*
			 * Iterate node ids
			 */
			RB_FOREACH(node, h2span_node_tree, &cls->tree) {
				/*
				 * Synchronize the node's link (received SPANs)
				 * with each connection's relays.
				 */
				if (conn) {
					dmsg_relay_scan_specific(node, conn);
				} else {
					TAILQ_FOREACH(conn, &connq, entry) {
					    dmsg_relay_scan_specific(node,
								     conn);
					}
					assert(conn == NULL);
				}
			}
		}
	}
}

/*
 * Update the relay'd SPANs for this (node, conn).
 *
 * Iterate links and adjust relays to match.  We only propagate the top link
 * for now (XXX we want to propagate the top two).
 *
 * The dmsg_relay_scan_cmp() function locates the first relay element
 * for any given node.  The relay elements will be sub-sorted by dist.
 */
struct relay_scan_info {
	h2span_node_t *node;
	h2span_relay_t *relay;
};

static int
dmsg_relay_scan_cmp(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	if ((intptr_t)relay->source_rt->any.link->node < (intptr_t)info->node)
		return(-1);
	if ((intptr_t)relay->source_rt->any.link->node > (intptr_t)info->node)
		return(1);
	return(0);
}

static int
dmsg_relay_scan_callback(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	info->relay = relay;
	return(-1);
}

static void
dmsg_relay_scan_specific(h2span_node_t *node, h2span_conn_t *conn)
{
	struct relay_scan_info info;
	h2span_relay_t *relay;
	h2span_relay_t *next_relay;
	h2span_link_t *slink;
	dmsg_lnk_conn_t *lconn;
	dmsg_lnk_span_t *lspan;
	int count;
	int maxcount = 2;
	uint32_t lastdist = DMSG_SPAN_MAXDIST;
	uint32_t lastrnss = 0;

	info.node = node;
	info.relay = NULL;

	/*
	 * Locate the first related relay for the node on this connection.
	 * relay will be NULL if there were none.
	 */
	RB_SCAN(h2span_relay_tree, &conn->tree,
		dmsg_relay_scan_cmp, dmsg_relay_scan_callback, &info);
	relay = info.relay;
	info.relay = NULL;
	if (relay)
		assert(relay->source_rt->any.link->node == node);

	if (DMsgDebugOpt > 8)
		fprintf(stderr, "relay scan for connection %p\n", conn);

	/*
	 * Iterate the node's links (received SPANs) in distance order,
	 * lowest (best) dist first.
	 *
	 * PROPAGATE THE BEST LINKS OVER THE SPECIFIED CONNECTION.
	 *
	 * Track relays while iterating the best links and construct
	 * missing relays when necessary.
	 *
	 * (If some prior better link was removed it would have also
	 *  removed the relay, so the relay can only match exactly or
	 *  be worse).
	 */
	count = 0;
	RB_FOREACH(slink, h2span_link_tree, &node->tree) {
		/*
		 * Increment count of successful relays.  This isn't
		 * quite accurate if we break out but nothing after
		 * the loop uses (count).
		 *
		 * If count exceeds the maximum number of relays we desire
		 * we normally want to break out.  However, in order to
		 * guarantee a symmetric path we have to continue if both
		 * (dist) and (rnss) continue to match.  Otherwise the SPAN
		 * propagation in the reverse direction may choose different
		 * routes and we will not have a symmetric path.
		 *
		 * NOTE: Spanning tree does not have to be symmetrical so
		 *       this code is not currently enabled.
		 */
		if (++count >= maxcount) {
#ifdef REQUIRE_SYMMETRICAL
			if (lastdist != slink->dist || lastrnss != slink->rnss)
				break;
#else
			break;
#endif
			/* go beyond the nominal maximum desired relays */
		}

		/*
		 * Match, relay already in-place, get the next
		 * relay to match against the next slink.
		 */
		if (relay && relay->source_rt->any.link == slink) {
			relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
			continue;
		}

		/*
		 * We might want this SLINK, if it passes our filters.
		 *
		 * The spanning tree can cause closed loops so we have
		 * to limit slink->dist.
		 */
		if (slink->dist > DMSG_SPAN_MAXDIST)
			break;

		/*
		 * Don't bother transmitting a LNK_SPAN out the same
		 * connection it came in on.  Trivial optimization.
		 */
		if (slink->state->iocom == conn->state->iocom)
			break;

		/*
		 * NOTE ON FILTERS: The protocol spec allows non-requested
		 * SPANs to be transmitted, the other end is expected to
		 * leave their transactions open but otherwise ignore them.
		 *
		 * Don't bother transmitting if the remote connection
		 * is not accepting this SPAN's peer_type.
		 *
		 * pfs_mask is typically used so pure clients can filter
		 * out receiving SPANs for other pure clients.
		 */
		lspan = &slink->state->msg->any.lnk_span;
		lconn = &conn->state->msg->any.lnk_conn;
		if (((1LLU << lspan->peer_type) & lconn->peer_mask) == 0)
			break;
		if (((1LLU << lspan->pfs_type) & lconn->pfs_mask) == 0)
			break;

		/*
		 * Do not give pure clients visibility to other pure clients
		 */
		if (lconn->pfs_type == DMSG_PFSTYPE_CLIENT &&
		    lspan->pfs_type == DMSG_PFSTYPE_CLIENT) {
			break;
		}

		/*
		 * Connection filter, if cluster uuid is not NULL it must
		 * match the span cluster uuid.  Only applies when the
		 * peer_type matches.
		 */
		if (lspan->peer_type == lconn->peer_type &&
		    !uuid_is_nil(&lconn->pfs_clid, NULL) &&
		    uuid_compare(&slink->node->cls->pfs_clid,
				 &lconn->pfs_clid, NULL)) {
			break;
		}

		/*
		 * Connection filter, if cluster label is not empty it must
		 * match the span cluster label.  Only applies when the
		 * peer_type matches.
		 */
		if (lspan->peer_type == lconn->peer_type &&
		    lconn->cl_label[0] &&
		    strcmp(lconn->cl_label, slink->node->cls->cl_label)) {
			break;
		}

		/*
		 * NOTE! pfs_fsid differentiates nodes within the same cluster
		 *       so we obviously don't want to match those.  Similarly
		 *       for fs_label.
		 */

		/*
		 * Ok, we've accepted this SPAN for relaying.
		 */
		assert(relay == NULL ||
		       relay->source_rt->any.link->node != slink->node ||
		       relay->source_rt->any.link->dist >= slink->dist);
		relay = dmsg_generate_relay(conn, slink);
		lastdist = slink->dist;
		lastrnss = slink->rnss;

		/*
		 * Match (created new relay), get the next relay to
		 * match against the next slink.
		 */
		relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
	}

	/*
	 * Any remaining relays belonging to this connection which match
	 * the node are in excess of the current aggregate spanning state
	 * and should be removed.
	 */
	while (relay && relay->source_rt->any.link->node == node) {
		next_relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
		dmsg_relay_delete(relay);
		relay = next_relay;
	}
}

/*
 * Helper function to generate missing relay.
 *
 * cluster_mtx must be held
 */
static
h2span_relay_t *
dmsg_generate_relay(h2span_conn_t *conn, h2span_link_t *slink)
{
	h2span_relay_t *relay;
	h2span_node_t *node;
	dmsg_msg_t *msg;

	node = slink->node;

	relay = dmsg_alloc(sizeof(*relay));
	relay->conn = conn;
	relay->source_rt = slink->state;
	/* relay->source_rt->any.link = slink; */

	/*
	 * NOTE: relay->target_rt->any.relay set to relay by alloc.
	 */
	msg = dmsg_msg_alloc(&conn->state->iocom->circuit0,
			     0, DMSG_LNK_SPAN | DMSGF_CREATE,
			     dmsg_lnk_relay, relay);
	relay->target_rt = msg->state;

	msg->any.lnk_span = slink->state->msg->any.lnk_span;
	msg->any.lnk_span.dist = slink->dist + 1;
	msg->any.lnk_span.rnss = slink->rnss + dmsg_rnss();

	RB_INSERT(h2span_relay_tree, &conn->tree, relay);
	TAILQ_INSERT_TAIL(&slink->relayq, relay, entry);

	dmsg_msg_write(msg);

	return (relay);
}

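/*
 * Worked example: if the incoming SPAN for a node carried dist=2, the
 * relayed SPAN is transmitted with dist=3 (one more hop).  rnss
 * accumulates an additional random subsort value per hop so that
 * equal-dist paths are tie-broken consistently across the topology,
 * and dmsg_relay_scan_specific() stops relaying a SPAN entirely once
 * its dist exceeds DMSG_SPAN_MAXDIST (16).
 */
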
/*
 * Messages received on relay SPANs.  These are open transactions so it is
 * in fact possible for the other end to close the transaction.
 *
 * XXX MPRACE on state structure
 */
static void
dmsg_lnk_relay(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	h2span_relay_t *relay;

	assert(msg->any.head.cmd & DMSGF_REPLY);

	if (msg->any.head.cmd & DMSGF_DELETE) {
		pthread_mutex_lock(&cluster_mtx);
		if ((relay = state->any.relay) != NULL) {
			dmsg_relay_delete(relay);
		} else {
			dmsg_state_reply(state, 0);
		}
		pthread_mutex_unlock(&cluster_mtx);
	}
}

static
void
dmsg_relay_delete(h2span_relay_t *relay)
{
	fprintf(stderr,
		"RELAY DELETE %p RELAY %p ON CLS=%p NODE=%p DIST=%d FD %d "
		"STATE %p\n",
		relay->source_rt->any.link,
		relay,
		relay->source_rt->any.link->node->cls,
		relay->source_rt->any.link->node,
		relay->source_rt->any.link->dist,
		relay->conn->state->iocom->sock_fd,
		relay->target_rt);

	RB_REMOVE(h2span_relay_tree, &relay->conn->tree, relay);
	TAILQ_REMOVE(&relay->source_rt->any.link->relayq, relay, entry);

	if (relay->target_rt) {
		relay->target_rt->any.relay = NULL;
		dmsg_state_reply(relay->target_rt, 0);
		/* state invalid after reply */
		relay->target_rt = NULL;
	}
	relay->conn = NULL;
	relay->source_rt = NULL;
	dmsg_free(relay);
}

static void *
dmsg_volconf_thread(void *info)
{
	h2span_media_config_t *conf = info;

	pthread_mutex_lock(&cluster_mtx);
	while ((conf->ctl & H2CONFCTL_STOP) == 0) {
		if (conf->ctl & H2CONFCTL_UPDATE) {
			fprintf(stderr, "VOLCONF UPDATE\n");
			conf->ctl &= ~H2CONFCTL_UPDATE;
			if (bcmp(&conf->copy_run, &conf->copy_pend,
				 sizeof(conf->copy_run)) == 0) {
				fprintf(stderr, "VOLCONF: no changes\n");
				continue;
			}
			/*
			 * XXX TODO - auto reconnect on lookup failure or
			 *	      connect failure or stream failure.
			 */

			pthread_mutex_unlock(&cluster_mtx);
			dmsg_volconf_stop(conf);
			conf->copy_run = conf->copy_pend;
			if (conf->copy_run.copyid != 0 &&
			    strncmp(conf->copy_run.path, "span:", 5) == 0) {
				dmsg_volconf_start(conf,
						   conf->copy_run.path + 5);
			}
			pthread_mutex_lock(&cluster_mtx);
			fprintf(stderr, "VOLCONF UPDATE DONE state %d\n",
				conf->state);
		}
		if (conf->state == H2MC_CONNECT) {
			dmsg_volconf_start(conf, conf->copy_run.path + 5);
			pthread_mutex_unlock(&cluster_mtx);
			sleep(5);
			pthread_mutex_lock(&cluster_mtx);
		} else {
			pthread_cond_wait(&conf->cond, &cluster_mtx);
		}
	}
	pthread_mutex_unlock(&cluster_mtx);
	dmsg_volconf_stop(conf);
	return(NULL);
}

static
void
dmsg_volconf_stop(h2span_media_config_t *conf)
{
	switch(conf->state) {
	case H2MC_STOPPED:
		break;
	case H2MC_CONNECT:
		conf->state = H2MC_STOPPED;
		break;
	case H2MC_RUNNING:
		shutdown(conf->fd, SHUT_WR);
		pthread_join(conf->iocom_thread, NULL);
		conf->iocom_thread = NULL;
		/*
		 * Reset the state so a subsequent dmsg_volconf_start()
		 * is not a no-op.
		 */
		conf->state = H2MC_STOPPED;
		break;
	}
}

static
void
dmsg_volconf_start(h2span_media_config_t *conf, const char *hostname)
{
	dmsg_master_service_info_t *info;

	switch(conf->state) {
	case H2MC_STOPPED:
	case H2MC_CONNECT:
		conf->fd = dmsg_connect(hostname);
		if (conf->fd < 0) {
			fprintf(stderr, "Unable to connect to %s\n", hostname);
			conf->state = H2MC_CONNECT;
		} else {
			info = malloc(sizeof(*info));
			bzero(info, sizeof(*info));
			info->fd = conf->fd;
			info->detachme = 0;
			conf->state = H2MC_RUNNING;
			pthread_create(&conf->iocom_thread, NULL,
				       dmsg_master_service, info);
		}
		break;
	case H2MC_RUNNING:
		break;
	}
}

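/*
 * Overview of the volume configuration state machine implemented by
 * dmsg_volconf_thread(), dmsg_volconf_stop(), and dmsg_volconf_start():
 *
 *	H2MC_STOPPED --start, connect ok-----------> H2MC_RUNNING
 *	H2MC_STOPPED --start, connect failed-------> H2MC_CONNECT
 *	H2MC_CONNECT --start retried by the thread every 5 seconds
 *	H2MC_RUNNING --stop: shutdown(SHUT_WR)+join-> H2MC_STOPPED
 */
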
/************************************************************************
 *			MESSAGE ROUTING AND SOURCE VALIDATION		*
 ************************************************************************/

int
dmsg_circuit_relay(dmsg_msg_t *msg)
{
	dmsg_iocom_t *iocom = msg->iocom;
	dmsg_circuit_t *circ;
	dmsg_circuit_t *peer;
	dmsg_circuit_t dummy;
	int error = 0;

	/*
	 * Relaying occurs before any state processing; the message state
	 * should always be NULL.
	 */
	assert(msg->state == NULL);

	/*
	 * Look up the circuit on the incoming iocom.
	 */
	pthread_mutex_lock(&cluster_mtx);

	dummy.msgid = msg->any.head.circuit;
	circ = RB_FIND(dmsg_circuit_tree, &iocom->circuit_tree, &dummy);
	assert(circ);
	peer = circ->peer;

	msg->iocom = peer->iocom;
	msg->any.head.circuit = peer->msgid;

	pthread_mutex_unlock(&cluster_mtx);

	dmsg_msg_write(msg);
	error = DMSG_IOQ_ERROR_ROUTED;

	return error;
}

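/*
 * Illustration of the forwarding hop performed above: two back-to-back
 * circuits are tied together via their peer pointers, and an incoming
 * message is re-addressed from one to the other:
 *
 *	iocom A				iocom B
 *	circuit (msgid=X) <--- peer ---> circuit (msgid=Y)
 *
 *	message in on A with head.circuit == X
 *	    -> head.circuit = Y, msg->iocom = B, dmsg_msg_write(msg)
 */
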
/************************************************************************
 *			ROUTER AND MESSAGING HANDLES			*
 ************************************************************************
 *
 * Basically the idea here is to provide a stable data structure which
 * can be localized to the caller for higher level protocols to work with.
 * Depending on the context, these dmsg_handle structures can be pooled
 * by use case and remain persistent through a client's (or mount
 * point's) life.
 */

#if 0
/*
 * Obtain a stable handle on a cluster given its uuid.  This ties directly
 * into the global cluster topology, creating the structure if necessary
 * (even if the uuid does not exist yet), and preventing the structure
 * from getting ripped out from under us while we hold a pointer to it.
 */
h2span_cluster_t *
dmsg_cluster_get(uuid_t *pfs_clid)
{
	h2span_cluster_t dummy_cls;
	h2span_cluster_t *cls;

	dummy_cls.pfs_clid = *pfs_clid;
	pthread_mutex_lock(&cluster_mtx);
	cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
	if (cls)
		++cls->refs;
	pthread_mutex_unlock(&cluster_mtx);
	return (cls);
}

void
dmsg_cluster_put(h2span_cluster_t *cls)
{
	pthread_mutex_lock(&cluster_mtx);
	assert(cls->refs > 0);
	--cls->refs;
	if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
		RB_REMOVE(h2span_cluster_tree, &cluster_tree, cls);
		dmsg_free(cls);
	}
	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Obtain a stable handle to a specific cluster node given its uuid.
 * This handle does NOT lock in the route to the node and is typically
 * used as part of the dmsg_handle_*() API to obtain a set of
 * stable nodes.
 */
h2span_node_t *
dmsg_node_get(h2span_cluster_t *cls, uuid_t *pfs_fsid)
{
	/* XXX unimplemented stub */
}

#endif

/*
 * Dumps the spanning tree.
 */
void
dmsg_shell_tree(dmsg_circuit_t *circuit, char *cmdbuf __unused)
{
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	char *uustr = NULL;

	pthread_mutex_lock(&cluster_mtx);
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		dmsg_circuit_printf(circuit, "Cluster %s %s (%s)\n",
				  dmsg_peer_type_to_str(cls->peer_type),
				  dmsg_uuid_to_str(&cls->pfs_clid, &uustr),
				  cls->cl_label);
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			dmsg_circuit_printf(circuit, "    Node %s %s (%s)\n",
				dmsg_pfs_type_to_str(node->pfs_type),
				dmsg_uuid_to_str(&node->pfs_fsid, &uustr),
				node->fs_label);
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				dmsg_circuit_printf(circuit,
					    "\tLink dist=%d via %d\n",
					    slink->dist,
					    slink->state->iocom->sock_fd);
			}
		}
	}
	pthread_mutex_unlock(&cluster_mtx);
	if (uustr)
		free(uustr);
#if 0
	TAILQ_FOREACH(conn, &connq, entry) {
	}
#endif
}

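/*
 * Example of the output format produced by dmsg_shell_tree() (the type
 * strings, uuids, and numbers below are illustrative placeholders):
 *
 *	Cluster <peer_type> <pfs_clid-uuid> (<cl_label>)
 *	    Node <pfs_type> <pfs_fsid-uuid> (<fs_label>)
 *		Link dist=1 via 4
 */
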
/*
 * Random number sub-sort value to add to SPAN rnss fields on relay.
 * This allows us to differentiate spans with the same <dist> field
 * for relaying purposes.  We must normally limit the number of relays
 * for any given SPAN origination, but we must also guarantee that a
 * symmetric reverse path exists, so we use the rnss field as a sub-sort
 * (since there can be thousands or millions of spans if we only match
 * on <dist>), and if there are STILL too many spans we go past the limit.
 */
static
uint32_t
dmsg_rnss(void)
{
	/*
	 * The unlocked pre-check is a benign race: DMsgRNSS is set once
	 * and never cleared, and the locked re-check covers the
	 * initialization case.
	 */
	if (DMsgRNSS == 0) {
		pthread_mutex_lock(&cluster_mtx);
		while (DMsgRNSS == 0) {
			srandomdev();
			DMsgRNSS = random();
		}
		pthread_mutex_unlock(&cluster_mtx);
	}
	return(DMsgRNSS);
}
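
/*
 * Illustrative fragment (hypothetical, not the actual tree comparator
 * used by this file) showing how <dist> and <rnss> combine into the
 * relay sort key described above: <dist> is the primary key and <rnss>
 * breaks ties between spans at the same distance.
 */
#if 0
	if (l1->dist != l2->dist)
		return (l1->dist < l2->dist ? -1 : 1);
	if (l1->rnss != l2->rnss)
		return (l1->rnss < l2->rnss ? -1 : 1);
	return (0);	/* remaining tie-breaks omitted */
#endif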