/*
 * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * LNK_SPAN PROTOCOL SUPPORT FUNCTIONS - Please see sys/dmsg.h for an
 * involved explanation of the protocol.
 */

#include "dmsg_local.h"

void (*dmsg_node_handler)(void **opaquep, struct dmsg_msg *msg, int op);

/*
 * Maximum spanning tree distance.  This has the practical effect of
 * stopping tail-chasing closed loops when a feeder span is lost.
 */
#define DMSG_SPAN_MAXDIST	16

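/*
 * Illustrative note (a sketch of the effect, see dmsg_relay_scan_specific()
 * and dmsg_generate_relay() below): dist increments by one on each relay
 * hop, so with MAXDIST 16 a SPAN that has been relayed through 16 hops is
 * no longer re-propagated, bounding how long a residual routing loop can
 * chase its own tail after a feeder span drops.
 */
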
/*
 * RED-BLACK TREE DEFINITIONS
 *
 * We need to track:
 *
 * (1) shared fsid's (a cluster).
 * (2) unique fsid's (a node in a cluster) <--- LNK_SPAN transactions.
 *
 * We need to aggregate all active LNK_SPANs and create our own outgoing
 * LNK_SPAN transactions on each of our connections representing the
 * aggregated state.
 *
 * h2span_conn		- list of iocom connections who wish to receive SPAN
 *			  propagation from other connections.  Might contain
 *			  a filter string.  Only iocoms with an open
 *			  LNK_CONN transaction are applicable for SPAN
 *			  propagation.
 *
 * h2span_relay		- List of links relayed (via SPAN).  Essentially
 *			  each relay structure represents a LNK_SPAN
 *			  transaction that we initiated, versus h2span_link
 *			  which is a LNK_SPAN transaction that we received.
 *
 * --
 *
 * h2span_cluster	- Organizes the shared fsid's.  One structure for
 *			  each cluster.
 *
 * h2span_node		- Organizes the nodes in a cluster.  One structure
 *			  for each unique {cluster,node}, aka {fsid, pfs_fsid}.
 *
 * h2span_link		- Organizes all incoming and outgoing LNK_SPAN message
 *			  transactions related to a node.
 *
 *			  One h2span_link structure for each incoming LNK_SPAN
 *			  transaction.  Links selected for propagation back
 *			  out are also where the outgoing LNK_SPAN messages
 *			  are indexed into (so we can propagate changes).
 *
 *			  The h2span_link's use a red-black tree to sort the
 *			  distance hop metric for the incoming LNK_SPAN.  We
 *			  then select the top N for outgoing.  When the
 *			  topology changes the top N may also change and cause
 *			  new outgoing LNK_SPAN transactions to be opened
 *			  and less desirable ones to be closed, causing
 *			  transactional aborts within the message flow in
 *			  the process.
 *
 * Also note		- All outgoing LNK_SPAN message transactions are also
 *			  entered into a red-black tree for use by the routing
 *			  function.  This is handled by msg.c in the state
 *			  code, not here.
 */

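/*
 * Rough containment sketch of the structures above (illustrative only):
 *
 *	cluster_tree (RB)
 *	  h2span_cluster	one per {peer_type, pfs_clid, cl_label}
 *	    h2span_node		one per {fs_label, pfs_fsid}
 *	      h2span_link	one per received LNK_SPAN, sorted dist/rnss
 *
 *	connq (TAILQ)
 *	  h2span_conn		one per open LNK_CONN
 *	    relay tree (RB)	h2span_relay, one per LNK_SPAN we relay out
 */
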
struct h2span_link;
struct h2span_relay;
TAILQ_HEAD(h2span_media_queue, h2span_media);
TAILQ_HEAD(h2span_conn_queue, h2span_conn);
TAILQ_HEAD(h2span_relay_queue, h2span_relay);

RB_HEAD(h2span_cluster_tree, h2span_cluster);
RB_HEAD(h2span_node_tree, h2span_node);
RB_HEAD(h2span_link_tree, h2span_link);
RB_HEAD(h2span_relay_tree, h2span_relay);
uint32_t DMsgRNSS;

/*
 * This represents a single media.
 */
struct h2span_media {
	TAILQ_ENTRY(h2span_media) entry;
	uuid_t	mediaid;
	int	refs;
	struct h2span_media_config {
		dmsg_vol_data_t		copy_run;
		dmsg_vol_data_t		copy_pend;
		pthread_t		thread;
		pthread_cond_t		cond;
		int			ctl;
		int			fd;
		dmsg_iocom_t		iocom;
		pthread_t		iocom_thread;
		enum { H2MC_STOPPED, H2MC_CONNECT, H2MC_RUNNING } state;
	} config[DMSG_COPYID_COUNT];
};

typedef struct h2span_media_config h2span_media_config_t;

#define H2CONFCTL_STOP		0x00000001
#define H2CONFCTL_UPDATE	0x00000002

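/*
 * These ctl bits are consumed by dmsg_volconf_thread() below:
 * H2CONFCTL_UPDATE asks the per-config thread to apply copy_pend,
 * H2CONFCTL_STOP asks it to exit (see the media teardown path in
 * dmsg_lnk_conn()).
 */
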
/*
 * A received LNK_CONN transaction enables the SPAN protocol over the
 * connection (and may contain a filter).  Typically there is one for
 * each mount, and several may share the same media.
 */
struct h2span_conn {
	TAILQ_ENTRY(h2span_conn) entry;
	struct h2span_relay_tree tree;
	struct h2span_media *media;
	dmsg_state_t *state;
};

/*
 * All received LNK_SPANs are organized by cluster (pfs_clid),
 * node (pfs_fsid), and link (received LNK_SPAN transaction).
 */
struct h2span_cluster {
	RB_ENTRY(h2span_cluster) rbnode;
	struct h2span_node_tree tree;
	uuid_t	pfs_clid;		/* shared fsid */
	uint8_t	peer_type;
	char	cl_label[128];		/* cluster label (typ PEER_BLOCK) */
	int	refs;			/* prevents destruction */
};

struct h2span_node {
	RB_ENTRY(h2span_node) rbnode;
	struct h2span_link_tree tree;
	struct h2span_cluster *cls;
	uint8_t	pfs_type;
	uuid_t	pfs_fsid;		/* unique fsid */
	char	fs_label[128];		/* fs label (typ PEER_HAMMER2) */
	void	*opaque;
};

struct h2span_link {
	RB_ENTRY(h2span_link) rbnode;
	dmsg_state_t	*state;		/* state<->link */
	struct h2span_node *node;	/* related node */
	uint32_t	dist;
	uint32_t	rnss;
	struct h2span_relay_queue relayq; /* relay out */
};

/*
 * Any LNK_SPAN transactions we receive which are relayed out other
 * connections utilize this structure to track the LNK_SPAN transactions
 * we initiate (relay out) on other connections.  We only relay out
 * LNK_SPANs on connections we have an open CONN transaction for.
 *
 * The relay structure points to the outgoing LNK_SPAN trans (out_state)
 * and to the incoming LNK_SPAN transaction (in_state).  The relay
 * structure holds refs on the related states.
 *
 * In many respects this is the core of the protocol... actually figuring
 * out what LNK_SPANs to relay.  The spanid used for relaying is the
 * address of the 'state' structure, which is why h2span_relay has to
 * be entered into an RB tree based at h2span_conn (so we can look
 * up the spanid to validate it).
 */
struct h2span_relay {
	TAILQ_ENTRY(h2span_relay) entry;	/* from link */
	RB_ENTRY(h2span_relay) rbnode;		/* from h2span_conn */
	struct h2span_conn	*conn;		/* related CONN transaction */
	dmsg_state_t		*source_rt;	/* h2span_link state */
	dmsg_state_t		*target_rt;	/* h2span_relay state */
};

typedef struct h2span_media h2span_media_t;
typedef struct h2span_conn h2span_conn_t;
typedef struct h2span_cluster h2span_cluster_t;
typedef struct h2span_node h2span_node_t;
typedef struct h2span_link h2span_link_t;
typedef struct h2span_relay h2span_relay_t;

#define dmsg_termstr(array)	_dmsg_termstr((array), sizeof(array))

static h2span_relay_t *dmsg_generate_relay(h2span_conn_t *conn,
					h2span_link_t *slink);
static uint32_t dmsg_rnss(void);

static __inline
void
_dmsg_termstr(char *base, size_t size)
{
	base[size-1] = 0;
}

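/*
 * Usage sketch: fixed-size label arrays received off the wire are not
 * guaranteed to be NUL-terminated, so e.g.
 *
 *	dmsg_termstr(msg->any.lnk_span.cl_label);
 *
 * is run before the labels are handed to strcmp()/bcopy() (see
 * dmsg_lnk_span() below).
 */
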
/*
 * Cluster peer_type, uuid, AND label must all match for a match.
 */
static
int
h2span_cluster_cmp(h2span_cluster_t *cls1, h2span_cluster_t *cls2)
{
	int r;

	if (cls1->peer_type < cls2->peer_type)
		return(-1);
	if (cls1->peer_type > cls2->peer_type)
		return(1);
	r = uuid_compare(&cls1->pfs_clid, &cls2->pfs_clid, NULL);
	if (r == 0)
		r = strcmp(cls1->cl_label, cls2->cl_label);

	return r;
}

/*
 * Match against fs_label/pfs_fsid.  Together these two items represent a
 * unique node.  In most cases the primary differentiator is pfs_fsid but
 * we also string-match fs_label.
 */
static
int
h2span_node_cmp(h2span_node_t *node1, h2span_node_t *node2)
{
	int r;

	r = strcmp(node1->fs_label, node2->fs_label);
	if (r == 0)
		r = uuid_compare(&node1->pfs_fsid, &node2->pfs_fsid, NULL);
	return (r);
}

/*
 * Sort/subsort must match h2span_relay_cmp() under any given node
 * to make the aggregation algorithm easier, so the best links are
 * in the same sorted order as the best relays.
 *
 * NOTE: We cannot use link*->state->msgid because this msgid is created
 *	 by each remote host and thus might wind up being the same.
 */
static
int
h2span_link_cmp(h2span_link_t *link1, h2span_link_t *link2)
{
	if (link1->dist < link2->dist)
		return(-1);
	if (link1->dist > link2->dist)
		return(1);
	if (link1->rnss < link2->rnss)
		return(-1);
	if (link1->rnss > link2->rnss)
		return(1);
#if 1
	if ((uintptr_t)link1->state < (uintptr_t)link2->state)
		return(-1);
	if ((uintptr_t)link1->state > (uintptr_t)link2->state)
		return(1);
#else
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
#endif
	return(0);
}

/*
 * Relay entries are sorted by node, subsorted by distance and link
 * address (so we can match up the conn->tree relay topology with
 * a node's link topology).
 */
static
int
h2span_relay_cmp(h2span_relay_t *relay1, h2span_relay_t *relay2)
{
	h2span_link_t *link1 = relay1->source_rt->any.link;
	h2span_link_t *link2 = relay2->source_rt->any.link;

	if ((intptr_t)link1->node < (intptr_t)link2->node)
		return(-1);
	if ((intptr_t)link1->node > (intptr_t)link2->node)
		return(1);
	if (link1->dist < link2->dist)
		return(-1);
	if (link1->dist > link2->dist)
		return(1);
	if (link1->rnss < link2->rnss)
		return(-1);
	if (link1->rnss > link2->rnss)
		return(1);
#if 1
	if ((uintptr_t)link1->state < (uintptr_t)link2->state)
		return(-1);
	if ((uintptr_t)link1->state > (uintptr_t)link2->state)
		return(1);
#else
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
#endif
	return(0);
}

RB_PROTOTYPE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_PROTOTYPE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_PROTOTYPE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_PROTOTYPE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);

RB_GENERATE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_GENERATE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_GENERATE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_GENERATE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);

/*
 * Global mutex protects cluster_tree lookups, connq, mediaq.
 */
static pthread_mutex_t cluster_mtx;
static struct h2span_cluster_tree cluster_tree = RB_INITIALIZER(cluster_tree);
static struct h2span_conn_queue connq = TAILQ_HEAD_INITIALIZER(connq);
static struct h2span_media_queue mediaq = TAILQ_HEAD_INITIALIZER(mediaq);

static void dmsg_lnk_span(dmsg_msg_t *msg);
static void dmsg_lnk_conn(dmsg_msg_t *msg);
static void dmsg_lnk_circ(dmsg_msg_t *msg);
static void dmsg_lnk_relay(dmsg_msg_t *msg);
static void dmsg_relay_scan(h2span_conn_t *conn, h2span_node_t *node);
static void dmsg_relay_delete(h2span_relay_t *relay);

static void *dmsg_volconf_thread(void *info);
static void dmsg_volconf_stop(h2span_media_config_t *conf);
static void dmsg_volconf_start(h2span_media_config_t *conf,
				const char *hostname);

void
dmsg_msg_lnk_signal(dmsg_iocom_t *iocom __unused)
{
	pthread_mutex_lock(&cluster_mtx);
	dmsg_relay_scan(NULL, NULL);
	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * DMSG_PROTO_LNK - Generic DMSG_PROTO_LNK.
 *	      (incoming iocom lock not held)
 *
 * This function is typically called for one-way and opening-transactions
 * since state->func is assigned after that, but it will also be called
 * if no state->func is assigned on transaction-open.
 */
void
dmsg_msg_lnk(dmsg_msg_t *msg)
{
	uint32_t icmd = msg->state ? msg->state->icmd : msg->any.head.cmd;

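	/*
	 * Masking with DMSGF_BASECMDMASK strips the transaction flags,
	 * so e.g. (DMSG_LNK_SPAN | DMSGF_CREATE | DMSGF_DELETE) and a
	 * bare DMSG_LNK_SPAN dispatch identically below.
	 */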
	switch(icmd & DMSGF_BASECMDMASK) {
	case DMSG_LNK_CONN:
		dmsg_lnk_conn(msg);
		break;
	case DMSG_LNK_SPAN:
		dmsg_lnk_span(msg);
		break;
	case DMSG_LNK_CIRC:
		dmsg_lnk_circ(msg);
		break;
	default:
		fprintf(stderr,
			"MSG_PROTO_LNK: Unknown msg %08x\n", msg->any.head.cmd);
		dmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
		/* state invalid after reply */
		break;
	}
}

/*
 * LNK_CONN - iocom identify message reception.
 *	      (incoming iocom lock not held)
 *
 * Remote node identifies itself to us, sets up a SPAN filter, and gives us
 * the ok to start transmitting SPANs.
 */
void
dmsg_lnk_conn(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	h2span_media_t *media;
	h2span_media_config_t *conf;
	h2span_conn_t *conn;
	h2span_relay_t *relay;
	char *alloc = NULL;
	int i;

	pthread_mutex_lock(&cluster_mtx);

	fprintf(stderr, "dmsg_lnk_conn: msg %p cmd %08x state %p txcmd %08x rxcmd %08x\n",
		msg, msg->any.head.cmd, state, state->txcmd, state->rxcmd);

	switch(msg->any.head.cmd & DMSGF_TRANSMASK) {
	case DMSG_LNK_CONN | DMSGF_CREATE:
	case DMSG_LNK_CONN | DMSGF_CREATE | DMSGF_DELETE:
		/*
		 * On transaction start we allocate a new h2span_conn and
		 * acknowledge the request, leaving the transaction open.
		 * We then relay priority-selected SPANs.
		 */
		fprintf(stderr, "LNK_CONN(%08x): %s/%s/%s\n",
			(uint32_t)msg->any.head.msgid,
			dmsg_uuid_to_str(&msg->any.lnk_conn.pfs_clid,
					    &alloc),
			msg->any.lnk_conn.cl_label,
			msg->any.lnk_conn.fs_label);
		free(alloc);

		conn = dmsg_alloc(sizeof(*conn));

		RB_INIT(&conn->tree);
		state->iocom->conn = conn;	/* XXX only one */
		conn->state = state;
		state->func = dmsg_lnk_conn;
		state->any.conn = conn;
		TAILQ_INSERT_TAIL(&connq, conn, entry);

		/*
		 * Set up media
		 */
		TAILQ_FOREACH(media, &mediaq, entry) {
			if (uuid_compare(&msg->any.lnk_conn.mediaid,
					 &media->mediaid, NULL) == 0) {
				break;
			}
		}
		if (media == NULL) {
			media = dmsg_alloc(sizeof(*media));
			media->mediaid = msg->any.lnk_conn.mediaid;
			TAILQ_INSERT_TAIL(&mediaq, media, entry);
		}
		conn->media = media;
		++media->refs;

		if ((msg->any.head.cmd & DMSGF_DELETE) == 0) {
			dmsg_msg_result(msg, 0);
			dmsg_iocom_signal(msg->iocom);
			break;
		}
		/* FALL THROUGH */
	case DMSG_LNK_CONN | DMSGF_DELETE:
	case DMSG_LNK_ERROR | DMSGF_DELETE:
deleteconn:
		/*
		 * On transaction terminate we clean out our h2span_conn
		 * and acknowledge the request, closing the transaction.
		 */
		fprintf(stderr, "LNK_CONN: Terminated\n");
		conn = state->any.conn;
		assert(conn);

		/*
		 * Clean out the media structure.  If refs drops to zero we
		 * also clean out the media config threads.  These threads
		 * maintain span connections to other hammer2 service daemons.
		 */
		media = conn->media;
		if (--media->refs == 0) {
			fprintf(stderr, "Shutting down media spans\n");
			for (i = 0; i < DMSG_COPYID_COUNT; ++i) {
				conf = &media->config[i];

				if (conf->thread == NULL)
					continue;
				conf->ctl = H2CONFCTL_STOP;
				pthread_cond_signal(&conf->cond);
			}
			for (i = 0; i < DMSG_COPYID_COUNT; ++i) {
				conf = &media->config[i];

				if (conf->thread == NULL)
					continue;
				pthread_mutex_unlock(&cluster_mtx);
				pthread_join(conf->thread, NULL);
				pthread_mutex_lock(&cluster_mtx);
				conf->thread = NULL;
				pthread_cond_destroy(&conf->cond);
			}
			fprintf(stderr, "Media shutdown complete\n");
			TAILQ_REMOVE(&mediaq, media, entry);
			dmsg_free(media);
		}

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay transaction.
		 */
		while ((relay = RB_ROOT(&conn->tree)) != NULL) {
			dmsg_relay_delete(relay);
		}

		/*
		 * Clean out conn
		 */
		conn->media = NULL;
		conn->state = NULL;
		msg->state->any.conn = NULL;
		msg->state->iocom->conn = NULL;
		TAILQ_REMOVE(&connq, conn, entry);
		dmsg_free(conn);

		dmsg_msg_reply(msg, 0);
		/* state invalid after reply */
		break;
	case DMSG_LNK_VOLCONF:
		/*
		 * One-way volume-configuration message is transmitted
		 * over the open LNK_CONN transaction.
		 */
		fprintf(stderr, "RECEIVED VOLCONF\n");
		if (msg->any.lnk_volconf.index < 0 ||
		    msg->any.lnk_volconf.index >= DMSG_COPYID_COUNT) {
			fprintf(stderr, "VOLCONF: ILLEGAL INDEX %d\n",
				msg->any.lnk_volconf.index);
			break;
		}
		if (msg->any.lnk_volconf.copy.path[sizeof(msg->any.lnk_volconf.copy.path) - 1] != 0 ||
		    msg->any.lnk_volconf.copy.path[0] == 0) {
			fprintf(stderr, "VOLCONF: ILLEGAL PATH %d\n",
				msg->any.lnk_volconf.index);
			break;
		}
		conn = msg->state->any.conn;
		if (conn == NULL) {
			fprintf(stderr, "VOLCONF: LNK_CONN is missing\n");
			break;
		}
		conf = &conn->media->config[msg->any.lnk_volconf.index];
		conf->copy_pend = msg->any.lnk_volconf.copy;
		conf->ctl |= H2CONFCTL_UPDATE;
		if (conf->thread == NULL) {
			fprintf(stderr, "VOLCONF THREAD STARTED\n");
			pthread_cond_init(&conf->cond, NULL);
			pthread_create(&conf->thread, NULL,
				       dmsg_volconf_thread, (void *)conf);
		}
		pthread_cond_signal(&conf->cond);
		break;
	default:
		/*
		 * Failsafe
		 */
		if (msg->any.head.cmd & DMSGF_DELETE)
			goto deleteconn;
		dmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
		break;
	}
	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * LNK_SPAN - Spanning tree protocol message reception
 *	      (incoming iocom lock not held)
 *
 * Receive a spanning tree transactional message, creating or destroying
 * a SPAN and propagating it to other iocoms.
 */
void
dmsg_lnk_span(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	h2span_cluster_t dummy_cls;
	h2span_node_t dummy_node;
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	h2span_relay_t *relay;
	char *alloc = NULL;

	assert((msg->any.head.cmd & DMSGF_REPLY) == 0);

	pthread_mutex_lock(&cluster_mtx);

	/*
	 * On transaction start we initialize the tracking infrastructure
	 */
	if (msg->any.head.cmd & DMSGF_CREATE) {
		assert(state->func == NULL);
		state->func = dmsg_lnk_span;

		dmsg_termstr(msg->any.lnk_span.cl_label);
		dmsg_termstr(msg->any.lnk_span.fs_label);

		/*
		 * Find the cluster
		 */
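		/*
		 * dummy_cls is a stack-local lookup key; only the fields
		 * h2span_cluster_cmp() examines (peer_type, pfs_clid,
		 * cl_label) need to be filled in for the RB_FIND.
		 */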
		dummy_cls.pfs_clid = msg->any.lnk_span.pfs_clid;
		dummy_cls.peer_type = msg->any.lnk_span.peer_type;
		bcopy(msg->any.lnk_span.cl_label,
		      dummy_cls.cl_label,
		      sizeof(dummy_cls.cl_label));
		cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
		if (cls == NULL) {
			cls = dmsg_alloc(sizeof(*cls));
			cls->pfs_clid = msg->any.lnk_span.pfs_clid;
			cls->peer_type = msg->any.lnk_span.peer_type;
			bcopy(msg->any.lnk_span.cl_label,
			      cls->cl_label,
			      sizeof(cls->cl_label));
			RB_INIT(&cls->tree);
			RB_INSERT(h2span_cluster_tree, &cluster_tree, cls);
		}

		/*
		 * Find the node
		 */
		dummy_node.pfs_fsid = msg->any.lnk_span.pfs_fsid;
		bcopy(msg->any.lnk_span.fs_label, dummy_node.fs_label,
		      sizeof(dummy_node.fs_label));
		node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
		if (node == NULL) {
			node = dmsg_alloc(sizeof(*node));
			node->pfs_fsid = msg->any.lnk_span.pfs_fsid;
			node->pfs_type = msg->any.lnk_span.pfs_type;
			bcopy(msg->any.lnk_span.fs_label,
			      node->fs_label,
			      sizeof(node->fs_label));
			node->cls = cls;
			RB_INIT(&node->tree);
			RB_INSERT(h2span_node_tree, &cls->tree, node);
			if (dmsg_node_handler) {
				dmsg_node_handler(&node->opaque, msg,
						  DMSG_NODEOP_ADD);
			}
		}

		/*
		 * Create the link
		 */
		assert(state->any.link == NULL);
		slink = dmsg_alloc(sizeof(*slink));
		TAILQ_INIT(&slink->relayq);
		slink->node = node;
		slink->dist = msg->any.lnk_span.dist;
		slink->rnss = msg->any.lnk_span.rnss;
		slink->state = state;
		state->any.link = slink;

		RB_INSERT(h2span_link_tree, &node->tree, slink);

		fprintf(stderr,
			"LNK_SPAN(thr %p): %p %s cl=%s fs=%s dist=%d\n",
			msg->iocom,
			slink,
			dmsg_uuid_to_str(&msg->any.lnk_span.pfs_clid, &alloc),
			msg->any.lnk_span.cl_label,
			msg->any.lnk_span.fs_label,
			msg->any.lnk_span.dist);
		free(alloc);
#if 0
		dmsg_relay_scan(NULL, node);
#endif
		dmsg_iocom_signal(msg->iocom);
	}

	/*
	 * On transaction terminate we remove the tracking infrastructure.
	 */
	if (msg->any.head.cmd & DMSGF_DELETE) {
		slink = state->any.link;
		assert(slink != NULL);
		node = slink->node;
		cls = node->cls;

		fprintf(stderr, "LNK_DELE(thr %p): %p %s cl=%s fs=%s dist=%d\n",
			msg->iocom,
			slink,
			dmsg_uuid_to_str(&cls->pfs_clid, &alloc),
			state->msg->any.lnk_span.cl_label,
			state->msg->any.lnk_span.fs_label,
			state->msg->any.lnk_span.dist);
		free(alloc);

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay transaction.
		 */
		while ((relay = TAILQ_FIRST(&slink->relayq)) != NULL) {
			dmsg_relay_delete(relay);
		}

		/*
		 * Clean out the topology
		 */
		RB_REMOVE(h2span_link_tree, &node->tree, slink);
		if (RB_EMPTY(&node->tree)) {
			RB_REMOVE(h2span_node_tree, &cls->tree, node);
			if (dmsg_node_handler) {
				dmsg_node_handler(&node->opaque, msg,
						  DMSG_NODEOP_DEL);
			}
			if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
				RB_REMOVE(h2span_cluster_tree,
					  &cluster_tree, cls);
				dmsg_free(cls);
			}
			node->cls = NULL;
			dmsg_free(node);
			node = NULL;
		}
		state->any.link = NULL;
		slink->state = NULL;
		slink->node = NULL;
		dmsg_free(slink);

		/*
		 * We have to terminate the transaction
		 */
		dmsg_state_reply(state, 0);
		/* state invalid after reply */

		/*
		 * If the node still exists issue any required updates.  If
		 * it doesn't then all related relays have already been
		 * removed and there's nothing left to do.
		 */
#if 0
		if (node)
			dmsg_relay_scan(NULL, node);
#endif
		if (node)
			dmsg_iocom_signal(msg->iocom);
	}

	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * LNK_CIRC - Virtual circuit protocol message reception
 *	      (incoming iocom lock not held)
 *
 * Handles all cases.
 */
void
dmsg_lnk_circ(dmsg_msg_t *msg)
{
	dmsg_circuit_t *circA;
	dmsg_circuit_t *circB;
	dmsg_state_t *rx_state;
	dmsg_state_t *tx_state;
	dmsg_state_t *state;
	dmsg_state_t dummy;
	dmsg_msg_t *fwd_msg;
	dmsg_iocom_t *iocomA;
	dmsg_iocom_t *iocomB;
	int disconnect;

	/*pthread_mutex_lock(&cluster_mtx);*/

	if (DMsgDebugOpt >= 4)
		fprintf(stderr, "CIRC receive cmd=%08x\n", msg->any.head.cmd);

	switch (msg->any.head.cmd & (DMSGF_CREATE |
				     DMSGF_DELETE |
				     DMSGF_REPLY)) {
	case DMSGF_CREATE:
	case DMSGF_CREATE | DMSGF_DELETE:
		/*
		 * (A) wishes to establish a virtual circuit through us to (B).
		 * (B) is specified by lnk_circ.target (the message id for
		 * a LNK_SPAN that (A) received from us which represents (B)).
		 *
		 * Designate the originator of the circuit (the current
		 * remote end) as (A) and the other side as (B).
		 *
		 * Accept the VC but do not reply.  We will wait for the end-
		 * to-end reply to propagate back.
		 */
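		/*
		 * Sketch of the splice being constructed (illustrative):
		 *
		 *	(A) <-iocomA-> us <-iocomB-> (B)
		 *	     circA  <------------>  circB
		 *	  (circA->peer == circB, circB->peer == circA)
		 */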
		iocomA = msg->iocom;

		/*
		 * Locate the open transaction state that the other end
		 * specified in <target>.  This will be an open SPAN
		 * transaction that we transmitted (h2span_relay) over
		 * the interface the LNK_CIRC is being received on.
		 *
		 * (all LNK_CIRC's that we transmit are on circuit0)
		 */
		pthread_mutex_lock(&iocomA->mtx);
		dummy.msgid = msg->any.lnk_circ.target;
		tx_state = RB_FIND(dmsg_state_tree,
				   &iocomA->circuit0.statewr_tree,
				   &dummy);
		pthread_mutex_unlock(&iocomA->mtx);
		if (tx_state == NULL) {
			/* XXX SMP race */
			fprintf(stderr, "dmsg_lnk_circ: no circuit\n");
			dmsg_msg_reply(msg, DMSG_ERR_CANTCIRC);
			break;
		}
		if (tx_state->icmd != DMSG_LNK_SPAN) {
			/* XXX SMP race */
			fprintf(stderr, "dmsg_lnk_circ: not LNK_SPAN\n");
			dmsg_msg_reply(msg, DMSG_ERR_CANTCIRC);
			break;
		}

		/* locate h2span_link */
		rx_state = tx_state->any.relay->source_rt;

		/*
		 * A wishes to establish a VC through us to the
		 * specified target.
		 *
		 * A sends us the msgid of an open SPAN transaction
		 * it received from us as <target>.
		 */
		circA = dmsg_alloc(sizeof(*circA));
		dmsg_circuit_init(iocomA, circA);
		circA->state = msg->state;	/* LNK_CIRC state */
		circA->msgid = msg->state->msgid;
		circA->span_state = tx_state;	/* H2SPAN_RELAY state */
		circA->is_relay = 1;
		circA->refs = 2;		/* state and peer */
		msg->state->any.circ = circA;

		iocomB = rx_state->iocom;

		circB = dmsg_alloc(sizeof(*circB));
		dmsg_circuit_init(iocomB, circB);

		/*
		 * Create a LNK_CIRC transaction on B
		 */
		fwd_msg = dmsg_msg_alloc(&iocomB->circuit0,
					 0, DMSG_LNK_CIRC | DMSGF_CREATE,
					 dmsg_lnk_circ, circB);
		fwd_msg->state->any.circ = circB;
		fwd_msg->any.lnk_circ.target = rx_state->msgid;
		circB->state = fwd_msg->state;	/* LNK_CIRC state */
		circB->msgid = fwd_msg->any.head.msgid;
		circB->span_state = rx_state;	/* H2SPAN_LINK state */
		circB->is_relay = 0;
		circB->refs = 2;		/* state and peer */

		if (DMsgDebugOpt >= 4)
			fprintf(stderr, "CIRC forward %p->%p\n", circA, circB);

		/*
		 * Link the two circuits together.
		 */
		circA->peer = circB;
		circB->peer = circA;

		if (iocomA < iocomB) {
			pthread_mutex_lock(&iocomA->mtx);
			pthread_mutex_lock(&iocomB->mtx);
		} else {
			pthread_mutex_lock(&iocomB->mtx);
			pthread_mutex_lock(&iocomA->mtx);
		}
		if (RB_INSERT(dmsg_circuit_tree, &iocomA->circuit_tree, circA))
			assert(0);
		if (RB_INSERT(dmsg_circuit_tree, &iocomB->circuit_tree, circB))
			assert(0);
		if (iocomA < iocomB) {
			pthread_mutex_unlock(&iocomB->mtx);
			pthread_mutex_unlock(&iocomA->mtx);
		} else {
			pthread_mutex_unlock(&iocomA->mtx);
			pthread_mutex_unlock(&iocomB->mtx);
		}

		dmsg_msg_write(fwd_msg);

		if ((msg->any.head.cmd & DMSGF_DELETE) == 0)
			break;
		/* FALL THROUGH TO DELETE */
	case DMSGF_DELETE:
		/*
		 * (A) is deleting the virtual circuit, propagate closure
		 * to (B).
		 */
		iocomA = msg->iocom;
		if (msg->state->any.circ == NULL) {
			/* already returned an error/deleted */
			break;
		}
		circA = msg->state->any.circ;
		circB = circA->peer;
		assert(msg->state == circA->state);

		/*
		 * We are closing B's send side.  If B's receive side is
		 * already closed we disconnect the circuit from B's state.
		 */
		disconnect = 0;
		if (circB && (state = circB->state) != NULL) {
			if (state->rxcmd & DMSGF_DELETE) {
				circB->state = NULL;
				state->any.circ = NULL;
				dmsg_circuit_drop(circB);
			}
			dmsg_state_reply(state, msg->any.head.error);
			disconnect = 1;
		}

		/*
		 * We received a close on A.  If A's send side is already
		 * closed we disconnect the circuit from A's state.
		 */
		if (circA && (state = circA->state) != NULL) {
			if (state->txcmd & DMSGF_DELETE) {
				circA->state = NULL;
				state->any.circ = NULL;
				dmsg_circuit_drop(circA);
			}
			disconnect = 1;
		}

		/*
		 * Disconnect the peer<->peer association
		 */
		if (disconnect) {
			if (circB) {
				circA->peer = NULL;
				circB->peer = NULL;
				dmsg_circuit_drop(circA);
				dmsg_circuit_drop(circB); /* XXX SMP */
			}
		}
		break;
	case DMSGF_REPLY | DMSGF_CREATE:
	case DMSGF_REPLY | DMSGF_CREATE | DMSGF_DELETE:
		/*
		 * (B) is acknowledging the creation of the virtual
		 * circuit.  This propagates all the way back to (A), though
		 * it should be noted that (A) can start issuing commands
		 * via the virtual circuit before seeing this reply.
		 */
		circB = msg->state->any.circ;
		assert(circB);
		circA = circB->peer;
		assert(msg->state == circB->state);
		assert(circA);
		if ((msg->any.head.cmd & DMSGF_DELETE) == 0) {
			dmsg_state_result(circA->state, msg->any.head.error);
			break;
		}
		/* FALL THROUGH TO DELETE */
	case DMSGF_REPLY | DMSGF_DELETE:
		/*
		 * (B) is deleting the virtual circuit or acknowledging
		 * our deletion of the virtual circuit, propagate closure
		 * to (A).
		 */
		iocomB = msg->iocom;
		circB = msg->state->any.circ;
		circA = circB->peer;
		assert(msg->state == circB->state);

		/*
		 * We received a close on (B), propagate to (A).  If we have
		 * already received the close from (A) we disconnect the state.
		 */
		disconnect = 0;
		if (circA && (state = circA->state) != NULL) {
			if (state->rxcmd & DMSGF_DELETE) {
				circA->state = NULL;
				state->any.circ = NULL;
				dmsg_circuit_drop(circA);
			}
			dmsg_state_reply(state, msg->any.head.error);
			disconnect = 1;
		}

		/*
		 * We received a close on (B).  If (B)'s send side is already
		 * closed we disconnect the state.
		 */
		if (circB && (state = circB->state) != NULL) {
			if (state->txcmd & DMSGF_DELETE) {
				circB->state = NULL;
				state->any.circ = NULL;
				dmsg_circuit_drop(circB);
			}
			disconnect = 1;
		}

		/*
		 * Disconnect the peer<->peer association
		 */
		if (disconnect) {
			if (circA) {
				circB->peer = NULL;
				circA->peer = NULL;
				dmsg_circuit_drop(circB);
				dmsg_circuit_drop(circA); /* XXX SMP */
			}
		}
		break;
	}

	/*pthread_mutex_unlock(&cluster_mtx);*/
}

/*
 * Update relay transactions for SPANs.
 *
 * Called with cluster_mtx held.
 */
static void dmsg_relay_scan_specific(h2span_node_t *node,
					h2span_conn_t *conn);

static void
dmsg_relay_scan(h2span_conn_t *conn, h2span_node_t *node)
{
	h2span_cluster_t *cls;

	if (node) {
		/*
		 * Iterate specific node
		 */
		TAILQ_FOREACH(conn, &connq, entry)
			dmsg_relay_scan_specific(node, conn);
	} else {
		/*
		 * Full iteration.
		 *
		 * Iterate cluster ids, nodes, and either a specific connection
		 * or all connections.
		 */
		RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
			/*
			 * Iterate node ids
			 */
			RB_FOREACH(node, h2span_node_tree, &cls->tree) {
				/*
				 * Synchronize the node's link (received SPANs)
				 * with each connection's relays.
				 */
				if (conn) {
					dmsg_relay_scan_specific(node, conn);
				} else {
					TAILQ_FOREACH(conn, &connq, entry) {
					    dmsg_relay_scan_specific(node,
								     conn);
					}
					assert(conn == NULL);
				}
			}
		}
	}
}

/*
 * Update the relay'd SPANs for this (node, conn).
 *
 * Iterate links and adjust relays to match.  We only propagate the top link
 * for now (XXX we want to propagate the top two).
 *
 * The dmsg_relay_scan_cmp() function locates the first relay element
 * for any given node.  The relay elements will be sub-sorted by dist.
 */
struct relay_scan_info {
	h2span_node_t *node;
	h2span_relay_t *relay;
};

static int
dmsg_relay_scan_cmp(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	if ((intptr_t)relay->source_rt->any.link->node < (intptr_t)info->node)
		return(-1);
	if ((intptr_t)relay->source_rt->any.link->node > (intptr_t)info->node)
		return(1);
	return(0);
}

static int
dmsg_relay_scan_callback(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	info->relay = relay;
	return(-1);
}
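
/*
 * Note: the callback returns -1 so the RB_SCAN below stops at the first
 * in-range element, leaving info.relay pointing at the lowest-sorted
 * relay for the node (or NULL if the connection has none for it).
 */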
1125
static void
dmsg_relay_scan_specific(h2span_node_t *node, h2span_conn_t *conn)
{
	struct relay_scan_info info;
	h2span_relay_t *relay;
	h2span_relay_t *next_relay;
	h2span_link_t *slink;
	dmsg_lnk_conn_t *lconn;
	dmsg_lnk_span_t *lspan;
	int count;
	int maxcount = 2;
	uint32_t lastdist = DMSG_SPAN_MAXDIST;
	uint32_t lastrnss = 0;

	info.node = node;
	info.relay = NULL;

	/*
	 * Locate the first related relay for the node on this connection.
	 * relay will be NULL if there were none.
	 */
	RB_SCAN(h2span_relay_tree, &conn->tree,
		dmsg_relay_scan_cmp, dmsg_relay_scan_callback, &info);
	relay = info.relay;
	info.relay = NULL;
	if (relay)
		assert(relay->source_rt->any.link->node == node);

	if (DMsgDebugOpt > 8)
		fprintf(stderr, "relay scan for connection %p\n", conn);

	/*
	 * Iterate the node's links (received SPANs) in distance order,
	 * lowest (best) dist first.
	 *
	 * PROPAGATE THE BEST LINKS OVER THE SPECIFIED CONNECTION.
	 *
	 * Track relays while iterating the best links and construct
	 * missing relays when necessary.
	 *
	 * (If some prior better link was removed it would have also
	 *  removed the relay, so the relay can only match exactly or
	 *  be worse).
	 */
	count = 0;
	RB_FOREACH(slink, h2span_link_tree, &node->tree) {
		/*
		 * Increment count of successful relays.  This isn't
		 * quite accurate if we break out but nothing after
		 * the loop uses (count).
		 *
		 * If count exceeds the maximum number of relays we desire
		 * we normally want to break out.  However, in order to
		 * guarantee a symmetric path we have to continue if both
		 * (dist) and (rnss) continue to match.  Otherwise the SPAN
		 * propagation in the reverse direction may choose different
		 * routes and we will not have a symmetric path.
		 *
		 * NOTE: Spanning tree does not have to be symmetrical so
		 *	 this code is not currently enabled.
		 */
		if (++count >= maxcount) {
#ifdef REQUIRE_SYMMETRICAL
			if (lastdist != slink->dist || lastrnss != slink->rnss)
				break;
#else
			break;
#endif
			/* go beyond the nominal maximum desired relays */
		}

		/*
		 * Match, relay already in-place, get the next
		 * relay to match against the next slink.
		 */
		if (relay && relay->source_rt->any.link == slink) {
			relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
			continue;
		}

		/*
		 * We might want this SLINK, if it passes our filters.
		 *
		 * The spanning tree can cause closed loops so we have
		 * to limit slink->dist.
		 */
		if (slink->dist > DMSG_SPAN_MAXDIST)
			break;

		/*
		 * Don't bother transmitting a LNK_SPAN out the same
		 * connection it came in on.  Trivial optimization.
		 */
		if (slink->state->iocom == conn->state->iocom)
			break;

		/*
		 * NOTE ON FILTERS: The protocol spec allows non-requested
		 * SPANs to be transmitted, the other end is expected to
		 * leave their transactions open but otherwise ignore them.
		 *
		 * Don't bother transmitting if the remote connection
		 * is not accepting this SPAN's peer_type.
		 *
		 * pfs_mask is typically used so pure clients can filter
		 * out receiving SPANs for other pure clients.
		 */
		lspan = &slink->state->msg->any.lnk_span;
		lconn = &conn->state->msg->any.lnk_conn;
		if (((1LLU << lspan->peer_type) & lconn->peer_mask) == 0)
			break;
		if (((1LLU << lspan->pfs_type) & lconn->pfs_mask) == 0)
			break;

		/*
		 * Do not give pure clients visibility to other pure clients
		 */
		if (lconn->pfs_type == DMSG_PFSTYPE_CLIENT &&
		    lspan->pfs_type == DMSG_PFSTYPE_CLIENT) {
			break;
		}

		/*
		 * Connection filter, if cluster uuid is not NULL it must
		 * match the span cluster uuid.  Only applies when the
		 * peer_type matches.
		 */
		if (lspan->peer_type == lconn->peer_type &&
		    !uuid_is_nil(&lconn->pfs_clid, NULL) &&
		    uuid_compare(&slink->node->cls->pfs_clid,
				 &lconn->pfs_clid, NULL)) {
			break;
		}

		/*
		 * Connection filter, if cluster label is not empty it must
		 * match the span cluster label.  Only applies when the
		 * peer_type matches.
		 */
		if (lspan->peer_type == lconn->peer_type &&
		    lconn->cl_label[0] &&
		    strcmp(lconn->cl_label, slink->node->cls->cl_label)) {
			break;
		}

		/*
		 * NOTE! pfs_fsid differentiates nodes within the same cluster
		 *	 so we obviously don't want to match those.  Similarly
		 *	 for fs_label.
		 */

		/*
		 * Ok, we've accepted this SPAN for relaying.
		 */
		assert(relay == NULL ||
		       relay->source_rt->any.link->node != slink->node ||
		       relay->source_rt->any.link->dist >= slink->dist);
		relay = dmsg_generate_relay(conn, slink);
		lastdist = slink->dist;
		lastrnss = slink->rnss;

		/*
		 * Match (created new relay), get the next relay to
		 * match against the next slink.
		 */
		relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
	}

	/*
	 * Any remaining relays belonging to this connection which match
	 * the node are in excess of the current aggregate spanning state
	 * and should be removed.
	 */
	while (relay && relay->source_rt->any.link->node == node) {
		next_relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
		fprintf(stderr, "RELAY DELETE FROM EXTRAS\n");
		dmsg_relay_delete(relay);
		relay = next_relay;
	}
}

/*
 * Helper function to generate missing relay.
 *
 * cluster_mtx must be held
 */
static
h2span_relay_t *
dmsg_generate_relay(h2span_conn_t *conn, h2span_link_t *slink)
{
	h2span_relay_t *relay;
	h2span_node_t *node;
	dmsg_msg_t *msg;

	node = slink->node;

	relay = dmsg_alloc(sizeof(*relay));
	relay->conn = conn;
	relay->source_rt = slink->state;
	/* relay->source_rt->any.link = slink; */

	/*
	 * NOTE: relay->target_rt->any.relay set to relay by alloc.
	 */
	msg = dmsg_msg_alloc(&conn->state->iocom->circuit0,
			     0, DMSG_LNK_SPAN | DMSGF_CREATE,
			     dmsg_lnk_relay, relay);
	relay->target_rt = msg->state;

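	/*
	 * Copy the received SPAN payload and account for the extra hop:
	 * dist increases by one per relay and rnss accumulates another
	 * random sub-sort key, e.g. a SPAN received at dist=2 goes back
	 * out at dist=3.
	 */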
	msg->any.lnk_span = slink->state->msg->any.lnk_span;
	msg->any.lnk_span.dist = slink->dist + 1;
	msg->any.lnk_span.rnss = slink->rnss + dmsg_rnss();

	RB_INSERT(h2span_relay_tree, &conn->tree, relay);
	TAILQ_INSERT_TAIL(&slink->relayq, relay, entry);

	dmsg_msg_write(msg);

	return (relay);
}

1347 /*
1348  * Messages received on relay SPANs.  These are open transactions so it is
1349  * in fact possible for the other end to close the transaction.
1350  *
1351  * XXX MPRACE on state structure
1352  */
1353 static void
1354 dmsg_lnk_relay(dmsg_msg_t *msg)
1355 {
1356         dmsg_state_t *state = msg->state;
1357         h2span_relay_t *relay;
1358
1359         assert(msg->any.head.cmd & DMSGF_REPLY);
1360
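             /*
              * The only case of interest is the remote side closing the
              * relayed SPAN.  If our relay still exists tear it down,
              * otherwise just terminate our half of the transaction.
              */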
1361         if (msg->any.head.cmd & DMSGF_DELETE) {
1362                 pthread_mutex_lock(&cluster_mtx);
1363                 fprintf(stderr, "RELAY DELETE FROM LNK_RELAY MSG\n");
1364                 if ((relay = state->any.relay) != NULL) {
1365                         dmsg_relay_delete(relay);
1366                 } else {
1367                         dmsg_state_reply(state, 0);
1368                 }
1369                 pthread_mutex_unlock(&cluster_mtx);
1370         }
1371 }
1372
1373 /*
1374  * cluster_mtx held by caller
1375  */
1376 static
1377 void
1378 dmsg_relay_delete(h2span_relay_t *relay)
1379 {
1380         fprintf(stderr,
1381                 "RELAY DELETE %p RELAY %p ON CLS=%p NODE=%p DIST=%d FD %d STATE %p\n",
1382                 relay->source_rt->any.link,
1383                 relay,
1384                 relay->source_rt->any.link->node->cls, relay->source_rt->any.link->node,
1385                 relay->source_rt->any.link->dist,
1386                 relay->conn->state->iocom->sock_fd, relay->target_rt);
1387
1388         RB_REMOVE(h2span_relay_tree, &relay->conn->tree, relay);
1389         TAILQ_REMOVE(&relay->source_rt->any.link->relayq, relay, entry);
1390
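             /*
              * If the outgoing transaction still exists, clear its
              * back-pointer and send the terminating reply; the state may
              * be reused once the reply is queued.
              */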
1391         if (relay->target_rt) {
1392                 relay->target_rt->any.relay = NULL;
1393                 dmsg_state_reply(relay->target_rt, 0);
1394                 /* state invalid after reply */
1395                 relay->target_rt = NULL;
1396         }
1397         relay->conn = NULL;
1398         relay->source_rt = NULL;
1399         dmsg_free(relay);
1400 }
1401
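     /*
      * Per-volume-configuration thread.  Sleeps on conf->cond until poked,
      * applies pending copy configuration changes, and while in the
      * H2MC_CONNECT state retries the connection every 5 seconds.
      */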
1402 static void *
1403 dmsg_volconf_thread(void *info)
1404 {
1405         h2span_media_config_t *conf = info;
1406
1407         pthread_mutex_lock(&cluster_mtx);
1408         while ((conf->ctl & H2CONFCTL_STOP) == 0) {
1409                 if (conf->ctl & H2CONFCTL_UPDATE) {
1410                         fprintf(stderr, "VOLCONF UPDATE\n");
1411                         conf->ctl &= ~H2CONFCTL_UPDATE;
1412                         if (bcmp(&conf->copy_run, &conf->copy_pend,
1413                                  sizeof(conf->copy_run)) == 0) {
1414                                 fprintf(stderr, "VOLCONF: no changes\n");
1415                                 continue;
1416                         }
1417                         /*
1418                          * XXX TODO - auto reconnect on lookup failure or
1419                          *              connect failure or stream failure.
1420                          */
1421
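                             /*
                              * Drop cluster_mtx across the potentially
                              * blocking stop/start sequence.
                              */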
1422                         pthread_mutex_unlock(&cluster_mtx);
1423                         dmsg_volconf_stop(conf);
1424                         conf->copy_run = conf->copy_pend;
1425                         if (conf->copy_run.copyid != 0 &&
1426                             strncmp(conf->copy_run.path, "span:", 5) == 0) {
1427                                 dmsg_volconf_start(conf,
1428                                                    conf->copy_run.path + 5);
1429                         }
1430                         pthread_mutex_lock(&cluster_mtx);
1431                         fprintf(stderr, "VOLCONF UPDATE DONE state %d\n", conf->state);
1432                 }
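                 /*
                  * Retry a failed connection every 5 seconds, otherwise
                  * wait for someone to poke conf->cond.
                  */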
1433                 if (conf->state == H2MC_CONNECT) {
1434                         dmsg_volconf_start(conf, conf->copy_run.path + 5);
1435                         pthread_mutex_unlock(&cluster_mtx);
1436                         sleep(5);
1437                         pthread_mutex_lock(&cluster_mtx);
1438                 } else {
1439                         pthread_cond_wait(&conf->cond, &cluster_mtx);
1440                 }
1441         }
1442         pthread_mutex_unlock(&cluster_mtx);
1443         dmsg_volconf_stop(conf);
1444         return(NULL);
1445 }
1446
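     /*
      * Stop a volume configuration connection.  A running connection has
      * its socket shut down for writing and we wait for the iocom service
      * thread to exit; a pending (H2MC_CONNECT) retry is simply reset to
      * stopped.
      */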
1447 static
1448 void
1449 dmsg_volconf_stop(h2span_media_config_t *conf)
1450 {
1451         switch(conf->state) {
1452         case H2MC_STOPPED:
1453                 break;
1454         case H2MC_CONNECT:
1455                 conf->state = H2MC_STOPPED;
1456                 break;
1457         case H2MC_RUNNING:
1458                 shutdown(conf->fd, SHUT_WR);
1459                 pthread_join(conf->iocom_thread, NULL);
1460                 conf->iocom_thread = NULL;
1461                 break;
1462         }
1463 }
1464
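     /*
      * (Re)connect a volume configuration to the given host.  On failure
      * the state is left at H2MC_CONNECT so the config thread retries
      * later.
      */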
1465 static
1466 void
1467 dmsg_volconf_start(h2span_media_config_t *conf, const char *hostname)
1468 {
1469         dmsg_master_service_info_t *info;
1470
1471         switch(conf->state) {
1472         case H2MC_STOPPED:
1473         case H2MC_CONNECT:
1474                 conf->fd = dmsg_connect(hostname);
1475                 if (conf->fd < 0) {
1476                         fprintf(stderr, "Unable to connect to %s\n", hostname);
1477                         conf->state = H2MC_CONNECT;
1478                 } else {
1479                         info = malloc(sizeof(*info));
1480                         bzero(info, sizeof(*info));
1481                         info->fd = conf->fd;
1482                         info->detachme = 0;
1483                         conf->state = H2MC_RUNNING;
1484                         pthread_create(&conf->iocom_thread, NULL,
1485                                        dmsg_master_service, info);
1486                 }
1487                 break;
1488         case H2MC_RUNNING:
1489                 break;
1490         }
1491 }
1492
1493 /************************************************************************
1494  *                      MESSAGE ROUTING AND SOURCE VALIDATION           *
1495  ************************************************************************/
1496
1497 int
1498 dmsg_circuit_relay(dmsg_msg_t *msg)
1499 {
1500         dmsg_iocom_t *iocom = msg->iocom;
1501         dmsg_circuit_t *circ;
1502         dmsg_circuit_t *peer;
1503         dmsg_circuit_t dummy;
1504         int error = 0;
1505
1506         /*
1507          * Relay occurs before any state processing, so msg->state should
1508          * always be NULL.
1509          */
1510         assert(msg->state == NULL);
1511
1512         /*
1513          * Lookup the circuit on the incoming iocom.
1514          */
1515         pthread_mutex_lock(&iocom->mtx);
1516
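             /* key a dummy circuit with the incoming circuit id for lookup */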
1517         dummy.msgid = msg->any.head.circuit;
1518         circ = RB_FIND(dmsg_circuit_tree, &iocom->circuit_tree, &dummy);
1519         assert(circ);
1520         peer = circ->peer;
1521         dmsg_circuit_hold(peer);
1522
1523         if (DMsgDebugOpt >= 4) {
1524                 fprintf(stderr,
1525                         "CIRC relay %08x %p->%p\n",
1526                         msg->any.head.cmd, circ, peer);
1527         }
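             /*
              * Rewrite the message onto the peer side of the circuit:
              * switch iocoms, translate the circuit id, and exchange our
              * reference on the inbound circuit for the hold we took on
              * the peer.
              */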
1528
1529         msg->iocom = peer->iocom;
1530         msg->any.head.circuit = peer->msgid;
1531         dmsg_circuit_drop_locked(msg->circuit);
1532         msg->circuit = peer;
1533
1534         pthread_mutex_unlock(&iocom->mtx);
1535
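             /*
              * Returning DMSG_IOQ_ERROR_ROUTED indicates the message was
              * consumed by the relay rather than processed locally.
              */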
1536         dmsg_msg_write(msg);
1537         error = DMSG_IOQ_ERROR_ROUTED;
1538
1539         return error;
1540 }
1541
1542 /************************************************************************
1543  *                      ROUTER AND MESSAGING HANDLES                    *
1544  ************************************************************************
1545  *
1546  * Basically the idea here is to provide a stable data structure which
1547  * can be localized to the caller for higher level protocols to work with.
1548  * Depending on the context, these dmsg_handles can be pooled by use-case
1549  * and remain persistent throughout a client's (or mount point's) life.
1550  */
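     /*
      * A minimal usage sketch, assuming the #if 0'd API below were enabled.
      * Only dmsg_cluster_get()/dmsg_cluster_put() from below and a
      * caller-supplied pfs_clid are assumed:
      *
      *		h2span_cluster_t *cls;
      *
      *		if ((cls = dmsg_cluster_get(&pfs_clid)) != NULL) {
      *			(use cls; it cannot be ripped out while held)
      *			dmsg_cluster_put(cls);
      *		}
      */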
1551
1552 #if 0
1553 /*
1554  * Obtain a stable handle on a cluster given its uuid.  This ties directly
1555  * into the global cluster topology, creating the structure if necessary
1556  * (even if the uuid does not exist yet), and preventing
1557  * the structure from getting ripped out from under us while we hold a
1558  * pointer to it.
1559  */
1560 h2span_cluster_t *
1561 dmsg_cluster_get(uuid_t *pfs_clid)
1562 {
1563         h2span_cluster_t dummy_cls;
1564         h2span_cluster_t *cls;
1565
1566         dummy_cls.pfs_clid = *pfs_clid;
1567         pthread_mutex_lock(&cluster_mtx);
1568         cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
1569         if (cls)
1570                 ++cls->refs;
1571         pthread_mutex_unlock(&cluster_mtx);
1572         return (cls);
1573 }
1574
1575 void
1576 dmsg_cluster_put(h2span_cluster_t *cls)
1577 {
1578         pthread_mutex_lock(&cluster_mtx);
1579         assert(cls->refs > 0);
1580         --cls->refs;
1581         if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
1582                 RB_REMOVE(h2span_cluster_tree,
1583                           &cluster_tree, cls);
1584                 dmsg_free(cls);
1585         }
1586         pthread_mutex_unlock(&cluster_mtx);
1587 }
1588
1589 /*
1590  * Obtain a stable handle to a specific cluster node given its uuid.
1591  * This handle does NOT lock in the route to the node and is typically
1592  * used as part of the dmsg_handle_*() API to obtain a set of
1593  * stable nodes.
1594  */
1595 h2span_node_t *
1596 dmsg_node_get(h2span_cluster_t *cls, uuid_t *pfs_fsid)
1597 {
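             /* XXX not yet implemented */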
1598 }
1599
1600 #endif
1601
1602 /*
1603  * Dumps the spanning tree
1604  *
1605  * DEBUG ONLY
1606  */
1607 void
1608 dmsg_shell_tree(dmsg_circuit_t *circuit, char *cmdbuf __unused)
1609 {
1610         h2span_cluster_t *cls;
1611         h2span_node_t *node;
1612         h2span_link_t *slink;
1613         h2span_relay_t *relay;
1614         char *uustr = NULL;
1615
1616         pthread_mutex_lock(&cluster_mtx);
1617         RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
1618                 dmsg_circuit_printf(circuit, "Cluster %s %s (%s)\n",
1619                                   dmsg_peer_type_to_str(cls->peer_type),
1620                                   dmsg_uuid_to_str(&cls->pfs_clid, &uustr),
1621                                   cls->cl_label);
1622                 RB_FOREACH(node, h2span_node_tree, &cls->tree) {
1623                         dmsg_circuit_printf(circuit, "    Node %s %s (%s)\n",
1624                                 dmsg_pfs_type_to_str(node->pfs_type),
1625                                 dmsg_uuid_to_str(&node->pfs_fsid, &uustr),
1626                                 node->fs_label);
1627                         RB_FOREACH(slink, h2span_link_tree, &node->tree) {
1628                                 dmsg_circuit_printf(circuit,
1629                                             "\tSLink msgid %016jx "
1630                                             "dist=%d via %d\n",
1631                                             (intmax_t)slink->state->msgid,
1632                                             slink->dist,
1633                                             slink->state->iocom->sock_fd);
1634                                 TAILQ_FOREACH(relay, &slink->relayq, entry) {
1635                                         dmsg_circuit_printf(circuit,
1636                                             "\t    Relay-out msgid %016jx "
1637                                             "via %d\n",
1638                                             (intmax_t)relay->target_rt->msgid,
1639                                             relay->target_rt->iocom->sock_fd);
1640                                 }
1641                         }
1642                 }
1643         }
1644         pthread_mutex_unlock(&cluster_mtx);
1645         if (uustr)
1646                 free(uustr);
1647 #if 0
1648         TAILQ_FOREACH(conn, &connq, entry) {
1649         }
1650 #endif
1651 }
1652
1653 /*
1654  * DEBUG ONLY
1655  *
1656  * Locate the state representing an incoming LNK_SPAN given its msgid.
1657  */
1658 int
1659 dmsg_debug_findspan(uint64_t msgid, dmsg_state_t **statep)
1660 {
1661         h2span_cluster_t *cls;
1662         h2span_node_t *node;
1663         h2span_link_t *slink;
1665
1666         pthread_mutex_lock(&cluster_mtx);
1668         RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
1669                 RB_FOREACH(node, h2span_node_tree, &cls->tree) {
1670                         RB_FOREACH(slink, h2span_link_tree, &node->tree) {
1671                                 if (slink->state->msgid == msgid) {
1672                                         *statep = slink->state;
1673                                         goto found;
1674                                 }
1675                         }
1676                 }
1677         }
1678         pthread_mutex_unlock(&cluster_mtx);
1679         *statep = NULL;
1680         return(ENOENT);
1681 found:
1682         pthread_mutex_unlock(&cluster_mtx);
1683         return(0);
1684 }
1685
1686 /*
1687  * Random number sub-sort value to add to SPAN rnss fields on relay.
1688  * This allows us to differentiate spans with the same <dist> field
1689  * for relaying purposes.  We must normally limit the number of relays
1690  * for any given SPAN origination but we must also guarantee that a
1691  * symmetric reverse path exists, so we use the rnss field as a sub-sort
1692  * (since there can be thousands or millions if we only match on <dist>),
1693  * and if there are STILL too many spans we go past the limit.
1694  */
1695 static
1696 uint32_t
1697 dmsg_rnss(void)
1698 {
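             /*
              * Lazily initialize the process-wide random sub-sort value;
              * cluster_mtx serializes first use.
              */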
1699         if (DMsgRNSS == 0) {
1700                 pthread_mutex_lock(&cluster_mtx);
1701                 while (DMsgRNSS == 0) {
1702                         srandomdev();
1703                         DMsgRNSS = random();
1704                 }
1705                 pthread_mutex_unlock(&cluster_mtx);
1706         }
1707         return(DMsgRNSS);
1708 }