cluster - more libdmsg work
[dragonfly.git] / lib / libdmsg / msg_lnk.c
8c280d5d
MD
1/*
2 * Copyright (c) 2012 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34/*
0d20ec8a
MD
35 * LNK_SPAN PROTOCOL SUPPORT FUNCTIONS - Please see sys/dmsg.h for an
36 * involved explanation of the protocol.
8c280d5d
MD
37 */
38
0c3a8cd0 39#include "dmsg_local.h"
8c280d5d 40
0d20ec8a
MD
41void (*dmsg_node_handler)(void **opaquep, struct dmsg_msg *msg, int op);
42
cf715800
MD
43/*
44 * Maximum spanning tree distance. This has the practical effect of
45 * stopping tail-chasing closed loops when a feeder span is lost.
46 */
0c3a8cd0 47#define DMSG_SPAN_MAXDIST 16
cf715800 48
8c280d5d
MD
49/*
50 * RED-BLACK TREE DEFINITIONS
51 *
7dc0f844 52 * We need to track:
8c280d5d
MD
53 *
54 * (1) shared fsid's (a cluster).
55 * (2) unique fsid's (a node in a cluster) <--- LNK_SPAN transactions.
56 *
57 * We need to aggregate all active LNK_SPANs and create our own
58 * outgoing LNK_SPAN transactions on each of our connections representing
59 * the aggregated state.
60 *
2063f4d7 61 * h2span_conn - list of iocom connections who wish to receive SPAN
8c280d5d
MD
62 * propagation from other connections. Might contain
63 * a filter string. Only iocom's with an open
64 * LNK_CONN transactions are applicable for SPAN
65 * propagation.
66 *
67 * h2span_relay - List of links relayed (via SPAN). Essentially
68 * each relay structure represents a LNK_SPAN
69 * transaction that we initiated, versus h2span_link
70 * which is a LNK_SPAN transaction that we received.
71 *
72 * --
73 *
74 * h2span_cluster - Organizes the shared fsid's. One structure for
75 * each cluster.
76 *
77 * h2span_node - Organizes the nodes in a cluster. One structure
78 * for each unique {cluster,node}, aka {fsid, pfs_fsid}.
79 *
80 * h2span_link - Organizes all incoming and outgoing LNK_SPAN message
81 * transactions related to a node.
82 *
83 * One h2span_link structure for each incoming LNK_SPAN
84 * transaction. Links selected for propagation back
85 * out are also where the outgoing LNK_SPAN messages
86 * are indexed into (so we can propagate changes).
87 *
88 * The h2span_link's use a red-black tree to sort the
7dc0f844 89 * distance hop metric for the incoming LNK_SPAN. We
8c280d5d
MD
90 * then select the top N for outgoing. When the
91 * topology changes the top N may also change and cause
92 * new outgoing LNK_SPAN transactions to be opened
93 * and less desirable ones to be closed, causing
94 * transactional aborts within the message flow in
95 * the process.
96 *
97 * Also note - All outgoing LNK_SPAN message transactions are also
98 * entered into a red-black tree for use by the routing
99 * function. This is handled by msg.c in the state
100 * code, not here.
101 */
102
103struct h2span_link;
104struct h2span_relay;
1a34728c 105TAILQ_HEAD(h2span_media_queue, h2span_media);
2063f4d7 106TAILQ_HEAD(h2span_conn_queue, h2span_conn);
8c280d5d
MD
107TAILQ_HEAD(h2span_relay_queue, h2span_relay);
108
109RB_HEAD(h2span_cluster_tree, h2span_cluster);
110RB_HEAD(h2span_node_tree, h2span_node);
111RB_HEAD(h2span_link_tree, h2span_link);
112RB_HEAD(h2span_relay_tree, h2span_relay);
0d20ec8a 113uint32_t DMsgRNSS;
8c280d5d 114
1a34728c
MD
115/*
116 * This represents a media (keyed by mediaid)
117 */
118struct h2span_media {
119 TAILQ_ENTRY(h2span_media) entry;
120 uuid_t mediaid;
121 int refs;
122 struct h2span_media_config {
5bc5bca2
MD
123 dmsg_vol_data_t copy_run;
124 dmsg_vol_data_t copy_pend;
1a34728c
MD
125 pthread_t thread;
126 pthread_cond_t cond;
127 int ctl;
128 int fd;
0c3a8cd0 129 dmsg_iocom_t iocom;
1a34728c
MD
130 pthread_t iocom_thread;
131 enum { H2MC_STOPPED, H2MC_CONNECT, H2MC_RUNNING } state;
0c3a8cd0 132 } config[DMSG_COPYID_COUNT];
1a34728c
MD
133};
134
135typedef struct h2span_media_config h2span_media_config_t;
136
137#define H2CONFCTL_STOP 0x00000001
138#define H2CONFCTL_UPDATE 0x00000002
139
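/*
 * Illustrative sketch (not part of the build): how the H2CONFCTL flags are
 * used, condensed from dmsg_lnk_conn() and dmsg_volconf_thread() below.
 * The LNK_VOLCONF receiver queues a pending copy configuration and wakes
 * the per-copy thread; the thread consumes the update, both sides holding
 * cluster_mtx.
 */
#if 0
	/* producer side (LNK_VOLCONF reception), cluster_mtx held */
	conf->copy_pend = msg->any.lnk_volconf.copy;
	conf->ctl |= H2CONFCTL_UPDATE;
	pthread_cond_signal(&conf->cond);

	/* consumer side (volconf thread), cluster_mtx held */
	if (conf->ctl & H2CONFCTL_UPDATE) {
		conf->ctl &= ~H2CONFCTL_UPDATE;
		/* apply conf->copy_pend, updating conf->copy_run */
	}
#endif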
8c280d5d
MD
140/*
141 * Received LNK_CONN transaction enables SPAN protocol over connection.
1a34728c
MD
142 * (may contain filter). Typically one for each mount and several may
143 * share the same media.
8c280d5d 144 */
2063f4d7
MD
145struct h2span_conn {
146 TAILQ_ENTRY(h2span_conn) entry;
8c280d5d 147 struct h2span_relay_tree tree;
1a34728c 148 struct h2span_media *media;
0c3a8cd0 149 dmsg_state_t *state;
8c280d5d
MD
150};
151
152/*
153 * All received LNK_SPANs are organized by cluster (pfs_clid),
154 * node (pfs_fsid), and link (received LNK_SPAN transaction).
155 */
156struct h2span_cluster {
157 RB_ENTRY(h2span_cluster) rbnode;
158 struct h2span_node_tree tree;
159 uuid_t pfs_clid; /* shared fsid */
ddfbb283
MD
160 uint8_t peer_type;
161 char cl_label[128]; /* cluster label (typ PEER_BLOCK) */
90e8cd1d 162 int refs; /* prevents destruction */
8c280d5d
MD
163};
164
7dc0f844 165struct h2span_node {
8c280d5d
MD
166 RB_ENTRY(h2span_node) rbnode;
167 struct h2span_link_tree tree;
168 struct h2span_cluster *cls;
ddfbb283 169 uint8_t pfs_type;
8c280d5d 170 uuid_t pfs_fsid; /* unique fsid */
ddfbb283 171 char fs_label[128]; /* fs label (typ PEER_HAMMER2) */
0d20ec8a 172 void *opaque;
8c280d5d
MD
173};
174
175struct h2span_link {
176 RB_ENTRY(h2span_link) rbnode;
0c3a8cd0 177 dmsg_state_t *state; /* state<->link */
8c280d5d 178 struct h2span_node *node; /* related node */
0d20ec8a
MD
179 uint32_t dist;
180 uint32_t rnss;
8c280d5d
MD
181 struct h2span_relay_queue relayq; /* relay out */
182};
183
184/*
185 * Any LNK_SPAN transactions we receive which are relayed out other
0d20ec8a
MD
186 * connections utilize this structure to track the LNK_SPAN transactions
187 * we initiate (relay out) on other connections. We only relay out
188 * LNK_SPANs on connections we have an open CONN transaction for.
189 *
190 * The relay structure points to the outgoing LNK_SPAN trans (out_state)
191 * and to the incoming LNK_SPAN transaction (in_state). The relay
192 * structure holds refs on the related states.
8c280d5d
MD
193 *
194 * In many respects this is the core of the protocol... actually figuring
195 * out what LNK_SPANs to relay. The spanid used for relaying is the
196 * address of the 'state' structure, which is why h2span_relay has to
2063f4d7 197 * be entered into a RB-TREE based at h2span_conn (so we can look
8c280d5d
MD
198 * up the spanid to validate it).
199 */
200struct h2span_relay {
0d20ec8a
MD
201 TAILQ_ENTRY(h2span_relay) entry; /* from link */
202 RB_ENTRY(h2span_relay) rbnode; /* from h2span_conn */
203 struct h2span_conn *conn; /* related CONN transaction */
204 dmsg_state_t *source_rt; /* h2span_link state */
205 dmsg_state_t *target_rt; /* h2span_relay state */
8c280d5d
MD
206};
207
1a34728c 208typedef struct h2span_media h2span_media_t;
2063f4d7 209typedef struct h2span_conn h2span_conn_t;
8c280d5d
MD
210typedef struct h2span_cluster h2span_cluster_t;
211typedef struct h2span_node h2span_node_t;
212typedef struct h2span_link h2span_link_t;
213typedef struct h2span_relay h2span_relay_t;
214
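/*
 * Orientation sketch (not compiled): the containment hierarchy described
 * above is walked with nested RB_FOREACH loops, exactly as the debug dump
 * dmsg_shell_tree() does near the end of this file.  cluster_mtx must be
 * held across the walk.
 */
#if 0
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;

	pthread_mutex_lock(&cluster_mtx);
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				/* links iterate best (lowest dist) first */
			}
		}
	}
	pthread_mutex_unlock(&cluster_mtx);
#endif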
ddfbb283
MD
215#define dmsg_termstr(array) _dmsg_termstr((array), sizeof(array))
216
0d20ec8a
MD
217static h2span_relay_t *dmsg_generate_relay(h2span_conn_t *conn,
218 h2span_link_t *slink);
219static uint32_t dmsg_rnss(void);
220
ddfbb283
MD
221static __inline
222void
223_dmsg_termstr(char *base, size_t size)
224{
225 base[size-1] = 0;
226}
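/*
 * Example usage (taken from dmsg_lnk_span() below): guarantee that the
 * fixed-size label arrays copied off the wire are NUL-terminated before
 * they are used as C strings.
 */
#if 0
	dmsg_termstr(msg->any.lnk_span.cl_label);
	dmsg_termstr(msg->any.lnk_span.fs_label);
#endif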
227
228/*
229 * Cluster peer_type, uuid, AND label must match for a match
230 */
8c280d5d
MD
231static
232int
233h2span_cluster_cmp(h2span_cluster_t *cls1, h2span_cluster_t *cls2)
234{
ddfbb283
MD
235 int r;
236
237 if (cls1->peer_type < cls2->peer_type)
238 return(-1);
239 if (cls1->peer_type > cls2->peer_type)
240 return(1);
241 r = uuid_compare(&cls1->pfs_clid, &cls2->pfs_clid, NULL);
242 if (r == 0)
243 r = strcmp(cls1->cl_label, cls2->cl_label);
244
245 return r;
8c280d5d
MD
246}
247
ddfbb283 248/*
0d20ec8a
MD
249 * Match against fs_label/pfs_fsid. Together these two items represent a
250 * unique node. In most cases the primary differentiator is pfs_fsid but
251 * we also string-match fs_label.
ddfbb283 252 */
8c280d5d
MD
253static
254int
255h2span_node_cmp(h2span_node_t *node1, h2span_node_t *node2)
256{
185ace93
MD
257 int r;
258
0d20ec8a
MD
259 r = strcmp(node1->fs_label, node2->fs_label);
260 if (r == 0)
261 r = uuid_compare(&node1->pfs_fsid, &node2->pfs_fsid, NULL);
185ace93 262 return (r);
8c280d5d
MD
263}
264
cf715800 265/*
10c86c4e
MD
266 * Sort/subsort must match h2span_relay_cmp() under any given node
267 * to make the aggregation algorithm easier, so the best links are
268 * in the same sorted order as the best relays.
269 *
270 * NOTE: We cannot use link*->state->msgid because this msgid is created
271 * by each remote host and thus might wind up being the same.
cf715800 272 */
8c280d5d
MD
273static
274int
275h2span_link_cmp(h2span_link_t *link1, h2span_link_t *link2)
276{
7dc0f844 277 if (link1->dist < link2->dist)
8c280d5d 278 return(-1);
7dc0f844 279 if (link1->dist > link2->dist)
8c280d5d 280 return(1);
0d20ec8a
MD
281 if (link1->rnss < link2->rnss)
282 return(-1);
283 if (link1->rnss > link2->rnss)
284 return(1);
10c86c4e
MD
285#if 1
286 if ((uintptr_t)link1->state < (uintptr_t)link2->state)
287 return(-1);
288 if ((uintptr_t)link1->state > (uintptr_t)link2->state)
289 return(1);
290#else
29ead430 291 if (link1->state->msgid < link2->state->msgid)
8c280d5d 292 return(-1);
29ead430 293 if (link1->state->msgid > link2->state->msgid)
8c280d5d 294 return(1);
10c86c4e 295#endif
8c280d5d
MD
296 return(0);
297}
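/*
 * Because the comparator above sorts primarily on dist, then rnss, then
 * the state pointer as a stable tie-breaker, RB_FOREACH over node->tree
 * yields a node's links best-first.  Selecting the best links to relay is
 * then just a bounded iteration, which is what dmsg_relay_scan_specific()
 * does further below.  Minimal sketch of the idiom:
 */
#if 0
	count = 0;
	RB_FOREACH(slink, h2span_link_tree, &node->tree) {
		if (++count >= maxcount)
			break;		/* bound the number of relays */
		/* ... make sure a relay exists for slink ... */
	}
#endif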
298
7dc0f844
MD
299/*
300 * Relay entries are sorted by node, subsorted by distance and link
301 * address (so we can match up the conn->tree relay topology with
302 * a node's link topology).
303 */
8c280d5d
MD
304static
305int
306h2span_relay_cmp(h2span_relay_t *relay1, h2span_relay_t *relay2)
307{
0d20ec8a
MD
308 h2span_link_t *link1 = relay1->source_rt->any.link;
309 h2span_link_t *link2 = relay2->source_rt->any.link;
29ead430
MD
310
311 if ((intptr_t)link1->node < (intptr_t)link2->node)
7dc0f844 312 return(-1);
29ead430 313 if ((intptr_t)link1->node > (intptr_t)link2->node)
7dc0f844 314 return(1);
29ead430 315 if (link1->dist < link2->dist)
8c280d5d 316 return(-1);
29ead430 317 if (link1->dist > link2->dist)
7dc0f844 318 return(1);
0d20ec8a
MD
319 if (link1->rnss < link2->rnss)
320 return(-1);
321 if (link1->rnss > link2->rnss)
322 return(1);
10c86c4e
MD
323#if 1
324 if ((uintptr_t)link1->state < (uintptr_t)link2->state)
325 return(-1);
326 if ((uintptr_t)link1->state > (uintptr_t)link2->state)
327 return(1);
328#else
29ead430 329 if (link1->state->msgid < link2->state->msgid)
7dc0f844 330 return(-1);
29ead430 331 if (link1->state->msgid > link2->state->msgid)
8c280d5d 332 return(1);
10c86c4e 333#endif
8c280d5d
MD
334 return(0);
335}
336
337RB_PROTOTYPE_STATIC(h2span_cluster_tree, h2span_cluster,
338 rbnode, h2span_cluster_cmp);
339RB_PROTOTYPE_STATIC(h2span_node_tree, h2span_node,
340 rbnode, h2span_node_cmp);
341RB_PROTOTYPE_STATIC(h2span_link_tree, h2span_link,
342 rbnode, h2span_link_cmp);
343RB_PROTOTYPE_STATIC(h2span_relay_tree, h2span_relay,
344 rbnode, h2span_relay_cmp);
345
346RB_GENERATE_STATIC(h2span_cluster_tree, h2span_cluster,
347 rbnode, h2span_cluster_cmp);
348RB_GENERATE_STATIC(h2span_node_tree, h2span_node,
349 rbnode, h2span_node_cmp);
350RB_GENERATE_STATIC(h2span_link_tree, h2span_link,
351 rbnode, h2span_link_cmp);
352RB_GENERATE_STATIC(h2span_relay_tree, h2span_relay,
353 rbnode, h2span_relay_cmp);
354
355/*
1a34728c 356 * Global mutex protects cluster_tree lookups, connq, mediaq.
8c280d5d
MD
357 */
358static pthread_mutex_t cluster_mtx;
359static struct h2span_cluster_tree cluster_tree = RB_INITIALIZER(cluster_tree);
2063f4d7 360static struct h2span_conn_queue connq = TAILQ_HEAD_INITIALIZER(connq);
1a34728c 361static struct h2span_media_queue mediaq = TAILQ_HEAD_INITIALIZER(mediaq);
8c280d5d 362
0c3a8cd0
MD
363static void dmsg_lnk_span(dmsg_msg_t *msg);
364static void dmsg_lnk_conn(dmsg_msg_t *msg);
0d20ec8a 365static void dmsg_lnk_circ(dmsg_msg_t *msg);
0c3a8cd0
MD
366static void dmsg_lnk_relay(dmsg_msg_t *msg);
367static void dmsg_relay_scan(h2span_conn_t *conn, h2span_node_t *node);
368static void dmsg_relay_delete(h2span_relay_t *relay);
8c280d5d 369
0c3a8cd0
MD
370static void *dmsg_volconf_thread(void *info);
371static void dmsg_volconf_stop(h2span_media_config_t *conf);
372static void dmsg_volconf_start(h2span_media_config_t *conf,
1a34728c
MD
373 const char *hostname);
374
29ead430 375void
0d20ec8a 376dmsg_msg_lnk_signal(dmsg_iocom_t *iocom __unused)
29ead430
MD
377{
378 pthread_mutex_lock(&cluster_mtx);
0c3a8cd0 379 dmsg_relay_scan(NULL, NULL);
29ead430
MD
380 pthread_mutex_unlock(&cluster_mtx);
381}
382
8c280d5d 383/*
0d20ec8a
MD
384 * DMSG_PROTO_LNK - Generic LNK protocol message dispatch.
385 * (incoming iocom lock not held)
386 *
387 * This function is typically called for one-way and opening-transactions
388 * since state->func is assigned after that, but it will also be called
389 * if no state->func is assigned on transaction-open.
8c280d5d
MD
390 */
391void
0c3a8cd0 392dmsg_msg_lnk(dmsg_msg_t *msg)
8c280d5d 393{
0d20ec8a
MD
394 uint32_t icmd = msg->state ? msg->state->icmd : msg->any.head.cmd;
395
396 switch(icmd & DMSGF_BASECMDMASK) {
5bc5bca2 397 case DMSG_LNK_CONN:
0c3a8cd0 398 dmsg_lnk_conn(msg);
8c280d5d 399 break;
5bc5bca2 400 case DMSG_LNK_SPAN:
0c3a8cd0 401 dmsg_lnk_span(msg);
8c280d5d 402 break;
0d20ec8a
MD
403 case DMSG_LNK_CIRC:
404 dmsg_lnk_circ(msg);
405 break;
8c280d5d
MD
406 default:
407 fprintf(stderr,
408 "MSG_PROTO_LNK: Unknown msg %08x\n", msg->any.head.cmd);
0c3a8cd0 409 dmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
8c280d5d
MD
410 /* state invalid after reply */
411 break;
412 }
413}
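/*
 * Aside on the dispatch above: icmd comes from the transaction state when
 * one exists (i.e. the command which opened the transaction), otherwise
 * from the message header itself, so the sub-handler is selected by the
 * opening command rather than by whatever a later frame happens to carry.
 * Restated for clarity:
 */
#if 0
	/* same selection as above */
	uint32_t icmd = msg->state ? msg->state->icmd : msg->any.head.cmd;

	switch(icmd & DMSGF_BASECMDMASK) { /* ... */ }
#endif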
414
0d20ec8a
MD
415/*
416 * LNK_CONN - iocom identify message reception.
417 * (incoming iocom lock not held)
418 *
419 * Remote node identifies itself to us, sets up a SPAN filter, and gives us
420 * the ok to start transmitting SPANs.
421 */
8c280d5d 422void
0c3a8cd0 423dmsg_lnk_conn(dmsg_msg_t *msg)
8c280d5d 424{
0c3a8cd0 425 dmsg_state_t *state = msg->state;
1a34728c
MD
426 h2span_media_t *media;
427 h2span_media_config_t *conf;
2063f4d7 428 h2span_conn_t *conn;
8c280d5d
MD
429 h2span_relay_t *relay;
430 char *alloc = NULL;
1a34728c 431 int i;
8c280d5d
MD
432
433 pthread_mutex_lock(&cluster_mtx);
434
f306de83
MD
435 fprintf(stderr, "dmsg_lnk_conn: msg %p cmd %08x state %p txcmd %08x rxcmd %08x\n",
436 msg, msg->any.head.cmd, state, state->txcmd, state->rxcmd);
437
5bc5bca2
MD
438 switch(msg->any.head.cmd & DMSGF_TRANSMASK) {
439 case DMSG_LNK_CONN | DMSGF_CREATE:
440 case DMSG_LNK_CONN | DMSGF_CREATE | DMSGF_DELETE:
1a34728c 441 /*
2063f4d7 442 * On transaction start we allocate a new h2span_conn and
1a34728c
MD
443 * acknowledge the request, leaving the transaction open.
444 * We then relay priority-selected SPANs.
445 */
ddfbb283 446 fprintf(stderr, "LNK_CONN(%08x): %s/%s/%s\n",
81666e1b 447 (uint32_t)msg->any.head.msgid,
0c3a8cd0 448 dmsg_uuid_to_str(&msg->any.lnk_conn.pfs_clid,
8c280d5d 449 &alloc),
ddfbb283
MD
450 msg->any.lnk_conn.cl_label,
451 msg->any.lnk_conn.fs_label);
8c280d5d
MD
452 free(alloc);
453
0c3a8cd0 454 conn = dmsg_alloc(sizeof(*conn));
8c280d5d
MD
455
456 RB_INIT(&conn->tree);
0d20ec8a 457 state->iocom->conn = conn; /* XXX only one */
8c280d5d 458 conn->state = state;
0c3a8cd0 459 state->func = dmsg_lnk_conn;
8c280d5d
MD
460 state->any.conn = conn;
461 TAILQ_INSERT_TAIL(&connq, conn, entry);
462
02454b3e 463 /*
1a34728c 464 * Set up media
02454b3e 465 */
1a34728c
MD
466 TAILQ_FOREACH(media, &mediaq, entry) {
467 if (uuid_compare(&msg->any.lnk_conn.mediaid,
468 &media->mediaid, NULL) == 0) {
469 break;
470 }
471 }
472 if (media == NULL) {
0c3a8cd0 473 media = dmsg_alloc(sizeof(*media));
1a34728c
MD
474 media->mediaid = msg->any.lnk_conn.mediaid;
475 TAILQ_INSERT_TAIL(&mediaq, media, entry);
476 }
477 conn->media = media;
478 ++media->refs;
8c280d5d 479
5bc5bca2 480 if ((msg->any.head.cmd & DMSGF_DELETE) == 0) {
0c3a8cd0 481 dmsg_msg_result(msg, 0);
0d20ec8a 482 dmsg_iocom_signal(msg->iocom);
1a34728c
MD
483 break;
484 }
485 /* FALL THROUGH */
5bc5bca2
MD
486 case DMSG_LNK_CONN | DMSGF_DELETE:
487 case DMSG_LNK_ERROR | DMSGF_DELETE:
1a34728c
MD
488deleteconn:
489 /*
2063f4d7 490 * On transaction terminate we clean out our h2span_conn
1a34728c
MD
491 * and acknowledge the request, closing the transaction.
492 */
8c280d5d
MD
493 fprintf(stderr, "LNK_CONN: Terminated\n");
494 conn = state->any.conn;
495 assert(conn);
7dc0f844 496
1a34728c
MD
497 /*
498 * Clean out the media structure. If refs drops to zero we
499 * also clean out the media config threads. These threads
500 * maintain span connections to other hammer2 service daemons.
501 */
502 media = conn->media;
503 if (--media->refs == 0) {
504 fprintf(stderr, "Shutting down media spans\n");
0c3a8cd0 505 for (i = 0; i < DMSG_COPYID_COUNT; ++i) {
1a34728c
MD
506 conf = &media->config[i];
507
508 if (conf->thread == NULL)
509 continue;
510 conf->ctl = H2CONFCTL_STOP;
511 pthread_cond_signal(&conf->cond);
512 }
0c3a8cd0 513 for (i = 0; i < DMSG_COPYID_COUNT; ++i) {
1a34728c
MD
514 conf = &media->config[i];
515
516 if (conf->thread == NULL)
517 continue;
518 pthread_mutex_unlock(&cluster_mtx);
519 pthread_join(conf->thread, NULL);
520 pthread_mutex_lock(&cluster_mtx);
521 conf->thread = NULL;
522 pthread_cond_destroy(&conf->cond);
523 }
524 fprintf(stderr, "Media shutdown complete\n");
525 TAILQ_REMOVE(&mediaq, media, entry);
0c3a8cd0 526 dmsg_free(media);
1a34728c
MD
527 }
528
7dc0f844
MD
529 /*
530 * Clean out all relays. This requires terminating each
531 * relay transaction.
532 */
8c280d5d 533 while ((relay = RB_ROOT(&conn->tree)) != NULL) {
0c3a8cd0 534 dmsg_relay_delete(relay);
8c280d5d
MD
535 }
536
537 /*
538 * Clean out conn
539 */
1a34728c 540 conn->media = NULL;
8c280d5d
MD
541 conn->state = NULL;
542 msg->state->any.conn = NULL;
0d20ec8a 543 msg->state->iocom->conn = NULL;
8c280d5d 544 TAILQ_REMOVE(&connq, conn, entry);
0c3a8cd0 545 dmsg_free(conn);
8c280d5d 546
0c3a8cd0 547 dmsg_msg_reply(msg, 0);
8c280d5d 548 /* state invalid after reply */
1a34728c 549 break;
5bc5bca2 550 case DMSG_LNK_VOLCONF:
1a34728c
MD
551 /*
552 * One-way volume-configuration message is transmitted
553 * over the open LNK_CONN transaction.
554 */
555 fprintf(stderr, "RECEIVED VOLCONF\n");
556 if (msg->any.lnk_volconf.index < 0 ||
0c3a8cd0 557 msg->any.lnk_volconf.index >= DMSG_COPYID_COUNT) {
1a34728c
MD
558 fprintf(stderr, "VOLCONF: ILLEGAL INDEX %d\n",
559 msg->any.lnk_volconf.index);
560 break;
561 }
562 if (msg->any.lnk_volconf.copy.path[sizeof(msg->any.lnk_volconf.copy.path) - 1] != 0 ||
563 msg->any.lnk_volconf.copy.path[0] == 0) {
564 fprintf(stderr, "VOLCONF: ILLEGAL PATH %d\n",
565 msg->any.lnk_volconf.index);
566 break;
567 }
568 conn = msg->state->any.conn;
569 if (conn == NULL) {
570 fprintf(stderr, "VOLCONF: LNK_CONN is missing\n");
571 break;
572 }
573 conf = &conn->media->config[msg->any.lnk_volconf.index];
574 conf->copy_pend = msg->any.lnk_volconf.copy;
575 conf->ctl |= H2CONFCTL_UPDATE;
576 if (conf->thread == NULL) {
577 fprintf(stderr, "VOLCONF THREAD STARTED\n");
578 pthread_cond_init(&conf->cond, NULL);
579 pthread_create(&conf->thread, NULL,
0c3a8cd0 580 dmsg_volconf_thread, (void *)conf);
1a34728c
MD
581 }
582 pthread_cond_signal(&conf->cond);
583 break;
584 default:
585 /*
586 * Failsafe
587 */
5bc5bca2 588 if (msg->any.head.cmd & DMSGF_DELETE)
1a34728c 589 goto deleteconn;
0c3a8cd0 590 dmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
1a34728c 591 break;
8c280d5d
MD
592 }
593 pthread_mutex_unlock(&cluster_mtx);
594}
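/*
 * For orientation, a minimal sketch of what the sending side of the
 * LNK_CONN transaction handled above might look like.  This is not lifted
 * from the real initiator (which lives in the daemon code linking against
 * libdmsg); the callback and the field values are placeholders.
 */
#if 0
	dmsg_msg_t *msg;

	msg = dmsg_msg_alloc(&iocom->circuit0, 0,
			     DMSG_LNK_CONN | DMSGF_CREATE,
			     my_conn_callback, NULL);	/* hypothetical callback */
	msg->any.lnk_conn.pfs_clid = my_pfs_clid;	/* placeholder uuids */
	msg->any.lnk_conn.mediaid = my_mediaid;
	msg->any.lnk_conn.peer_mask = (uint64_t)-1;	/* accept all peer types */
	snprintf(msg->any.lnk_conn.cl_label,
		 sizeof(msg->any.lnk_conn.cl_label), "%s", "my-cluster");
	dmsg_msg_write(msg);
#endif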
595
0d20ec8a
MD
596/*
597 * LNK_SPAN - Spanning tree protocol message reception
598 * (incoming iocom lock not held)
599 *
600 * Receive a spanning tree transactional message, creating or destroying
601 * a SPAN and propagating it to other iocoms.
602 */
8c280d5d 603void
0c3a8cd0 604dmsg_lnk_span(dmsg_msg_t *msg)
8c280d5d 605{
0c3a8cd0 606 dmsg_state_t *state = msg->state;
8c280d5d
MD
607 h2span_cluster_t dummy_cls;
608 h2span_node_t dummy_node;
609 h2span_cluster_t *cls;
610 h2span_node_t *node;
611 h2span_link_t *slink;
612 h2span_relay_t *relay;
613 char *alloc = NULL;
614
5bc5bca2 615 assert((msg->any.head.cmd & DMSGF_REPLY) == 0);
29ead430 616
8c280d5d
MD
617 pthread_mutex_lock(&cluster_mtx);
618
619 /*
620 * On transaction start we initialize the tracking infrastructure
621 */
5bc5bca2 622 if (msg->any.head.cmd & DMSGF_CREATE) {
29ead430 623 assert(state->func == NULL);
0c3a8cd0 624 state->func = dmsg_lnk_span;
8c280d5d 625
ddfbb283
MD
626 dmsg_termstr(msg->any.lnk_span.cl_label);
627 dmsg_termstr(msg->any.lnk_span.fs_label);
81666e1b 628
8c280d5d
MD
629 /*
630 * Find the cluster
631 */
632 dummy_cls.pfs_clid = msg->any.lnk_span.pfs_clid;
ddfbb283
MD
633 dummy_cls.peer_type = msg->any.lnk_span.peer_type;
634 bcopy(msg->any.lnk_span.cl_label,
635 dummy_cls.cl_label,
636 sizeof(dummy_cls.cl_label));
8c280d5d
MD
637 cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
638 if (cls == NULL) {
0c3a8cd0 639 cls = dmsg_alloc(sizeof(*cls));
8c280d5d 640 cls->pfs_clid = msg->any.lnk_span.pfs_clid;
ddfbb283
MD
641 cls->peer_type = msg->any.lnk_span.peer_type;
642 bcopy(msg->any.lnk_span.cl_label,
643 cls->cl_label,
644 sizeof(cls->cl_label));
8c280d5d
MD
645 RB_INIT(&cls->tree);
646 RB_INSERT(h2span_cluster_tree, &cluster_tree, cls);
647 }
648
649 /*
650 * Find the node
651 */
652 dummy_node.pfs_fsid = msg->any.lnk_span.pfs_fsid;
0d20ec8a
MD
653 bcopy(msg->any.lnk_span.fs_label, dummy_node.fs_label,
654 sizeof(dummy_node.fs_label));
8c280d5d
MD
655 node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
656 if (node == NULL) {
0c3a8cd0 657 node = dmsg_alloc(sizeof(*node));
8c280d5d 658 node->pfs_fsid = msg->any.lnk_span.pfs_fsid;
0d20ec8a 659 node->pfs_type = msg->any.lnk_span.pfs_type;
ddfbb283
MD
660 bcopy(msg->any.lnk_span.fs_label,
661 node->fs_label,
662 sizeof(node->fs_label));
8c280d5d
MD
663 node->cls = cls;
664 RB_INIT(&node->tree);
665 RB_INSERT(h2span_node_tree, &cls->tree, node);
0d20ec8a
MD
666 if (dmsg_node_handler) {
667 dmsg_node_handler(&node->opaque, msg,
668 DMSG_NODEOP_ADD);
669 }
8c280d5d
MD
670 }
671
672 /*
673 * Create the link
674 */
675 assert(state->any.link == NULL);
0c3a8cd0 676 slink = dmsg_alloc(sizeof(*slink));
7dc0f844 677 TAILQ_INIT(&slink->relayq);
8c280d5d 678 slink->node = node;
7dc0f844 679 slink->dist = msg->any.lnk_span.dist;
0d20ec8a 680 slink->rnss = msg->any.lnk_span.rnss;
8c280d5d
MD
681 slink->state = state;
682 state->any.link = slink;
29ead430 683
8c280d5d
MD
684 RB_INSERT(h2span_link_tree, &node->tree, slink);
685
ddfbb283
MD
686 fprintf(stderr,
687 "LNK_SPAN(thr %p): %p %s cl=%s fs=%s dist=%d\n",
0d20ec8a 688 msg->iocom,
29ead430 689 slink,
ddfbb283
MD
690 dmsg_uuid_to_str(&msg->any.lnk_span.pfs_clid, &alloc),
691 msg->any.lnk_span.cl_label,
692 msg->any.lnk_span.fs_label,
29ead430
MD
693 msg->any.lnk_span.dist);
694 free(alloc);
29ead430 695#if 0
0c3a8cd0 696 dmsg_relay_scan(NULL, node);
29ead430 697#endif
0d20ec8a 698 dmsg_iocom_signal(msg->iocom);
8c280d5d
MD
699 }
700
701 /*
702 * On transaction terminate we remove the tracking infrastructure.
703 */
5bc5bca2 704 if (msg->any.head.cmd & DMSGF_DELETE) {
8c280d5d
MD
705 slink = state->any.link;
706 assert(slink != NULL);
707 node = slink->node;
708 cls = node->cls;
709
ddfbb283 710 fprintf(stderr, "LNK_DELE(thr %p): %p %s cl=%s fs=%s dist=%d\n",
0d20ec8a 711 msg->iocom,
29ead430 712 slink,
0c3a8cd0 713 dmsg_uuid_to_str(&cls->pfs_clid, &alloc),
ddfbb283
MD
714 state->msg->any.lnk_span.cl_label,
715 state->msg->any.lnk_span.fs_label,
29ead430
MD
716 state->msg->any.lnk_span.dist);
717 free(alloc);
718
8c280d5d 719 /*
7dc0f844
MD
720 * Clean out all relays. This requires terminating each
721 * relay transaction.
8c280d5d
MD
722 */
723 while ((relay = TAILQ_FIRST(&slink->relayq)) != NULL) {
0c3a8cd0 724 dmsg_relay_delete(relay);
8c280d5d
MD
725 }
726
727 /*
728 * Clean out the topology
729 */
730 RB_REMOVE(h2span_link_tree, &node->tree, slink);
731 if (RB_EMPTY(&node->tree)) {
732 RB_REMOVE(h2span_node_tree, &cls->tree, node);
0d20ec8a
MD
733 if (dmsg_node_handler) {
734 dmsg_node_handler(&node->opaque, msg,
735 DMSG_NODEOP_DEL);
736 }
90e8cd1d 737 if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
8c280d5d
MD
738 RB_REMOVE(h2span_cluster_tree,
739 &cluster_tree, cls);
0c3a8cd0 740 dmsg_free(cls);
8c280d5d
MD
741 }
742 node->cls = NULL;
0c3a8cd0 743 dmsg_free(node);
7dc0f844 744 node = NULL;
8c280d5d
MD
745 }
746 state->any.link = NULL;
747 slink->state = NULL;
748 slink->node = NULL;
0c3a8cd0 749 dmsg_free(slink);
7dc0f844
MD
750
751 /*
752 * We have to terminate the transaction
753 */
0c3a8cd0 754 dmsg_state_reply(state, 0);
7dc0f844
MD
755 /* state invalid after reply */
756
757 /*
758 * If the node still exists issue any required updates. If
759 * it doesn't then all related relays have already been
760 * removed and there's nothing left to do.
761 */
29ead430 762#if 0
7dc0f844 763 if (node)
0c3a8cd0 764 dmsg_relay_scan(NULL, node);
29ead430
MD
765#endif
766 if (node)
0d20ec8a 767 dmsg_iocom_signal(msg->iocom);
8c280d5d
MD
768 }
769
770 pthread_mutex_unlock(&cluster_mtx);
771}
772
773/*
0d20ec8a
MD
774 * LNK_CIRC - Virtual circuit protocol message reception
775 * (incoming iocom lock not held)
7dc0f844 776 *
0d20ec8a 777 * Handles all cases.
7dc0f844 778 */
0d20ec8a
MD
779void
780dmsg_lnk_circ(dmsg_msg_t *msg)
7dc0f844 781{
0d20ec8a
MD
782 dmsg_circuit_t *circA;
783 dmsg_circuit_t *circB;
784 dmsg_state_t *rx_state;
785 dmsg_state_t *tx_state;
786 dmsg_state_t *state;
787 dmsg_state_t dummy;
788 dmsg_msg_t *fwd_msg;
789 dmsg_iocom_t *iocomA;
790 dmsg_iocom_t *iocomB;
f306de83 791 int disconnect;
0d20ec8a
MD
792
793 /*pthread_mutex_lock(&cluster_mtx);*/
794
a2179323
MD
795 if (DMsgDebugOpt >= 4)
796 fprintf(stderr, "CIRC receive cmd=%08x\n", msg->any.head.cmd);
797
0d20ec8a
MD
798 switch (msg->any.head.cmd & (DMSGF_CREATE |
799 DMSGF_DELETE |
800 DMSGF_REPLY)) {
801 case DMSGF_CREATE:
802 case DMSGF_CREATE | DMSGF_DELETE:
803 /*
804 * (A) wishes to establish a virtual circuit through us to (B).
805 * (B) is specified by lnk_circ.target (the message id for
806 * a LNK_SPAN that (A) received from us which represents (B)).
807 *
808 * Designate the originator of the circuit (the current
809 * remote end) as (A) and the other side as (B).
810 *
811 * Accept the VC but do not reply. We will wait for the end-
812 * to-end reply to propagate back.
813 */
814 iocomA = msg->iocom;
7dc0f844 815
0d20ec8a
MD
816 /*
817 * Locate the open transaction state that the other end
818 * specified in <target>. This will be an open SPAN
819 * transaction that we transmitted (h2span_relay) over
820 * the interface the LNK_CIRC is being received on.
821 *
822 * (all LNK_CIRC's that we transmit are on circuit0)
823 */
824 pthread_mutex_lock(&iocomA->mtx);
825 dummy.msgid = msg->any.lnk_circ.target;
826 tx_state = RB_FIND(dmsg_state_tree,
827 &iocomA->circuit0.statewr_tree,
828 &dummy);
0d20ec8a 829 pthread_mutex_unlock(&iocomA->mtx);
8d6d37b8 830 if (tx_state == NULL) {
a2179323 831 /* XXX SMP race */
8d6d37b8
MD
832 fprintf(stderr, "dmsg_lnk_circ: no circuit\n");
833 dmsg_msg_reply(msg, DMSG_ERR_CANTCIRC);
834 break;
835 }
a2179323
MD
836 if (tx_state->icmd != DMSG_LNK_SPAN) {
837 /* XXX SMP race */
838 fprintf(stderr, "dmsg_lnk_circ: not LNK_SPAN\n");
839 dmsg_msg_reply(msg, DMSG_ERR_CANTCIRC);
840 break;
841 }
0d20ec8a
MD
842
843 /* locate h2span_link */
844 rx_state = tx_state->any.relay->source_rt;
29ead430 845
0d20ec8a
MD
846 /*
847 * A wishes to establish a VC through us to the
848 * specified target.
849 *
850 * A sends us the msgid of an open SPAN transaction
851 * it received from us as <target>.
852 */
853 circA = dmsg_alloc(sizeof(*circA));
a2179323 854 dmsg_circuit_init(iocomA, circA);
0d20ec8a
MD
855 circA->state = msg->state; /* LNK_CIRC state */
856 circA->msgid = msg->state->msgid;
857 circA->span_state = tx_state; /* H2SPAN_RELAY state */
858 circA->is_relay = 1;
859 circA->refs = 2; /* state and peer */
860 msg->state->any.circ = circA;
861
862 iocomB = rx_state->iocom;
863
864 circB = dmsg_alloc(sizeof(*circB));
a2179323 865 dmsg_circuit_init(iocomB, circB);
0d20ec8a
MD
866
867 /*
868 * Create a LNK_CIRC transaction on B
869 */
870 fwd_msg = dmsg_msg_alloc(&iocomB->circuit0,
871 0, DMSG_LNK_CIRC | DMSGF_CREATE,
872 dmsg_lnk_circ, circB);
873 fwd_msg->state->any.circ = circB;
a2179323 874 fwd_msg->any.lnk_circ.target = rx_state->msgid;
0d20ec8a
MD
875 circB->state = fwd_msg->state; /* LNK_CIRC state */
876 circB->msgid = fwd_msg->any.head.msgid;
877 circB->span_state = rx_state; /* H2SPAN_LINK state */
878 circB->is_relay = 0;
879 circB->refs = 2; /* state and peer */
880
a2179323
MD
881 if (DMsgDebugOpt >= 4)
882 fprintf(stderr, "CIRC forward %p->%p\n", circA, circB);
883
0d20ec8a
MD
884 /*
885 * Link the two circuits together.
886 */
887 circA->peer = circB;
888 circB->peer = circA;
889
a2179323
MD
890 if (iocomA < iocomB) {
891 pthread_mutex_lock(&iocomA->mtx);
892 pthread_mutex_lock(&iocomB->mtx);
893 } else {
894 pthread_mutex_lock(&iocomB->mtx);
895 pthread_mutex_lock(&iocomA->mtx);
896 }
0d20ec8a
MD
897 if (RB_INSERT(dmsg_circuit_tree, &iocomA->circuit_tree, circA))
898 assert(0);
899 if (RB_INSERT(dmsg_circuit_tree, &iocomB->circuit_tree, circB))
900 assert(0);
a2179323
MD
901 if (iocomA < iocomB) {
902 pthread_mutex_unlock(&iocomB->mtx);
903 pthread_mutex_unlock(&iocomA->mtx);
904 } else {
905 pthread_mutex_unlock(&iocomA->mtx);
906 pthread_mutex_unlock(&iocomB->mtx);
907 }
0d20ec8a
MD
908
909 dmsg_msg_write(fwd_msg);
910
911 if ((msg->any.head.cmd & DMSGF_DELETE) == 0)
912 break;
913 /* FALL THROUGH TO DELETE */
914 case DMSGF_DELETE:
915 /*
916 * (A) is deleting the virtual circuit; propagate closure
917 * to (B).
918 */
919 iocomA = msg->iocom;
8d6d37b8
MD
920 if (msg->state->any.circ == NULL) {
921 /* already returned an error/deleted */
922 break;
923 }
0d20ec8a
MD
924 circA = msg->state->any.circ;
925 circB = circA->peer;
926 assert(msg->state == circA->state);
927
928 /*
f306de83
MD
929 * We are closing B's send side. If B's receive side is
930 * already closed we disconnect the circuit from B's state.
0d20ec8a 931 */
f306de83 932 disconnect = 0;
0d20ec8a
MD
933 if (circB && (state = circB->state) != NULL) {
934 if (state->rxcmd & DMSGF_DELETE) {
935 circB->state = NULL;
936 state->any.circ = NULL;
937 dmsg_circuit_drop(circB);
938 }
939 dmsg_state_reply(state, msg->any.head.error);
f306de83
MD
940 disconnect = 1;
941 }
942
943 /*
944 * We received a close on A. If A's send side is already
945 * closed we disconnect the circuit from A's state.
946 */
947 if (circA && (state = circA->state) != NULL) {
948 if (state->txcmd & DMSGF_DELETE) {
949 circA->state = NULL;
950 state->any.circ = NULL;
951 dmsg_circuit_drop(circA);
952 }
953 disconnect = 1;
7dc0f844 954 }
0d20ec8a
MD
955
956 /*
f306de83 957 * Disconnect the peer<->peer association
0d20ec8a 958 */
f306de83 959 if (disconnect) {
0d20ec8a
MD
960 if (circB) {
961 circA->peer = NULL;
962 circB->peer = NULL;
963 dmsg_circuit_drop(circA);
964 dmsg_circuit_drop(circB); /* XXX SMP */
965 }
0d20ec8a
MD
966 }
967 break;
968 case DMSGF_REPLY | DMSGF_CREATE:
969 case DMSGF_REPLY | DMSGF_CREATE | DMSGF_DELETE:
970 /*
971 * (B) is acknowledging the creation of the virtual
972 * circuit. This propagates all the way back to (A), though
973 * it should be noted that (A) can start issuing commands
974 * via the virtual circuit before seeing this reply.
975 */
976 circB = msg->state->any.circ;
f306de83 977 assert(circB);
0d20ec8a
MD
978 circA = circB->peer;
979 assert(msg->state == circB->state);
f306de83
MD
980 assert(circA);
981 if ((msg->any.head.cmd & DMSGF_DELETE) == 0) {
0d20ec8a
MD
982 dmsg_state_result(circA->state, msg->any.head.error);
983 break;
984 }
985 /* FALL THROUGH TO DELETE */
986 case DMSGF_REPLY | DMSGF_DELETE:
987 /*
988 * (B) is deleting the virtual circuit or acknowledging
989 * our deletion of the virtual circuit; propagate closure
990 * to (A).
991 */
992 iocomB = msg->iocom;
993 circB = msg->state->any.circ;
994 circA = circB->peer;
995 assert(msg->state == circB->state);
996
997 /*
f306de83
MD
998 * We received a close on (B), propagate to (A). If we have
999 * already received the close from (A) we disconnect the state.
0d20ec8a 1000 */
f306de83 1001 disconnect = 0;
0d20ec8a
MD
1002 if (circA && (state = circA->state) != NULL) {
1003 if (state->rxcmd & DMSGF_DELETE) {
1004 circA->state = NULL;
1005 state->any.circ = NULL;
1006 dmsg_circuit_drop(circA);
1007 }
1008 dmsg_state_reply(state, msg->any.head.error);
f306de83
MD
1009 disconnect = 1;
1010 }
1011
1012 /*
1013 * We received a close on (B). If (B)'s send side is already
1014 * closed we disconnect the state.
1015 */
1016 if (circB && (state = circB->state) != NULL) {
1017 if (state->txcmd & DMSGF_DELETE) {
1018 circB->state = NULL;
1019 state->any.circ = NULL;
1020 dmsg_circuit_drop(circB);
1021 }
1022 disconnect = 1;
0d20ec8a
MD
1023 }
1024
1025 /*
f306de83 1026 * Disconnect the peer<->peer association
0d20ec8a 1027 */
f306de83 1028 if (disconnect) {
0d20ec8a
MD
1029 if (circA) {
1030 circB->peer = NULL;
1031 circA->peer = NULL;
1032 dmsg_circuit_drop(circB);
1033 dmsg_circuit_drop(circA); /* XXX SMP */
1034 }
0d20ec8a
MD
1035 }
1036 break;
7dc0f844 1037 }
0d20ec8a
MD
1038
1039 /*pthread_mutex_lock(&cluster_mtx);*/
7dc0f844
MD
1040}
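/*
 * For orientation: the relay handling above also implies how an end-point
 * (A) originates a virtual circuit in the first place.  (A) picks one of
 * the LNK_SPAN transactions it received (representing a reachable node)
 * and opens a LNK_CIRC transaction on circuit0 whose lnk_circ.target is
 * that SPAN's msgid.  Hedged sketch, with a hypothetical callback:
 */
#if 0
	dmsg_msg_t *msg;

	msg = dmsg_msg_alloc(&iocom->circuit0, 0,
			     DMSG_LNK_CIRC | DMSGF_CREATE,
			     my_circ_callback, NULL);	/* hypothetical callback */
	msg->any.lnk_circ.target = span_state->msgid;	/* received SPAN's msgid */
	dmsg_msg_write(msg);
#endif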
1041
1042/*
1043 * Update relay transactions for SPANs.
1044 *
1045 * Called with cluster_mtx held.
1046 */
0c3a8cd0 1047static void dmsg_relay_scan_specific(h2span_node_t *node,
2063f4d7 1048 h2span_conn_t *conn);
7dc0f844
MD
1049
1050static void
0c3a8cd0 1051dmsg_relay_scan(h2span_conn_t *conn, h2span_node_t *node)
7dc0f844
MD
1052{
1053 h2span_cluster_t *cls;
7dc0f844
MD
1054
1055 if (node) {
1056 /*
1057 * Iterate specific node
1058 */
1059 TAILQ_FOREACH(conn, &connq, entry)
0c3a8cd0 1060 dmsg_relay_scan_specific(node, conn);
7dc0f844
MD
1061 } else {
1062 /*
02454b3e 1063 * Full iteration.
7dc0f844 1064 *
02454b3e
MD
1065 * Iterate cluster ids, nodes, and either a specific connection
1066 * or all connections.
7dc0f844 1067 */
7dc0f844
MD
1068 RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
1069 /*
1070 * Iterate node ids
1071 */
1072 RB_FOREACH(node, h2span_node_tree, &cls->tree) {
1073 /*
1074 * Synchronize the node's link (received SPANs)
1075 * with each connection's relays.
1076 */
02454b3e 1077 if (conn) {
0c3a8cd0 1078 dmsg_relay_scan_specific(node, conn);
02454b3e
MD
1079 } else {
1080 TAILQ_FOREACH(conn, &connq, entry) {
0c3a8cd0 1081 dmsg_relay_scan_specific(node,
02454b3e
MD
1082 conn);
1083 }
1084 assert(conn == NULL);
1085 }
7dc0f844
MD
1086 }
1087 }
1088 }
1089}
1090
1091/*
1092 * Update the relayed SPANs for this (node, conn).
1093 *
1094 * Iterate links and adjust relays to match. We only propagate the top link
1095 * for now (XXX we want to propagate the top two).
1096 *
0c3a8cd0 1097 * The dmsg_relay_scan_cmp() function locates the first relay element
7dc0f844 1098 * for any given node. The relay elements will be sub-sorted by dist.
8c280d5d 1099 */
7dc0f844
MD
1100struct relay_scan_info {
1101 h2span_node_t *node;
1102 h2span_relay_t *relay;
1103};
1104
1105static int
0c3a8cd0 1106dmsg_relay_scan_cmp(h2span_relay_t *relay, void *arg)
7dc0f844
MD
1107{
1108 struct relay_scan_info *info = arg;
1109
0d20ec8a 1110 if ((intptr_t)relay->source_rt->any.link->node < (intptr_t)info->node)
7dc0f844 1111 return(-1);
0d20ec8a 1112 if ((intptr_t)relay->source_rt->any.link->node > (intptr_t)info->node)
7dc0f844
MD
1113 return(1);
1114 return(0);
1115}
1116
1117static int
0c3a8cd0 1118dmsg_relay_scan_callback(h2span_relay_t *relay, void *arg)
7dc0f844
MD
1119{
1120 struct relay_scan_info *info = arg;
1121
1122 info->relay = relay;
1123 return(-1);
1124}
1125
8c280d5d 1126static void
0c3a8cd0 1127dmsg_relay_scan_specific(h2span_node_t *node, h2span_conn_t *conn)
8c280d5d 1128{
7dc0f844
MD
1129 struct relay_scan_info info;
1130 h2span_relay_t *relay;
1131 h2span_relay_t *next_relay;
1132 h2span_link_t *slink;
5bc5bca2 1133 dmsg_lnk_conn_t *lconn;
ddfbb283 1134 dmsg_lnk_span_t *lspan;
0d20ec8a
MD
1135 int count;
1136 int maxcount = 2;
1137 uint32_t lastdist = DMSG_SPAN_MAXDIST;
1138 uint32_t lastrnss = 0;
7dc0f844
MD
1139
1140 info.node = node;
1141 info.relay = NULL;
1142
1143 /*
29ead430
MD
1144 * Locate the first related relay for the node on this connection.
1145 * relay will be NULL if there were none.
7dc0f844
MD
1146 */
1147 RB_SCAN(h2span_relay_tree, &conn->tree,
0c3a8cd0 1148 dmsg_relay_scan_cmp, dmsg_relay_scan_callback, &info);
7dc0f844 1149 relay = info.relay;
cf715800
MD
1150 info.relay = NULL;
1151 if (relay)
0d20ec8a 1152 assert(relay->source_rt->any.link->node == node);
7dc0f844 1153
0c3a8cd0 1154 if (DMsgDebugOpt > 8)
81666e1b 1155 fprintf(stderr, "relay scan for connection %p\n", conn);
7dc0f844
MD
1156
1157 /*
1158 * Iterate the node's links (received SPANs) in distance order,
1159 * lowest (best) dist first.
2063f4d7
MD
1160 *
1161 * PROPAGATE THE BEST LINKS OVER THE SPECIFIED CONNECTION.
1162 *
1163 * Track relays while iterating the best links and construct
1164 * missing relays when necessary.
1165 *
1166 * (If some prior better link was removed it would have also
1167 * removed the relay, so the relay can only match exactly or
1168 * be worse).
7dc0f844 1169 */
0d20ec8a 1170 count = 0;
7dc0f844 1171 RB_FOREACH(slink, h2span_link_tree, &node->tree) {
0d20ec8a
MD
1172 /*
1173 * Increment count of successful relays. This isn't
1174 * quite accurate if we break out but nothing after
1175 * the loop uses (count).
1176 *
1177 * If count exceeds the maximum number of relays we desire
1178 * we normally want to break out. However, in order to
1179 * guarantee a symmetric path we have to continue if both
1180 * (dist) and (rnss) continue to match. Otherwise the SPAN
1181 * propagation in the reverse direction may choose different
1182 * routes and we will not have a symmetric path.
1183 *
1184 * NOTE: Spanning tree does not have to be symmetrical so
1185 * this code is not currently enabled.
1186 */
1187 if (++count >= maxcount) {
1188#ifdef REQUIRE_SYMMETRICAL
1189 if (lastdist != slink->dist || lastrnss != slink->rnss)
1190 break;
1191#else
1192 break;
1193#endif
1194 /* go beyond the nominal maximum desired relays */
1195 }
1196
29ead430 1197 /*
2063f4d7
MD
1198 * Match, relay already in-place, get the next
1199 * relay to match against the next slink.
7dc0f844 1200 */
0d20ec8a 1201 if (relay && relay->source_rt->any.link == slink) {
cf715800 1202 relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
2063f4d7
MD
1203 continue;
1204 }
1205
1206 /*
1207 * We might want this SLINK, if it passes our filters.
1208 *
1209 * The spanning tree can cause closed loops so we have
1210 * to limit slink->dist.
1211 */
0c3a8cd0 1212 if (slink->dist > DMSG_SPAN_MAXDIST)
29ead430 1213 break;
2063f4d7
MD
1214
1215 /*
1216 * Don't bother transmitting a LNK_SPAN out the same
1217 * connection it came in on. Trivial optimization.
1218 */
1219 if (slink->state->iocom == conn->state->iocom)
c1963fb2 1220 break;
7dc0f844 1221
2063f4d7
MD
1222 /*
1223 * NOTE ON FILTERS: The protocol spec allows non-requested
1224 * SPANs to be transmitted, the other end is expected to
1225 * leave their transactions open but otherwise ignore them.
1226 *
1227 * Don't bother transmitting if the remote connection
1228 * is not accepting this SPAN's peer_type.
0d20ec8a
MD
1229 *
1230 * pfs_mask is typically used so pure clients can filter
1231 * out receiving SPANs for other pure clients.
2063f4d7 1232 */
ddfbb283 1233 lspan = &slink->state->msg->any.lnk_span;
2063f4d7 1234 lconn = &conn->state->msg->any.lnk_conn;
ddfbb283 1235 if (((1LLU << lspan->peer_type) & lconn->peer_mask) == 0)
2063f4d7 1236 break;
0d20ec8a
MD
1237 if (((1LLU << lspan->pfs_type) & lconn->pfs_mask) == 0)
1238 break;
2063f4d7
MD
1239
1240 /*
ddfbb283 1241 * Do not give pure clients visibility to other pure clients
2063f4d7 1242 */
ddfbb283
MD
1243 if (lconn->pfs_type == DMSG_PFSTYPE_CLIENT &&
1244 lspan->pfs_type == DMSG_PFSTYPE_CLIENT) {
1245 break;
1246 }
1247
1248 /*
1249 * Connection filter, if cluster uuid is not NULL it must
1250 * match the span cluster uuid. Only applies when the
1251 * peer_type matches.
1252 */
1253 if (lspan->peer_type == lconn->peer_type &&
1254 !uuid_is_nil(&lconn->pfs_clid, NULL) &&
1255 uuid_compare(&slink->node->cls->pfs_clid,
1256 &lconn->pfs_clid, NULL)) {
1257 break;
7dc0f844 1258 }
2063f4d7 1259
ddfbb283
MD
1260 /*
1261 * Connection filter, if cluster label is not empty it must
1262 * match the span cluster label. Only applies when the
1263 * peer_type matches.
1264 */
1265 if (lspan->peer_type == lconn->peer_type &&
1266 lconn->cl_label[0] &&
1267 strcmp(lconn->cl_label, slink->node->cls->cl_label)) {
1268 break;
1269 }
1270
1271 /*
0d20ec8a 1272 * NOTE! pfs_fsid differentiates nodes within the same cluster
ddfbb283
MD
1273 * so we obviously don't want to match those. Similarly
1274 * for fs_label.
1275 */
1276
2063f4d7
MD
1277 /*
1278 * Ok, we've accepted this SPAN for relaying.
1279 */
1280 assert(relay == NULL ||
0d20ec8a
MD
1281 relay->source_rt->any.link->node != slink->node ||
1282 relay->source_rt->any.link->dist >= slink->dist);
1283 relay = dmsg_generate_relay(conn, slink);
1284 lastdist = slink->dist;
1285 lastrnss = slink->rnss;
2063f4d7
MD
1286
1287 /*
1288 * Match (created new relay), get the next relay to
1289 * match against the next slink.
1290 */
1291 relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
7dc0f844
MD
1292 }
1293
1294 /*
1295 * Any remaining relays belonging to this connection which match
1296 * the node are in excess of the current aggregate spanning state
1297 * and should be removed.
1298 */
0d20ec8a 1299 while (relay && relay->source_rt->any.link->node == node) {
7dc0f844 1300 next_relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
a2179323 1301 fprintf(stderr, "RELAY DELETE FROM EXTRAS\n");
0c3a8cd0 1302 dmsg_relay_delete(relay);
7dc0f844
MD
1303 relay = next_relay;
1304 }
1305}
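/*
 * Illustrative walk-through of the scan above (assumed topology): a node
 * has incoming links with dist 1, 2 and 5 and this connection currently
 * relays the dist-2 and dist-5 links.  The loop visits the links
 * best-first: the dist-1 link has no relay yet so dmsg_generate_relay()
 * creates one; once the per-node relay budget (maxcount) or a filter
 * check terminates the loop, any relays for this node that the scan no
 * longer selected (here the old dist-2 and dist-5 relays) are torn down
 * by the trailing while-loop.
 */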
1306
0d20ec8a
MD
1307/*
1308 * Helper function to generate missing relay.
1309 *
1310 * cluster_mtx must be held
1311 */
1312static
1313h2span_relay_t *
1314dmsg_generate_relay(h2span_conn_t *conn, h2span_link_t *slink)
1315{
1316 h2span_relay_t *relay;
1317 h2span_node_t *node;
1318 dmsg_msg_t *msg;
1319
1320 node = slink->node;
1321
1322 relay = dmsg_alloc(sizeof(*relay));
1323 relay->conn = conn;
1324 relay->source_rt = slink->state;
1325 /* relay->source_rt->any.link = slink; */
1326
1327 /*
1328 * NOTE: relay->target_rt->any.relay set to relay by alloc.
1329 */
1330 msg = dmsg_msg_alloc(&conn->state->iocom->circuit0,
1331 0, DMSG_LNK_SPAN | DMSGF_CREATE,
1332 dmsg_lnk_relay, relay);
1333 relay->target_rt = msg->state;
1334
1335 msg->any.lnk_span = slink->state->msg->any.lnk_span;
1336 msg->any.lnk_span.dist = slink->dist + 1;
1337 msg->any.lnk_span.rnss = slink->rnss + dmsg_rnss();
1338
1339 RB_INSERT(h2span_relay_tree, &conn->tree, relay);
1340 TAILQ_INSERT_TAIL(&slink->relayq, relay, entry);
1341
1342 dmsg_msg_write(msg);
1343
1344 return (relay);
1345}
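/*
 * Worked example of the dist/rnss adjustment above (values made up): if a
 * SPAN arrives on slink with dist=1 and rnss=0x00001234, the copy relayed
 * out over 'conn' is transmitted with dist=2 and rnss=0x00001234 plus
 * this host's dmsg_rnss() value, i.e. every relay hop adds one to the
 * distance metric and folds in a per-host random sub-sort component.
 */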
1346
1347/*
1348 * Messages received on relay SPANs. These are open transactions so it is
1349 * in fact possible for the other end to close the transaction.
1350 *
1351 * XXX MPRACE on state structure
1352 */
1353static void
1354dmsg_lnk_relay(dmsg_msg_t *msg)
1355{
1356 dmsg_state_t *state = msg->state;
1357 h2span_relay_t *relay;
1358
1359 assert(msg->any.head.cmd & DMSGF_REPLY);
1360
1361 if (msg->any.head.cmd & DMSGF_DELETE) {
1362 pthread_mutex_lock(&cluster_mtx);
a2179323 1363 fprintf(stderr, "RELAY DELETE FROM LNK_RELAY MSG\n");
0d20ec8a
MD
1364 if ((relay = state->any.relay) != NULL) {
1365 dmsg_relay_delete(relay);
1366 } else {
1367 dmsg_state_reply(state, 0);
1368 }
1369 pthread_mutex_unlock(&cluster_mtx);
1370 }
1371}
1372
a2179323
MD
1373/*
1374 * cluster_mtx held by caller
1375 */
7dc0f844
MD
1376static
1377void
0c3a8cd0 1378dmsg_relay_delete(h2span_relay_t *relay)
7dc0f844 1379{
81666e1b 1380 fprintf(stderr,
29ead430 1381 "RELAY DELETE %p RELAY %p ON CLS=%p NODE=%p DIST=%d FD %d STATE %p\n",
0d20ec8a 1382 relay->source_rt->any.link,
29ead430 1383 relay,
0d20ec8a
MD
1384 relay->source_rt->any.link->node->cls, relay->source_rt->any.link->node,
1385 relay->source_rt->any.link->dist,
1386 relay->conn->state->iocom->sock_fd, relay->target_rt);
90e8cd1d 1387
7dc0f844 1388 RB_REMOVE(h2span_relay_tree, &relay->conn->tree, relay);
0d20ec8a 1389 TAILQ_REMOVE(&relay->source_rt->any.link->relayq, relay, entry);
7dc0f844 1390
0d20ec8a
MD
1391 if (relay->target_rt) {
1392 relay->target_rt->any.relay = NULL;
1393 dmsg_state_reply(relay->target_rt, 0);
7dc0f844 1394 /* state invalid after reply */
0d20ec8a 1395 relay->target_rt = NULL;
7dc0f844
MD
1396 }
1397 relay->conn = NULL;
0d20ec8a 1398 relay->source_rt = NULL;
0c3a8cd0 1399 dmsg_free(relay);
8c280d5d 1400}
81666e1b 1401
1a34728c 1402static void *
0c3a8cd0 1403dmsg_volconf_thread(void *info)
1a34728c
MD
1404{
1405 h2span_media_config_t *conf = info;
1406
1407 pthread_mutex_lock(&cluster_mtx);
1408 while ((conf->ctl & H2CONFCTL_STOP) == 0) {
1409 if (conf->ctl & H2CONFCTL_UPDATE) {
1410 fprintf(stderr, "VOLCONF UPDATE\n");
1411 conf->ctl &= ~H2CONFCTL_UPDATE;
1412 if (bcmp(&conf->copy_run, &conf->copy_pend,
1413 sizeof(conf->copy_run)) == 0) {
1414 fprintf(stderr, "VOLCONF: no changes\n");
1415 continue;
1416 }
1417 /*
1418 * XXX TODO - auto reconnect on lookup failure or
1419 * connect failure or stream failure.
1420 */
1421
1422 pthread_mutex_unlock(&cluster_mtx);
0c3a8cd0 1423 dmsg_volconf_stop(conf);
1a34728c
MD
1424 conf->copy_run = conf->copy_pend;
1425 if (conf->copy_run.copyid != 0 &&
1426 strncmp(conf->copy_run.path, "span:", 5) == 0) {
0c3a8cd0 1427 dmsg_volconf_start(conf,
1a34728c
MD
1428 conf->copy_run.path + 5);
1429 }
1430 pthread_mutex_lock(&cluster_mtx);
1431 fprintf(stderr, "VOLCONF UPDATE DONE state %d\n", conf->state);
1432 }
1433 if (conf->state == H2MC_CONNECT) {
0c3a8cd0 1434 dmsg_volconf_start(conf, conf->copy_run.path + 5);
1a34728c
MD
1435 pthread_mutex_unlock(&cluster_mtx);
1436 sleep(5);
1437 pthread_mutex_lock(&cluster_mtx);
1438 } else {
1439 pthread_cond_wait(&conf->cond, &cluster_mtx);
1440 }
1441 }
1442 pthread_mutex_unlock(&cluster_mtx);
0c3a8cd0 1443 dmsg_volconf_stop(conf);
1a34728c
MD
1444 return(NULL);
1445}
1446
1447static
1448void
0c3a8cd0 1449dmsg_volconf_stop(h2span_media_config_t *conf)
1a34728c
MD
1450{
1451 switch(conf->state) {
1452 case H2MC_STOPPED:
1453 break;
1454 case H2MC_CONNECT:
1455 conf->state = H2MC_STOPPED;
1456 break;
1457 case H2MC_RUNNING:
1458 shutdown(conf->fd, SHUT_WR);
1459 pthread_join(conf->iocom_thread, NULL);
1460 conf->iocom_thread = NULL;
1461 break;
1462 }
1463}
1464
1465static
1466void
0c3a8cd0 1467dmsg_volconf_start(h2span_media_config_t *conf, const char *hostname)
1a34728c 1468{
0c3a8cd0 1469 dmsg_master_service_info_t *info;
e1648a68 1470
1a34728c
MD
1471 switch(conf->state) {
1472 case H2MC_STOPPED:
1473 case H2MC_CONNECT:
0c3a8cd0 1474 conf->fd = dmsg_connect(hostname);
1a34728c
MD
1475 if (conf->fd < 0) {
1476 fprintf(stderr, "Unable to connect to %s\n", hostname);
1477 conf->state = H2MC_CONNECT;
1478 } else {
e1648a68
MD
1479 info = malloc(sizeof(*info));
1480 bzero(info, sizeof(*info));
1481 info->fd = conf->fd;
1482 info->detachme = 0;
1a34728c 1483 conf->state = H2MC_RUNNING;
e1648a68 1484 pthread_create(&conf->iocom_thread, NULL,
0c3a8cd0 1485 dmsg_master_service, info);
1a34728c
MD
1486 }
1487 break;
1488 case H2MC_RUNNING:
1489 break;
1490 }
1491}
1492
0d20ec8a
MD
1493/************************************************************************
1494 * MESSAGE ROUTING AND SOURCE VALIDATION *
1495 ************************************************************************/
1496
1497int
1498dmsg_circuit_relay(dmsg_msg_t *msg)
1499{
1500 dmsg_iocom_t *iocom = msg->iocom;
1501 dmsg_circuit_t *circ;
1502 dmsg_circuit_t *peer;
1503 dmsg_circuit_t dummy;
1504 int error = 0;
1505
1506 /*
1507 * Relay occurs before any state processing, msg state should always
1508 * be NULL.
1509 */
1510 assert(msg->state == NULL);
1511
1512 /*
1513 * Lookup the circuit on the incoming iocom.
1514 */
a2179323 1515 pthread_mutex_lock(&iocom->mtx);
0d20ec8a
MD
1516
1517 dummy.msgid = msg->any.head.circuit;
1518 circ = RB_FIND(dmsg_circuit_tree, &iocom->circuit_tree, &dummy);
1519 assert(circ);
1520 peer = circ->peer;
a2179323
MD
1521 dmsg_circuit_hold(peer);
1522
1523 if (DMsgDebugOpt >= 4) {
1524 fprintf(stderr,
1525 "CIRC relay %08x %p->%p\n",
1526 msg->any.head.cmd, circ, peer);
1527 }
0d20ec8a
MD
1528
1529 msg->iocom = peer->iocom;
1530 msg->any.head.circuit = peer->msgid;
a2179323
MD
1531 dmsg_circuit_drop_locked(msg->circuit);
1532 msg->circuit = peer;
0d20ec8a 1533
a2179323 1534 pthread_mutex_unlock(&iocom->mtx);
0d20ec8a 1535
0d20ec8a
MD
1536 dmsg_msg_write(msg);
1537 error = DMSG_IOQ_ERROR_ROUTED;
1538
1539 return error;
1540}
1541
29ead430 1542/************************************************************************
90e8cd1d 1543 * ROUTER AND MESSAGING HANDLES *
29ead430
MD
1544 ************************************************************************
1545 *
90e8cd1d
MD
1546 * Basically the idea here is to provide a stable data structure which
1547 * can be localized to the caller for higher level protocols to work with.
0c3a8cd0 1548 * Depending on the context, these dmsg_handles can be pooled by use-case
90e8cd1d
MD
1549 * and remain persistent through a client's (or mount point's) life.
1550 */
1551
1552#if 0
1553/*
1554 * Obtain a stable handle on a cluster given its uuid. This ties directly
1555 * into the global cluster topology, creating the structure if necessary
1556 * (even if the uuid does not exist or does not exist yet), and preventing
1557 * the structure from getting ripped out from under us while we hold a
1558 * pointer to it.
1559 */
1560h2span_cluster_t *
0c3a8cd0 1561dmsg_cluster_get(uuid_t *pfs_clid)
90e8cd1d
MD
1562{
1563 h2span_cluster_t dummy_cls;
1564 h2span_cluster_t *cls;
1565
1566 dummy_cls.pfs_clid = *pfs_clid;
1567 pthread_mutex_lock(&cluster_mtx);
1568 cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
1569 if (cls)
1570 ++cls->refs;
1571 pthread_mutex_unlock(&cluster_mtx);
1572 return (cls);
1573}
1574
1575void
0c3a8cd0 1576dmsg_cluster_put(h2span_cluster_t *cls)
90e8cd1d
MD
1577{
1578 pthread_mutex_lock(&cluster_mtx);
1579 assert(cls->refs > 0);
1580 --cls->refs;
1581 if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
1582 RB_REMOVE(h2span_cluster_tree,
1583 &cluster_tree, cls);
0c3a8cd0 1584 dmsg_free(cls);
90e8cd1d
MD
1585 }
1586 pthread_mutex_unlock(&cluster_mtx);
1587}
1588
1589/*
1590 * Obtain a stable handle to a specific cluster node given its uuid.
1591 * This handle does NOT lock in the route to the node and is typically
0c3a8cd0 1592 * used as part of the dmsg_handle_*() API to obtain a set of
90e8cd1d 1593 * stable nodes.
29ead430 1594 */
90e8cd1d 1595h2span_node_t *
0c3a8cd0 1596dmsg_node_get(h2span_cluster_t *cls, uuid_t *pfs_fsid)
90e8cd1d
MD
1597{
1598}
1599
1600#endif
29ead430 1601
81666e1b
MD
1602/*
1603 * Dumps the spanning tree
a2179323
MD
1604 *
1605 * DEBUG ONLY
81666e1b
MD
1606 */
1607void
0d20ec8a 1608dmsg_shell_tree(dmsg_circuit_t *circuit, char *cmdbuf __unused)
81666e1b
MD
1609{
1610 h2span_cluster_t *cls;
1611 h2span_node_t *node;
1612 h2span_link_t *slink;
a2179323 1613 h2span_relay_t *relay;
81666e1b
MD
1614 char *uustr = NULL;
1615
1616 pthread_mutex_lock(&cluster_mtx);
1617 RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
0d20ec8a
MD
1618 dmsg_circuit_printf(circuit, "Cluster %s %s (%s)\n",
1619 dmsg_peer_type_to_str(cls->peer_type),
1620 dmsg_uuid_to_str(&cls->pfs_clid, &uustr),
1621 cls->cl_label);
81666e1b 1622 RB_FOREACH(node, h2span_node_tree, &cls->tree) {
0d20ec8a
MD
1623 dmsg_circuit_printf(circuit, " Node %s %s (%s)\n",
1624 dmsg_pfs_type_to_str(node->pfs_type),
0c3a8cd0 1625 dmsg_uuid_to_str(&node->pfs_fsid, &uustr),
ddfbb283 1626 node->fs_label);
81666e1b 1627 RB_FOREACH(slink, h2span_link_tree, &node->tree) {
0d20ec8a 1628 dmsg_circuit_printf(circuit,
a2179323
MD
1629 "\tSLink msgid %016jx "
1630 "dist=%d via %d\n",
1631 (intmax_t)slink->state->msgid,
0c3a8cd0
MD
1632 slink->dist,
1633 slink->state->iocom->sock_fd);
a2179323
MD
1634 TAILQ_FOREACH(relay, &slink->relayq, entry) {
1635 dmsg_circuit_printf(circuit,
1636 "\t Relay-out msgid %016jx "
1637 "via %d\n",
1638 (intmax_t)relay->target_rt->msgid,
1639 relay->target_rt->iocom->sock_fd);
1640 }
81666e1b
MD
1641 }
1642 }
1643 }
1644 pthread_mutex_unlock(&cluster_mtx);
1645 if (uustr)
1646 free(uustr);
1647#if 0
1648 TAILQ_FOREACH(conn, &connq, entry) {
1649 }
1650#endif
1651}
0d20ec8a 1652
a2179323
MD
1653/*
1654 * DEBUG ONLY
1655 *
1656 * Locate the state representing an incoming LNK_SPAN given its msgid.
1657 */
1658int
1659dmsg_debug_findspan(uint64_t msgid, dmsg_state_t **statep)
1660{
1661 h2span_cluster_t *cls;
1662 h2span_node_t *node;
1663 h2span_link_t *slink;
1664 h2span_relay_t *relay;
1665
1666 pthread_mutex_lock(&cluster_mtx);
1667 relay = NULL;
1668 RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
1669 RB_FOREACH(node, h2span_node_tree, &cls->tree) {
1670 RB_FOREACH(slink, h2span_link_tree, &node->tree) {
1671 if (slink->state->msgid == msgid) {
1672 *statep = slink->state;
1673 goto found;
1674 }
1675 }
1676 }
1677 }
1678 pthread_mutex_unlock(&cluster_mtx);
1679 *statep = NULL;
1680 return(ENOENT);
1681found:
1682 pthread_mutex_unlock(&cluster_mtx);
1683 return(0);
1684}
1685
0d20ec8a
MD
1686/*
1687 * Random number sub-sort value to add to SPAN rnss fields on relay.
1688 * This allows us to differentiate spans with the same <dist> field
1689 * for relaying purposes. We must normally limit the number of relays
1690 * for any given SPAN origination but we must also guarantee that a
1691 * symmetric reverse path exists, so we use the rnss field as a sub-sort
1692 * (since there can be thousands or millions if we only match on <dist>),
1693 * and if there are STILL too many spans we go past the limit.
1694 */
1695static
1696uint32_t
1697dmsg_rnss(void)
1698{
1699 if (DMsgRNSS == 0) {
1700 pthread_mutex_lock(&cluster_mtx);
1701 while (DMsgRNSS == 0) {
1702 srandomdev();
1703 DMsgRNSS = random();
1704 }
1705 pthread_mutex_unlock(&cluster_mtx);
1706 }
1707 return(DMsgRNSS);
1708}
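/*
 * Worked example (assumed values): two equal-dist paths to the same node
 * that pass through different relay hosts accumulate different rnss sums,
 * because each host folds its own DMsgRNSS into the SPANs it relays.  The
 * (dist, rnss) sort keys used by h2span_link_cmp()/h2span_relay_cmp() can
 * therefore still order such paths deterministically, which is what the
 * REQUIRE_SYMMETRICAL branch in dmsg_relay_scan_specific() would rely on
 * if it were enabled.
 */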