/*
 * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * LNK_SPAN PROTOCOL SUPPORT FUNCTIONS - Please see sys/dmsg.h for an
 * involved explanation of the protocol.
 */

#include "dmsg_local.h"

void (*dmsg_node_handler)(void **opaquep, struct dmsg_msg *msg, int op);

/*
 * Maximum spanning tree distance.  This has the practical effect of
 * stopping tail-chasing closed loops when a feeder span is lost.
 */
#define DMSG_SPAN_MAXDIST	16

/*
 * RED-BLACK TREE DEFINITIONS
 *
 * We need to track:
 *
 * (1) shared fsid's (a cluster).
 * (2) unique fsid's (a node in a cluster) <--- LNK_SPAN transactions.
 *
 * We need to aggregate all active LNK_SPANs and create our own outgoing
 * LNK_SPAN transactions on each of our connections representing the
 * aggregated state.
 *
 * h2span_conn	- list of iocom connections that wish to receive SPAN
 *		  propagation from other connections.  Might contain
 *		  a filter string.  Only iocoms with an open LNK_CONN
 *		  transaction are applicable for SPAN propagation.
 *
 * h2span_relay	- List of links relayed (via SPAN).  Essentially each
 *		  relay structure represents a LNK_SPAN transaction that
 *		  we initiated, versus h2span_link, which is a LNK_SPAN
 *		  transaction that we received.
 *
 * --
 *
 * h2span_cluster	- Organizes the shared fsid's.  One structure for
 *			  each cluster.
 *
 * h2span_node		- Organizes the nodes in a cluster.  One structure
 *			  for each unique {cluster,node}, aka {fsid, pfs_fsid}.
 *
 * h2span_link		- Organizes all incoming and outgoing LNK_SPAN message
 *			  transactions related to a node.
 *
 *			  One h2span_link structure for each incoming LNK_SPAN
 *			  transaction.  Links selected for propagation back
 *			  out are also where the outgoing LNK_SPAN messages
 *			  are indexed into (so we can propagate changes).
 *
 *			  The h2span_link's use a red-black tree to sort the
 *			  distance hop metric for the incoming LNK_SPAN.  We
 *			  then select the top N for outgoing.  When the
 *			  topology changes the top N may also change and cause
 *			  new outgoing LNK_SPAN transactions to be opened
 *			  and less desirable ones to be closed, causing
 *			  transactional aborts within the message flow in
 *			  the process.
 *
 * Also note		- All outgoing LNK_SPAN message transactions are also
 *			  entered into a red-black tree for use by the routing
 *			  function.  This is handled by msg.c in the state
 *			  code, not here.
 */
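
/*
 * Rough sketch of how these structures nest (illustrative only, derived
 * from the definitions below):
 *
 *	cluster_tree --> h2span_cluster  (one per shared fsid)
 *	                     h2span_node  (one per {fsid,pfs_fsid})
 *	                         h2span_link  (one per received LNK_SPAN)
 *
 *	connq ---------> h2span_conn     (one per open LNK_CONN)
 *	                     h2span_relay (one per LNK_SPAN we relay out)
 *
 * Each h2span_relay is also queued on its source h2span_link's relayq,
 * which is what lets a deleted link terminate all of its outgoing relays.
 */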

struct h2span_link;
struct h2span_relay;
TAILQ_HEAD(h2span_media_queue, h2span_media);
TAILQ_HEAD(h2span_conn_queue, h2span_conn);
TAILQ_HEAD(h2span_relay_queue, h2span_relay);

RB_HEAD(h2span_cluster_tree, h2span_cluster);
RB_HEAD(h2span_node_tree, h2span_node);
RB_HEAD(h2span_link_tree, h2span_link);
RB_HEAD(h2span_relay_tree, h2span_relay);
uint32_t DMsgRNSS;

/*
 * This represents a media instance (tracked by mediaid).
 */
struct h2span_media {
	TAILQ_ENTRY(h2span_media) entry;
	uuid_t	mediaid;
	int	refs;
	struct h2span_media_config {
		dmsg_vol_data_t		copy_run;
		dmsg_vol_data_t		copy_pend;
		pthread_t		thread;
		pthread_cond_t		cond;
		int			ctl;
		int			fd;
		dmsg_iocom_t		iocom;
		pthread_t		iocom_thread;
		enum { H2MC_STOPPED, H2MC_CONNECT, H2MC_RUNNING } state;
	} config[DMSG_COPYID_COUNT];
};

typedef struct h2span_media_config h2span_media_config_t;

#define H2CONFCTL_STOP		0x00000001
#define H2CONFCTL_UPDATE	0x00000002

/*
 * Received LNK_CONN transaction enables SPAN protocol over connection.
 * (may contain filter).  Typically one for each mount and several may
 * share the same media.
 */
struct h2span_conn {
	TAILQ_ENTRY(h2span_conn) entry;
	struct h2span_relay_tree tree;
	struct h2span_media *media;
	dmsg_state_t *state;
};

/*
 * All received LNK_SPANs are organized by cluster (pfs_clid),
 * node (pfs_fsid), and link (received LNK_SPAN transaction).
 */
struct h2span_cluster {
	RB_ENTRY(h2span_cluster) rbnode;
	struct h2span_node_tree tree;
	uuid_t	pfs_clid;		/* shared fsid */
	uint8_t	peer_type;
	char	cl_label[128];		/* cluster label (typ PEER_BLOCK) */
	int	refs;			/* prevents destruction */
};

struct h2span_node {
	RB_ENTRY(h2span_node) rbnode;
	struct h2span_link_tree tree;
	struct h2span_cluster *cls;
	uint8_t	pfs_type;
	uuid_t	pfs_fsid;		/* unique fsid */
	char	fs_label[128];		/* fs label (typ PEER_HAMMER2) */
	void	*opaque;
};

struct h2span_link {
	RB_ENTRY(h2span_link) rbnode;
	dmsg_state_t	*state;		/* state<->link */
	struct h2span_node *node;	/* related node */
	uint32_t	dist;
	uint32_t	rnss;
	struct h2span_relay_queue relayq; /* relay out */
};

/*
 * Any LNK_SPAN transactions we receive which are relayed out other
 * connections utilize this structure to track the LNK_SPAN transactions
 * we initiate (relay out) on other connections.  We only relay out
 * LNK_SPANs on connections we have an open CONN transaction for.
 *
 * The relay structure points to the outgoing LNK_SPAN trans (out_state)
 * and to the incoming LNK_SPAN transaction (in_state).  The relay
 * structure holds refs on the related states.
 *
 * In many respects this is the core of the protocol... actually figuring
 * out what LNK_SPANs to relay.  The spanid used for relaying is the
 * address of the 'state' structure, which is why h2span_relay has to
 * be entered into a RB-TREE based at h2span_conn (so we can look
 * up the spanid to validate it).
 */
struct h2span_relay {
	TAILQ_ENTRY(h2span_relay) entry;	/* from link */
	RB_ENTRY(h2span_relay) rbnode;		/* from h2span_conn */
	struct h2span_conn	*conn;		/* related CONN transaction */
	dmsg_state_t		*source_rt;	/* h2span_link state */
	dmsg_state_t		*target_rt;	/* h2span_relay state */
};

typedef struct h2span_media h2span_media_t;
typedef struct h2span_conn h2span_conn_t;
typedef struct h2span_cluster h2span_cluster_t;
typedef struct h2span_node h2span_node_t;
typedef struct h2span_link h2span_link_t;
typedef struct h2span_relay h2span_relay_t;

#define dmsg_termstr(array)	_dmsg_termstr((array), sizeof(array))

static h2span_relay_t *dmsg_generate_relay(h2span_conn_t *conn,
					h2span_link_t *slink);
static uint32_t dmsg_rnss(void);

static __inline
void
_dmsg_termstr(char *base, size_t size)
{
	base[size-1] = 0;
}
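
/*
 * Usage note (illustrative): dmsg_termstr() is applied to the fixed-size
 * label arrays received off the wire, e.g.
 *
 *	dmsg_termstr(msg->any.lnk_span.cl_label);
 *
 * guaranteeing NUL-termination before any strcmp()/printf() use,
 * regardless of what the remote end actually sent.
 */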

/*
 * Cluster peer_type, uuid, AND label must all match for a cluster match.
 */
static
int
h2span_cluster_cmp(h2span_cluster_t *cls1, h2span_cluster_t *cls2)
{
	int r;

	if (cls1->peer_type < cls2->peer_type)
		return(-1);
	if (cls1->peer_type > cls2->peer_type)
		return(1);
	r = uuid_compare(&cls1->pfs_clid, &cls2->pfs_clid, NULL);
	if (r == 0)
		r = strcmp(cls1->cl_label, cls2->cl_label);

	return r;
}

/*
 * Match against fs_label/pfs_fsid.  Together these two items represent a
 * unique node.  In most cases the primary differentiator is pfs_fsid but
 * we also string-match fs_label.
 */
static
int
h2span_node_cmp(h2span_node_t *node1, h2span_node_t *node2)
{
	int r;

	r = strcmp(node1->fs_label, node2->fs_label);
	if (r == 0)
		r = uuid_compare(&node1->pfs_fsid, &node2->pfs_fsid, NULL);
	return (r);
}

/*
 * Sort/subsort must match h2span_relay_cmp() under any given node
 * to make the aggregation algorithm easier, so the best links are
 * in the same sorted order as the best relays.
 *
 * NOTE: We cannot use link*->state->msgid because this msgid is created
 *	 by each remote host and thus might wind up being the same.
 */
static
int
h2span_link_cmp(h2span_link_t *link1, h2span_link_t *link2)
{
	if (link1->dist < link2->dist)
		return(-1);
	if (link1->dist > link2->dist)
		return(1);
	if (link1->rnss < link2->rnss)
		return(-1);
	if (link1->rnss > link2->rnss)
		return(1);
#if 1
	if ((uintptr_t)link1->state < (uintptr_t)link2->state)
		return(-1);
	if ((uintptr_t)link1->state > (uintptr_t)link2->state)
		return(1);
#else
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
#endif
	return(0);
}
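
/*
 * Illustrative example: given links {dist=1,rnss=5}, {dist=1,rnss=9} and
 * {dist=2,rnss=0} under one node, RB_FOREACH visits them in exactly that
 * order, so the "top N" selection in dmsg_relay_scan_specific() simply
 * takes the first N entries of the iteration.
 */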

/*
 * Relay entries are sorted by node, subsorted by distance and link
 * address (so we can match up the conn->tree relay topology with
 * a node's link topology).
 */
static
int
h2span_relay_cmp(h2span_relay_t *relay1, h2span_relay_t *relay2)
{
	h2span_link_t *link1 = relay1->source_rt->any.link;
	h2span_link_t *link2 = relay2->source_rt->any.link;

	if ((intptr_t)link1->node < (intptr_t)link2->node)
		return(-1);
	if ((intptr_t)link1->node > (intptr_t)link2->node)
		return(1);
	if (link1->dist < link2->dist)
		return(-1);
	if (link1->dist > link2->dist)
		return(1);
	if (link1->rnss < link2->rnss)
		return(-1);
	if (link1->rnss > link2->rnss)
		return(1);
#if 1
	if ((uintptr_t)link1->state < (uintptr_t)link2->state)
		return(-1);
	if ((uintptr_t)link1->state > (uintptr_t)link2->state)
		return(1);
#else
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
#endif
	return(0);
}

RB_PROTOTYPE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_PROTOTYPE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_PROTOTYPE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_PROTOTYPE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);

RB_GENERATE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_GENERATE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_GENERATE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_GENERATE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);

/*
 * Global mutex protects cluster_tree lookups, connq, mediaq.
 */
static pthread_mutex_t cluster_mtx;
static struct h2span_cluster_tree cluster_tree = RB_INITIALIZER(cluster_tree);
static struct h2span_conn_queue connq = TAILQ_HEAD_INITIALIZER(connq);
static struct h2span_media_queue mediaq = TAILQ_HEAD_INITIALIZER(mediaq);

static void dmsg_lnk_span(dmsg_msg_t *msg);
static void dmsg_lnk_conn(dmsg_msg_t *msg);
static void dmsg_lnk_circ(dmsg_msg_t *msg);
static void dmsg_lnk_relay(dmsg_msg_t *msg);
static void dmsg_relay_scan(h2span_conn_t *conn, h2span_node_t *node);
static void dmsg_relay_delete(h2span_relay_t *relay);

static void *dmsg_volconf_thread(void *info);
static void dmsg_volconf_stop(h2span_media_config_t *conf);
static void dmsg_volconf_start(h2span_media_config_t *conf,
				const char *hostname);

void
dmsg_msg_lnk_signal(dmsg_iocom_t *iocom __unused)
{
	pthread_mutex_lock(&cluster_mtx);
	dmsg_relay_scan(NULL, NULL);
	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * DMSG_PROTO_LNK - Generic dispatch for DMSG_PROTO_LNK messages.
 * (incoming iocom lock not held)
 *
 * This function is typically called for one-way and opening-transactions
 * since state->func is assigned after that, but it will also be called
 * if no state->func is assigned on transaction-open.
 */
void
dmsg_msg_lnk(dmsg_msg_t *msg)
{
	uint32_t icmd = msg->state ? msg->state->icmd : msg->any.head.cmd;

	switch(icmd & DMSGF_BASECMDMASK) {
	case DMSG_LNK_CONN:
		dmsg_lnk_conn(msg);
		break;
	case DMSG_LNK_SPAN:
		dmsg_lnk_span(msg);
		break;
	case DMSG_LNK_CIRC:
		dmsg_lnk_circ(msg);
		break;
	default:
		fprintf(stderr,
			"MSG_PROTO_LNK: Unknown msg %08x\n", msg->any.head.cmd);
		dmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
		/* state invalid after reply */
		break;
	}
}

/*
 * LNK_CONN - iocom identify message reception.
 * (incoming iocom lock not held)
 *
 * Remote node identifies itself to us, sets up a SPAN filter, and gives us
 * the ok to start transmitting SPANs.
 */
void
dmsg_lnk_conn(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	h2span_media_t *media;
	h2span_media_config_t *conf;
	h2span_conn_t *conn;
	h2span_relay_t *relay;
	char *alloc = NULL;
	int i;

	pthread_mutex_lock(&cluster_mtx);

	switch(msg->any.head.cmd & DMSGF_TRANSMASK) {
	case DMSG_LNK_CONN | DMSGF_CREATE:
	case DMSG_LNK_CONN | DMSGF_CREATE | DMSGF_DELETE:
		/*
		 * On transaction start we allocate a new h2span_conn and
		 * acknowledge the request, leaving the transaction open.
		 * We then relay priority-selected SPANs.
		 */
		fprintf(stderr, "LNK_CONN(%08x): %s/%s/%s\n",
			(uint32_t)msg->any.head.msgid,
			dmsg_uuid_to_str(&msg->any.lnk_conn.pfs_clid,
					 &alloc),
			msg->any.lnk_conn.cl_label,
			msg->any.lnk_conn.fs_label);
		free(alloc);

		conn = dmsg_alloc(sizeof(*conn));

		RB_INIT(&conn->tree);
		state->iocom->conn = conn;	/* XXX only one */
		conn->state = state;
		state->func = dmsg_lnk_conn;
		state->any.conn = conn;
		TAILQ_INSERT_TAIL(&connq, conn, entry);

		/*
		 * Set up media
		 */
		TAILQ_FOREACH(media, &mediaq, entry) {
			if (uuid_compare(&msg->any.lnk_conn.mediaid,
					 &media->mediaid, NULL) == 0) {
				break;
			}
		}
		if (media == NULL) {
			media = dmsg_alloc(sizeof(*media));
			media->mediaid = msg->any.lnk_conn.mediaid;
			TAILQ_INSERT_TAIL(&mediaq, media, entry);
		}
		conn->media = media;
		++media->refs;

		if ((msg->any.head.cmd & DMSGF_DELETE) == 0) {
			dmsg_msg_result(msg, 0);
			dmsg_iocom_signal(msg->iocom);
			break;
		}
		/* FALL THROUGH */
	case DMSG_LNK_CONN | DMSGF_DELETE:
	case DMSG_LNK_ERROR | DMSGF_DELETE:
deleteconn:
		/*
		 * On transaction terminate we clean out our h2span_conn
		 * and acknowledge the request, closing the transaction.
		 */
		fprintf(stderr, "LNK_CONN: Terminated\n");
		conn = state->any.conn;
		assert(conn);

		/*
		 * Clean out the media structure.  If refs drops to zero we
		 * also clean out the media config threads.  These threads
		 * maintain span connections to other hammer2 service daemons.
		 */
		media = conn->media;
		if (--media->refs == 0) {
			fprintf(stderr, "Shutting down media spans\n");
			for (i = 0; i < DMSG_COPYID_COUNT; ++i) {
				conf = &media->config[i];

				if (conf->thread == NULL)
					continue;
				conf->ctl = H2CONFCTL_STOP;
				pthread_cond_signal(&conf->cond);
			}
			for (i = 0; i < DMSG_COPYID_COUNT; ++i) {
				conf = &media->config[i];

				if (conf->thread == NULL)
					continue;
				pthread_mutex_unlock(&cluster_mtx);
				pthread_join(conf->thread, NULL);
				pthread_mutex_lock(&cluster_mtx);
				conf->thread = NULL;
				pthread_cond_destroy(&conf->cond);
			}
			fprintf(stderr, "Media shutdown complete\n");
			TAILQ_REMOVE(&mediaq, media, entry);
			dmsg_free(media);
		}

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay transaction.
		 */
		while ((relay = RB_ROOT(&conn->tree)) != NULL) {
			dmsg_relay_delete(relay);
		}

		/*
		 * Clean out conn
		 */
		conn->media = NULL;
		conn->state = NULL;
		msg->state->any.conn = NULL;
		msg->state->iocom->conn = NULL;
		TAILQ_REMOVE(&connq, conn, entry);
		dmsg_free(conn);

		dmsg_msg_reply(msg, 0);
		/* state invalid after reply */
		break;
	case DMSG_LNK_VOLCONF:
		/*
		 * One-way volume-configuration message is transmitted
		 * over the open LNK_CONN transaction.
		 */
		fprintf(stderr, "RECEIVED VOLCONF\n");
		if (msg->any.lnk_volconf.index < 0 ||
		    msg->any.lnk_volconf.index >= DMSG_COPYID_COUNT) {
			fprintf(stderr, "VOLCONF: ILLEGAL INDEX %d\n",
				msg->any.lnk_volconf.index);
			break;
		}
		if (msg->any.lnk_volconf.copy.path[sizeof(msg->any.lnk_volconf.copy.path) - 1] != 0 ||
		    msg->any.lnk_volconf.copy.path[0] == 0) {
			fprintf(stderr, "VOLCONF: ILLEGAL PATH %d\n",
				msg->any.lnk_volconf.index);
			break;
		}
		conn = msg->state->any.conn;
		if (conn == NULL) {
			fprintf(stderr, "VOLCONF: LNK_CONN is missing\n");
			break;
		}
		conf = &conn->media->config[msg->any.lnk_volconf.index];
		conf->copy_pend = msg->any.lnk_volconf.copy;
		conf->ctl |= H2CONFCTL_UPDATE;
		if (conf->thread == NULL) {
			fprintf(stderr, "VOLCONF THREAD STARTED\n");
			pthread_cond_init(&conf->cond, NULL);
			pthread_create(&conf->thread, NULL,
				       dmsg_volconf_thread, (void *)conf);
		}
		pthread_cond_signal(&conf->cond);
		break;
	default:
		/*
		 * Failsafe
		 */
		if (msg->any.head.cmd & DMSGF_DELETE)
			goto deleteconn;
		dmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
		break;
	}
	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * LNK_SPAN - Spanning tree protocol message reception
 * (incoming iocom lock not held)
 *
 * Receive a spanning tree transactional message, creating or destroying
 * a SPAN and propagating it to other iocoms.
 */
void
dmsg_lnk_span(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	h2span_cluster_t dummy_cls;
	h2span_node_t dummy_node;
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	h2span_relay_t *relay;
	char *alloc = NULL;

	assert((msg->any.head.cmd & DMSGF_REPLY) == 0);

	pthread_mutex_lock(&cluster_mtx);

	/*
	 * On transaction start we initialize the tracking infrastructure
	 */
	if (msg->any.head.cmd & DMSGF_CREATE) {
		assert(state->func == NULL);
		state->func = dmsg_lnk_span;

		dmsg_termstr(msg->any.lnk_span.cl_label);
		dmsg_termstr(msg->any.lnk_span.fs_label);

		/*
		 * Find the cluster
		 */
		dummy_cls.pfs_clid = msg->any.lnk_span.pfs_clid;
		dummy_cls.peer_type = msg->any.lnk_span.peer_type;
		bcopy(msg->any.lnk_span.cl_label,
		      dummy_cls.cl_label,
		      sizeof(dummy_cls.cl_label));
		cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
		if (cls == NULL) {
			cls = dmsg_alloc(sizeof(*cls));
			cls->pfs_clid = msg->any.lnk_span.pfs_clid;
			cls->peer_type = msg->any.lnk_span.peer_type;
			bcopy(msg->any.lnk_span.cl_label,
			      cls->cl_label,
			      sizeof(cls->cl_label));
			RB_INIT(&cls->tree);
			RB_INSERT(h2span_cluster_tree, &cluster_tree, cls);
		}

		/*
		 * Find the node
		 */
		dummy_node.pfs_fsid = msg->any.lnk_span.pfs_fsid;
		bcopy(msg->any.lnk_span.fs_label, dummy_node.fs_label,
		      sizeof(dummy_node.fs_label));
		node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
		if (node == NULL) {
			node = dmsg_alloc(sizeof(*node));
			node->pfs_fsid = msg->any.lnk_span.pfs_fsid;
			node->pfs_type = msg->any.lnk_span.pfs_type;
			bcopy(msg->any.lnk_span.fs_label,
			      node->fs_label,
			      sizeof(node->fs_label));
			node->cls = cls;
			RB_INIT(&node->tree);
			RB_INSERT(h2span_node_tree, &cls->tree, node);
			if (dmsg_node_handler) {
				dmsg_node_handler(&node->opaque, msg,
						  DMSG_NODEOP_ADD);
			}
		}

		/*
		 * Create the link
		 */
		assert(state->any.link == NULL);
		slink = dmsg_alloc(sizeof(*slink));
		TAILQ_INIT(&slink->relayq);
		slink->node = node;
		slink->dist = msg->any.lnk_span.dist;
		slink->rnss = msg->any.lnk_span.rnss;
		slink->state = state;
		state->any.link = slink;

		RB_INSERT(h2span_link_tree, &node->tree, slink);

		fprintf(stderr,
			"LNK_SPAN(thr %p): %p %s cl=%s fs=%s dist=%d\n",
			msg->iocom,
			slink,
			dmsg_uuid_to_str(&msg->any.lnk_span.pfs_clid, &alloc),
			msg->any.lnk_span.cl_label,
			msg->any.lnk_span.fs_label,
			msg->any.lnk_span.dist);
		free(alloc);
#if 0
		dmsg_relay_scan(NULL, node);
#endif
		dmsg_iocom_signal(msg->iocom);
	}

	/*
	 * On transaction terminate we remove the tracking infrastructure.
	 */
	if (msg->any.head.cmd & DMSGF_DELETE) {
		slink = state->any.link;
		assert(slink != NULL);
		node = slink->node;
		cls = node->cls;

		fprintf(stderr, "LNK_DELE(thr %p): %p %s cl=%s fs=%s dist=%d\n",
			msg->iocom,
			slink,
			dmsg_uuid_to_str(&cls->pfs_clid, &alloc),
			state->msg->any.lnk_span.cl_label,
			state->msg->any.lnk_span.fs_label,
			state->msg->any.lnk_span.dist);
		free(alloc);

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay transaction.
		 */
		while ((relay = TAILQ_FIRST(&slink->relayq)) != NULL) {
			dmsg_relay_delete(relay);
		}

		/*
		 * Clean out the topology
		 */
		RB_REMOVE(h2span_link_tree, &node->tree, slink);
		if (RB_EMPTY(&node->tree)) {
			RB_REMOVE(h2span_node_tree, &cls->tree, node);
			if (dmsg_node_handler) {
				dmsg_node_handler(&node->opaque, msg,
						  DMSG_NODEOP_DEL);
			}
			if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
				RB_REMOVE(h2span_cluster_tree,
					  &cluster_tree, cls);
				dmsg_free(cls);
			}
			node->cls = NULL;
			dmsg_free(node);
			node = NULL;
		}
		state->any.link = NULL;
		slink->state = NULL;
		slink->node = NULL;
		dmsg_free(slink);

		/*
		 * We have to terminate the transaction
		 */
		dmsg_state_reply(state, 0);
		/* state invalid after reply */

		/*
		 * If the node still exists issue any required updates.  If
		 * it doesn't then all related relays have already been
		 * removed and there's nothing left to do.
		 */
#if 0
		if (node)
			dmsg_relay_scan(NULL, node);
#endif
		if (node)
			dmsg_iocom_signal(msg->iocom);
	}

	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * LNK_CIRC - Virtual circuit protocol message reception
 * (incoming iocom lock not held)
 *
 * Handles all cases.
 */
void
dmsg_lnk_circ(dmsg_msg_t *msg)
{
	dmsg_circuit_t *circA;
	dmsg_circuit_t *circB;
	dmsg_state_t *rx_state;
	dmsg_state_t *tx_state;
	dmsg_state_t *state;
	dmsg_state_t dummy;
	dmsg_msg_t *fwd_msg;
	dmsg_iocom_t *iocomA;
	dmsg_iocom_t *iocomB;

	/*pthread_mutex_lock(&cluster_mtx);*/

	switch (msg->any.head.cmd & (DMSGF_CREATE |
				     DMSGF_DELETE |
				     DMSGF_REPLY)) {
	case DMSGF_CREATE:
	case DMSGF_CREATE | DMSGF_DELETE:
		/*
		 * (A) wishes to establish a virtual circuit through us to (B).
		 * (B) is specified by lnk_circ.target (the message id for
		 * a LNK_SPAN that (A) received from us which represents (B)).
		 *
		 * Designate the originator of the circuit (the current
		 * remote end) as (A) and the other side as (B).
		 *
		 * Accept the VC but do not reply.  We will wait for the end-
		 * to-end reply to propagate back.
		 */
		iocomA = msg->iocom;

		/*
		 * Locate the open transaction state that the other end
		 * specified in <target>.  This will be an open SPAN
		 * transaction that we transmitted (h2span_relay) over
		 * the interface the LNK_CIRC is being received on.
		 *
		 * (all LNK_CIRC's that we transmit are on circuit0)
		 */
		pthread_mutex_lock(&iocomA->mtx);
		dummy.msgid = msg->any.lnk_circ.target;
		tx_state = RB_FIND(dmsg_state_tree,
				   &iocomA->circuit0.statewr_tree,
				   &dummy);
		/* XXX state refs */
		assert(tx_state);
		pthread_mutex_unlock(&iocomA->mtx);

		/* locate h2span_link */
		rx_state = tx_state->any.relay->source_rt;

		/*
		 * A wishes to establish a VC through us to the
		 * specified target.
		 *
		 * A sends us the msgid of an open SPAN transaction
		 * it received from us as <target>.
		 */
		circA = dmsg_alloc(sizeof(*circA));
		circA->iocom = iocomA;
		circA->state = msg->state;	/* LNK_CIRC state */
		circA->msgid = msg->state->msgid;
		circA->span_state = tx_state;	/* H2SPAN_RELAY state */
		circA->is_relay = 1;
		circA->refs = 2;		/* state and peer */
		msg->state->any.circ = circA;

		iocomB = rx_state->iocom;

		circB = dmsg_alloc(sizeof(*circB));

		/*
		 * Create a LNK_CIRC transaction on B
		 */
		fwd_msg = dmsg_msg_alloc(&iocomB->circuit0,
					 0, DMSG_LNK_CIRC | DMSGF_CREATE,
					 dmsg_lnk_circ, circB);
		fwd_msg->state->any.circ = circB;
		circB->iocom = iocomB;
		circB->state = fwd_msg->state;	/* LNK_CIRC state */
		circB->msgid = fwd_msg->any.head.msgid;
		circB->span_state = rx_state;	/* H2SPAN_LINK state */
		circB->is_relay = 0;
		circB->refs = 2;		/* state and peer */

		/*
		 * Link the two circuits together.
		 */
		circA->peer = circB;
		circB->peer = circA;

		if (RB_INSERT(dmsg_circuit_tree, &iocomA->circuit_tree, circA))
			assert(0);
		if (RB_INSERT(dmsg_circuit_tree, &iocomB->circuit_tree, circB))
			assert(0);

		dmsg_msg_write(fwd_msg);

		if ((msg->any.head.cmd & DMSGF_DELETE) == 0)
			break;
		/* FALL THROUGH TO DELETE */
	case DMSGF_DELETE:
		/*
		 * (A) is deleting the virtual circuit, propagate closure
		 * to (B).
		 */
		iocomA = msg->iocom;
		circA = msg->state->any.circ;
		circB = circA->peer;
		assert(msg->state == circA->state);

		/*
		 * If we are closing A and the peer B is closed, disconnect.
		 */
		if (circB && (state = circB->state) != NULL) {
			if (state->rxcmd & DMSGF_DELETE) {
				circB->state = NULL;
				state->any.circ = NULL;
				dmsg_circuit_drop(circB);
			}
			dmsg_state_reply(state, msg->any.head.error);
		}

		/*
		 * If both sides are now closed terminate the peer association
		 * and the state association.  This may drop up to two refs
		 * on circA and one on circB.
		 */
		if (circA->state->txcmd & DMSGF_DELETE) {
			if (circB) {
				circA->peer = NULL;
				circB->peer = NULL;
				dmsg_circuit_drop(circA);
				dmsg_circuit_drop(circB); /* XXX SMP */
			}
			circA->state->any.circ = NULL;
			circA->state = NULL;
			dmsg_circuit_drop(circA);
		}
		break;
	case DMSGF_REPLY | DMSGF_CREATE:
	case DMSGF_REPLY | DMSGF_CREATE | DMSGF_DELETE:
		/*
		 * (B) is acknowledging the creation of the virtual
		 * circuit.  This propagates all the way back to (A), though
		 * it should be noted that (A) can start issuing commands
		 * via the virtual circuit before seeing this reply.
		 */
		circB = msg->state->any.circ;
		circA = circB->peer;
		assert(msg->state == circB->state);
		if (circA && (msg->any.head.cmd & DMSGF_DELETE) == 0) {
			dmsg_state_result(circA->state, msg->any.head.error);
			break;
		}
		/* FALL THROUGH TO DELETE */
	case DMSGF_REPLY | DMSGF_DELETE:
		/*
		 * (B) is deleting the virtual circuit or acknowledging
		 * our deletion of the virtual circuit, propagate closure
		 * to (A).
		 */
		iocomB = msg->iocom;
		circB = msg->state->any.circ;
		circA = circB->peer;
		assert(msg->state == circB->state);

		/*
		 * If we are closing B and the peer A is closed, disconnect.
		 */
		if (circA && (state = circA->state) != NULL) {
			if (state->rxcmd & DMSGF_DELETE) {
				circA->state = NULL;
				state->any.circ = NULL;
				dmsg_circuit_drop(circA);
			}
			dmsg_state_reply(state, msg->any.head.error);
		}

		/*
		 * If both sides are now closed terminate the peer association
		 * and the state association.  This may drop up to two refs
		 * on circB and one on circA.
		 */
		if (circB->state->txcmd & DMSGF_DELETE) {
			if (circA) {
				circB->peer = NULL;
				circA->peer = NULL;
				dmsg_circuit_drop(circB);
				dmsg_circuit_drop(circA); /* XXX SMP */
			}
			circB->state->any.circ = NULL;
			circB->state = NULL;
			dmsg_circuit_drop(circB);
		}
		break;
	}

	/*pthread_mutex_unlock(&cluster_mtx);*/
}

/*
 * Update relay transactions for SPANs.
 *
 * Called with cluster_mtx held.
 */
static void dmsg_relay_scan_specific(h2span_node_t *node,
				h2span_conn_t *conn);

static void
dmsg_relay_scan(h2span_conn_t *conn, h2span_node_t *node)
{
	h2span_cluster_t *cls;

	if (node) {
		/*
		 * Iterate specific node
		 */
		TAILQ_FOREACH(conn, &connq, entry)
			dmsg_relay_scan_specific(node, conn);
	} else {
		/*
		 * Full iteration.
		 *
		 * Iterate cluster ids, nodes, and either a specific connection
		 * or all connections.
		 */
		RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
			/*
			 * Iterate node ids
			 */
			RB_FOREACH(node, h2span_node_tree, &cls->tree) {
				/*
				 * Synchronize the node's link (received SPANs)
				 * with each connection's relays.
				 */
				if (conn) {
					dmsg_relay_scan_specific(node, conn);
				} else {
					TAILQ_FOREACH(conn, &connq, entry) {
						dmsg_relay_scan_specific(node,
									 conn);
					}
					assert(conn == NULL);
				}
			}
		}
	}
}

/*
 * Update the relay'd SPANs for this (node, conn).
 *
 * Iterate links and adjust relays to match.  We only propagate the top link
 * for now (XXX we want to propagate the top two).
 *
 * The dmsg_relay_scan_cmp() function locates the first relay element
 * for any given node.  The relay elements will be sub-sorted by dist.
 */
struct relay_scan_info {
	h2span_node_t *node;
	h2span_relay_t *relay;
};

static int
dmsg_relay_scan_cmp(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	if ((intptr_t)relay->source_rt->any.link->node < (intptr_t)info->node)
		return(-1);
	if ((intptr_t)relay->source_rt->any.link->node > (intptr_t)info->node)
		return(1);
	return(0);
}

static int
dmsg_relay_scan_callback(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	info->relay = relay;
	return(-1);
}
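
/*
 * NOTE: dmsg_relay_scan_callback() records the relay and returns -1 to
 *	 terminate the scan after the first match, so the RB_SCAN in
 *	 dmsg_relay_scan_specific() below effectively implements "locate
 *	 the first relay for this node on this connection".
 */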

static void
dmsg_relay_scan_specific(h2span_node_t *node, h2span_conn_t *conn)
{
	struct relay_scan_info info;
	h2span_relay_t *relay;
	h2span_relay_t *next_relay;
	h2span_link_t *slink;
	dmsg_lnk_conn_t *lconn;
	dmsg_lnk_span_t *lspan;
	int count;
	int maxcount = 2;
	uint32_t lastdist = DMSG_SPAN_MAXDIST;
	uint32_t lastrnss = 0;

	info.node = node;
	info.relay = NULL;

	/*
	 * Locate the first related relay for the node on this connection.
	 * relay will be NULL if there were none.
	 */
	RB_SCAN(h2span_relay_tree, &conn->tree,
		dmsg_relay_scan_cmp, dmsg_relay_scan_callback, &info);
	relay = info.relay;
	info.relay = NULL;
	if (relay)
		assert(relay->source_rt->any.link->node == node);

	if (DMsgDebugOpt > 8)
		fprintf(stderr, "relay scan for connection %p\n", conn);

	/*
	 * Iterate the node's links (received SPANs) in distance order,
	 * lowest (best) dist first.
	 *
	 * PROPAGATE THE BEST LINKS OVER THE SPECIFIED CONNECTION.
	 *
	 * Track relays while iterating the best links and construct
	 * missing relays when necessary.
	 *
	 * (If some prior better link was removed it would have also
	 * removed the relay, so the relay can only match exactly or
	 * be worse).
	 */
	count = 0;
	RB_FOREACH(slink, h2span_link_tree, &node->tree) {
		/*
		 * Increment count of successful relays.  This isn't
		 * quite accurate if we break out but nothing after
		 * the loop uses (count).
		 *
		 * If count exceeds the maximum number of relays we desire
		 * we normally want to break out.  However, in order to
		 * guarantee a symmetric path we have to continue if both
		 * (dist) and (rnss) continue to match.  Otherwise the SPAN
		 * propagation in the reverse direction may choose different
		 * routes and we will not have a symmetric path.
		 *
		 * NOTE: Spanning tree does not have to be symmetrical so
		 *	 this code is not currently enabled.
		 */
		if (++count >= maxcount) {
#ifdef REQUIRE_SYMMETRICAL
			if (lastdist != slink->dist || lastrnss != slink->rnss)
				break;
#else
			break;
#endif
			/* go beyond the nominal maximum desired relays */
		}

		/*
		 * Match, relay already in-place, get the next
		 * relay to match against the next slink.
		 */
		if (relay && relay->source_rt->any.link == slink) {
			relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
			continue;
		}

		/*
		 * We might want this SLINK, if it passes our filters.
		 *
		 * The spanning tree can cause closed loops so we have
		 * to limit slink->dist.
		 */
		if (slink->dist > DMSG_SPAN_MAXDIST)
			break;

		/*
		 * Don't bother transmitting a LNK_SPAN out the same
		 * connection it came in on.  Trivial optimization.
		 */
		if (slink->state->iocom == conn->state->iocom)
			break;

		/*
		 * NOTE ON FILTERS: The protocol spec allows non-requested
		 * SPANs to be transmitted, the other end is expected to
		 * leave their transactions open but otherwise ignore them.
		 *
		 * Don't bother transmitting if the remote connection
		 * is not accepting this SPAN's peer_type.
		 *
		 * pfs_mask is typically used so pure clients can filter
		 * out receiving SPANs for other pure clients.
		 */
		lspan = &slink->state->msg->any.lnk_span;
		lconn = &conn->state->msg->any.lnk_conn;
		if (((1LLU << lspan->peer_type) & lconn->peer_mask) == 0)
			break;
		if (((1LLU << lspan->pfs_type) & lconn->pfs_mask) == 0)
			break;

		/*
		 * Do not give pure clients visibility to other pure clients
		 */
		if (lconn->pfs_type == DMSG_PFSTYPE_CLIENT &&
		    lspan->pfs_type == DMSG_PFSTYPE_CLIENT) {
			break;
		}

		/*
		 * Connection filter, if cluster uuid is not NULL it must
		 * match the span cluster uuid.  Only applies when the
		 * peer_type matches.
		 */
		if (lspan->peer_type == lconn->peer_type &&
		    !uuid_is_nil(&lconn->pfs_clid, NULL) &&
		    uuid_compare(&slink->node->cls->pfs_clid,
				 &lconn->pfs_clid, NULL)) {
			break;
		}

		/*
		 * Connection filter, if cluster label is not empty it must
		 * match the span cluster label.  Only applies when the
		 * peer_type matches.
		 */
		if (lspan->peer_type == lconn->peer_type &&
		    lconn->cl_label[0] &&
		    strcmp(lconn->cl_label, slink->node->cls->cl_label)) {
			break;
		}

		/*
		 * NOTE! pfs_fsid differentiates nodes within the same cluster
		 *	 so we obviously don't want to match those.  Similarly
		 *	 for fs_label.
		 */

		/*
		 * Ok, we've accepted this SPAN for relaying.
		 */
		assert(relay == NULL ||
		       relay->source_rt->any.link->node != slink->node ||
		       relay->source_rt->any.link->dist >= slink->dist);
		relay = dmsg_generate_relay(conn, slink);
		lastdist = slink->dist;
		lastrnss = slink->rnss;

		/*
		 * Match (created new relay), get the next relay to
		 * match against the next slink.
		 */
		relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
	}

	/*
	 * Any remaining relays belonging to this connection which match
	 * the node are in excess of the current aggregate spanning state
	 * and should be removed.
	 */
	while (relay && relay->source_rt->any.link->node == node) {
		next_relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
		dmsg_relay_delete(relay);
		relay = next_relay;
	}
}

/*
 * Helper function to generate missing relay.
 *
 * cluster_mtx must be held
 */
static
h2span_relay_t *
dmsg_generate_relay(h2span_conn_t *conn, h2span_link_t *slink)
{
	h2span_relay_t *relay;
	h2span_node_t *node;
	dmsg_msg_t *msg;

	node = slink->node;

	relay = dmsg_alloc(sizeof(*relay));
	relay->conn = conn;
	relay->source_rt = slink->state;
	/* relay->source_rt->any.link = slink; */

	/*
	 * NOTE: relay->target_rt->any.relay set to relay by alloc.
	 */
	msg = dmsg_msg_alloc(&conn->state->iocom->circuit0,
			     0, DMSG_LNK_SPAN | DMSGF_CREATE,
			     dmsg_lnk_relay, relay);
	relay->target_rt = msg->state;

	msg->any.lnk_span = slink->state->msg->any.lnk_span;
	msg->any.lnk_span.dist = slink->dist + 1;
	msg->any.lnk_span.rnss = slink->rnss + dmsg_rnss();

	RB_INSERT(h2span_relay_tree, &conn->tree, relay);
	TAILQ_INSERT_TAIL(&slink->relayq, relay, entry);

	dmsg_msg_write(msg);

	return (relay);
}

/*
 * Messages received on relay SPANs.  These are open transactions so it is
 * in fact possible for the other end to close the transaction.
 *
 * XXX MPRACE on state structure
 */
static void
dmsg_lnk_relay(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	h2span_relay_t *relay;

	assert(msg->any.head.cmd & DMSGF_REPLY);

	if (msg->any.head.cmd & DMSGF_DELETE) {
		pthread_mutex_lock(&cluster_mtx);
		if ((relay = state->any.relay) != NULL) {
			dmsg_relay_delete(relay);
		} else {
			dmsg_state_reply(state, 0);
		}
		pthread_mutex_unlock(&cluster_mtx);
	}
}

static
void
dmsg_relay_delete(h2span_relay_t *relay)
{
	fprintf(stderr,
		"RELAY DELETE %p RELAY %p ON CLS=%p NODE=%p DIST=%d FD %d STATE %p\n",
		relay->source_rt->any.link,
		relay,
		relay->source_rt->any.link->node->cls,
		relay->source_rt->any.link->node,
		relay->source_rt->any.link->dist,
		relay->conn->state->iocom->sock_fd,
		relay->target_rt);

	RB_REMOVE(h2span_relay_tree, &relay->conn->tree, relay);
	TAILQ_REMOVE(&relay->source_rt->any.link->relayq, relay, entry);

	if (relay->target_rt) {
		relay->target_rt->any.relay = NULL;
		dmsg_state_reply(relay->target_rt, 0);
		/* state invalid after reply */
		relay->target_rt = NULL;
	}
	relay->conn = NULL;
	relay->source_rt = NULL;
	dmsg_free(relay);
}

static void *
dmsg_volconf_thread(void *info)
{
	h2span_media_config_t *conf = info;

	pthread_mutex_lock(&cluster_mtx);
	while ((conf->ctl & H2CONFCTL_STOP) == 0) {
		if (conf->ctl & H2CONFCTL_UPDATE) {
			fprintf(stderr, "VOLCONF UPDATE\n");
			conf->ctl &= ~H2CONFCTL_UPDATE;
			if (bcmp(&conf->copy_run, &conf->copy_pend,
				 sizeof(conf->copy_run)) == 0) {
				fprintf(stderr, "VOLCONF: no changes\n");
				continue;
			}
			/*
			 * XXX TODO - auto reconnect on lookup failure or
			 *	      connect failure or stream failure.
			 */

			pthread_mutex_unlock(&cluster_mtx);
			dmsg_volconf_stop(conf);
			conf->copy_run = conf->copy_pend;
			if (conf->copy_run.copyid != 0 &&
			    strncmp(conf->copy_run.path, "span:", 5) == 0) {
				dmsg_volconf_start(conf,
						   conf->copy_run.path + 5);
			}
			pthread_mutex_lock(&cluster_mtx);
			fprintf(stderr, "VOLCONF UPDATE DONE state %d\n",
				conf->state);
		}
		if (conf->state == H2MC_CONNECT) {
			dmsg_volconf_start(conf, conf->copy_run.path + 5);
			pthread_mutex_unlock(&cluster_mtx);
			sleep(5);
			pthread_mutex_lock(&cluster_mtx);
		} else {
			pthread_cond_wait(&conf->cond, &cluster_mtx);
		}
	}
	pthread_mutex_unlock(&cluster_mtx);
	dmsg_volconf_stop(conf);
	return(NULL);
}

static
void
dmsg_volconf_stop(h2span_media_config_t *conf)
{
	switch(conf->state) {
	case H2MC_STOPPED:
		break;
	case H2MC_CONNECT:
		conf->state = H2MC_STOPPED;
		break;
	case H2MC_RUNNING:
		shutdown(conf->fd, SHUT_WR);
		pthread_join(conf->iocom_thread, NULL);
		conf->iocom_thread = NULL;
		break;
	}
}

static
void
dmsg_volconf_start(h2span_media_config_t *conf, const char *hostname)
{
	dmsg_master_service_info_t *info;

	switch(conf->state) {
	case H2MC_STOPPED:
	case H2MC_CONNECT:
		conf->fd = dmsg_connect(hostname);
		if (conf->fd < 0) {
			fprintf(stderr, "Unable to connect to %s\n", hostname);
			conf->state = H2MC_CONNECT;
		} else {
			info = malloc(sizeof(*info));
			bzero(info, sizeof(*info));
			info->fd = conf->fd;
			info->detachme = 0;
			conf->state = H2MC_RUNNING;
			pthread_create(&conf->iocom_thread, NULL,
				       dmsg_master_service, info);
		}
		break;
	case H2MC_RUNNING:
		break;
	}
}
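
/*
 * Media config state transitions (sketch, derived from the two functions
 * above and dmsg_volconf_thread()):
 *
 *	H2MC_STOPPED/H2MC_CONNECT --start, connect ok---> H2MC_RUNNING
 *	H2MC_STOPPED/H2MC_CONNECT --start, connect fail-> H2MC_CONNECT
 *				    (retried by the thread every 5 seconds)
 *	H2MC_CONNECT --stop--> H2MC_STOPPED
 *	H2MC_RUNNING --stop--> shutdown(fd) + join of the iocom thread
 */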

/************************************************************************
 *			MESSAGE ROUTING AND SOURCE VALIDATION		*
 ************************************************************************/

int
dmsg_circuit_relay(dmsg_msg_t *msg)
{
	dmsg_iocom_t *iocom = msg->iocom;
	dmsg_circuit_t *circ;
	dmsg_circuit_t *peer;
	dmsg_circuit_t dummy;
	int error = 0;

	/*
	 * Relay occurs before any state processing, msg state should always
	 * be NULL.
	 */
	assert(msg->state == NULL);

	/*
	 * Lookup the circuit on the incoming iocom.
	 */
	pthread_mutex_lock(&cluster_mtx);

	dummy.msgid = msg->any.head.circuit;
	circ = RB_FIND(dmsg_circuit_tree, &iocom->circuit_tree, &dummy);
	assert(circ);
	peer = circ->peer;

	msg->iocom = peer->iocom;
	msg->any.head.circuit = peer->msgid;

	pthread_mutex_unlock(&cluster_mtx);

	fprintf(stderr, "ROUTE MESSAGE VC %08x to %08x\n",
		(uint32_t)circ->msgid, (uint32_t)peer->msgid); /* brevity */
	dmsg_msg_write(msg);
	error = DMSG_IOQ_ERROR_ROUTED;

	return error;
}

/************************************************************************
 *			ROUTER AND MESSAGING HANDLES			*
 ************************************************************************
 *
 * Basically the idea here is to provide a stable data structure which
 * can be localized to the caller for higher level protocols to work with.
 * Depending on the context, these dmsg_handle's can be pooled by use-case
 * and remain persistent through a client's (or mount point's) life.
 */

#if 0
/*
 * Obtain a stable handle on a cluster given its uuid.  This ties directly
 * into the global cluster topology, creating the structure if necessary
 * (even if the uuid does not exist or does not exist yet), and preventing
 * the structure from getting ripped out from under us while we hold a
 * pointer to it.
 */
h2span_cluster_t *
dmsg_cluster_get(uuid_t *pfs_clid)
{
	h2span_cluster_t dummy_cls;
	h2span_cluster_t *cls;

	dummy_cls.pfs_clid = *pfs_clid;
	pthread_mutex_lock(&cluster_mtx);
	cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
	if (cls)
		++cls->refs;
	pthread_mutex_unlock(&cluster_mtx);
	return (cls);
}

void
dmsg_cluster_put(h2span_cluster_t *cls)
{
	pthread_mutex_lock(&cluster_mtx);
	assert(cls->refs > 0);
	--cls->refs;
	if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
		RB_REMOVE(h2span_cluster_tree,
			  &cluster_tree, cls);
		dmsg_free(cls);
	}
	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Obtain a stable handle to a specific cluster node given its uuid.
 * This handle does NOT lock in the route to the node and is typically
 * used as part of the dmsg_handle_*() API to obtain a set of
 * stable nodes.
 */
h2span_node_t *
dmsg_node_get(h2span_cluster_t *cls, uuid_t *pfs_fsid)
{
}

#endif

/*
 * Dumps the spanning tree
 */
void
dmsg_shell_tree(dmsg_circuit_t *circuit, char *cmdbuf __unused)
{
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	char *uustr = NULL;

	pthread_mutex_lock(&cluster_mtx);
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		dmsg_circuit_printf(circuit, "Cluster %s %s (%s)\n",
				    dmsg_peer_type_to_str(cls->peer_type),
				    dmsg_uuid_to_str(&cls->pfs_clid, &uustr),
				    cls->cl_label);
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			dmsg_circuit_printf(circuit, "    Node %s %s (%s)\n",
				dmsg_pfs_type_to_str(node->pfs_type),
				dmsg_uuid_to_str(&node->pfs_fsid, &uustr),
				node->fs_label);
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				dmsg_circuit_printf(circuit,
					    "\tLink dist=%d via %d\n",
					    slink->dist,
					    slink->state->iocom->sock_fd);
			}
		}
	}
	pthread_mutex_unlock(&cluster_mtx);
	if (uustr)
		free(uustr);
#if 0
	TAILQ_FOREACH(conn, &connq, entry) {
	}
#endif
}

/*
 * Random number sub-sort value to add to SPAN rnss fields on relay.
 * This allows us to differentiate spans with the same <dist> field
 * for relaying purposes.  We must normally limit the number of relays
 * for any given SPAN origination but we must also guarantee that a
 * symmetric reverse path exists, so we use the rnss field as a sub-sort
 * (since there can be thousands or millions of spans if we only match
 * on <dist>), and if there are STILL too many spans we go past the limit.
 */
static
uint32_t
dmsg_rnss(void)
{
	if (DMsgRNSS == 0) {
		pthread_mutex_lock(&cluster_mtx);
		while (DMsgRNSS == 0) {
			srandomdev();
			DMsgRNSS = random();
		}
		pthread_mutex_unlock(&cluster_mtx);
	}
	return(DMsgRNSS);
}
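
/*
 * Illustrative example (comment only): a SPAN received with, say,
 * {dist=2, rnss=R} is relayed out by dmsg_generate_relay() as
 * {dist=3, rnss=R + DMsgRNSS}, so two equal-distance paths passing
 * through different hosts almost always end up with different rnss
 * sub-sort values, giving the relay selection a stable tie-breaker.
 */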