/*
 * Copyright (c) 2012-2014 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * LNK_SPAN PROTOCOL SUPPORT FUNCTIONS - Please see sys/dmsg.h for an
 * involved explanation of the protocol.
 */

#include "dmsg_local.h"

/*
 * Maximum spanning tree distance.  This has the practical effect of
 * stopping tail-chasing closed loops when a feeder span is lost.
 */
#define DMSG_SPAN_MAXDIST	16

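/*
 * Illustrative note (added, not in the original source): each relay hop
 * re-advertises a received SPAN with dist+1 (see dmsg_generate_relay()),
 * and dmsg_relay_scan_specific() refuses to relay a SPAN whose dist
 * exceeds DMSG_SPAN_MAXDIST, so SPAN state circulating a closed loop
 * dies out after at most 16 hops instead of chasing its own tail.
 */
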
/*
 * RED-BLACK TREE DEFINITIONS
 *
 * We need to track:
 *
 * (1) shared fsid's (a cluster).
 * (2) unique fsid's (a node in a cluster) <--- LNK_SPAN transactions.
 *
 * We need to aggregate all active LNK_SPANs and create our own
 * outgoing LNK_SPAN transactions on each of our connections representing
 * the aggregated state.
 *
 * h2span_conn       - list of iocom connections who wish to receive SPAN
 *                     propagation from other connections.  Might contain
 *                     a filter string.  Only iocoms with an open
 *                     LNK_CONN transaction are applicable for SPAN
 *                     propagation.
 *
 * h2span_relay      - List of links relayed (via SPAN).  Essentially
 *                     each relay structure represents a LNK_SPAN
 *                     transaction that we initiated, versus h2span_link
 *                     which is a LNK_SPAN transaction that we received.
 *
 * h2span_cluster    - Organizes the shared fsid's.  One structure for
 *                     each cluster.
 *
 * h2span_node       - Organizes the nodes in a cluster.  One structure
 *                     for each unique {cluster,node}, aka {fsid, pfs_fsid}.
 *
 * h2span_link       - Organizes all incoming and outgoing LNK_SPAN message
 *                     transactions related to a node.
 *
 *                     One h2span_link structure for each incoming LNK_SPAN
 *                     transaction.  Links selected for propagation back
 *                     out are also where the outgoing LNK_SPAN messages
 *                     are indexed into (so we can propagate changes).
 *
 *                     The h2span_link's use a red-black tree to sort the
 *                     distance hop metric for the incoming LNK_SPAN.  We
 *                     then select the top N for outgoing.  When the
 *                     topology changes the top N may also change and cause
 *                     new outgoing LNK_SPAN transactions to be opened
 *                     and less desirable ones to be closed, causing
 *                     transactional aborts within the message flow in
 *                     progress.
 *
 * Also note         - All outgoing LNK_SPAN message transactions are also
 *                     entered into a red-black tree for use by the routing
 *                     function.  This is handled by msg.c in the state
 *                     code, not here.
 */

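/*
 * Illustrative containment hierarchy (added summary of the above, not
 * from the original source):
 *
 *	cluster_tree
 *	  h2span_cluster {pfs_clid}	(shared fsid, one per cluster)
 *	    h2span_node {pfs_fsid}	(unique node within the cluster)
 *	      h2span_link		(incoming LNK_SPAN, sorted by
 *					 dist/rnss)
 *	        h2span_relay		(outgoing LNK_SPAN, one per
 *					 propagating h2span_conn)
 */
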
TAILQ_HEAD(h2span_conn_queue, h2span_conn);
TAILQ_HEAD(h2span_relay_queue, h2span_relay);

RB_HEAD(h2span_cluster_tree, h2span_cluster);
RB_HEAD(h2span_node_tree, h2span_node);
RB_HEAD(h2span_link_tree, h2span_link);
RB_HEAD(h2span_relay_tree, h2span_relay);

/*
 * Received LNK_CONN transaction enables SPAN protocol over connection.
 * (may contain filter).  Typically one for each mount and several may
 * share the same media.
 */
struct h2span_conn {
	TAILQ_ENTRY(h2span_conn) entry;
	struct h2span_relay_tree tree;
	dmsg_state_t	*state;
	dmsg_lnk_conn_t	lnk_conn;
};

/*
 * All received LNK_SPANs are organized by cluster (pfs_clid),
 * node (pfs_fsid), and link (received LNK_SPAN transaction).
 */
struct h2span_cluster {
	RB_ENTRY(h2span_cluster) rbnode;
	struct h2span_node_tree tree;
	uuid_t	pfs_clid;		/* shared fsid */
	uint8_t	peer_type;
	char	cl_label[128];		/* cluster label (typ PEER_BLOCK) */
	int	refs;			/* prevents destruction */
};

struct h2span_node {
	RB_ENTRY(h2span_node) rbnode;
	struct h2span_link_tree tree;
	struct h2span_cluster *cls;
	uint8_t	pfs_type;
	uuid_t	pfs_fsid;		/* unique fsid */
	char	fs_label[128];		/* fs label (typ PEER_HAMMER2) */
};

struct h2span_link {
	RB_ENTRY(h2span_link) rbnode;
	dmsg_state_t	*state;		/* state<->link */
	struct h2span_node *node;	/* related node */
	struct h2span_relay_queue relayq; /* relay out */
	dmsg_lnk_span_t	lnk_span;
};

/*
 * Any LNK_SPAN transactions we receive which are relayed out other
 * connections utilize this structure to track the LNK_SPAN transactions
 * we initiate (relay out) on other connections.  We only relay out
 * LNK_SPANs on connections we have an open CONN transaction for.
 *
 * The relay structure points to the outgoing LNK_SPAN trans (out_state)
 * and to the incoming LNK_SPAN transaction (in_state).  The relay
 * structure holds refs on the related states.
 *
 * In many respects this is the core of the protocol... actually figuring
 * out what LNK_SPANs to relay.  The spanid used for relaying is the
 * address of the 'state' structure, which is why h2span_relay has to
 * be entered into a RB-TREE based at h2span_conn (so we can look
 * up the spanid to validate it).
 */
struct h2span_relay {
	TAILQ_ENTRY(h2span_relay) entry;	/* from link */
	RB_ENTRY(h2span_relay) rbnode;		/* from h2span_conn */
	struct h2span_conn	*conn;		/* related CONN transaction */
	dmsg_state_t		*source_rt;	/* h2span_link state */
	dmsg_state_t		*target_rt;	/* h2span_relay state */
};

typedef struct h2span_conn h2span_conn_t;
typedef struct h2span_cluster h2span_cluster_t;
typedef struct h2span_node h2span_node_t;
typedef struct h2span_link h2span_link_t;
typedef struct h2span_relay h2span_relay_t;

#define dmsg_termstr(array)	_dmsg_termstr((array), sizeof(array))

static h2span_relay_t *dmsg_generate_relay(h2span_conn_t *conn,
					h2span_link_t *slink);
static uint32_t dmsg_rnss(void);

static void
_dmsg_termstr(char *base, size_t size)
{
	base[size-1] = 0;
}

/*
 * Cluster peer_type, uuid, AND label must match for a match
 */
static int
h2span_cluster_cmp(h2span_cluster_t *cls1, h2span_cluster_t *cls2)
{
	int r;

	if (cls1->peer_type < cls2->peer_type)
		return(-1);
	if (cls1->peer_type > cls2->peer_type)
		return(1);
	r = uuid_compare(&cls1->pfs_clid, &cls2->pfs_clid, NULL);
	if (r == 0)
		r = strcmp(cls1->cl_label, cls2->cl_label);
	return(r);
}

/*
 * Match against fs_label/pfs_fsid.  Together these two items represent a
 * unique node.  In most cases the primary differentiator is pfs_fsid but
 * we also string-match fs_label.
 */
static int
h2span_node_cmp(h2span_node_t *node1, h2span_node_t *node2)
{
	int r;

	r = strcmp(node1->fs_label, node2->fs_label);
	if (r == 0)
		r = uuid_compare(&node1->pfs_fsid, &node2->pfs_fsid, NULL);
	return(r);
}

/*
 * Sort/subsort must match h2span_relay_cmp() under any given node
 * to make the aggregation algorithm easier, so the best links are
 * in the same sorted order as the best relays.
 *
 * NOTE: We cannot use link*->state->msgid because this msgid is created
 *	 by each remote host and thus might wind up being the same.
 */
static int
h2span_link_cmp(h2span_link_t *link1, h2span_link_t *link2)
{
	if (link1->lnk_span.dist < link2->lnk_span.dist)
		return(-1);
	if (link1->lnk_span.dist > link2->lnk_span.dist)
		return(1);
	if (link1->lnk_span.rnss < link2->lnk_span.rnss)
		return(-1);
	if (link1->lnk_span.rnss > link2->lnk_span.rnss)
		return(1);
	if ((uintptr_t)link1->state < (uintptr_t)link2->state)
		return(-1);
	if ((uintptr_t)link1->state > (uintptr_t)link2->state)
		return(1);
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
	return(0);
}

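/*
 * Example ordering (added, illustrative): a link with dist=1 sorts ahead
 * of one with dist=2; at equal dist the smaller accumulated rnss sorts
 * first; the state pointer and msgid comparisons only break exact ties,
 * so the ordering is total.
 */
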
/*
 * Relay entries are sorted by node, subsorted by distance and link
 * address (so we can match up the conn->tree relay topology with
 * a node's link topology).
 */
static int
h2span_relay_cmp(h2span_relay_t *relay1, h2span_relay_t *relay2)
{
	h2span_link_t *link1 = relay1->source_rt->any.link;
	h2span_link_t *link2 = relay2->source_rt->any.link;

	if ((intptr_t)link1->node < (intptr_t)link2->node)
		return(-1);
	if ((intptr_t)link1->node > (intptr_t)link2->node)
		return(1);
	if (link1->lnk_span.dist < link2->lnk_span.dist)
		return(-1);
	if (link1->lnk_span.dist > link2->lnk_span.dist)
		return(1);
	if (link1->lnk_span.rnss < link2->lnk_span.rnss)
		return(-1);
	if (link1->lnk_span.rnss > link2->lnk_span.rnss)
		return(1);
	if ((uintptr_t)link1->state < (uintptr_t)link2->state)
		return(-1);
	if ((uintptr_t)link1->state > (uintptr_t)link2->state)
		return(1);
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
	return(0);
}

RB_PROTOTYPE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_PROTOTYPE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_PROTOTYPE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_PROTOTYPE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);

RB_GENERATE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_GENERATE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_GENERATE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_GENERATE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);

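/*
 * Note (added): the RB_GENERATE_STATIC() invocations above emit the
 * static insert/remove/find/next functions that the RB_INSERT(),
 * RB_REMOVE(), RB_FIND(), RB_NEXT(), RB_FOREACH() and RB_SCAN() macro
 * calls below expand to (see <sys/tree.h>).
 */
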
/*
 * Global mutex protects cluster_tree lookups, connq, mediaq.
 */
static pthread_mutex_t cluster_mtx;
static struct h2span_cluster_tree cluster_tree = RB_INITIALIZER(cluster_tree);
static struct h2span_conn_queue connq = TAILQ_HEAD_INITIALIZER(connq);
static struct dmsg_media_queue mediaq = TAILQ_HEAD_INITIALIZER(mediaq);

static void dmsg_lnk_span(dmsg_msg_t *msg);
static void dmsg_lnk_conn(dmsg_msg_t *msg);
static void dmsg_lnk_relay(dmsg_msg_t *msg);
static void dmsg_relay_scan(h2span_conn_t *conn, h2span_node_t *node);
static void dmsg_relay_delete(h2span_relay_t *relay);

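/*
 * Note (added): signal hook the iocom core calls to kick a full relay
 * rescan; it simply serializes on cluster_mtx and rescans every
 * (node, connection) pair.
 */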
void
dmsg_msg_lnk_signal(dmsg_iocom_t *iocom __unused)
{
	pthread_mutex_lock(&cluster_mtx);
	dmsg_relay_scan(NULL, NULL);
	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * DMSG_PROTO_LNK - Generic DMSG_PROTO_LNK.
 *	(incoming iocom lock not held)
 *
 * This function is typically called for one-way and opening-transactions
 * since state->func is assigned after that, but it will also be called
 * if no state->func is assigned on transaction-open.
 */
void
dmsg_msg_lnk(dmsg_msg_t *msg)
{
	dmsg_iocom_t *iocom = msg->state->iocom;

	switch(msg->tcmd & DMSGF_BASECMDMASK) {
	case DMSG_LNK_CONN:
		dmsg_lnk_conn(msg);
		break;
	case DMSG_LNK_SPAN:
		dmsg_lnk_span(msg);
		break;
	default:
		iocom->usrmsg_callback(msg, 1);
		/* state invalid after reply */
		break;
	}
}

/*
 * LNK_CONN - iocom identify message reception.
 *	(incoming iocom lock not held)
 *
 * Remote node identifies itself to us, sets up a SPAN filter, and gives us
 * the ok to start transmitting SPANs.
 */
static void
dmsg_lnk_conn(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	dmsg_iocom_t *iocom = state->iocom;
	h2span_conn_t *conn;
	h2span_relay_t *relay;
	dmsg_media_t *media;
	char *alloc = NULL;

	pthread_mutex_lock(&cluster_mtx);

	fprintf(stderr,
		"dmsg_lnk_conn: msg %p cmd %08x state %p "
		"txcmd %08x rxcmd %08x\n",
		msg, msg->any.head.cmd, state,
		state->txcmd, state->rxcmd);

	switch(msg->any.head.cmd & DMSGF_TRANSMASK) {
	case DMSG_LNK_CONN | DMSGF_CREATE:
	case DMSG_LNK_CONN | DMSGF_CREATE | DMSGF_DELETE:
		/*
		 * On transaction start we allocate a new h2span_conn and
		 * acknowledge the request, leaving the transaction open.
		 * We then relay priority-selected SPANs.
		 */
		fprintf(stderr, "LNK_CONN(%08x): %s/%s/%s\n",
			(uint32_t)msg->any.head.msgid,
			dmsg_uuid_to_str(&msg->any.lnk_conn.pfs_clid,
					 &alloc),
			msg->any.lnk_conn.cl_label,
			msg->any.lnk_conn.fs_label);
		free(alloc);
		alloc = NULL;

		conn = dmsg_alloc(sizeof(*conn));
		assert(state->iocom->conn == NULL);

		RB_INIT(&conn->tree);
		state->iocom->conn = conn;	/* XXX only one */
		state->iocom->conn_msgid = state->msgid;
		conn->state = state;
		state->func = dmsg_lnk_conn;
		state->any.conn = conn;
		TAILQ_INSERT_TAIL(&connq, conn, entry);
		conn->lnk_conn = msg->any.lnk_conn;

		/*
		 * Set up media tracking (several connections may share
		 * the same media).
		 */
		TAILQ_FOREACH(media, &mediaq, entry) {
			if (uuid_compare(&msg->any.lnk_conn.mediaid,
					 &media->mediaid, NULL) == 0) {
				break;
			}
		}
		if (media == NULL) {
			media = dmsg_alloc(sizeof(*media));
			media->mediaid = msg->any.lnk_conn.mediaid;
			TAILQ_INSERT_TAIL(&mediaq, media, entry);
		}
		state->media = media;
		++media->refs;

		if ((msg->any.head.cmd & DMSGF_DELETE) == 0) {
			iocom->usrmsg_callback(msg, 0);
			dmsg_msg_result(msg, 0);
			dmsg_iocom_signal(iocom);
			break;
		}
		/* FALL THROUGH */
	case DMSG_LNK_CONN | DMSGF_DELETE:
	case DMSG_LNK_ERROR | DMSGF_DELETE:
deleteconn:
		/*
		 * On transaction terminate we clean out our h2span_conn
		 * and acknowledge the request, closing the transaction.
		 */
		fprintf(stderr, "LNK_CONN: Terminated\n");
		conn = state->any.conn;
		assert(conn);

		/*
		 * Callback will clean out media config / user-opaque state
		 */
		media = state->media;
		--media->refs;
		if (media->refs == 0) {
			fprintf(stderr, "Media shutdown\n");
			TAILQ_REMOVE(&mediaq, media, entry);
			pthread_mutex_unlock(&cluster_mtx);
			iocom->usrmsg_callback(msg, 0);
			pthread_mutex_lock(&cluster_mtx);
			dmsg_free(media);
		}
		state->media = NULL;

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay transaction.
		 */
		while ((relay = RB_ROOT(&conn->tree)) != NULL) {
			dmsg_relay_delete(relay);
		}

		/*
		 * Clean out conn
		 */
		conn->state = NULL;
		msg->state->any.conn = NULL;
		msg->state->iocom->conn = NULL;
		TAILQ_REMOVE(&connq, conn, entry);
		dmsg_free(conn);

		dmsg_msg_reply(msg, 0);
		/* state invalid after reply */
		break;
	default:
		iocom->usrmsg_callback(msg, 1);
		if (msg->any.head.cmd & DMSGF_DELETE)
			goto deleteconn;
		dmsg_msg_reply(msg, DMSG_ERR_NOSUPP);
		break;
	}

	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * LNK_SPAN - Spanning tree protocol message reception
 *	(incoming iocom lock not held)
 *
 * Receive a spanning tree transactional message, creating or destroying
 * a SPAN and propagating it to other iocoms.
 */
static void
dmsg_lnk_span(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	dmsg_iocom_t *iocom = state->iocom;
	h2span_cluster_t dummy_cls;
	h2span_node_t dummy_node;
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	h2span_relay_t *relay;
	char *alloc = NULL;

	/*
	 * Ignore reply to LNK_SPAN.  The reply is expected and allows
	 * commands to flow in both directions on the open transaction.
	 * This will also ignore DMSGF_REPLY|DMSGF_DELETE messages.  Since
	 * we take no action if the other end unexpectedly closes their
	 * side of the transaction, we can ignore that too.
	 */
	if (msg->any.head.cmd & DMSGF_REPLY) {
		fprintf(stderr, "Ignore reply to LNK_SPAN\n");
		return;
	}

	pthread_mutex_lock(&cluster_mtx);

	/*
	 * On transaction start we initialize the tracking infrastructure
	 */
	if (msg->any.head.cmd & DMSGF_CREATE) {
		assert(state->func == NULL);
		state->func = dmsg_lnk_span;

		dmsg_termstr(msg->any.lnk_span.cl_label);
		dmsg_termstr(msg->any.lnk_span.fs_label);

		/*
		 * Find the cluster
		 */
		dummy_cls.pfs_clid = msg->any.lnk_span.pfs_clid;
		dummy_cls.peer_type = msg->any.lnk_span.peer_type;
		bcopy(msg->any.lnk_span.cl_label,
		      dummy_cls.cl_label,
		      sizeof(dummy_cls.cl_label));
		cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
		if (cls == NULL) {
			cls = dmsg_alloc(sizeof(*cls));
			cls->pfs_clid = msg->any.lnk_span.pfs_clid;
			cls->peer_type = msg->any.lnk_span.peer_type;
			bcopy(msg->any.lnk_span.cl_label,
			      cls->cl_label,
			      sizeof(cls->cl_label));
			RB_INIT(&cls->tree);
			RB_INSERT(h2span_cluster_tree, &cluster_tree, cls);
		}

		/*
		 * Find the node
		 */
		dummy_node.pfs_fsid = msg->any.lnk_span.pfs_fsid;
		bcopy(msg->any.lnk_span.fs_label, dummy_node.fs_label,
		      sizeof(dummy_node.fs_label));
		node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
		if (node == NULL) {
			node = dmsg_alloc(sizeof(*node));
			node->pfs_fsid = msg->any.lnk_span.pfs_fsid;
			node->pfs_type = msg->any.lnk_span.pfs_type;
			bcopy(msg->any.lnk_span.fs_label,
			      node->fs_label,
			      sizeof(node->fs_label));
			node->cls = cls;
			RB_INIT(&node->tree);
			RB_INSERT(h2span_node_tree, &cls->tree, node);
		}

		/*
		 * Create the link
		 *
		 * NOTE: Sub-transactions on the incoming SPAN can be used
		 *	 to talk to the originator.  We should not set-up
		 *	 state->relay for incoming SPANs since our sub-trans
		 *	 is running on the same interface (i.e. no actual
		 *	 relaying need be done).
		 *
		 * NOTE: Later on when we relay the SPAN out the outgoing
		 *	 SPAN state will be set up to relay back to this
		 *	 state.
		 *
		 * NOTE: It is possible for SPAN targets to send one-way
		 *	 messages to the originator but it is not possible
		 *	 for the originator to (currently) broadcast one-way
		 *	 messages to all of its SPAN targets.  The protocol
		 *	 allows such a feature to be added in the future.
		 */
		assert(state->any.link == NULL);
		slink = dmsg_alloc(sizeof(*slink));
		TAILQ_INIT(&slink->relayq);
		slink->node = node;
		slink->state = state;
		state->any.link = slink;
		slink->lnk_span = msg->any.lnk_span;

		RB_INSERT(h2span_link_tree, &node->tree, slink);

		fprintf(stderr,
			"LNK_SPAN(thr %p): %p %s cl=%s fs=%s dist=%d\n",
			(void *)pthread_self(),	/* XXX thread id */
			slink,
			dmsg_uuid_to_str(&msg->any.lnk_span.pfs_clid, &alloc),
			msg->any.lnk_span.cl_label,
			msg->any.lnk_span.fs_label,
			msg->any.lnk_span.dist);
		free(alloc);
		alloc = NULL;

		dmsg_relay_scan(NULL, node);

		/*
		 * Ack the open, which will issue a CREATE on our side, and
		 * leave the transaction open.  Necessary to allow the
		 * transaction to be used as a virtual circuit.
		 */
		dmsg_state_result(state, 0);
		dmsg_iocom_signal(iocom);
	}

	/*
	 * On transaction terminate we remove the tracking infrastructure.
	 */
	if (msg->any.head.cmd & DMSGF_DELETE) {
		slink = state->any.link;
		assert(slink != NULL);
		node = slink->node;
		cls = node->cls;

		fprintf(stderr, "LNK_DELE(thr %p): %p %s cl=%s fs=%s\n",
			(void *)pthread_self(),	/* XXX thread id */
			slink,
			dmsg_uuid_to_str(&cls->pfs_clid, &alloc),
			cls->cl_label,
			node->fs_label);
		free(alloc);
		alloc = NULL;

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay transaction.
		 */
		while ((relay = TAILQ_FIRST(&slink->relayq)) != NULL) {
			dmsg_relay_delete(relay);
		}

		/*
		 * Clean out the topology
		 */
		RB_REMOVE(h2span_link_tree, &node->tree, slink);
		if (RB_EMPTY(&node->tree)) {
			RB_REMOVE(h2span_node_tree, &cls->tree, node);
			if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
				RB_REMOVE(h2span_cluster_tree,
					  &cluster_tree, cls);
				dmsg_free(cls);
			}
			node->cls = NULL;
			dmsg_free(node);
			node = NULL;
		}
		state->any.link = NULL;
		slink->state = NULL;
		slink->node = NULL;
		dmsg_free(slink);

		/*
		 * We have to terminate the transaction
		 */
		dmsg_state_reply(state, 0);
		/* state invalid after reply */

		/*
		 * If the node still exists issue any required updates.  If
		 * it doesn't then all related relays have already been
		 * removed and there's nothing left to do.
		 */
		if (node)
			dmsg_relay_scan(NULL, node);
		dmsg_iocom_signal(iocom);
	}

	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Update relay transactions for SPANs.
 *
 * Called with cluster_mtx held.
 */
static void dmsg_relay_scan_specific(h2span_node_t *node,
				h2span_conn_t *conn);

static void
dmsg_relay_scan(h2span_conn_t *conn, h2span_node_t *node)
{
	h2span_cluster_t *cls;

	if (node) {
		/*
		 * Iterate specific node
		 */
		TAILQ_FOREACH(conn, &connq, entry)
			dmsg_relay_scan_specific(node, conn);
	} else {
		/*
		 * Iterate cluster ids, nodes, and either a specific
		 * connection or all connections.
		 */
		RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
			/*
			 * Iterate nodes
			 */
			RB_FOREACH(node, h2span_node_tree, &cls->tree) {
				/*
				 * Synchronize the node's link (received
				 * SPANs) with each connection's relays.
				 */
				if (conn) {
					dmsg_relay_scan_specific(node, conn);
				} else {
					TAILQ_FOREACH(conn, &connq, entry) {
						dmsg_relay_scan_specific(node,
									 conn);
					}
					assert(conn == NULL);
				}
			}
		}
	}
}

/*
 * Update the relay'd SPANs for this (node, conn).
 *
 * Iterate links and adjust relays to match.  We only propagate the top link
 * for now (XXX we want to propagate the top two).
 *
 * The dmsg_relay_scan_cmp() function locates the first relay element
 * for any given node.  The relay elements will be sub-sorted by dist.
 */
struct relay_scan_info {
	h2span_node_t *node;
	h2span_relay_t *relay;
};

static int
dmsg_relay_scan_cmp(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	if ((intptr_t)relay->source_rt->any.link->node < (intptr_t)info->node)
		return(-1);
	if ((intptr_t)relay->source_rt->any.link->node > (intptr_t)info->node)
		return(1);
	return(0);
}

static int
dmsg_relay_scan_callback(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	info->relay = relay;
	return(-1);
}

static void
dmsg_relay_scan_specific(h2span_node_t *node, h2span_conn_t *conn)
{
	struct relay_scan_info info;
	h2span_relay_t *relay;
	h2span_relay_t *next_relay;
	h2span_link_t *slink;
	dmsg_lnk_conn_t *lconn;
	dmsg_lnk_span_t *lspan;
	int count;
	int maxcount = 2;
#ifdef REQUIRE_SYMMETRICAL
	uint32_t lastdist = DMSG_SPAN_MAXDIST;
	uint32_t lastrnss = 0;
#endif

	info.node = node;
	info.relay = NULL;

	/*
	 * Locate the first related relay for the node on this connection.
	 * relay will be NULL if there were none.
	 */
	RB_SCAN(h2span_relay_tree, &conn->tree,
		dmsg_relay_scan_cmp, dmsg_relay_scan_callback, &info);
	relay = info.relay;
	info.relay = NULL;
	if (relay)
		assert(relay->source_rt->any.link->node == node);

	if (DMsgDebugOpt > 8)
		fprintf(stderr, "relay scan for connection %p\n", conn);

	/*
	 * Iterate the node's links (received SPANs) in distance order,
	 * lowest (best) dist first.
	 *
	 * PROPAGATE THE BEST LINKS OVER THE SPECIFIED CONNECTION.
	 *
	 * Track relays while iterating the best links and construct
	 * missing relays when necessary.
	 *
	 * (If some prior better link was removed it would have also
	 *  removed the relay, so the relay can only match exactly or
	 *  be worse).
	 */
	count = 0;
	RB_FOREACH(slink, h2span_link_tree, &node->tree) {
		/*
		 * Increment count of successful relays.  This isn't
		 * quite accurate if we break out but nothing after
		 * the loop uses (count).
		 *
		 * If count exceeds the maximum number of relays we desire
		 * we normally want to break out.  However, in order to
		 * guarantee a symmetric path we have to continue if both
		 * (dist) and (rnss) continue to match.  Otherwise the SPAN
		 * propagation in the reverse direction may choose different
		 * routes and we will not have a symmetric path.
		 *
		 * NOTE: Spanning tree does not have to be symmetrical so
		 *	 this code is not currently enabled.
		 */
		if (++count >= maxcount) {
#ifdef REQUIRE_SYMMETRICAL
			if (lastdist != slink->lnk_span.dist ||
			    lastrnss != slink->lnk_span.rnss) {
				break;
			}
#else
			break;
#endif
			/* go beyond the nominal maximum desired relays */
		}

		/*
		 * Match, relay already in-place, get the next
		 * relay to match against the next slink.
		 */
		if (relay && relay->source_rt->any.link == slink) {
			relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
			continue;
		}

		/*
		 * We might want this SLINK, if it passes our filters.
		 *
		 * The spanning tree can cause closed loops so we have
		 * to limit slink->dist.
		 */
		if (slink->lnk_span.dist > DMSG_SPAN_MAXDIST)
			break;

		/*
		 * Don't bother transmitting a LNK_SPAN out the same
		 * connection it came in on.  Trivial optimization.
		 */
		if (slink->state->iocom == conn->state->iocom)
			break;

		/*
		 * NOTE ON FILTERS: The protocol spec allows non-requested
		 * SPANs to be transmitted, the other end is expected to
		 * leave their transactions open but otherwise ignore them.
		 *
		 * Don't bother transmitting if the remote connection
		 * is not accepting this SPAN's peer_type.
		 *
		 * pfs_mask is typically used so pure clients can filter
		 * out receiving SPANs for other pure clients.
		 */
		lspan = &slink->lnk_span;
		lconn = &conn->lnk_conn;
		if (((1LLU << lspan->peer_type) & lconn->peer_mask) == 0)
			break;
		if (((1LLU << lspan->pfs_type) & lconn->pfs_mask) == 0)
			break;

		/*
		 * Do not give pure clients visibility to other pure clients
		 */
		if (lconn->pfs_type == DMSG_PFSTYPE_CLIENT &&
		    lspan->pfs_type == DMSG_PFSTYPE_CLIENT) {
			break;
		}

		/*
		 * Connection filter, if cluster uuid is not NULL it must
		 * match the span cluster uuid.  Only applies when the
		 * peer_type matches.
		 */
		if (lspan->peer_type == lconn->peer_type &&
		    !uuid_is_nil(&lconn->pfs_clid, NULL) &&
		    uuid_compare(&slink->node->cls->pfs_clid,
				 &lconn->pfs_clid, NULL)) {
			break;
		}

		/*
		 * Connection filter, if cluster label is not empty it must
		 * match the span cluster label.  Only applies when the
		 * peer_type matches.
		 */
		if (lspan->peer_type == lconn->peer_type &&
		    lconn->cl_label[0] &&
		    strcmp(lconn->cl_label, slink->node->cls->cl_label)) {
			break;
		}

		/*
		 * NOTE! pfs_fsid differentiates nodes within the same cluster
		 *	 so we obviously don't want to match those.  Similarly
		 *	 for fs_label.
		 *
		 * Ok, we've accepted this SPAN for relaying.
		 */
		assert(relay == NULL ||
		       relay->source_rt->any.link->node != slink->node ||
		       relay->source_rt->any.link->lnk_span.dist >=
		        slink->lnk_span.dist);
		relay = dmsg_generate_relay(conn, slink);
#ifdef REQUIRE_SYMMETRICAL
		lastdist = slink->lnk_span.dist;
		lastrnss = slink->lnk_span.rnss;
#endif

		/*
		 * Match (created new relay), get the next relay to
		 * match against the next slink.
		 */
		relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
	}

	/*
	 * Any remaining relays belonging to this connection which match
	 * the node are in excess of the current aggregate spanning state
	 * and should be removed.
	 */
	while (relay && relay->source_rt->any.link->node == node) {
		next_relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
		fprintf(stderr, "RELAY DELETE FROM EXTRAS\n");
		dmsg_relay_delete(relay);
		relay = next_relay;
	}
}

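/*
 * Note (added): the function above is effectively a merge pass.  Because
 * h2span_link_cmp() and h2span_relay_cmp() impose the same ordering, the
 * node's link tree and the per-connection relay tree can be walked in
 * parallel, creating relays for newly-qualified links and pruning relays
 * whose links have disappeared.
 */
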
/*
 * Find the slink associated with the msgid and return its state,
 * so the caller can issue a transaction.
 */
dmsg_state_t *
dmsg_findspan(const char *label)
{
	dmsg_state_t *state;
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	uint64_t msgid = strtoull(label, NULL, 16);

	pthread_mutex_lock(&cluster_mtx);

	state = NULL;
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				if (slink->state->msgid == msgid) {
					state = slink->state;
					goto done;
				}
			}
		}
	}
done:
	pthread_mutex_unlock(&cluster_mtx);

	fprintf(stderr, "findspan: %p\n", state);

	return (state);
}

/*
 * Helper function to generate missing relay on target connection.
 *
 * cluster_mtx must be held
 */
static h2span_relay_t *
dmsg_generate_relay(h2span_conn_t *conn, h2span_link_t *slink)
{
	h2span_relay_t *relay;
	dmsg_msg_t *msg;

	relay = dmsg_alloc(sizeof(*relay));
	relay->conn = conn;
	relay->source_rt = slink->state;
	/* relay->source_rt->any.link = slink; */

	/*
	 * NOTE: relay->target_rt->any.relay set to relay by alloc.
	 *
	 * NOTE: LNK_SPAN is transmitted as a top-level transaction.
	 */
	msg = dmsg_msg_alloc(&conn->state->iocom->state0,
			     0, DMSG_LNK_SPAN | DMSGF_CREATE,
			     dmsg_lnk_relay, relay);
	relay->target_rt = msg->state;

	msg->any.lnk_span = slink->lnk_span;
	msg->any.lnk_span.dist = slink->lnk_span.dist + 1;
	msg->any.lnk_span.rnss = slink->lnk_span.rnss + dmsg_rnss();

	RB_INSERT(h2span_relay_tree, &conn->tree, relay);
	TAILQ_INSERT_TAIL(&slink->relayq, relay, entry);

	/*
	 * Seed the relay so new sub-transactions received on the outgoing
	 * SPAN circuit are relayed back to the originator.
	 */
	msg->state->relay = relay->source_rt;
	dmsg_state_hold(msg->state->relay);

	dmsg_msg_write(msg);

	return (relay);
}

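/*
 * Example (added, illustrative): a SPAN received with dist=2 is
 * re-advertised above with dist=3, and its rnss field accumulates this
 * host's random sub-sort value, so parallel equal-dist paths remain
 * distinguishable when the relay set is trimmed.
 */
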
/*
 * Messages received on relay SPANs.  These are open transactions so it is
 * in fact possible for the other end to close the transaction.
 *
 * XXX MPRACE on state structure
 */
static void
dmsg_lnk_relay(dmsg_msg_t *msg)
{
	dmsg_state_t *state = msg->state;
	h2span_relay_t *relay;

	assert(msg->any.head.cmd & DMSGF_REPLY);

	if (msg->any.head.cmd & DMSGF_DELETE) {
		pthread_mutex_lock(&cluster_mtx);
		fprintf(stderr, "RELAY DELETE FROM LNK_RELAY MSG\n");
		if ((relay = state->any.relay) != NULL) {
			dmsg_relay_delete(relay);
		} else {
			dmsg_state_reply(state, 0);
		}
		pthread_mutex_unlock(&cluster_mtx);
	}
}

/*
 * cluster_mtx held by caller
 */
static void
dmsg_relay_delete(h2span_relay_t *relay)
{
	fprintf(stderr,
		"RELAY DELETE %p RELAY %p ON CLS=%p NODE=%p "
		"DIST=%d FD %d STATE %p\n",
		relay->source_rt->any.link,
		relay,
		relay->source_rt->any.link->node->cls,
		relay->source_rt->any.link->node,
		relay->source_rt->any.link->lnk_span.dist,
		relay->conn->state->iocom->sock_fd,
		relay->source_rt);

	RB_REMOVE(h2span_relay_tree, &relay->conn->tree, relay);
	TAILQ_REMOVE(&relay->source_rt->any.link->relayq, relay, entry);

	if (relay->target_rt) {
		relay->target_rt->any.relay = NULL;
		dmsg_state_reply(relay->target_rt, 0);
		/* state invalid after reply */
		relay->target_rt = NULL;
	}

	/*
	 * NOTE: relay->source_rt->refs is held by the relay SPAN
	 *	 state, not by this relay structure.
	 */
	relay->conn = NULL;
	relay->source_rt = NULL;
	dmsg_free(relay);
}

/************************************************************************
 *			ROUTER AND MESSAGING HANDLES			*
 ************************************************************************
 *
 * Basically the idea here is to provide a stable data structure which
 * can be localized to the caller for higher level protocols to work with.
 * Depending on the context, these dmsg_handle's can be pooled by use-case
 * and remain persistent through a client's (or mount point's) life.
 */

/*
 * Obtain a stable handle on a cluster given its uuid.  This ties directly
 * into the global cluster topology, creating the structure if necessary
 * (even if the uuid does not exist or does not exist yet), and preventing
 * the structure from getting ripped out from under us while we hold a
 * ref.
 */
h2span_cluster_t *
dmsg_cluster_get(uuid_t *pfs_clid)
{
	h2span_cluster_t dummy_cls;
	h2span_cluster_t *cls;

	dummy_cls.pfs_clid = *pfs_clid;
	pthread_mutex_lock(&cluster_mtx);
	cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
	if (cls)
		++cls->refs;
	pthread_mutex_unlock(&cluster_mtx);

	return (cls);
}

void
dmsg_cluster_put(h2span_cluster_t *cls)
{
	pthread_mutex_lock(&cluster_mtx);
	assert(cls->refs > 0);
	--cls->refs;
	if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
		RB_REMOVE(h2span_cluster_tree,
			  &cluster_tree, cls);
		dmsg_free(cls);
	}
	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Obtain a stable handle to a specific cluster node given its uuid.
 * This handle does NOT lock in the route to the node and is typically
 * used as part of the dmsg_handle_*() API to obtain a set of
 * stable nodes.
 *
 * XXX not yet implemented
 */
h2span_node_t *
dmsg_node_get(h2span_cluster_t *cls __unused, uuid_t *pfs_fsid __unused)
{
	return (NULL);		/* XXX placeholder, see above */
}

/*
 * Dumps the spanning tree
 *
 * DEBUG ONLY
 */
void
dmsg_shell_tree(dmsg_iocom_t *iocom, char *cmdbuf __unused)
{
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	h2span_relay_t *relay;
	char *uustr = NULL;

	pthread_mutex_lock(&cluster_mtx);
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		dmsg_printf(iocom, "Cluster %s %s (%s)\n",
			    dmsg_peer_type_to_str(cls->peer_type),
			    dmsg_uuid_to_str(&cls->pfs_clid, &uustr),
			    cls->cl_label);
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			dmsg_printf(iocom, "    Node %02x %s (%s)\n",
				    node->pfs_type,
				    dmsg_uuid_to_str(&node->pfs_fsid, &uustr),
				    node->fs_label);
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				dmsg_printf(iocom,
					    "\tSLink msgid %016jx "
					    "dist=%d fd %d\n",
					    (intmax_t)slink->state->msgid,
					    slink->lnk_span.dist,
					    slink->state->iocom->sock_fd);
				TAILQ_FOREACH(relay, &slink->relayq, entry) {
					dmsg_printf(iocom,
					    "\t    Relay-out msgid %016jx "
					    "fd %d\n",
					    (intmax_t)relay->target_rt->msgid,
					    relay->target_rt->iocom->sock_fd);
				}
			}
		}
	}
	pthread_mutex_unlock(&cluster_mtx);
	free(uustr);
#if 0
	TAILQ_FOREACH(conn, &connq, entry) {
	}
#endif
}

/*
 * Locate the state representing an incoming LNK_SPAN given its msgid.
 */
int
dmsg_debug_findspan(uint64_t msgid, dmsg_state_t **statep)
{
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;

	pthread_mutex_lock(&cluster_mtx);
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				if (slink->state->msgid == msgid) {
					*statep = slink->state;
					goto found;
				}
			}
		}
	}
	pthread_mutex_unlock(&cluster_mtx);
	*statep = NULL;
	return (ENOENT);
found:
	pthread_mutex_unlock(&cluster_mtx);
	return (0);
}

/*
 * Random number sub-sort value to add to SPAN rnss fields on relay.
 * This allows us to differentiate spans with the same <dist> field
 * for relaying purposes.  We must normally limit the number of relays
 * for any given SPAN origination but we must also guarantee that a
 * symmetric reverse path exists, so we use the rnss field as a sub-sort
 * (since there can be thousands or millions if we only match on <dist>),
 * and if there are STILL too many spans we go past the limit.
 */
static uint32_t
dmsg_rnss(void)
{
	if (DMsgRNSS == 0) {
		pthread_mutex_lock(&cluster_mtx);
		while (DMsgRNSS == 0) {
			srandomdev();
			DMsgRNSS = random();
		}
		pthread_mutex_unlock(&cluster_mtx);
	}
	return (DMsgRNSS);
}