/*
 * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * LNK_SPAN PROTOCOL SUPPORT FUNCTIONS
 *
 * This code supports the LNK_SPAN protocol.  Essentially all PFS's,
 * clients, and services rendezvous with the userland hammer2 service and
 * open LNK_SPAN transactions using a message header linkid of 0,
 * registering any PFS's they have connectivity to with us.
 *
 * Each registration maintains its own open LNK_SPAN message transaction.
 * The SPANs are collected, aggregated, and retransmitted over available
 * connections through the maintenance of additional LNK_SPAN message
 * transactions on each link.
 *
 * The msgid for each active LNK_SPAN transaction we receive allows us to
 * send a message to the target PFS (which might be one of many belonging
 * to the same cluster) by specifying that msgid as the linkid in any
 * message we send to the target PFS.
 *
 * Similarly, the msgid we allocate for any LNK_SPAN transaction we transmit
 * (and remember, we maintain multiple open LNK_SPAN transactions on
 * each connection representing the topology span, so every node sees every
 * other node as a separate open transaction) can be used by the other
 * end to route messages through us to another node, ultimately winding up
 * at the identified hammer2 PFS.  We have to adjust the spanid in the message
 * header at each hop to be representative of the outgoing LNK_SPAN we
 * are forwarding the message through.
 *
 * If we were to retransmit every LNK_SPAN transaction we receive it would
 * create a huge mess, so we have to aggregate all received LNK_SPAN
 * transactions, sort them by the fsid (the cluster) and sub-sort them by
 * the pfs_fsid (individual nodes in the cluster), and only retransmit
 * (create outgoing transactions) for a subset of the nearest distance-hops
 * for each individual node.
 *
 * The higher level protocols can then issue transactions to the nodes making
 * up a cluster to perform all actions required.
 *
 * Since this is a large topology and a spanning tree protocol, links can
 * go up and down all the time.  Any time a link goes down its transaction
 * is closed.  The transaction has to be closed on both ends before we can
 * delete (and potentially reuse) the related spanid.  The LNK_SPAN being
 * closed may have been propagated out to other connections and those related
 * LNK_SPANs are also closed.  Ultimately all routes via the lost LNK_SPAN
 * go away, and the closures eventually reach all sources and all targets.
 *
 * Any messages in-transit using a route that goes away will be thrown away.
 * Open transactions are only tracked at the two end-points.  When a link
 * failure propagates to an end-point the related open transactions lose
 * their spanid and are automatically aborted.
 *
 * It is important to note that internal route nodes cannot just associate
 * a lost LNK_SPAN transaction with another route to the same destination.
 * Message transactions MUST be serialized and MUST be ordered.  All messages
 * for a transaction must run over the same route.  So if the route used by
 * an active transaction is lost, the related messages will be fully aborted
 * and the higher protocol levels will retry as appropriate.
 *
 * FULLY ABORTING A ROUTED MESSAGE is handled via link-failure propagation
 * back to the originator.  Only the originator keeps track of a message.
 * Routers just pass it through.  If a route is lost during transit the
 * message is simply thrown away.
 *
 * It is also important to note that several paths to the same PFS can be
 * propagated along the same link, which allows concurrency and even
 * redundancy over several network interfaces or via different routes through
 * the topology.  Any given transaction will use only a single route but busy
 * servers will often have hundreds of transactions active simultaneously,
 * so having multiple active paths through the network topology for A<->B
 * will improve performance.
 *
 * Most protocols consolidate operations rather than simply relaying them.
 * This is particularly true of LEAF protocols (such as strict HAMMER2
 * clients), of which there can be millions connecting into the cluster at
 * various points.  The SPAN protocol is not used for these LEAF elements.
 *
 * Instead the primary service they connect to implements a proxy for the
 * client protocols so the core topology only has to propagate a couple of
 * LNK_SPANs and not millions.  LNK_SPANs are meant to be used only for
 * core master nodes and satellite slaves and cache nodes.
 */

#include "hammer2.h"
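/*
 * Illustrative sketch only (not part of the build): per-node selection of
 * the nearest received spans, as described above.  The h2span types are
 * defined further down in this file; example_pick_nearest() and the
 * "maxcount" limit are hypothetical (the code below currently relays only
 * the single best link per node).
 */
#if 0
static void
example_pick_nearest(h2span_node_t *node, int maxcount)
{
	h2span_link_t *slink;
	int count = 0;

	/* node->tree is sorted by (dist, msgid); best distance comes first */
	RB_FOREACH(slink, h2span_link_tree, &node->tree) {
		if (++count > maxcount)
			break;		/* only the nearest few are relayed */
		/* ... maintain an outgoing LNK_SPAN transaction for slink ... */
	}
}
#endif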
/*
 * Maximum spanning tree distance.  This has the practical effect of
 * stopping tail-chasing closed loops when a feeder span is lost.
 */
#define HAMMER2_SPAN_MAXDIST	16
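/*
 * Illustrative sketch only (not part of the build): when a span is relayed
 * (see hammer2_relay_scan_specific() below) its advertised distance grows by
 * one hop, and spans beyond HAMMER2_SPAN_MAXDIST are not re-propagated, so a
 * span orphaned by a lost feeder cannot circulate forever.
 */
#if 0
	if (slink->dist > HAMMER2_SPAN_MAXDIST)
		break;					/* too far, do not relay */
	msg->any.lnk_span.dist = slink->dist + 1;	/* one more hop outbound */
#endif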
/*
 * RED-BLACK TREE DEFINITIONS
 *
 * We need to track:
 *
 * (1) shared fsid's (a cluster).
 * (2) unique fsid's (a node in a cluster) <--- LNK_SPAN transactions.
 *
 * We need to aggregate all active LNK_SPANs and create our own outgoing
 * LNK_SPAN transactions on each of our connections representing the
 * aggregated state.
 *
 * h2span_connect	- list of iocom connections who wish to receive SPAN
 *			  propagation from other connections.  Might contain
 *			  a filter string.  Only iocom's with an open
 *			  LNK_CONN transaction are applicable for SPAN
 *			  propagation.
 *
 * h2span_relay		- List of links relayed (via SPAN).  Essentially
 *			  each relay structure represents a LNK_SPAN
 *			  transaction that we initiated, versus h2span_link
 *			  which is a LNK_SPAN transaction that we received.
 *
 * h2span_cluster	- Organizes the shared fsid's.  One structure for
 *			  each cluster.
 *
 * h2span_node		- Organizes the nodes in a cluster.  One structure
 *			  for each unique {cluster,node}, aka {fsid, pfs_fsid}.
 *
 * h2span_link		- Organizes all incoming and outgoing LNK_SPAN message
 *			  transactions related to a node.
 *
 *			  One h2span_link structure for each incoming LNK_SPAN
 *			  transaction.  Links selected for propagation back
 *			  out are also where the outgoing LNK_SPAN messages
 *			  are indexed into (so we can propagate changes).
 *
 *			  The h2span_link's use a red-black tree to sort the
 *			  distance hop metric for the incoming LNK_SPAN.  We
 *			  then select the top N for outgoing.  When the
 *			  topology changes the top N may also change and cause
 *			  new outgoing LNK_SPAN transactions to be opened
 *			  and less desirable ones to be closed, causing
 *			  transactional aborts within the message flow in
 *			  progress.
 *
 * Also note		- All outgoing LNK_SPAN message transactions are also
 *			  entered into a red-black tree for use by the routing
 *			  function.  This is handled by msg.c in the state
 *			  code.
 */
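/*
 * Illustrative sketch only (not part of the build): the containment described
 * above is walked cluster -> node -> link, exactly as shell_tree() does at
 * the bottom of this file.
 */
#if 0
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;

	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		/* one cluster per shared pfs_clid */
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			/* one node per unique pfs_fsid within the cluster */
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				/* one link per received LNK_SPAN, dist order */
			}
		}
	}
#endif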
TAILQ_HEAD(h2span_connect_queue, h2span_connect);
TAILQ_HEAD(h2span_relay_queue, h2span_relay);

RB_HEAD(h2span_cluster_tree, h2span_cluster);
RB_HEAD(h2span_node_tree, h2span_node);
RB_HEAD(h2span_link_tree, h2span_link);
RB_HEAD(h2span_relay_tree, h2span_relay);
/*
 * Received LNK_CONN transaction enables SPAN protocol over connection.
 * (may contain filter).
 */
struct h2span_connect {
	TAILQ_ENTRY(h2span_connect) entry;
	struct h2span_relay_tree tree;
	hammer2_state_t *state;
};
/*
 * All received LNK_SPANs are organized by cluster (pfs_clid),
 * node (pfs_fsid), and link (received LNK_SPAN transaction).
 */
struct h2span_cluster {
	RB_ENTRY(h2span_cluster) rbnode;
	struct h2span_node_tree tree;
	uuid_t	pfs_clid;		/* shared fsid */
};
struct h2span_node {
	RB_ENTRY(h2span_node) rbnode;
	struct h2span_link_tree tree;
	struct h2span_cluster *cls;
	uuid_t	pfs_fsid;		/* unique fsid */
	char	label[64];		/* cluster label (size assumed) */
};
struct h2span_link {
	RB_ENTRY(h2span_link) rbnode;
	hammer2_state_t	*state;		/* state<->link */
	struct h2span_node *node;	/* related node */
	uint32_t dist;			/* distance hops (type assumed) */
	struct h2span_relay_queue relayq; /* relay out */
	struct hammer2_router router;
};
/*
 * Any LNK_SPAN transactions we receive which are relayed out other
 * connections utilize this structure to track the LNK_SPAN transaction
 * we initiate on the other connections, if selected for relay.
 *
 * In many respects this is the core of the protocol... actually figuring
 * out what LNK_SPANs to relay.  The spanid used for relaying is the
 * address of the 'state' structure, which is why h2span_relay has to
 * be entered into a RB-TREE based at h2span_connect (so we can look
 * up the spanid to validate it).
 */
struct h2span_relay {
	RB_ENTRY(h2span_relay) rbnode;		/* from h2span_connect */
	TAILQ_ENTRY(h2span_relay) entry;	/* from link */
	struct h2span_connect *conn;
	hammer2_state_t	*state;			/* transmitted LNK_SPAN */
	struct h2span_link *link;		/* received LNK_SPAN */
};
typedef struct h2span_connect h2span_connect_t;
typedef struct h2span_cluster h2span_cluster_t;
typedef struct h2span_node h2span_node_t;
typedef struct h2span_link h2span_link_t;
typedef struct h2span_relay h2span_relay_t;

static int
h2span_cluster_cmp(h2span_cluster_t *cls1, h2span_cluster_t *cls2)
{
	return(uuid_compare(&cls1->pfs_clid, &cls2->pfs_clid, NULL));
}

static int
h2span_node_cmp(h2span_node_t *node1, h2span_node_t *node2)
{
	return(uuid_compare(&node1->pfs_fsid, &node2->pfs_fsid, NULL));
}

/*
 * NOTE: Sort/subsort must match h2span_relay_cmp() under any given
 *	 node.
 */
static int
h2span_link_cmp(h2span_link_t *link1, h2span_link_t *link2)
{
	if (link1->dist < link2->dist)
		return(-1);
	if (link1->dist > link2->dist)
		return(1);
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
	return(0);
}

/*
 * Relay entries are sorted by node, subsorted by distance and link
 * address (so we can match up the conn->tree relay topology with
 * a node's link topology).
 */
static int
h2span_relay_cmp(h2span_relay_t *relay1, h2span_relay_t *relay2)
{
	h2span_link_t *link1 = relay1->link;
	h2span_link_t *link2 = relay2->link;

	if ((intptr_t)link1->node < (intptr_t)link2->node)
		return(-1);
	if ((intptr_t)link1->node > (intptr_t)link2->node)
		return(1);
	if (link1->dist < link2->dist)
		return(-1);
	if (link1->dist > link2->dist)
		return(1);
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
	return(0);
}

RB_PROTOTYPE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_PROTOTYPE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_PROTOTYPE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_PROTOTYPE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);

RB_GENERATE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_GENERATE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_GENERATE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_GENERATE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);
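/*
 * Illustrative sketch only (not part of the build): lookups in these trees
 * are done with a stack "dummy" key holding just the compared field, as
 * hammer2_lnk_span() does below when it locates (or creates) a cluster.
 * "pfs_clid" here stands in for the uuid being looked up.
 */
#if 0
	h2span_cluster_t dummy_cls;
	h2span_cluster_t *cls;

	dummy_cls.pfs_clid = pfs_clid;		/* uuid we are looking for */
	cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
	if (cls == NULL) {
		cls = hammer2_alloc(sizeof(*cls));
		cls->pfs_clid = pfs_clid;
		RB_INIT(&cls->tree);
		RB_INSERT(h2span_cluster_tree, &cluster_tree, cls);
	}
#endif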
/*
 * Global mutex protects cluster_tree lookups.
 */
static pthread_mutex_t cluster_mtx;
static struct h2span_cluster_tree cluster_tree = RB_INITIALIZER(cluster_tree);
static struct h2span_connect_queue connq = TAILQ_HEAD_INITIALIZER(connq);
static void hammer2_lnk_span(hammer2_msg_t *msg);
static void hammer2_lnk_conn(hammer2_msg_t *msg);
static void hammer2_lnk_relay(hammer2_msg_t *msg);
static void hammer2_relay_scan(h2span_connect_t *conn, h2span_node_t *node);
static void hammer2_relay_delete(h2span_relay_t *relay);
void
hammer2_msg_lnk_signal(hammer2_router_t *router __unused)
{
	pthread_mutex_lock(&cluster_mtx);
	hammer2_relay_scan(NULL, NULL);
	pthread_mutex_unlock(&cluster_mtx);
}
/*
 * Receive a HAMMER2_MSG_PROTO_LNK message.  This is only called for
 * one-way and opening-transactions since state->func will be assigned
 * in all other cases.
 */
void
hammer2_msg_lnk(hammer2_msg_t *msg)
{
	switch(msg->any.head.cmd & HAMMER2_MSGF_BASECMDMASK) {
	case HAMMER2_LNK_CONN:
		hammer2_lnk_conn(msg);
		break;
	case HAMMER2_LNK_SPAN:
		hammer2_lnk_span(msg);
		break;
	default:
		fprintf(stderr,
			"MSG_PROTO_LNK: Unknown msg %08x\n", msg->any.head.cmd);
		hammer2_msg_reply(msg, HAMMER2_MSG_ERR_NOSUPP);
		/* state invalid after reply */
		break;
	}
}
static void
hammer2_lnk_conn(hammer2_msg_t *msg)
{
	hammer2_state_t *state = msg->state;
	h2span_connect_t *conn;
	h2span_relay_t *relay;
	char *alloc = NULL;

	pthread_mutex_lock(&cluster_mtx);

	/*
	 * On transaction start we allocate a new h2span_connect and
	 * acknowledge the request, leaving the transaction open.
	 * We then relay priority-selected SPANs.
	 */
	if (msg->any.head.cmd & HAMMER2_MSGF_CREATE) {
		state->func = hammer2_lnk_conn;

		fprintf(stderr, "LNK_CONN(%08x): %s/%s\n",
			(uint32_t)msg->any.head.msgid,
			hammer2_uuid_to_str(&msg->any.lnk_conn.pfs_clid,
					    &alloc),
			msg->any.lnk_conn.label);

		conn = hammer2_alloc(sizeof(*conn));

		RB_INIT(&conn->tree);
		conn->state = state;
		state->any.conn = conn;
		TAILQ_INSERT_TAIL(&connq, conn, entry);

		hammer2_msg_result(msg, 0);

		/*
		 * Span-synchronize all nodes with the new connection
		 */
		hammer2_relay_scan(conn, NULL);

		hammer2_router_signal(msg->router);
	}

	/*
	 * On transaction terminate we clean out our h2span_connect
	 * and acknowledge the request, closing the transaction.
	 */
	if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
		fprintf(stderr, "LNK_CONN: Terminated\n");
		conn = state->any.conn;

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay.
		 */
		while ((relay = RB_ROOT(&conn->tree)) != NULL) {
			hammer2_relay_delete(relay);
		}

		msg->state->any.conn = NULL;
		TAILQ_REMOVE(&connq, conn, entry);

		hammer2_msg_reply(msg, 0);
		/* state invalid after reply */
	}
	pthread_mutex_unlock(&cluster_mtx);
}
static void
hammer2_lnk_span(hammer2_msg_t *msg)
{
	hammer2_state_t *state = msg->state;
	h2span_cluster_t dummy_cls;
	h2span_node_t dummy_node;
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	h2span_relay_t *relay;
	char *alloc = NULL;

	assert((msg->any.head.cmd & HAMMER2_MSGF_REPLY) == 0);

	pthread_mutex_lock(&cluster_mtx);

	/*
	 * On transaction start we initialize the tracking infrastructure
	 */
	if (msg->any.head.cmd & HAMMER2_MSGF_CREATE) {
		assert(state->func == NULL);
		state->func = hammer2_lnk_span;

		msg->any.lnk_span.label[sizeof(msg->any.lnk_span.label)-1] = 0;

		dummy_cls.pfs_clid = msg->any.lnk_span.pfs_clid;
		cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
		if (cls == NULL) {
			cls = hammer2_alloc(sizeof(*cls));
			cls->pfs_clid = msg->any.lnk_span.pfs_clid;
			RB_INIT(&cls->tree);
			RB_INSERT(h2span_cluster_tree, &cluster_tree, cls);
		}

		dummy_node.pfs_fsid = msg->any.lnk_span.pfs_fsid;
		node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
		if (node == NULL) {
			node = hammer2_alloc(sizeof(*node));
			node->pfs_fsid = msg->any.lnk_span.pfs_fsid;
			node->cls = cls;
			RB_INIT(&node->tree);
			RB_INSERT(h2span_node_tree, &cls->tree, node);
			snprintf(node->label, sizeof(node->label),
				 "%s", msg->any.lnk_span.label);
		}

		assert(state->any.link == NULL);
		slink = hammer2_alloc(sizeof(*slink));
		TAILQ_INIT(&slink->relayq);
		slink->node = node;
		slink->dist = msg->any.lnk_span.dist;
		slink->state = state;
		state->any.link = slink;

		/*
		 * Embedded router structure in link for message forwarding.
		 */
		TAILQ_INIT(&slink->router.txmsgq);
		slink->router.iocom = state->iocom;
		slink->router.link = slink;

		RB_INSERT(h2span_link_tree, &node->tree, slink);

		fprintf(stderr, "LNK_SPAN(thr %p): %p %s/%s dist=%d\n",
			msg->router->iocom,
			slink,
			hammer2_uuid_to_str(&msg->any.lnk_span.pfs_clid,
					    &alloc),
			msg->any.lnk_span.label,
			msg->any.lnk_span.dist);

		hammer2_relay_scan(NULL, node);

		hammer2_router_signal(msg->router);
	}

	/*
	 * On transaction terminate we remove the tracking infrastructure.
	 */
	if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
		slink = state->any.link;
		assert(slink != NULL);
		node = slink->node;
		cls = node->cls;

		fprintf(stderr, "LNK_DELE(thr %p): %p %s/%s dist=%d\n",
			msg->router->iocom,
			slink,
			hammer2_uuid_to_str(&cls->pfs_clid, &alloc),
			state->msg->any.lnk_span.label,
			state->msg->any.lnk_span.dist);

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay.
		 */
		while ((relay = TAILQ_FIRST(&slink->relayq)) != NULL) {
			hammer2_relay_delete(relay);
		}

		/*
		 * Clean out the topology
		 */
		RB_REMOVE(h2span_link_tree, &node->tree, slink);
		if (RB_EMPTY(&node->tree)) {
			RB_REMOVE(h2span_node_tree, &cls->tree, node);
			if (RB_EMPTY(&cls->tree)) {
				RB_REMOVE(h2span_cluster_tree,
					  &cluster_tree, cls);
			}
		}
		state->any.link = NULL;

		/*
		 * We have to terminate the transaction
		 */
		hammer2_state_reply(state, 0);
		/* state invalid after reply */

		/*
		 * If the node still exists issue any required updates.  If
		 * it doesn't then all related relays have already been
		 * removed and there's nothing left to do.
		 */
		hammer2_relay_scan(NULL, node);

		hammer2_router_signal(msg->router);
	}

	pthread_mutex_unlock(&cluster_mtx);
}
/*
 * Messages received on relay SPANs.  These are open transactions so it is
 * in fact possible for the other end to close the transaction.
 *
 * XXX MPRACE on state structure
 */
static void
hammer2_lnk_relay(hammer2_msg_t *msg)
{
	hammer2_state_t *state = msg->state;
	h2span_relay_t *relay;

	assert(msg->any.head.cmd & HAMMER2_MSGF_REPLY);

	if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
		pthread_mutex_lock(&cluster_mtx);
		if ((relay = state->any.relay) != NULL) {
			hammer2_relay_delete(relay);
		} else {
			hammer2_state_reply(state, 0);
		}
		pthread_mutex_unlock(&cluster_mtx);
	}
}
/*
 * Update relay transactions for SPANs.
 * Called with cluster_mtx held.
 */
static void hammer2_relay_scan_specific(h2span_node_t *node,
					h2span_connect_t *conn);

static void
hammer2_relay_scan(h2span_connect_t *conn, h2span_node_t *node)
{
	h2span_cluster_t *cls;

	if (node) {
		/*
		 * Iterate specific node
		 */
		TAILQ_FOREACH(conn, &connq, entry)
			hammer2_relay_scan_specific(node, conn);
	} else {
		/*
		 * Iterate cluster ids, nodes, and either a specific connection
		 * or all connections.
		 */
		RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		    RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			/*
			 * Synchronize the node's link (received SPANs)
			 * with each connection's relays.
			 */
			if (conn) {
				hammer2_relay_scan_specific(node, conn);
			} else {
				TAILQ_FOREACH(conn, &connq, entry)
					hammer2_relay_scan_specific(node, conn);
				assert(conn == NULL);
			}
		    }
		}
	}
}
/*
 * Update the relay'd SPANs for this (node, conn).
 *
 * Iterate links and adjust relays to match.  We only propagate the top link
 * for now (XXX we want to propagate the top two).
 *
 * The hammer2_relay_scan_cmp() function locates the first relay element
 * for any given node.  The relay elements will be sub-sorted by dist.
 */
struct relay_scan_info {
	h2span_node_t *node;
	h2span_relay_t *relay;
};

static int
hammer2_relay_scan_cmp(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	if ((intptr_t)relay->link->node < (intptr_t)info->node)
		return(-1);
	if ((intptr_t)relay->link->node > (intptr_t)info->node)
		return(1);
	return(0);
}

static int
hammer2_relay_scan_callback(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	/* remember the first matching relay and stop the scan */
	info->relay = relay;
	return(-1);
}

static void
hammer2_relay_scan_specific(h2span_node_t *node, h2span_connect_t *conn)
{
	struct relay_scan_info info;
	h2span_relay_t *relay;
	h2span_relay_t *next_relay;
	h2span_link_t *slink;
	hammer2_msg_t *msg;

	info.node = node;
	info.relay = NULL;

	/*
	 * Locate the first related relay for the node on this connection.
	 * relay will be NULL if there were none.
	 */
	RB_SCAN(h2span_relay_tree, &conn->tree,
		hammer2_relay_scan_cmp, hammer2_relay_scan_callback, &info);
	relay = info.relay;
	if (relay)
		assert(relay->link->node == node);

	fprintf(stderr, "relay scan for connection %p\n", conn);

	/*
	 * Iterate the node's links (received SPANs) in distance order,
	 * lowest (best) dist first.
	 */
	/* fprintf(stderr, "LOOP\n"); */
	RB_FOREACH(slink, h2span_link_tree, &node->tree) {
		fprintf(stderr, "SLINK %p RELAY %p(%p)\n",
			slink, relay, relay ? relay->link : NULL);
		/*
		 * PROPAGATE THE BEST LINKS OVER THE SPECIFIED CONNECTION.
		 *
		 * Track relays while iterating the best links and construct
		 * missing relays when necessary.
		 *
		 * (If some prior better link was removed it would have also
		 * removed the relay, so the relay can only match exactly or
		 * be worse).
		 */
		if (relay && relay->link == slink) {
			/*
			 * Match, relay already in-place, get the next
			 * relay to match against the next slink.
			 */
			relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
		} else if (slink->dist > HAMMER2_SPAN_MAXDIST) {
			/*
			 * No match but span distance is too great,
			 * do not relay.  This prevents endless closed
			 * loops with ever-incrementing distances when
			 * the seed span is lost in the graph.
			 *
			 * All later spans will also be too far away so
			 * we can break out of the loop.
			 */
			break;
		} else {
			/*
			 * No match, distance is ok, construct a new relay.
			 * (slink is better than relay).
			 */
			assert(relay == NULL ||
			       relay->link->node != slink->node ||
			       relay->link->dist >= slink->dist);
			relay = hammer2_alloc(sizeof(*relay));
			relay->conn = conn;
			relay->link = slink;

			msg = hammer2_msg_alloc(&conn->state->iocom->router, 0,
						HAMMER2_LNK_SPAN |
						HAMMER2_MSGF_CREATE,
						hammer2_lnk_relay, relay);
			relay->state = msg->state;
			msg->any.lnk_span = slink->state->msg->any.lnk_span;
			msg->any.lnk_span.dist = slink->dist + 1;
			relay->state->any.relay = relay;

			RB_INSERT(h2span_relay_tree, &conn->tree, relay);
			TAILQ_INSERT_TAIL(&slink->relayq, relay, entry);

			hammer2_msg_write(msg);

			fprintf(stderr,
				"RELAY SPAN %p RELAY %p ON CLS=%p NODE=%p DIST=%d "
				"FD %d STATE %p\n",
				slink, relay,
				node->cls, node, slink->dist,
				conn->state->iocom->sock_fd, relay->state);

			/*
			 * Match (created new relay), get the next relay to
			 * match against the next slink.
			 */
			relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
		}
	}

	/*
	 * Any remaining relay's belonging to this connection which match
	 * the node are in excess of the current aggregate spanning state
	 * and should be removed.
	 */
	while (relay && relay->link->node == node) {
		next_relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
		hammer2_relay_delete(relay);
		relay = next_relay;
	}
}

static void
hammer2_relay_delete(h2span_relay_t *relay)
{
	fprintf(stderr,
		"RELAY DELETE %p RELAY %p ON CLS=%p NODE=%p DIST=%d FD %d STATE %p\n",
		relay->link, relay,
		relay->link->node->cls, relay->link->node,
		relay->link->dist,
		relay->conn->state->iocom->sock_fd, relay->state);

	RB_REMOVE(h2span_relay_tree, &relay->conn->tree, relay);
	TAILQ_REMOVE(&relay->link->relayq, relay, entry);

	if (relay->state) {
		relay->state->any.relay = NULL;
		hammer2_state_reply(relay->state, 0);
		/* state invalid after reply */
		relay->state = NULL;
	}
}
/************************************************************************
 *				ROUTER					*
 ************************************************************************
 *
 * Provides route functions to msg.c
 */

/*
 * Acquire a persistent router structure given the cluster and node ids.
 * Messages can be transacted via this structure while held.  If the route
 * is lost messages will return failure.
 */
hammer2_router_t *
hammer2_router_get(uuid_t *pfs_clid, uuid_t *pfs_fsid)
{
	return(NULL);	/* XXX not yet implemented */
}

/*
 * Release previously acquired router.
 */
void
hammer2_router_put(hammer2_router_t *router)
{
}
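/*
 * Illustrative sketch only (not part of the build): intended use of the
 * router get/put API described above, assuming the lookup is implemented.
 * hammer2_msg_alloc()/hammer2_msg_write() are used as elsewhere in this
 * file; "pfs_clid", "pfs_fsid" and "cmd" stand in for the caller's target
 * ids and transaction command.
 */
#if 0
	hammer2_router_t *router;
	hammer2_msg_t *msg;
	uint32_t cmd = 0;	/* placeholder transaction command */

	router = hammer2_router_get(&pfs_clid, &pfs_fsid);
	if (router) {
		msg = hammer2_msg_alloc(router, 0, cmd, NULL, NULL);
		hammer2_msg_write(msg);
		hammer2_router_put(router);
	}
#endif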
/*
 * Dumps the spanning tree
 */
void
shell_tree(hammer2_router_t *router, char *cmdbuf __unused)
{
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	h2span_connect_t *conn;
	char *uustr = NULL;

	pthread_mutex_lock(&cluster_mtx);
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		router_printf(router, "Cluster %s\n",
			      hammer2_uuid_to_str(&cls->pfs_clid, &uustr));
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			router_printf(router, "    Node %s (%s)\n",
				      hammer2_uuid_to_str(&node->pfs_fsid,
							  &uustr),
				      node->label);
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				router_printf(router, "\tLink dist=%d via %d\n",
					      slink->dist,
					      slink->state->iocom->sock_fd);
			}
		}
	}
	pthread_mutex_unlock(&cluster_mtx);

	TAILQ_FOREACH(conn, &connq, entry) {