/*
 * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * LNK_SPAN PROTOCOL SUPPORT FUNCTIONS
 *
 * This code supports the LNK_SPAN protocol.  Essentially all PFS's
 * clients and services rendezvous with the userland hammer2 service and
 * open LNK_SPAN transactions using a message header linkid of 0,
 * registering any PFS's they have connectivity to with us.
 *
 * --
 *
 * Each registration maintains its own open LNK_SPAN message transaction.
 * The SPANs are collected, aggregated, and retransmitted over available
 * connections through the maintenance of additional LNK_SPAN message
 * transactions on each link.
 *
 * The msgid for each active LNK_SPAN transaction we receive allows us to
 * send a message to the target PFS (which might be one of many belonging
 * to the same cluster), by specifying that msgid as the linkid in any
 * message we send to the target PFS.
 *
 * Similarly, we allocate a msgid for each LNK_SPAN transaction we transmit
 * (and remember, we maintain multiple open LNK_SPAN transactions on
 * each connection representing the topology span, so every node sees every
 * other node as a separate open transaction).  The msgid for these active
 * transactions which we initiated can likewise be used by the other
 * end to route messages through us to another node, ultimately winding up
 * at the identified hammer2 PFS.  We have to adjust the spanid in the message
 * header at each hop to be representative of the outgoing LNK_SPAN we
 * are forwarding the message through.
 *
 * --
 *
 * If we were to retransmit every LNK_SPAN transaction we receive it would
 * create a huge mess, so we have to aggregate all received LNK_SPAN
 * transactions, sort them by the fsid (the cluster) and sub-sort them by
 * the pfs_fsid (individual nodes in the cluster), and only retransmit
 * (create outgoing transactions) for a subset of the nearest distance-hops
 * for each individual node.
 *
 * The higher level protocols can then issue transactions to the nodes making
 * up a cluster to perform all actions required.
 *
 * --
 *
 * Since this is a large topology and a spanning tree protocol, links can
 * go up and down all the time.  Any time a link goes down its transaction
 * is closed.  The transaction has to be closed on both ends before we can
 * delete (and potentially reuse) the related spanid.  The LNK_SPAN being
 * closed may have been propagated out to other connections and those related
 * LNK_SPANs are also closed.  All routes via the lost LNK_SPAN eventually
 * go away, the closures ultimately reaching all sources and all targets.
 *
 * Any messages in-transit using a route that goes away will be thrown away.
 * Open transactions are only tracked at the two end-points.  When a link
 * failure propagates to an end-point the related open transactions lose
 * their spanid and are automatically aborted.
 *
 * It is important to note that internal route nodes cannot just associate
 * a lost LNK_SPAN transaction with another route to the same destination.
 * Message transactions MUST be serialized and MUST be ordered.  All messages
 * for a transaction must run over the same route.  So if the route used by
 * an active transaction is lost, the related messages will be fully aborted
 * and the higher protocol levels will retry as appropriate.
 *
 * It is also important to note that several paths to the same PFS can be
 * propagated along the same link, which allows concurrency and even
 * redundancy over several network interfaces or via different routes through
 * the topology.  Any given transaction will use only a single route but busy
 * servers will often have hundreds of transactions active simultaneously,
 * so having multiple active paths through the network topology for A<->B
 * will improve performance.
 *
 * --
 *
 * Most protocols consolidate operations rather than simply relaying them.
 * This is particularly true of LEAF protocols (such as strict HAMMER2
 * clients), of which there can be millions connecting into the cluster at
 * various points.  The SPAN protocol is not used for these LEAF elements.
 *
 * Instead the primary service they connect to implements a proxy for the
 * client protocols so the core topology only has to propagate a couple of
 * LNK_SPANs and not millions.  LNK_SPANs are meant to be used only for
 * core master nodes, satellite slaves, and cache nodes.
 */
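
/*
 * Worked example (illustrative only): in a chain A <-> B <-> C, node C's
 * LNK_SPAN arrives at B with dist 1.  If B relays it to A, B opens its
 * own LNK_SPAN transaction to A with dist bumped to 2 (see the
 * ++msg->any.lnk_span.dist in hammer2_relay_scan_specific() below), so
 * A sees C at dist 2 via B and can pick the nearest of several such
 * relayed paths.
 */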

#include "hammer2.h"

/*
 * RED-BLACK TREE DEFINITIONS
 *
 * We need to track:
 *
 * (1) shared fsid's (a cluster).
 * (2) unique fsid's (a node in a cluster) <--- LNK_SPAN transactions.
 *
 * We need to aggregate all active LNK_SPANs and create our own
 * outgoing LNK_SPAN transactions on each of our connections representing
 * the aggregated state.
 *
 * h2span_connect       - List of iocom connections that wish to receive SPAN
 *                        propagation from other connections.  Might contain
 *                        a filter string.  Only iocoms with an open
 *                        LNK_CONN transaction are applicable for SPAN
 *                        propagation.
 *
 * h2span_relay         - List of links relayed (via SPAN).  Essentially
 *                        each relay structure represents a LNK_SPAN
 *                        transaction that we initiated, versus h2span_link
 *                        which is a LNK_SPAN transaction that we received.
 *
 * --
 *
 * h2span_cluster       - Organizes the shared fsid's.  One structure for
 *                        each cluster.
 *
 * h2span_node          - Organizes the nodes in a cluster.  One structure
 *                        for each unique {cluster,node}, aka {fsid, pfs_fsid}.
 *
 * h2span_link          - Organizes all incoming and outgoing LNK_SPAN message
 *                        transactions related to a node.
 *
 *                        One h2span_link structure for each incoming LNK_SPAN
 *                        transaction.  Links selected for propagation back
 *                        out are also where the outgoing LNK_SPAN messages
 *                        are indexed (so we can propagate changes).
 *
 *                        The h2span_links are kept in a red-black tree sorted
 *                        by the distance-hop metric of the incoming LNK_SPAN.
 *                        We then select the top N for outgoing.  When the
 *                        topology changes the top N may also change and cause
 *                        new outgoing LNK_SPAN transactions to be opened
 *                        and less desirable ones to be closed, causing
 *                        transactional aborts within the message flow in
 *                        the process.
 *
 * Also note            - All outgoing LNK_SPAN message transactions are also
 *                        entered into a red-black tree for use by the routing
 *                        function.  This is handled by msg.c in the state
 *                        code, not here.
 */
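
/*
 * Illustrative sketch only (compiled out): resolving a {pfs_clid, pfs_fsid}
 * pair to its h2span_node is a two-level RB_FIND using dummy keys, the same
 * pattern hammer2_lnk_span() uses on transaction create.  The helper name
 * example_node_lookup is hypothetical.
 */
#if 0
static h2span_node_t *
example_node_lookup(uuid_t *pfs_clid, uuid_t *pfs_fsid)
{
        h2span_cluster_t dummy_cls;
        h2span_node_t dummy_node;
        h2span_cluster_t *cls;

        /* find the cluster by shared fsid */
        dummy_cls.pfs_clid = *pfs_clid;
        cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
        if (cls == NULL)
                return(NULL);

        /* find the node within the cluster by unique fsid */
        dummy_node.pfs_fsid = *pfs_fsid;
        return(RB_FIND(h2span_node_tree, &cls->tree, &dummy_node));
}
#endif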

struct h2span_link;
struct h2span_relay;
TAILQ_HEAD(h2span_connect_queue, h2span_connect);
TAILQ_HEAD(h2span_relay_queue, h2span_relay);

RB_HEAD(h2span_cluster_tree, h2span_cluster);
RB_HEAD(h2span_node_tree, h2span_node);
RB_HEAD(h2span_link_tree, h2span_link);
RB_HEAD(h2span_relay_tree, h2span_relay);

/*
 * A received LNK_CONN transaction enables the SPAN protocol over the
 * connection (and may contain a filter).
 */
struct h2span_connect {
        TAILQ_ENTRY(h2span_connect) entry;
        struct h2span_relay_tree tree;
        hammer2_state_t *state;
};

/*
 * All received LNK_SPANs are organized by cluster (pfs_clid),
 * node (pfs_fsid), and link (received LNK_SPAN transaction).
 */
struct h2span_cluster {
        RB_ENTRY(h2span_cluster) rbnode;
        struct h2span_node_tree tree;
        uuid_t  pfs_clid;               /* shared fsid */
};

struct h2span_node {
        RB_ENTRY(h2span_node) rbnode;
        struct h2span_link_tree tree;
        struct h2span_cluster *cls;
        uuid_t  pfs_fsid;               /* unique fsid */
        char label[64];
};

struct h2span_link {
        RB_ENTRY(h2span_link) rbnode;
        hammer2_state_t *state;         /* state<->link */
        struct h2span_node *node;       /* related node */
        int32_t dist;
        struct h2span_relay_queue relayq; /* relay out */
};

/*
 * Any LNK_SPAN transactions we receive which are relayed out over other
 * connections utilize this structure to track the LNK_SPAN transactions
 * we initiate on those connections, if selected for relay.
 *
 * In many respects this is the core of the protocol... actually figuring
 * out what LNK_SPANs to relay.  The spanid used for relaying is the
 * address of the 'state' structure, which is why h2span_relay has to
 * be entered into a RB-TREE based at h2span_connect (so we can look
 * up the spanid to validate it).
 */
struct h2span_relay {
        RB_ENTRY(h2span_relay) rbnode;  /* from h2span_connect */
        TAILQ_ENTRY(h2span_relay) entry; /* from link */
        struct h2span_connect *conn;
        hammer2_state_t *state;         /* transmitted LNK_SPAN */
        struct h2span_link *link;       /* received LNK_SPAN */
};


typedef struct h2span_connect h2span_connect_t;
typedef struct h2span_cluster h2span_cluster_t;
typedef struct h2span_node h2span_node_t;
typedef struct h2span_link h2span_link_t;
typedef struct h2span_relay h2span_relay_t;

static
int
h2span_cluster_cmp(h2span_cluster_t *cls1, h2span_cluster_t *cls2)
{
        return(uuid_compare(&cls1->pfs_clid, &cls2->pfs_clid, NULL));
}

static
int
h2span_node_cmp(h2span_node_t *node1, h2span_node_t *node2)
{
        return(uuid_compare(&node1->pfs_fsid, &node2->pfs_fsid, NULL));
}

static
int
h2span_link_cmp(h2span_link_t *link1, h2span_link_t *link2)
{
        if (link1->dist < link2->dist)
                return(-1);
        if (link1->dist > link2->dist)
                return(1);
        if ((intptr_t)link1 < (intptr_t)link2)
                return(-1);
        if ((intptr_t)link1 > (intptr_t)link2)
                return(1);
        return(0);
}

/*
 * Relay entries are sorted by node, subsorted by distance and link
 * address (so we can match up the conn->tree relay topology with
 * a node's link topology).
 */
static
int
h2span_relay_cmp(h2span_relay_t *relay1, h2span_relay_t *relay2)
{
        if ((intptr_t)relay1->link->node < (intptr_t)relay2->link->node)
                return(-1);
        if ((intptr_t)relay1->link->node > (intptr_t)relay2->link->node)
                return(1);
        if ((intptr_t)relay1->link->dist < (intptr_t)relay2->link->dist)
                return(-1);
        if ((intptr_t)relay1->link->dist > (intptr_t)relay2->link->dist)
                return(1);
        if ((intptr_t)relay1->link < (intptr_t)relay2->link)
                return(-1);
        if ((intptr_t)relay1->link > (intptr_t)relay2->link)
                return(1);
        return(0);
}

RB_PROTOTYPE_STATIC(h2span_cluster_tree, h2span_cluster,
             rbnode, h2span_cluster_cmp);
RB_PROTOTYPE_STATIC(h2span_node_tree, h2span_node,
             rbnode, h2span_node_cmp);
RB_PROTOTYPE_STATIC(h2span_link_tree, h2span_link,
             rbnode, h2span_link_cmp);
RB_PROTOTYPE_STATIC(h2span_relay_tree, h2span_relay,
             rbnode, h2span_relay_cmp);

RB_GENERATE_STATIC(h2span_cluster_tree, h2span_cluster,
             rbnode, h2span_cluster_cmp);
RB_GENERATE_STATIC(h2span_node_tree, h2span_node,
             rbnode, h2span_node_cmp);
RB_GENERATE_STATIC(h2span_link_tree, h2span_link,
             rbnode, h2span_link_cmp);
RB_GENERATE_STATIC(h2span_relay_tree, h2span_relay,
             rbnode, h2span_relay_cmp);

/*
 * Global mutex protects cluster_tree lookups.
 */
static pthread_mutex_t cluster_mtx;
static struct h2span_cluster_tree cluster_tree = RB_INITIALIZER(cluster_tree);
static struct h2span_connect_queue connq = TAILQ_HEAD_INITIALIZER(connq);

static void hammer2_lnk_span(hammer2_state_t *state, hammer2_msg_t *msg);
static void hammer2_lnk_conn(hammer2_state_t *state, hammer2_msg_t *msg);
static void hammer2_lnk_relay(hammer2_state_t *state, hammer2_msg_t *msg);
static void hammer2_relay_scan(h2span_connect_t *conn, h2span_node_t *node);
static void hammer2_relay_delete(h2span_relay_t *relay);

/*
 * Receive a HAMMER2_MSG_PROTO_LNK message.  This is only called for
 * one-way and opening transactions, since state->func will be assigned
 * in all other cases.
 */
void
hammer2_msg_lnk(hammer2_iocom_t *iocom, hammer2_msg_t *msg)
{
        switch(msg->any.head.cmd & HAMMER2_MSGF_BASECMDMASK) {
        case HAMMER2_LNK_CONN:
                hammer2_lnk_conn(msg->state, msg);
                break;
        case HAMMER2_LNK_SPAN:
                hammer2_lnk_span(msg->state, msg);
                break;
        default:
                fprintf(stderr,
                        "MSG_PROTO_LNK: Unknown msg %08x\n", msg->any.head.cmd);
                hammer2_msg_reply(iocom, msg, HAMMER2_MSG_ERR_NOSUPP);
                /* state invalid after reply */
                break;
        }
}
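
/*
 * A minimal sketch of the transaction-handler idiom used by both
 * hammer2_lnk_conn() and hammer2_lnk_span() below: handle CREATE by
 * installing state->func and replying with a result (leaving the
 * transaction open), handle DELETE by tearing down and replying.
 * Illustrative only, compiled out; example_lnk_handler is hypothetical.
 */
#if 0
static void
example_lnk_handler(hammer2_state_t *state, hammer2_msg_t *msg)
{
        if (msg->any.head.cmd & HAMMER2_MSGF_CREATE) {
                state->func = example_lnk_handler; /* route later msgs here */
                /* ...allocate per-transaction tracking structures... */
                hammer2_msg_result(state->iocom, msg, 0); /* leave open */
        }
        if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
                /* ...tear down per-transaction tracking structures... */
                hammer2_msg_reply(state->iocom, msg, 0);
                /* state invalid after reply */
        }
}
#endif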

void
hammer2_lnk_conn(hammer2_state_t *state, hammer2_msg_t *msg)
{
        h2span_connect_t *conn;
        h2span_relay_t *relay;
        char *alloc = NULL;

        pthread_mutex_lock(&cluster_mtx);

        /*
         * On transaction start we allocate a new h2span_connect and
         * acknowledge the request, leaving the transaction open.
         * We then relay priority-selected SPANs.
         */
        if (msg->any.head.cmd & HAMMER2_MSGF_CREATE) {
                state->func = hammer2_lnk_conn;

                fprintf(stderr, "LNK_CONN(%08x): %s/%s\n",
                        (uint32_t)msg->any.head.msgid,
                        hammer2_uuid_to_str(&msg->any.lnk_conn.pfs_clid,
                                            &alloc),
                        msg->any.lnk_conn.label);
                free(alloc);

                conn = hammer2_alloc(sizeof(*conn));

                RB_INIT(&conn->tree);
                conn->state = state;
                state->any.conn = conn;
                TAILQ_INSERT_TAIL(&connq, conn, entry);

                hammer2_msg_result(state->iocom, msg, 0);

                /*
                 * Span-synchronize all nodes with the new connection
                 */
                hammer2_relay_scan(conn, NULL);
        }

        /*
         * On transaction terminate we clean out our h2span_connect
         * and acknowledge the request, closing the transaction.
         */
        if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
                fprintf(stderr, "LNK_CONN: Terminated\n");
                conn = state->any.conn;
                assert(conn);

                /*
                 * Clean out all relays.  This requires terminating each
                 * relay transaction.
                 */
                while ((relay = RB_ROOT(&conn->tree)) != NULL) {
                        hammer2_relay_delete(relay);
                }

                /*
                 * Clean out conn
                 */
                conn->state = NULL;
                msg->state->any.conn = NULL;
                TAILQ_REMOVE(&connq, conn, entry);
                hammer2_free(conn);

                hammer2_msg_reply(state->iocom, msg, 0);
                /* state invalid after reply */
        }
        pthread_mutex_unlock(&cluster_mtx);
}

void
hammer2_lnk_span(hammer2_state_t *state, hammer2_msg_t *msg)
{
        h2span_cluster_t dummy_cls;
        h2span_node_t dummy_node;
        h2span_cluster_t *cls;
        h2span_node_t *node;
        h2span_link_t *slink;
        h2span_relay_t *relay;
        char *alloc = NULL;

        pthread_mutex_lock(&cluster_mtx);

        /*
         * On transaction start we initialize the tracking infrastructure
         */
        if (msg->any.head.cmd & HAMMER2_MSGF_CREATE) {
                state->func = hammer2_lnk_span;

                msg->any.lnk_span.label[sizeof(msg->any.lnk_span.label)-1] = 0;

                fprintf(stderr, "LNK_SPAN: %s/%s dist=%d\n",
                        hammer2_uuid_to_str(&msg->any.lnk_span.pfs_clid,
                                            &alloc),
                        msg->any.lnk_span.label,
                        msg->any.lnk_span.dist);
                free(alloc);

                /*
                 * Find the cluster
                 */
                dummy_cls.pfs_clid = msg->any.lnk_span.pfs_clid;
                cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
                if (cls == NULL) {
                        cls = hammer2_alloc(sizeof(*cls));
                        cls->pfs_clid = msg->any.lnk_span.pfs_clid;
                        RB_INIT(&cls->tree);
                        RB_INSERT(h2span_cluster_tree, &cluster_tree, cls);
                }

                /*
                 * Find the node
                 */
                dummy_node.pfs_fsid = msg->any.lnk_span.pfs_fsid;
                node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
                if (node == NULL) {
                        node = hammer2_alloc(sizeof(*node));
                        node->pfs_fsid = msg->any.lnk_span.pfs_fsid;
                        node->cls = cls;
                        RB_INIT(&node->tree);
                        RB_INSERT(h2span_node_tree, &cls->tree, node);
                        snprintf(node->label, sizeof(node->label),
                                 "%s", msg->any.lnk_span.label);
                }

                /*
                 * Create the link
                 */
                assert(state->any.link == NULL);
                slink = hammer2_alloc(sizeof(*slink));
                TAILQ_INIT(&slink->relayq);
                slink->node = node;
                slink->dist = msg->any.lnk_span.dist;
                slink->state = state;
                state->any.link = slink;
                RB_INSERT(h2span_link_tree, &node->tree, slink);

                hammer2_relay_scan(NULL, node);
        }

        /*
         * On transaction terminate we remove the tracking infrastructure.
         */
        if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
                slink = state->any.link;
                assert(slink != NULL);
                node = slink->node;
                cls = node->cls;

                /*
                 * Clean out all relays.  This requires terminating each
                 * relay transaction.
                 */
                while ((relay = TAILQ_FIRST(&slink->relayq)) != NULL) {
                        hammer2_relay_delete(relay);
                }

                /*
                 * Clean out the topology
                 */
                RB_REMOVE(h2span_link_tree, &node->tree, slink);
                if (RB_EMPTY(&node->tree)) {
                        RB_REMOVE(h2span_node_tree, &cls->tree, node);
                        if (RB_EMPTY(&cls->tree)) {
                                RB_REMOVE(h2span_cluster_tree,
                                          &cluster_tree, cls);
                                hammer2_free(cls);
                        }
                        node->cls = NULL;
                        hammer2_free(node);
                        node = NULL;
                }
                state->any.link = NULL;
                slink->state = NULL;
                slink->node = NULL;
                hammer2_free(slink);

                /*
                 * We have to terminate the transaction
                 */
                hammer2_state_reply(state, 0);
                /* state invalid after reply */

                /*
                 * If the node still exists issue any required updates.  If
                 * it doesn't then all related relays have already been
                 * removed and there's nothing left to do.
                 */
                if (node)
                        hammer2_relay_scan(NULL, node);
        }

        pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Messages received on relay SPANs.  These are open transactions so it is
 * in fact possible for the other end to close the transaction.
 *
 * XXX MPRACE on state structure
 */
static void
hammer2_lnk_relay(hammer2_state_t *state, hammer2_msg_t *msg)
{
        h2span_relay_t *relay;

        if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
                pthread_mutex_lock(&cluster_mtx);
                if ((relay = state->any.relay) != NULL) {
                        hammer2_relay_delete(relay);
                } else {
                        hammer2_state_reply(state, 0);
                }
                pthread_mutex_unlock(&cluster_mtx);
        }
}

/*
 * Update relay transactions for SPANs.
 *
 * Called with cluster_mtx held.
 */
static void hammer2_relay_scan_specific(h2span_node_t *node,
                                        h2span_connect_t *conn);

static void
hammer2_relay_scan(h2span_connect_t *conn, h2span_node_t *node)
{
        h2span_cluster_t *cls;

        if (node) {
                /*
                 * Iterate specific node
                 */
                TAILQ_FOREACH(conn, &connq, entry)
                        hammer2_relay_scan_specific(node, conn);
        } else {
                /*
                 * Full iteration.
                 *
                 * Iterate cluster ids, nodes, and either a specific connection
                 * or all connections.
                 */
                RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
                        /*
                         * Iterate node ids
                         */
                        RB_FOREACH(node, h2span_node_tree, &cls->tree) {
                                /*
                                 * Synchronize the node's link (received SPANs)
                                 * with each connection's relays.
                                 */
                                if (conn) {
                                        hammer2_relay_scan_specific(node, conn);
                                } else {
                                        TAILQ_FOREACH(conn, &connq, entry) {
                                            hammer2_relay_scan_specific(node,
                                                                        conn);
                                        }
                                        assert(conn == NULL);
                                }
                        }
                }
        }
}

/*
 * Update the relay'd SPANs for this (node, conn).
 *
 * Iterate links and adjust relays to match.  We propagate the top two
 * links (lowest dist) for now, per the count limit in
 * hammer2_relay_scan_specific() (XXX add weighting).
 *
 * The hammer2_relay_scan_cmp() function locates the first relay element
 * for any given node.  The relay elements will be sub-sorted by dist.
 */
struct relay_scan_info {
        h2span_node_t *node;
        h2span_relay_t *relay;
};

static int
hammer2_relay_scan_cmp(h2span_relay_t *relay, void *arg)
{
        struct relay_scan_info *info = arg;

        if ((intptr_t)relay->link->node < (intptr_t)info->node)
                return(-1);
        if ((intptr_t)relay->link->node > (intptr_t)info->node)
                return(1);
        return(0);
}

static int
hammer2_relay_scan_callback(h2span_relay_t *relay, void *arg)
{
        struct relay_scan_info *info = arg;

        info->relay = relay;
        return(-1);
}
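
/*
 * Note on the RB_SCAN() idiom used below: the cmp function narrows the
 * scan to relays whose link->node matches info->node, and the callback
 * returns non-zero to abort the scan after recording the first match,
 * so info.relay ends up pointing at the lowest-dist relay for the node
 * (or stays NULL if there are none).
 */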

static void
hammer2_relay_scan_specific(h2span_node_t *node, h2span_connect_t *conn)
{
        struct relay_scan_info info;
        h2span_relay_t *relay;
        h2span_relay_t *next_relay;
        h2span_link_t *slink;
        int count = 2;

        info.node = node;
        info.relay = NULL;

        /*
         * Locate the first related relay for the connection.  relay will
         * be NULL if there were none.
         */
        RB_SCAN(h2span_relay_tree, &conn->tree,
                hammer2_relay_scan_cmp, hammer2_relay_scan_callback, &info);
        relay = info.relay;

        if (DebugOpt > 8)
                fprintf(stderr, "relay scan for connection %p\n", conn);

        /*
         * Iterate the node's links (received SPANs) in distance order,
         * lowest (best) dist first.
         */
        RB_FOREACH(slink, h2span_link_tree, &node->tree) {
                /*
                 * PROPAGATE THE BEST RELAYS BY TRANSMITTING SPANs.
                 *
                 * Check for a match against the current best relay.
                 *
                 * A match failure means that the current best relay is not
                 * as good as the link, so create a new relay for the link.
                 *
                 * (If some prior better link was removed it would have also
                 *  removed the relay, so the relay can only match exactly or
                 *  be worse.)
                 */
                info.relay = relay;
                if (relay == NULL || relay->link != slink) {
                        hammer2_msg_t *msg;

                        assert(relay == NULL ||
                               relay->link->dist <= slink->dist);
                        relay = hammer2_alloc(sizeof(*relay));
                        relay->conn = conn;
                        relay->link = slink;

                        msg = hammer2_msg_alloc(conn->state->iocom, 0,
                                                HAMMER2_LNK_SPAN |
                                                HAMMER2_MSGF_CREATE);
                        msg->any.lnk_span = slink->state->msg->any.lnk_span;
                        ++msg->any.lnk_span.dist; /* XXX add weighting */

                        hammer2_msg_write(conn->state->iocom, msg,
                                          hammer2_lnk_relay, relay,
                                          &relay->state);
                        fprintf(stderr,
                                "RELAY SPAN ON CLS=%p NODE=%p FD %d state %p\n",
                                node->cls, node,
                                conn->state->iocom->sock_fd, relay->state);

                        RB_INSERT(h2span_relay_tree, &conn->tree, relay);
                        TAILQ_INSERT_TAIL(&slink->relayq, relay, entry);
                }

                /*
                 * Iterate, figure out the next relay.
                 */
                relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
                if (--count == 0)
                        break;
        }

        /*
         * Any remaining relays belonging to this connection which match
         * the node are in excess of the current aggregate spanning state
         * and should be removed.
         */
        while (relay && relay->link->node == node) {
                next_relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
                hammer2_relay_delete(relay);
                relay = next_relay;
        }
}
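
/*
 * Worked example (illustrative): if a node has incoming links at dist 1,
 * 2 and 3, the loop above keeps or creates relays for the two best links
 * (dist 1 and 2), and the trailing loop deletes any leftover relay for
 * the dist-3 link that an earlier topology may have created.
 */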

static
void
hammer2_relay_delete(h2span_relay_t *relay)
{
        fprintf(stderr,
                "RELAY DELETE ON CLS=%p NODE=%p FD %d STATE %p\n",
                relay->link->node->cls, relay->link->node,
                relay->conn->state->iocom->sock_fd, relay->state);
        fprintf(stderr, "RELAY TX %08x RX %08x\n",
                relay->state->txcmd, relay->state->rxcmd);

        RB_REMOVE(h2span_relay_tree, &relay->conn->tree, relay);
        TAILQ_REMOVE(&relay->link->relayq, relay, entry);

        if (relay->state) {
                relay->state->any.relay = NULL;
                hammer2_state_reply(relay->state, 0);
                /* state invalid after reply */
                relay->state = NULL;
        }
        relay->conn = NULL;
        relay->link = NULL;
        hammer2_free(relay);
}

/*
 * Dumps the spanning tree
 */
void
shell_tree(hammer2_iocom_t *iocom, char *cmdbuf __unused)
{
        h2span_cluster_t *cls;
        h2span_node_t *node;
        h2span_link_t *slink;
        char *uustr = NULL;

        pthread_mutex_lock(&cluster_mtx);
        RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
                iocom_printf(iocom, "Cluster %s\n",
                             hammer2_uuid_to_str(&cls->pfs_clid, &uustr));
                RB_FOREACH(node, h2span_node_tree, &cls->tree) {
                        iocom_printf(iocom, "    Node %s (%s)\n",
                                 hammer2_uuid_to_str(&node->pfs_fsid, &uustr),
                                 node->label);
                        RB_FOREACH(slink, h2span_link_tree, &node->tree) {
                                iocom_printf(iocom, "\tLink dist=%d via %d\n",
                                             slink->dist,
                                             slink->state->iocom->sock_fd);
                        }
                }
        }
        pthread_mutex_unlock(&cluster_mtx);
        if (uustr)
                free(uustr);
#if 0
        TAILQ_FOREACH(conn, &connq, entry) {
        }
#endif
}