/*
 * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * LNK_SPAN PROTOCOL SUPPORT FUNCTIONS
 *
 * This code supports the LNK_SPAN protocol.  Essentially all PFS
 * clients and services rendezvous with the userland hammer2 service and
 * open LNK_SPAN transactions using a message header linkid of 0,
 * registering any PFSs they have connectivity to with us.
 *
 * --
 *
 * Each registration maintains its own open LNK_SPAN message transaction.
 * The SPANs are collected, aggregated, and retransmitted over available
 * connections through the maintenance of additional LNK_SPAN message
 * transactions on each link.
 *
 * The msgid for each active LNK_SPAN transaction we receive allows us to
 * send a message to the target PFS (which might be one of many belonging
 * to the same cluster), by specifying that msgid as the linkid in any
 * message we send to the target PFS.
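 *
 * For example (illustrative): if we receive a LNK_SPAN for node X under
 * transaction msgid M, we can subsequently reach X by transmitting any
 * message whose header linkid is set to M.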
 *
 * Similarly, we allocate a msgid for each LNK_SPAN transaction we transmit
 * (and remember we will maintain multiple open LNK_SPAN transactions on
 * each connection representing the topology span, so every node sees every
 * other node as a separate open transaction).  The msgids for these active
 * transactions, which we initiated, can likewise be used by the other end
 * to route messages through us to another node, ultimately winding up
 * at the identified hammer2 PFS.  We have to adjust the spanid in the
 * message header at each hop to be representative of the outgoing LNK_SPAN
 * we are forwarding the message through.
 *
 * --
 *
 * If we were to retransmit every LNK_SPAN transaction we receive it would
 * create a huge mess, so we have to aggregate all received LNK_SPAN
 * transactions, sort them by the fsid (the cluster) and sub-sort them by
 * the pfs_fsid (individual nodes in the cluster), and only retransmit
 * (create outgoing transactions) for a subset of the nearest distance-hops
 * for each individual node.
 *
 * The higher level protocols can then issue transactions to the nodes making
 * up a cluster to perform all actions required.
 *
 * --
 *
 * Since this is a large topology and a spanning tree protocol, links can
 * go up and down all the time.  Any time a link goes down its transaction
 * is closed.  The transaction has to be closed on both ends before we can
 * delete (and potentially reuse) the related spanid.  The LNK_SPAN being
 * closed may have been propagated out to other connections and those related
 * LNK_SPANs are also closed.  Ultimately all routes via the lost LNK_SPAN
 * go away, ultimately reaching all sources and all targets.
 *
 * Any messages in-transit using a route that goes away will be thrown away.
 * Open transactions are only tracked at the two end-points.  When a link
 * failure propagates to an end-point the related open transactions lose
 * their spanid and are automatically aborted.
 *
 * It is important to note that internal route nodes cannot just associate
 * a lost LNK_SPAN transaction with another route to the same destination.
 * Message transactions MUST be serialized and MUST be ordered.  All messages
 * for a transaction must run over the same route.  So if the route used by
 * an active transaction is lost, the related messages will be fully aborted
 * and the higher protocol levels will retry as appropriate.
 *
 * FULLY ABORTING A ROUTED MESSAGE is handled via link-failure propagation
 * back to the originator.  Only the originator keeps track of a message.
 * Routers just pass it through.  If a route is lost during transit the
 * message is simply thrown away.
 *
 * It is also important to note that several paths to the same PFS can be
 * propagated along the same link, which allows concurrency and even
 * redundancy over several network interfaces or via different routes through
 * the topology.  Any given transaction will use only a single route but busy
 * servers will often have hundreds of transactions active simultaneously,
 * so having multiple active paths through the network topology for A<->B
 * will improve performance.
 *
 * --
 *
 * Most protocols consolidate operations rather than simply relaying them.
 * This is particularly true of LEAF protocols (such as strict HAMMER2
 * clients), of which there can be millions connecting into the cluster at
 * various points.  The SPAN protocol is not used for these LEAF elements.
 *
 * Instead the primary service they connect to implements a proxy for the
 * client protocols so the core topology only has to propagate a couple of
 * LNK_SPANs and not millions.  LNK_SPANs are meant to be used only for
 * core master nodes and satellite slaves and cache nodes.
 */

#include "hammer2.h"

/*
 * Maximum spanning tree distance.  This has the practical effect of
 * stopping tail-chasing closed loops when a feeder span is lost.
 */
#define HAMMER2_SPAN_MAXDIST    16

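/*
 * Worked example (illustrative): suppose node A's span keeps getting
 * relayed around a cycle A -> B -> C -> A after A's original feeder link
 * is lost.  Each relay re-advertises the span at dist + 1, so the
 * orphaned copy grows in distance as it orbits and is dropped once its
 * dist exceeds HAMMER2_SPAN_MAXDIST, bounding the tail-chase to 16 hops.
 */
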
/*
 * RED-BLACK TREE DEFINITIONS
 *
 * We need to track:
 *
 * (1) shared fsids (a cluster).
 * (2) unique fsids (a node in a cluster) <--- LNK_SPAN transactions.
 *
 * We need to aggregate all active LNK_SPANs and create our own
 * outgoing LNK_SPAN transactions on each of our connections representing
 * the aggregated state.
 *
 * h2span_connect       - list of iocom connections that wish to receive SPAN
 *                        propagation from other connections.  Might contain
 *                        a filter string.  Only iocoms with an open
 *                        LNK_CONN transaction are applicable for SPAN
 *                        propagation.
 *
 * h2span_relay         - List of links relayed (via SPAN).  Essentially
 *                        each relay structure represents a LNK_SPAN
 *                        transaction that we initiated, versus h2span_link
 *                        which is a LNK_SPAN transaction that we received.
 *
 * --
 *
 * h2span_cluster       - Organizes the shared fsids.  One structure for
 *                        each cluster.
 *
 * h2span_node          - Organizes the nodes in a cluster.  One structure
 *                        for each unique {cluster,node}, aka {fsid, pfs_fsid}.
 *
 * h2span_link          - Organizes all incoming and outgoing LNK_SPAN message
 *                        transactions related to a node.
 *
 *                        One h2span_link structure for each incoming LNK_SPAN
 *                        transaction.  Links selected for propagation back
 *                        out are also where the outgoing LNK_SPAN messages
 *                        are indexed into (so we can propagate changes).
 *
 *                        The h2span_links use a red-black tree sorted by the
 *                        distance-hop metric for the incoming LNK_SPAN.  We
 *                        then select the top N for outgoing.  When the
 *                        topology changes the top N may also change and cause
 *                        new outgoing LNK_SPAN transactions to be opened
 *                        and less desirable ones to be closed, causing
 *                        transactional aborts within the message flow in
 *                        the process.
 *
 * Also note            - All outgoing LNK_SPAN message transactions are also
 *                        entered into a red-black tree for use by the routing
 *                        function.  This is handled by msg.c in the state
 *                        code, not here.
 */

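/*
 * Topology at a glance (a summary of the structures described above):
 *
 *      h2span_cluster (keyed by pfs_clid)
 *          h2span_node (keyed by pfs_fsid)
 *              h2span_link (one per received LNK_SPAN, sorted by dist)
 *
 *      h2span_connect (one per SPAN-enabled iocom connection)
 *          h2span_relay (one per outgoing LNK_SPAN we initiated; also
 *                        queued on the h2span_link it is relaying)
 *
 * Hedged lookup sketch (not compiled): descending the topology with dummy
 * keys, the same pattern hammer2_lnk_span() uses below.  some_clid and
 * some_fsid are hypothetical inputs.
 */
#if 0
        h2span_cluster_t dummy_cls, *cls;
        h2span_node_t dummy_node, *node;

        dummy_cls.pfs_clid = some_clid;
        cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
        if (cls) {
                dummy_node.pfs_fsid = some_fsid;
                node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
        }
#endif
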
struct h2span_link;
struct h2span_relay;
TAILQ_HEAD(h2span_connect_queue, h2span_connect);
TAILQ_HEAD(h2span_relay_queue, h2span_relay);

RB_HEAD(h2span_cluster_tree, h2span_cluster);
RB_HEAD(h2span_node_tree, h2span_node);
RB_HEAD(h2span_link_tree, h2span_link);
RB_HEAD(h2span_relay_tree, h2span_relay);

/*
 * A received LNK_CONN transaction enables the SPAN protocol over the
 * connection (it may contain a filter).
 */
struct h2span_connect {
        TAILQ_ENTRY(h2span_connect) entry;
        struct h2span_relay_tree tree;
        hammer2_state_t *state;
};

/*
 * All received LNK_SPANs are organized by cluster (pfs_clid),
 * node (pfs_fsid), and link (received LNK_SPAN transaction).
 */
struct h2span_cluster {
        RB_ENTRY(h2span_cluster) rbnode;
        struct h2span_node_tree tree;
        uuid_t  pfs_clid;               /* shared fsid */
        int     refs;                   /* prevents destruction */
};

struct h2span_node {
        RB_ENTRY(h2span_node) rbnode;
        struct h2span_link_tree tree;
        struct h2span_cluster *cls;
        uuid_t  pfs_fsid;               /* unique fsid */
        char label[64];
};

struct h2span_link {
        RB_ENTRY(h2span_link) rbnode;
        hammer2_state_t *state;         /* state<->link */
        struct h2span_node *node;       /* related node */
        int32_t dist;
        struct h2span_relay_queue relayq; /* relay out */
        struct hammer2_router *router;  /* route out this link */
};

/*
 * Any LNK_SPAN transactions we receive which are relayed out other
 * connections utilize this structure to track the LNK_SPAN transaction
 * we initiate on the other connections, if selected for relay.
 *
 * In many respects this is the core of the protocol... actually figuring
 * out what LNK_SPANs to relay.  The spanid used for relaying is the
 * address of the 'state' structure, which is why h2span_relay has to
 * be entered into a RB-TREE based at h2span_connect (so we can look
 * up the spanid to validate it).
 *
 * NOTE: Messages can be received via the LNK_SPAN transaction the
 *       relay maintains, and can be replied via relay->router, but
 *       messages are NOT initiated via a relay.  Messages are initiated
 *       via incoming links (h2span_links).
 *
 *       relay->link represents the link being relayed, NOT the LNK_SPAN
 *       transaction the relay is holding open.
 */
struct h2span_relay {
        RB_ENTRY(h2span_relay) rbnode;  /* from h2span_connect */
        TAILQ_ENTRY(h2span_relay) entry; /* from link */
        struct h2span_connect *conn;
        hammer2_state_t *state;         /* transmitted LNK_SPAN */
        struct h2span_link *link;       /* LNK_SPAN being relayed */
        struct hammer2_router   *router;/* route out this relay */
};


typedef struct h2span_connect h2span_connect_t;
typedef struct h2span_cluster h2span_cluster_t;
typedef struct h2span_node h2span_node_t;
typedef struct h2span_link h2span_link_t;
typedef struct h2span_relay h2span_relay_t;

static
int
h2span_cluster_cmp(h2span_cluster_t *cls1, h2span_cluster_t *cls2)
{
        return(uuid_compare(&cls1->pfs_clid, &cls2->pfs_clid, NULL));
}

static
int
h2span_node_cmp(h2span_node_t *node1, h2span_node_t *node2)
{
        return(uuid_compare(&node1->pfs_fsid, &node2->pfs_fsid, NULL));
}

/*
 * Sort/subsort must match h2span_relay_cmp() under any given node
 * to make the aggregation algorithm easier, so the best links are
 * in the same sorted order as the best relays.
 *
 * NOTE: We cannot use link*->state->msgid because this msgid is created
 *       by each remote host and thus might wind up being the same.
 */
static
int
h2span_link_cmp(h2span_link_t *link1, h2span_link_t *link2)
{
        if (link1->dist < link2->dist)
                return(-1);
        if (link1->dist > link2->dist)
                return(1);
#if 1
        if ((uintptr_t)link1->state < (uintptr_t)link2->state)
                return(-1);
        if ((uintptr_t)link1->state > (uintptr_t)link2->state)
                return(1);
#else
        if (link1->state->msgid < link2->state->msgid)
                return(-1);
        if (link1->state->msgid > link2->state->msgid)
                return(1);
#endif
        return(0);
}

/*
 * Relay entries are sorted by node, subsorted by distance and link
 * address (so we can match up the conn->tree relay topology with
 * a node's link topology).
 */
static
int
h2span_relay_cmp(h2span_relay_t *relay1, h2span_relay_t *relay2)
{
        h2span_link_t *link1 = relay1->link;
        h2span_link_t *link2 = relay2->link;

        if ((intptr_t)link1->node < (intptr_t)link2->node)
                return(-1);
        if ((intptr_t)link1->node > (intptr_t)link2->node)
                return(1);
        if (link1->dist < link2->dist)
                return(-1);
        if (link1->dist > link2->dist)
                return(1);
#if 1
        if ((uintptr_t)link1->state < (uintptr_t)link2->state)
                return(-1);
        if ((uintptr_t)link1->state > (uintptr_t)link2->state)
                return(1);
#else
        if (link1->state->msgid < link2->state->msgid)
                return(-1);
        if (link1->state->msgid > link2->state->msgid)
                return(1);
#endif
        return(0);
}

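/*
 * Illustration of the resulting order: under a given node, two links with
 * dist=1 and one with dist=2 sort as (1, lower state address),
 * (1, higher state address), (2, ...).  h2span_relay_cmp() applies the
 * same (dist, state address) subsort beneath its node key, so walking a
 * node's links and a connection's relays side by side lines the two
 * sequences up; hammer2_relay_scan_specific() depends on that.
 */
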
RB_PROTOTYPE_STATIC(h2span_cluster_tree, h2span_cluster,
             rbnode, h2span_cluster_cmp);
RB_PROTOTYPE_STATIC(h2span_node_tree, h2span_node,
             rbnode, h2span_node_cmp);
RB_PROTOTYPE_STATIC(h2span_link_tree, h2span_link,
             rbnode, h2span_link_cmp);
RB_PROTOTYPE_STATIC(h2span_relay_tree, h2span_relay,
             rbnode, h2span_relay_cmp);

RB_GENERATE_STATIC(h2span_cluster_tree, h2span_cluster,
             rbnode, h2span_cluster_cmp);
RB_GENERATE_STATIC(h2span_node_tree, h2span_node,
             rbnode, h2span_node_cmp);
RB_GENERATE_STATIC(h2span_link_tree, h2span_link,
             rbnode, h2span_link_cmp);
RB_GENERATE_STATIC(h2span_relay_tree, h2span_relay,
             rbnode, h2span_relay_cmp);

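/*
 * Hedged illustration (not compiled): the best-link aggregation rule that
 * hammer2_relay_scan_specific() implements further below.  For a given
 * {node, connection} pair we walk the node's received links in ascending
 * distance order and relay only the best two, re-advertising each at
 * dist + 1.  example_relay_out() is a hypothetical stand-in for opening
 * the outgoing LNK_SPAN transaction; the real code also reuses matching
 * relays in place and handles links arriving on the same iocom.
 */
#if 0
static void
example_relay_best_links(h2span_connect_t *conn, h2span_node_t *node)
{
        h2span_link_t *slink;
        int count = 2;          /* relay at most the two best links */

        RB_FOREACH(slink, h2span_link_tree, &node->tree) {
                if (slink->dist > HAMMER2_SPAN_MAXDIST)
                        break;  /* all later links are further yet */
                example_relay_out(conn, slink, slink->dist + 1);
                if (--count == 0)
                        break;
        }
}
#endif
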
/*
 * Global mutex protects cluster_tree lookups.
 */
static pthread_mutex_t cluster_mtx;
static struct h2span_cluster_tree cluster_tree = RB_INITIALIZER(cluster_tree);
static struct h2span_connect_queue connq = TAILQ_HEAD_INITIALIZER(connq);

static void hammer2_lnk_span(hammer2_msg_t *msg);
static void hammer2_lnk_conn(hammer2_msg_t *msg);
static void hammer2_lnk_relay(hammer2_msg_t *msg);
static void hammer2_relay_scan(h2span_connect_t *conn, h2span_node_t *node);
static void hammer2_relay_delete(h2span_relay_t *relay);

void
hammer2_msg_lnk_signal(hammer2_router_t *router __unused)
{
        pthread_mutex_lock(&cluster_mtx);
        hammer2_relay_scan(NULL, NULL);
        pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Receive a HAMMER2_MSG_PROTO_LNK message.  This is only called for
 * one-way messages and opening transactions, since state->func will be
 * assigned in all other cases.
 */
void
hammer2_msg_lnk(hammer2_msg_t *msg)
{
        switch(msg->any.head.cmd & HAMMER2_MSGF_BASECMDMASK) {
        case HAMMER2_LNK_CONN:
                hammer2_lnk_conn(msg);
                break;
        case HAMMER2_LNK_SPAN:
                hammer2_lnk_span(msg);
                break;
        default:
                fprintf(stderr,
                        "MSG_PROTO_LNK: Unknown msg %08x\n", msg->any.head.cmd);
                hammer2_msg_reply(msg, HAMMER2_MSG_ERR_NOSUPP);
                /* state invalid after reply */
                break;
        }
}

void
hammer2_lnk_conn(hammer2_msg_t *msg)
{
        hammer2_state_t *state = msg->state;
        h2span_connect_t *conn;
        h2span_relay_t *relay;
        char *alloc = NULL;

        pthread_mutex_lock(&cluster_mtx);

        /*
         * On transaction start we allocate a new h2span_connect and
         * acknowledge the request, leaving the transaction open.
         * We then relay priority-selected SPANs.
         */
        if (msg->any.head.cmd & HAMMER2_MSGF_CREATE) {
                state->func = hammer2_lnk_conn;

                fprintf(stderr, "LNK_CONN(%08x): %s/%s\n",
                        (uint32_t)msg->any.head.msgid,
                        hammer2_uuid_to_str(&msg->any.lnk_conn.pfs_clid,
                                            &alloc),
                        msg->any.lnk_conn.label);
                free(alloc);

                conn = hammer2_alloc(sizeof(*conn));

                RB_INIT(&conn->tree);
                conn->state = state;
                state->any.conn = conn;
                TAILQ_INSERT_TAIL(&connq, conn, entry);

                hammer2_msg_result(msg, 0);

#if 0
                /*
                 * Span-synchronize all nodes with the new connection
                 */
                hammer2_relay_scan(conn, NULL);
#endif
                hammer2_router_signal(msg->router);
        }

        /*
         * On transaction terminate we clean out our h2span_connect
         * and acknowledge the request, closing the transaction.
         */
        if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
                fprintf(stderr, "LNK_CONN: Terminated\n");
                conn = state->any.conn;
                assert(conn);

                /*
                 * Clean out all relays.  This requires terminating each
                 * relay transaction.
                 */
                while ((relay = RB_ROOT(&conn->tree)) != NULL) {
                        hammer2_relay_delete(relay);
                }

                /*
                 * Clean out conn
                 */
                conn->state = NULL;
                msg->state->any.conn = NULL;
                TAILQ_REMOVE(&connq, conn, entry);
                hammer2_free(conn);

                hammer2_msg_reply(msg, 0);
                /* state invalid after reply */
        }
        pthread_mutex_unlock(&cluster_mtx);
}

void
hammer2_lnk_span(hammer2_msg_t *msg)
{
        hammer2_state_t *state = msg->state;
        h2span_cluster_t dummy_cls;
        h2span_node_t dummy_node;
        h2span_cluster_t *cls;
        h2span_node_t *node;
        h2span_link_t *slink;
        h2span_relay_t *relay;
        char *alloc = NULL;

        assert((msg->any.head.cmd & HAMMER2_MSGF_REPLY) == 0);

        pthread_mutex_lock(&cluster_mtx);

        /*
         * On transaction start we initialize the tracking infrastructure
         */
        if (msg->any.head.cmd & HAMMER2_MSGF_CREATE) {
                assert(state->func == NULL);
                state->func = hammer2_lnk_span;

                msg->any.lnk_span.label[sizeof(msg->any.lnk_span.label)-1] = 0;

                /*
                 * Find the cluster
                 */
                dummy_cls.pfs_clid = msg->any.lnk_span.pfs_clid;
                cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
                if (cls == NULL) {
                        cls = hammer2_alloc(sizeof(*cls));
                        cls->pfs_clid = msg->any.lnk_span.pfs_clid;
                        RB_INIT(&cls->tree);
                        RB_INSERT(h2span_cluster_tree, &cluster_tree, cls);
                }

                /*
                 * Find the node
                 */
                dummy_node.pfs_fsid = msg->any.lnk_span.pfs_fsid;
                node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
                if (node == NULL) {
                        node = hammer2_alloc(sizeof(*node));
                        node->pfs_fsid = msg->any.lnk_span.pfs_fsid;
                        node->cls = cls;
                        RB_INIT(&node->tree);
                        RB_INSERT(h2span_node_tree, &cls->tree, node);
                        snprintf(node->label, sizeof(node->label),
                                 "%s", msg->any.lnk_span.label);
                }

                /*
                 * Create the link
                 */
                assert(state->any.link == NULL);
                slink = hammer2_alloc(sizeof(*slink));
                TAILQ_INIT(&slink->relayq);
                slink->node = node;
                slink->dist = msg->any.lnk_span.dist;
                slink->state = state;
                state->any.link = slink;

                /*
                 * Embedded router structure in link for message forwarding.
                 *
                 * The spanning id for the router is the message id of
                 * the SPAN link it is embedded in, allowing messages to
                 * be routed via &slink->router.
                 */
                slink->router = hammer2_router_alloc();
                slink->router->iocom = state->iocom;
                slink->router->link = slink;
                slink->router->target = state->msgid;
                hammer2_router_connect(slink->router);

                RB_INSERT(h2span_link_tree, &node->tree, slink);

                fprintf(stderr, "LNK_SPAN(thr %p): %p %s/%s dist=%d\n",
                        msg->router->iocom,
                        slink,
                        hammer2_uuid_to_str(&msg->any.lnk_span.pfs_clid,
                                            &alloc),
                        msg->any.lnk_span.label,
                        msg->any.lnk_span.dist);
                free(alloc);
#if 0
                hammer2_relay_scan(NULL, node);
#endif
                hammer2_router_signal(msg->router);
        }

        /*
         * On transaction terminate we remove the tracking infrastructure.
         */
        if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
                slink = state->any.link;
                assert(slink != NULL);
                node = slink->node;
                cls = node->cls;

                fprintf(stderr, "LNK_DELE(thr %p): %p %s/%s dist=%d\n",
                        msg->router->iocom,
                        slink,
                        hammer2_uuid_to_str(&cls->pfs_clid, &alloc),
                        state->msg->any.lnk_span.label,
                        state->msg->any.lnk_span.dist);
                free(alloc);

                /*
                 * Remove the router from consideration
                 */
                hammer2_router_disconnect(&slink->router);

                /*
                 * Clean out all relays.  This requires terminating each
                 * relay transaction.
                 */
                while ((relay = TAILQ_FIRST(&slink->relayq)) != NULL) {
                        hammer2_relay_delete(relay);
                }

                /*
                 * Clean out the topology
                 */
                RB_REMOVE(h2span_link_tree, &node->tree, slink);
                if (RB_EMPTY(&node->tree)) {
                        RB_REMOVE(h2span_node_tree, &cls->tree, node);
                        if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
                                RB_REMOVE(h2span_cluster_tree,
                                          &cluster_tree, cls);
                                hammer2_free(cls);
                        }
                        node->cls = NULL;
                        hammer2_free(node);
                        node = NULL;
                }
                state->any.link = NULL;
                slink->state = NULL;
                slink->node = NULL;
                hammer2_free(slink);

                /*
                 * We have to terminate the transaction
                 */
                hammer2_state_reply(state, 0);
                /* state invalid after reply */

                /*
                 * If the node still exists issue any required updates.  If
                 * it doesn't then all related relays have already been
                 * removed and there's nothing left to do.
                 */
#if 0
                if (node)
                        hammer2_relay_scan(NULL, node);
#endif
                if (node)
                        hammer2_router_signal(msg->router);
        }

        pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Messages received on relay SPANs.  These are open transactions so it is
 * in fact possible for the other end to close the transaction.
 *
 * XXX MPRACE on state structure
 */
static void
hammer2_lnk_relay(hammer2_msg_t *msg)
{
        hammer2_state_t *state = msg->state;
        h2span_relay_t *relay;

        assert(msg->any.head.cmd & HAMMER2_MSGF_REPLY);

        if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
                pthread_mutex_lock(&cluster_mtx);
                if ((relay = state->any.relay) != NULL) {
                        hammer2_relay_delete(relay);
                } else {
                        hammer2_state_reply(state, 0);
                }
                pthread_mutex_unlock(&cluster_mtx);
        }
}

/*
 * Update relay transactions for SPANs.
 *
 * Called with cluster_mtx held.
 */
static void hammer2_relay_scan_specific(h2span_node_t *node,
                                        h2span_connect_t *conn);

static void
hammer2_relay_scan(h2span_connect_t *conn, h2span_node_t *node)
{
        h2span_cluster_t *cls;

        if (node) {
                /*
                 * Iterate specific node
                 */
                TAILQ_FOREACH(conn, &connq, entry)
                        hammer2_relay_scan_specific(node, conn);
        } else {
                /*
                 * Full iteration.
                 *
                 * Iterate cluster ids, nodes, and either a specific connection
                 * or all connections.
                 */
                RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
                        /*
                         * Iterate node ids
                         */
                        RB_FOREACH(node, h2span_node_tree, &cls->tree) {
                                /*
                                 * Synchronize the node's link (received SPANs)
                                 * with each connection's relays.
                                 */
                                if (conn) {
                                        hammer2_relay_scan_specific(node, conn);
                                } else {
                                        TAILQ_FOREACH(conn, &connq, entry) {
                                            hammer2_relay_scan_specific(node,
                                                                        conn);
                                        }
                                        assert(conn == NULL);
                                }
                        }
                }
        }
}

/*
 * Update the relay'd SPANs for this (node, conn).
 *
 * Iterate links and adjust relays to match.  We propagate the best two
 * links (the count is set in hammer2_relay_scan_specific()).
 *
 * The hammer2_relay_scan_cmp() function locates the first relay element
 * for any given node.  The relay elements will be sub-sorted by dist.
 */
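/*
 * Mechanism note: RB_SCAN() invokes the callback for every relay the
 * compare function reports as equal (0), i.e. every relay whose link
 * points at the requested node.  The callback below records the first
 * such relay and returns -1 to stop the scan immediately, so info.relay
 * ends up at the leftmost (best dist) relay for the node, or NULL if
 * there are none.
 */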
struct relay_scan_info {
        h2span_node_t *node;
        h2span_relay_t *relay;
};

static int
hammer2_relay_scan_cmp(h2span_relay_t *relay, void *arg)
{
        struct relay_scan_info *info = arg;

        if ((intptr_t)relay->link->node < (intptr_t)info->node)
                return(-1);
        if ((intptr_t)relay->link->node > (intptr_t)info->node)
                return(1);
        return(0);
}

static int
hammer2_relay_scan_callback(h2span_relay_t *relay, void *arg)
{
        struct relay_scan_info *info = arg;

        info->relay = relay;
        return(-1);
}

static void
hammer2_relay_scan_specific(h2span_node_t *node, h2span_connect_t *conn)
{
        struct relay_scan_info info;
        h2span_relay_t *relay;
        h2span_relay_t *next_relay;
        h2span_link_t *slink;
        int count = 2;

        info.node = node;
        info.relay = NULL;

        /*
         * Locate the first related relay for the node on this connection.
         * relay will be NULL if there were none.
         */
        RB_SCAN(h2span_relay_tree, &conn->tree,
                hammer2_relay_scan_cmp, hammer2_relay_scan_callback, &info);
        relay = info.relay;
        info.relay = NULL;
        if (relay)
                assert(relay->link->node == node);

        if (DebugOpt > 8)
                fprintf(stderr, "relay scan for connection %p\n", conn);

        /*
         * Iterate the node's links (received SPANs) in distance order,
         * lowest (best) dist first.
         */
        /* fprintf(stderr, "LOOP\n"); */
        RB_FOREACH(slink, h2span_link_tree, &node->tree) {
                /*
                fprintf(stderr, "SLINK %p RELAY %p(%p)\n",
                        slink, relay, relay ? relay->link : NULL);
                */
                /*
                 * PROPAGATE THE BEST LINKS OVER THE SPECIFIED CONNECTION.
                 *
                 * Track relays while iterating the best links and construct
                 * missing relays when necessary.
                 *
                 * (If some prior better link was removed it would have also
                 *  removed the relay, so the relay can only match exactly or
                 *  be worse).
                 */
                if (relay && relay->link == slink) {
                        /*
                         * Match, relay already in-place, get the next
                         * relay to match against the next slink.
                         */
                        relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
                        if (--count == 0)
                                break;
                } else if (slink->dist > HAMMER2_SPAN_MAXDIST) {
                        /*
                         * No match but span distance is too great,
                         * do not relay.  This prevents endless closed
                         * loops with ever-incrementing distances when
                         * the seed span is lost in the graph.
                         *
                         * All later spans will also be too far away so
                         * we can break out of the loop.
                         */
                        break;
                } else if (slink->state->iocom == conn->state->iocom) {
                        /*
                         * No match but we would transmit a LNK_SPAN
                         * out the same connection it came in on, which
                         * can be trivially optimized out.
                         */
                        break;
                } else {
                        /*
                         * No match, distance is ok, construct a new relay.
                         * (slink is better than relay).
                         */
                        hammer2_msg_t *msg;

                        assert(relay == NULL ||
                               relay->link->node != slink->node ||
                               relay->link->dist >= slink->dist);
                        relay = hammer2_alloc(sizeof(*relay));
                        relay->conn = conn;
                        relay->link = slink;

                        msg = hammer2_msg_alloc(conn->state->iocom->router, 0,
                                                HAMMER2_LNK_SPAN |
                                                HAMMER2_MSGF_CREATE,
                                                hammer2_lnk_relay, relay);
                        relay->state = msg->state;
                        relay->router = hammer2_router_alloc();
                        relay->router->iocom = relay->state->iocom;
                        relay->router->relay = relay;
                        relay->router->target = relay->state->msgid;

                        msg->any.lnk_span = slink->state->msg->any.lnk_span;
                        msg->any.lnk_span.dist = slink->dist + 1;

                        hammer2_router_connect(relay->router);

                        RB_INSERT(h2span_relay_tree, &conn->tree, relay);
                        TAILQ_INSERT_TAIL(&slink->relayq, relay, entry);

                        hammer2_msg_write(msg);

                        fprintf(stderr,
                                "RELAY SPAN %p RELAY %p ON CLS=%p NODE=%p DIST=%d "
                                "FD %d state %p\n",
                                slink,
                                relay,
                                node->cls, node, slink->dist,
                                conn->state->iocom->sock_fd, relay->state);

                        /*
                         * Match (created new relay), get the next relay to
                         * match against the next slink.
                         */
                        relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
                        if (--count == 0)
                                break;
                }
        }

        /*
         * Any remaining relays belonging to this connection which match
         * the node are in excess of the current aggregate spanning state
         * and should be removed.
         */
        while (relay && relay->link->node == node) {
                next_relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
                hammer2_relay_delete(relay);
                relay = next_relay;
        }
}

static
void
hammer2_relay_delete(h2span_relay_t *relay)
{
        fprintf(stderr,
                "RELAY DELETE %p RELAY %p ON CLS=%p NODE=%p DIST=%d FD %d STATE %p\n",
                relay->link,
                relay,
                relay->link->node->cls, relay->link->node,
                relay->link->dist,
                relay->conn->state->iocom->sock_fd, relay->state);

        hammer2_router_disconnect(&relay->router);

        RB_REMOVE(h2span_relay_tree, &relay->conn->tree, relay);
        TAILQ_REMOVE(&relay->link->relayq, relay, entry);

        if (relay->state) {
                relay->state->any.relay = NULL;
                hammer2_state_reply(relay->state, 0);
                /* state invalid after reply */
                relay->state = NULL;
        }
        relay->conn = NULL;
        relay->link = NULL;
        hammer2_free(relay);
}

/************************************************************************
 *                      ROUTER AND MESSAGING HANDLES                    *
 ************************************************************************
 *
 * Basically the idea here is to provide a stable data structure which
 * can be localized to the caller for higher level protocols to work with.
 * Depending on the context, these hammer2_handles can be pooled by
 * use-case and remain persistent through a client's (or mount point's)
 * life.
 */

#if 0
/*
 * Obtain a stable handle on a cluster given its uuid.  This ties directly
 * into the global cluster topology, creating the structure if necessary
 * (even if the uuid is not yet registered), and preventing the structure
 * from getting ripped out from under us while we hold a pointer to it.
 */
h2span_cluster_t *
hammer2_cluster_get(uuid_t *pfs_clid)
{
        h2span_cluster_t dummy_cls;
        h2span_cluster_t *cls;

        dummy_cls.pfs_clid = *pfs_clid;
        pthread_mutex_lock(&cluster_mtx);
        cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
        if (cls)
                ++cls->refs;
        pthread_mutex_unlock(&cluster_mtx);
        return (cls);
}

void
hammer2_cluster_put(h2span_cluster_t *cls)
{
        pthread_mutex_lock(&cluster_mtx);
        assert(cls->refs > 0);
        --cls->refs;
        if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
                RB_REMOVE(h2span_cluster_tree,
                          &cluster_tree, cls);
                hammer2_free(cls);
        }
        pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Obtain a stable handle to a specific cluster node given its uuid.
 * This handle does NOT lock in the route to the node and is typically
 * used as part of the hammer2_handle_*() API to obtain a set of
 * stable nodes.
 */
h2span_node_t *
hammer2_node_get(h2span_cluster_t *cls, uuid_t *pfs_fsid)
{
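        /*
         * Not yet implemented.  Presumably (an assumption, mirroring
         * hammer2_cluster_get() above) this would RB_FIND pfs_fsid in
         * cls->tree while holding cluster_mtx.
         */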
}

#endif

#if 0
/*
 * Acquire a persistent router structure given the cluster and node ids.
 * Messages can be transacted via this structure while held.  If the route
 * is lost messages will return failure.
 */
hammer2_router_t *
hammer2_router_get(uuid_t *pfs_clid, uuid_t *pfs_fsid)
{
}

/*
 * Release previously acquired router.
 */
void
hammer2_router_put(hammer2_router_t *router)
{
}
#endif

/************************************************************************
 *                              DEBUGGER                                *
 ************************************************************************/
/*
 * Dumps the spanning tree
 */
void
shell_tree(hammer2_router_t *router, char *cmdbuf __unused)
{
        h2span_cluster_t *cls;
        h2span_node_t *node;
        h2span_link_t *slink;
        char *uustr = NULL;

        pthread_mutex_lock(&cluster_mtx);
        RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
                router_printf(router, "Cluster %s\n",
                             hammer2_uuid_to_str(&cls->pfs_clid, &uustr));
                RB_FOREACH(node, h2span_node_tree, &cls->tree) {
                        router_printf(router, "    Node %s (%s)\n",
                                 hammer2_uuid_to_str(&node->pfs_fsid, &uustr),
                                 node->label);
                        RB_FOREACH(slink, h2span_link_tree, &node->tree) {
                                router_printf(router, "\tLink dist=%d via %d\n",
                                             slink->dist,
                                             slink->state->iocom->sock_fd);
                        }
                }
        }
        pthread_mutex_unlock(&cluster_mtx);
        if (uustr)
                free(uustr);
#if 0
        TAILQ_FOREACH(conn, &connq, entry) {
        }
#endif
}