hammer2 - Message routing work
sbin/hammer2/msg_lnk.c
/*
 * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * LNK_SPAN PROTOCOL SUPPORT FUNCTIONS
 *
 * This code supports the LNK_SPAN protocol.  Essentially, all PFS
 * clients and services rendezvous with the userland hammer2 service and
 * open LNK_SPAN transactions using a message header linkid of 0,
 * registering with us any PFSs they have connectivity to.
 *
 * --
 *
 * Each registration maintains its own open LNK_SPAN message transaction.
 * The SPANs are collected, aggregated, and retransmitted over available
 * connections through the maintenance of additional LNK_SPAN message
 * transactions on each link.
 *
 * The msgid for each active LNK_SPAN transaction we receive allows us to
 * send a message to the target PFS (which might be one of many belonging
 * to the same cluster), by specifying that msgid as the linkid in any
 * message we send to the target PFS.
 *
 * Similarly, the msgid we allocate for any LNK_SPAN transaction we
 * transmit (and remember, we maintain multiple open LNK_SPAN transactions
 * on each connection representing the topology span, so every node sees
 * every other node as a separate open transaction) can be used by the
 * other end to route messages through us to another node, ultimately
 * winding up at the identified hammer2 PFS.  We have to adjust the spanid
 * in the message header at each hop to be representative of the outgoing
 * LNK_SPAN we are forwarding the message through.
 *
 * --
 *
 * If we were to retransmit every LNK_SPAN transaction we receive it would
 * create a huge mess, so we have to aggregate all received LNK_SPAN
 * transactions, sort them by the fsid (the cluster) and sub-sort them by
 * the pfs_fsid (individual nodes in the cluster), and only retransmit
 * (create outgoing transactions) for a subset of the nearest distance-hops
 * for each individual node.
 *
 * The higher level protocols can then issue transactions to the nodes making
 * up a cluster to perform all actions required.
 *
 * --
 *
 * Since this is a large topology and a spanning tree protocol, links can
 * go up and down all the time.  Any time a link goes down its transaction
 * is closed.  The transaction has to be closed on both ends before we can
 * delete (and potentially reuse) the related spanid.  The LNK_SPAN being
 * closed may have been propagated out to other connections and those related
 * LNK_SPANs are also closed.  Ultimately all routes via the lost LNK_SPAN
 * go away, ultimately reaching all sources and all targets.
 *
 * Any messages in-transit using a route that goes away will be thrown away.
 * Open transactions are only tracked at the two end-points.  When a link
 * failure propagates to an end-point the related open transactions lose
 * their spanid and are automatically aborted.
 *
 * It is important to note that internal route nodes cannot just associate
 * a lost LNK_SPAN transaction with another route to the same destination.
 * Message transactions MUST be serialized and MUST be ordered.  All messages
 * for a transaction must run over the same route.  So if the route used by
 * an active transaction is lost, the related messages will be fully aborted
 * and the higher protocol levels will retry as appropriate.
 *
 * FULLY ABORTING A ROUTED MESSAGE is handled via link-failure propagation
 * back to the originator.  Only the originator keeps track of a message.
 * Routers just pass it through.  If a route is lost during transit the
 * message is simply thrown away.
 *
 * It is also important to note that several paths to the same PFS can be
 * propagated along the same link, which allows concurrency and even
 * redundancy over several network interfaces or via different routes through
 * the topology.  Any given transaction will use only a single route but busy
 * servers will often have hundreds of transactions active simultaneously,
 * so having multiple active paths through the network topology for A<->B
 * will improve performance.
 *
 * --
 *
 * Most protocols consolidate operations rather than simply relaying them.
 * This is particularly true of LEAF protocols (such as strict HAMMER2
 * clients), of which there can be millions connecting into the cluster at
 * various points.  The SPAN protocol is not used for these LEAF elements.
 *
 * Instead the primary service they connect to implements a proxy for the
 * client protocols so the core topology only has to propagate a couple of
 * LNK_SPANs and not millions.  LNK_SPANs are meant to be used only for
 * core master nodes and satellite slaves and cache nodes.
 */

#include "hammer2.h"

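#if 0
/*
 * Illustrative sketch only (not part of the build): the hop-by-hop
 * routing described in the header comment above.  The real forwarding
 * lives in msg.c's state code; the helper name here is hypothetical,
 * and we assume the message header carries the spanid being adjusted
 * at each hop.
 */
static void
sketch_forward_msg(hammer2_msg_t *msg, h2span_link_t *outgoing)
{
	/*
	 * Rewrite the spanid to identify the outgoing LNK_SPAN we are
	 * forwarding the message through, then queue the message on
	 * that link.
	 */
	msg->any.head.spanid = outgoing->router->spanid;
	hammer2_msg_write(msg);
}
#endif
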
/*
 * Maximum spanning tree distance.  This has the practical effect of
 * stopping tail-chasing closed loops when a feeder span is lost.
 */
#define HAMMER2_SPAN_MAXDIST	16

/*
 * RED-BLACK TREE DEFINITIONS
 *
 * We need to track:
 *
 * (1) shared fsid's (a cluster).
 * (2) unique fsid's (a node in a cluster) <--- LNK_SPAN transactions.
 *
 * We need to aggregate all active LNK_SPANs and create our own outgoing
 * LNK_SPAN transactions on each of our connections representing the
 * aggregated state.
 *
 * h2span_connect	- list of iocom connections who wish to receive SPAN
 *			  propagation from other connections.  Might contain
 *			  a filter string.  Only iocoms with an open
 *			  LNK_CONN transaction are applicable for SPAN
 *			  propagation.
 *
 * h2span_relay		- List of links relayed (via SPAN).  Essentially
 *			  each relay structure represents a LNK_SPAN
 *			  transaction that we initiated, versus h2span_link
 *			  which is a LNK_SPAN transaction that we received.
 *
 * --
 *
 * h2span_cluster	- Organizes the shared fsid's.  One structure for
 *			  each cluster.
 *
 * h2span_node		- Organizes the nodes in a cluster.  One structure
 *			  for each unique {cluster,node}, aka {fsid, pfs_fsid}.
 *
 * h2span_link		- Organizes all incoming and outgoing LNK_SPAN message
 *			  transactions related to a node.
 *
 *			  One h2span_link structure for each incoming LNK_SPAN
 *			  transaction.  Links selected for propagation back
 *			  out are also where the outgoing LNK_SPAN messages
 *			  are indexed into (so we can propagate changes).
 *
 *			  The h2span_link's use a red-black tree sorted by
 *			  the distance-hop metric for the incoming LNK_SPAN.
 *			  We then select the top N for outgoing.  When the
 *			  topology changes the top N may also change and cause
 *			  new outgoing LNK_SPAN transactions to be opened
 *			  and less desirable ones to be closed, causing
 *			  transactional aborts within the message flow in
 *			  the process.
 *
 * Also note		- All outgoing LNK_SPAN message transactions are also
 *			  entered into a red-black tree for use by the routing
 *			  function.  This is handled by msg.c in the state
 *			  code, not here.
 */

struct h2span_link;
struct h2span_relay;
TAILQ_HEAD(h2span_connect_queue, h2span_connect);
TAILQ_HEAD(h2span_relay_queue, h2span_relay);

RB_HEAD(h2span_cluster_tree, h2span_cluster);
RB_HEAD(h2span_node_tree, h2span_node);
RB_HEAD(h2span_link_tree, h2span_link);
RB_HEAD(h2span_relay_tree, h2span_relay);

/*
 * A received LNK_CONN transaction enables the SPAN protocol over the
 * connection (it may contain a filter).
 */
struct h2span_connect {
	TAILQ_ENTRY(h2span_connect) entry;
	struct h2span_relay_tree tree;
	hammer2_state_t *state;
};

/*
 * All received LNK_SPANs are organized by cluster (pfs_clid),
 * node (pfs_fsid), and link (received LNK_SPAN transaction).
 */
struct h2span_cluster {
	RB_ENTRY(h2span_cluster) rbnode;
	struct h2span_node_tree tree;
	uuid_t	pfs_clid;		/* shared fsid */
	int	refs;			/* prevents destruction */
};

struct h2span_node {
	RB_ENTRY(h2span_node) rbnode;
	struct h2span_link_tree tree;
	struct h2span_cluster *cls;
	uuid_t	pfs_fsid;		/* unique fsid */
	char	label[64];
};

struct h2span_link {
	RB_ENTRY(h2span_link) rbnode;
	hammer2_state_t	*state;		/* state<->link */
	struct h2span_node *node;	/* related node */
	int32_t	dist;
	struct h2span_relay_queue relayq; /* relay out */
	struct hammer2_router *router;	/* route out this link */
};

/*
 * Any LNK_SPAN transactions we receive which are relayed out other
 * connections utilize this structure to track the LNK_SPAN transaction
 * we initiate on the other connections, if selected for relay.
 *
 * In many respects this is the core of the protocol... actually figuring
 * out what LNK_SPANs to relay.  The spanid used for relaying is the msgid
 * of the LNK_SPAN transaction the relay holds open (relay->state->msgid,
 * see relay->router->spanid below), which is why h2span_relay has to be
 * entered into a RB-TREE based at h2span_connect (so we can look up the
 * spanid to validate it).
 *
 * NOTE: Messages can be received via the LNK_SPAN transaction the
 *	 relay maintains, and can be replied via relay->router, but
 *	 messages are NOT initiated via a relay.  Messages are initiated
 *	 via incoming links (h2span_link's).
 *
 * relay->link represents the link being relayed, NOT the LNK_SPAN
 * transaction the relay is holding open.
 */
struct h2span_relay {
	RB_ENTRY(h2span_relay) rbnode;	/* from h2span_connect */
	TAILQ_ENTRY(h2span_relay) entry; /* from link */
	struct h2span_connect *conn;
	hammer2_state_t	*state;		/* transmitted LNK_SPAN */
	struct h2span_link *link;	/* LNK_SPAN being relayed */
	struct hammer2_router *router;	/* route out this relay */
};

typedef struct h2span_connect h2span_connect_t;
typedef struct h2span_cluster h2span_cluster_t;
typedef struct h2span_node h2span_node_t;
typedef struct h2span_link h2span_link_t;
typedef struct h2span_relay h2span_relay_t;

static
int
h2span_cluster_cmp(h2span_cluster_t *cls1, h2span_cluster_t *cls2)
{
	return(uuid_compare(&cls1->pfs_clid, &cls2->pfs_clid, NULL));
}

static
int
h2span_node_cmp(h2span_node_t *node1, h2span_node_t *node2)
{
	return(uuid_compare(&node1->pfs_fsid, &node2->pfs_fsid, NULL));
}

/*
 * NOTE: Sort/subsort must match h2span_relay_cmp() under any given
 *	 node.
 */
static
int
h2span_link_cmp(h2span_link_t *link1, h2span_link_t *link2)
{
	if (link1->dist < link2->dist)
		return(-1);
	if (link1->dist > link2->dist)
		return(1);
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
	return(0);
}

/*
 * Relay entries are sorted by node, subsorted by distance and link
 * address (so we can match up the conn->tree relay topology with
 * a node's link topology).
 */
static
int
h2span_relay_cmp(h2span_relay_t *relay1, h2span_relay_t *relay2)
{
	h2span_link_t *link1 = relay1->link;
	h2span_link_t *link2 = relay2->link;

	if ((intptr_t)link1->node < (intptr_t)link2->node)
		return(-1);
	if ((intptr_t)link1->node > (intptr_t)link2->node)
		return(1);
	if (link1->dist < link2->dist)
		return(-1);
	if (link1->dist > link2->dist)
		return(1);
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
	return(0);
}

RB_PROTOTYPE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_PROTOTYPE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_PROTOTYPE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_PROTOTYPE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);

RB_GENERATE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_GENERATE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_GENERATE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_GENERATE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);

/*
 * Global mutex protects cluster_tree lookups.
 */
static pthread_mutex_t cluster_mtx;
static struct h2span_cluster_tree cluster_tree = RB_INITIALIZER(cluster_tree);
static struct h2span_connect_queue connq = TAILQ_HEAD_INITIALIZER(connq);

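#if 0
/*
 * Illustrative sketch only (hypothetical helper, not part of this file):
 * how the aggregation trees fit together.  A received LNK_SPAN's
 * {pfs_clid, pfs_fsid} keys the cluster and then the node, using dummy
 * structures exactly as hammer2_lnk_span() does below.  Caller must
 * hold cluster_mtx.
 */
static h2span_node_t *
sketch_node_lookup(uuid_t *pfs_clid, uuid_t *pfs_fsid)
{
	h2span_cluster_t dummy_cls;
	h2span_node_t dummy_node;
	h2span_cluster_t *cls;

	dummy_cls.pfs_clid = *pfs_clid;
	cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
	if (cls == NULL)
		return (NULL);
	dummy_node.pfs_fsid = *pfs_fsid;
	return (RB_FIND(h2span_node_tree, &cls->tree, &dummy_node));
}
#endif
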
static void hammer2_lnk_span(hammer2_msg_t *msg);
static void hammer2_lnk_conn(hammer2_msg_t *msg);
static void hammer2_lnk_relay(hammer2_msg_t *msg);
static void hammer2_relay_scan(h2span_connect_t *conn, h2span_node_t *node);
static void hammer2_relay_delete(h2span_relay_t *relay);

void
hammer2_msg_lnk_signal(hammer2_router_t *router __unused)
{
	pthread_mutex_lock(&cluster_mtx);
	hammer2_relay_scan(NULL, NULL);
	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Receive a HAMMER2_MSG_PROTO_LNK message.  This is only called for
 * one-way and opening-transactions since state->func will be assigned
 * in all other cases.
 */
void
hammer2_msg_lnk(hammer2_msg_t *msg)
{
	switch(msg->any.head.cmd & HAMMER2_MSGF_BASECMDMASK) {
	case HAMMER2_LNK_CONN:
		hammer2_lnk_conn(msg);
		break;
	case HAMMER2_LNK_SPAN:
		hammer2_lnk_span(msg);
		break;
	default:
		fprintf(stderr,
			"MSG_PROTO_LNK: Unknown msg %08x\n", msg->any.head.cmd);
		hammer2_msg_reply(msg, HAMMER2_MSG_ERR_NOSUPP);
		/* state invalid after reply */
		break;
	}
}
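
#if 0
/*
 * Illustrative sketch only: why hammer2_msg_lnk() above sees just
 * one-way and opening messages.  The receive core in msg.c dispatches
 * roughly like this (paraphrased, not the actual code); once a handler
 * assigns state->func, as hammer2_lnk_conn() and hammer2_lnk_span() do,
 * subsequent messages on that transaction bypass the protocol switch.
 */
	if (msg->state && msg->state->func)
		msg->state->func(msg);	/* established transaction */
	else
		hammer2_msg_lnk(msg);	/* PROTO_LNK one-way/opening msgs */
#endif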

void
hammer2_lnk_conn(hammer2_msg_t *msg)
{
	hammer2_state_t *state = msg->state;
	h2span_connect_t *conn;
	h2span_relay_t *relay;
	char *alloc = NULL;

	pthread_mutex_lock(&cluster_mtx);

	/*
	 * On transaction start we allocate a new h2span_connect and
	 * acknowledge the request, leaving the transaction open.
	 * We then relay priority-selected SPANs.
	 */
	if (msg->any.head.cmd & HAMMER2_MSGF_CREATE) {
		state->func = hammer2_lnk_conn;

		fprintf(stderr, "LNK_CONN(%08x): %s/%s\n",
			(uint32_t)msg->any.head.msgid,
			hammer2_uuid_to_str(&msg->any.lnk_conn.pfs_clid,
					    &alloc),
			msg->any.lnk_conn.label);
		free(alloc);

		conn = hammer2_alloc(sizeof(*conn));

		RB_INIT(&conn->tree);
		conn->state = state;
		state->any.conn = conn;
		TAILQ_INSERT_TAIL(&connq, conn, entry);

		hammer2_msg_result(msg, 0);

#if 0
		/*
		 * Span-synchronize all nodes with the new connection
		 */
		hammer2_relay_scan(conn, NULL);
#endif
		hammer2_router_signal(msg->router);
	}

	/*
	 * On transaction terminate we clean out our h2span_connect
	 * and acknowledge the request, closing the transaction.
	 */
	if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
		fprintf(stderr, "LNK_CONN: Terminated\n");
		conn = state->any.conn;
		assert(conn);

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay transaction.
		 */
		while ((relay = RB_ROOT(&conn->tree)) != NULL) {
			hammer2_relay_delete(relay);
		}

		/*
		 * Clean out conn
		 */
		conn->state = NULL;
		msg->state->any.conn = NULL;
		TAILQ_REMOVE(&connq, conn, entry);
		hammer2_free(conn);

		hammer2_msg_reply(msg, 0);
		/* state invalid after reply */
	}
	pthread_mutex_unlock(&cluster_mtx);
}

void
hammer2_lnk_span(hammer2_msg_t *msg)
{
	hammer2_state_t *state = msg->state;
	h2span_cluster_t dummy_cls;
	h2span_node_t dummy_node;
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	h2span_relay_t *relay;
	char *alloc = NULL;

	assert((msg->any.head.cmd & HAMMER2_MSGF_REPLY) == 0);

	pthread_mutex_lock(&cluster_mtx);

	/*
	 * On transaction start we initialize the tracking infrastructure
	 */
	if (msg->any.head.cmd & HAMMER2_MSGF_CREATE) {
		assert(state->func == NULL);
		state->func = hammer2_lnk_span;

		msg->any.lnk_span.label[sizeof(msg->any.lnk_span.label)-1] = 0;

		/*
		 * Find the cluster
		 */
		dummy_cls.pfs_clid = msg->any.lnk_span.pfs_clid;
		cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
		if (cls == NULL) {
			cls = hammer2_alloc(sizeof(*cls));
			cls->pfs_clid = msg->any.lnk_span.pfs_clid;
			RB_INIT(&cls->tree);
			RB_INSERT(h2span_cluster_tree, &cluster_tree, cls);
		}

		/*
		 * Find the node
		 */
		dummy_node.pfs_fsid = msg->any.lnk_span.pfs_fsid;
		node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
		if (node == NULL) {
			node = hammer2_alloc(sizeof(*node));
			node->pfs_fsid = msg->any.lnk_span.pfs_fsid;
			node->cls = cls;
			RB_INIT(&node->tree);
			RB_INSERT(h2span_node_tree, &cls->tree, node);
			snprintf(node->label, sizeof(node->label),
				 "%s", msg->any.lnk_span.label);
		}

		/*
		 * Create the link
		 */
		assert(state->any.link == NULL);
		slink = hammer2_alloc(sizeof(*slink));
		TAILQ_INIT(&slink->relayq);
		slink->node = node;
		slink->dist = msg->any.lnk_span.dist;
		slink->state = state;
		state->any.link = slink;

		/*
		 * Embedded router structure in link for message forwarding.
		 *
		 * The spanning id for the router is the message id of
		 * the SPAN link it is embedded in, allowing messages to
		 * be routed via &slink->router.
		 */
		slink->router = hammer2_router_alloc();
		slink->router->iocom = state->iocom;
		slink->router->link = slink;
		slink->router->spanid = state->msgid;
		hammer2_router_connect(slink->router);

		RB_INSERT(h2span_link_tree, &node->tree, slink);

		fprintf(stderr, "LNK_SPAN(thr %p): %p %s/%s dist=%d\n",
			msg->router->iocom,
			slink,
			hammer2_uuid_to_str(&msg->any.lnk_span.pfs_clid,
					    &alloc),
			msg->any.lnk_span.label,
			msg->any.lnk_span.dist);
		free(alloc);
#if 0
		hammer2_relay_scan(NULL, node);
#endif
		hammer2_router_signal(msg->router);
	}

	/*
	 * On transaction terminate we remove the tracking infrastructure.
	 */
	if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
		slink = state->any.link;
		assert(slink != NULL);
		node = slink->node;
		cls = node->cls;

		fprintf(stderr, "LNK_DELE(thr %p): %p %s/%s dist=%d\n",
			msg->router->iocom,
			slink,
			hammer2_uuid_to_str(&cls->pfs_clid, &alloc),
			state->msg->any.lnk_span.label,
			state->msg->any.lnk_span.dist);
		free(alloc);

		/*
		 * Remove the router from consideration
		 */
		hammer2_router_disconnect(&slink->router);

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay transaction.
		 */
		while ((relay = TAILQ_FIRST(&slink->relayq)) != NULL) {
			hammer2_relay_delete(relay);
		}

		/*
		 * Clean out the topology
		 */
		RB_REMOVE(h2span_link_tree, &node->tree, slink);
		if (RB_EMPTY(&node->tree)) {
			RB_REMOVE(h2span_node_tree, &cls->tree, node);
			if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
				RB_REMOVE(h2span_cluster_tree,
					  &cluster_tree, cls);
				hammer2_free(cls);
			}
			node->cls = NULL;
			hammer2_free(node);
			node = NULL;
		}
		state->any.link = NULL;
		slink->state = NULL;
		slink->node = NULL;
		hammer2_free(slink);

		/*
		 * We have to terminate the transaction
		 */
		hammer2_state_reply(state, 0);
		/* state invalid after reply */

		/*
		 * If the node still exists issue any required updates.  If
		 * it doesn't then all related relays have already been
		 * removed and there's nothing left to do.
		 */
#if 0
		if (node)
			hammer2_relay_scan(NULL, node);
#endif
		if (node)
			hammer2_router_signal(msg->router);
	}

	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Messages received on relay SPANs.  These are open transactions so it is
 * in fact possible for the other end to close the transaction.
 *
 * XXX MPRACE on state structure
 */
static void
hammer2_lnk_relay(hammer2_msg_t *msg)
{
	hammer2_state_t *state = msg->state;
	h2span_relay_t *relay;

	assert(msg->any.head.cmd & HAMMER2_MSGF_REPLY);

	if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
		pthread_mutex_lock(&cluster_mtx);
		if ((relay = state->any.relay) != NULL) {
			hammer2_relay_delete(relay);
		} else {
			hammer2_state_reply(state, 0);
		}
		pthread_mutex_unlock(&cluster_mtx);
	}
}

/*
 * Update relay transactions for SPANs.
 *
 * Called with cluster_mtx held.
 */
static void hammer2_relay_scan_specific(h2span_node_t *node,
					h2span_connect_t *conn);

static void
hammer2_relay_scan(h2span_connect_t *conn, h2span_node_t *node)
{
	h2span_cluster_t *cls;

	if (node) {
		/*
		 * Iterate specific node
		 */
		TAILQ_FOREACH(conn, &connq, entry)
			hammer2_relay_scan_specific(node, conn);
	} else {
		/*
		 * Full iteration.
		 *
		 * Iterate cluster ids, nodes, and either a specific connection
		 * or all connections.
		 */
		RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
			/*
			 * Iterate node ids
			 */
			RB_FOREACH(node, h2span_node_tree, &cls->tree) {
				/*
				 * Synchronize the node's link (received SPANs)
				 * with each connection's relays.
				 */
				if (conn) {
					hammer2_relay_scan_specific(node, conn);
				} else {
					TAILQ_FOREACH(conn, &connq, entry) {
						hammer2_relay_scan_specific(
							node, conn);
					}
					assert(conn == NULL);
				}
			}
		}
	}
}

/*
 * Update the relay'd SPANs for this (node, conn).
 *
 * Iterate links and adjust relays to match.  We currently propagate the
 * top two links (see the count variable in hammer2_relay_scan_specific()).
 *
 * The hammer2_relay_scan_cmp() function locates the first relay element
 * for any given node.  The relay elements will be sub-sorted by dist.
 */
struct relay_scan_info {
	h2span_node_t *node;
	h2span_relay_t *relay;
};

static int
hammer2_relay_scan_cmp(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	if ((intptr_t)relay->link->node < (intptr_t)info->node)
		return(-1);
	if ((intptr_t)relay->link->node > (intptr_t)info->node)
		return(1);
	return(0);
}

static int
hammer2_relay_scan_callback(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	info->relay = relay;
	return(-1);
}

8c280d5d 739static void
02454b3e 740hammer2_relay_scan_specific(h2span_node_t *node, h2span_connect_t *conn)
8c280d5d 741{
7dc0f844
MD
742 struct relay_scan_info info;
743 h2span_relay_t *relay;
744 h2span_relay_t *next_relay;
745 h2span_link_t *slink;
746 int count = 2;
747
748 info.node = node;
749 info.relay = NULL;
750
751 /*
29ead430
MD
752 * Locate the first related relay for the node on this connection.
753 * relay will be NULL if there were none.
7dc0f844
MD
754 */
755 RB_SCAN(h2span_relay_tree, &conn->tree,
756 hammer2_relay_scan_cmp, hammer2_relay_scan_callback, &info);
757 relay = info.relay;
cf715800
MD
758 info.relay = NULL;
759 if (relay)
760 assert(relay->link->node == node);
7dc0f844 761
81666e1b
MD
762 if (DebugOpt > 8)
763 fprintf(stderr, "relay scan for connection %p\n", conn);
7dc0f844
MD
764
765 /*
766 * Iterate the node's links (received SPANs) in distance order,
767 * lowest (best) dist first.
768 */
29ead430 769 /* fprintf(stderr, "LOOP\n"); */
7dc0f844
MD
770 RB_FOREACH(slink, h2span_link_tree, &node->tree) {
771 /*
29ead430
MD
772 fprintf(stderr, "SLINK %p RELAY %p(%p)\n",
773 slink, relay, relay ? relay->link : NULL);
774 */
775 /*
cf715800 776 * PROPAGATE THE BEST LINKS OVER THE SPECIFIED CONNECTION.
7dc0f844 777 *
cf715800
MD
778 * Track relays while iterating the best links and construct
779 * missing relays when necessary.
7dc0f844
MD
780 *
781 * (If some prior better link was removed it would have also
782 * removed the relay, so the relay can only match exactly or
29ead430 783 * be worse).
7dc0f844 784 */
cf715800
MD
785 if (relay && relay->link == slink) {
786 /*
29ead430
MD
787 * Match, relay already in-place, get the next
788 * relay to match against the next slink.
cf715800
MD
789 */
790 relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
791 if (--count == 0)
792 break;
793 } else if (slink->dist > HAMMER2_SPAN_MAXDIST) {
794 /*
795 * No match but span distance is too great,
796 * do not relay. This prevents endless closed
797 * loops with ever-incrementing distances when
798 * the seed span is lost in the graph.
29ead430
MD
799 *
800 * All later spans will also be too far away so
801 * we can break out of the loop.
cf715800 802 */
29ead430 803 break;
cf715800
MD
804 } else {
805 /*
806 * No match, distance is ok, construct a new relay.
29ead430 807 * (slink is better than relay).
cf715800 808 */
7dc0f844
MD
809 hammer2_msg_t *msg;
810
811 assert(relay == NULL ||
29ead430
MD
812 relay->link->node != slink->node ||
813 relay->link->dist >= slink->dist);
7dc0f844
MD
814 relay = hammer2_alloc(sizeof(*relay));
815 relay->conn = conn;
816 relay->link = slink;
817
90e8cd1d 818 msg = hammer2_msg_alloc(conn->state->iocom->router, 0,
29ead430
MD
819 HAMMER2_LNK_SPAN |
820 HAMMER2_MSGF_CREATE,
821 hammer2_lnk_relay, relay);
822 relay->state = msg->state;
90e8cd1d
MD
823 relay->router = hammer2_router_alloc();
824 relay->router->iocom = relay->state->iocom;
825 relay->router->relay = relay;
826 relay->router->spanid = relay->state->msgid;
827
29ead430
MD
828 msg->any.lnk_span = slink->state->msg->any.lnk_span;
829 msg->any.lnk_span.dist = slink->dist + 1;
830
90e8cd1d
MD
831 hammer2_router_connect(relay->router);
832
cf715800
MD
833 RB_INSERT(h2span_relay_tree, &conn->tree, relay);
834 TAILQ_INSERT_TAIL(&slink->relayq, relay, entry);
835
29ead430 836 hammer2_msg_write(msg);
7dc0f844 837
81666e1b 838 fprintf(stderr,
29ead430 839 "RELAY SPAN %p RELAY %p ON CLS=%p NODE=%p DIST=%d "
cf715800 840 "FD %d state %p\n",
29ead430
MD
841 slink,
842 relay,
cf715800 843 node->cls, node, slink->dist,
7dc0f844
MD
844 conn->state->iocom->sock_fd, relay->state);
845
cf715800
MD
846 /*
847 * Match (created new relay), get the next relay to
848 * match against the next slink.
849 */
850 relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
851 if (--count == 0)
852 break;
7dc0f844
MD
853 }
854 }
855
856 /*
857 * Any remaining relay's belonging to this connection which match
858 * the node are in excess of the current aggregate spanning state
859 * and should be removed.
860 */
861 while (relay && relay->link->node == node) {
862 next_relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
863 hammer2_relay_delete(relay);
864 relay = next_relay;
865 }
866}
867
static
void
hammer2_relay_delete(h2span_relay_t *relay)
{
	fprintf(stderr,
		"RELAY DELETE %p RELAY %p ON CLS=%p NODE=%p DIST=%d "
		"FD %d STATE %p\n",
		relay->link,
		relay,
		relay->link->node->cls, relay->link->node,
		relay->link->dist,
		relay->conn->state->iocom->sock_fd, relay->state);

	hammer2_router_disconnect(&relay->router);

	RB_REMOVE(h2span_relay_tree, &relay->conn->tree, relay);
	TAILQ_REMOVE(&relay->link->relayq, relay, entry);

	if (relay->state) {
		relay->state->any.relay = NULL;
		hammer2_state_reply(relay->state, 0);
		/* state invalid after reply */
		relay->state = NULL;
	}
	relay->conn = NULL;
	relay->link = NULL;
	hammer2_free(relay);
}

/************************************************************************
 *			ROUTER AND MESSAGING HANDLES			*
 ************************************************************************
 *
 * Basically the idea here is to provide a stable data structure which
 * can be localized to the caller for higher level protocols to work with.
 * Depending on the context, these hammer2 handles can be pooled by
 * use-case and remain persistent through a client's (or mount point's)
 * life.
 */

#if 0
/*
 * Obtain a stable handle on a cluster given its uuid.  This ties directly
 * into the global cluster topology, creating the structure if necessary
 * (even if the uuid does not exist or does not exist yet), and preventing
 * the structure from getting ripped out from under us while we hold a
 * pointer to it.
 */
h2span_cluster_t *
hammer2_cluster_get(uuid_t *pfs_clid)
{
	h2span_cluster_t dummy_cls;
	h2span_cluster_t *cls;

	dummy_cls.pfs_clid = *pfs_clid;
	pthread_mutex_lock(&cluster_mtx);
	cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
	if (cls)
		++cls->refs;
	pthread_mutex_unlock(&cluster_mtx);
	return (cls);
}

void
hammer2_cluster_put(h2span_cluster_t *cls)
{
	pthread_mutex_lock(&cluster_mtx);
	assert(cls->refs > 0);
	--cls->refs;
	if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
		RB_REMOVE(h2span_cluster_tree,
			  &cluster_tree, cls);
		hammer2_free(cls);
	}
	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Obtain a stable handle to a specific cluster node given its uuid.
 * This handle does NOT lock in the route to the node and is typically
 * used as part of the hammer2_handle_*() API to obtain a set of
 * stable nodes.
 */
h2span_node_t *
hammer2_node_get(h2span_cluster_t *cls, uuid_t *pfs_fsid)
{
	h2span_node_t dummy_node;	/* sketch completion; original stub
					 * was empty.  Assumes caller holds
					 * a ref on cls via cluster_get. */

	dummy_node.pfs_fsid = *pfs_fsid;
	return (RB_FIND(h2span_node_tree, &cls->tree, &dummy_node));
}

#endif

#if 0
/*
 * Acquire a persistent router structure given the cluster and node ids.
 * Messages can be transacted via this structure while held.  If the route
 * is lost messages will return failure.
 */
hammer2_router_t *
hammer2_router_get(uuid_t *pfs_clid, uuid_t *pfs_fsid)
{
}

/*
 * Release previously acquired router.
 */
void
hammer2_router_put(hammer2_router_t *router)
{
}
#endif

/************************************************************************
 *				DEBUGGER				*
 ************************************************************************/
/*
 * Dumps the spanning tree
 */
void
shell_tree(hammer2_router_t *router, char *cmdbuf __unused)
{
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	char *uustr = NULL;

	pthread_mutex_lock(&cluster_mtx);
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		router_printf(router, "Cluster %s\n",
			      hammer2_uuid_to_str(&cls->pfs_clid, &uustr));
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			router_printf(router, "    Node %s (%s)\n",
				      hammer2_uuid_to_str(&node->pfs_fsid,
							  &uustr),
				      node->label);
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				router_printf(router,
					      "\tLink dist=%d via %d\n",
					      slink->dist,
					      slink->state->iocom->sock_fd);
			}
		}
	}
	pthread_mutex_unlock(&cluster_mtx);
	if (uustr)
		free(uustr);
#if 0
	TAILQ_FOREACH(conn, &connq, entry) {
	}
#endif
}