hammer2 - Message span tree work
sbin/hammer2/msg_lnk.c
/*
 * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * LNK_SPAN PROTOCOL SUPPORT FUNCTIONS
 *
 * This code supports the LNK_SPAN protocol.  Essentially all PFSs,
 * clients, and services rendezvous with the userland hammer2 service and
 * open LNK_SPAN transactions using a message header linkid of 0,
 * registering any PFSs they have connectivity to with us.
 *
 * --
 *
 * Each registration maintains its own open LNK_SPAN message transaction.
 * The SPANs are collected, aggregated, and retransmitted over available
 * connections through the maintenance of additional LNK_SPAN message
 * transactions on each link.
 *
 * The msgid for each active LNK_SPAN transaction we receive allows us to
 * send a message to the target PFS (which might be one of many belonging
 * to the same cluster) by specifying that msgid as the linkid in any
 * message we send to the target PFS.
 *
 * Similarly, the msgid we allocate for any LNK_SPAN transaction we
 * transmit (and remember, we maintain multiple open LNK_SPAN transactions
 * on each connection representing the topology span, so every node sees
 * every other node as a separate open transaction) can be used by the
 * other end to route messages through us to another node, ultimately
 * winding up at the identified hammer2 PFS.  We have to adjust the spanid
 * in the message header at each hop to be representative of the outgoing
 * LNK_SPAN we are forwarding the message through.
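 *
 * For example (an illustrative sketch of the above): if node A reaches a
 * PFS on node D via B and C, A addresses its message using the msgid of
 * the LNK_SPAN it received from B as the linkid; B rewrites the spanid to
 * the msgid of the matching LNK_SPAN it received from C, and so on, until
 * the message reaches the PFS on D.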
 *
 * --
 *
 * If we were to retransmit every LNK_SPAN transaction we receive it would
 * create a huge mess, so we have to aggregate all received LNK_SPAN
 * transactions, sort them by the fsid (the cluster) and sub-sort them by
 * the pfs_fsid (individual nodes in the cluster), and only retransmit
 * (create outgoing transactions) for a subset of the nearest distance-hops
 * for each individual node.
 *
 * The higher level protocols can then issue transactions to the nodes making
 * up a cluster to perform all actions required.
 *
 * --
 *
 * Since this is a large topology and a spanning tree protocol, links can
 * go up and down all the time.  Any time a link goes down its transaction
 * is closed.  The transaction has to be closed on both ends before we can
 * delete (and potentially reuse) the related spanid.  The LNK_SPAN being
 * closed may have been propagated out to other connections and those related
 * LNK_SPANs are also closed.  Ultimately all routes via the lost LNK_SPAN
 * go away, eventually reaching all sources and all targets.
 *
 * Any messages in-transit using a route that goes away will be thrown away.
 * Open transactions are only tracked at the two end-points.  When a link
 * failure propagates to an end-point the related open transactions lose
 * their spanid and are automatically aborted.
 *
 * It is important to note that internal route nodes cannot just associate
 * a lost LNK_SPAN transaction with another route to the same destination.
 * Message transactions MUST be serialized and MUST be ordered.  All messages
 * for a transaction must run over the same route.  So if the route used by
 * an active transaction is lost, the related messages will be fully aborted
 * and the higher protocol levels will retry as appropriate.
 *
 * FULLY ABORTING A ROUTED MESSAGE is handled via link-failure propagation
 * back to the originator.  Only the originator keeps track of a message.
 * Routers just pass it through.  If a route is lost during transit the
 * message is simply thrown away.
 *
 * It is also important to note that several paths to the same PFS can be
 * propagated along the same link, which allows concurrency and even
 * redundancy over several network interfaces or via different routes through
 * the topology.  Any given transaction will use only a single route but busy
 * servers will often have hundreds of transactions active simultaneously,
 * so having multiple active paths through the network topology for A<->B
 * will improve performance.
 *
 * --
 *
 * Most protocols consolidate operations rather than simply relaying them.
 * This is particularly true of LEAF protocols (such as strict HAMMER2
 * clients), of which there can be millions connecting into the cluster at
 * various points.  The SPAN protocol is not used for these LEAF elements.
 *
 * Instead the primary service they connect to implements a proxy for the
 * client protocols so the core topology only has to propagate a couple of
 * LNK_SPANs and not millions.  LNK_SPANs are meant to be used only for
 * core master nodes and satellite slaves and cache nodes.
 */

#include "hammer2.h"

/*
 * Maximum spanning tree distance.  This has the practical effect of
 * stopping tail-chasing closed loops when a feeder span is lost.
 */
#define HAMMER2_SPAN_MAXDIST	16
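
/*
 * Worked example (illustrative): hammer2_relay_scan_specific() below
 * re-propagates a span received at distance N as distance N+1.  If the
 * feeder span for a cycle A->B->C->A is lost, the surviving relayed spans
 * can chase each other around the loop at dist 3, 4, 5, ... and never
 * settle; refusing to relay spans with dist > HAMMER2_SPAN_MAXDIST bounds
 * that tail-chasing.
 */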

/*
 * RED-BLACK TREE DEFINITIONS
 *
 * We need to track:
 *
 * (1) shared fsid's (a cluster).
 * (2) unique fsid's (a node in a cluster) <--- LNK_SPAN transactions.
 *
 * We need to aggregate all active LNK_SPANs and create our own
 * outgoing LNK_SPAN transactions on each of our connections representing
 * the aggregated state.
 *
 * h2span_connect	- list of iocom connections who wish to receive SPAN
 *			  propagation from other connections.  Might contain
 *			  a filter string.  Only iocom's with an open
 *			  LNK_CONN transaction are applicable for SPAN
 *			  propagation.
 *
 * h2span_relay		- List of links relayed (via SPAN).  Essentially
 *			  each relay structure represents a LNK_SPAN
 *			  transaction that we initiated, versus h2span_link
 *			  which is a LNK_SPAN transaction that we received.
 *
 * --
 *
 * h2span_cluster	- Organizes the shared fsid's.  One structure for
 *			  each cluster.
 *
 * h2span_node		- Organizes the nodes in a cluster.  One structure
 *			  for each unique {cluster,node}, aka {fsid, pfs_fsid}.
 *
 * h2span_link		- Organizes all incoming and outgoing LNK_SPAN message
 *			  transactions related to a node.
 *
 *			  One h2span_link structure for each incoming LNK_SPAN
 *			  transaction.  Links selected for propagation back
 *			  out are also where the outgoing LNK_SPAN messages
 *			  are indexed into (so we can propagate changes).
 *
 *			  The h2span_link's use a red-black tree to sort the
 *			  distance hop metric for the incoming LNK_SPAN.  We
 *			  then select the top N for outgoing.  When the
 *			  topology changes the top N may also change and cause
 *			  new outgoing LNK_SPAN transactions to be opened
 *			  and less desirable ones to be closed, causing
 *			  transactional aborts within the message flow in
 *			  the process.
 *
 * Also note		- All outgoing LNK_SPAN message transactions are also
 *			  entered into a red-black tree for use by the routing
 *			  function.  This is handled by msg.c in the state
 *			  code, not here.
 */
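
/*
 * Summary sketch of the tracking structures described above (added for
 * orientation; same information as the comment above):
 *
 *	cluster_tree (indexed by pfs_clid)
 *	    h2span_cluster
 *		h2span_node (indexed by pfs_fsid)
 *		    h2span_link (one per received LNK_SPAN, sorted by dist)
 *			h2span_relay (outgoing LNK_SPAN, per connection)
 */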

struct h2span_link;
struct h2span_relay;
TAILQ_HEAD(h2span_connect_queue, h2span_connect);
TAILQ_HEAD(h2span_relay_queue, h2span_relay);

RB_HEAD(h2span_cluster_tree, h2span_cluster);
RB_HEAD(h2span_node_tree, h2span_node);
RB_HEAD(h2span_link_tree, h2span_link);
RB_HEAD(h2span_relay_tree, h2span_relay);

/*
 * Received LNK_CONN transaction enables SPAN protocol over connection.
 * (may contain filter).
 */
struct h2span_connect {
	TAILQ_ENTRY(h2span_connect) entry;
	struct h2span_relay_tree tree;
	hammer2_state_t *state;
};

/*
 * All received LNK_SPANs are organized by cluster (pfs_clid),
 * node (pfs_fsid), and link (received LNK_SPAN transaction).
 */
struct h2span_cluster {
	RB_ENTRY(h2span_cluster) rbnode;
	struct h2span_node_tree tree;
	uuid_t	pfs_clid;		/* shared fsid */
	int	refs;			/* prevents destruction */
};

struct h2span_node {
	RB_ENTRY(h2span_node) rbnode;
	struct h2span_link_tree tree;
	struct h2span_cluster *cls;
	uuid_t	pfs_fsid;		/* unique fsid */
	char	label[64];
};

struct h2span_link {
	RB_ENTRY(h2span_link) rbnode;
	hammer2_state_t	*state;		/* state<->link */
	struct h2span_node *node;	/* related node */
	int32_t	dist;
	struct h2span_relay_queue relayq; /* relay out */
	struct hammer2_router *router;	/* route out this link */
};

/*
 * Any LNK_SPAN transactions we receive which are relayed out other
 * connections utilize this structure to track the LNK_SPAN transaction
 * we initiate on the other connections, if selected for relay.
 *
 * In many respects this is the core of the protocol... actually figuring
 * out what LNK_SPANs to relay.  The spanid used for relaying is the
 * address of the 'state' structure, which is why h2span_relay has to
 * be entered into an RB-TREE based at h2span_connect (so we can look
 * up the spanid to validate it).
 *
 * NOTE: Messages can be received via the LNK_SPAN transaction the
 *	 relay maintains, and can be replied via relay->router, but
 *	 messages are NOT initiated via a relay.  Messages are initiated
 *	 via incoming links (h2span_link's).
 *
 * relay->link represents the link being relayed, NOT the LNK_SPAN
 * transaction the relay is holding open.
 */
struct h2span_relay {
	RB_ENTRY(h2span_relay) rbnode;	/* from h2span_connect */
	TAILQ_ENTRY(h2span_relay) entry; /* from link */
	struct h2span_connect *conn;
	hammer2_state_t	*state;		/* transmitted LNK_SPAN */
	struct h2span_link *link;	/* LNK_SPAN being relayed */
	struct hammer2_router *router;	/* route out this relay */
};


typedef struct h2span_connect h2span_connect_t;
typedef struct h2span_cluster h2span_cluster_t;
typedef struct h2span_node h2span_node_t;
typedef struct h2span_link h2span_link_t;
typedef struct h2span_relay h2span_relay_t;

static
int
h2span_cluster_cmp(h2span_cluster_t *cls1, h2span_cluster_t *cls2)
{
	return(uuid_compare(&cls1->pfs_clid, &cls2->pfs_clid, NULL));
}

static
int
h2span_node_cmp(h2span_node_t *node1, h2span_node_t *node2)
{
	return(uuid_compare(&node1->pfs_fsid, &node2->pfs_fsid, NULL));
}

/*
 * Sort/subsort must match h2span_relay_cmp() under any given node
 * to make the aggregation algorithm easier, so the best links are
 * in the same sorted order as the best relays.
 *
 * NOTE: We cannot use link*->state->msgid because this msgid is created
 *	 by each remote host and thus might wind up being the same.
 */
static
int
h2span_link_cmp(h2span_link_t *link1, h2span_link_t *link2)
{
	if (link1->dist < link2->dist)
		return(-1);
	if (link1->dist > link2->dist)
		return(1);
#if 1
	if ((uintptr_t)link1->state < (uintptr_t)link2->state)
		return(-1);
	if ((uintptr_t)link1->state > (uintptr_t)link2->state)
		return(1);
#else
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
#endif
	return(0);
}

/*
 * Relay entries are sorted by node, subsorted by distance and link
 * address (so we can match up the conn->tree relay topology with
 * a node's link topology).
 */
static
int
h2span_relay_cmp(h2span_relay_t *relay1, h2span_relay_t *relay2)
{
	h2span_link_t *link1 = relay1->link;
	h2span_link_t *link2 = relay2->link;

	if ((intptr_t)link1->node < (intptr_t)link2->node)
		return(-1);
	if ((intptr_t)link1->node > (intptr_t)link2->node)
		return(1);
	if (link1->dist < link2->dist)
		return(-1);
	if (link1->dist > link2->dist)
		return(1);
#if 1
	if ((uintptr_t)link1->state < (uintptr_t)link2->state)
		return(-1);
	if ((uintptr_t)link1->state > (uintptr_t)link2->state)
		return(1);
#else
	if (link1->state->msgid < link2->state->msgid)
		return(-1);
	if (link1->state->msgid > link2->state->msgid)
		return(1);
#endif
	return(0);
}

RB_PROTOTYPE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_PROTOTYPE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_PROTOTYPE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_PROTOTYPE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);

RB_GENERATE_STATIC(h2span_cluster_tree, h2span_cluster,
	     rbnode, h2span_cluster_cmp);
RB_GENERATE_STATIC(h2span_node_tree, h2span_node,
	     rbnode, h2span_node_cmp);
RB_GENERATE_STATIC(h2span_link_tree, h2span_link,
	     rbnode, h2span_link_cmp);
RB_GENERATE_STATIC(h2span_relay_tree, h2span_relay,
	     rbnode, h2span_relay_cmp);

/*
 * Global mutex protects cluster_tree lookups.
 */
static pthread_mutex_t cluster_mtx;
static struct h2span_cluster_tree cluster_tree = RB_INITIALIZER(cluster_tree);
static struct h2span_connect_queue connq = TAILQ_HEAD_INITIALIZER(connq);

static void hammer2_lnk_span(hammer2_msg_t *msg);
static void hammer2_lnk_conn(hammer2_msg_t *msg);
static void hammer2_lnk_relay(hammer2_msg_t *msg);
static void hammer2_relay_scan(h2span_connect_t *conn, h2span_node_t *node);
static void hammer2_relay_delete(h2span_relay_t *relay);

void
hammer2_msg_lnk_signal(hammer2_router_t *router __unused)
{
	pthread_mutex_lock(&cluster_mtx);
	hammer2_relay_scan(NULL, NULL);
	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Receive a HAMMER2_MSG_PROTO_LNK message.  This is only called for
 * one-way and opening transactions since state->func will be assigned
 * in all other cases.
 */
void
hammer2_msg_lnk(hammer2_msg_t *msg)
{
	switch(msg->any.head.cmd & HAMMER2_MSGF_BASECMDMASK) {
	case HAMMER2_LNK_CONN:
		hammer2_lnk_conn(msg);
		break;
	case HAMMER2_LNK_SPAN:
		hammer2_lnk_span(msg);
		break;
	default:
		fprintf(stderr,
			"MSG_PROTO_LNK: Unknown msg %08x\n", msg->any.head.cmd);
		hammer2_msg_reply(msg, HAMMER2_MSG_ERR_NOSUPP);
		/* state invalid after reply */
		break;
	}
}

void
hammer2_lnk_conn(hammer2_msg_t *msg)
{
	hammer2_state_t *state = msg->state;
	h2span_connect_t *conn;
	h2span_relay_t *relay;
	char *alloc = NULL;

	pthread_mutex_lock(&cluster_mtx);

	/*
	 * On transaction start we allocate a new h2span_connect and
	 * acknowledge the request, leaving the transaction open.
	 * We then relay priority-selected SPANs.
	 */
	if (msg->any.head.cmd & HAMMER2_MSGF_CREATE) {
		state->func = hammer2_lnk_conn;

		fprintf(stderr, "LNK_CONN(%08x): %s/%s\n",
			(uint32_t)msg->any.head.msgid,
			hammer2_uuid_to_str(&msg->any.lnk_conn.pfs_clid,
					    &alloc),
			msg->any.lnk_conn.label);
		free(alloc);

		conn = hammer2_alloc(sizeof(*conn));

		RB_INIT(&conn->tree);
		conn->state = state;
		state->any.conn = conn;
		TAILQ_INSERT_TAIL(&connq, conn, entry);

		hammer2_msg_result(msg, 0);

#if 0
		/*
		 * Span-synchronize all nodes with the new connection
		 */
		hammer2_relay_scan(conn, NULL);
#endif
		hammer2_router_signal(msg->router);
	}

	/*
	 * On transaction terminate we clean out our h2span_connect
	 * and acknowledge the request, closing the transaction.
	 */
	if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
		fprintf(stderr, "LNK_CONN: Terminated\n");
		conn = state->any.conn;
		assert(conn);

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay transaction.
		 */
		while ((relay = RB_ROOT(&conn->tree)) != NULL) {
			hammer2_relay_delete(relay);
		}

		/*
		 * Clean out conn
		 */
		conn->state = NULL;
		msg->state->any.conn = NULL;
		TAILQ_REMOVE(&connq, conn, entry);
		hammer2_free(conn);

		hammer2_msg_reply(msg, 0);
		/* state invalid after reply */
	}
	pthread_mutex_unlock(&cluster_mtx);
}

void
hammer2_lnk_span(hammer2_msg_t *msg)
{
	hammer2_state_t *state = msg->state;
	h2span_cluster_t dummy_cls;
	h2span_node_t dummy_node;
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	h2span_relay_t *relay;
	char *alloc = NULL;

	assert((msg->any.head.cmd & HAMMER2_MSGF_REPLY) == 0);

	pthread_mutex_lock(&cluster_mtx);

	/*
	 * On transaction start we initialize the tracking infrastructure
	 */
	if (msg->any.head.cmd & HAMMER2_MSGF_CREATE) {
		assert(state->func == NULL);
		state->func = hammer2_lnk_span;

		msg->any.lnk_span.label[sizeof(msg->any.lnk_span.label)-1] = 0;

		/*
		 * Find the cluster
		 */
		dummy_cls.pfs_clid = msg->any.lnk_span.pfs_clid;
		cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
		if (cls == NULL) {
			cls = hammer2_alloc(sizeof(*cls));
			cls->pfs_clid = msg->any.lnk_span.pfs_clid;
			RB_INIT(&cls->tree);
			RB_INSERT(h2span_cluster_tree, &cluster_tree, cls);
		}

		/*
		 * Find the node
		 */
		dummy_node.pfs_fsid = msg->any.lnk_span.pfs_fsid;
		node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
		if (node == NULL) {
			node = hammer2_alloc(sizeof(*node));
			node->pfs_fsid = msg->any.lnk_span.pfs_fsid;
			node->cls = cls;
			RB_INIT(&node->tree);
			RB_INSERT(h2span_node_tree, &cls->tree, node);
			snprintf(node->label, sizeof(node->label),
				 "%s", msg->any.lnk_span.label);
		}

		/*
		 * Create the link
		 */
		assert(state->any.link == NULL);
		slink = hammer2_alloc(sizeof(*slink));
		TAILQ_INIT(&slink->relayq);
		slink->node = node;
		slink->dist = msg->any.lnk_span.dist;
		slink->state = state;
		state->any.link = slink;

		/*
		 * Embedded router structure in link for message forwarding.
		 *
		 * The spanning id for the router is the message id of
		 * the SPAN link it is embedded in, allowing messages to
		 * be routed via &slink->router.
		 */
		slink->router = hammer2_router_alloc();
		slink->router->iocom = state->iocom;
		slink->router->link = slink;
		slink->router->target = state->msgid;
		hammer2_router_connect(slink->router);

		RB_INSERT(h2span_link_tree, &node->tree, slink);

		fprintf(stderr, "LNK_SPAN(thr %p): %p %s/%s dist=%d\n",
			msg->router->iocom,
			slink,
			hammer2_uuid_to_str(&msg->any.lnk_span.pfs_clid,
					    &alloc),
			msg->any.lnk_span.label,
			msg->any.lnk_span.dist);
		free(alloc);
#if 0
		hammer2_relay_scan(NULL, node);
#endif
		hammer2_router_signal(msg->router);
	}

	/*
	 * On transaction terminate we remove the tracking infrastructure.
	 */
	if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
		slink = state->any.link;
		assert(slink != NULL);
		node = slink->node;
		cls = node->cls;

		fprintf(stderr, "LNK_DELE(thr %p): %p %s/%s dist=%d\n",
			msg->router->iocom,
			slink,
			hammer2_uuid_to_str(&cls->pfs_clid, &alloc),
			state->msg->any.lnk_span.label,
			state->msg->any.lnk_span.dist);
		free(alloc);

		/*
		 * Remove the router from consideration
		 */
		hammer2_router_disconnect(&slink->router);

		/*
		 * Clean out all relays.  This requires terminating each
		 * relay transaction.
		 */
		while ((relay = TAILQ_FIRST(&slink->relayq)) != NULL) {
			hammer2_relay_delete(relay);
		}

		/*
		 * Clean out the topology
		 */
		RB_REMOVE(h2span_link_tree, &node->tree, slink);
		if (RB_EMPTY(&node->tree)) {
			RB_REMOVE(h2span_node_tree, &cls->tree, node);
			if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
				RB_REMOVE(h2span_cluster_tree,
					  &cluster_tree, cls);
				hammer2_free(cls);
			}
			node->cls = NULL;
			hammer2_free(node);
			node = NULL;
		}
		state->any.link = NULL;
		slink->state = NULL;
		slink->node = NULL;
		hammer2_free(slink);

		/*
		 * We have to terminate the transaction
		 */
		hammer2_state_reply(state, 0);
		/* state invalid after reply */

		/*
		 * If the node still exists issue any required updates.  If
		 * it doesn't then all related relays have already been
		 * removed and there's nothing left to do.
		 */
#if 0
		if (node)
			hammer2_relay_scan(NULL, node);
#endif
		if (node)
			hammer2_router_signal(msg->router);
	}

	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Messages received on relay SPANs.  These are open transactions so it is
 * in fact possible for the other end to close the transaction.
 *
 * XXX MPRACE on state structure
 */
static void
hammer2_lnk_relay(hammer2_msg_t *msg)
{
	hammer2_state_t *state = msg->state;
	h2span_relay_t *relay;

	assert(msg->any.head.cmd & HAMMER2_MSGF_REPLY);

	if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
		pthread_mutex_lock(&cluster_mtx);
		if ((relay = state->any.relay) != NULL) {
			hammer2_relay_delete(relay);
		} else {
			hammer2_state_reply(state, 0);
		}
		pthread_mutex_unlock(&cluster_mtx);
	}
}

/*
 * Update relay transactions for SPANs.
 *
 * Called with cluster_mtx held.
 */
static void hammer2_relay_scan_specific(h2span_node_t *node,
					h2span_connect_t *conn);

static void
hammer2_relay_scan(h2span_connect_t *conn, h2span_node_t *node)
{
	h2span_cluster_t *cls;

	if (node) {
		/*
		 * Iterate specific node
		 */
		TAILQ_FOREACH(conn, &connq, entry)
			hammer2_relay_scan_specific(node, conn);
	} else {
		/*
		 * Full iteration.
		 *
		 * Iterate cluster ids, nodes, and either a specific connection
		 * or all connections.
		 */
		RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
			/*
			 * Iterate node ids
			 */
			RB_FOREACH(node, h2span_node_tree, &cls->tree) {
				/*
				 * Synchronize the node's link (received SPANs)
				 * with each connection's relays.
				 */
				if (conn) {
					hammer2_relay_scan_specific(node, conn);
				} else {
					TAILQ_FOREACH(conn, &connq, entry) {
						hammer2_relay_scan_specific(node,
									    conn);
					}
					assert(conn == NULL);
				}
			}
		}
	}
}

/*
 * Update the relay'd SPANs for this (node, conn).
 *
 * Iterate links and adjust relays to match.  We currently propagate up to
 * the top two links per node (the count in the code below).
 *
 * The hammer2_relay_scan_cmp() function locates the first relay element
 * for any given node.  The relay elements will be sub-sorted by dist.
 */
struct relay_scan_info {
	h2span_node_t *node;
	h2span_relay_t *relay;
};

static int
hammer2_relay_scan_cmp(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	if ((intptr_t)relay->link->node < (intptr_t)info->node)
		return(-1);
	if ((intptr_t)relay->link->node > (intptr_t)info->node)
		return(1);
	return(0);
}

static int
hammer2_relay_scan_callback(h2span_relay_t *relay, void *arg)
{
	struct relay_scan_info *info = arg;

	info->relay = relay;
	return(-1);
}

static void
hammer2_relay_scan_specific(h2span_node_t *node, h2span_connect_t *conn)
{
	struct relay_scan_info info;
	h2span_relay_t *relay;
	h2span_relay_t *next_relay;
	h2span_link_t *slink;
	int count = 2;

	info.node = node;
	info.relay = NULL;

	/*
	 * Locate the first related relay for the node on this connection.
	 * relay will be NULL if there were none.
	 */
	RB_SCAN(h2span_relay_tree, &conn->tree,
		hammer2_relay_scan_cmp, hammer2_relay_scan_callback, &info);
	relay = info.relay;
	info.relay = NULL;
	if (relay)
		assert(relay->link->node == node);

	if (DebugOpt > 8)
		fprintf(stderr, "relay scan for connection %p\n", conn);

	/*
	 * Iterate the node's links (received SPANs) in distance order,
	 * lowest (best) dist first.
	 */
	/* fprintf(stderr, "LOOP\n"); */
	RB_FOREACH(slink, h2span_link_tree, &node->tree) {
		/*
		fprintf(stderr, "SLINK %p RELAY %p(%p)\n",
			slink, relay, relay ? relay->link : NULL);
		*/
		/*
		 * PROPAGATE THE BEST LINKS OVER THE SPECIFIED CONNECTION.
		 *
		 * Track relays while iterating the best links and construct
		 * missing relays when necessary.
		 *
		 * (If some prior better link was removed it would have also
		 * removed the relay, so the relay can only match exactly or
		 * be worse).
		 */
		if (relay && relay->link == slink) {
			/*
			 * Match, relay already in-place, get the next
			 * relay to match against the next slink.
			 */
			relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
			if (--count == 0)
				break;
		} else if (slink->dist > HAMMER2_SPAN_MAXDIST) {
			/*
			 * No match but span distance is too great,
			 * do not relay.  This prevents endless closed
			 * loops with ever-incrementing distances when
			 * the seed span is lost in the graph.
			 *
			 * All later spans will also be too far away so
			 * we can break out of the loop.
			 */
			break;
		} else if (slink->state->iocom == conn->state->iocom) {
			/*
			 * No match but we would transmit a LNK_SPAN
			 * out the same connection it came in on, which
			 * can be trivially optimized out.
			 */
			break;
		} else {
			/*
			 * No match, distance is ok, construct a new relay.
			 * (slink is better than relay).
			 */
			hammer2_msg_t *msg;

			assert(relay == NULL ||
			       relay->link->node != slink->node ||
			       relay->link->dist >= slink->dist);
			relay = hammer2_alloc(sizeof(*relay));
			relay->conn = conn;
			relay->link = slink;

			msg = hammer2_msg_alloc(conn->state->iocom->router, 0,
						HAMMER2_LNK_SPAN |
						HAMMER2_MSGF_CREATE,
						hammer2_lnk_relay, relay);
			relay->state = msg->state;
			relay->router = hammer2_router_alloc();
			relay->router->iocom = relay->state->iocom;
			relay->router->relay = relay;
			relay->router->target = relay->state->msgid;

			msg->any.lnk_span = slink->state->msg->any.lnk_span;
			msg->any.lnk_span.dist = slink->dist + 1;

			hammer2_router_connect(relay->router);

			RB_INSERT(h2span_relay_tree, &conn->tree, relay);
			TAILQ_INSERT_TAIL(&slink->relayq, relay, entry);

			hammer2_msg_write(msg);

			fprintf(stderr,
				"RELAY SPAN %p RELAY %p ON CLS=%p NODE=%p DIST=%d "
				"FD %d state %p\n",
				slink,
				relay,
				node->cls, node, slink->dist,
				conn->state->iocom->sock_fd, relay->state);

			/*
			 * Match (created new relay), get the next relay to
			 * match against the next slink.
			 */
			relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
			if (--count == 0)
				break;
		}
	}

	/*
	 * Any remaining relays belonging to this connection which match
	 * the node are in excess of the current aggregate spanning state
	 * and should be removed.
	 */
	while (relay && relay->link->node == node) {
		next_relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
		hammer2_relay_delete(relay);
		relay = next_relay;
	}
}

static
void
hammer2_relay_delete(h2span_relay_t *relay)
{
	fprintf(stderr,
		"RELAY DELETE %p RELAY %p ON CLS=%p NODE=%p DIST=%d FD %d STATE %p\n",
		relay->link,
		relay,
		relay->link->node->cls, relay->link->node,
		relay->link->dist,
		relay->conn->state->iocom->sock_fd, relay->state);

	hammer2_router_disconnect(&relay->router);

	RB_REMOVE(h2span_relay_tree, &relay->conn->tree, relay);
	TAILQ_REMOVE(&relay->link->relayq, relay, entry);

	if (relay->state) {
		relay->state->any.relay = NULL;
		hammer2_state_reply(relay->state, 0);
		/* state invalid after reply */
		relay->state = NULL;
	}
	relay->conn = NULL;
	relay->link = NULL;
	hammer2_free(relay);
}

/************************************************************************
 *			ROUTER AND MESSAGING HANDLES			*
 ************************************************************************
 *
 * Basically the idea here is to provide a stable data structure which
 * can be localized to the caller for higher level protocols to work with.
 * Depending on the context, these hammer2_handle's can be pooled by use-case
 * and remain persistent through a client's (or mount point's) life.
 */

#if 0
/*
 * Obtain a stable handle on a cluster given its uuid.  This ties directly
 * into the global cluster topology, creating the structure if necessary
 * (even if the uuid does not exist or does not exist yet), and preventing
 * the structure from getting ripped out from under us while we hold a
 * pointer to it.
 */
h2span_cluster_t *
hammer2_cluster_get(uuid_t *pfs_clid)
{
	h2span_cluster_t dummy_cls;
	h2span_cluster_t *cls;

	dummy_cls.pfs_clid = *pfs_clid;
	pthread_mutex_lock(&cluster_mtx);
	cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
	if (cls)
		++cls->refs;
	pthread_mutex_unlock(&cluster_mtx);
	return (cls);
}

void
hammer2_cluster_put(h2span_cluster_t *cls)
{
	pthread_mutex_lock(&cluster_mtx);
	assert(cls->refs > 0);
	--cls->refs;
	if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
		RB_REMOVE(h2span_cluster_tree,
			  &cluster_tree, cls);
		hammer2_free(cls);
	}
	pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Obtain a stable handle to a specific cluster node given its uuid.
 * This handle does NOT lock in the route to the node and is typically
 * used as part of the hammer2_handle_*() API to obtain a set of
 * stable nodes.
 */
h2span_node_t *
hammer2_node_get(h2span_cluster_t *cls, uuid_t *pfs_fsid)
{
}

#endif
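
#if 0
/*
 * Illustrative sketch (not from the original source): how a higher level
 * protocol might use the ref-counted cluster handles above if they were
 * enabled.  The helper name example_cluster_usage() is hypothetical.
 * Note that cls->refs only prevents destruction of the h2span_cluster
 * itself; walking cls->tree still requires holding cluster_mtx.
 */
static void
example_cluster_usage(uuid_t *pfs_clid)
{
	h2span_cluster_t *cls;

	cls = hammer2_cluster_get(pfs_clid);	/* ++refs, NULL if unknown */
	if (cls == NULL)
		return;
	pthread_mutex_lock(&cluster_mtx);
	/* ... inspect cls->tree (h2span_node_tree) here ... */
	pthread_mutex_unlock(&cluster_mtx);
	hammer2_cluster_put(cls);		/* --refs, may free cls */
}
#endif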

#if 0
/*
 * Acquire a persistent router structure given the cluster and node ids.
 * Messages can be transacted via this structure while held.  If the route
 * is lost messages will return failure.
 */
hammer2_router_t *
hammer2_router_get(uuid_t *pfs_clid, uuid_t *pfs_fsid)
{
}

/*
 * Release previously acquired router.
 */
void
hammer2_router_put(hammer2_router_t *router)
{
}
#endif

/************************************************************************
 *				DEBUGGER				*
 ************************************************************************/
/*
 * Dumps the spanning tree
 */
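/*
 * Example output (illustrative only; uuids abbreviated, labels and
 * descriptor numbers arbitrary):
 *
 *	Cluster 2d6f63ae-...-b2a8
 *	    Node 9b334b26-...-59e5 (label)
 *		Link dist=1 via 4
 *		Link dist=2 via 7
 */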
void
shell_tree(hammer2_router_t *router, char *cmdbuf __unused)
{
	h2span_cluster_t *cls;
	h2span_node_t *node;
	h2span_link_t *slink;
	char *uustr = NULL;

	pthread_mutex_lock(&cluster_mtx);
	RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
		router_printf(router, "Cluster %s\n",
			      hammer2_uuid_to_str(&cls->pfs_clid, &uustr));
		RB_FOREACH(node, h2span_node_tree, &cls->tree) {
			router_printf(router, "    Node %s (%s)\n",
				      hammer2_uuid_to_str(&node->pfs_fsid, &uustr),
				      node->label);
			RB_FOREACH(slink, h2span_link_tree, &node->tree) {
				router_printf(router, "\tLink dist=%d via %d\n",
					      slink->dist,
					      slink->state->iocom->sock_fd);
			}
		}
	}
	pthread_mutex_unlock(&cluster_mtx);
	if (uustr)
		free(uustr);
#if 0
	TAILQ_FOREACH(conn, &connq, entry) {
	}
#endif
}