hammer2 - SPAN protocol work, router work
[dragonfly.git] / sbin / hammer2 / msg_lnk.c
/*
 * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * LNK_SPAN PROTOCOL SUPPORT FUNCTIONS
 *
 * This code supports the LNK_SPAN protocol.  Essentially all PFS's
 * clients and services rendezvous with the userland hammer2 service and
 * open LNK_SPAN transactions using a message header linkid of 0,
 * registering any PFS's they have connectivity to with us.
 *
 * --
 *
 * Each registration maintains its own open LNK_SPAN message transaction.
 * The SPANs are collected, aggregated, and retransmitted over available
 * connections through the maintenance of additional LNK_SPAN message
 * transactions on each link.
 *
 * The msgid for each active LNK_SPAN transaction we receive allows us to
 * send a message to the target PFS (which might be one of many belonging
 * to the same cluster), by specifying that msgid as the linkid in any
 * message we send to the target PFS.
 *
 * Similarly, the msgid we allocate for any LNK_SPAN transaction we
 * transmit (and remember, we maintain multiple open LNK_SPAN transactions
 * on each connection representing the topology span, so every node sees
 * every other node as a separate open transaction) can be used by the
 * other end to route messages through us to another node, ultimately
 * winding up at the identified hammer2 PFS.  We have to adjust the spanid
 * in the message header at each hop to be representative of the outgoing
 * LNK_SPAN we are forwarding the message through.
 *
 * --
 *
 * If we were to retransmit every LNK_SPAN transaction we receive it would
 * create a huge mess, so we have to aggregate all received LNK_SPAN
 * transactions, sort them by the fsid (the cluster) and sub-sort them by
 * the pfs_fsid (individual nodes in the cluster), and only retransmit
 * (create outgoing transactions) for a subset of the nearest distance-hops
 * for each individual node.
 *
 * The higher level protocols can then issue transactions to the nodes making
 * up a cluster to perform all actions required.
 *
 * --
 *
 * Since this is a large topology and a spanning tree protocol, links can
 * go up and down all the time.  Any time a link goes down its transaction
 * is closed.  The transaction has to be closed on both ends before we can
 * delete (and potentially reuse) the related spanid.  The LNK_SPAN being
 * closed may have been propagated out to other connections and those related
 * LNK_SPANs are also closed.  Ultimately all routes via the lost LNK_SPAN
 * go away, ultimately reaching all sources and all targets.
 *
 * Any messages in-transit using a route that goes away will be thrown away.
 * Open transactions are only tracked at the two end-points.  When a link
 * failure propagates to an end-point the related open transactions lose
 * their spanid and are automatically aborted.
 *
 * It is important to note that internal route nodes cannot just associate
 * a lost LNK_SPAN transaction with another route to the same destination.
 * Message transactions MUST be serialized and MUST be ordered.  All messages
 * for a transaction must run over the same route.  So if the route used by
 * an active transaction is lost, the related messages will be fully aborted
 * and the higher protocol levels will retry as appropriate.
 *
 * FULLY ABORTING A ROUTED MESSAGE is handled via link-failure propagation
 * back to the originator.  Only the originator keeps track of a message.
 * Routers just pass it through.  If a route is lost during transit the
 * message is simply thrown away.
 *
 * It is also important to note that several paths to the same PFS can be
 * propagated along the same link, which allows concurrency and even
 * redundancy over several network interfaces or via different routes through
 * the topology.  Any given transaction will use only a single route but busy
 * servers will often have hundreds of transactions active simultaneously,
 * so having multiple active paths through the network topology for A<->B
 * will improve performance.
 *
 * --
 *
 * Most protocols consolidate operations rather than simply relaying them.
 * This is particularly true of LEAF protocols (such as strict HAMMER2
 * clients), of which there can be millions connecting into the cluster at
 * various points.  The SPAN protocol is not used for these LEAF elements.
 *
 * Instead the primary service they connect to implements a proxy for the
 * client protocols so the core topology only has to propagate a couple of
 * LNK_SPANs and not millions.  LNK_SPANs are meant to be used only for
 * core master nodes and satellite slaves and cache nodes.
 */
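
/*
 * Illustrative sketch (not compiled): the aggregation rule in a nutshell.
 * Given the distances of every LNK_SPAN received for a single node, we
 * relay only the nearest N of them, re-advertising each at dist+1.  The
 * real logic lives in hammer2_relay_scan_specific() below; relay_out()
 * and the flat array representation are hypothetical simplifications.
 */
#if 0
static void relay_out(int32_t dist);           /* hypothetical */

static void
example_aggregate(int32_t *dist, int ndist, int maxrelay)
{
        int i;
        int j;
        int32_t tmp;

        /* sort received span distances ascending (insertion sort) */
        for (i = 1; i < ndist; ++i) {
                for (j = i; j > 0 && dist[j-1] > dist[j]; --j) {
                        tmp = dist[j];
                        dist[j] = dist[j-1];
                        dist[j-1] = tmp;
                }
        }

        /* relay only the nearest maxrelay spans, one hop further away */
        for (i = 0; i < ndist && i < maxrelay; ++i)
                relay_out(dist[i] + 1);
}
#endif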

#include "hammer2.h"

/*
 * Maximum spanning tree distance.  This has the practical effect of
 * stopping tail-chasing closed loops when a feeder span is lost.
 */
#define HAMMER2_SPAN_MAXDIST    16

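/*
 * Illustrative sketch (not compiled): why the cutoff terminates loops.
 * If the seed span is lost, a cycle of relays could keep re-advertising
 * each other's span, but every relay hop adds one to the distance (see
 * msg->any.lnk_span.dist = slink->dist + 1 below), so a circulating span
 * exceeds HAMMER2_SPAN_MAXDIST after a bounded number of hops and stops
 * being relayed.  example_should_relay() is a hypothetical helper.
 */
#if 0
static int
example_should_relay(int32_t dist)
{
        return (dist <= HAMMER2_SPAN_MAXDIST);
}
#endif
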
/*
 * RED-BLACK TREE DEFINITIONS
 *
 * We need to track:
 *
 * (1) shared fsid's (a cluster).
 * (2) unique fsid's (a node in a cluster) <--- LNK_SPAN transactions.
 *
 * We need to aggregate all active LNK_SPANs and create our own outgoing
 * LNK_SPAN transactions on each of our connections representing the
 * aggregated state.  (An illustrative lookup sketch follows the global
 * declarations below).
 *
 * h2span_connect       - list of iocom connections who wish to receive SPAN
 *                        propagation from other connections.  Might contain
 *                        a filter string.  Only iocom's with an open
 *                        LNK_CONN transaction are applicable for SPAN
 *                        propagation.
 *
 * h2span_relay         - List of links relayed (via SPAN).  Essentially
 *                        each relay structure represents a LNK_SPAN
 *                        transaction that we initiated, versus h2span_link
 *                        which is a LNK_SPAN transaction that we received.
 *
 * --
 *
 * h2span_cluster       - Organizes the shared fsid's.  One structure for
 *                        each cluster.
 *
 * h2span_node          - Organizes the nodes in a cluster.  One structure
 *                        for each unique {cluster,node}, aka {fsid, pfs_fsid}.
 *
 * h2span_link          - Organizes all incoming and outgoing LNK_SPAN message
 *                        transactions related to a node.
 *
 *                        One h2span_link structure for each incoming LNK_SPAN
 *                        transaction.  Links selected for propagation back
 *                        out are also where the outgoing LNK_SPAN messages
 *                        are indexed into (so we can propagate changes).
 *
 *                        The h2span_link's use a red-black tree to sort the
 *                        distance hop metric for the incoming LNK_SPAN.  We
 *                        then select the top N for outgoing.  When the
 *                        topology changes the top N may also change and cause
 *                        new outgoing LNK_SPAN transactions to be opened
 *                        and less desirable ones to be closed, causing
 *                        transactional aborts within the message flow in
 *                        the process.
 *
 * Also note            - All outgoing LNK_SPAN message transactions are also
 *                        entered into a red-black tree for use by the routing
 *                        function.  This is handled by msg.c in the state
 *                        code, not here.
 */

struct h2span_link;
struct h2span_relay;
TAILQ_HEAD(h2span_connect_queue, h2span_connect);
TAILQ_HEAD(h2span_relay_queue, h2span_relay);

RB_HEAD(h2span_cluster_tree, h2span_cluster);
RB_HEAD(h2span_node_tree, h2span_node);
RB_HEAD(h2span_link_tree, h2span_link);
RB_HEAD(h2span_relay_tree, h2span_relay);

/*
 * Received LNK_CONN transaction enables SPAN protocol over connection.
 * (may contain filter).
 */
struct h2span_connect {
        TAILQ_ENTRY(h2span_connect) entry;
        struct h2span_relay_tree tree;
        hammer2_state_t *state;
};

/*
 * All received LNK_SPANs are organized by cluster (pfs_clid),
 * node (pfs_fsid), and link (received LNK_SPAN transaction).
 */
struct h2span_cluster {
        RB_ENTRY(h2span_cluster) rbnode;
        struct h2span_node_tree tree;
        uuid_t pfs_clid;                /* shared fsid */
};

struct h2span_node {
        RB_ENTRY(h2span_node) rbnode;
        struct h2span_link_tree tree;
        struct h2span_cluster *cls;
        uuid_t pfs_fsid;                /* unique fsid */
        char label[64];
};

struct h2span_link {
        RB_ENTRY(h2span_link) rbnode;
        hammer2_state_t *state;         /* state<->link */
        struct h2span_node *node;       /* related node */
        int32_t dist;
        struct h2span_relay_queue relayq; /* relay out */
        struct hammer2_router router;
};

/*
 * Any LNK_SPAN transactions we receive which are relayed out other
 * connections utilize this structure to track the LNK_SPAN transaction
 * we initiate on the other connections, if selected for relay.
 *
 * In many respects this is the core of the protocol... actually figuring
 * out what LNK_SPANs to relay.  The spanid used for relaying is the
 * address of the 'state' structure, which is why h2span_relay has to
 * be entered into a RB-TREE based at h2span_connect (so we can look
 * up the spanid to validate it).
 */
struct h2span_relay {
        RB_ENTRY(h2span_relay) rbnode;  /* from h2span_connect */
        TAILQ_ENTRY(h2span_relay) entry; /* from link */
        struct h2span_connect *conn;
        hammer2_state_t *state;         /* transmitted LNK_SPAN */
        struct h2span_link *link;       /* received LNK_SPAN */
};


typedef struct h2span_connect h2span_connect_t;
typedef struct h2span_cluster h2span_cluster_t;
typedef struct h2span_node h2span_node_t;
typedef struct h2span_link h2span_link_t;
typedef struct h2span_relay h2span_relay_t;

static
int
h2span_cluster_cmp(h2span_cluster_t *cls1, h2span_cluster_t *cls2)
{
        return(uuid_compare(&cls1->pfs_clid, &cls2->pfs_clid, NULL));
}

static
int
h2span_node_cmp(h2span_node_t *node1, h2span_node_t *node2)
{
        return(uuid_compare(&node1->pfs_fsid, &node2->pfs_fsid, NULL));
}

/*
 * NOTE: Sort/subsort must match h2span_relay_cmp() under any given
 *       node.
 */
static
int
h2span_link_cmp(h2span_link_t *link1, h2span_link_t *link2)
{
        if (link1->dist < link2->dist)
                return(-1);
        if (link1->dist > link2->dist)
                return(1);
        if (link1->state->msgid < link2->state->msgid)
                return(-1);
        if (link1->state->msgid > link2->state->msgid)
                return(1);
        return(0);
}

/*
 * Relay entries are sorted by node, subsorted by distance and link
 * address (so we can match up the conn->tree relay topology with
 * a node's link topology).
 */
static
int
h2span_relay_cmp(h2span_relay_t *relay1, h2span_relay_t *relay2)
{
        h2span_link_t *link1 = relay1->link;
        h2span_link_t *link2 = relay2->link;

        if ((intptr_t)link1->node < (intptr_t)link2->node)
                return(-1);
        if ((intptr_t)link1->node > (intptr_t)link2->node)
                return(1);
        if (link1->dist < link2->dist)
                return(-1);
        if (link1->dist > link2->dist)
                return(1);
        if (link1->state->msgid < link2->state->msgid)
                return(-1);
        if (link1->state->msgid > link2->state->msgid)
                return(1);
        return(0);
}

RB_PROTOTYPE_STATIC(h2span_cluster_tree, h2span_cluster,
            rbnode, h2span_cluster_cmp);
RB_PROTOTYPE_STATIC(h2span_node_tree, h2span_node,
            rbnode, h2span_node_cmp);
RB_PROTOTYPE_STATIC(h2span_link_tree, h2span_link,
            rbnode, h2span_link_cmp);
RB_PROTOTYPE_STATIC(h2span_relay_tree, h2span_relay,
            rbnode, h2span_relay_cmp);

RB_GENERATE_STATIC(h2span_cluster_tree, h2span_cluster,
            rbnode, h2span_cluster_cmp);
RB_GENERATE_STATIC(h2span_node_tree, h2span_node,
            rbnode, h2span_node_cmp);
RB_GENERATE_STATIC(h2span_link_tree, h2span_link,
            rbnode, h2span_link_cmp);
RB_GENERATE_STATIC(h2span_relay_tree, h2span_relay,
            rbnode, h2span_relay_cmp);

/*
 * Global mutex protects cluster_tree lookups.
 */
static pthread_mutex_t cluster_mtx;
static struct h2span_cluster_tree cluster_tree = RB_INITIALIZER(cluster_tree);
static struct h2span_connect_queue connq = TAILQ_HEAD_INITIALIZER(connq);

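/*
 * Illustrative sketch (not compiled): walking the cluster/node/link
 * hierarchy.  This mirrors the RB_FIND lookups in hammer2_lnk_span()
 * and the iteration in shell_tree() below; find_best_link() itself is
 * a hypothetical helper.  Since h2span_link_cmp() sorts links lowest
 * distance first, the RB minimum is the best incoming span.
 */
#if 0
static h2span_link_t *
find_best_link(uuid_t *pfs_clid, uuid_t *pfs_fsid)
{
        h2span_cluster_t dummy_cls;
        h2span_node_t dummy_node;
        h2span_cluster_t *cls;
        h2span_node_t *node;

        dummy_cls.pfs_clid = *pfs_clid;
        cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
        if (cls == NULL)
                return(NULL);
        dummy_node.pfs_fsid = *pfs_fsid;
        node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
        if (node == NULL)
                return(NULL);
        return(RB_MIN(h2span_link_tree, &node->tree));
}
#endif
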
static void hammer2_lnk_span(hammer2_msg_t *msg);
static void hammer2_lnk_conn(hammer2_msg_t *msg);
static void hammer2_lnk_relay(hammer2_msg_t *msg);
static void hammer2_relay_scan(h2span_connect_t *conn, h2span_node_t *node);
static void hammer2_relay_delete(h2span_relay_t *relay);

void
hammer2_msg_lnk_signal(hammer2_router_t *router __unused)
{
        pthread_mutex_lock(&cluster_mtx);
        hammer2_relay_scan(NULL, NULL);
        pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Receive a HAMMER2_MSG_PROTO_LNK message.  This is only called for
 * one-way and opening-transactions since state->func will be assigned
 * in all other cases.
 */
void
hammer2_msg_lnk(hammer2_msg_t *msg)
{
        switch(msg->any.head.cmd & HAMMER2_MSGF_BASECMDMASK) {
        case HAMMER2_LNK_CONN:
                hammer2_lnk_conn(msg);
                break;
        case HAMMER2_LNK_SPAN:
                hammer2_lnk_span(msg);
                break;
        default:
                fprintf(stderr,
                        "MSG_PROTO_LNK: Unknown msg %08x\n", msg->any.head.cmd);
                hammer2_msg_reply(msg, HAMMER2_MSG_ERR_NOSUPP);
                /* state invalid after reply */
                break;
        }
}

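/*
 * Illustrative sketch (not compiled): what the sending side of an
 * opening LNK_SPAN transaction might look like.  The calls mirror the
 * relay construction in hammer2_relay_scan_specific() below;
 * example_span_open() and example_relay_callback() are hypothetical,
 * as are the field values.
 */
#if 0
static void example_relay_callback(hammer2_msg_t *msg);        /* hypothetical */

static void
example_span_open(hammer2_router_t *router, uuid_t *clid, uuid_t *fsid)
{
        hammer2_msg_t *msg;

        /* HAMMER2_MSGF_CREATE opens the transaction and leaves it open */
        msg = hammer2_msg_alloc(router, 0,
                                HAMMER2_LNK_SPAN | HAMMER2_MSGF_CREATE,
                                example_relay_callback, NULL);
        msg->any.lnk_span.pfs_clid = *clid;     /* cluster id */
        msg->any.lnk_span.pfs_fsid = *fsid;     /* node id */
        msg->any.lnk_span.dist = 1;             /* assumed starting distance */
        hammer2_msg_write(msg);
}
#endif
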
void
hammer2_lnk_conn(hammer2_msg_t *msg)
{
        hammer2_state_t *state = msg->state;
        h2span_connect_t *conn;
        h2span_relay_t *relay;
        char *alloc = NULL;

        pthread_mutex_lock(&cluster_mtx);

        /*
         * On transaction start we allocate a new h2span_connect and
         * acknowledge the request, leaving the transaction open.
         * We then relay priority-selected SPANs.
         */
        if (msg->any.head.cmd & HAMMER2_MSGF_CREATE) {
                state->func = hammer2_lnk_conn;

                fprintf(stderr, "LNK_CONN(%08x): %s/%s\n",
                        (uint32_t)msg->any.head.msgid,
                        hammer2_uuid_to_str(&msg->any.lnk_conn.pfs_clid,
                                            &alloc),
                        msg->any.lnk_conn.label);
                free(alloc);

                conn = hammer2_alloc(sizeof(*conn));

                RB_INIT(&conn->tree);
                conn->state = state;
                state->any.conn = conn;
                TAILQ_INSERT_TAIL(&connq, conn, entry);

                hammer2_msg_result(msg, 0);

#if 0
                /*
                 * Span-synchronize all nodes with the new connection
                 */
                hammer2_relay_scan(conn, NULL);
#endif
                hammer2_router_signal(msg->router);
        }

        /*
         * On transaction terminate we clean out our h2span_connect
         * and acknowledge the request, closing the transaction.
         */
        if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
                fprintf(stderr, "LNK_CONN: Terminated\n");
                conn = state->any.conn;
                assert(conn);

                /*
                 * Clean out all relays.  This requires terminating each
                 * relay transaction.
                 */
                while ((relay = RB_ROOT(&conn->tree)) != NULL) {
                        hammer2_relay_delete(relay);
                }

                /*
                 * Clean out conn
                 */
                conn->state = NULL;
                msg->state->any.conn = NULL;
                TAILQ_REMOVE(&connq, conn, entry);
                hammer2_free(conn);

                hammer2_msg_reply(msg, 0);
                /* state invalid after reply */
        }
        pthread_mutex_unlock(&cluster_mtx);
}

void
hammer2_lnk_span(hammer2_msg_t *msg)
{
        hammer2_state_t *state = msg->state;
        h2span_cluster_t dummy_cls;
        h2span_node_t dummy_node;
        h2span_cluster_t *cls;
        h2span_node_t *node;
        h2span_link_t *slink;
        h2span_relay_t *relay;
        char *alloc = NULL;

        assert((msg->any.head.cmd & HAMMER2_MSGF_REPLY) == 0);

        pthread_mutex_lock(&cluster_mtx);

        /*
         * On transaction start we initialize the tracking infrastructure
         */
        if (msg->any.head.cmd & HAMMER2_MSGF_CREATE) {
                assert(state->func == NULL);
                state->func = hammer2_lnk_span;

                msg->any.lnk_span.label[sizeof(msg->any.lnk_span.label)-1] = 0;

                /*
                 * Find the cluster
                 */
                dummy_cls.pfs_clid = msg->any.lnk_span.pfs_clid;
                cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
                if (cls == NULL) {
                        cls = hammer2_alloc(sizeof(*cls));
                        cls->pfs_clid = msg->any.lnk_span.pfs_clid;
                        RB_INIT(&cls->tree);
                        RB_INSERT(h2span_cluster_tree, &cluster_tree, cls);
                }

                /*
                 * Find the node
                 */
                dummy_node.pfs_fsid = msg->any.lnk_span.pfs_fsid;
                node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
                if (node == NULL) {
                        node = hammer2_alloc(sizeof(*node));
                        node->pfs_fsid = msg->any.lnk_span.pfs_fsid;
                        node->cls = cls;
                        RB_INIT(&node->tree);
                        RB_INSERT(h2span_node_tree, &cls->tree, node);
                        snprintf(node->label, sizeof(node->label),
                                 "%s", msg->any.lnk_span.label);
                }

                /*
                 * Create the link
                 */
                assert(state->any.link == NULL);
                slink = hammer2_alloc(sizeof(*slink));
                TAILQ_INIT(&slink->relayq);
                slink->node = node;
                slink->dist = msg->any.lnk_span.dist;
                slink->state = state;
                state->any.link = slink;

                /*
                 * Embedded router structure in link for message forwarding.
                 */
                TAILQ_INIT(&slink->router.txmsgq);
                slink->router.iocom = state->iocom;
                slink->router.link = slink;

                RB_INSERT(h2span_link_tree, &node->tree, slink);

                fprintf(stderr, "LNK_SPAN(thr %p): %p %s/%s dist=%d\n",
                        msg->router->iocom,
                        slink,
                        hammer2_uuid_to_str(&msg->any.lnk_span.pfs_clid,
                                            &alloc),
                        msg->any.lnk_span.label,
                        msg->any.lnk_span.dist);
                free(alloc);

#if 0
                hammer2_relay_scan(NULL, node);
#endif
                hammer2_router_signal(msg->router);
        }

        /*
         * On transaction terminate we remove the tracking infrastructure.
         */
        if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
                slink = state->any.link;
                assert(slink != NULL);
                node = slink->node;
                cls = node->cls;

                fprintf(stderr, "LNK_DELE(thr %p): %p %s/%s dist=%d\n",
                        msg->router->iocom,
                        slink,
                        hammer2_uuid_to_str(&cls->pfs_clid, &alloc),
                        state->msg->any.lnk_span.label,
                        state->msg->any.lnk_span.dist);
                free(alloc);

                /*
                 * Clean out all relays.  This requires terminating each
                 * relay transaction.
                 */
                while ((relay = TAILQ_FIRST(&slink->relayq)) != NULL) {
                        hammer2_relay_delete(relay);
                }

                /*
                 * Clean out the topology
                 */
                RB_REMOVE(h2span_link_tree, &node->tree, slink);
                if (RB_EMPTY(&node->tree)) {
                        RB_REMOVE(h2span_node_tree, &cls->tree, node);
                        if (RB_EMPTY(&cls->tree)) {
                                RB_REMOVE(h2span_cluster_tree,
                                          &cluster_tree, cls);
                                hammer2_free(cls);
                        }
                        node->cls = NULL;
                        hammer2_free(node);
                        node = NULL;
                }
                state->any.link = NULL;
                slink->state = NULL;
                slink->node = NULL;
                hammer2_free(slink);

                /*
                 * We have to terminate the transaction
                 */
                hammer2_state_reply(state, 0);
                /* state invalid after reply */

                /*
                 * If the node still exists issue any required updates.  If
                 * it doesn't then all related relays have already been
                 * removed and there's nothing left to do.
                 */
#if 0
                if (node)
                        hammer2_relay_scan(NULL, node);
#endif
                if (node)
                        hammer2_router_signal(msg->router);
        }

        pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Messages received on relay SPANs.  These are open transactions so it is
 * in fact possible for the other end to close the transaction.
 *
 * XXX MPRACE on state structure
 */
static void
hammer2_lnk_relay(hammer2_msg_t *msg)
{
        hammer2_state_t *state = msg->state;
        h2span_relay_t *relay;

        assert(msg->any.head.cmd & HAMMER2_MSGF_REPLY);

        if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
                pthread_mutex_lock(&cluster_mtx);
                if ((relay = state->any.relay) != NULL) {
                        hammer2_relay_delete(relay);
                } else {
                        hammer2_state_reply(state, 0);
                }
                pthread_mutex_unlock(&cluster_mtx);
        }
}

/*
 * Update relay transactions for SPANs.
 *
 * Called with cluster_mtx held.
 */
static void hammer2_relay_scan_specific(h2span_node_t *node,
                                        h2span_connect_t *conn);

static void
hammer2_relay_scan(h2span_connect_t *conn, h2span_node_t *node)
{
        h2span_cluster_t *cls;

        if (node) {
                /*
                 * Iterate specific node
                 */
                TAILQ_FOREACH(conn, &connq, entry)
                        hammer2_relay_scan_specific(node, conn);
        } else {
                /*
                 * Full iteration.
                 *
                 * Iterate cluster ids, nodes, and either a specific connection
                 * or all connections.
                 */
                RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
                        /*
                         * Iterate node ids
                         */
                        RB_FOREACH(node, h2span_node_tree, &cls->tree) {
                                /*
                                 * Synchronize the node's link (received SPANs)
                                 * with each connection's relays.
                                 */
                                if (conn) {
                                        hammer2_relay_scan_specific(node, conn);
                                } else {
                                        TAILQ_FOREACH(conn, &connq, entry) {
                                                hammer2_relay_scan_specific(node,
                                                                conn);
                                        }
                                        assert(conn == NULL);
                                }
                        }
                }
        }
}

/*
 * Update the relayed SPANs for this (node, conn).
 *
 * Iterate links and adjust relays to match.  We propagate the top two
 * links by distance (the count in hammer2_relay_scan_specific()).
 *
 * The hammer2_relay_scan_cmp() function locates the first relay element
 * for any given node.  The relay elements will be sub-sorted by dist.
 */
struct relay_scan_info {
        h2span_node_t *node;
        h2span_relay_t *relay;
};

static int
hammer2_relay_scan_cmp(h2span_relay_t *relay, void *arg)
{
        struct relay_scan_info *info = arg;

        if ((intptr_t)relay->link->node < (intptr_t)info->node)
                return(-1);
        if ((intptr_t)relay->link->node > (intptr_t)info->node)
                return(1);
        return(0);
}

static int
hammer2_relay_scan_callback(h2span_relay_t *relay, void *arg)
{
        struct relay_scan_info *info = arg;

        info->relay = relay;
        return(-1);
}

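/*
 * Note on the RB_SCAN idiom above: hammer2_relay_scan_cmp() brackets the
 * scan range (negative = before the target node, positive = after, zero =
 * within range), and hammer2_relay_scan_callback() records the first
 * in-range relay and returns -1 to stop the scan at the first match.
 * The net effect is a "find first relay for this node" primitive used
 * by hammer2_relay_scan_specific() below.
 */
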
static void
hammer2_relay_scan_specific(h2span_node_t *node, h2span_connect_t *conn)
{
        struct relay_scan_info info;
        h2span_relay_t *relay;
        h2span_relay_t *next_relay;
        h2span_link_t *slink;
        int count = 2;

        info.node = node;
        info.relay = NULL;

        /*
         * Locate the first related relay for the node on this connection.
         * relay will be NULL if there were none.
         */
        RB_SCAN(h2span_relay_tree, &conn->tree,
                hammer2_relay_scan_cmp, hammer2_relay_scan_callback, &info);
        relay = info.relay;
        info.relay = NULL;
        if (relay)
                assert(relay->link->node == node);

        if (DebugOpt > 8)
                fprintf(stderr, "relay scan for connection %p\n", conn);

        /*
         * Iterate the node's links (received SPANs) in distance order,
         * lowest (best) dist first.
         */
        /* fprintf(stderr, "LOOP\n"); */
        RB_FOREACH(slink, h2span_link_tree, &node->tree) {
                /*
                fprintf(stderr, "SLINK %p RELAY %p(%p)\n",
                        slink, relay, relay ? relay->link : NULL);
                */
                /*
                 * PROPAGATE THE BEST LINKS OVER THE SPECIFIED CONNECTION.
                 *
                 * Track relays while iterating the best links and construct
                 * missing relays when necessary.
                 *
                 * (If some prior better link was removed it would have also
                 * removed the relay, so the relay can only match exactly or
                 * be worse).
                 */
                if (relay && relay->link == slink) {
                        /*
                         * Match, relay already in-place, get the next
                         * relay to match against the next slink.
                         */
                        relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
                        if (--count == 0)
                                break;
                } else if (slink->dist > HAMMER2_SPAN_MAXDIST) {
                        /*
                         * No match but span distance is too great,
                         * do not relay.  This prevents endless closed
                         * loops with ever-incrementing distances when
                         * the seed span is lost in the graph.
                         *
                         * All later spans will also be too far away so
                         * we can break out of the loop.
                         */
                        break;
                } else {
                        /*
                         * No match, distance is ok, construct a new relay.
                         * (slink is better than relay).
                         */
                        hammer2_msg_t *msg;

                        assert(relay == NULL ||
                               relay->link->node != slink->node ||
                               relay->link->dist >= slink->dist);
                        relay = hammer2_alloc(sizeof(*relay));
                        relay->conn = conn;
                        relay->link = slink;

                        msg = hammer2_msg_alloc(&conn->state->iocom->router, 0,
                                                HAMMER2_LNK_SPAN |
                                                HAMMER2_MSGF_CREATE,
                                                hammer2_lnk_relay, relay);
                        relay->state = msg->state;
                        msg->any.lnk_span = slink->state->msg->any.lnk_span;
                        msg->any.lnk_span.dist = slink->dist + 1;

                        RB_INSERT(h2span_relay_tree, &conn->tree, relay);
                        TAILQ_INSERT_TAIL(&slink->relayq, relay, entry);

                        hammer2_msg_write(msg);

                        fprintf(stderr,
                                "RELAY SPAN %p RELAY %p ON CLS=%p NODE=%p DIST=%d "
                                "FD %d state %p\n",
                                slink,
                                relay,
                                node->cls, node, slink->dist,
                                conn->state->iocom->sock_fd, relay->state);

                        /*
                         * Match (created new relay), get the next relay to
                         * match against the next slink.
                         */
                        relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
                        if (--count == 0)
                                break;
                }
        }

        /*
         * Any remaining relays belonging to this connection which match
         * the node are in excess of the current aggregate spanning state
         * and should be removed.
         */
        while (relay && relay->link->node == node) {
                next_relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
                hammer2_relay_delete(relay);
                relay = next_relay;
        }
}

static
void
hammer2_relay_delete(h2span_relay_t *relay)
{
        fprintf(stderr,
                "RELAY DELETE %p RELAY %p ON CLS=%p NODE=%p DIST=%d FD %d STATE %p\n",
                relay->link,
                relay,
                relay->link->node->cls, relay->link->node,
                relay->link->dist,
                relay->conn->state->iocom->sock_fd, relay->state);

        RB_REMOVE(h2span_relay_tree, &relay->conn->tree, relay);
        TAILQ_REMOVE(&relay->link->relayq, relay, entry);

        if (relay->state) {
                relay->state->any.relay = NULL;
                hammer2_state_reply(relay->state, 0);
                /* state invalid after reply */
                relay->state = NULL;
        }
        relay->conn = NULL;
        relay->link = NULL;
        hammer2_free(relay);
}

/************************************************************************
 *                              ROUTER                                  *
 ************************************************************************
 *
 * Provides route functions to msg.c
 */

#if 0
/*
 * Acquire a persistent router structure given the cluster and node ids.
 * Messages can be transacted via this structure while held.  If the route
 * is lost messages will return failure.
 */
hammer2_router_t *
hammer2_router_get(uuid_t *pfs_clid, uuid_t *pfs_fsid)
{
}

/*
 * Release previously acquired router.
 */
void
hammer2_router_put(hammer2_router_t *router)
{
}
#endif

/*
 * Dumps the spanning tree
 */
void
shell_tree(hammer2_router_t *router, char *cmdbuf __unused)
{
        h2span_cluster_t *cls;
        h2span_node_t *node;
        h2span_link_t *slink;
        char *uustr = NULL;

        pthread_mutex_lock(&cluster_mtx);
        RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
                router_printf(router, "Cluster %s\n",
                              hammer2_uuid_to_str(&cls->pfs_clid, &uustr));
                RB_FOREACH(node, h2span_node_tree, &cls->tree) {
                        router_printf(router, "    Node %s (%s)\n",
                                      hammer2_uuid_to_str(&node->pfs_fsid,
                                                          &uustr),
                                      node->label);
                        RB_FOREACH(slink, h2span_link_tree, &node->tree) {
                                router_printf(router,
                                              "\tLink dist=%d via %d\n",
                                              slink->dist,
                                              slink->state->iocom->sock_fd);
                        }
                }
        }
        pthread_mutex_unlock(&cluster_mtx);
        if (uustr)
                free(uustr);
#if 0
        TAILQ_FOREACH(conn, &connq, entry) {
        }
#endif
}