hammer2 - Messaging layer separation work part 1
dragonfly.git: sbin/hammer2/msg_lnk.c
1 /*
2  * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@dragonflybsd.org>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 /*
35  * LNK_SPAN PROTOCOL SUPPORT FUNCTIONS
36  *
 37  * This code supports the LNK_SPAN protocol.  Essentially all PFS's,
 38  * clients, and services rendezvous with the userland hammer2 service and
39  * open LNK_SPAN transactions using a message header linkid of 0,
40  * registering any PFS's they have connectivity to with us.
41  *
42  * --
43  *
44  * Each registration maintains its own open LNK_SPAN message transaction.
45  * The SPANs are collected, aggregated, and retransmitted over available
 46  * connections through the maintenance of additional LNK_SPAN message
47  * transactions on each link.
48  *
49  * The msgid for each active LNK_SPAN transaction we receive allows us to
50  * send a message to the target PFS (which might be one of many belonging
 51  * to the same cluster) by specifying that msgid as the linkid in any
 52  * message we send.
53  *
 54  * Similarly, we allocate a msgid for any LNK_SPAN transaction we transmit
 55  * (and remember, we will maintain multiple open LNK_SPAN transactions on
 56  * each connection representing the topology span, so every node sees every
 57  * other node as a separate open transaction).  The msgid for these active
 58  * transactions which we initiated can likewise be used by the other
 59  * end to route messages through us to another node, ultimately winding up
 60  * at the identified hammer2 PFS.  We have to adjust the spanid in the message
 61  * header at each hop to be representative of the outgoing LNK_SPAN we
 62  * are forwarding the message through.
63  *
64  * --
65  *
66  * If we were to retransmit every LNK_SPAN transaction we receive it would
67  * create a huge mess, so we have to aggregate all received LNK_SPAN
68  * transactions, sort them by the fsid (the cluster) and sub-sort them by
69  * the pfs_fsid (individual nodes in the cluster), and only retransmit
70  * (create outgoing transactions) for a subset of the nearest distance-hops
71  * for each individual node.
72  *
73  * The higher level protocols can then issue transactions to the nodes making
74  * up a cluster to perform all actions required.
75  *
76  * --
77  *
78  * Since this is a large topology and a spanning tree protocol, links can
79  * go up and down all the time.  Any time a link goes down its transaction
80  * is closed.  The transaction has to be closed on both ends before we can
81  * delete (and potentially reuse) the related spanid.  The LNK_SPAN being
82  * closed may have been propagated out to other connections and those related
83  * LNK_SPANs are also closed.  Ultimately all routes via the lost LNK_SPAN
84  * go away, ultimately reaching all sources and all targets.
85  *
86  * Any messages in-transit using a route that goes away will be thrown away.
87  * Open transactions are only tracked at the two end-points.  When a link
88  * failure propagates to an end-point the related open transactions lose
89  * their spanid and are automatically aborted.
90  *
91  * It is important to note that internal route nodes cannot just associate
92  * a lost LNK_SPAN transaction with another route to the same destination.
93  * Message transactions MUST be serialized and MUST be ordered.  All messages
94  * for a transaction must run over the same route.  So if the route used by
95  * an active transaction is lost, the related messages will be fully aborted
96  * and the higher protocol levels will retry as appropriate.
97  *
98  * FULLY ABORTING A ROUTED MESSAGE is handled via link-failure propagation
 99  * back to the originator.  Only the originator keeps track of a message.
100  * Routers just pass it through.  If a route is lost during transit the
101  * message is simply thrown away.
102  *
103  * It is also important to note that several paths to the same PFS can be
104  * propagated along the same link, which allows concurrency and even
105  * redundancy over several network interfaces or via different routes through
106  * the topology.  Any given transaction will use only a single route but busy
 107  * servers will often have hundreds of transactions active simultaneously,
108  * so having multiple active paths through the network topology for A<->B
109  * will improve performance.
110  *
111  * --
112  *
113  * Most protocols consolidate operations rather than simply relaying them.
114  * This is particularly true of LEAF protocols (such as strict HAMMER2
115  * clients), of which there can be millions connecting into the cluster at
116  * various points.  The SPAN protocol is not used for these LEAF elements.
117  *
118  * Instead the primary service they connect to implements a proxy for the
119  * client protocols so the core topology only has to propagate a couple of
120  * LNK_SPANs and not millions.  LNK_SPANs are meant to be used only for
 121  * core master nodes, satellite slaves, and cache nodes.
122  */
123
124 #include "hammer2.h"
125
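/*
 * Illustrative sketch (not part of the build): how the routing rules
 * described above surface in the API.  A received LNK_SPAN's msgid becomes
 * the router target (see hammer2_lnk_span() below), so addressing a message
 * to the PFS at the far end of a span is a matter of allocating the message
 * against that span's router.  example_send_to_pfs() is a hypothetical
 * helper, not part of this file.
 */
#if 0
static void
example_send_to_pfs(h2span_link_t *slink, uint32_t cmd)
{
        hammer2_msg_t *msg;

        /*
         * slink->router->target was set to the msgid of the received
         * LNK_SPAN; intermediate hops rewrite the spanid to match the
         * outgoing LNK_SPAN they forward the message through.
         */
        msg = hammer2_msg_alloc(slink->router, 0, cmd, NULL, NULL);
        hammer2_msg_write(msg);
}
#endif
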
126 /*
127  * Maximum spanning tree distance.  This has the practical effect of
128  * stopping tail-chasing closed loops when a feeder span is lost.
129  */
130 #define HAMMER2_SPAN_MAXDIST    16
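
/*
 * Illustrative sketch (not compiled): each relay hop re-advertises a
 * received span with dist + 1 (see hammer2_relay_scan_specific() below),
 * so a span caught in a closed loop inflates its dist on every pass and
 * stops being propagated once it exceeds HAMMER2_SPAN_MAXDIST.
 */
#if 0
        if (slink->dist > HAMMER2_SPAN_MAXDIST) /* too far, cut the loop */
                return;
        msg->any.lnk_span.dist = slink->dist + 1;       /* next hop's view */
#endif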
131
132 /*
133  * RED-BLACK TREE DEFINITIONS
134  *
135  * We need to track:
136  *
137  * (1) shared fsid's (a cluster).
138  * (2) unique fsid's (a node in a cluster) <--- LNK_SPAN transactions.
139  *
 140  * We need to collect all active LNK_SPANs, aggregate them, and create our own
141  * outgoing LNK_SPAN transactions on each of our connections representing
142  * the aggregated state.
143  *
144  * h2span_conn          - list of iocom connections who wish to receive SPAN
145  *                        propagation from other connections.  Might contain
146  *                        a filter string.  Only iocom's with an open
 147  *                        LNK_CONN transaction are applicable for SPAN
148  *                        propagation.
149  *
150  * h2span_relay         - List of links relayed (via SPAN).  Essentially
151  *                        each relay structure represents a LNK_SPAN
 152  *                        transaction that we initiated, versus h2span_link
153  *                        which is a LNK_SPAN transaction that we received.
154  *
155  * --
156  *
157  * h2span_cluster       - Organizes the shared fsid's.  One structure for
158  *                        each cluster.
159  *
160  * h2span_node          - Organizes the nodes in a cluster.  One structure
161  *                        for each unique {cluster,node}, aka {fsid, pfs_fsid}.
162  *
163  * h2span_link          - Organizes all incoming and outgoing LNK_SPAN message
164  *                        transactions related to a node.
165  *
166  *                        One h2span_link structure for each incoming LNK_SPAN
167  *                        transaction.  Links selected for propagation back
168  *                        out are also where the outgoing LNK_SPAN messages
169  *                        are indexed into (so we can propagate changes).
170  *
171  *                        The h2span_link's use a red-black tree to sort the
172  *                        distance hop metric for the incoming LNK_SPAN.  We
173  *                        then select the top N for outgoing.  When the
174  *                        topology changes the top N may also change and cause
175  *                        new outgoing LNK_SPAN transactions to be opened
 176  *                        and less desirable ones to be closed, causing
177  *                        transactional aborts within the message flow in
178  *                        the process.
179  *
180  * Also note            - All outgoing LNK_SPAN message transactions are also
181  *                        entered into a red-black tree for use by the routing
182  *                        function.  This is handled by msg.c in the state
183  *                        code, not here.
184  */
185
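/*
 * Illustrative sketch (not compiled): the three-level topology that the
 * structures below implement, walked top-down.  This is essentially what
 * shell_tree() at the bottom of this file does.
 */
#if 0
        RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {     /* clusters */
                RB_FOREACH(node, h2span_node_tree, &cls->tree) {  /* nodes    */
                        RB_FOREACH(slink, h2span_link_tree, &node->tree) {
                                /* received LNK_SPANs, best (lowest) dist first */
                        }
                }
        }
#endif
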
186 struct h2span_link;
187 struct h2span_relay;
188 TAILQ_HEAD(h2span_media_queue, h2span_media);
189 TAILQ_HEAD(h2span_conn_queue, h2span_conn);
190 TAILQ_HEAD(h2span_relay_queue, h2span_relay);
191
192 RB_HEAD(h2span_cluster_tree, h2span_cluster);
193 RB_HEAD(h2span_node_tree, h2span_node);
194 RB_HEAD(h2span_link_tree, h2span_link);
195 RB_HEAD(h2span_relay_tree, h2span_relay);
196
197 /*
 198  * This represents a single media; several connections may share it.
199  */
200 struct h2span_media {
201         TAILQ_ENTRY(h2span_media) entry;
202         uuid_t  mediaid;
203         int     refs;
204         struct h2span_media_config {
205                 dmsg_vol_data_t         copy_run;
206                 dmsg_vol_data_t         copy_pend;
207                 pthread_t               thread;
208                 pthread_cond_t          cond;
209                 int                     ctl;
210                 int                     fd;
211                 hammer2_iocom_t         iocom;
212                 pthread_t               iocom_thread;
213                 enum { H2MC_STOPPED, H2MC_CONNECT, H2MC_RUNNING } state;
214         } config[HAMMER2_COPYID_COUNT];
215 };
216
217 typedef struct h2span_media_config h2span_media_config_t;
218
219 #define H2CONFCTL_STOP          0x00000001
220 #define H2CONFCTL_UPDATE        0x00000002
221
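/*
 * Illustrative sketch (not compiled): ctl plus the per-config condition
 * variable form a small mailbox between the iocom side and the volconf
 * thread.  The producer side below mirrors what hammer2_lnk_conn() does;
 * hammer2_volconf_thread() consumes the flags under cluster_mtx.
 */
#if 0
        pthread_mutex_lock(&cluster_mtx);
        conf->ctl |= H2CONFCTL_UPDATE;          /* or H2CONFCTL_STOP */
        pthread_cond_signal(&conf->cond);       /* wake hammer2_volconf_thread */
        pthread_mutex_unlock(&cluster_mtx);
#endif
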
222 /*
 223  * A received LNK_CONN transaction enables the SPAN protocol over the
 224  * connection (and may contain a filter).  Typically there is one for each
 225  * mount, and several may share the same media.
226  */
227 struct h2span_conn {
228         TAILQ_ENTRY(h2span_conn) entry;
229         struct h2span_relay_tree tree;
230         struct h2span_media *media;
231         hammer2_state_t *state;
232 };
233
234 /*
235  * All received LNK_SPANs are organized by cluster (pfs_clid),
236  * node (pfs_fsid), and link (received LNK_SPAN transaction).
237  */
238 struct h2span_cluster {
239         RB_ENTRY(h2span_cluster) rbnode;
240         struct h2span_node_tree tree;
241         uuid_t  pfs_clid;               /* shared fsid */
242         int     refs;                   /* prevents destruction */
243 };
244
245 struct h2span_node {
246         RB_ENTRY(h2span_node) rbnode;
247         struct h2span_link_tree tree;
248         struct h2span_cluster *cls;
249         uuid_t  pfs_fsid;               /* unique fsid */
250         char label[64];
251 };
252
253 struct h2span_link {
254         RB_ENTRY(h2span_link) rbnode;
255         hammer2_state_t *state;         /* state<->link */
256         struct h2span_node *node;       /* related node */
257         int32_t dist;
258         struct h2span_relay_queue relayq; /* relay out */
259         struct hammer2_router *router;  /* route out this link */
260 };
261
262 /*
 263  * Any LNK_SPAN transactions we receive which are relayed out over other
 264  * connections utilize this structure to track the LNK_SPAN transaction
265  * we initiate on the other connections, if selected for relay.
266  *
267  * In many respects this is the core of the protocol... actually figuring
268  * out what LNK_SPANs to relay.  The spanid used for relaying is the
269  * address of the 'state' structure, which is why h2span_relay has to
270  * be entered into a RB-TREE based at h2span_conn (so we can look
271  * up the spanid to validate it).
272  *
273  * NOTE: Messages can be received via the LNK_SPAN transaction the
274  *       relay maintains, and can be replied via relay->router, but
275  *       messages are NOT initiated via a relay.  Messages are initiated
276  *       via incoming links (h2span_link's).
277  *
278  *       relay->link represents the link being relayed, NOT the LNK_SPAN
279  *       transaction the relay is holding open.
280  */
281 struct h2span_relay {
282         RB_ENTRY(h2span_relay) rbnode;  /* from h2span_conn */
283         TAILQ_ENTRY(h2span_relay) entry; /* from link */
284         struct h2span_conn *conn;
285         hammer2_state_t *state;         /* transmitted LNK_SPAN */
286         struct h2span_link *link;       /* LNK_SPAN being relayed */
287         struct hammer2_router   *router;/* route out this relay */
288 };
289
290
291 typedef struct h2span_media h2span_media_t;
292 typedef struct h2span_conn h2span_conn_t;
293 typedef struct h2span_cluster h2span_cluster_t;
294 typedef struct h2span_node h2span_node_t;
295 typedef struct h2span_link h2span_link_t;
296 typedef struct h2span_relay h2span_relay_t;
297
298 static
299 int
300 h2span_cluster_cmp(h2span_cluster_t *cls1, h2span_cluster_t *cls2)
301 {
302         return(uuid_compare(&cls1->pfs_clid, &cls2->pfs_clid, NULL));
303 }
304
305 static
306 int
307 h2span_node_cmp(h2span_node_t *node1, h2span_node_t *node2)
308 {
309         return(uuid_compare(&node1->pfs_fsid, &node2->pfs_fsid, NULL));
310 }
311
312 /*
313  * Sort/subsort must match h2span_relay_cmp() under any given node
314  * to make the aggregation algorithm easier, so the best links are
315  * in the same sorted order as the best relays.
316  *
317  * NOTE: We cannot use link*->state->msgid because this msgid is created
318  *       by each remote host and thus might wind up being the same.
319  */
320 static
321 int
322 h2span_link_cmp(h2span_link_t *link1, h2span_link_t *link2)
323 {
324         if (link1->dist < link2->dist)
325                 return(-1);
326         if (link1->dist > link2->dist)
327                 return(1);
328 #if 1
329         if ((uintptr_t)link1->state < (uintptr_t)link2->state)
330                 return(-1);
331         if ((uintptr_t)link1->state > (uintptr_t)link2->state)
332                 return(1);
333 #else
334         if (link1->state->msgid < link2->state->msgid)
335                 return(-1);
336         if (link1->state->msgid > link2->state->msgid)
337                 return(1);
338 #endif
339         return(0);
340 }
341
342 /*
343  * Relay entries are sorted by node, subsorted by distance and link
344  * address (so we can match up the conn->tree relay topology with
345  * a node's link topology).
346  */
347 static
348 int
349 h2span_relay_cmp(h2span_relay_t *relay1, h2span_relay_t *relay2)
350 {
351         h2span_link_t *link1 = relay1->link;
352         h2span_link_t *link2 = relay2->link;
353
354         if ((intptr_t)link1->node < (intptr_t)link2->node)
355                 return(-1);
356         if ((intptr_t)link1->node > (intptr_t)link2->node)
357                 return(1);
358         if (link1->dist < link2->dist)
359                 return(-1);
360         if (link1->dist > link2->dist)
361                 return(1);
362 #if 1
363         if ((uintptr_t)link1->state < (uintptr_t)link2->state)
364                 return(-1);
365         if ((uintptr_t)link1->state > (uintptr_t)link2->state)
366                 return(1);
367 #else
368         if (link1->state->msgid < link2->state->msgid)
369                 return(-1);
370         if (link1->state->msgid > link2->state->msgid)
371                 return(1);
372 #endif
373         return(0);
374 }
375
376 RB_PROTOTYPE_STATIC(h2span_cluster_tree, h2span_cluster,
377              rbnode, h2span_cluster_cmp);
378 RB_PROTOTYPE_STATIC(h2span_node_tree, h2span_node,
379              rbnode, h2span_node_cmp);
380 RB_PROTOTYPE_STATIC(h2span_link_tree, h2span_link,
381              rbnode, h2span_link_cmp);
382 RB_PROTOTYPE_STATIC(h2span_relay_tree, h2span_relay,
383              rbnode, h2span_relay_cmp);
384
385 RB_GENERATE_STATIC(h2span_cluster_tree, h2span_cluster,
386              rbnode, h2span_cluster_cmp);
387 RB_GENERATE_STATIC(h2span_node_tree, h2span_node,
388              rbnode, h2span_node_cmp);
389 RB_GENERATE_STATIC(h2span_link_tree, h2span_link,
390              rbnode, h2span_link_cmp);
391 RB_GENERATE_STATIC(h2span_relay_tree, h2span_relay,
392              rbnode, h2span_relay_cmp);
393
394 /*
395  * Global mutex protects cluster_tree lookups, connq, mediaq.
396  */
397 static pthread_mutex_t cluster_mtx;
398 static struct h2span_cluster_tree cluster_tree = RB_INITIALIZER(cluster_tree);
399 static struct h2span_conn_queue connq = TAILQ_HEAD_INITIALIZER(connq);
400 static struct h2span_media_queue mediaq = TAILQ_HEAD_INITIALIZER(mediaq);
401
402 static void hammer2_lnk_span(hammer2_msg_t *msg);
403 static void hammer2_lnk_conn(hammer2_msg_t *msg);
404 static void hammer2_lnk_relay(hammer2_msg_t *msg);
405 static void hammer2_relay_scan(h2span_conn_t *conn, h2span_node_t *node);
406 static void hammer2_relay_delete(h2span_relay_t *relay);
407
408 static void *hammer2_volconf_thread(void *info);
409 static void hammer2_volconf_stop(h2span_media_config_t *conf);
410 static void hammer2_volconf_start(h2span_media_config_t *conf,
411                                 const char *hostname);
412
413 void
414 hammer2_msg_lnk_signal(hammer2_router_t *router __unused)
415 {
416         pthread_mutex_lock(&cluster_mtx);
417         hammer2_relay_scan(NULL, NULL);
418         pthread_mutex_unlock(&cluster_mtx);
419 }
420
421 /*
 422  * Receive a HAMMER2_MSG_PROTO_LNK message.  This is only called for
 423  * one-way messages and opening transactions, since state->func will be
 424  * assigned in all other cases.
425  */
426 void
427 hammer2_msg_lnk(hammer2_msg_t *msg)
428 {
429         switch(msg->any.head.cmd & DMSGF_BASECMDMASK) {
430         case DMSG_LNK_CONN:
431                 hammer2_lnk_conn(msg);
432                 break;
433         case DMSG_LNK_SPAN:
434                 hammer2_lnk_span(msg);
435                 break;
436         default:
437                 fprintf(stderr,
438                         "MSG_PROTO_LNK: Unknown msg %08x\n", msg->any.head.cmd);
439                 hammer2_msg_reply(msg, DMSG_ERR_NOSUPP);
440                 /* state invalid after reply */
441                 break;
442         }
443 }
444
445 void
446 hammer2_lnk_conn(hammer2_msg_t *msg)
447 {
448         hammer2_state_t *state = msg->state;
449         h2span_media_t *media;
450         h2span_media_config_t *conf;
451         h2span_conn_t *conn;
452         h2span_relay_t *relay;
453         char *alloc = NULL;
454         int i;
455
456         pthread_mutex_lock(&cluster_mtx);
457
458         switch(msg->any.head.cmd & DMSGF_TRANSMASK) {
459         case DMSG_LNK_CONN | DMSGF_CREATE:
460         case DMSG_LNK_CONN | DMSGF_CREATE | DMSGF_DELETE:
461                 /*
462                  * On transaction start we allocate a new h2span_conn and
463                  * acknowledge the request, leaving the transaction open.
464                  * We then relay priority-selected SPANs.
465                  */
466                 fprintf(stderr, "LNK_CONN(%08x): %s/%s\n",
467                         (uint32_t)msg->any.head.msgid,
468                         hammer2_uuid_to_str(&msg->any.lnk_conn.pfs_clid,
469                                             &alloc),
470                         msg->any.lnk_conn.label);
471                 free(alloc);
472
473                 conn = hammer2_alloc(sizeof(*conn));
474
475                 RB_INIT(&conn->tree);
476                 conn->state = state;
477                 state->func = hammer2_lnk_conn;
478                 state->any.conn = conn;
479                 TAILQ_INSERT_TAIL(&connq, conn, entry);
480
481                 /*
482                  * Set up media
483                  */
484                 TAILQ_FOREACH(media, &mediaq, entry) {
485                         if (uuid_compare(&msg->any.lnk_conn.mediaid,
486                                          &media->mediaid, NULL) == 0) {
487                                 break;
488                         }
489                 }
490                 if (media == NULL) {
491                         media = hammer2_alloc(sizeof(*media));
492                         media->mediaid = msg->any.lnk_conn.mediaid;
493                         TAILQ_INSERT_TAIL(&mediaq, media, entry);
494                 }
495                 conn->media = media;
496                 ++media->refs;
497
498                 if ((msg->any.head.cmd & DMSGF_DELETE) == 0) {
499                         hammer2_msg_result(msg, 0);
500                         hammer2_router_signal(msg->router);
501                         break;
502                 }
503                 /* FALL THROUGH */
504         case DMSG_LNK_CONN | DMSGF_DELETE:
505         case DMSG_LNK_ERROR | DMSGF_DELETE:
506 deleteconn:
507                 /*
508                  * On transaction terminate we clean out our h2span_conn
509                  * and acknowledge the request, closing the transaction.
510                  */
511                 fprintf(stderr, "LNK_CONN: Terminated\n");
512                 conn = state->any.conn;
513                 assert(conn);
514
515                 /*
516                  * Clean out the media structure. If refs drops to zero we
517                  * also clean out the media config threads.  These threads
518                  * maintain span connections to other hammer2 service daemons.
519                  */
520                 media = conn->media;
521                 if (--media->refs == 0) {
522                         fprintf(stderr, "Shutting down media spans\n");
523                         for (i = 0; i < HAMMER2_COPYID_COUNT; ++i) {
524                                 conf = &media->config[i];
525
526                                 if (conf->thread == NULL)
527                                         continue;
528                                 conf->ctl = H2CONFCTL_STOP;
529                                 pthread_cond_signal(&conf->cond);
530                         }
531                         for (i = 0; i < HAMMER2_COPYID_COUNT; ++i) {
532                                 conf = &media->config[i];
533
534                                 if (conf->thread == NULL)
535                                         continue;
536                                 pthread_mutex_unlock(&cluster_mtx);
537                                 pthread_join(conf->thread, NULL);
538                                 pthread_mutex_lock(&cluster_mtx);
539                                 conf->thread = NULL;
540                                 pthread_cond_destroy(&conf->cond);
541                         }
542                         fprintf(stderr, "Media shutdown complete\n");
543                         TAILQ_REMOVE(&mediaq, media, entry);
544                         hammer2_free(media);
545                 }
546
547                 /*
548                  * Clean out all relays.  This requires terminating each
549                  * relay transaction.
550                  */
551                 while ((relay = RB_ROOT(&conn->tree)) != NULL) {
552                         hammer2_relay_delete(relay);
553                 }
554
555                 /*
556                  * Clean out conn
557                  */
558                 conn->media = NULL;
559                 conn->state = NULL;
560                 msg->state->any.conn = NULL;
561                 TAILQ_REMOVE(&connq, conn, entry);
562                 hammer2_free(conn);
563
564                 hammer2_msg_reply(msg, 0);
565                 /* state invalid after reply */
566                 break;
567         case DMSG_LNK_VOLCONF:
568                 /*
569                  * One-way volume-configuration message is transmitted
570                  * over the open LNK_CONN transaction.
571                  */
572                 fprintf(stderr, "RECEIVED VOLCONF\n");
573                 if (msg->any.lnk_volconf.index < 0 ||
574                     msg->any.lnk_volconf.index >= HAMMER2_COPYID_COUNT) {
575                         fprintf(stderr, "VOLCONF: ILLEGAL INDEX %d\n",
576                                 msg->any.lnk_volconf.index);
577                         break;
578                 }
579                 if (msg->any.lnk_volconf.copy.path[sizeof(msg->any.lnk_volconf.copy.path) - 1] != 0 ||
580                     msg->any.lnk_volconf.copy.path[0] == 0) {
581                         fprintf(stderr, "VOLCONF: ILLEGAL PATH %d\n",
582                                 msg->any.lnk_volconf.index);
583                         break;
584                 }
585                 conn = msg->state->any.conn;
586                 if (conn == NULL) {
587                         fprintf(stderr, "VOLCONF: LNK_CONN is missing\n");
588                         break;
589                 }
590                 conf = &conn->media->config[msg->any.lnk_volconf.index];
591                 conf->copy_pend = msg->any.lnk_volconf.copy;
592                 conf->ctl |= H2CONFCTL_UPDATE;
593                 if (conf->thread == NULL) {
594                         fprintf(stderr, "VOLCONF THREAD STARTED\n");
595                         pthread_cond_init(&conf->cond, NULL);
596                         pthread_create(&conf->thread, NULL,
597                                        hammer2_volconf_thread, (void *)conf);
598                 }
599                 pthread_cond_signal(&conf->cond);
600                 break;
601         default:
602                 /*
603                  * Failsafe
604                  */
605                 if (msg->any.head.cmd & DMSGF_DELETE)
606                         goto deleteconn;
607                 hammer2_msg_reply(msg, DMSG_ERR_NOSUPP);
608                 break;
609         }
610         pthread_mutex_unlock(&cluster_mtx);
611 }
612
613 void
614 hammer2_lnk_span(hammer2_msg_t *msg)
615 {
616         hammer2_state_t *state = msg->state;
617         h2span_cluster_t dummy_cls;
618         h2span_node_t dummy_node;
619         h2span_cluster_t *cls;
620         h2span_node_t *node;
621         h2span_link_t *slink;
622         h2span_relay_t *relay;
623         char *alloc = NULL;
624
625         assert((msg->any.head.cmd & DMSGF_REPLY) == 0);
626
627         pthread_mutex_lock(&cluster_mtx);
628
629         /*
630          * On transaction start we initialize the tracking infrastructure
631          */
632         if (msg->any.head.cmd & DMSGF_CREATE) {
633                 assert(state->func == NULL);
634                 state->func = hammer2_lnk_span;
635
636                 msg->any.lnk_span.label[sizeof(msg->any.lnk_span.label)-1] = 0;
637
638                 /*
639                  * Find the cluster
640                  */
641                 dummy_cls.pfs_clid = msg->any.lnk_span.pfs_clid;
642                 cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
643                 if (cls == NULL) {
644                         cls = hammer2_alloc(sizeof(*cls));
645                         cls->pfs_clid = msg->any.lnk_span.pfs_clid;
646                         RB_INIT(&cls->tree);
647                         RB_INSERT(h2span_cluster_tree, &cluster_tree, cls);
648                 }
649
650                 /*
651                  * Find the node
652                  */
653                 dummy_node.pfs_fsid = msg->any.lnk_span.pfs_fsid;
654                 node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
655                 if (node == NULL) {
656                         node = hammer2_alloc(sizeof(*node));
657                         node->pfs_fsid = msg->any.lnk_span.pfs_fsid;
658                         node->cls = cls;
659                         RB_INIT(&node->tree);
660                         RB_INSERT(h2span_node_tree, &cls->tree, node);
661                         snprintf(node->label, sizeof(node->label),
662                                  "%s", msg->any.lnk_span.label);
663                 }
664
665                 /*
666                  * Create the link
667                  */
668                 assert(state->any.link == NULL);
669                 slink = hammer2_alloc(sizeof(*slink));
670                 TAILQ_INIT(&slink->relayq);
671                 slink->node = node;
672                 slink->dist = msg->any.lnk_span.dist;
673                 slink->state = state;
674                 state->any.link = slink;
675
676                 /*
677                  * Embedded router structure in link for message forwarding.
678                  *
679                  * The spanning id for the router is the message id of
680                  * the SPAN link it is embedded in, allowing messages to
681                  * be routed via &slink->router.
682                  */
683                 slink->router = hammer2_router_alloc();
684                 slink->router->iocom = state->iocom;
685                 slink->router->link = slink;
686                 slink->router->target = state->msgid;
687                 hammer2_router_connect(slink->router);
688
689                 RB_INSERT(h2span_link_tree, &node->tree, slink);
690
691                 fprintf(stderr, "LNK_SPAN(thr %p): %p %s/%s dist=%d\n",
692                         msg->router->iocom,
693                         slink,
694                         hammer2_uuid_to_str(&msg->any.lnk_span.pfs_clid,
695                                             &alloc),
696                         msg->any.lnk_span.label,
697                         msg->any.lnk_span.dist);
698                 free(alloc);
699 #if 0
700                 hammer2_relay_scan(NULL, node);
701 #endif
702                 hammer2_router_signal(msg->router);
703         }
704
705         /*
706          * On transaction terminate we remove the tracking infrastructure.
707          */
708         if (msg->any.head.cmd & DMSGF_DELETE) {
709                 slink = state->any.link;
710                 assert(slink != NULL);
711                 node = slink->node;
712                 cls = node->cls;
713
714                 fprintf(stderr, "LNK_DELE(thr %p): %p %s/%s dist=%d\n",
715                         msg->router->iocom,
716                         slink,
717                         hammer2_uuid_to_str(&cls->pfs_clid, &alloc),
718                         state->msg->any.lnk_span.label,
719                         state->msg->any.lnk_span.dist);
720                 free(alloc);
721
722                 /*
723                  * Remove the router from consideration
724                  */
725                 hammer2_router_disconnect(&slink->router);
726
727                 /*
728                  * Clean out all relays.  This requires terminating each
729                  * relay transaction.
730                  */
731                 while ((relay = TAILQ_FIRST(&slink->relayq)) != NULL) {
732                         hammer2_relay_delete(relay);
733                 }
734
735                 /*
736                  * Clean out the topology
737                  */
738                 RB_REMOVE(h2span_link_tree, &node->tree, slink);
739                 if (RB_EMPTY(&node->tree)) {
740                         RB_REMOVE(h2span_node_tree, &cls->tree, node);
741                         if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
742                                 RB_REMOVE(h2span_cluster_tree,
743                                           &cluster_tree, cls);
744                                 hammer2_free(cls);
745                         }
746                         node->cls = NULL;
747                         hammer2_free(node);
748                         node = NULL;
749                 }
750                 state->any.link = NULL;
751                 slink->state = NULL;
752                 slink->node = NULL;
753                 hammer2_free(slink);
754
755                 /*
756                  * We have to terminate the transaction
757                  */
758                 hammer2_state_reply(state, 0);
759                 /* state invalid after reply */
760
761                 /*
762                  * If the node still exists issue any required updates.  If
763                  * it doesn't then all related relays have already been
764                  * removed and there's nothing left to do.
765                  */
766 #if 0
767                 if (node)
768                         hammer2_relay_scan(NULL, node);
769 #endif
770                 if (node)
771                         hammer2_router_signal(msg->router);
772         }
773
774         pthread_mutex_unlock(&cluster_mtx);
775 }
776
777 /*
778  * Messages received on relay SPANs.  These are open transactions so it is
779  * in fact possible for the other end to close the transaction.
780  *
781  * XXX MPRACE on state structure
782  */
783 static void
784 hammer2_lnk_relay(hammer2_msg_t *msg)
785 {
786         hammer2_state_t *state = msg->state;
787         h2span_relay_t *relay;
788
789         assert(msg->any.head.cmd & DMSGF_REPLY);
790
791         if (msg->any.head.cmd & DMSGF_DELETE) {
792                 pthread_mutex_lock(&cluster_mtx);
793                 if ((relay = state->any.relay) != NULL) {
794                         hammer2_relay_delete(relay);
795                 } else {
796                         hammer2_state_reply(state, 0);
797                 }
798                 pthread_mutex_unlock(&cluster_mtx);
799         }
800 }
801
802 /*
803  * Update relay transactions for SPANs.
804  *
805  * Called with cluster_mtx held.
806  */
807 static void hammer2_relay_scan_specific(h2span_node_t *node,
808                                         h2span_conn_t *conn);
809
810 static void
811 hammer2_relay_scan(h2span_conn_t *conn, h2span_node_t *node)
812 {
813         h2span_cluster_t *cls;
814
815         if (node) {
816                 /*
817                  * Iterate specific node
818                  */
819                 TAILQ_FOREACH(conn, &connq, entry)
820                         hammer2_relay_scan_specific(node, conn);
821         } else {
822                 /*
823                  * Full iteration.
824                  *
825                  * Iterate cluster ids, nodes, and either a specific connection
826                  * or all connections.
827                  */
828                 RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
829                         /*
830                          * Iterate node ids
831                          */
832                         RB_FOREACH(node, h2span_node_tree, &cls->tree) {
833                                 /*
834                                  * Synchronize the node's link (received SPANs)
835                                  * with each connection's relays.
836                                  */
837                                 if (conn) {
838                                         hammer2_relay_scan_specific(node, conn);
839                                 } else {
840                                         TAILQ_FOREACH(conn, &connq, entry) {
841                                             hammer2_relay_scan_specific(node,
842                                                                         conn);
843                                         }
844                                         assert(conn == NULL);
845                                 }
846                         }
847                 }
848         }
849 }
850
851 /*
852  * Update the relay'd SPANs for this (node, conn).
853  *
 854  * Iterate links and adjust relays to match.  We currently propagate only
 855  * the top two links per node (see the count variable below).
856  *
857  * The hammer2_relay_scan_cmp() function locates the first relay element
858  * for any given node.  The relay elements will be sub-sorted by dist.
859  */
860 struct relay_scan_info {
861         h2span_node_t *node;
862         h2span_relay_t *relay;
863 };
864
865 static int
866 hammer2_relay_scan_cmp(h2span_relay_t *relay, void *arg)
867 {
868         struct relay_scan_info *info = arg;
869
870         if ((intptr_t)relay->link->node < (intptr_t)info->node)
871                 return(-1);
872         if ((intptr_t)relay->link->node > (intptr_t)info->node)
873                 return(1);
874         return(0);
875 }
876
877 static int
878 hammer2_relay_scan_callback(h2span_relay_t *relay, void *arg)
879 {
880         struct relay_scan_info *info = arg;
881
882         info->relay = relay;
883         return(-1);
884 }
885
886 static void
887 hammer2_relay_scan_specific(h2span_node_t *node, h2span_conn_t *conn)
888 {
889         struct relay_scan_info info;
890         h2span_relay_t *relay;
891         h2span_relay_t *next_relay;
892         h2span_link_t *slink;
893         dmsg_lnk_conn_t *lconn;
894         hammer2_msg_t *msg;
895         int count = 2;
896         uint8_t peer_type;
897
898         info.node = node;
899         info.relay = NULL;
900
901         /*
902          * Locate the first related relay for the node on this connection.
903          * relay will be NULL if there were none.
904          */
905         RB_SCAN(h2span_relay_tree, &conn->tree,
906                 hammer2_relay_scan_cmp, hammer2_relay_scan_callback, &info);
907         relay = info.relay;
908         info.relay = NULL;
909         if (relay)
910                 assert(relay->link->node == node);
911
912         if (DebugOpt > 8)
913                 fprintf(stderr, "relay scan for connection %p\n", conn);
914
915         /*
916          * Iterate the node's links (received SPANs) in distance order,
917          * lowest (best) dist first.
918          *
919          * PROPAGATE THE BEST LINKS OVER THE SPECIFIED CONNECTION.
920          *
921          * Track relays while iterating the best links and construct
922          * missing relays when necessary.
923          *
924          * (If some prior better link was removed it would have also
925          *  removed the relay, so the relay can only match exactly or
926          *  be worse).
927          */
928         RB_FOREACH(slink, h2span_link_tree, &node->tree) {
929                 /*
930                  * Match, relay already in-place, get the next
931                  * relay to match against the next slink.
932                  */
933                 if (relay && relay->link == slink) {
934                         relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
935                         if (--count == 0)
936                                 break;
937                         continue;
938                 }
939
940                 /*
941                  * We might want this SLINK, if it passes our filters.
942                  *
943                  * The spanning tree can cause closed loops so we have
944                  * to limit slink->dist.
945                  */
946                 if (slink->dist > HAMMER2_SPAN_MAXDIST)
947                         break;
948
949                 /*
950                  * Don't bother transmitting a LNK_SPAN out the same
951                  * connection it came in on.  Trivial optimization.
952                  */
953                 if (slink->state->iocom == conn->state->iocom)
954                         break;
955
956                 /*
957                  * NOTE ON FILTERS: The protocol spec allows non-requested
958                  * SPANs to be transmitted, the other end is expected to
959                  * leave their transactions open but otherwise ignore them.
960                  *
961                  * Don't bother transmitting if the remote connection
962                  * is not accepting this SPAN's peer_type.
963                  */
964                 peer_type = slink->state->msg->any.lnk_span.peer_type;
965                 lconn = &conn->state->msg->any.lnk_conn;
966                 if (((1LLU << peer_type) & lconn->peer_mask) == 0)
967                         break;
968
969                 /*
970                  * Filter based on pfs_clid or label (XXX).  This typically
971                  * reduces the amount of SPAN traffic that a mount end-point
972                  * sees by only passing along SPANs related to the cluster id
973                  * (that is, it will see all PFS's associated with the
974                  * particular cluster it represents).
975                  */
976                 if (peer_type == lconn->peer_type &&
977                     peer_type == HAMMER2_PEER_HAMMER2) {
978                         if (!uuid_is_nil(&slink->node->cls->pfs_clid, NULL) &&
979                             uuid_compare(&slink->node->cls->pfs_clid,
980                                          &lconn->pfs_clid, NULL) != 0) {
981                                 break;
982                         }
983                 }
984
985                 /*
986                  * Ok, we've accepted this SPAN for relaying.
987                  */
988                 assert(relay == NULL ||
989                        relay->link->node != slink->node ||
990                        relay->link->dist >= slink->dist);
991                 relay = hammer2_alloc(sizeof(*relay));
992                 relay->conn = conn;
993                 relay->link = slink;
994
995                 msg = hammer2_msg_alloc(conn->state->iocom->router, 0,
996                                         DMSG_LNK_SPAN |
997                                         DMSGF_CREATE,
998                                         hammer2_lnk_relay, relay);
999                 relay->state = msg->state;
1000                 relay->router = hammer2_router_alloc();
1001                 relay->router->iocom = relay->state->iocom;
1002                 relay->router->relay = relay;
1003                 relay->router->target = relay->state->msgid;
1004
1005                 msg->any.lnk_span = slink->state->msg->any.lnk_span;
1006                 msg->any.lnk_span.dist = slink->dist + 1;
1007
1008                 hammer2_router_connect(relay->router);
1009
1010                 RB_INSERT(h2span_relay_tree, &conn->tree, relay);
1011                 TAILQ_INSERT_TAIL(&slink->relayq, relay, entry);
1012
1013                 hammer2_msg_write(msg);
1014
1015                 fprintf(stderr,
1016                         "RELAY SPAN %p RELAY %p ON CLS=%p NODE=%p DIST=%d "
1017                         "FD %d state %p\n",
1018                         slink,
1019                         relay,
1020                         node->cls, node, slink->dist,
1021                         conn->state->iocom->sock_fd, relay->state);
1022
1023                 /*
1024                  * Match (created new relay), get the next relay to
1025                  * match against the next slink.
1026                  */
1027                 relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
1028                 if (--count == 0)
1029                         break;
1030         }
1031
1032         /*
 1033  * Any remaining relays belonging to this connection which match
1034          * the node are in excess of the current aggregate spanning state
1035          * and should be removed.
1036          */
1037         while (relay && relay->link->node == node) {
1038                 next_relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
1039                 hammer2_relay_delete(relay);
1040                 relay = next_relay;
1041         }
1042 }
1043
1044 static
1045 void
1046 hammer2_relay_delete(h2span_relay_t *relay)
1047 {
1048         fprintf(stderr,
1049                 "RELAY DELETE %p RELAY %p ON CLS=%p NODE=%p DIST=%d FD %d STATE %p\n",
1050                 relay->link,
1051                 relay,
1052                 relay->link->node->cls, relay->link->node,
1053                 relay->link->dist,
1054                 relay->conn->state->iocom->sock_fd, relay->state);
1055
1056         hammer2_router_disconnect(&relay->router);
1057
1058         RB_REMOVE(h2span_relay_tree, &relay->conn->tree, relay);
1059         TAILQ_REMOVE(&relay->link->relayq, relay, entry);
1060
1061         if (relay->state) {
1062                 relay->state->any.relay = NULL;
1063                 hammer2_state_reply(relay->state, 0);
1064                 /* state invalid after reply */
1065                 relay->state = NULL;
1066         }
1067         relay->conn = NULL;
1068         relay->link = NULL;
1069         hammer2_free(relay);
1070 }
1071
1072 static void *
1073 hammer2_volconf_thread(void *info)
1074 {
1075         h2span_media_config_t *conf = info;
1076
1077         pthread_mutex_lock(&cluster_mtx);
1078         while ((conf->ctl & H2CONFCTL_STOP) == 0) {
1079                 if (conf->ctl & H2CONFCTL_UPDATE) {
1080                         fprintf(stderr, "VOLCONF UPDATE\n");
1081                         conf->ctl &= ~H2CONFCTL_UPDATE;
1082                         if (bcmp(&conf->copy_run, &conf->copy_pend,
1083                                  sizeof(conf->copy_run)) == 0) {
1084                                 fprintf(stderr, "VOLCONF: no changes\n");
1085                                 continue;
1086                         }
1087                         /*
1088                          * XXX TODO - auto reconnect on lookup failure or
1089                          *              connect failure or stream failure.
1090                          */
1091
1092                         pthread_mutex_unlock(&cluster_mtx);
1093                         hammer2_volconf_stop(conf);
1094                         conf->copy_run = conf->copy_pend;
1095                         if (conf->copy_run.copyid != 0 &&
1096                             strncmp(conf->copy_run.path, "span:", 5) == 0) {
1097                                 hammer2_volconf_start(conf,
1098                                                       conf->copy_run.path + 5);
1099                         }
1100                         pthread_mutex_lock(&cluster_mtx);
1101                         fprintf(stderr, "VOLCONF UPDATE DONE state %d\n", conf->state);
1102                 }
1103                 if (conf->state == H2MC_CONNECT) {
1104                         hammer2_volconf_start(conf, conf->copy_run.path + 5);
1105                         pthread_mutex_unlock(&cluster_mtx);
1106                         sleep(5);
1107                         pthread_mutex_lock(&cluster_mtx);
1108                 } else {
1109                         pthread_cond_wait(&conf->cond, &cluster_mtx);
1110                 }
1111         }
1112         pthread_mutex_unlock(&cluster_mtx);
1113         hammer2_volconf_stop(conf);
1114         return(NULL);
1115 }
1116
1117 static
1118 void
1119 hammer2_volconf_stop(h2span_media_config_t *conf)
1120 {
1121         switch(conf->state) {
1122         case H2MC_STOPPED:
1123                 break;
1124         case H2MC_CONNECT:
1125                 conf->state = H2MC_STOPPED;
1126                 break;
1127         case H2MC_RUNNING:
1128                 shutdown(conf->fd, SHUT_WR);
1129                 pthread_join(conf->iocom_thread, NULL);
1130                 conf->iocom_thread = NULL;
1131                 break;
1132         }
1133 }
1134
1135 static
1136 void
1137 hammer2_volconf_start(h2span_media_config_t *conf, const char *hostname)
1138 {
1139         hammer2_master_service_info_t *info;
1140
1141         switch(conf->state) {
1142         case H2MC_STOPPED:
1143         case H2MC_CONNECT:
1144                 conf->fd = hammer2_connect(hostname);
1145                 if (conf->fd < 0) {
1146                         fprintf(stderr, "Unable to connect to %s\n", hostname);
1147                         conf->state = H2MC_CONNECT;
1148                 } else {
1149                         info = malloc(sizeof(*info));
1150                         bzero(info, sizeof(*info));
1151                         info->fd = conf->fd;
1152                         info->detachme = 0;
1153                         conf->state = H2MC_RUNNING;
1154                         pthread_create(&conf->iocom_thread, NULL,
1155                                        master_service, info);
1156                 }
1157                 break;
1158         case H2MC_RUNNING:
1159                 break;
1160         }
1161 }
1162
1163 /************************************************************************
1164  *                      ROUTER AND MESSAGING HANDLES                    *
1165  ************************************************************************
1166  *
1167  * Basically the idea here is to provide a stable data structure which
1168  * can be localized to the caller for higher level protocols to work with.
 1169  * Depending on the context, these hammer2_handle's can be pooled by use-case
 1170  * and remain persistent throughout a client's (or mount point's) life.
1171  */
1172
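/*
 * Illustrative sketch (not compiled): the intended usage pattern for the
 * handle API below, assuming it is eventually enabled.  The pfs_clid
 * variable here is hypothetical.
 */
#if 0
        h2span_cluster_t *cls;

        cls = hammer2_cluster_get(&pfs_clid);   /* ref pins the structure */
        if (cls) {
                /* ... issue transactions to nodes within the cluster ... */
                hammer2_cluster_put(cls);       /* drop ref, may free */
        }
#endif
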
1173 #if 0
1174 /*
1175  * Obtain a stable handle on a cluster given its uuid.  This ties directly
1176  * into the global cluster topology, creating the structure if necessary
1177  * (even if the uuid does not exist or does not exist yet), and preventing
1178  * the structure from getting ripped out from under us while we hold a
1179  * pointer to it.
1180  */
1181 h2span_cluster_t *
1182 hammer2_cluster_get(uuid_t *pfs_clid)
1183 {
1184         h2span_cluster_t dummy_cls;
1185         h2span_cluster_t *cls;
1186
1187         dummy_cls.pfs_clid = *pfs_clid;
1188         pthread_mutex_lock(&cluster_mtx);
1189         cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
1190         if (cls)
1191                 ++cls->refs;
1192         pthread_mutex_unlock(&cluster_mtx);
1193         return (cls);
1194 }
1195
1196 void
1197 hammer2_cluster_put(h2span_cluster_t *cls)
1198 {
1199         pthread_mutex_lock(&cluster_mtx);
1200         assert(cls->refs > 0);
1201         --cls->refs;
1202         if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
1203                 RB_REMOVE(h2span_cluster_tree,
1204                           &cluster_tree, cls);
1205                 hammer2_free(cls);
1206         }
1207         pthread_mutex_unlock(&cluster_mtx);
1208 }
1209
1210 /*
1211  * Obtain a stable handle to a specific cluster node given its uuid.
1212  * This handle does NOT lock in the route to the node and is typically
1213  * used as part of the hammer2_handle_*() API to obtain a set of
1214  * stable nodes.
1215  */
1216 h2span_node_t *
1217 hammer2_node_get(h2span_cluster_t *cls, uuid_t *pfs_fsid)
 1218 {
        h2span_node_t dummy_node;
        h2span_node_t *node;

        /*
         * Minimal sketch (this whole block is inside #if 0): h2span_node
         * carries no ref count of its own yet, so stability relies on the
         * caller holding the cluster ref from hammer2_cluster_get().
         */
        dummy_node.pfs_fsid = *pfs_fsid;
        pthread_mutex_lock(&cluster_mtx);
        node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
        pthread_mutex_unlock(&cluster_mtx);
        return (node);
 1219 }
1220
1221 #endif
1222
1223 #if 0
1224 /*
1225  * Acquire a persistent router structure given the cluster and node ids.
1226  * Messages can be transacted via this structure while held.  If the route
1227  * is lost messages will return failure.
1228  */
1229 hammer2_router_t *
1230 hammer2_router_get(uuid_t *pfs_clid, uuid_t *pfs_fsid)
1231 {
1232 }
1233
1234 /*
1235  * Release previously acquired router.
1236  */
1237 void
1238 hammer2_router_put(hammer2_router_t *router)
1239 {
1240 }
1241 #endif
1242
1243 /************************************************************************
1244  *                              DEBUGGER                                *
1245  ************************************************************************/
1246 /*
1247  * Dumps the spanning tree
1248  */
1249 void
1250 shell_tree(hammer2_router_t *router, char *cmdbuf __unused)
1251 {
1252         h2span_cluster_t *cls;
1253         h2span_node_t *node;
1254         h2span_link_t *slink;
1255         char *uustr = NULL;
1256
1257         pthread_mutex_lock(&cluster_mtx);
1258         RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
1259                 router_printf(router, "Cluster %s\n",
1260                              hammer2_uuid_to_str(&cls->pfs_clid, &uustr));
1261                 RB_FOREACH(node, h2span_node_tree, &cls->tree) {
1262                         router_printf(router, "    Node %s (%s)\n",
1263                                  hammer2_uuid_to_str(&node->pfs_fsid, &uustr),
1264                                  node->label);
1265                         RB_FOREACH(slink, h2span_link_tree, &node->tree) {
1266                                 router_printf(router, "\tLink dist=%d via %d\n",
1267                                              slink->dist,
1268                                              slink->state->iocom->sock_fd);
1269                         }
1270                 }
1271         }
1272         pthread_mutex_unlock(&cluster_mtx);
1273         if (uustr)
1274                 free(uustr);
1275 #if 0
1276         TAILQ_FOREACH(conn, &connq, entry) {
1277         }
1278 #endif
1279 }