/*
 * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * LNK_SPAN PROTOCOL SUPPORT FUNCTIONS
 *
 * This code supports the LNK_SPAN protocol.  Essentially all PFS
 * clients and services rendezvous with the userland hammer2 service and
 * open LNK_SPAN transactions using a message header linkid of 0,
 * registering any PFSs they have connectivity to with us.
 *
 * --
 *
 * Each registration maintains its own open LNK_SPAN message transaction.
 * The SPANs are collected, aggregated, and retransmitted over available
 * connections through the maintenance of additional LNK_SPAN message
 * transactions on each link.
 *
 * The msgid for each active LNK_SPAN transaction we receive allows us to
 * send a message to the target PFS (which might be one of many belonging
 * to the same cluster), by specifying that msgid as the linkid in any
 * message we send to the target PFS.
 *
 * Similarly, the msgid we allocate for any LNK_SPAN transaction we
 * transmit (and remember, we maintain multiple open LNK_SPAN transactions
 * on each connection representing the topology span, so every node sees
 * every other node as a separate open transaction) can be used by the
 * other end to route messages through us to another node, ultimately
 * winding up at the identified hammer2 PFS.  We have to adjust the spanid
 * in the message header at each hop to be representative of the outgoing
 * LNK_SPAN we are forwarding the message through.
 *
 * --
 *
 * If we were to retransmit every LNK_SPAN transaction we receive it would
 * create a huge mess, so we have to aggregate all received LNK_SPAN
 * transactions, sort them by the fsid (the cluster) and sub-sort them by
 * the pfs_fsid (individual nodes in the cluster), and only retransmit
 * (create outgoing transactions) for a subset of the nearest distance-hops
 * for each individual node.
 *
 * The higher level protocols can then issue transactions to the nodes making
 * up a cluster to perform all actions required.
 *
 * --
 *
 * Since this is a large topology and a spanning tree protocol, links can
 * go up and down all the time.  Any time a link goes down its transaction
 * is closed.  The transaction has to be closed on both ends before we can
 * delete (and potentially reuse) the related spanid.  The LNK_SPAN being
 * closed may have been propagated out to other connections and those related
 * LNK_SPANs are also closed.  All routes via the lost LNK_SPAN eventually
 * go away, with the closures ultimately reaching all sources and all targets.
 *
 * Any messages in-transit using a route that goes away will be thrown away.
 * Open transactions are only tracked at the two end-points.  When a link
 * failure propagates to an end-point the related open transactions lose
 * their spanid and are automatically aborted.
 *
 * It is important to note that internal route nodes cannot just associate
 * a lost LNK_SPAN transaction with another route to the same destination.
 * Message transactions MUST be serialized and MUST be ordered.  All messages
 * for a transaction must run over the same route.  So if the route used by
 * an active transaction is lost, the related messages will be fully aborted
 * and the higher protocol levels will retry as appropriate.
 *
 * FULLY ABORTING A ROUTED MESSAGE is handled via link-failure propagation
 * back to the originator.  Only the originator keeps track of a message.
 * Routers just pass it through.  If a route is lost during transit the
 * message is simply thrown away.
 *
 * It is also important to note that several paths to the same PFS can be
 * propagated along the same link, which allows concurrency and even
 * redundancy over several network interfaces or via different routes through
 * the topology.  Any given transaction will use only a single route but busy
 * servers will often have hundreds of transactions active simultaneously,
 * so having multiple active paths through the network topology for A<->B
 * will improve performance.
 *
 * --
 *
 * Most protocols consolidate operations rather than simply relaying them.
 * This is particularly true of LEAF protocols (such as strict HAMMER2
 * clients), of which there can be millions connecting into the cluster at
 * various points.  The SPAN protocol is not used for these LEAF elements.
 *
 * Instead the primary service they connect to implements a proxy for the
 * client protocols so the core topology only has to propagate a couple of
 * LNK_SPANs and not millions.  LNK_SPANs are meant to be used only for
 * core master nodes and satellite slaves and cache nodes.
 */

#include "hammer2.h"

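/*
 * A minimal sketch (kept under #if 0, not compiled) of the aggregation
 * rule described in the header comment above: for each node, only the
 * nearest few received spans are retransmitted.  The names span_in and
 * retransmit_span are hypothetical illustration only; the real code
 * below operates on red-black trees and uses count = 2 in
 * hammer2_relay_scan_specific().
 */
#if 0
struct span_in {
	uuid_t	pfs_fsid;	/* node the span leads to */
	int32_t	dist;		/* hop distance, lower is better */
};

/*
 * Given all received spans for one node sorted by ascending dist
 * (the same primary key h2span_link_cmp() uses), relay only the
 * first n of them.
 */
static void
relay_best_n(struct span_in *sorted, int nspans, int n)
{
	int i;

	for (i = 0; i < nspans && i < n; ++i)
		retransmit_span(&sorted[i]);	/* hypothetical helper */
}
#endif
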
/*
 * Maximum spanning tree distance.  This has the practical effect of
 * stopping tail-chasing closed loops when a feeder span is lost.
 */
#define HAMMER2_SPAN_MAXDIST	16
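
/*
 * Worked example of the cap (dist values assumed for illustration):
 * relays re-advertise a span at dist+1 (see hammer2_relay_scan_specific()
 * below).  If the seed span is lost but a cycle A -> B -> C -> A
 * survives, each node keeps seeing the span re-advertised by its
 * neighbor at dist+1: 3, 4, 5, ...  Without a cap the stale span would
 * circulate forever; once dist exceeds HAMMER2_SPAN_MAXDIST the span is
 * no longer relayed and the loop starves out.
 */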

/*
 * RED-BLACK TREE DEFINITIONS
 *
 * We need to track:
 *
 * (1) shared fsid's (a cluster).
 * (2) unique fsid's (a node in a cluster) <--- LNK_SPAN transactions.
 *
 * We need to aggregate all active LNK_SPANs and create our own outgoing
 * LNK_SPAN transactions on each of our connections representing the
 * aggregated state.
 *
 * h2span_connect       - list of iocom connections that wish to receive SPAN
 *                        propagation from other connections.  Might contain
 *                        a filter string.  Only iocoms with an open
 *                        LNK_CONN transaction are applicable for SPAN
 *                        propagation.
 *
 * h2span_relay         - List of links relayed (via SPAN).  Essentially
 *                        each relay structure represents a LNK_SPAN
 *                        transaction that we initiated, versus h2span_link
 *                        which is a LNK_SPAN transaction that we received.
 *
 * --
 *
 * h2span_cluster       - Organizes the shared fsid's.  One structure for
 *                        each cluster.
 *
 * h2span_node          - Organizes the nodes in a cluster.  One structure
 *                        for each unique {cluster,node}, aka {fsid, pfs_fsid}.
 *
 * h2span_link          - Organizes all incoming and outgoing LNK_SPAN message
 *                        transactions related to a node.
 *
 *                        One h2span_link structure for each incoming LNK_SPAN
 *                        transaction.  Links selected for propagation back
 *                        out are also where the outgoing LNK_SPAN messages
 *                        are indexed into (so we can propagate changes).
 *
 *                        The h2span_link's use a red-black tree to sort the
 *                        distance hop metric for the incoming LNK_SPAN.  We
 *                        then select the top N for outgoing.  When the
 *                        topology changes the top N may also change and cause
 *                        new outgoing LNK_SPAN transactions to be opened
 *                        and less desirable ones to be closed, causing
 *                        transactional aborts within the message flow in
 *                        the process.
 *
 * Also note            - All outgoing LNK_SPAN message transactions are also
 *                        entered into a red-black tree for use by the routing
 *                        function.  This is handled by msg.c in the state
 *                        code, not here.
 */
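
/*
 * Illustrative sketch (kept under #if 0, not compiled): how the three
 * trees nest.  A hypothetical best_link_for() walks cluster -> node ->
 * lowest-dist link using the same dummy-key RB_FIND pattern the real
 * handlers below use.  It relies on declarations that appear later in
 * this file and would require cluster_mtx to be held by the caller.
 */
#if 0
static h2span_link_t *
best_link_for(uuid_t *pfs_clid, uuid_t *pfs_fsid)
{
	h2span_cluster_t dummy_cls, *cls;
	h2span_node_t dummy_node, *node;

	dummy_cls.pfs_clid = *pfs_clid;
	cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
	if (cls == NULL)
		return(NULL);
	dummy_node.pfs_fsid = *pfs_fsid;
	node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
	if (node == NULL)
		return(NULL);
	/* links sort by ascending dist, so RB_MIN is the best link */
	return(RB_MIN(h2span_link_tree, &node->tree));
}
#endif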
185
186 struct h2span_link;
187 struct h2span_relay;
188 TAILQ_HEAD(h2span_media_queue, h2span_media);
189 TAILQ_HEAD(h2span_connect_queue, h2span_connect);
190 TAILQ_HEAD(h2span_relay_queue, h2span_relay);
191
192 RB_HEAD(h2span_cluster_tree, h2span_cluster);
193 RB_HEAD(h2span_node_tree, h2span_node);
194 RB_HEAD(h2span_link_tree, h2span_link);
195 RB_HEAD(h2span_relay_tree, h2span_relay);
196
197 /*
198  * This represents a media
199  */
200 struct h2span_media {
201         TAILQ_ENTRY(h2span_media) entry;
202         uuid_t  mediaid;
203         int     refs;
204         struct h2span_media_config {
205                 hammer2_copy_data_t     copy_run;
206                 hammer2_copy_data_t     copy_pend;
207                 pthread_t               thread;
208                 pthread_cond_t          cond;
209                 int                     ctl;
210                 int                     fd;
211                 hammer2_iocom_t         iocom;
212                 pthread_t               iocom_thread;
213                 enum { H2MC_STOPPED, H2MC_CONNECT, H2MC_RUNNING } state;
214         } config[HAMMER2_COPYID_COUNT];
215 };
216
217 typedef struct h2span_media_config h2span_media_config_t;
218
219 #define H2CONFCTL_STOP          0x00000001
220 #define H2CONFCTL_UPDATE        0x00000002
221
222 /*
223  * Received LNK_CONN transaction enables SPAN protocol over connection.
224  * (may contain filter).  Typically one for each mount and several may
225  * share the same media.
226  */
227 struct h2span_connect {
228         TAILQ_ENTRY(h2span_connect) entry;
229         struct h2span_relay_tree tree;
230         struct h2span_media *media;
231         hammer2_state_t *state;
232 };
233
/*
 * All received LNK_SPANs are organized by cluster (pfs_clid),
 * node (pfs_fsid), and link (received LNK_SPAN transaction).
 */
struct h2span_cluster {
        RB_ENTRY(h2span_cluster) rbnode;
        struct h2span_node_tree tree;
        uuid_t  pfs_clid;               /* shared fsid */
        int     refs;                   /* prevents destruction */
};

struct h2span_node {
        RB_ENTRY(h2span_node) rbnode;
        struct h2span_link_tree tree;
        struct h2span_cluster *cls;
        uuid_t  pfs_fsid;               /* unique fsid */
        char label[64];
};

struct h2span_link {
        RB_ENTRY(h2span_link) rbnode;
        hammer2_state_t *state;         /* state<->link */
        struct h2span_node *node;       /* related node */
        int32_t dist;
        struct h2span_relay_queue relayq; /* relay out */
        struct hammer2_router *router;  /* route out this link */
};

/*
 * Any LNK_SPAN transactions we receive which are relayed out other
 * connections utilize this structure to track the LNK_SPAN transaction
 * we initiate on the other connections, if selected for relay.
 *
 * In many respects this is the core of the protocol... actually figuring
 * out what LNK_SPANs to relay.  The spanid used for relaying is the
 * address of the 'state' structure, which is why h2span_relay has to
 * be entered into a RB-TREE based at h2span_connect (so we can look
 * up the spanid to validate it).
 *
 * NOTE: Messages can be received via the LNK_SPAN transaction the
 *       relay maintains, and can be replied via relay->router, but
 *       messages are NOT initiated via a relay.  Messages are initiated
 *       via incoming links (h2span_link's).
 *
 *       relay->link represents the link being relayed, NOT the LNK_SPAN
 *       transaction the relay is holding open.
 */
struct h2span_relay {
        RB_ENTRY(h2span_relay) rbnode;  /* from h2span_connect */
        TAILQ_ENTRY(h2span_relay) entry; /* from link */
        struct h2span_connect *conn;
        hammer2_state_t *state;         /* transmitted LNK_SPAN */
        struct h2span_link *link;       /* LNK_SPAN being relayed */
        struct hammer2_router   *router;/* route out this relay */
};


typedef struct h2span_media h2span_media_t;
typedef struct h2span_connect h2span_connect_t;
typedef struct h2span_cluster h2span_cluster_t;
typedef struct h2span_node h2span_node_t;
typedef struct h2span_link h2span_link_t;
typedef struct h2span_relay h2span_relay_t;

static
int
h2span_cluster_cmp(h2span_cluster_t *cls1, h2span_cluster_t *cls2)
{
        return(uuid_compare(&cls1->pfs_clid, &cls2->pfs_clid, NULL));
}

static
int
h2span_node_cmp(h2span_node_t *node1, h2span_node_t *node2)
{
        return(uuid_compare(&node1->pfs_fsid, &node2->pfs_fsid, NULL));
}

/*
 * Sort/subsort must match h2span_relay_cmp() under any given node
 * to make the aggregation algorithm easier, so the best links are
 * in the same sorted order as the best relays.
 *
 * NOTE: We cannot use link*->state->msgid because this msgid is created
 *       by each remote host and thus might wind up being the same.
 */
static
int
h2span_link_cmp(h2span_link_t *link1, h2span_link_t *link2)
{
        if (link1->dist < link2->dist)
                return(-1);
        if (link1->dist > link2->dist)
                return(1);
#if 1
        if ((uintptr_t)link1->state < (uintptr_t)link2->state)
                return(-1);
        if ((uintptr_t)link1->state > (uintptr_t)link2->state)
                return(1);
#else
        if (link1->state->msgid < link2->state->msgid)
                return(-1);
        if (link1->state->msgid > link2->state->msgid)
                return(1);
#endif
        return(0);
}

/*
 * Relay entries are sorted by node, subsorted by distance and link
 * address (so we can match up the conn->tree relay topology with
 * a node's link topology).
 */
static
int
h2span_relay_cmp(h2span_relay_t *relay1, h2span_relay_t *relay2)
{
        h2span_link_t *link1 = relay1->link;
        h2span_link_t *link2 = relay2->link;

        if ((intptr_t)link1->node < (intptr_t)link2->node)
                return(-1);
        if ((intptr_t)link1->node > (intptr_t)link2->node)
                return(1);
        if (link1->dist < link2->dist)
                return(-1);
        if (link1->dist > link2->dist)
                return(1);
#if 1
        if ((uintptr_t)link1->state < (uintptr_t)link2->state)
                return(-1);
        if ((uintptr_t)link1->state > (uintptr_t)link2->state)
                return(1);
#else
        if (link1->state->msgid < link2->state->msgid)
                return(-1);
        if (link1->state->msgid > link2->state->msgid)
                return(1);
#endif
        return(0);
}

RB_PROTOTYPE_STATIC(h2span_cluster_tree, h2span_cluster,
             rbnode, h2span_cluster_cmp);
RB_PROTOTYPE_STATIC(h2span_node_tree, h2span_node,
             rbnode, h2span_node_cmp);
RB_PROTOTYPE_STATIC(h2span_link_tree, h2span_link,
             rbnode, h2span_link_cmp);
RB_PROTOTYPE_STATIC(h2span_relay_tree, h2span_relay,
             rbnode, h2span_relay_cmp);

RB_GENERATE_STATIC(h2span_cluster_tree, h2span_cluster,
             rbnode, h2span_cluster_cmp);
RB_GENERATE_STATIC(h2span_node_tree, h2span_node,
             rbnode, h2span_node_cmp);
RB_GENERATE_STATIC(h2span_link_tree, h2span_link,
             rbnode, h2span_link_cmp);
RB_GENERATE_STATIC(h2span_relay_tree, h2span_relay,
             rbnode, h2span_relay_cmp);

/*
 * Global mutex protects cluster_tree lookups, connq, mediaq.
 */
static pthread_mutex_t cluster_mtx;
static struct h2span_cluster_tree cluster_tree = RB_INITIALIZER(cluster_tree);
static struct h2span_connect_queue connq = TAILQ_HEAD_INITIALIZER(connq);
static struct h2span_media_queue mediaq = TAILQ_HEAD_INITIALIZER(mediaq);

static void hammer2_lnk_span(hammer2_msg_t *msg);
static void hammer2_lnk_conn(hammer2_msg_t *msg);
static void hammer2_lnk_relay(hammer2_msg_t *msg);
static void hammer2_relay_scan(h2span_connect_t *conn, h2span_node_t *node);
static void hammer2_relay_delete(h2span_relay_t *relay);

static void *hammer2_volconf_thread(void *info);
static void hammer2_volconf_stop(h2span_media_config_t *conf);
static void hammer2_volconf_start(h2span_media_config_t *conf,
                                const char *hostname);

void
hammer2_msg_lnk_signal(hammer2_router_t *router __unused)
{
        pthread_mutex_lock(&cluster_mtx);
        hammer2_relay_scan(NULL, NULL);
        pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Receive a HAMMER2_MSG_PROTO_LNK message.  This is only called for
 * one-way and opening-transactions since state->func will be assigned
 * in all other cases.
 */
void
hammer2_msg_lnk(hammer2_msg_t *msg)
{
        switch(msg->any.head.cmd & HAMMER2_MSGF_BASECMDMASK) {
        case HAMMER2_LNK_CONN:
                hammer2_lnk_conn(msg);
                break;
        case HAMMER2_LNK_SPAN:
                hammer2_lnk_span(msg);
                break;
        default:
                fprintf(stderr,
                        "MSG_PROTO_LNK: Unknown msg %08x\n", msg->any.head.cmd);
                hammer2_msg_reply(msg, HAMMER2_MSG_ERR_NOSUPP);
                /* state invalid after reply */
                break;
        }
}

void
hammer2_lnk_conn(hammer2_msg_t *msg)
{
        hammer2_state_t *state = msg->state;
        h2span_media_t *media;
        h2span_media_config_t *conf;
        h2span_connect_t *conn;
        h2span_relay_t *relay;
        char *alloc = NULL;
        int i;

        pthread_mutex_lock(&cluster_mtx);

        switch(msg->any.head.cmd & HAMMER2_MSGF_TRANSMASK) {
        case HAMMER2_LNK_CONN | HAMMER2_MSGF_CREATE:
        case HAMMER2_LNK_CONN | HAMMER2_MSGF_CREATE | HAMMER2_MSGF_DELETE:
                /*
                 * On transaction start we allocate a new h2span_connect and
                 * acknowledge the request, leaving the transaction open.
                 * We then relay priority-selected SPANs.
                 */
                fprintf(stderr, "LNK_CONN(%08x): %s/%s\n",
                        (uint32_t)msg->any.head.msgid,
                        hammer2_uuid_to_str(&msg->any.lnk_conn.pfs_clid,
                                            &alloc),
                        msg->any.lnk_conn.label);
                free(alloc);

                conn = hammer2_alloc(sizeof(*conn));

                RB_INIT(&conn->tree);
                conn->state = state;
                state->func = hammer2_lnk_conn;
                state->any.conn = conn;
                TAILQ_INSERT_TAIL(&connq, conn, entry);

                /*
                 * Set up media
                 */
                TAILQ_FOREACH(media, &mediaq, entry) {
                        if (uuid_compare(&msg->any.lnk_conn.mediaid,
                                         &media->mediaid, NULL) == 0) {
                                break;
                        }
                }
                if (media == NULL) {
                        media = hammer2_alloc(sizeof(*media));
                        media->mediaid = msg->any.lnk_conn.mediaid;
                        TAILQ_INSERT_TAIL(&mediaq, media, entry);
                }
                conn->media = media;
                ++media->refs;

                if ((msg->any.head.cmd & HAMMER2_MSGF_DELETE) == 0) {
                        hammer2_msg_result(msg, 0);
                        hammer2_router_signal(msg->router);
                        break;
                }
                /* FALL THROUGH */
        case HAMMER2_LNK_CONN | HAMMER2_MSGF_DELETE:
        case HAMMER2_LNK_ERROR | HAMMER2_MSGF_DELETE:
deleteconn:
                /*
                 * On transaction terminate we clean out our h2span_connect
                 * and acknowledge the request, closing the transaction.
                 */
                fprintf(stderr, "LNK_CONN: Terminated\n");
                conn = state->any.conn;
                assert(conn);

                /*
                 * Clean out the media structure.  If the ref count drops
                 * to zero we also clean out the media config threads.
                 * These threads maintain span connections to other hammer2
                 * service daemons.
                 */
                media = conn->media;
                if (--media->refs == 0) {
                        fprintf(stderr, "Shutting down media spans\n");
                        for (i = 0; i < HAMMER2_COPYID_COUNT; ++i) {
                                conf = &media->config[i];

                                if (conf->thread == NULL)
                                        continue;
                                conf->ctl = H2CONFCTL_STOP;
                                pthread_cond_signal(&conf->cond);
                        }
                        for (i = 0; i < HAMMER2_COPYID_COUNT; ++i) {
                                conf = &media->config[i];

                                if (conf->thread == NULL)
                                        continue;
                                pthread_mutex_unlock(&cluster_mtx);
                                pthread_join(conf->thread, NULL);
                                pthread_mutex_lock(&cluster_mtx);
                                conf->thread = NULL;
                                pthread_cond_destroy(&conf->cond);
                        }
                        fprintf(stderr, "Media shutdown complete\n");
                        TAILQ_REMOVE(&mediaq, media, entry);
                        hammer2_free(media);
                }

                /*
                 * Clean out all relays.  This requires terminating each
                 * relay transaction.
                 */
                while ((relay = RB_ROOT(&conn->tree)) != NULL) {
                        hammer2_relay_delete(relay);
                }

                /*
                 * Clean out conn
                 */
                conn->media = NULL;
                conn->state = NULL;
                msg->state->any.conn = NULL;
                TAILQ_REMOVE(&connq, conn, entry);
                hammer2_free(conn);

                hammer2_msg_reply(msg, 0);
                /* state invalid after reply */
                break;
        case HAMMER2_LNK_VOLCONF:
                /*
                 * A one-way volume-configuration message transmitted over
                 * the open LNK_CONN transaction.
                 */
                fprintf(stderr, "RECEIVED VOLCONF\n");
                if (msg->any.lnk_volconf.index < 0 ||
                    msg->any.lnk_volconf.index >= HAMMER2_COPYID_COUNT) {
                        fprintf(stderr, "VOLCONF: ILLEGAL INDEX %d\n",
                                msg->any.lnk_volconf.index);
                        break;
                }
                if (msg->any.lnk_volconf.copy.path[sizeof(msg->any.lnk_volconf.copy.path) - 1] != 0 ||
                    msg->any.lnk_volconf.copy.path[0] == 0) {
                        fprintf(stderr, "VOLCONF: ILLEGAL PATH %d\n",
                                msg->any.lnk_volconf.index);
                        break;
                }
                conn = msg->state->any.conn;
                if (conn == NULL) {
                        fprintf(stderr, "VOLCONF: LNK_CONN is missing\n");
                        break;
                }
                conf = &conn->media->config[msg->any.lnk_volconf.index];
                conf->copy_pend = msg->any.lnk_volconf.copy;
                conf->ctl |= H2CONFCTL_UPDATE;
                if (conf->thread == NULL) {
                        fprintf(stderr, "VOLCONF THREAD STARTED\n");
                        pthread_cond_init(&conf->cond, NULL);
                        pthread_create(&conf->thread, NULL,
                                       hammer2_volconf_thread, (void *)conf);
                }
                pthread_cond_signal(&conf->cond);
                break;
        default:
                /*
                 * Failsafe
                 */
                if (msg->any.head.cmd & HAMMER2_MSGF_DELETE)
                        goto deleteconn;
                hammer2_msg_reply(msg, HAMMER2_MSG_ERR_NOSUPP);
                break;
        }
        pthread_mutex_unlock(&cluster_mtx);
}

void
hammer2_lnk_span(hammer2_msg_t *msg)
{
        hammer2_state_t *state = msg->state;
        h2span_cluster_t dummy_cls;
        h2span_node_t dummy_node;
        h2span_cluster_t *cls;
        h2span_node_t *node;
        h2span_link_t *slink;
        h2span_relay_t *relay;
        char *alloc = NULL;

        assert((msg->any.head.cmd & HAMMER2_MSGF_REPLY) == 0);

        pthread_mutex_lock(&cluster_mtx);

        /*
         * On transaction start we initialize the tracking infrastructure.
         */
        if (msg->any.head.cmd & HAMMER2_MSGF_CREATE) {
                assert(state->func == NULL);
                state->func = hammer2_lnk_span;

                msg->any.lnk_span.label[sizeof(msg->any.lnk_span.label)-1] = 0;

                /*
                 * Find the cluster
                 */
                dummy_cls.pfs_clid = msg->any.lnk_span.pfs_clid;
                cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
                if (cls == NULL) {
                        cls = hammer2_alloc(sizeof(*cls));
                        cls->pfs_clid = msg->any.lnk_span.pfs_clid;
                        RB_INIT(&cls->tree);
                        RB_INSERT(h2span_cluster_tree, &cluster_tree, cls);
                }

                /*
                 * Find the node
                 */
                dummy_node.pfs_fsid = msg->any.lnk_span.pfs_fsid;
                node = RB_FIND(h2span_node_tree, &cls->tree, &dummy_node);
                if (node == NULL) {
                        node = hammer2_alloc(sizeof(*node));
                        node->pfs_fsid = msg->any.lnk_span.pfs_fsid;
                        node->cls = cls;
                        RB_INIT(&node->tree);
                        RB_INSERT(h2span_node_tree, &cls->tree, node);
                        snprintf(node->label, sizeof(node->label),
                                 "%s", msg->any.lnk_span.label);
                }

                /*
                 * Create the link
                 */
                assert(state->any.link == NULL);
                slink = hammer2_alloc(sizeof(*slink));
                TAILQ_INIT(&slink->relayq);
                slink->node = node;
                slink->dist = msg->any.lnk_span.dist;
                slink->state = state;
                state->any.link = slink;

                /*
                 * Embedded router structure in link for message forwarding.
                 *
                 * The spanning id for the router is the message id of
                 * the SPAN link it is embedded in, allowing messages to
                 * be routed via &slink->router.
                 */
                slink->router = hammer2_router_alloc();
                slink->router->iocom = state->iocom;
                slink->router->link = slink;
                slink->router->target = state->msgid;
                hammer2_router_connect(slink->router);

                RB_INSERT(h2span_link_tree, &node->tree, slink);

                fprintf(stderr, "LNK_SPAN(thr %p): %p %s/%s dist=%d\n",
                        msg->router->iocom,
                        slink,
                        hammer2_uuid_to_str(&msg->any.lnk_span.pfs_clid,
                                            &alloc),
                        msg->any.lnk_span.label,
                        msg->any.lnk_span.dist);
                free(alloc);
#if 0
                hammer2_relay_scan(NULL, node);
#endif
                hammer2_router_signal(msg->router);
        }

        /*
         * On transaction terminate we remove the tracking infrastructure.
         */
        if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
                slink = state->any.link;
                assert(slink != NULL);
                node = slink->node;
                cls = node->cls;

                fprintf(stderr, "LNK_DELE(thr %p): %p %s/%s dist=%d\n",
                        msg->router->iocom,
                        slink,
                        hammer2_uuid_to_str(&cls->pfs_clid, &alloc),
                        state->msg->any.lnk_span.label,
                        state->msg->any.lnk_span.dist);
                free(alloc);

                /*
                 * Remove the router from consideration
                 */
                hammer2_router_disconnect(&slink->router);

                /*
                 * Clean out all relays.  This requires terminating each
                 * relay transaction.
                 */
                while ((relay = TAILQ_FIRST(&slink->relayq)) != NULL) {
                        hammer2_relay_delete(relay);
                }

                /*
                 * Clean out the topology
                 */
                RB_REMOVE(h2span_link_tree, &node->tree, slink);
                if (RB_EMPTY(&node->tree)) {
                        RB_REMOVE(h2span_node_tree, &cls->tree, node);
                        if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
                                RB_REMOVE(h2span_cluster_tree,
                                          &cluster_tree, cls);
                                hammer2_free(cls);
                        }
                        node->cls = NULL;
                        hammer2_free(node);
                        node = NULL;
                }
                state->any.link = NULL;
                slink->state = NULL;
                slink->node = NULL;
                hammer2_free(slink);

                /*
                 * We have to terminate the transaction
                 */
                hammer2_state_reply(state, 0);
                /* state invalid after reply */

                /*
                 * If the node still exists issue any required updates.  If
                 * it doesn't then all related relays have already been
                 * removed and there's nothing left to do.
                 */
#if 0
                if (node)
                        hammer2_relay_scan(NULL, node);
#endif
                if (node)
                        hammer2_router_signal(msg->router);
        }

        pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Messages received on relay SPANs.  These are open transactions so it is
 * in fact possible for the other end to close the transaction.
 *
 * XXX MPRACE on state structure
 */
static void
hammer2_lnk_relay(hammer2_msg_t *msg)
{
        hammer2_state_t *state = msg->state;
        h2span_relay_t *relay;

        assert(msg->any.head.cmd & HAMMER2_MSGF_REPLY);

        if (msg->any.head.cmd & HAMMER2_MSGF_DELETE) {
                pthread_mutex_lock(&cluster_mtx);
                if ((relay = state->any.relay) != NULL) {
                        hammer2_relay_delete(relay);
                } else {
                        hammer2_state_reply(state, 0);
                }
                pthread_mutex_unlock(&cluster_mtx);
        }
}

/*
 * Update relay transactions for SPANs.
 *
 * Called with cluster_mtx held.
 */
static void hammer2_relay_scan_specific(h2span_node_t *node,
                                        h2span_connect_t *conn);

static void
hammer2_relay_scan(h2span_connect_t *conn, h2span_node_t *node)
{
        h2span_cluster_t *cls;

        if (node) {
                /*
                 * Iterate specific node
                 */
                TAILQ_FOREACH(conn, &connq, entry)
                        hammer2_relay_scan_specific(node, conn);
        } else {
                /*
                 * Full iteration.
                 *
                 * Iterate cluster ids, nodes, and either a specific connection
                 * or all connections.
                 */
                RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
                        /*
                         * Iterate node ids
                         */
                        RB_FOREACH(node, h2span_node_tree, &cls->tree) {
                                /*
                                 * Synchronize the node's link (received SPANs)
                                 * with each connection's relays.
                                 */
                                if (conn) {
                                        hammer2_relay_scan_specific(node, conn);
                                } else {
                                        TAILQ_FOREACH(conn, &connq, entry) {
                                            hammer2_relay_scan_specific(node,
                                                                        conn);
                                        }
                                        assert(conn == NULL);
                                }
                        }
                }
        }
}

/*
 * Update the relay'd SPANs for this (node, conn).
 *
 * Iterate links and adjust relays to match.  We currently propagate the
 * top two links (count = 2 below).
 *
 * The hammer2_relay_scan_cmp() function locates the first relay element
 * for any given node.  The relay elements will be sub-sorted by dist.
 */
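/*
 * Worked example of the scan below (dist values assumed): suppose a
 * node's incoming links sort to dist {1, 3, 7} and this connection
 * currently relays the dist {3, 7} links.  The first pass sees the
 * dist=1 link with no matching relay and constructs one (slink is
 * strictly better).  The second pass matches the existing dist=3 relay
 * and keeps it, exhausting count (2).  The stale dist=7 relay is then
 * torn down by the cleanup loop at the end of
 * hammer2_relay_scan_specific().
 */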
struct relay_scan_info {
        h2span_node_t *node;
        h2span_relay_t *relay;
};

static int
hammer2_relay_scan_cmp(h2span_relay_t *relay, void *arg)
{
        struct relay_scan_info *info = arg;

        if ((intptr_t)relay->link->node < (intptr_t)info->node)
                return(-1);
        if ((intptr_t)relay->link->node > (intptr_t)info->node)
                return(1);
        return(0);
}

static int
hammer2_relay_scan_callback(h2span_relay_t *relay, void *arg)
{
        struct relay_scan_info *info = arg;

        info->relay = relay;
        return(-1);
}

static void
hammer2_relay_scan_specific(h2span_node_t *node, h2span_connect_t *conn)
{
        struct relay_scan_info info;
        h2span_relay_t *relay;
        h2span_relay_t *next_relay;
        h2span_link_t *slink;
        int count = 2;

        info.node = node;
        info.relay = NULL;

        /*
         * Locate the first related relay for the node on this connection.
         * relay will be NULL if there are none.
         */
        RB_SCAN(h2span_relay_tree, &conn->tree,
                hammer2_relay_scan_cmp, hammer2_relay_scan_callback, &info);
        relay = info.relay;
        info.relay = NULL;
        if (relay)
                assert(relay->link->node == node);

        if (DebugOpt > 8)
                fprintf(stderr, "relay scan for connection %p\n", conn);

        /*
         * Iterate the node's links (received SPANs) in distance order,
         * lowest (best) dist first.
         */
        /* fprintf(stderr, "LOOP\n"); */
        RB_FOREACH(slink, h2span_link_tree, &node->tree) {
                /*
                fprintf(stderr, "SLINK %p RELAY %p(%p)\n",
                        slink, relay, relay ? relay->link : NULL);
                */
                /*
                 * PROPAGATE THE BEST LINKS OVER THE SPECIFIED CONNECTION.
                 *
                 * Track relays while iterating the best links and construct
                 * missing relays when necessary.
                 *
                 * (If some prior better link was removed it would have also
                 *  removed the relay, so the relay can only match exactly or
                 *  be worse).
                 */
                if (relay && relay->link == slink) {
                        /*
                         * Match, relay already in-place, get the next
                         * relay to match against the next slink.
                         */
                        relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
                        if (--count == 0)
                                break;
                } else if (slink->dist > HAMMER2_SPAN_MAXDIST) {
                        /*
                         * No match but the span distance is too great,
                         * do not relay.  This prevents endless closed
                         * loops with ever-incrementing distances when
                         * the seed span is lost in the graph.
                         *
                         * All later spans will also be too far away so
                         * we can break out of the loop.
                         */
                        break;
                } else if (slink->state->iocom == conn->state->iocom) {
                        /*
                         * No match but we would transmit a LNK_SPAN
                         * out the same connection it came in on, which
                         * can be trivially optimized out.
                         */
                        break;
                } else {
                        /*
                         * No match, distance is ok, construct a new relay.
                         * (slink is better than relay).
                         */
                        hammer2_msg_t *msg;

                        assert(relay == NULL ||
                               relay->link->node != slink->node ||
                               relay->link->dist >= slink->dist);
                        relay = hammer2_alloc(sizeof(*relay));
                        relay->conn = conn;
                        relay->link = slink;

                        msg = hammer2_msg_alloc(conn->state->iocom->router, 0,
                                                HAMMER2_LNK_SPAN |
                                                HAMMER2_MSGF_CREATE,
                                                hammer2_lnk_relay, relay);
                        relay->state = msg->state;
                        relay->router = hammer2_router_alloc();
                        relay->router->iocom = relay->state->iocom;
                        relay->router->relay = relay;
                        relay->router->target = relay->state->msgid;

                        msg->any.lnk_span = slink->state->msg->any.lnk_span;
                        msg->any.lnk_span.dist = slink->dist + 1;

                        hammer2_router_connect(relay->router);

                        RB_INSERT(h2span_relay_tree, &conn->tree, relay);
                        TAILQ_INSERT_TAIL(&slink->relayq, relay, entry);

                        hammer2_msg_write(msg);

                        fprintf(stderr,
                                "RELAY SPAN %p RELAY %p ON CLS=%p NODE=%p DIST=%d "
                                "FD %d state %p\n",
                                slink,
                                relay,
                                node->cls, node, slink->dist,
                                conn->state->iocom->sock_fd, relay->state);

                        /*
                         * Match (created new relay), get the next relay to
                         * match against the next slink.
                         */
                        relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
                        if (--count == 0)
                                break;
                }
        }

        /*
         * Any remaining relays belonging to this connection which match
         * the node are in excess of the current aggregate spanning state
         * and should be removed.
         */
        while (relay && relay->link->node == node) {
                next_relay = RB_NEXT(h2span_relay_tree, &conn->tree, relay);
                hammer2_relay_delete(relay);
                relay = next_relay;
        }
}

static
void
hammer2_relay_delete(h2span_relay_t *relay)
{
        fprintf(stderr,
                "RELAY DELETE %p RELAY %p ON CLS=%p NODE=%p DIST=%d FD %d STATE %p\n",
                relay->link,
                relay,
                relay->link->node->cls, relay->link->node,
                relay->link->dist,
                relay->conn->state->iocom->sock_fd, relay->state);

        hammer2_router_disconnect(&relay->router);

        RB_REMOVE(h2span_relay_tree, &relay->conn->tree, relay);
        TAILQ_REMOVE(&relay->link->relayq, relay, entry);

        if (relay->state) {
                relay->state->any.relay = NULL;
                hammer2_state_reply(relay->state, 0);
                /* state invalid after reply */
                relay->state = NULL;
        }
        relay->conn = NULL;
        relay->link = NULL;
        hammer2_free(relay);
}

static void *
hammer2_volconf_thread(void *info)
{
        h2span_media_config_t *conf = info;

        pthread_mutex_lock(&cluster_mtx);
        while ((conf->ctl & H2CONFCTL_STOP) == 0) {
                if (conf->ctl & H2CONFCTL_UPDATE) {
                        fprintf(stderr, "VOLCONF UPDATE\n");
                        conf->ctl &= ~H2CONFCTL_UPDATE;
                        if (bcmp(&conf->copy_run, &conf->copy_pend,
                                 sizeof(conf->copy_run)) == 0) {
                                fprintf(stderr, "VOLCONF: no changes\n");
                                continue;
                        }
                        /*
                         * XXX TODO - auto reconnect on lookup failure or
                         *              connect failure or stream failure.
                         */

                        pthread_mutex_unlock(&cluster_mtx);
                        hammer2_volconf_stop(conf);
                        conf->copy_run = conf->copy_pend;
                        if (conf->copy_run.copyid != 0 &&
                            strncmp(conf->copy_run.path, "span:", 5) == 0) {
                                hammer2_volconf_start(conf,
                                                      conf->copy_run.path + 5);
                        }
                        pthread_mutex_lock(&cluster_mtx);
                        fprintf(stderr, "VOLCONF UPDATE DONE state %d\n", conf->state);
                }
                if (conf->state == H2MC_CONNECT) {
                        hammer2_volconf_start(conf, conf->copy_run.path + 5);
                        pthread_mutex_unlock(&cluster_mtx);
                        sleep(5);
                        pthread_mutex_lock(&cluster_mtx);
                } else {
                        pthread_cond_wait(&conf->cond, &cluster_mtx);
                }
        }
        pthread_mutex_unlock(&cluster_mtx);
        hammer2_volconf_stop(conf);
        return(NULL);
}

static
void
hammer2_volconf_stop(h2span_media_config_t *conf)
{
        switch(conf->state) {
        case H2MC_STOPPED:
                break;
        case H2MC_CONNECT:
                conf->state = H2MC_STOPPED;
                break;
        case H2MC_RUNNING:
                shutdown(conf->fd, SHUT_WR);
                pthread_join(conf->iocom_thread, NULL);
                conf->iocom_thread = NULL;
                break;
        }
}

static
void
hammer2_volconf_start(h2span_media_config_t *conf, const char *hostname)
{
        switch(conf->state) {
        case H2MC_STOPPED:
        case H2MC_CONNECT:
                conf->fd = hammer2_connect(hostname);
                if (conf->fd < 0) {
                        fprintf(stderr, "Unable to connect to %s\n", hostname);
                        conf->state = H2MC_CONNECT;
                } else {
                        pthread_create(&conf->iocom_thread, NULL,
                                       master_service,
                                       (void *)(intptr_t)conf->fd);
                        conf->state = H2MC_RUNNING;
                }
                break;
        case H2MC_RUNNING:
                break;
        }
}

/************************************************************************
 *                      ROUTER AND MESSAGING HANDLES                    *
 ************************************************************************
 *
 * Basically the idea here is to provide a stable data structure which
 * can be localized to the caller for higher level protocols to work with.
 * Depending on the context, these hammer2_handles can be pooled by use-case
 * and remain persistent through a client's (or mount point's) life.
 */

#if 0
/*
 * Obtain a stable handle on a cluster given its uuid.  This ties directly
 * into the global cluster topology, creating the structure if necessary
 * (even if the uuid does not exist or does not exist yet), and preventing
 * the structure from getting ripped out from under us while we hold a
 * pointer to it.
 */
h2span_cluster_t *
hammer2_cluster_get(uuid_t *pfs_clid)
{
        h2span_cluster_t dummy_cls;
        h2span_cluster_t *cls;

        dummy_cls.pfs_clid = *pfs_clid;
        pthread_mutex_lock(&cluster_mtx);
        cls = RB_FIND(h2span_cluster_tree, &cluster_tree, &dummy_cls);
        if (cls)
                ++cls->refs;
        pthread_mutex_unlock(&cluster_mtx);
        return (cls);
}

void
hammer2_cluster_put(h2span_cluster_t *cls)
{
        pthread_mutex_lock(&cluster_mtx);
        assert(cls->refs > 0);
        --cls->refs;
        if (RB_EMPTY(&cls->tree) && cls->refs == 0) {
                RB_REMOVE(h2span_cluster_tree,
                          &cluster_tree, cls);
                hammer2_free(cls);
        }
        pthread_mutex_unlock(&cluster_mtx);
}

/*
 * Obtain a stable handle to a specific cluster node given its uuid.
 * This handle does NOT lock in the route to the node and is typically
 * used as part of the hammer2_handle_*() API to obtain a set of
 * stable nodes.
 */
h2span_node_t *
hammer2_node_get(h2span_cluster_t *cls, uuid_t *pfs_fsid)
{
}

#endif

#if 0
/*
 * Acquire a persistent router structure given the cluster and node ids.
 * Messages can be transacted via this structure while held.  If the route
 * is lost messages will return failure.
 */
hammer2_router_t *
hammer2_router_get(uuid_t *pfs_clid, uuid_t *pfs_fsid)
{
}

/*
 * Release previously acquired router.
 */
void
hammer2_router_put(hammer2_router_t *router)
{
}
#endif

/************************************************************************
 *                              DEBUGGER                                *
 ************************************************************************/
/*
 * Dumps the spanning tree
 */
void
shell_tree(hammer2_router_t *router, char *cmdbuf __unused)
{
        h2span_cluster_t *cls;
        h2span_node_t *node;
        h2span_link_t *slink;
        char *uustr = NULL;

        pthread_mutex_lock(&cluster_mtx);
        RB_FOREACH(cls, h2span_cluster_tree, &cluster_tree) {
                router_printf(router, "Cluster %s\n",
                             hammer2_uuid_to_str(&cls->pfs_clid, &uustr));
                RB_FOREACH(node, h2span_node_tree, &cls->tree) {
                        router_printf(router, "    Node %s (%s)\n",
                                 hammer2_uuid_to_str(&node->pfs_fsid, &uustr),
                                 node->label);
                        RB_FOREACH(slink, h2span_link_tree, &node->tree) {
                                router_printf(router, "\tLink dist=%d via %d\n",
                                             slink->dist,
                                             slink->state->iocom->sock_fd);
                        }
                }
        }
        pthread_mutex_unlock(&cluster_mtx);
        if (uustr)
                free(uustr);
#if 0
        TAILQ_FOREACH(conn, &connq, entry) {
        }
#endif
}