2 * Copyright (c) 2011-2012 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 #include "dmsg_local.h"
/* Forward declarations for the file-local transaction state machine. */
40 static int dmsg_state_msgrx(dmsg_msg_t *msg);
41 static void dmsg_state_cleanuptx(dmsg_msg_t *msg);
44 * ROUTER TREE - Represents available routes for message routing, indexed
45 * by their span transaction id. The router structure is
46 * embedded in either an iocom, h2span_link (incoming),
47 * or h2span_relay (outgoing) (see msg_lnk.c).
50 dmsg_router_cmp(dmsg_router_t *router1, dmsg_router_t *router2)
52 if (router1->target < router2->target)
54 if (router1->target > router2->target)
59 RB_GENERATE(dmsg_router_tree, dmsg_router, rbnode, dmsg_router_cmp);
/*
 * Global router trees (see msg_lnk.c for the link/relay structures).
 * NOTE(review): router_mtx presumably guards both trees below --
 * confirm against the code that walks them.
 */
61 static pthread_mutex_t router_mtx;
62 static struct dmsg_router_tree router_ltree = RB_INITIALIZER(router_ltree);
63 static struct dmsg_router_tree router_rtree = RB_INITIALIZER(router_rtree);
66 * STATE TREE - Represents open transactions which are indexed by their
67 * {router,msgid} relative to the governing iocom.
69 * router is usually iocom->router since state isn't stored
70 * for relayed messages.
73 dmsg_state_cmp(dmsg_state_t *state1, dmsg_state_t *state2)
76 if (state1->router < state2->router)
78 if (state1->router > state2->router)
81 if (state1->msgid < state2->msgid)
83 if (state1->msgid > state2->msgid)
88 RB_GENERATE(dmsg_state_tree, dmsg_state, rbnode, dmsg_state_cmp);
91 * Initialize a low-level ioq
94 dmsg_ioq_init(dmsg_iocom_t *iocom __unused, dmsg_ioq_t *ioq)
96 bzero(ioq, sizeof(*ioq));
97 ioq->state = DMSG_MSGQ_STATE_HEADER1;
98 TAILQ_INIT(&ioq->msgq);
104 * caller holds iocom->mtx.
/*
 * Tear down an ioq.  Caller holds iocom->mtx.  The msgq is expected to
 * already be empty (hence the assert in the drain loop); any partially
 * built message still referenced by ioq->msg is also released.
 *
 * NOTE(review): the extraction dropped lines here (the free calls and
 * the function epilogue) -- verify against upstream before relying on
 * the exact cleanup behavior.
 */
107 dmsg_ioq_done(dmsg_iocom_t *iocom __unused, dmsg_ioq_t *ioq)
111 while ((msg = TAILQ_FIRST(&ioq->msgq)) != NULL) {
112 assert(0); /* shouldn't happen */
113 TAILQ_REMOVE(&ioq->msgq, msg, qentry);
/* Release the in-progress message, if any. */
116 if ((msg = ioq->msg) != NULL) {
123 * Initialize a low-level communications channel.
125 * NOTE: The signal_func() is called at least once from the loop and can be
126 * re-armed via dmsg_iocom_restate().
/*
 * Initialize a low-level communications channel over sock_fd with an
 * optional alternative bulk fd.  Installs the four callbacks on the
 * iocom's embedded router, initializes the mutex, state trees, free
 * queues, and both ioqs, and creates a non-blocking wakeup pipe used
 * by other threads to kick the core loop.
 *
 * NOTE(review): several lines were dropped by the extraction (the
 * conditional around SWORK at old line 156-157, pipe error handling,
 * and the crypto/alt_fd guards) -- confirm against upstream.
 */
129 dmsg_iocom_init(dmsg_iocom_t *iocom, int sock_fd, int alt_fd,
130 void (*signal_func)(dmsg_router_t *),
131 void (*rcvmsg_func)(dmsg_msg_t *),
132 void (*dbgmsg_func)(dmsg_msg_t *),
133 void (*altmsg_func)(dmsg_iocom_t *))
137 bzero(iocom, sizeof(*iocom));
/* Embedded per-iocom router carries the callback set. */
139 iocom->router = dmsg_router_alloc();
140 iocom->router->signal_callback = signal_func;
141 iocom->router->rcvmsg_callback = rcvmsg_func;
142 iocom->router->altmsg_callback = altmsg_func;
143 iocom->router->dbgmsg_callback = dbgmsg_func;
144 /* we do not call dmsg_router_connect() for iocom routers */
146 pthread_mutex_init(&iocom->mtx, NULL);
147 RB_INIT(&iocom->router->staterd_tree);
148 RB_INIT(&iocom->router->statewr_tree);
149 TAILQ_INIT(&iocom->freeq);
150 TAILQ_INIT(&iocom->freeq_aux);
151 TAILQ_INIT(&iocom->router->txmsgq);
152 iocom->router->iocom = iocom;
153 iocom->sock_fd = sock_fd;
154 iocom->alt_fd = alt_fd;
/* Start wanting read events; SWORK arms the signal callback once. */
155 iocom->flags = DMSG_IOCOMF_RREQ;
157 iocom->flags |= DMSG_IOCOMF_SWORK;
158 dmsg_ioq_init(iocom, &iocom->ioq_rx);
159 dmsg_ioq_init(iocom, &iocom->ioq_tx);
160 if (pipe(iocom->wakeupfds) < 0)
162 fcntl(iocom->wakeupfds[0], F_SETFL, O_NONBLOCK);
163 fcntl(iocom->wakeupfds[1], F_SETFL, O_NONBLOCK);
166 * Negotiate session crypto synchronously. This will mark the
167 * connection as error'd if it fails. If this is a pipe it's
168 * a linkage that we set up ourselves to the filesystem and there
/* Only real sockets negotiate crypto; pipes are trusted local links. */
171 if (fstat(sock_fd, &st) < 0)
173 if (S_ISSOCK(st.st_mode))
174 dmsg_crypto_negotiate(iocom);
177 * Make sure our fds are set to non-blocking for the iocom core.
180 fcntl(sock_fd, F_SETFL, O_NONBLOCK);
182 /* if line buffered our single fgets() should be fine */
184 fcntl(alt_fd, F_SETFL, O_NONBLOCK);
189 * May only be called from a callback from iocom_core.
191 * Adjust state machine functions, set flags to guarantee that both
192 * the recevmsg_func and the sendmsg_func is called at least once.
/*
 * Re-arm the router's state machine callbacks.  May only be called
 * from a callback running inside iocom_core.  SWORK is set or cleared
 * so the (possibly new) signal callback runs at least once.
 *
 * NOTE(review): the conditional selecting between the |= and &= of
 * SWORK (old lines 203/205) was dropped by the extraction --
 * presumably `if (signal_func)` -- confirm against upstream.
 */
195 dmsg_router_restate(dmsg_router_t *router,
196 void (*signal_func)(dmsg_router_t *),
197 void (*rcvmsg_func)(dmsg_msg_t *msg),
198 void (*altmsg_func)(dmsg_iocom_t *))
200 router->signal_callback = signal_func;
201 router->rcvmsg_callback = rcvmsg_func;
202 router->altmsg_callback = altmsg_func;
204 router->iocom->flags |= DMSG_IOCOMF_SWORK;
206 router->iocom->flags &= ~DMSG_IOCOMF_SWORK;
210 dmsg_router_signal(dmsg_router_t *router)
212 if (router->signal_callback)
213 router->iocom->flags |= DMSG_IOCOMF_SWORK;
217 * Cleanup a terminating iocom.
219 * Caller should not hold iocom->mtx. The iocom has already been disconnected
220 * from all possible references to it.
/*
 * Tear down a terminating iocom: close the socket, alt fd, and wakeup
 * pipe, drain both ioqs, free any cached messages on the free queues,
 * and destroy the mutex.  Caller must NOT hold iocom->mtx; the iocom
 * has already been disconnected from all references.
 *
 * NOTE(review): the extraction dropped lines in the freeq loops (the
 * free(msg) calls and aux_data release) -- these `if`s were likely
 * `while` loops upstream; confirm.
 */
223 dmsg_iocom_done(dmsg_iocom_t *iocom)
227 if (iocom->sock_fd >= 0) {
228 close(iocom->sock_fd);
231 if (iocom->alt_fd >= 0) {
232 close(iocom->alt_fd);
235 dmsg_ioq_done(iocom, &iocom->ioq_rx);
236 dmsg_ioq_done(iocom, &iocom->ioq_tx);
/* Release cached message structures. */
237 if ((msg = TAILQ_FIRST(&iocom->freeq)) != NULL) {
238 TAILQ_REMOVE(&iocom->freeq, msg, qentry);
241 if ((msg = TAILQ_FIRST(&iocom->freeq_aux)) != NULL) {
242 TAILQ_REMOVE(&iocom->freeq_aux, msg, qentry);
244 msg->aux_data = NULL;
/* Close both ends of the wakeup pipe. */
247 if (iocom->wakeupfds[0] >= 0) {
248 close(iocom->wakeupfds[0]);
249 iocom->wakeupfds[0] = -1;
251 if (iocom->wakeupfds[1] >= 0) {
252 close(iocom->wakeupfds[1]);
253 iocom->wakeupfds[1] = -1;
255 pthread_mutex_destroy(&iocom->mtx);
259 * Allocate a new one-way message.
/*
 * Allocate a new one-way message on the given router, recycling a
 * cached msg structure from the iocom free queues when possible.
 * aux_size is rounded up to the DMSG alignment.  When cmd has CREATE
 * set without REPLY a new dynamic transaction state is allocated,
 * keyed by its own address as msgid, and inserted into the statewr
 * tree.
 *
 * NOTE(review): iocom->mtx appears to be locked at old line 270 and
 * again at 297 before the RB_INSERT -- with lines missing in between
 * this looks like a recursive lock; most likely the first lock is
 * released earlier on a dropped line.  Confirm against upstream.
 */
262 dmsg_msg_alloc(dmsg_router_t *router, size_t aux_size, uint32_t cmd,
263 void (*func)(dmsg_msg_t *), void *data)
265 dmsg_state_t *state = NULL;
266 dmsg_iocom_t *iocom = router->iocom;
270 pthread_mutex_lock(&iocom->mtx);
/* Round aux_size up to the message alignment. */
272 aux_size = (aux_size + DMSG_ALIGNMASK) &
/* Prefer a cached msg with aux buffer when aux data is needed. */
274 if ((msg = TAILQ_FIRST(&iocom->freeq_aux)) != NULL)
275 TAILQ_REMOVE(&iocom->freeq_aux, msg, qentry);
277 if ((msg = TAILQ_FIRST(&iocom->freeq)) != NULL)
278 TAILQ_REMOVE(&iocom->freeq, msg, qentry);
280 if ((cmd & (DMSGF_CREATE | DMSGF_REPLY)) == DMSGF_CREATE) {
282 * Create state when CREATE is set without REPLY.
284 * NOTE: CREATE in txcmd handled by dmsg_msg_write()
285 * NOTE: DELETE in txcmd handled by dmsg_state_cleanuptx()
287 state = malloc(sizeof(*state));
288 bzero(state, sizeof(*state));
289 state->iocom = iocom;
290 state->flags = DMSG_STATE_DYNAMIC;
/* msgid is the state's own address: unique for this process. */
291 state->msgid = (uint64_t)(uintptr_t)state;
292 state->router = router;
293 state->txcmd = cmd & ~(DMSGF_CREATE | DMSGF_DELETE);
294 state->rxcmd = DMSGF_REPLY;
296 state->any.any = data;
297 pthread_mutex_lock(&iocom->mtx);
298 RB_INSERT(dmsg_state_tree,
299 &iocom->router->statewr_tree,
301 pthread_mutex_unlock(&iocom->mtx);
302 state->flags |= DMSG_STATE_INSERTED;
304 pthread_mutex_unlock(&iocom->mtx);
/* No cached msg available: allocate a fresh one. */
306 msg = malloc(sizeof(*msg));
307 bzero(msg, sizeof(*msg));
308 msg->aux_data = NULL;
/* Cached aux buffer of the wrong size is discarded and reallocated. */
311 if (msg->aux_size != aux_size) {
314 msg->aux_data = NULL;
318 msg->aux_data = malloc(aux_size);
319 msg->aux_size = aux_size;
/* Header size is encoded in the DMSGF_SIZE field of cmd. */
322 hbytes = (cmd & DMSGF_SIZE) * DMSG_ALIGN;
324 bzero(&msg->any.head, hbytes);
325 msg->hdr_size = hbytes;
326 msg->any.head.cmd = cmd;
327 msg->any.head.aux_descr = 0;
328 msg->any.head.aux_crc = 0;
329 msg->router = router;
333 msg->any.head.msgid = state->msgid;
339 * Free a message so it can be reused afresh.
341 * NOTE: aux_size can be 0 with a non-NULL aux_data.
/*
 * Return a message to the iocom's free queues for reuse; caller holds
 * iocom->mtx.  Messages carrying an aux buffer go to freeq_aux so the
 * buffer can be recycled; others go to freeq.
 *
 * NOTE(review): the conditional selecting between the two queues was
 * dropped by the extraction -- presumably tests msg->aux_data.
 */
345 dmsg_msg_free_locked(dmsg_msg_t *msg)
347 dmsg_iocom_t *iocom = msg->router->iocom;
351 TAILQ_INSERT_TAIL(&iocom->freeq_aux, msg, qentry);
353 TAILQ_INSERT_TAIL(&iocom->freeq, msg, qentry);
357 dmsg_msg_free(dmsg_msg_t *msg)
359 dmsg_iocom_t *iocom = msg->router->iocom;
361 pthread_mutex_lock(&iocom->mtx);
362 dmsg_msg_free_locked(msg);
363 pthread_mutex_unlock(&iocom->mtx);
367 * I/O core loop for an iocom.
369 * Thread localized, iocom->mtx not held.
/*
 * I/O core loop for an iocom.  Thread localized; iocom->mtx not held.
 *
 * Each iteration: poll() (only when no work flags are already set) on
 * the wakeup pipe, the socket (read and/or write as requested), and
 * the optional alt fd; translate revents into *WORK flags; then run
 * the signal callback, drain the wakeup pipe, flush writes, read and
 * dispatch messages, and run the alt callback.  Loop ends on EOF.
 *
 * NOTE(review): the extraction dropped many lines (declarations,
 * index bookkeeping for wi/si/ai, braces) -- treat this as a skeleton
 * and confirm details against upstream.
 */
372 dmsg_iocom_core(dmsg_iocom_t *iocom)
374 struct pollfd fds[3];
379 int wi; /* wakeup pipe */
381 int ai; /* alt bulk path socket */
383 while ((iocom->flags & DMSG_IOCOMF_EOF) == 0) {
384 if ((iocom->flags & (DMSG_IOCOMF_RWORK |
389 DMSG_IOCOMF_AWWORK)) == 0) {
391 * Only poll if no immediate work is pending.
392 * Otherwise we are just wasting our time calling
403 * Always check the inter-thread pipe, e.g.
404 * for iocom->txmsgq work.
407 fds[wi].fd = iocom->wakeupfds[0];
408 fds[wi].events = POLLIN;
412 * Check the socket input/output direction as
415 if (iocom->flags & (DMSG_IOCOMF_RREQ |
418 fds[si].fd = iocom->sock_fd;
422 if (iocom->flags & DMSG_IOCOMF_RREQ)
423 fds[si].events |= POLLIN;
424 if (iocom->flags & DMSG_IOCOMF_WREQ)
425 fds[si].events |= POLLOUT;
429 * Check the alternative fd for work.
431 if (iocom->alt_fd >= 0) {
433 fds[ai].fd = iocom->alt_fd;
434 fds[ai].events = POLLIN;
437 poll(fds, count, timeout);
/* Translate poll results into work flags. */
439 if (wi >= 0 && (fds[wi].revents & POLLIN))
440 iocom->flags |= DMSG_IOCOMF_PWORK;
441 if (si >= 0 && (fds[si].revents & POLLIN))
442 iocom->flags |= DMSG_IOCOMF_RWORK;
443 if (si >= 0 && (fds[si].revents & POLLOUT))
444 iocom->flags |= DMSG_IOCOMF_WWORK;
445 if (wi >= 0 && (fds[wi].revents & POLLOUT))
446 iocom->flags |= DMSG_IOCOMF_WWORK;
447 if (ai >= 0 && (fds[ai].revents & POLLIN))
448 iocom->flags |= DMSG_IOCOMF_ARWORK;
451 * Always check the pipe
453 iocom->flags |= DMSG_IOCOMF_PWORK;
/* One-shot signal callback, re-armed via dmsg_router_restate(). */
456 if (iocom->flags & DMSG_IOCOMF_SWORK) {
457 iocom->flags &= ~DMSG_IOCOMF_SWORK;
458 iocom->router->signal_callback(iocom->router);
462 * Pending message queues from other threads wake us up
463 * with a write to the wakeupfds[] pipe. We have to clear
464 * the pipe with a dummy read.
466 if (iocom->flags & DMSG_IOCOMF_PWORK) {
467 iocom->flags &= ~DMSG_IOCOMF_PWORK;
468 read(iocom->wakeupfds[0], dummybuf, sizeof(dummybuf));
469 iocom->flags |= DMSG_IOCOMF_RWORK;
470 iocom->flags |= DMSG_IOCOMF_WWORK;
471 if (TAILQ_FIRST(&iocom->router->txmsgq))
472 dmsg_iocom_flush1(iocom);
476 * Message write sequencing
478 if (iocom->flags & DMSG_IOCOMF_WWORK)
479 dmsg_iocom_flush1(iocom);
482 * Message read sequencing. Run this after the write
483 * sequencing in case the write sequencing allowed another
484 * auto-DELETE to occur on the read side.
486 if (iocom->flags & DMSG_IOCOMF_RWORK) {
487 while ((iocom->flags & DMSG_IOCOMF_EOF) == 0 &&
488 (msg = dmsg_ioq_read(iocom)) != NULL) {
490 fprintf(stderr, "receive %s\n",
/* Dispatch to the router's receive callback, then cleanup rx state. */
493 iocom->router->rcvmsg_callback(msg);
494 dmsg_state_cleanuprx(iocom, msg);
498 if (iocom->flags & DMSG_IOCOMF_ARWORK) {
499 iocom->flags &= ~DMSG_IOCOMF_ARWORK;
500 iocom->router->altmsg_callback(iocom);
506 * Make sure there's enough room in the FIFO to hold the
509 * Assume worst case encrypted form is 2x the size of the
510 * plaintext equivalent.
/*
 * Ensure the receive FIFO has room for `needed` bytes, compacting the
 * buffer (moving undecrypted and decrypted-but-unconsumed data to the
 * front) when the free tail space plus slack is insufficient.  The
 * worst-case encrypted form is assumed to be 2x the plaintext size.
 *
 * FIFO index invariants visible here: fifo_beg <= fifo_cdx (decrypted
 * consumable end) <= fifo_cdn (decrypt scan point) <= fifo_end.
 *
 * NOTE(review): the extraction dropped lines (the bcopy length
 * argument, fifo_beg reset, and the return of nmax) -- confirm
 * against upstream.
 */
514 dmsg_ioq_makeroom(dmsg_ioq_t *ioq, size_t needed)
519 bytes = ioq->fifo_cdx - ioq->fifo_beg;
520 nmax = sizeof(ioq->buf) - ioq->fifo_end;
521 if (bytes + nmax / 2 < needed) {
/* Compact: slide decrypted region to the buffer base... */
523 bcopy(ioq->buf + ioq->fifo_beg,
527 ioq->fifo_cdx -= ioq->fifo_beg;
/* ...then close the gap before the still-encrypted region. */
529 if (ioq->fifo_cdn < ioq->fifo_end) {
530 bcopy(ioq->buf + ioq->fifo_cdn,
531 ioq->buf + ioq->fifo_cdx,
532 ioq->fifo_end - ioq->fifo_cdn);
534 ioq->fifo_end -= ioq->fifo_cdn - ioq->fifo_cdx;
535 ioq->fifo_cdn = ioq->fifo_cdx;
536 nmax = sizeof(ioq->buf) - ioq->fifo_end;
542 * Read the next ready message from the ioq, issuing I/O if needed.
543 * Caller should retry on a read-event when NULL is returned.
545 * If an error occurs during reception a DMSG_LNK_ERROR msg will
546 * be returned for each open transaction, then the ioq and iocom
547 * will be errored out and a non-transactional DMSG_LNK_ERROR
548 * msg will be returned as the final message. The caller should not call
549 * us again after the final message is returned.
551 * Thread localized, iocom->mtx not held.
/*
 * Read the next ready message from the receive ioq, issuing socket
 * I/O as needed.  Returns NULL when insufficient data has accumulated
 * (caller retries on the next read event).  On a stream error, each
 * open transaction is terminated with a simulated LNK_ERROR DELETE,
 * followed by one final non-transactional LNK_ERROR; the caller must
 * not call again after the final message.
 *
 * State machine: HEADER1 (core header) -> HEADER2 (extended header,
 * CRC + endian fixup) -> AUXDATA1/AUXDATA2 (aux payload + CRC) ->
 * done, or ERROR for the drain phase.
 *
 * Thread localized; iocom->mtx not held (taken briefly for the error
 * path's state-tree walk).
 *
 * NOTE(review): the extraction dropped many lines throughout this
 * function (declarations, returns, braces, goto targets) -- this
 * annotated skeleton must be checked against upstream before edits.
 */
554 dmsg_ioq_read(dmsg_iocom_t *iocom)
556 dmsg_ioq_t *ioq = &iocom->ioq_rx;
567 iocom->flags &= ~(DMSG_IOCOMF_RREQ | DMSG_IOCOMF_RWORK);
570 * If a message is already pending we can just remove and
571 * return it. Message state has already been processed.
572 * (currently not implemented)
574 if ((msg = TAILQ_FIRST(&ioq->msgq)) != NULL) {
575 TAILQ_REMOVE(&ioq->msgq, msg, qentry);
580 * If the stream is errored out we stop processing it.
586 * Message read in-progress (msg is NULL at the moment). We don't
587 * allocate a msg until we have its core header.
589 nmax = sizeof(ioq->buf) - ioq->fifo_end;
590 bytes = ioq->fifo_cdx - ioq->fifo_beg; /* already decrypted */
594 case DMSG_MSGQ_STATE_HEADER1:
596 * Load the primary header, fail on any non-trivial read
597 * error or on EOF. Since the primary header is the same
598 * size is the message alignment it will never straddle
599 * the end of the buffer.
601 nmax = dmsg_ioq_makeroom(ioq, sizeof(msg->any.head));
602 if (bytes < sizeof(msg->any.head)) {
603 n = read(iocom->sock_fd,
604 ioq->buf + ioq->fifo_end,
/* n == 0 is EOF; negative n: only hard errors abort the stream. */
608 ioq->error = DMSG_IOQ_ERROR_EOF;
611 if (errno != EINTR &&
612 errno != EINPROGRESS &&
614 ioq->error = DMSG_IOQ_ERROR_SOCK;
620 ioq->fifo_end += (size_t)n;
625 * Decrypt data received so far. Data will be decrypted
626 * in-place but might create gaps in the FIFO. Partial
627 * blocks are not immediately decrypted.
629 * WARNING! The header might be in the wrong endian, we
630 * do not fix it up until we get the entire
633 if (iocom->flags & DMSG_IOCOMF_CRYPTED) {
634 dmsg_crypto_decrypt(iocom, ioq);
/* Unencrypted link: everything received is consumable. */
636 ioq->fifo_cdx = ioq->fifo_end;
637 ioq->fifo_cdn = ioq->fifo_end;
639 bytes = ioq->fifo_cdx - ioq->fifo_beg;
642 * Insufficient data accumulated (msg is NULL, caller will
646 if (bytes < sizeof(msg->any.head))
650 * Check and fixup the core header. Note that the icrc
651 * has to be calculated before any fixups, but the crc
652 * fields in the msg may have to be swapped like everything
655 head = (void *)(ioq->buf + ioq->fifo_beg);
656 if (head->magic != DMSG_HDR_MAGIC &&
657 head->magic != DMSG_HDR_MAGIC_REV) {
658 ioq->error = DMSG_IOQ_ERROR_SYNC;
663 * Calculate the full header size and aux data size
665 if (head->magic == DMSG_HDR_MAGIC_REV) {
666 ioq->hbytes = (bswap32(head->cmd) & DMSGF_SIZE) *
668 ioq->abytes = bswap32(head->aux_bytes) *
671 ioq->hbytes = (head->cmd & DMSGF_SIZE) *
673 ioq->abytes = head->aux_bytes * DMSG_ALIGN;
/* Sanity-limit the decoded sizes before allocating anything. */
675 if (ioq->hbytes < sizeof(msg->any.head) ||
676 ioq->hbytes > sizeof(msg->any) ||
677 ioq->abytes > DMSG_AUX_MAX) {
678 ioq->error = DMSG_IOQ_ERROR_FIELD;
683 * Allocate the message, the next state will fill it in.
685 msg = dmsg_msg_alloc(iocom->router, ioq->abytes, 0,
690 * Fall through to the next state. Make sure that the
691 * extended header does not straddle the end of the buffer.
692 * We still want to issue larger reads into our buffer,
693 * book-keeping is easier if we don't bcopy() yet.
695 * Make sure there is enough room for bloated encrypt data.
697 nmax = dmsg_ioq_makeroom(ioq, ioq->hbytes);
698 ioq->state = DMSG_MSGQ_STATE_HEADER2;
700 case DMSG_MSGQ_STATE_HEADER2:
702 * Fill out the extended header.
705 if (bytes < ioq->hbytes) {
706 n = read(iocom->sock_fd,
707 ioq->buf + ioq->fifo_end,
711 ioq->error = DMSG_IOQ_ERROR_EOF;
714 if (errno != EINTR &&
715 errno != EINPROGRESS &&
717 ioq->error = DMSG_IOQ_ERROR_SOCK;
723 ioq->fifo_end += (size_t)n;
727 if (iocom->flags & DMSG_IOCOMF_CRYPTED) {
728 dmsg_crypto_decrypt(iocom, ioq);
730 ioq->fifo_cdx = ioq->fifo_end;
731 ioq->fifo_cdn = ioq->fifo_end;
733 bytes = ioq->fifo_cdx - ioq->fifo_beg;
736 * Insufficient data accumulated (set msg NULL so caller will
739 if (bytes < ioq->hbytes) {
745 * Calculate the extended header, decrypt data received
746 * so far. Handle endian-conversion for the entire extended
749 head = (void *)(ioq->buf + ioq->fifo_beg);
/* CRC must be checked before the endian fixup of the header. */
754 if (head->magic == DMSG_HDR_MAGIC_REV)
755 xcrc32 = bswap32(head->hdr_crc);
757 xcrc32 = head->hdr_crc;
759 if (dmsg_icrc32(head, ioq->hbytes) != xcrc32) {
760 ioq->error = DMSG_IOQ_ERROR_XCRC;
761 fprintf(stderr, "BAD-XCRC(%08x,%08x) %s\n",
762 xcrc32, dmsg_icrc32(head, ioq->hbytes),
767 head->hdr_crc = xcrc32;
769 if (head->magic == DMSG_HDR_MAGIC_REV) {
770 dmsg_bswap_head(head);
774 * Copy the extended header into the msg and adjust the
777 bcopy(head, &msg->any, ioq->hbytes);
780 * We are either done or we fall-through.
782 if (ioq->abytes == 0) {
783 ioq->fifo_beg += ioq->hbytes;
788 * Must adjust bytes (and the state) when falling through.
789 * nmax doesn't change.
791 ioq->fifo_beg += ioq->hbytes;
792 bytes -= ioq->hbytes;
793 ioq->state = DMSG_MSGQ_STATE_AUXDATA1;
795 case DMSG_MSGQ_STATE_AUXDATA1:
797 * Copy the partial or complete payload from remaining
798 * bytes in the FIFO in order to optimize the makeroom call
799 * in the AUXDATA2 state. We have to fall-through either
800 * way so we can check the crc.
802 * msg->aux_size tracks our aux data.
804 if (bytes >= ioq->abytes) {
805 bcopy(ioq->buf + ioq->fifo_beg, msg->aux_data,
807 msg->aux_size = ioq->abytes;
808 ioq->fifo_beg += ioq->abytes;
809 assert(ioq->fifo_beg <= ioq->fifo_cdx);
810 assert(ioq->fifo_cdx <= ioq->fifo_cdn);
811 bytes -= ioq->abytes;
/* Partial payload: consume what we have and keep reading. */
813 bcopy(ioq->buf + ioq->fifo_beg, msg->aux_data,
815 msg->aux_size = bytes;
816 ioq->fifo_beg += bytes;
817 if (ioq->fifo_cdx < ioq->fifo_beg)
818 ioq->fifo_cdx = ioq->fifo_beg;
819 assert(ioq->fifo_beg <= ioq->fifo_cdx);
820 assert(ioq->fifo_cdx <= ioq->fifo_cdn);
825 ioq->state = DMSG_MSGQ_STATE_AUXDATA2;
827 case DMSG_MSGQ_STATE_AUXDATA2:
829 * Make sure there is enough room for more data.
832 nmax = dmsg_ioq_makeroom(ioq, ioq->abytes - msg->aux_size);
835 * Read and decrypt more of the payload.
837 if (msg->aux_size < ioq->abytes) {
839 n = read(iocom->sock_fd,
840 ioq->buf + ioq->fifo_end,
844 ioq->error = DMSG_IOQ_ERROR_EOF;
847 if (errno != EINTR &&
848 errno != EINPROGRESS &&
850 ioq->error = DMSG_IOQ_ERROR_SOCK;
856 ioq->fifo_end += (size_t)n;
860 if (iocom->flags & DMSG_IOCOMF_CRYPTED) {
861 dmsg_crypto_decrypt(iocom, ioq);
863 ioq->fifo_cdx = ioq->fifo_end;
864 ioq->fifo_cdn = ioq->fifo_end;
866 bytes = ioq->fifo_cdx - ioq->fifo_beg;
/* Clamp to what this message still needs. */
868 if (bytes > ioq->abytes - msg->aux_size)
869 bytes = ioq->abytes - msg->aux_size;
872 bcopy(ioq->buf + ioq->fifo_beg,
873 msg->aux_data + msg->aux_size,
875 msg->aux_size += bytes;
876 ioq->fifo_beg += bytes;
880 * Insufficient data accumulated (set msg NULL so caller will
883 if (msg->aux_size < ioq->abytes) {
887 assert(msg->aux_size == ioq->abytes);
890 * Check aux_crc, then we are done.
892 xcrc32 = dmsg_icrc32(msg->aux_data, msg->aux_size);
893 if (xcrc32 != msg->any.head.aux_crc) {
894 ioq->error = DMSG_IOQ_ERROR_ACRC;
898 case DMSG_MSGQ_STATE_ERROR:
900 * Continued calls to drain recorded transactions (returning
901 * a LNK_ERROR for each one), before we return the final
908 * We don't double-return errors, the caller should not
909 * have called us again after getting an error msg.
916 * Check the message sequence. The iv[] should prevent any
917 * possibility of a replay but we add this check anyway.
919 if (msg && ioq->error == 0) {
920 if ((msg->any.head.salt & 255) != (ioq->seq & 255)) {
921 ioq->error = DMSG_IOQ_ERROR_MSGSEQ;
928 * Handle relaying. Transactional state is not recorded XXX
932 * Process transactional state for the message.
934 if (msg && ioq->error == 0) {
935 error = dmsg_state_msgrx(msg);
/* EALREADY: state machine consumed the msg internally. */
937 if (error == DMSG_IOQ_ERROR_EALREADY) {
946 * Handle error, RREQ, or completion
948 * NOTE: nmax and bytes are invalid at this point, we don't bother
949 * to update them when breaking out.
954 * An unrecoverable error causes all active receive
955 * transactions to be terminated with a LNK_ERROR message.
957 * Once all active transactions are exhausted we set the
958 * iocom ERROR flag and return a non-transactional LNK_ERROR
959 * message, which should cause master processing loops to
962 assert(ioq->msg == msg);
969 * No more I/O read processing
971 ioq->state = DMSG_MSGQ_STATE_ERROR;
974 * Simulate a remote LNK_ERROR DELETE msg for any open
975 * transactions, ending with a final non-transactional
976 * LNK_ERROR (that the session can detect) when no
977 * transactions remain.
979 msg = dmsg_msg_alloc(iocom->router, 0, 0, NULL, NULL);
980 bzero(&msg->any.head, sizeof(msg->any.head));
981 msg->any.head.magic = DMSG_HDR_MAGIC;
982 msg->any.head.cmd = DMSG_LNK_ERROR;
983 msg->any.head.error = ioq->error;
985 pthread_mutex_lock(&iocom->mtx);
986 dmsg_iocom_drain(iocom);
987 if ((state = RB_ROOT(&iocom->router->staterd_tree)) != NULL) {
989 * Active remote transactions are still present.
990 * Simulate the other end sending us a DELETE.
992 if (state->rxcmd & DMSGF_DELETE) {
996 /*state->txcmd |= DMSGF_DELETE;*/
998 msg->router = state->router;
999 msg->any.head.msgid = state->msgid;
1000 msg->any.head.cmd |= DMSGF_ABORT |
1003 } else if ((state = RB_ROOT(&iocom->router->statewr_tree)) !=
1006 * Active local transactions are still present.
1007 * Simulate the other end sending us a DELETE.
1009 if (state->rxcmd & DMSGF_DELETE) {
1014 msg->router = state->router;
1015 msg->any.head.msgid = state->msgid;
1016 msg->any.head.cmd |= DMSGF_ABORT |
/* Their direction was never created: flag CREATE|REPLY too. */
1019 if ((state->rxcmd & DMSGF_CREATE) == 0) {
1020 msg->any.head.cmd |=
1026 * No active local or remote transactions remain.
1027 * Generate a final LNK_ERROR and flag EOF.
1030 iocom->flags |= DMSG_IOCOMF_EOF;
1031 fprintf(stderr, "EOF ON SOCKET %d\n", iocom->sock_fd);
1033 pthread_mutex_unlock(&iocom->mtx);
1036 * For the iocom error case we want to set RWORK to indicate
1037 * that more messages might be pending.
1039 * It is possible to return NULL when there is more work to
1040 * do because each message has to be DELETEd in both
1041 * directions before we continue on with the next (though
1042 * this could be optimized). The transmit direction will
1046 iocom->flags |= DMSG_IOCOMF_RWORK;
1047 } else if (msg == NULL) {
1049 * Insufficient data received to finish building the message,
1050 * set RREQ and return NULL.
1052 * Leave ioq->msg intact.
1053 * Leave the FIFO intact.
1055 iocom->flags |= DMSG_IOCOMF_RREQ;
1060 * The fifo has already been advanced past the message.
1061 * Trivially reset the FIFO indices if possible.
1063 * clear the FIFO if it is now empty and set RREQ to wait
1064 * for more from the socket. If the FIFO is not empty set
1065 * TWORK to bypass the poll so we loop immediately.
1067 if (ioq->fifo_beg == ioq->fifo_cdx &&
1068 ioq->fifo_cdn == ioq->fifo_end) {
1069 iocom->flags |= DMSG_IOCOMF_RREQ;
1075 iocom->flags |= DMSG_IOCOMF_RWORK;
/* Re-arm the state machine for the next message. */
1077 ioq->state = DMSG_MSGQ_STATE_HEADER1;
1084 * Calculate the header and data crc's and write a low-level message to
1085 * the connection. If aux_crc is non-zero the aux_data crc is already
1086 * assumed to have been set.
1088 * A non-NULL msg is added to the queue but not necessarily flushed.
1089 * Calling this function with msg == NULL will get a flush going.
1091 * Caller must hold iocom->mtx.
/*
 * Stage queued transmit messages: move the router's txmsgq (under
 * iocom->mtx) to a local queue, finalize each header (magic, salt,
 * aux_crc, hdr_crc), append to ioq_tx's msgq, then kick flush2 to
 * actually write.  Caller must hold iocom->mtx per the contract
 * above (the lock is taken/released internally around the queue
 * splice here -- NOTE(review): contract vs. code disagree; dropped
 * lines may explain this, confirm upstream).
 *
 * NOTE(review): no TAILQ_INIT(&tmpq) is visible before use --
 * almost certainly among the dropped lines.
 */
1094 dmsg_iocom_flush1(dmsg_iocom_t *iocom)
1096 dmsg_ioq_t *ioq = &iocom->ioq_tx;
1100 dmsg_msg_queue_t tmpq;
1102 iocom->flags &= ~(DMSG_IOCOMF_WREQ | DMSG_IOCOMF_WWORK);
/* Splice the shared txmsgq onto a private queue under the lock. */
1104 pthread_mutex_lock(&iocom->mtx);
1105 while ((msg = TAILQ_FIRST(&iocom->router->txmsgq)) != NULL) {
1106 TAILQ_REMOVE(&iocom->router->txmsgq, msg, qentry);
1107 TAILQ_INSERT_TAIL(&tmpq, msg, qentry);
1109 pthread_mutex_unlock(&iocom->mtx);
1111 while ((msg = TAILQ_FIRST(&tmpq)) != NULL) {
1113 * Process terminal connection errors.
1115 TAILQ_REMOVE(&tmpq, msg, qentry);
1117 TAILQ_INSERT_TAIL(&ioq->msgq, msg, qentry);
1123 * Finish populating the msg fields. The salt ensures that
1124 * the iv[] array is ridiculously randomized and we also
1125 * re-seed our PRNG every 32768 messages just to be sure.
1127 msg->any.head.magic = DMSG_HDR_MAGIC;
1128 msg->any.head.salt = (random() << 8) | (ioq->seq & 255);
1130 if ((ioq->seq & 32767) == 0)
1134 * Calculate aux_crc if 0, then calculate hdr_crc.
1136 if (msg->aux_size && msg->any.head.aux_crc == 0) {
1137 assert((msg->aux_size & DMSG_ALIGNMASK) == 0);
1138 xcrc32 = dmsg_icrc32(msg->aux_data, msg->aux_size);
1139 msg->any.head.aux_crc = xcrc32;
1141 msg->any.head.aux_bytes = msg->aux_size / DMSG_ALIGN;
1142 assert((msg->aux_size & DMSG_ALIGNMASK) == 0);
/* hdr_crc is computed with its own field zeroed first. */
1144 hbytes = (msg->any.head.cmd & DMSGF_SIZE) *
1146 msg->any.head.hdr_crc = 0;
1147 msg->any.head.hdr_crc = dmsg_icrc32(&msg->any.head, hbytes);
1150 * Enqueue the message (the flush codes handles stream
1153 TAILQ_INSERT_TAIL(&ioq->msgq, msg, qentry);
1156 dmsg_iocom_flush2(iocom);
1160 * Thread localized, iocom->mtx not held by caller.
/*
 * Low-level transmit: build an iovec over the pending headers and aux
 * payloads (resuming from ioq->hbytes/abytes partial-write offsets),
 * optionally encrypt into the tx FIFO, writev() to the socket, then
 * retire fully-sent messages from the queue.  Thread localized;
 * iocom->mtx not held by caller.
 *
 * NOTE(review): the extraction dropped many lines (declarations,
 * iovcnt increments, break/continue, errno==EAGAIN term, function
 * epilogue) -- verify against upstream before editing.
 */
1163 dmsg_iocom_flush2(dmsg_iocom_t *iocom)
1165 dmsg_ioq_t *ioq = &iocom->ioq_tx;
1168 struct iovec iov[DMSG_IOQ_MAXIOVEC];
1177 dmsg_iocom_drain(iocom);
1182 * Pump messages out the connection by building an iovec.
1184 * ioq->hbytes/ioq->abytes tracks how much of the first message
1185 * in the queue has been successfully written out, so we can
1193 TAILQ_FOREACH(msg, &ioq->msgq, qentry) {
1194 hbytes = (msg->any.head.cmd & DMSGF_SIZE) *
1196 abytes = msg->aux_size;
/* Offsets apply only to the first message; later ones start at 0. */
1197 assert(hoff <= hbytes && aoff <= abytes);
1199 if (hoff < hbytes) {
1200 iov[iovcnt].iov_base = (char *)&msg->any.head + hoff;
1201 iov[iovcnt].iov_len = hbytes - hoff;
1202 nact += hbytes - hoff;
1204 if (iovcnt == DMSG_IOQ_MAXIOVEC)
1207 if (aoff < abytes) {
1208 assert(msg->aux_data != NULL);
1209 iov[iovcnt].iov_base = (char *)msg->aux_data + aoff;
1210 iov[iovcnt].iov_len = abytes - aoff;
1211 nact += abytes - aoff;
1213 if (iovcnt == DMSG_IOQ_MAXIOVEC)
1223 * Encrypt and write the data. The crypto code will move the
1224 * data into the fifo and adjust the iov as necessary. If
1225 * encryption is disabled the iov is left alone.
1227 * May return a smaller iov (thus a smaller n), with aggregated
1228 * chunks. May reduce nmax to what fits in the FIFO.
1230 * This function sets nact to the number of original bytes now
1231 * encrypted, adding to the FIFO some number of bytes that might
1232 * be greater depending on the crypto mechanic. iov[] is adjusted
1233 * to point at the FIFO if necessary.
1235 * NOTE: The return value from the writev() is the post-encrypted
1236 * byte count, not the plaintext count.
1238 if (iocom->flags & DMSG_IOCOMF_CRYPTED) {
1240 * Make sure the FIFO has a reasonable amount of space
1241 * left (if not completely full).
1243 if (ioq->fifo_beg > sizeof(ioq->buf) / 2 &&
1244 sizeof(ioq->buf) - ioq->fifo_end >= DMSG_ALIGN * 2) {
1245 bcopy(ioq->buf + ioq->fifo_beg, ioq->buf,
1246 ioq->fifo_end - ioq->fifo_beg);
1247 ioq->fifo_cdx -= ioq->fifo_beg;
1248 ioq->fifo_cdn -= ioq->fifo_beg;
1249 ioq->fifo_end -= ioq->fifo_beg;
1253 iovcnt = dmsg_crypto_encrypt(iocom, ioq, iov, iovcnt, &nact);
1254 n = writev(iocom->sock_fd, iov, iovcnt);
1259 if (ioq->fifo_beg == ioq->fifo_end) {
/* Plaintext path: write the iovec directly. */
1267 n = writev(iocom->sock_fd, iov, iovcnt);
1275 * Clean out the transmit queue based on what we successfully
1276 * sent (nact is the plaintext count). ioq->hbytes/abytes
1277 * represents the portion of the first message previously sent.
1279 while ((msg = TAILQ_FIRST(&ioq->msgq)) != NULL) {
1280 hbytes = (msg->any.head.cmd & DMSGF_SIZE) *
1282 abytes = msg->aux_size;
/* Header partially sent: record offset and stop. */
1284 if ((size_t)nact < hbytes - ioq->hbytes) {
1285 ioq->hbytes += nact;
1289 nact -= hbytes - ioq->hbytes;
1290 ioq->hbytes = hbytes;
/* Aux partially sent: record offset and stop. */
1291 if ((size_t)nact < abytes - ioq->abytes) {
1292 ioq->abytes += nact;
1296 nact -= abytes - ioq->abytes;
1298 TAILQ_REMOVE(&ioq->msgq, msg, qentry);
1303 dmsg_state_cleanuptx(msg);
1308 * Process the return value from the write w/regards to blocking.
1311 if (errno != EINTR &&
1312 errno != EINPROGRESS &&
/* Hard write error: error the ioq and drain remaining tx msgs. */
1317 ioq->error = DMSG_IOQ_ERROR_SOCK;
1318 dmsg_iocom_drain(iocom);
1321 * Wait for socket buffer space
1323 iocom->flags |= DMSG_IOCOMF_WREQ;
1326 iocom->flags |= DMSG_IOCOMF_WREQ;
1329 dmsg_iocom_drain(iocom);
1334 * Kill pending msgs on ioq_tx and adjust the flags such that no more
1335 * write events will occur. We don't kill read msgs because we want
1336 * the caller to pull off our contrived terminal error msg to detect
1337 * the connection failure.
1339 * Thread localized, iocom->mtx not held by caller.
/*
 * Kill pending msgs on ioq_tx and clear the write-request flags so no
 * more write events occur.  Read-side msgs are deliberately left so
 * the caller can pull off the contrived terminal error msg.  Thread
 * localized; iocom->mtx not held by caller.
 *
 * NOTE(review): the msg release call inside the loop was dropped by
 * the extraction -- confirm against upstream.
 */
1342 dmsg_iocom_drain(dmsg_iocom_t *iocom)
1344 dmsg_ioq_t *ioq = &iocom->ioq_tx;
1347 iocom->flags &= ~(DMSG_IOCOMF_WREQ | DMSG_IOCOMF_WWORK);
1351 while ((msg = TAILQ_FIRST(&ioq->msgq)) != NULL) {
1352 TAILQ_REMOVE(&ioq->msgq, msg, qentry);
1354 dmsg_state_cleanuptx(msg);
1359 * Write a message to an iocom, with additional state processing.
/*
 * Write a message to an iocom, with transactional state processing:
 * under iocom->mtx, populate msgid/txcmd from the attached state (or
 * zero msgid for stateless one-way messages), queue the message on
 * the router's txmsgq, and poke the core loop through the wakeup
 * pipe.  CRC generation and encryption happen later in the I/O
 * thread (flush1/flush2).
 */
1362 dmsg_msg_write(dmsg_msg_t *msg)
1364 dmsg_iocom_t *iocom = msg->router->iocom;
1365 dmsg_state_t *state;
1369 * Handle state processing, create state if necessary.
1371 pthread_mutex_lock(&iocom->mtx);
1372 if ((state = msg->state) != NULL) {
1374 * Existing transaction (could be reply). It is also
1375 * possible for this to be the first reply (CREATE is set),
1376 * in which case we populate state->txcmd.
1378 * state->txcmd is adjusted to hold the final message cmd,
1379 * and we also be sure to set the CREATE bit here. We did
1380 * not set it in dmsg_msg_alloc() because that would have
1381 * not been serialized (state could have gotten ripped out
1382 * from under the message prior to it being transmitted).
1384 if ((msg->any.head.cmd & (DMSGF_CREATE | DMSGF_REPLY)) ==
1386 state->txcmd = msg->any.head.cmd & ~DMSGF_DELETE;
1388 msg->any.head.msgid = state->msgid;
/* Direction (REPLY bit) must agree with the recorded txcmd. */
1389 assert(((state->txcmd ^ msg->any.head.cmd) & DMSGF_REPLY) == 0);
1390 if (msg->any.head.cmd & DMSGF_CREATE)
1391 state->txcmd = msg->any.head.cmd & ~DMSGF_DELETE;
/* Stateless (one-way) message: no msgid. */
1393 msg->any.head.msgid = 0;
1394 /* XXX set spanid by router */
1396 msg->any.head.source = 0;
1397 msg->any.head.target = msg->router->target;
1400 * Queue it for output, wake up the I/O pthread. Note that the
1401 * I/O thread is responsible for generating the CRCs and encryption.
1403 TAILQ_INSERT_TAIL(&iocom->router->txmsgq, msg, qentry);
1405 write(iocom->wakeupfds[1], &dummy, 1); /* XXX optimize me */
1406 pthread_mutex_unlock(&iocom->mtx);
1410 * This is a shortcut to formulate a reply to msg with a simple error code,
1411 * It can reply to and terminate a transaction, or it can reply to a one-way
1412 * messages. A DMSG_LNK_ERROR command code is utilized to encode
1413 * the error code (which can be 0). Not all transactions are terminated
1414 * with DMSG_LNK_ERROR status (the low level only cares about the
1415 * MSGF_DELETE flag), but most are.
1417 * Replies to one-way messages are a bit of an oxymoron but the feature
1418 * is used by the debug (DBG) protocol.
1420 * The reply contains no extended data.
/*
 * Shortcut: formulate a terminating reply to msg carrying only an
 * error code (which may be 0), encoded as DMSG_LNK_ERROR.  Can reply
 * to and terminate a transaction, or reply to a one-way message (an
 * oxymoron used by the DBG protocol).  The reply carries no extended
 * data.  Returns silently if our transmit direction is already
 * closed.
 *
 * NOTE(review): the branches setting DMSGF_REPLY for the two
 * direction cases were dropped by the extraction -- confirm against
 * upstream (compare dmsg_msg_result()).
 */
1423 dmsg_msg_reply(dmsg_msg_t *msg, uint32_t error)
1425 dmsg_iocom_t *iocom = msg->router->iocom;
1426 dmsg_state_t *state = msg->state;
1432 * Reply with a simple error code and terminate the transaction.
1434 cmd = DMSG_LNK_ERROR;
1437 * Check if our direction has even been initiated yet, set CREATE.
1439 * Check what direction this is (command or reply direction). Note
1440 * that txcmd might not have been initiated yet.
1442 * If our direction has already been closed we just return without
1446 if (state->txcmd & DMSGF_DELETE)
1448 if (state->txcmd & DMSGF_REPLY)
/* Terminating reply: DELETE closes our direction. */
1450 cmd |= DMSGF_DELETE;
1452 if ((msg->any.head.cmd & DMSGF_REPLY) == 0)
1457 * Allocate the message and associate it with the existing state.
1458 * We cannot pass MSGF_CREATE to msg_alloc() because that may
1459 * allocate new state. We have our state already.
1461 nmsg = dmsg_msg_alloc(iocom->router, 0, cmd, NULL, NULL);
/* First message in our direction: set CREATE now, serialized here. */
1463 if ((state->txcmd & DMSGF_CREATE) == 0)
1464 nmsg->any.head.cmd |= DMSGF_CREATE;
1466 nmsg->any.head.error = error;
1467 nmsg->state = state;
1468 dmsg_msg_write(nmsg);
1472 * Similar to dmsg_msg_reply() but leave the transaction open. That is,
1473 * we are generating a streaming reply or an intermediate acknowledgement
1474 * of some sort as part of the higher level protocol, with more to come
1478 dmsg_msg_result(dmsg_msg_t *msg, uint32_t error)
1480 dmsg_iocom_t *iocom = msg->router->iocom;
1481 dmsg_state_t *state = msg->state;
1487 * Reply with a simple error code and terminate the transaction.
1489 cmd = DMSG_LNK_ERROR;
1492 * Check if our direction has even been initiated yet, set CREATE.
1494 * Check what direction this is (command or reply direction). Note
1495 * that txcmd might not have been initiated yet.
1497 * If our direction has already been closed we just return without
1501 if (state->txcmd & DMSGF_DELETE)
1503 if (state->txcmd & DMSGF_REPLY)
1505 /* continuing transaction, do not set MSGF_DELETE */
1507 if ((msg->any.head.cmd & DMSGF_REPLY) == 0)
1511 nmsg = dmsg_msg_alloc(iocom->router, 0, cmd, NULL, NULL);
1513 if ((state->txcmd & DMSGF_CREATE) == 0)
1514 nmsg->any.head.cmd |= DMSGF_CREATE;
1516 nmsg->any.head.error = error;
1517 nmsg->state = state;
1518 dmsg_msg_write(nmsg);
1522 * Terminate a transaction given a state structure by issuing a DELETE.
1525 dmsg_state_reply(dmsg_state_t *state, uint32_t error)
1528 uint32_t cmd = DMSG_LNK_ERROR | DMSGF_DELETE;
1531 * Nothing to do if we already transmitted a delete
1533 if (state->txcmd & DMSGF_DELETE)
1537 * Set REPLY if the other end initiated the command. Otherwise
1538 * we are the command direction.
1540 if (state->txcmd & DMSGF_REPLY)
1543 nmsg = dmsg_msg_alloc(state->iocom->router, 0, cmd, NULL, NULL);
1545 if ((state->txcmd & DMSGF_CREATE) == 0)
1546 nmsg->any.head.cmd |= DMSGF_CREATE;
1548 nmsg->any.head.error = error;
1549 nmsg->state = state;
1550 dmsg_msg_write(nmsg);
1553 /************************************************************************
1554 * TRANSACTION STATE HANDLING *
1555 ************************************************************************
1560 * Process state tracking for a message after reception, prior to
1563 * Called with msglk held and the msg dequeued.
1565 * All messages are called with dummy state and return actual state.
1566 * (One-off messages often just return the same dummy state).
1568 * May request that caller discard the message by setting *discardp to 1.
1569 * The returned state is not used in this case and is allowed to be NULL.
1573 * These routines handle persistent and command/reply message state via the
1574 * CREATE and DELETE flags. The first message in a command or reply sequence
1575 * sets CREATE, the last message in a command or reply sequence sets DELETE.
1577 * There can be any number of intermediate messages belonging to the same
1578 * sequence sent in between the CREATE message and the DELETE message,
1579 * which set neither flag. This represents a streaming command or reply.
1581 * Any command message received with CREATE set expects a reply sequence to
1582 * be returned. Reply sequences work the same as command sequences except the
1583 * REPLY bit is also sent. Both the command side and reply side can
1584 * degenerate into a single message with both CREATE and DELETE set. Note
1585 * that one side can be streaming and the other side not, or neither, or both.
1587 * The msgid is unique for the initiator. That is, two sides sending a new
1588 * message can use the same msgid without colliding.
1592 * ABORT sequences work by setting the ABORT flag along with normal message
1593 * state. However, ABORTs can also be sent on half-closed messages, that is
1594 * even if the command or reply side has already sent a DELETE, as long as
1595 * the message has not been fully closed it can still send an ABORT+DELETE
1596 * to terminate the half-closed message state.
1598 * Since ABORT+DELETEs can race we silently discard ABORT's for message
1599 * state which has already been fully closed. REPLY+ABORT+DELETEs can
1600 * also race, and in this situation the other side might have already
1601 * initiated a new unrelated command with the same message id. Since
1602 * the abort has not set the CREATE flag the situation can be detected
1603 * and the message will also be discarded.
1605 * Non-blocking requests can be initiated with ABORT+CREATE[+DELETE].
1606 * The ABORT request is essentially integrated into the command instead
1607 * of being sent later on. In this situation the command implementation
1608 * detects that CREATE and ABORT are both set (vs ABORT alone) and can
1609 * special-case non-blocking operation for the command.
1611 * NOTE! Messages with ABORT set without CREATE or DELETE are considered
1612 * to be mid-stream aborts for command/reply sequences. ABORTs on
1613 * one-way messages are not supported.
1615 * NOTE! If a command sequence does not support aborts the ABORT flag is
1620 * One-off messages (no reply expected) are sent with neither CREATE or DELETE
1621 * set. One-off messages cannot be aborted and typically aren't processed
1622 * by these routines. The REPLY bit can be used to distinguish whether a
1623 * one-off message is a command or reply. For example, one-off replies
1624 * will typically just contain status updates.
1627 dmsg_state_msgrx(dmsg_msg_t *msg)
1629 dmsg_iocom_t *iocom = msg->router->iocom;
1630 dmsg_state_t *state;
1635 * Lock RB tree and locate existing persistent state, if any.
1637 * If received msg is a command state is on staterd_tree.
1638 * If received msg is a reply state is on statewr_tree.
1640 dummy.msgid = msg->any.head.msgid;
1641 pthread_mutex_lock(&iocom->mtx);
1642 if (msg->any.head.cmd & DMSGF_REPLY) {
1643 state = RB_FIND(dmsg_state_tree,
1644 &iocom->router->statewr_tree, &dummy);
1646 state = RB_FIND(dmsg_state_tree,
1647 &iocom->router->staterd_tree, &dummy);
1650 pthread_mutex_unlock(&iocom->mtx);
1653 * Short-cut one-off or mid-stream messages (state may be NULL).
1655 if ((msg->any.head.cmd & (DMSGF_CREATE | DMSGF_DELETE |
1656 DMSGF_ABORT)) == 0) {
1661 * Switch on CREATE, DELETE, REPLY, and also handle ABORT from
1662 * inside the case statements.
1664 switch(msg->any.head.cmd & (DMSGF_CREATE | DMSGF_DELETE |
1667 case DMSGF_CREATE | DMSGF_DELETE:
1669 * New persistant command received.
1672 fprintf(stderr, "duplicate-trans %s\n",
1674 error = DMSG_IOQ_ERROR_TRANS;
1678 state = malloc(sizeof(*state));
1679 bzero(state, sizeof(*state));
1680 state->iocom = iocom;
1681 state->flags = DMSG_STATE_DYNAMIC;
1683 state->txcmd = DMSGF_REPLY;
1684 state->rxcmd = msg->any.head.cmd & ~DMSGF_DELETE;
1685 state->flags |= DMSG_STATE_INSERTED;
1686 state->msgid = msg->any.head.msgid;
1687 state->router = msg->router;
1689 pthread_mutex_lock(&iocom->mtx);
1690 RB_INSERT(dmsg_state_tree,
1691 &iocom->router->staterd_tree, state);
1692 pthread_mutex_unlock(&iocom->mtx);
1695 fprintf(stderr, "create state %p id=%08x on iocom staterd %p\n",
1696 state, (uint32_t)state->msgid, iocom);
1701 * Persistent state is expected but might not exist if an
1702 * ABORT+DELETE races the close.
1704 if (state == NULL) {
1705 if (msg->any.head.cmd & DMSGF_ABORT) {
1706 error = DMSG_IOQ_ERROR_EALREADY;
1708 fprintf(stderr, "missing-state %s\n",
1710 error = DMSG_IOQ_ERROR_TRANS;
1717 * Handle another ABORT+DELETE case if the msgid has already
1720 if ((state->rxcmd & DMSGF_CREATE) == 0) {
1721 if (msg->any.head.cmd & DMSGF_ABORT) {
1722 error = DMSG_IOQ_ERROR_EALREADY;
1724 fprintf(stderr, "reused-state %s\n",
1726 error = DMSG_IOQ_ERROR_TRANS;
1735 * Check for mid-stream ABORT command received, otherwise
1738 if (msg->any.head.cmd & DMSGF_ABORT) {
1739 if (state == NULL ||
1740 (state->rxcmd & DMSGF_CREATE) == 0) {
1741 error = DMSG_IOQ_ERROR_EALREADY;
1747 case DMSGF_REPLY | DMSGF_CREATE:
1748 case DMSGF_REPLY | DMSGF_CREATE | DMSGF_DELETE:
1750 * When receiving a reply with CREATE set the original
1751 * persistent state message should already exist.
1753 if (state == NULL) {
1754 fprintf(stderr, "no-state(r) %s\n",
1756 error = DMSG_IOQ_ERROR_TRANS;
1760 assert(((state->rxcmd ^ msg->any.head.cmd) &
1762 state->rxcmd = msg->any.head.cmd & ~DMSGF_DELETE;
1765 case DMSGF_REPLY | DMSGF_DELETE:
1767 * Received REPLY+ABORT+DELETE in case where msgid has
1768 * already been fully closed, ignore the message.
1770 if (state == NULL) {
1771 if (msg->any.head.cmd & DMSGF_ABORT) {
1772 error = DMSG_IOQ_ERROR_EALREADY;
1774 fprintf(stderr, "no-state(r,d) %s\n",
1776 error = DMSG_IOQ_ERROR_TRANS;
1783 * Received REPLY+ABORT+DELETE in case where msgid has
1784 * already been reused for an unrelated message,
1785 * ignore the message.
1787 if ((state->rxcmd & DMSGF_CREATE) == 0) {
1788 if (msg->any.head.cmd & DMSGF_ABORT) {
1789 error = DMSG_IOQ_ERROR_EALREADY;
1791 fprintf(stderr, "reused-state(r,d) %s\n",
1793 error = DMSG_IOQ_ERROR_TRANS;
1802 * Check for mid-stream ABORT reply received to sent command.
1804 if (msg->any.head.cmd & DMSGF_ABORT) {
1805 if (state == NULL ||
1806 (state->rxcmd & DMSGF_CREATE) == 0) {
1807 error = DMSG_IOQ_ERROR_EALREADY;
1818 dmsg_state_cleanuprx(dmsg_iocom_t *iocom, dmsg_msg_t *msg)
1820 dmsg_state_t *state;
1822 if ((state = msg->state) == NULL) {
1824 * Free a non-transactional message, there is no state
1828 } else if (msg->any.head.cmd & DMSGF_DELETE) {
1830 * Message terminating transaction, destroy the related
1831 * state, the original message, and this message (if it
1832 * isn't the original message due to a CREATE|DELETE).
1834 pthread_mutex_lock(&iocom->mtx);
1835 state->rxcmd |= DMSGF_DELETE;
1836 if (state->txcmd & DMSGF_DELETE) {
1837 if (state->msg == msg)
1839 assert(state->flags & DMSG_STATE_INSERTED);
1840 if (state->rxcmd & DMSGF_REPLY) {
1841 assert(msg->any.head.cmd & DMSGF_REPLY);
1842 RB_REMOVE(dmsg_state_tree,
1843 &iocom->router->statewr_tree, state);
1845 assert((msg->any.head.cmd & DMSGF_REPLY) == 0);
1846 RB_REMOVE(dmsg_state_tree,
1847 &iocom->router->staterd_tree, state);
1849 state->flags &= ~DMSG_STATE_INSERTED;
1850 dmsg_state_free(state);
1854 pthread_mutex_unlock(&iocom->mtx);
1856 } else if (state->msg != msg) {
1858 * Message not terminating transaction, leave state intact
1859 * and free message if it isn't the CREATE message.
1866 dmsg_state_cleanuptx(dmsg_msg_t *msg)
1868 dmsg_iocom_t *iocom = msg->router->iocom;
1869 dmsg_state_t *state;
1871 if ((state = msg->state) == NULL) {
1873 } else if (msg->any.head.cmd & DMSGF_DELETE) {
1874 pthread_mutex_lock(&iocom->mtx);
1875 state->txcmd |= DMSGF_DELETE;
1876 if (state->rxcmd & DMSGF_DELETE) {
1877 if (state->msg == msg)
1879 assert(state->flags & DMSG_STATE_INSERTED);
1880 if (state->txcmd & DMSGF_REPLY) {
1881 assert(msg->any.head.cmd & DMSGF_REPLY);
1882 RB_REMOVE(dmsg_state_tree,
1883 &iocom->router->staterd_tree, state);
1885 assert((msg->any.head.cmd & DMSGF_REPLY) == 0);
1886 RB_REMOVE(dmsg_state_tree,
1887 &iocom->router->statewr_tree, state);
1889 state->flags &= ~DMSG_STATE_INSERTED;
1890 dmsg_state_free(state);
1894 pthread_mutex_unlock(&iocom->mtx);
1896 } else if (state->msg != msg) {
1902 * Called with iocom locked
1905 dmsg_state_free(dmsg_state_t *state)
1907 dmsg_iocom_t *iocom = state->iocom;
1912 fprintf(stderr, "terminate state %p id=%08x\n",
1913 state, (uint32_t)state->msgid);
1915 assert(state->any.any == NULL);
1919 dmsg_msg_free_locked(msg);
1923 * When an iocom error is present we are trying to close down the
1924 * iocom, but we have to wait for all states to terminate before
1925 * we can do so. The iocom rx code will terminate the receive side
1926 * for all transactions by simulating incoming DELETE messages,
1927 * but the state doesn't go away until both sides are terminated.
1929 * We may have to wake up the rx code.
1931 if (iocom->ioq_rx.error &&
1932 RB_EMPTY(&iocom->router->staterd_tree) &&
1933 RB_EMPTY(&iocom->router->statewr_tree)) {
1935 write(iocom->wakeupfds[1], &dummy, 1);
1939 /************************************************************************
1941 ************************************************************************
1943 * Incoming messages are routed by their spanid, matched up against
1944 * outgoing LNK_SPANs managed by h2span_relay structures (see msg_lnk.c).
1945 * Any replies run through the same router.
1947 * Originated messages are routed by their spanid, matched up against
1948 * incoming LNK_SPANs managed by h2span_link structures (see msg_lnk.c).
1949 * Replies come back through the same route.
1951 * Keep in mind that ALL MESSAGE TRAFFIC pertaining to a particular
1952 * transaction runs through the same route. Commands and replies both.
1954 * An originated message will use a different routing spanid to
1955 * reach a target node than a message which originates from that node.
1956 * They might use the same physical pipes (each pipe can have multiple
1957 * SPANs and RELAYs), but the routes are distinct from the perspective
1961 dmsg_router_alloc(void)
1963 dmsg_router_t *router;
1965 router = dmsg_alloc(sizeof(*router));
1966 TAILQ_INIT(&router->txmsgq);
1971 dmsg_router_connect(dmsg_router_t *router)
1975 assert(router->link || router->relay);
1976 assert((router->flags & DMSG_ROUTER_CONNECTED) == 0);
1978 pthread_mutex_lock(&router_mtx);
1980 tmp = RB_INSERT(dmsg_router_tree, &router_ltree, router);
1982 tmp = RB_INSERT(dmsg_router_tree, &router_rtree, router);
1983 assert(tmp == NULL);
1984 router->flags |= DMSG_ROUTER_CONNECTED;
1985 pthread_mutex_unlock(&router_mtx);
1989 dmsg_router_disconnect(dmsg_router_t **routerp)
1991 dmsg_router_t *router;
1994 assert(router->link || router->relay);
1995 assert(router->flags & DMSG_ROUTER_CONNECTED);
1997 pthread_mutex_lock(&router_mtx);
1999 RB_REMOVE(dmsg_router_tree, &router_ltree, router);
2001 RB_REMOVE(dmsg_router_tree, &router_rtree, router);
2002 router->flags &= ~DMSG_ROUTER_CONNECTED;
2004 pthread_mutex_unlock(&router_mtx);
2012 dmsg_route_msg(dmsg_msg_t *msg)
2018 * This swaps endian for a hammer2_msg_hdr. Note that the extended
2019 * header is not adjusted, just the core header.
2022 dmsg_bswap_head(dmsg_hdr_t *head)
2024 head->magic = bswap16(head->magic);
2025 head->reserved02 = bswap16(head->reserved02);
2026 head->salt = bswap32(head->salt);
2028 head->msgid = bswap64(head->msgid);
2029 head->source = bswap64(head->source);
2030 head->target = bswap64(head->target);
2032 head->cmd = bswap32(head->cmd);
2033 head->aux_crc = bswap32(head->aux_crc);
2034 head->aux_bytes = bswap32(head->aux_bytes);
2035 head->error = bswap32(head->error);
2036 head->aux_descr = bswap64(head->aux_descr);
2037 head->reserved38= bswap32(head->reserved38);
2038 head->hdr_crc = bswap32(head->hdr_crc);