2 * Copyright (c) 2011-2012 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 #include "dmsg_local.h"
40 static int dmsg_state_msgrx(dmsg_msg_t *msg);
41 static void dmsg_state_cleanuptx(dmsg_msg_t *msg);
44 * ROUTER TREE - Represents available routes for message routing, indexed
45 * by their spanid. The router structure is embedded in
46 * either an iocom, h2span_link, or h2span_relay (see msg_lnk.c).
49 dmsg_router_cmp(dmsg_router_t *router1, dmsg_router_t *router2)
51 if (router1->target < router2->target)
53 if (router1->target > router2->target)
58 RB_GENERATE(dmsg_router_tree, dmsg_router, rbnode, dmsg_router_cmp);
60 static pthread_mutex_t router_mtx;
61 static struct dmsg_router_tree router_ltree = RB_INITIALIZER(router_ltree);
62 static struct dmsg_router_tree router_rtree = RB_INITIALIZER(router_rtree);
65 * STATE TREE - Represents open transactions which are indexed by their
66 * {router,msgid} relative to the governing iocom.
68 * router is usually iocom->router since state isn't stored
69 * for relayed messages.
72 dmsg_state_cmp(dmsg_state_t *state1, dmsg_state_t *state2)
75 if (state1->router < state2->router)
77 if (state1->router > state2->router)
80 if (state1->msgid < state2->msgid)
82 if (state1->msgid > state2->msgid)
87 RB_GENERATE(dmsg_state_tree, dmsg_state, rbnode, dmsg_state_cmp);
90 * Initialize a low-level ioq
93 dmsg_ioq_init(dmsg_iocom_t *iocom __unused, dmsg_ioq_t *ioq)
95 bzero(ioq, sizeof(*ioq));
96 ioq->state = DMSG_MSGQ_STATE_HEADER1;
97 TAILQ_INIT(&ioq->msgq);
103 * caller holds iocom->mtx.
106 dmsg_ioq_done(dmsg_iocom_t *iocom __unused, dmsg_ioq_t *ioq)
110 while ((msg = TAILQ_FIRST(&ioq->msgq)) != NULL) {
111 assert(0); /* shouldn't happen */
112 TAILQ_REMOVE(&ioq->msgq, msg, qentry);
115 if ((msg = ioq->msg) != NULL) {
122 * Initialize a low-level communications channel.
124 * NOTE: The signal_func() is called at least once from the loop and can be
125 * re-armed via dmsg_iocom_restate().
128 dmsg_iocom_init(dmsg_iocom_t *iocom, int sock_fd, int alt_fd,
129 void (*signal_func)(dmsg_router_t *),
130 void (*rcvmsg_func)(dmsg_msg_t *),
131 void (*altmsg_func)(dmsg_iocom_t *))
135 bzero(iocom, sizeof(*iocom));
137 iocom->router = dmsg_router_alloc();
138 iocom->router->signal_callback = signal_func;
139 iocom->router->rcvmsg_callback = rcvmsg_func;
140 iocom->router->altmsg_callback = altmsg_func;
141 /* we do not call dmsg_router_connect() for iocom routers */
143 pthread_mutex_init(&iocom->mtx, NULL);
144 RB_INIT(&iocom->router->staterd_tree);
145 RB_INIT(&iocom->router->statewr_tree);
146 TAILQ_INIT(&iocom->freeq);
147 TAILQ_INIT(&iocom->freeq_aux);
148 TAILQ_INIT(&iocom->router->txmsgq);
149 iocom->router->iocom = iocom;
150 iocom->sock_fd = sock_fd;
151 iocom->alt_fd = alt_fd;
152 iocom->flags = DMSG_IOCOMF_RREQ;
154 iocom->flags |= DMSG_IOCOMF_SWORK;
155 dmsg_ioq_init(iocom, &iocom->ioq_rx);
156 dmsg_ioq_init(iocom, &iocom->ioq_tx);
157 if (pipe(iocom->wakeupfds) < 0)
159 fcntl(iocom->wakeupfds[0], F_SETFL, O_NONBLOCK);
160 fcntl(iocom->wakeupfds[1], F_SETFL, O_NONBLOCK);
163 * Negotiate session crypto synchronously. This will mark the
164 * connection as error'd if it fails. If this is a pipe it's
165 * a linkage that we set up ourselves to the filesystem and there
168 if (fstat(sock_fd, &st) < 0)
170 if (S_ISSOCK(st.st_mode))
171 dmsg_crypto_negotiate(iocom);
174 * Make sure our fds are set to non-blocking for the iocom core.
177 fcntl(sock_fd, F_SETFL, O_NONBLOCK);
179 /* if line buffered our single fgets() should be fine */
181 fcntl(alt_fd, F_SETFL, O_NONBLOCK);
186 * May only be called from a callback from iocom_core.
188 * Adjust state machine functions, set flags to guarantee that both
189 * the recevmsg_func and the sendmsg_func is called at least once.
192 dmsg_router_restate(dmsg_router_t *router,
193 void (*signal_func)(dmsg_router_t *),
194 void (*rcvmsg_func)(dmsg_msg_t *msg),
195 void (*altmsg_func)(dmsg_iocom_t *))
197 router->signal_callback = signal_func;
198 router->rcvmsg_callback = rcvmsg_func;
199 router->altmsg_callback = altmsg_func;
201 router->iocom->flags |= DMSG_IOCOMF_SWORK;
203 router->iocom->flags &= ~DMSG_IOCOMF_SWORK;
207 dmsg_router_signal(dmsg_router_t *router)
209 if (router->signal_callback)
210 router->iocom->flags |= DMSG_IOCOMF_SWORK;
214 * Cleanup a terminating iocom.
216 * Caller should not hold iocom->mtx. The iocom has already been disconnected
217 * from all possible references to it.
220 dmsg_iocom_done(dmsg_iocom_t *iocom)
224 if (iocom->sock_fd >= 0) {
225 close(iocom->sock_fd);
228 if (iocom->alt_fd >= 0) {
229 close(iocom->alt_fd);
232 dmsg_ioq_done(iocom, &iocom->ioq_rx);
233 dmsg_ioq_done(iocom, &iocom->ioq_tx);
234 if ((msg = TAILQ_FIRST(&iocom->freeq)) != NULL) {
235 TAILQ_REMOVE(&iocom->freeq, msg, qentry);
238 if ((msg = TAILQ_FIRST(&iocom->freeq_aux)) != NULL) {
239 TAILQ_REMOVE(&iocom->freeq_aux, msg, qentry);
241 msg->aux_data = NULL;
244 if (iocom->wakeupfds[0] >= 0) {
245 close(iocom->wakeupfds[0]);
246 iocom->wakeupfds[0] = -1;
248 if (iocom->wakeupfds[1] >= 0) {
249 close(iocom->wakeupfds[1]);
250 iocom->wakeupfds[1] = -1;
252 pthread_mutex_destroy(&iocom->mtx);
256 * Allocate a new one-way message.
/*
 * Allocate a new one-way message, reusing a cached message structure from
 * the iocom free queues when possible.  When CREATE is set without REPLY a
 * fresh dynamic transaction state is allocated, keyed by its own address,
 * and inserted into the write-side state tree.
 *
 * NOTE(review): this extract is truncated — original line numbers are fused
 * into the text and several lines (locals, braces, else-branches, the
 * aux-buffer free path) are missing.  Do not treat this text as compilable;
 * restore from the un-truncated original before editing logic.
 */
259 dmsg_msg_alloc(dmsg_router_t *router, size_t aux_size, uint32_t cmd,
260 void (*func)(dmsg_msg_t *), void *data)
262 dmsg_state_t *state = NULL;
263 dmsg_iocom_t *iocom = router->iocom;
267 pthread_mutex_lock(&iocom->mtx);
/* round the aux payload up to the message alignment */
269 aux_size = (aux_size + DMSG_ALIGNMASK) &
/* prefer a cached msg that already carries an aux buffer */
271 if ((msg = TAILQ_FIRST(&iocom->freeq_aux)) != NULL)
272 TAILQ_REMOVE(&iocom->freeq_aux, msg, qentry);
274 if ((msg = TAILQ_FIRST(&iocom->freeq)) != NULL)
275 TAILQ_REMOVE(&iocom->freeq, msg, qentry);
277 if ((cmd & (DMSGF_CREATE | DMSGF_REPLY)) == DMSGF_CREATE) {
279 * Create state when CREATE is set without REPLY.
281 * NOTE: CREATE in txcmd handled by dmsg_msg_write()
282 * NOTE: DELETE in txcmd handled by dmsg_state_cleanuptx()
284 state = malloc(sizeof(*state));
285 bzero(state, sizeof(*state));
286 state->iocom = iocom;
287 state->flags = DMSG_STATE_DYNAMIC;
/* msgid is simply the state's own address — unique per live state */
288 state->msgid = (uint64_t)(uintptr_t)state;
289 state->router = router;
290 state->txcmd = cmd & ~(DMSGF_CREATE | DMSGF_DELETE);
291 state->rxcmd = DMSGF_REPLY;
293 state->any.any = data;
294 pthread_mutex_lock(&iocom->mtx);
295 RB_INSERT(dmsg_state_tree,
296 &iocom->router->statewr_tree,
298 pthread_mutex_unlock(&iocom->mtx);
299 state->flags |= DMSG_STATE_INSERTED;
301 pthread_mutex_unlock(&iocom->mtx);
/* no cached message available — allocate a fresh one */
303 msg = malloc(sizeof(*msg));
304 bzero(msg, sizeof(*msg));
305 msg->aux_data = NULL;
/* cached aux buffer of the wrong size is discarded and reallocated */
308 if (msg->aux_size != aux_size) {
311 msg->aux_data = NULL;
315 msg->aux_data = malloc(aux_size);
316 msg->aux_size = aux_size;
/* header size is encoded in the low bits of cmd (DMSGF_SIZE) */
319 hbytes = (cmd & DMSGF_SIZE) * DMSG_ALIGN;
321 bzero(&msg->any.head, hbytes);
322 msg->hdr_size = hbytes;
323 msg->any.head.cmd = cmd;
324 msg->any.head.aux_descr = 0;
325 msg->any.head.aux_crc = 0;
326 msg->router = router;
330 msg->any.head.msgid = state->msgid;
336 * Free a message so it can be reused afresh.
338 * NOTE: aux_size can be 0 with a non-NULL aux_data.
342 dmsg_msg_free_locked(dmsg_msg_t *msg)
344 dmsg_iocom_t *iocom = msg->router->iocom;
348 TAILQ_INSERT_TAIL(&iocom->freeq_aux, msg, qentry);
350 TAILQ_INSERT_TAIL(&iocom->freeq, msg, qentry);
354 dmsg_msg_free(dmsg_msg_t *msg)
356 dmsg_iocom_t *iocom = msg->router->iocom;
358 pthread_mutex_lock(&iocom->mtx);
359 dmsg_msg_free_locked(msg);
360 pthread_mutex_unlock(&iocom->mtx);
364 * I/O core loop for an iocom.
366 * Thread localized, iocom->mtx not held.
/*
 * I/O core loop for an iocom.  Thread localized, iocom->mtx not held.
 *
 * Polls the wakeup pipe, the socket (read and/or write as requested by
 * RREQ/WREQ), and the optional alt fd, translates poll revents into the
 * *WORK flag bits, then dispatches: signal callback (SWORK), wakeup-pipe
 * drain (PWORK), transmit flush (WWORK), receive loop (RWORK), and the
 * alt-fd callback (ARWORK), until EOF is flagged.
 *
 * NOTE(review): this extract is truncated — original line numbers are fused
 * into the text and lines (locals, braces, poll index bookkeeping, timeout
 * computation) are missing.  Do not treat this text as compilable.
 */
369 dmsg_iocom_core(dmsg_iocom_t *iocom)
371 struct pollfd fds[3];
376 int wi; /* wakeup pipe */
378 int ai; /* alt bulk path socket */
380 while ((iocom->flags & DMSG_IOCOMF_EOF) == 0) {
381 if ((iocom->flags & (DMSG_IOCOMF_RWORK |
386 DMSG_IOCOMF_AWWORK)) == 0) {
388 * Only poll if no immediate work is pending.
389 * Otherwise we are just wasting our time calling
400 * Always check the inter-thread pipe, e.g.
401 * for iocom->txmsgq work.
404 fds[wi].fd = iocom->wakeupfds[0];
405 fds[wi].events = POLLIN;
409 * Check the socket input/output direction as
412 if (iocom->flags & (DMSG_IOCOMF_RREQ |
415 fds[si].fd = iocom->sock_fd;
419 if (iocom->flags & DMSG_IOCOMF_RREQ)
420 fds[si].events |= POLLIN;
421 if (iocom->flags & DMSG_IOCOMF_WREQ)
422 fds[si].events |= POLLOUT;
426 * Check the alternative fd for work.
428 if (iocom->alt_fd >= 0) {
430 fds[ai].fd = iocom->alt_fd;
431 fds[ai].events = POLLIN;
434 poll(fds, count, timeout);
/* translate poll results into the iocom work flags */
436 if (wi >= 0 && (fds[wi].revents & POLLIN))
437 iocom->flags |= DMSG_IOCOMF_PWORK;
438 if (si >= 0 && (fds[si].revents & POLLIN))
439 iocom->flags |= DMSG_IOCOMF_RWORK;
440 if (si >= 0 && (fds[si].revents & POLLOUT))
441 iocom->flags |= DMSG_IOCOMF_WWORK;
442 if (wi >= 0 && (fds[wi].revents & POLLOUT))
443 iocom->flags |= DMSG_IOCOMF_WWORK;
444 if (ai >= 0 && (fds[ai].revents & POLLIN))
445 iocom->flags |= DMSG_IOCOMF_ARWORK;
448 * Always check the pipe
450 iocom->flags |= DMSG_IOCOMF_PWORK;
/* SWORK: run the (re-armable) signal callback once */
453 if (iocom->flags & DMSG_IOCOMF_SWORK) {
454 iocom->flags &= ~DMSG_IOCOMF_SWORK;
455 iocom->router->signal_callback(iocom->router);
459 * Pending message queues from other threads wake us up
460 * with a write to the wakeupfds[] pipe. We have to clear
461 * the pipe with a dummy read.
463 if (iocom->flags & DMSG_IOCOMF_PWORK) {
464 iocom->flags &= ~DMSG_IOCOMF_PWORK;
465 read(iocom->wakeupfds[0], dummybuf, sizeof(dummybuf));
466 iocom->flags |= DMSG_IOCOMF_RWORK;
467 iocom->flags |= DMSG_IOCOMF_WWORK;
468 if (TAILQ_FIRST(&iocom->router->txmsgq))
469 dmsg_iocom_flush1(iocom);
473 * Message write sequencing
475 if (iocom->flags & DMSG_IOCOMF_WWORK)
476 dmsg_iocom_flush1(iocom);
479 * Message read sequencing. Run this after the write
480 * sequencing in case the write sequencing allowed another
481 * auto-DELETE to occur on the read side.
483 if (iocom->flags & DMSG_IOCOMF_RWORK) {
484 while ((iocom->flags & DMSG_IOCOMF_EOF) == 0 &&
485 (msg = dmsg_ioq_read(iocom)) != NULL) {
487 fprintf(stderr, "receive %s\n",
490 iocom->router->rcvmsg_callback(msg);
491 dmsg_state_cleanuprx(iocom, msg);
495 if (iocom->flags & DMSG_IOCOMF_ARWORK) {
496 iocom->flags &= ~DMSG_IOCOMF_ARWORK;
497 iocom->router->altmsg_callback(iocom);
503 * Make sure there's enough room in the FIFO to hold the
506 * Assume worst case encrypted form is 2x the size of the
507 * plaintext equivalent.
511 dmsg_ioq_makeroom(dmsg_ioq_t *ioq, size_t needed)
516 bytes = ioq->fifo_cdx - ioq->fifo_beg;
517 nmax = sizeof(ioq->buf) - ioq->fifo_end;
518 if (bytes + nmax / 2 < needed) {
520 bcopy(ioq->buf + ioq->fifo_beg,
524 ioq->fifo_cdx -= ioq->fifo_beg;
526 if (ioq->fifo_cdn < ioq->fifo_end) {
527 bcopy(ioq->buf + ioq->fifo_cdn,
528 ioq->buf + ioq->fifo_cdx,
529 ioq->fifo_end - ioq->fifo_cdn);
531 ioq->fifo_end -= ioq->fifo_cdn - ioq->fifo_cdx;
532 ioq->fifo_cdn = ioq->fifo_cdx;
533 nmax = sizeof(ioq->buf) - ioq->fifo_end;
539 * Read the next ready message from the ioq, issuing I/O if needed.
540 * Caller should retry on a read-event when NULL is returned.
542 * If an error occurs during reception a DMSG_LNK_ERROR msg will
543 * be returned for each open transaction, then the ioq and iocom
544 * will be errored out and a non-transactional DMSG_LNK_ERROR
545 * msg will be returned as the final message. The caller should not call
546 * us again after the final message is returned.
548 * Thread localized, iocom->mtx not held.
/*
 * Read the next ready message from the rx ioq, issuing socket I/O if
 * needed.  Runs a four-stage state machine: HEADER1 (core header),
 * HEADER2 (extended header + hdr_crc), AUXDATA1/AUXDATA2 (aux payload +
 * aux_crc), then validates the anti-replay sequence salt and runs
 * transactional state processing.  On an unrecoverable error the ioq
 * enters STATE_ERROR and synthesizes LNK_ERROR DELETEs for every open
 * transaction, ending with a final non-transactional LNK_ERROR.
 *
 * Returns NULL when insufficient data has accumulated (caller retries on
 * the next read-event, RREQ is set).
 *
 * NOTE(review): this extract is truncated — original line numbers are
 * fused into the text and many lines (locals, braces, returns, goto/break
 * targets) are missing.  Do not treat this text as compilable; restore
 * from the un-truncated original before editing logic.
 */
551 dmsg_ioq_read(dmsg_iocom_t *iocom)
553 dmsg_ioq_t *ioq = &iocom->ioq_rx;
564 iocom->flags &= ~(DMSG_IOCOMF_RREQ | DMSG_IOCOMF_RWORK);
567 * If a message is already pending we can just remove and
568 * return it. Message state has already been processed.
569 * (currently not implemented)
571 if ((msg = TAILQ_FIRST(&ioq->msgq)) != NULL) {
572 TAILQ_REMOVE(&ioq->msgq, msg, qentry);
577 * If the stream is errored out we stop processing it.
583 * Message read in-progress (msg is NULL at the moment). We don't
584 * allocate a msg until we have its core header.
586 nmax = sizeof(ioq->buf) - ioq->fifo_end;
587 bytes = ioq->fifo_cdx - ioq->fifo_beg; /* already decrypted */
591 case DMSG_MSGQ_STATE_HEADER1:
593 * Load the primary header, fail on any non-trivial read
594 * error or on EOF. Since the primary header is the same
595 * size is the message alignment it will never straddle
596 * the end of the buffer.
598 nmax = dmsg_ioq_makeroom(ioq, sizeof(msg->any.head));
599 if (bytes < sizeof(msg->any.head)) {
600 n = read(iocom->sock_fd,
601 ioq->buf + ioq->fifo_end,
/* n == 0 is remote EOF; n < 0 with a hard errno is a socket error */
605 ioq->error = DMSG_IOQ_ERROR_EOF;
608 if (errno != EINTR &&
609 errno != EINPROGRESS &&
611 ioq->error = DMSG_IOQ_ERROR_SOCK;
617 ioq->fifo_end += (size_t)n;
622 * Decrypt data received so far. Data will be decrypted
623 * in-place but might create gaps in the FIFO. Partial
624 * blocks are not immediately decrypted.
626 * WARNING! The header might be in the wrong endian, we
627 * do not fix it up until we get the entire
630 if (iocom->flags & DMSG_IOCOMF_CRYPTED) {
631 dmsg_crypto_decrypt(iocom, ioq);
633 ioq->fifo_cdx = ioq->fifo_end;
634 ioq->fifo_cdn = ioq->fifo_end;
636 bytes = ioq->fifo_cdx - ioq->fifo_beg;
639 * Insufficient data accumulated (msg is NULL, caller will
643 if (bytes < sizeof(msg->any.head))
647 * Check and fixup the core header. Note that the icrc
648 * has to be calculated before any fixups, but the crc
649 * fields in the msg may have to be swapped like everything
652 head = (void *)(ioq->buf + ioq->fifo_beg);
653 if (head->magic != DMSG_HDR_MAGIC &&
654 head->magic != DMSG_HDR_MAGIC_REV) {
655 ioq->error = DMSG_IOQ_ERROR_SYNC;
660 * Calculate the full header size and aux data size
662 if (head->magic == DMSG_HDR_MAGIC_REV) {
663 ioq->hbytes = (bswap32(head->cmd) & DMSGF_SIZE) *
665 ioq->abytes = bswap32(head->aux_bytes) *
668 ioq->hbytes = (head->cmd & DMSGF_SIZE) *
670 ioq->abytes = head->aux_bytes * DMSG_ALIGN;
/* sanity-clamp field-derived sizes before allocating anything */
672 if (ioq->hbytes < sizeof(msg->any.head) ||
673 ioq->hbytes > sizeof(msg->any) ||
674 ioq->abytes > DMSG_AUX_MAX) {
675 ioq->error = DMSG_IOQ_ERROR_FIELD;
680 * Allocate the message, the next state will fill it in.
682 msg = dmsg_msg_alloc(iocom->router, ioq->abytes, 0,
687 * Fall through to the next state. Make sure that the
688 * extended header does not straddle the end of the buffer.
689 * We still want to issue larger reads into our buffer,
690 * book-keeping is easier if we don't bcopy() yet.
692 * Make sure there is enough room for bloated encrypt data.
694 nmax = dmsg_ioq_makeroom(ioq, ioq->hbytes);
695 ioq->state = DMSG_MSGQ_STATE_HEADER2;
697 case DMSG_MSGQ_STATE_HEADER2:
699 * Fill out the extended header.
702 if (bytes < ioq->hbytes) {
703 n = read(iocom->sock_fd,
704 ioq->buf + ioq->fifo_end,
708 ioq->error = DMSG_IOQ_ERROR_EOF;
711 if (errno != EINTR &&
712 errno != EINPROGRESS &&
714 ioq->error = DMSG_IOQ_ERROR_SOCK;
720 ioq->fifo_end += (size_t)n;
724 if (iocom->flags & DMSG_IOCOMF_CRYPTED) {
725 dmsg_crypto_decrypt(iocom, ioq);
727 ioq->fifo_cdx = ioq->fifo_end;
728 ioq->fifo_cdn = ioq->fifo_end;
730 bytes = ioq->fifo_cdx - ioq->fifo_beg;
733 * Insufficient data accumulated (set msg NULL so caller will
736 if (bytes < ioq->hbytes) {
742 * Calculate the extended header, decrypt data received
743 * so far. Handle endian-conversion for the entire extended
746 head = (void *)(ioq->buf + ioq->fifo_beg);
/* hdr_crc must be validated before any in-place byte swapping */
751 if (head->magic == DMSG_HDR_MAGIC_REV)
752 xcrc32 = bswap32(head->hdr_crc);
754 xcrc32 = head->hdr_crc;
756 if (dmsg_icrc32(head, ioq->hbytes) != xcrc32) {
757 ioq->error = DMSG_IOQ_ERROR_XCRC;
758 fprintf(stderr, "BAD-XCRC(%08x,%08x) %s\n",
759 xcrc32, dmsg_icrc32(head, ioq->hbytes),
764 head->hdr_crc = xcrc32;
766 if (head->magic == DMSG_HDR_MAGIC_REV) {
767 dmsg_bswap_head(head);
771 * Copy the extended header into the msg and adjust the
774 bcopy(head, &msg->any, ioq->hbytes);
777 * We are either done or we fall-through.
779 if (ioq->abytes == 0) {
780 ioq->fifo_beg += ioq->hbytes;
785 * Must adjust bytes (and the state) when falling through.
786 * nmax doesn't change.
788 ioq->fifo_beg += ioq->hbytes;
789 bytes -= ioq->hbytes;
790 ioq->state = DMSG_MSGQ_STATE_AUXDATA1;
792 case DMSG_MSGQ_STATE_AUXDATA1:
794 * Copy the partial or complete payload from remaining
795 * bytes in the FIFO in order to optimize the makeroom call
796 * in the AUXDATA2 state. We have to fall-through either
797 * way so we can check the crc.
799 * msg->aux_size tracks our aux data.
801 if (bytes >= ioq->abytes) {
802 bcopy(ioq->buf + ioq->fifo_beg, msg->aux_data,
804 msg->aux_size = ioq->abytes;
805 ioq->fifo_beg += ioq->abytes;
806 assert(ioq->fifo_beg <= ioq->fifo_cdx);
807 assert(ioq->fifo_cdx <= ioq->fifo_cdn);
808 bytes -= ioq->abytes;
810 bcopy(ioq->buf + ioq->fifo_beg, msg->aux_data,
812 msg->aux_size = bytes;
813 ioq->fifo_beg += bytes;
814 if (ioq->fifo_cdx < ioq->fifo_beg)
815 ioq->fifo_cdx = ioq->fifo_beg;
816 assert(ioq->fifo_beg <= ioq->fifo_cdx);
817 assert(ioq->fifo_cdx <= ioq->fifo_cdn);
822 ioq->state = DMSG_MSGQ_STATE_AUXDATA2;
824 case DMSG_MSGQ_STATE_AUXDATA2:
826 * Make sure there is enough room for more data.
829 nmax = dmsg_ioq_makeroom(ioq, ioq->abytes - msg->aux_size);
832 * Read and decrypt more of the payload.
834 if (msg->aux_size < ioq->abytes) {
836 n = read(iocom->sock_fd,
837 ioq->buf + ioq->fifo_end,
841 ioq->error = DMSG_IOQ_ERROR_EOF;
844 if (errno != EINTR &&
845 errno != EINPROGRESS &&
847 ioq->error = DMSG_IOQ_ERROR_SOCK;
853 ioq->fifo_end += (size_t)n;
857 if (iocom->flags & DMSG_IOCOMF_CRYPTED) {
858 dmsg_crypto_decrypt(iocom, ioq);
860 ioq->fifo_cdx = ioq->fifo_end;
861 ioq->fifo_cdn = ioq->fifo_end;
863 bytes = ioq->fifo_cdx - ioq->fifo_beg;
865 if (bytes > ioq->abytes - msg->aux_size)
866 bytes = ioq->abytes - msg->aux_size;
869 bcopy(ioq->buf + ioq->fifo_beg,
870 msg->aux_data + msg->aux_size,
872 msg->aux_size += bytes;
873 ioq->fifo_beg += bytes;
877 * Insufficient data accumulated (set msg NULL so caller will
880 if (msg->aux_size < ioq->abytes) {
884 assert(msg->aux_size == ioq->abytes);
887 * Check aux_crc, then we are done.
889 xcrc32 = dmsg_icrc32(msg->aux_data, msg->aux_size);
890 if (xcrc32 != msg->any.head.aux_crc) {
891 ioq->error = DMSG_IOQ_ERROR_ACRC;
895 case DMSG_MSGQ_STATE_ERROR:
897 * Continued calls to drain recorded transactions (returning
898 * a LNK_ERROR for each one), before we return the final
905 * We don't double-return errors, the caller should not
906 * have called us again after getting an error msg.
913 * Check the message sequence. The iv[] should prevent any
914 * possibility of a replay but we add this check anyway.
916 if (msg && ioq->error == 0) {
917 if ((msg->any.head.salt & 255) != (ioq->seq & 255)) {
918 ioq->error = DMSG_IOQ_ERROR_MSGSEQ;
925 * Process transactional state for the message.
927 if (msg && ioq->error == 0) {
928 error = dmsg_state_msgrx(msg);
930 if (error == DMSG_IOQ_ERROR_EALREADY) {
939 * Handle error, RREQ, or completion
941 * NOTE: nmax and bytes are invalid at this point, we don't bother
942 * to update them when breaking out.
947 * An unrecoverable error causes all active receive
948 * transactions to be terminated with a LNK_ERROR message.
950 * Once all active transactions are exhausted we set the
951 * iocom ERROR flag and return a non-transactional LNK_ERROR
952 * message, which should cause master processing loops to
955 assert(ioq->msg == msg);
962 * No more I/O read processing
964 ioq->state = DMSG_MSGQ_STATE_ERROR;
967 * Simulate a remote LNK_ERROR DELETE msg for any open
968 * transactions, ending with a final non-transactional
969 * LNK_ERROR (that the session can detect) when no
970 * transactions remain.
972 msg = dmsg_msg_alloc(iocom->router, 0, 0, NULL, NULL);
973 bzero(&msg->any.head, sizeof(msg->any.head));
974 msg->any.head.magic = DMSG_HDR_MAGIC;
975 msg->any.head.cmd = DMSG_LNK_ERROR;
976 msg->any.head.error = ioq->error;
978 pthread_mutex_lock(&iocom->mtx);
979 dmsg_iocom_drain(iocom);
980 if ((state = RB_ROOT(&iocom->router->staterd_tree)) != NULL) {
982 * Active remote transactions are still present.
983 * Simulate the other end sending us a DELETE.
985 if (state->rxcmd & DMSGF_DELETE) {
989 /*state->txcmd |= DMSGF_DELETE;*/
991 msg->router = state->router;
992 msg->any.head.msgid = state->msgid;
993 msg->any.head.cmd |= DMSGF_ABORT |
996 } else if ((state = RB_ROOT(&iocom->router->statewr_tree)) !=
999 * Active local transactions are still present.
1000 * Simulate the other end sending us a DELETE.
1002 if (state->rxcmd & DMSGF_DELETE) {
1007 msg->router = state->router;
1008 msg->any.head.msgid = state->msgid;
1009 msg->any.head.cmd |= DMSGF_ABORT |
1012 if ((state->rxcmd & DMSGF_CREATE) == 0) {
1013 msg->any.head.cmd |=
1019 * No active local or remote transactions remain.
1020 * Generate a final LNK_ERROR and flag EOF.
1023 iocom->flags |= DMSG_IOCOMF_EOF;
1024 fprintf(stderr, "EOF ON SOCKET %d\n", iocom->sock_fd);
1026 pthread_mutex_unlock(&iocom->mtx);
1029 * For the iocom error case we want to set RWORK to indicate
1030 * that more messages might be pending.
1032 * It is possible to return NULL when there is more work to
1033 * do because each message has to be DELETEd in both
1034 * directions before we continue on with the next (though
1035 * this could be optimized). The transmit direction will
1039 iocom->flags |= DMSG_IOCOMF_RWORK;
1040 } else if (msg == NULL) {
1042 * Insufficient data received to finish building the message,
1043 * set RREQ and return NULL.
1045 * Leave ioq->msg intact.
1046 * Leave the FIFO intact.
1048 iocom->flags |= DMSG_IOCOMF_RREQ;
1053 * The fifo has already been advanced past the message.
1054 * Trivially reset the FIFO indices if possible.
1056 * clear the FIFO if it is now empty and set RREQ to wait
1057 * for more from the socket. If the FIFO is not empty set
1058 * TWORK to bypass the poll so we loop immediately.
1060 if (ioq->fifo_beg == ioq->fifo_cdx &&
1061 ioq->fifo_cdn == ioq->fifo_end) {
1062 iocom->flags |= DMSG_IOCOMF_RREQ;
1068 iocom->flags |= DMSG_IOCOMF_RWORK;
1070 ioq->state = DMSG_MSGQ_STATE_HEADER1;
1077 * Calculate the header and data crc's and write a low-level message to
1078 * the connection. If aux_crc is non-zero the aux_data crc is already
1079 * assumed to have been set.
1081 * A non-NULL msg is added to the queue but not necessarily flushed.
1082 * Calling this function with msg == NULL will get a flush going.
1084 * Caller must hold iocom->mtx.
/*
 * First-stage transmit flush: pull queued messages off the router's
 * txmsgq (under iocom->mtx) onto a local temporary queue, finish
 * populating each header (magic, anti-replay salt, aux_crc/hdr_crc),
 * enqueue them on ioq_tx, then invoke dmsg_iocom_flush2() to do the
 * actual socket writes.  Caller must hold iocom->mtx per the original
 * contract above.
 *
 * NOTE(review): this extract is truncated — lines are missing, including
 * (presumably) a TAILQ_INIT(&tmpq) before use and the terminal-error
 * branch bodies; confirm against the un-truncated original.  Do not treat
 * this text as compilable.
 */
1087 dmsg_iocom_flush1(dmsg_iocom_t *iocom)
1089 dmsg_ioq_t *ioq = &iocom->ioq_tx;
1093 dmsg_msg_queue_t tmpq;
1095 iocom->flags &= ~(DMSG_IOCOMF_WREQ | DMSG_IOCOMF_WWORK);
1097 pthread_mutex_lock(&iocom->mtx);
1098 while ((msg = TAILQ_FIRST(&iocom->router->txmsgq)) != NULL) {
1099 TAILQ_REMOVE(&iocom->router->txmsgq, msg, qentry);
1100 TAILQ_INSERT_TAIL(&tmpq, msg, qentry);
1102 pthread_mutex_unlock(&iocom->mtx);
1104 while ((msg = TAILQ_FIRST(&tmpq)) != NULL) {
1106 * Process terminal connection errors.
1108 TAILQ_REMOVE(&tmpq, msg, qentry);
1110 TAILQ_INSERT_TAIL(&ioq->msgq, msg, qentry);
1116 * Finish populating the msg fields. The salt ensures that
1117 * the iv[] array is ridiculously randomized and we also
1118 * re-seed our PRNG every 32768 messages just to be sure.
1120 msg->any.head.magic = DMSG_HDR_MAGIC;
1121 msg->any.head.salt = (random() << 8) | (ioq->seq & 255);
1123 if ((ioq->seq & 32767) == 0)
1127 * Calculate aux_crc if 0, then calculate hdr_crc.
1129 if (msg->aux_size && msg->any.head.aux_crc == 0) {
1130 assert((msg->aux_size & DMSG_ALIGNMASK) == 0);
1131 xcrc32 = dmsg_icrc32(msg->aux_data, msg->aux_size);
1132 msg->any.head.aux_crc = xcrc32;
1134 msg->any.head.aux_bytes = msg->aux_size / DMSG_ALIGN;
1135 assert((msg->aux_size & DMSG_ALIGNMASK) == 0);
1137 hbytes = (msg->any.head.cmd & DMSGF_SIZE) *
/* hdr_crc is computed over the header with its own crc field zeroed */
1139 msg->any.head.hdr_crc = 0;
1140 msg->any.head.hdr_crc = dmsg_icrc32(&msg->any.head, hbytes);
1143 * Enqueue the message (the flush codes handles stream
1146 TAILQ_INSERT_TAIL(&ioq->msgq, msg, qentry);
1149 dmsg_iocom_flush2(iocom);
1153 * Thread localized, iocom->mtx not held by caller.
/*
 * Second-stage transmit flush: build an iovec over the queued messages'
 * headers and aux payloads (resuming mid-message via ioq->hbytes/abytes),
 * optionally run it through the crypto layer (which redirects the iov at
 * the tx FIFO), writev() it, then retire fully-sent messages from the
 * queue and translate the writev result into WREQ / error / drain.
 * Thread localized, iocom->mtx not held by caller.
 *
 * NOTE(review): this extract is truncated — locals, loop offsets
 * (hoff/aoff), braces and branch targets are missing.  Do not treat this
 * text as compilable; restore from the un-truncated original before
 * editing logic.
 */
1156 dmsg_iocom_flush2(dmsg_iocom_t *iocom)
1158 dmsg_ioq_t *ioq = &iocom->ioq_tx;
1161 struct iovec iov[DMSG_IOQ_MAXIOVEC];
1170 dmsg_iocom_drain(iocom);
1175 * Pump messages out the connection by building an iovec.
1177 * ioq->hbytes/ioq->abytes tracks how much of the first message
1178 * in the queue has been successfully written out, so we can
1186 TAILQ_FOREACH(msg, &ioq->msgq, qentry) {
1187 hbytes = (msg->any.head.cmd & DMSGF_SIZE) *
1189 abytes = msg->aux_size;
1190 assert(hoff <= hbytes && aoff <= abytes);
1192 if (hoff < hbytes) {
1193 iov[iovcnt].iov_base = (char *)&msg->any.head + hoff;
1194 iov[iovcnt].iov_len = hbytes - hoff;
1195 nact += hbytes - hoff;
1197 if (iovcnt == DMSG_IOQ_MAXIOVEC)
1200 if (aoff < abytes) {
1201 assert(msg->aux_data != NULL);
1202 iov[iovcnt].iov_base = (char *)msg->aux_data + aoff;
1203 iov[iovcnt].iov_len = abytes - aoff;
1204 nact += abytes - aoff;
1206 if (iovcnt == DMSG_IOQ_MAXIOVEC)
1216 * Encrypt and write the data. The crypto code will move the
1217 * data into the fifo and adjust the iov as necessary. If
1218 * encryption is disabled the iov is left alone.
1220 * May return a smaller iov (thus a smaller n), with aggregated
1221 * chunks. May reduce nmax to what fits in the FIFO.
1223 * This function sets nact to the number of original bytes now
1224 * encrypted, adding to the FIFO some number of bytes that might
1225 * be greater depending on the crypto mechanic. iov[] is adjusted
1226 * to point at the FIFO if necessary.
1228 * NOTE: The return value from the writev() is the post-encrypted
1229 * byte count, not the plaintext count.
1231 if (iocom->flags & DMSG_IOCOMF_CRYPTED) {
1233 * Make sure the FIFO has a reasonable amount of space
1234 * left (if not completely full).
1236 if (ioq->fifo_beg > sizeof(ioq->buf) / 2 &&
1237 sizeof(ioq->buf) - ioq->fifo_end >= DMSG_ALIGN * 2) {
1238 bcopy(ioq->buf + ioq->fifo_beg, ioq->buf,
1239 ioq->fifo_end - ioq->fifo_beg);
1240 ioq->fifo_cdx -= ioq->fifo_beg;
1241 ioq->fifo_cdn -= ioq->fifo_beg;
1242 ioq->fifo_end -= ioq->fifo_beg;
1246 iovcnt = dmsg_crypto_encrypt(iocom, ioq, iov, iovcnt, &nact);
1247 n = writev(iocom->sock_fd, iov, iovcnt);
1252 if (ioq->fifo_beg == ioq->fifo_end) {
/* non-crypted path: write the plaintext iovec directly */
1260 n = writev(iocom->sock_fd, iov, iovcnt);
1268 * Clean out the transmit queue based on what we successfully
1269 * sent (nact is the plaintext count). ioq->hbytes/abytes
1270 * represents the portion of the first message previously sent.
1272 while ((msg = TAILQ_FIRST(&ioq->msgq)) != NULL) {
1273 hbytes = (msg->any.head.cmd & DMSGF_SIZE) *
1275 abytes = msg->aux_size;
1277 if ((size_t)nact < hbytes - ioq->hbytes) {
1278 ioq->hbytes += nact;
1282 nact -= hbytes - ioq->hbytes;
1283 ioq->hbytes = hbytes;
1284 if ((size_t)nact < abytes - ioq->abytes) {
1285 ioq->abytes += nact;
1289 nact -= abytes - ioq->abytes;
1291 TAILQ_REMOVE(&ioq->msgq, msg, qentry);
1296 dmsg_state_cleanuptx(msg);
1301 * Process the return value from the write w/regards to blocking.
1304 if (errno != EINTR &&
1305 errno != EINPROGRESS &&
/* hard socket error: flag it and drain the tx queue */
1310 ioq->error = DMSG_IOQ_ERROR_SOCK;
1311 dmsg_iocom_drain(iocom);
1314 * Wait for socket buffer space
1316 iocom->flags |= DMSG_IOCOMF_WREQ;
1319 iocom->flags |= DMSG_IOCOMF_WREQ;
1322 dmsg_iocom_drain(iocom);
1327 * Kill pending msgs on ioq_tx and adjust the flags such that no more
1328 * write events will occur. We don't kill read msgs because we want
1329 * the caller to pull off our contrived terminal error msg to detect
1330 * the connection failure.
1332 * Thread localized, iocom->mtx not held by caller.
1335 dmsg_iocom_drain(dmsg_iocom_t *iocom)
1337 dmsg_ioq_t *ioq = &iocom->ioq_tx;
1340 iocom->flags &= ~(DMSG_IOCOMF_WREQ | DMSG_IOCOMF_WWORK);
1344 while ((msg = TAILQ_FIRST(&ioq->msgq)) != NULL) {
1345 TAILQ_REMOVE(&ioq->msgq, msg, qentry);
1347 dmsg_state_cleanuptx(msg);
1352 * Write a message to an iocom, with additional state processing.
/*
 * Write a message to an iocom, with additional state processing: under
 * iocom->mtx the transaction's msgid/txcmd are finalized (CREATE is
 * serialized here rather than in dmsg_msg_alloc()), then the message is
 * queued on the router's txmsgq and the I/O thread is woken via the
 * wakeup pipe.  The I/O thread generates CRCs and does encryption.
 *
 * NOTE(review): this extract is truncated — the else-branch structure for
 * the non-transactional case and several braces are missing.  Do not
 * treat this text as compilable.
 */
1355 dmsg_msg_write(dmsg_msg_t *msg)
1357 dmsg_iocom_t *iocom = msg->router->iocom;
1358 dmsg_state_t *state;
1362 * Handle state processing, create state if necessary.
1364 pthread_mutex_lock(&iocom->mtx);
1365 if ((state = msg->state) != NULL) {
1367 * Existing transaction (could be reply). It is also
1368 * possible for this to be the first reply (CREATE is set),
1369 * in which case we populate state->txcmd.
1371 * state->txcmd is adjusted to hold the final message cmd,
1372 * and we also be sure to set the CREATE bit here. We did
1373 * not set it in dmsg_msg_alloc() because that would have
1374 * not been serialized (state could have gotten ripped out
1375 * from under the message prior to it being transmitted).
1377 if ((msg->any.head.cmd & (DMSGF_CREATE | DMSGF_REPLY)) ==
1379 state->txcmd = msg->any.head.cmd & ~DMSGF_DELETE;
1381 msg->any.head.msgid = state->msgid;
1382 assert(((state->txcmd ^ msg->any.head.cmd) & DMSGF_REPLY) == 0);
1383 if (msg->any.head.cmd & DMSGF_CREATE)
1384 state->txcmd = msg->any.head.cmd & ~DMSGF_DELETE;
/* no transaction state: non-transactional (one-way) message */
1386 msg->any.head.msgid = 0;
1387 /* XXX set spanid by router */
1389 msg->any.head.source = 0;
1390 msg->any.head.target = msg->router->target;
1393 * Queue it for output, wake up the I/O pthread. Note that the
1394 * I/O thread is responsible for generating the CRCs and encryption.
1396 TAILQ_INSERT_TAIL(&iocom->router->txmsgq, msg, qentry);
1398 write(iocom->wakeupfds[1], &dummy, 1); /* XXX optimize me */
1399 pthread_mutex_unlock(&iocom->mtx);
1403 * This is a shortcut to formulate a reply to msg with a simple error code,
1404 * It can reply to and terminate a transaction, or it can reply to a one-way
1405 * messages. A DMSG_LNK_ERROR command code is utilized to encode
1406 * the error code (which can be 0). Not all transactions are terminated
1407 * with DMSG_LNK_ERROR status (the low level only cares about the
1408 * MSGF_DELETE flag), but most are.
1410 * Replies to one-way messages are a bit of an oxymoron but the feature
1411 * is used by the debug (DBG) protocol.
1413 * The reply contains no extended data.
/*
 * Shortcut to formulate a terminating DMSG_LNK_ERROR reply to msg with a
 * simple error code: determines the reply direction from the existing
 * state (or the message for one-way msgs), sets DELETE to close our side,
 * allocates a bare message bound to the existing state, and writes it.
 * Returns silently if our direction was already closed.
 *
 * NOTE(review): this extract is truncated — the branch bodies selecting
 * the REPLY direction and the early-return are missing.  Do not treat
 * this text as compilable.
 */
1416 dmsg_msg_reply(dmsg_msg_t *msg, uint32_t error)
1418 dmsg_iocom_t *iocom = msg->router->iocom;
1419 dmsg_state_t *state = msg->state;
1425 * Reply with a simple error code and terminate the transaction.
1427 cmd = DMSG_LNK_ERROR;
1430 * Check if our direction has even been initiated yet, set CREATE.
1432 * Check what direction this is (command or reply direction). Note
1433 * that txcmd might not have been initiated yet.
1435 * If our direction has already been closed we just return without
1439 if (state->txcmd & DMSGF_DELETE)
1441 if (state->txcmd & DMSGF_REPLY)
1443 cmd |= DMSGF_DELETE;
/* one-way message: reply direction derived from the message itself */
1445 if ((msg->any.head.cmd & DMSGF_REPLY) == 0)
1450 * Allocate the message and associate it with the existing state.
1451 * We cannot pass MSGF_CREATE to msg_alloc() because that may
1452 * allocate new state. We have our state already.
1454 nmsg = dmsg_msg_alloc(iocom->router, 0, cmd, NULL, NULL);
1456 if ((state->txcmd & DMSGF_CREATE) == 0)
1457 nmsg->any.head.cmd |= DMSGF_CREATE;
1459 nmsg->any.head.error = error;
1460 nmsg->state = state;
1461 dmsg_msg_write(nmsg);
1465 * Similar to dmsg_msg_reply() but leave the transaction open. That is,
1466 * we are generating a streaming reply or an intermediate acknowledgement
1467 * of some sort as part of the higher level protocol, with more to come
/*
 * dmsg_msg_result() - like dmsg_msg_reply() but leaves the transaction open:
 * sends a DMSG_LNK_ERROR status message with 'error' WITHOUT setting
 * DMSGF_DELETE, for streaming/intermediate acknowledgements.
 *
 * NOTE(review): lines were elided from this chunk by extraction (leading
 * decimals are baked-in original line numbers with gaps); braces and returns
 * are missing below.
 */
1471 dmsg_msg_result(dmsg_msg_t *msg, uint32_t error)
1473 dmsg_iocom_t *iocom = msg->router->iocom;
1474 dmsg_state_t *state = msg->state;
1480 * Reply with a simple error code and terminate the transaction.
1482 cmd = DMSG_LNK_ERROR;
1485 * Check if our direction has even been initiated yet, set CREATE.
1487 * Check what direction this is (command or reply direction). Note
1488 * that txcmd might not have been initiated yet.
1490 * If our direction has already been closed we just return without
/* Guard: our tx side already sent DELETE (body of this if was elided). */
1494 if (state->txcmd & DMSGF_DELETE)
1496 if (state->txcmd & DMSGF_REPLY)
1498 /* continuing transaction, do not set MSGF_DELETE */
1500 if ((msg->any.head.cmd & DMSGF_REPLY) == 0)
1504 nmsg = dmsg_msg_alloc(iocom->router, 0, cmd, NULL, NULL);
/* First message in our direction gets CREATE. */
1506 if ((state->txcmd & DMSGF_CREATE) == 0)
1507 nmsg->any.head.cmd |= DMSGF_CREATE;
1509 nmsg->any.head.error = error;
1510 nmsg->state = state;
1511 dmsg_msg_write(nmsg);
1515 * Terminate a transaction given a state structure by issuing a DELETE.
/*
 * dmsg_state_reply() - terminate a transaction directly from its state
 * structure by issuing DMSG_LNK_ERROR|DMSGF_DELETE with 'error'.
 *
 * NOTE(review): lines were elided from this chunk by extraction (leading
 * decimals are baked-in original line numbers with gaps); braces and returns
 * are missing below.
 */
1518 dmsg_state_reply(dmsg_state_t *state, uint32_t error)
1521 uint32_t cmd = DMSG_LNK_ERROR | DMSGF_DELETE;
1524 * Nothing to do if we already transmitted a delete
1526 if (state->txcmd & DMSGF_DELETE)
1530 * Set REPLY if the other end initiated the command. Otherwise
1531 * we are the command direction.
1533 if (state->txcmd & DMSGF_REPLY)
1536 nmsg = dmsg_msg_alloc(state->iocom->router, 0, cmd, NULL, NULL);
/* First message in our direction gets CREATE. */
1538 if ((state->txcmd & DMSGF_CREATE) == 0)
1539 nmsg->any.head.cmd |= DMSGF_CREATE;
1541 nmsg->any.head.error = error;
1542 nmsg->state = state;
1543 dmsg_msg_write(nmsg);
1546 /************************************************************************
1547 * TRANSACTION STATE HANDLING *
1548 ************************************************************************
1553 * Process state tracking for a message after reception, prior to
1556 * Called with msglk held and the msg dequeued.
1558 * All messages are called with dummy state and return actual state.
1559 * (One-off messages often just return the same dummy state).
1561 * May request that caller discard the message by setting *discardp to 1.
1562 * The returned state is not used in this case and is allowed to be NULL.
1566 * These routines handle persistent and command/reply message state via the
1567 * CREATE and DELETE flags. The first message in a command or reply sequence
1568 * sets CREATE, the last message in a command or reply sequence sets DELETE.
1570 * There can be any number of intermediate messages belonging to the same
1571 * sequence sent in between the CREATE message and the DELETE message,
1572 * which set neither flag. This represents a streaming command or reply.
1574 * Any command message received with CREATE set expects a reply sequence to
1575 * be returned. Reply sequences work the same as command sequences except the
1576 * REPLY bit is also sent. Both the command side and reply side can
1577 * degenerate into a single message with both CREATE and DELETE set. Note
1578 * that one side can be streaming and the other side not, or neither, or both.
1580 * The msgid is unique for the initiator. That is, two sides sending a new
1581 * message can use the same msgid without colliding.
1585 * ABORT sequences work by setting the ABORT flag along with normal message
1586 * state. However, ABORTs can also be sent on half-closed messages, that is
1587 * even if the command or reply side has already sent a DELETE, as long as
1588 * the message has not been fully closed it can still send an ABORT+DELETE
1589 * to terminate the half-closed message state.
1591 * Since ABORT+DELETEs can race we silently discard ABORT's for message
1592 * state which has already been fully closed. REPLY+ABORT+DELETEs can
1593 * also race, and in this situation the other side might have already
1594 * initiated a new unrelated command with the same message id. Since
1595 * the abort has not set the CREATE flag the situation can be detected
1596 * and the message will also be discarded.
1598 * Non-blocking requests can be initiated with ABORT+CREATE[+DELETE].
1599 * The ABORT request is essentially integrated into the command instead
1600 * of being sent later on. In this situation the command implementation
1601 * detects that CREATE and ABORT are both set (vs ABORT alone) and can
1602 * special-case non-blocking operation for the command.
1604 * NOTE! Messages with ABORT set without CREATE or DELETE are considered
1605 * to be mid-stream aborts for command/reply sequences. ABORTs on
1606 * one-way messages are not supported.
1608 * NOTE! If a command sequence does not support aborts the ABORT flag is
1613 * One-off messages (no reply expected) are sent with neither CREATE or DELETE
1614 * set. One-off messages cannot be aborted and typically aren't processed
1615 * by these routines. The REPLY bit can be used to distinguish whether a
1616 * one-off message is a command or reply. For example, one-off replies
1617 * will typically just contain status updates.
/*
 * dmsg_state_msgrx() - per-message receive-side transaction state tracking.
 * Looks up persistent state by msgid (staterd_tree for commands, statewr_tree
 * for replies), creates dynamic state on CREATE, and validates/discards
 * racing ABORT+DELETE cases.
 *
 * NOTE(review): lines were elided from this chunk by extraction (leading
 * decimals are baked-in original line numbers with gaps); braces, breaks,
 * returns, and the function tail are missing below. Comments describe only
 * the visible statements.
 */
1620 dmsg_state_msgrx(dmsg_msg_t *msg)
1622 dmsg_iocom_t *iocom = msg->router->iocom;
1623 dmsg_state_t *state;
1628 * Lock RB tree and locate existing persistent state, if any.
1630 * If received msg is a command state is on staterd_tree.
1631 * If received msg is a reply state is on statewr_tree.
1633 dummy.msgid = msg->any.head.msgid;
1634 pthread_mutex_lock(&iocom->mtx);
1635 if (msg->any.head.cmd & DMSGF_REPLY) {
1636 state = RB_FIND(dmsg_state_tree,
1637 &iocom->router->statewr_tree, &dummy);
1639 state = RB_FIND(dmsg_state_tree,
1640 &iocom->router->staterd_tree, &dummy);
1643 pthread_mutex_unlock(&iocom->mtx);
1646 * Short-cut one-off or mid-stream messages (state may be NULL).
1648 if ((msg->any.head.cmd & (DMSGF_CREATE | DMSGF_DELETE |
1649 DMSGF_ABORT)) == 0) {
1654 * Switch on CREATE, DELETE, REPLY, and also handle ABORT from
1655 * inside the case statements.
1657 switch(msg->any.head.cmd & (DMSGF_CREATE | DMSGF_DELETE |
/* NOTE(review): the plain DMSGF_CREATE case label was elided here. */
1660 case DMSGF_CREATE | DMSGF_DELETE:
1662 * New persistent command received.
/* Duplicate msgid for an open received transaction is a protocol error. */
1665 fprintf(stderr, "duplicate-trans %s\n",
1667 error = DMSG_IOQ_ERROR_TRANS;
/* Allocate zeroed dynamic state for the new incoming transaction. */
1671 state = malloc(sizeof(*state));
1672 bzero(state, sizeof(*state));
1673 state->iocom = iocom;
1674 state->flags = DMSG_STATE_DYNAMIC;
/* Our tx side will be the reply direction for this received command. */
1676 state->txcmd = DMSGF_REPLY;
1677 state->rxcmd = msg->any.head.cmd & ~DMSGF_DELETE;
1678 state->flags |= DMSG_STATE_INSERTED;
1679 state->msgid = msg->any.head.msgid;
1680 state->router = msg->router;
1682 pthread_mutex_lock(&iocom->mtx);
1683 RB_INSERT(dmsg_state_tree,
1684 &iocom->router->staterd_tree, state);
1685 pthread_mutex_unlock(&iocom->mtx);
1688 fprintf(stderr, "create state %p id=%08x on iocom staterd %p\n",
1689 state, (uint32_t)state->msgid, iocom);
1694 * Persistent state is expected but might not exist if an
1695 * ABORT+DELETE races the close.
1697 if (state == NULL) {
1698 if (msg->any.head.cmd & DMSGF_ABORT) {
1699 error = DMSG_IOQ_ERROR_EALREADY;
1701 fprintf(stderr, "missing-state %s\n",
1703 error = DMSG_IOQ_ERROR_TRANS;
1710 * Handle another ABORT+DELETE case if the msgid has already
1713 if ((state->rxcmd & DMSGF_CREATE) == 0) {
1714 if (msg->any.head.cmd & DMSGF_ABORT) {
1715 error = DMSG_IOQ_ERROR_EALREADY;
1717 fprintf(stderr, "reused-state %s\n",
1719 error = DMSG_IOQ_ERROR_TRANS;
1728 * Check for mid-stream ABORT command received, otherwise
1731 if (msg->any.head.cmd & DMSGF_ABORT) {
/* Silently treat ABORT on closed/missing rx state as EALREADY. */
1732 if (state == NULL ||
1733 (state->rxcmd & DMSGF_CREATE) == 0) {
1734 error = DMSG_IOQ_ERROR_EALREADY;
1740 case DMSGF_REPLY | DMSGF_CREATE:
1741 case DMSGF_REPLY | DMSGF_CREATE | DMSGF_DELETE:
1743 * When receiving a reply with CREATE set the original
1744 * persistent state message should already exist.
1746 if (state == NULL) {
1747 fprintf(stderr, "no-state(r) %s\n",
1749 error = DMSG_IOQ_ERROR_TRANS;
/* Sanity: rx direction must agree with the REPLY bit of the message. */
1753 assert(((state->rxcmd ^ msg->any.head.cmd) &
1755 state->rxcmd = msg->any.head.cmd & ~DMSGF_DELETE;
1758 case DMSGF_REPLY | DMSGF_DELETE:
1760 * Received REPLY+ABORT+DELETE in case where msgid has
1761 * already been fully closed, ignore the message.
1763 if (state == NULL) {
1764 if (msg->any.head.cmd & DMSGF_ABORT) {
1765 error = DMSG_IOQ_ERROR_EALREADY;
1767 fprintf(stderr, "no-state(r,d) %s\n",
1769 error = DMSG_IOQ_ERROR_TRANS;
1776 * Received REPLY+ABORT+DELETE in case where msgid has
1777 * already been reused for an unrelated message,
1778 * ignore the message.
1780 if ((state->rxcmd & DMSGF_CREATE) == 0) {
1781 if (msg->any.head.cmd & DMSGF_ABORT) {
1782 error = DMSG_IOQ_ERROR_EALREADY;
1784 fprintf(stderr, "reused-state(r,d) %s\n",
1786 error = DMSG_IOQ_ERROR_TRANS;
1795 * Check for mid-stream ABORT reply received to sent command.
1797 if (msg->any.head.cmd & DMSGF_ABORT) {
1798 if (state == NULL ||
1799 (state->rxcmd & DMSGF_CREATE) == 0) {
1800 error = DMSG_IOQ_ERROR_EALREADY;
/*
 * dmsg_state_cleanuprx() - post-processing after a received message has been
 * handled. On DELETE, marks the rx side closed and, if the tx side is also
 * closed, removes the state from its RB tree and frees it.
 *
 * NOTE(review): lines were elided from this chunk by extraction (leading
 * decimals are baked-in original line numbers with gaps); braces, frees,
 * and the function tail are missing below.
 */
1811 dmsg_state_cleanuprx(dmsg_iocom_t *iocom, dmsg_msg_t *msg)
1813 dmsg_state_t *state;
1815 if ((state = msg->state) == NULL) {
1817 * Free a non-transactional message, there is no state
1821 } else if (msg->any.head.cmd & DMSGF_DELETE) {
1823 * Message terminating transaction, destroy the related
1824 * state, the original message, and this message (if it
1825 * isn't the original message due to a CREATE|DELETE).
1827 pthread_mutex_lock(&iocom->mtx);
1828 state->rxcmd |= DMSGF_DELETE;
/* Both directions closed: tear the state down under the iocom mutex. */
1829 if (state->txcmd & DMSGF_DELETE) {
1830 if (state->msg == msg)
1832 assert(state->flags & DMSG_STATE_INSERTED);
/* Received replies live on statewr_tree, received commands on staterd_tree. */
1833 if (state->rxcmd & DMSGF_REPLY) {
1834 assert(msg->any.head.cmd & DMSGF_REPLY);
1835 RB_REMOVE(dmsg_state_tree,
1836 &iocom->router->statewr_tree, state);
1838 assert((msg->any.head.cmd & DMSGF_REPLY) == 0);
1839 RB_REMOVE(dmsg_state_tree,
1840 &iocom->router->staterd_tree, state);
1842 state->flags &= ~DMSG_STATE_INSERTED;
1843 dmsg_state_free(state);
1847 pthread_mutex_unlock(&iocom->mtx);
1849 } else if (state->msg != msg) {
1851 * Message not terminating transaction, leave state intact
1852 * and free message if it isn't the CREATE message.
/*
 * dmsg_state_cleanuptx() - transmit-side twin of dmsg_state_cleanuprx().
 * On DELETE, marks the tx side closed and, if the rx side is also closed,
 * removes the state from its RB tree and frees it.
 *
 * NOTE(review): lines were elided from this chunk by extraction (leading
 * decimals are baked-in original line numbers with gaps); braces, frees,
 * and the function tail are missing below.
 */
1859 dmsg_state_cleanuptx(dmsg_msg_t *msg)
1861 dmsg_iocom_t *iocom = msg->router->iocom;
1862 dmsg_state_t *state;
1864 if ((state = msg->state) == NULL) {
1866 } else if (msg->any.head.cmd & DMSGF_DELETE) {
1867 pthread_mutex_lock(&iocom->mtx);
1868 state->txcmd |= DMSGF_DELETE;
/* Both directions closed: tear the state down under the iocom mutex. */
1869 if (state->rxcmd & DMSGF_DELETE) {
1870 if (state->msg == msg)
1872 assert(state->flags & DMSG_STATE_INSERTED);
/* Transmitted replies live on staterd_tree, transmitted commands on statewr_tree. */
1873 if (state->txcmd & DMSGF_REPLY) {
1874 assert(msg->any.head.cmd & DMSGF_REPLY);
1875 RB_REMOVE(dmsg_state_tree,
1876 &iocom->router->staterd_tree, state);
1878 assert((msg->any.head.cmd & DMSGF_REPLY) == 0);
1879 RB_REMOVE(dmsg_state_tree,
1880 &iocom->router->statewr_tree, state);
1882 state->flags &= ~DMSG_STATE_INSERTED;
1883 dmsg_state_free(state);
1887 pthread_mutex_unlock(&iocom->mtx);
1889 } else if (state->msg != msg) {
1895 * Called with iocom locked
/*
 * dmsg_state_free() - release a fully-terminated transaction state.
 * Called with the iocom mutex held (see callers in cleanuprx/cleanuptx).
 * Wakes the rx thread when an iocom error is pending and the last state
 * has drained from both RB trees, so iocom shutdown can proceed.
 *
 * NOTE(review): lines were elided from this chunk by extraction (leading
 * decimals are baked-in original line numbers with gaps); braces and the
 * actual free of the state structure are missing below.
 */
1898 dmsg_state_free(dmsg_state_t *state)
1900 dmsg_iocom_t *iocom = state->iocom;
1905 fprintf(stderr, "terminate state %p id=%08x\n",
1906 state, (uint32_t)state->msgid);
1908 assert(state->any.any == NULL);
1912 dmsg_msg_free_locked(msg);
1916 * When an iocom error is present we are trying to close down the
1917 * iocom, but we have to wait for all states to terminate before
1918 * we can do so. The iocom rx code will terminate the receive side
1919 * for all transactions by simulating incoming DELETE messages,
1920 * but the state doesn't go away until both sides are terminated.
1922 * We may have to wake up the rx code.
1924 if (iocom->ioq_rx.error &&
1925 RB_EMPTY(&iocom->router->staterd_tree) &&
1926 RB_EMPTY(&iocom->router->statewr_tree)) {
/* Poke the rx thread via the wakeup pipe; return value intentionally unused. */
1928 write(iocom->wakeupfds[1], &dummy, 1);
1932 /************************************************************************
1934 ************************************************************************
1936 * Incoming messages are routed by their spanid, matched up against
1937 * outgoing LNK_SPANs managed by h2span_relay structures (see msg_lnk.c).
1938 * Any replies run through the same router.
1940 * Originated messages are routed by their spanid, matched up against
1941 * incoming LNK_SPANs managed by h2span_link structures (see msg_lnk.c).
1942 * Replies come back through the same route.
1944 * Keep in mind that ALL MESSAGE TRAFFIC pertaining to a particular
1945 * transaction runs through the same route. Commands and replies both.
1947 * An originated message will use a different routing spanid to
1948 * reach a target node than a message which originates from that node.
1949 * They might use the same physical pipes (each pipe can have multiple
1950 * SPANs and RELAYs), but the routes are distinct from the perspective
/*
 * dmsg_router_alloc() - allocate a router and initialize its tx message
 * queue. NOTE(review): the return statement was elided from this chunk by
 * extraction (leading decimals are baked-in original line numbers with gaps).
 */
1954 dmsg_router_alloc(void)
1956 dmsg_router_t *router;
1958 router = dmsg_alloc(sizeof(*router));
1959 TAILQ_INIT(&router->txmsgq);
/*
 * dmsg_router_connect() - insert a router into the global link (ltree) or
 * relay (rtree) RB tree under router_mtx and mark it CONNECTED. Exactly one
 * of router->link / router->relay must be set; double-connect asserts.
 *
 * NOTE(review): lines were elided from this chunk by extraction (leading
 * decimals are baked-in original line numbers with gaps); braces and the
 * link-vs-relay branch structure are missing below.
 */
1964 dmsg_router_connect(dmsg_router_t *router)
1968 assert(router->link || router->relay);
1969 assert((router->flags & DMSG_ROUTER_CONNECTED) == 0);
1971 pthread_mutex_lock(&router_mtx);
1973 tmp = RB_INSERT(dmsg_router_tree, &router_ltree, router);
1975 tmp = RB_INSERT(dmsg_router_tree, &router_rtree, router);
/* RB_INSERT returns NULL on success; a collision would indicate a bug. */
1976 assert(tmp == NULL);
1977 router->flags |= DMSG_ROUTER_CONNECTED;
1978 pthread_mutex_unlock(&router_mtx);
/*
 * dmsg_router_disconnect() - reverse of dmsg_router_connect(): remove the
 * router from the global link (ltree) or relay (rtree) RB tree under
 * router_mtx and clear the CONNECTED flag.
 *
 * NOTE(review): lines were elided from this chunk by extraction (leading
 * decimals are baked-in original line numbers with gaps); the *routerp
 * dereference, braces, and branch structure are missing below.
 */
1982 dmsg_router_disconnect(dmsg_router_t **routerp)
1984 dmsg_router_t *router;
1987 assert(router->link || router->relay);
1988 assert(router->flags & DMSG_ROUTER_CONNECTED);
1990 pthread_mutex_lock(&router_mtx);
1992 RB_REMOVE(dmsg_router_tree, &router_ltree, router);
1994 RB_REMOVE(dmsg_router_tree, &router_rtree, router);
1995 router->flags &= ~DMSG_ROUTER_CONNECTED;
1997 pthread_mutex_unlock(&router_mtx);
2005 dmsg_route_msg(dmsg_msg_t *msg)
2011 * This swaps endian for a hammer2_msg_hdr. Note that the extended
2012 * header is not adjusted, just the core header.
/*
 * dmsg_bswap_head() - byte-swap every field of the core dmsg header in
 * place, using width-appropriate bswap16/32/64 per field. The extended
 * (aux) payload is NOT adjusted, only the core header.
 *
 * NOTE(review): lines were elided from this chunk by extraction (leading
 * decimals are baked-in original line numbers with gaps); the braces are
 * missing below.
 */
2015 dmsg_bswap_head(dmsg_hdr_t *head)
2017 head->magic = bswap16(head->magic);
2018 head->reserved02 = bswap16(head->reserved02);
2019 head->salt = bswap32(head->salt);
2021 head->msgid = bswap64(head->msgid);
2022 head->source = bswap64(head->source);
2023 head->target = bswap64(head->target);
2025 head->cmd = bswap32(head->cmd);
2026 head->aux_crc = bswap32(head->aux_crc);
2027 head->aux_bytes = bswap32(head->aux_bytes);
2028 head->error = bswap32(head->error);
2029 head->aux_descr = bswap64(head->aux_descr);
2030 head->reserved38= bswap32(head->reserved38);
2031 head->hdr_crc = bswap32(head->hdr_crc);