2 * Copyright (c) 2011-2015 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@dragonflybsd.org>
6 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
18 * 3. Neither the name of The DragonFly Project nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific, prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 #include "dmsg_local.h"
/*
 * NOTE(review): this file appears to be a partial extraction of the original
 * source; interior lines (braces, returns, declarations) are missing
 * throughout and original line numbers are fused into the text.  Edits below
 * are documentation-only.
 */
/* Forward declarations for file-local (static) helpers used by the iocom core. */
41 static int dmsg_state_msgrx(dmsg_msg_t *msg);
42 static void dmsg_state_cleanuptx(dmsg_iocom_t *iocom, dmsg_msg_t *msg);
43 static void dmsg_msg_free_locked(dmsg_msg_t *msg);
44 static void dmsg_state_free(dmsg_state_t *state);
45 static void dmsg_msg_simulate_failure(dmsg_state_t *state, int error);
/* Generate the RB-tree implementation for the per-iocom transaction-state trees. */
47 RB_GENERATE(dmsg_state_tree, dmsg_state, rbnode, dmsg_state_cmp);
50 * STATE TREE - Represents open transactions which are indexed by their
51 * { msgid } relative to the governing iocom.
/*
 * RB-tree comparator: orders transaction states by msgid.
 * NOTE(review): return statements are not visible in this extraction;
 * presumably the standard (-1, +1, 0) triple — confirm against full source.
 */
54 dmsg_state_cmp(dmsg_state_t *state1, dmsg_state_t *state2)
56 if (state1->msgid < state2->msgid)
58 if (state1->msgid > state2->msgid)
64 * Initialize a low-level ioq
/*
 * Zero the ioq structure, arm its read/write state machine at HEADER1,
 * and initialize the pending-message queue.  The iocom argument is unused.
 */
67 dmsg_ioq_init(dmsg_iocom_t *iocom __unused, dmsg_ioq_t *ioq)
69 bzero(ioq, sizeof(*ioq));
70 ioq->state = DMSG_MSGQ_STATE_HEADER1;
71 TAILQ_INIT(&ioq->msgq);
77 * caller holds iocom->mtx.
/*
 * Tear down an ioq.  The message queue is expected to be empty at this
 * point (the assert(0) fires if not); any in-progress partial message
 * (ioq->msg) is also cleaned up.  Freeing calls are not visible here —
 * presumably dmsg_msg_free_locked(); confirm against full source.
 */
80 dmsg_ioq_done(dmsg_iocom_t *iocom __unused, dmsg_ioq_t *ioq)
84 while ((msg = TAILQ_FIRST(&ioq->msgq)) != NULL) {
85 assert(0); /* shouldn't happen */
86 TAILQ_REMOVE(&ioq->msgq, msg, qentry);
89 if ((msg = ioq->msg) != NULL) {
96 * Initialize a low-level communications channel.
98 * NOTE: The signal_func() is called at least once from the loop and can be
99 * re-armed via dmsg_iocom_restate().
/*
 * Zero and populate an iocom: label, callbacks, mutex, state trees,
 * free queues, tx queue, fds, initial flags, both ioqs, the root state0,
 * and the non-blocking wakeup pipe.  Crypto is negotiated synchronously
 * only when sock_fd is actually a socket.
 */
102 dmsg_iocom_init(dmsg_iocom_t *iocom, int sock_fd, int alt_fd,
103 void (*signal_func)(dmsg_iocom_t *iocom),
104 void (*rcvmsg_func)(dmsg_msg_t *msg),
105 void (*usrmsg_func)(dmsg_msg_t *msg, int unmanaged),
106 void (*altmsg_func)(dmsg_iocom_t *iocom))
110 bzero(iocom, sizeof(*iocom));
/* Default human-readable label; replaceable via dmsg_iocom_label(). */
112 asprintf(&iocom->label, "iocom-%p", iocom);
113 iocom->signal_callback = signal_func;
114 iocom->rcvmsg_callback = rcvmsg_func;
115 iocom->altmsg_callback = altmsg_func;
116 iocom->usrmsg_callback = usrmsg_func;
118 pthread_mutex_init(&iocom->mtx, NULL);
119 RB_INIT(&iocom->staterd_tree);
120 RB_INIT(&iocom->statewr_tree);
121 TAILQ_INIT(&iocom->freeq);
122 TAILQ_INIT(&iocom->freeq_aux);
123 TAILQ_INIT(&iocom->txmsgq);
124 iocom->sock_fd = sock_fd;
125 iocom->alt_fd = alt_fd;
126 iocom->flags = DMSG_IOCOMF_RREQ | DMSG_IOCOMF_CLOSEALT;
/* Guarantee signal_callback runs at least once from the core loop. */
128 iocom->flags |= DMSG_IOCOMF_SWORK;
129 dmsg_ioq_init(iocom, &iocom->ioq_rx);
130 dmsg_ioq_init(iocom, &iocom->ioq_tx);
/* state0 is the root of the transaction tree; it is its own parent. */
131 iocom->state0.refs = 1; /* should never trigger a free */
132 iocom->state0.iocom = iocom;
133 iocom->state0.parent = &iocom->state0;
134 iocom->state0.flags = DMSG_STATE_ROOT;
135 TAILQ_INIT(&iocom->state0.subq);
/* Inter-thread wakeup pipe, non-blocking on both ends. */
137 if (pipe(iocom->wakeupfds) < 0)
139 fcntl(iocom->wakeupfds[0], F_SETFL, O_NONBLOCK);
140 fcntl(iocom->wakeupfds[1], F_SETFL, O_NONBLOCK);
143 * Negotiate session crypto synchronously. This will mark the
144 * connection as error'd if it fails. If this is a pipe it's
145 * a linkage that we set up ourselves to the filesystem and there
148 if (fstat(sock_fd, &st) < 0)
150 if (S_ISSOCK(st.st_mode))
151 dmsg_crypto_negotiate(iocom);
154 * Make sure our fds are set to non-blocking for the iocom core.
157 fcntl(sock_fd, F_SETFL, O_NONBLOCK);
159 /* if line buffered our single fgets() should be fine */
161 fcntl(alt_fd, F_SETFL, O_NONBLOCK);
/*
 * Replace the iocom's debug label with a printf-formatted string.
 * NOTE(review): the old label (allocated by dmsg_iocom_init) is not freed
 * in the lines visible here — the full source presumably saves and frees
 * it around the vasprintf(); confirm before flagging a leak.
 */
166 dmsg_iocom_label(dmsg_iocom_t *iocom, const char *ctl, ...)
173 vasprintf(&iocom->label, ctl, va);
180 * May only be called from a callback from iocom_core.
182 * Adjust state machine functions, set flags to guarantee that both
183 * the recevmsg_func and the sendmsg_func is called at least once.
/*
 * Swap in new signal/receive callbacks under iocom->mtx.  SWORK is set
 * (or cleared) so the core loop invokes the new signal callback at least
 * once; the condition guarding set vs. clear is not visible in this
 * extraction — presumably "signal_func != NULL".
 */
186 dmsg_iocom_restate(dmsg_iocom_t *iocom,
187 void (*signal_func)(dmsg_iocom_t *),
188 void (*rcvmsg_func)(dmsg_msg_t *msg))
190 pthread_mutex_lock(&iocom->mtx);
191 iocom->signal_callback = signal_func;
192 iocom->rcvmsg_callback = rcvmsg_func;
194 atomic_set_int(&iocom->flags, DMSG_IOCOMF_SWORK);
196 atomic_clear_int(&iocom->flags, DMSG_IOCOMF_SWORK);
197 pthread_mutex_unlock(&iocom->mtx);
/*
 * Request that the core loop run the signal callback: sets SWORK under
 * the iocom mutex, but only if a signal callback is installed.
 */
201 dmsg_iocom_signal(dmsg_iocom_t *iocom)
203 pthread_mutex_lock(&iocom->mtx);
204 if (iocom->signal_callback)
205 atomic_set_int(&iocom->flags, DMSG_IOCOMF_SWORK);
206 pthread_mutex_unlock(&iocom->mtx);
210 * Cleanup a terminating iocom.
212 * Caller should not hold iocom->mtx. The iocom has already been disconnected
213 * from all possible references to it.
/*
 * Release everything the iocom owns: close the socket and (optionally)
 * the alt fd, tear down both ioqs, drain and free both message free
 * queues, close the wakeup pipe, and destroy the mutex.  The free()
 * calls inside the drain loops are not visible in this extraction.
 */
216 dmsg_iocom_done(dmsg_iocom_t *iocom)
220 if (iocom->sock_fd >= 0) {
221 close(iocom->sock_fd);
/* alt_fd is only closed when this iocom owns it (CLOSEALT). */
224 if (iocom->alt_fd >= 0 && (iocom->flags & DMSG_IOCOMF_CLOSEALT)) {
225 close(iocom->alt_fd);
228 dmsg_ioq_done(iocom, &iocom->ioq_rx);
229 dmsg_ioq_done(iocom, &iocom->ioq_tx);
230 while ((msg = TAILQ_FIRST(&iocom->freeq)) != NULL) {
231 TAILQ_REMOVE(&iocom->freeq, msg, qentry);
234 while ((msg = TAILQ_FIRST(&iocom->freeq_aux)) != NULL) {
235 TAILQ_REMOVE(&iocom->freeq_aux, msg, qentry);
/* aux buffer freed separately (free() call not visible here). */
237 msg->aux_data = NULL;
240 if (iocom->wakeupfds[0] >= 0) {
241 close(iocom->wakeupfds[0]);
242 iocom->wakeupfds[0] = -1;
244 if (iocom->wakeupfds[1] >= 0) {
245 close(iocom->wakeupfds[1]);
246 iocom->wakeupfds[1] = -1;
248 pthread_mutex_destroy(&iocom->mtx);
252 * Allocate a new message using the specified transaction state.
254 * If CREATE is set a new transaction is allocated relative to the passed-in
255 * transaction (the 'state' argument becomes pstate).
257 * If CREATE is not set the message is associated with the passed-in
/*
 * Locking wrapper: takes iocom->mtx and delegates to
 * dmsg_msg_alloc_locked().  Returns the allocated message (return
 * statement not visible in this extraction).
 */
261 dmsg_msg_alloc(dmsg_state_t *state,
262 size_t aux_size, uint32_t cmd,
263 void (*func)(dmsg_msg_t *), void *data)
265 dmsg_iocom_t *iocom = state->iocom;
268 pthread_mutex_lock(&iocom->mtx);
269 msg = dmsg_msg_alloc_locked(state, aux_size, cmd, func, data);
270 pthread_mutex_unlock(&iocom->mtx);
/*
 * Core message allocator (iocom->mtx held).
 *
 * Recycles a dmsg_msg_t from freeq_aux/freeq when possible, otherwise
 * mallocs one (with a trailing guard int after the header area).  When
 * cmd has CREATE without REPLY, a brand-new dynamic transaction state is
 * allocated under the passed-in state (which becomes pstate) and
 * inserted into the statewr tree and the parent's subq.  Finally the
 * core header is populated: magic, cmd, msgid (this state) and circuit
 * (parent state's msgid).
 *
 * NOTE(review): many interior lines are missing from this extraction
 * (the else-branches, hbytes fixups, guard value); comments below only
 * describe what is visible.
 */
276 dmsg_msg_alloc_locked(dmsg_state_t *state,
277 size_t aux_size, uint32_t cmd,
278 void (*func)(dmsg_msg_t *), void *data)
280 dmsg_iocom_t *iocom = state->iocom;
281 dmsg_state_t *pstate;
288 aligned_size = DMSG_DOALIGN(aux_size);
/* Prefer the aux-capable free queue first, then the plain one. */
289 if ((msg = TAILQ_FIRST(&iocom->freeq_aux)) != NULL)
290 TAILQ_REMOVE(&iocom->freeq_aux, msg, qentry);
293 if ((msg = TAILQ_FIRST(&iocom->freeq)) != NULL)
294 TAILQ_REMOVE(&iocom->freeq, msg, qentry);
297 aligned_size = DMSG_DOALIGN(aux_size);
299 if ((cmd & (DMSGF_CREATE | DMSGF_REPLY)) == DMSGF_CREATE) {
301 * When CREATE is set without REPLY the caller is
302 * initiating a new transaction stacked under the specified
305 * NOTE: CREATE in txcmd handled by dmsg_msg_write()
306 * NOTE: DELETE in txcmd handled by dmsg_state_cleanuptx()
309 state = malloc(sizeof(*state));
310 atomic_add_int(&dmsg_state_count, 1);
311 bzero(state, sizeof(*state));
312 TAILQ_INIT(&state->subq);
313 dmsg_state_hold(pstate);
315 state->parent = pstate;
316 state->iocom = iocom;
317 state->flags = DMSG_STATE_DYNAMIC;
/* msgid for locally-initiated transactions is the state pointer itself. */
318 state->msgid = (uint64_t)(uintptr_t)state;
319 state->txcmd = cmd & ~(DMSGF_CREATE | DMSGF_DELETE);
320 state->rxcmd = DMSGF_REPLY;
321 state->icmd = state->txcmd & DMSGF_BASECMDMASK;
323 state->any.any = data;
325 RB_INSERT(dmsg_state_tree, &iocom->statewr_tree, state);
326 TAILQ_INSERT_TAIL(&pstate->subq, state, entry);
327 state->flags |= DMSG_STATE_INSERTED;
331 "create state %p id=%08x on iocom statewr %p\n",
332 state, (uint32_t)state->msgid, iocom);
336 * Otherwise the message is transmitted over the existing
339 pstate = state->parent;
342 /* XXX SMP race for state */
/* Extended header size is encoded in the cmd's DMSGF_SIZE field. */
343 hbytes = (cmd & DMSGF_SIZE) * DMSG_ALIGN;
/* +4 leaves room for a trailing guard int used to detect overruns. */
345 msg = malloc(offsetof(struct dmsg_msg, any.head) + hbytes + 4);
346 bzero(msg, offsetof(struct dmsg_msg, any.head));
347 *(int *)((char *)msg +
348 offsetof(struct dmsg_msg, any.head) + hbytes) =
352 "allo msg %p id=%08x on iocom %p\n",
353 msg, (int)msg->any.head.msgid, iocom);
356 msg = malloc(sizeof(*msg));
357 bzero(msg, sizeof(*msg));
362 * [re]allocate the auxillary data buffer. The caller knows that
363 * a size-aligned buffer will be allocated but we do not want to
364 * force the caller to zero any tail piece, so we do that ourself.
366 if (msg->aux_size != aux_size) {
369 msg->aux_data = NULL;
373 msg->aux_data = malloc(aligned_size);
374 msg->aux_size = aux_size;
375 if (aux_size != aligned_size) {
/* Zero the alignment padding so the tail is deterministic on the wire. */
376 bzero(msg->aux_data + aux_size,
377 aligned_size - aux_size);
383 * Set REVTRANS if the transaction was remotely initiated
384 * Set REVCIRC if the circuit was remotely initiated
386 if (state->flags & DMSG_STATE_OPPOSITE)
387 cmd |= DMSGF_REVTRANS;
388 if (pstate->flags & DMSG_STATE_OPPOSITE)
389 cmd |= DMSGF_REVCIRC;
392 * Finish filling out the header.
395 bzero(&msg->any.head, hbytes);
396 msg->hdr_size = hbytes;
397 msg->any.head.magic = DMSG_HDR_MAGIC;
398 msg->any.head.cmd = cmd;
399 msg->any.head.aux_descr = 0;
400 msg->any.head.aux_crc = 0;
401 msg->any.head.msgid = state->msgid;
402 msg->any.head.circuit = pstate->msgid;
409 * Free a message so it can be reused afresh.
411 * NOTE: aux_size can be 0 with a non-NULL aux_data.
/*
 * Return a message to the appropriate per-iocom free queue
 * (freeq_aux when it still carries an aux buffer, freeq otherwise).
 * Also verifies the trailing guard int placed after the header by
 * dmsg_msg_alloc_locked() and reports corruption to stderr.
 */
415 dmsg_msg_free_locked(dmsg_msg_t *msg)
417 /*dmsg_iocom_t *iocom = msg->iocom;*/
421 "free msg %p id=%08x on (aux %p)\n",
422 msg, (int)msg->any.head.msgid, msg->aux_data);
425 int hbytes = (msg->any.head.cmd & DMSGF_SIZE) * DMSG_ALIGN;
426 if (*(int *)((char *)msg +
427 offsetof(struct dmsg_msg, any.head) + hbytes) !=
429 fprintf(stderr, "MSGFREE FAILED CMD %08x\n", msg->any.head.cmd);
433 msg->state = NULL; /* safety */
436 msg->aux_data = NULL;
442 TAILQ_INSERT_TAIL(&iocom->freeq_aux, msg, qentry);
444 TAILQ_INSERT_TAIL(&iocom->freeq, msg, qentry);
/*
 * Locking wrapper around dmsg_msg_free_locked(): takes the owning
 * iocom's mutex (via the message's state) for the duration of the free.
 */
449 dmsg_msg_free(dmsg_msg_t *msg)
451 dmsg_iocom_t *iocom = msg->state->iocom;
453 pthread_mutex_lock(&iocom->mtx);
454 dmsg_msg_free_locked(msg);
455 pthread_mutex_unlock(&iocom->mtx);
459 * I/O core loop for an iocom.
461 * Thread localized, iocom->mtx not held.
/*
 * Main event loop: polls the wakeup pipe, the socket (read/write as
 * requested by RREQ/WREQ), and the optional alt fd; translates poll
 * results into *WORK flags; then services, in order: the signal
 * callback (SWORK), the wakeup pipe (PWORK), transmit flushing
 * (WWORK), message reception (RWORK), and alt-fd work (ARWORK).
 * Loops until the EOF flag is set.
 */
464 dmsg_iocom_core(dmsg_iocom_t *iocom)
466 struct pollfd fds[3];
471 int wi; /* wakeup pipe */
473 int ai; /* alt bulk path socket */
475 while ((iocom->flags & DMSG_IOCOMF_EOF) == 0) {
477 * These iocom->flags are only manipulated within the
478 * context of the current thread. However, modifications
479 * still require atomic ops.
481 if ((iocom->flags & (DMSG_IOCOMF_RWORK |
486 DMSG_IOCOMF_AWWORK)) == 0) {
488 * Only poll if no immediate work is pending.
489 * Otherwise we are just wasting our time calling
500 * Always check the inter-thread pipe, e.g.
501 * for iocom->txmsgq work.
504 fds[wi].fd = iocom->wakeupfds[0];
505 fds[wi].events = POLLIN;
509 * Check the socket input/output direction as
512 if (iocom->flags & (DMSG_IOCOMF_RREQ |
515 fds[si].fd = iocom->sock_fd;
519 if (iocom->flags & DMSG_IOCOMF_RREQ)
520 fds[si].events |= POLLIN;
521 if (iocom->flags & DMSG_IOCOMF_WREQ)
522 fds[si].events |= POLLOUT;
526 * Check the alternative fd for work.
528 if (iocom->alt_fd >= 0) {
530 fds[ai].fd = iocom->alt_fd;
531 fds[ai].events = POLLIN;
534 poll(fds, count, timeout);
/* Map poll revents back onto the iocom work flags. */
536 if (wi >= 0 && (fds[wi].revents & POLLIN))
537 atomic_set_int(&iocom->flags,
539 if (si >= 0 && (fds[si].revents & POLLIN))
540 atomic_set_int(&iocom->flags,
542 if (si >= 0 && (fds[si].revents & POLLOUT))
543 atomic_set_int(&iocom->flags,
545 if (wi >= 0 && (fds[wi].revents & POLLOUT))
546 atomic_set_int(&iocom->flags,
548 if (ai >= 0 && (fds[ai].revents & POLLIN))
549 atomic_set_int(&iocom->flags,
553 * Always check the pipe
555 atomic_set_int(&iocom->flags, DMSG_IOCOMF_PWORK);
558 if (iocom->flags & DMSG_IOCOMF_SWORK) {
559 atomic_clear_int(&iocom->flags, DMSG_IOCOMF_SWORK);
560 iocom->signal_callback(iocom);
564 * Pending message queues from other threads wake us up
565 * with a write to the wakeupfds[] pipe. We have to clear
566 * the pipe with a dummy read.
568 if (iocom->flags & DMSG_IOCOMF_PWORK) {
569 atomic_clear_int(&iocom->flags, DMSG_IOCOMF_PWORK);
570 read(iocom->wakeupfds[0], dummybuf, sizeof(dummybuf));
571 atomic_set_int(&iocom->flags, DMSG_IOCOMF_RWORK);
572 atomic_set_int(&iocom->flags, DMSG_IOCOMF_WWORK);
573 if (TAILQ_FIRST(&iocom->txmsgq))
574 dmsg_iocom_flush1(iocom);
578 * Message write sequencing
580 if (iocom->flags & DMSG_IOCOMF_WWORK)
581 dmsg_iocom_flush1(iocom);
584 * Message read sequencing. Run this after the write
585 * sequencing in case the write sequencing allowed another
586 * auto-DELETE to occur on the read side.
588 if (iocom->flags & DMSG_IOCOMF_RWORK) {
589 while ((iocom->flags & DMSG_IOCOMF_EOF) == 0 &&
590 (msg = dmsg_ioq_read(iocom)) != NULL) {
592 fprintf(stderr, "receive %s\n",
595 iocom->rcvmsg_callback(msg);
596 dmsg_state_cleanuprx(iocom, msg);
600 if (iocom->flags & DMSG_IOCOMF_ARWORK) {
601 atomic_clear_int(&iocom->flags, DMSG_IOCOMF_ARWORK);
602 iocom->altmsg_callback(iocom);
608 * Make sure there's enough room in the FIFO to hold the
611 * Assume worst case encrypted form is 2x the size of the
612 * plaintext equivalent.
/*
 * Compact the receive FIFO when the tail space (nmax) plus already
 * decrypted bytes cannot satisfy 'needed': slide the decrypted region
 * to the front of the buffer, then pull any not-yet-decrypted bytes
 * down behind it.  Returns the resulting tail space (return statement
 * not visible in this extraction).
 */
616 dmsg_ioq_makeroom(dmsg_ioq_t *ioq, size_t needed)
621 bytes = ioq->fifo_cdx - ioq->fifo_beg;
622 nmax = sizeof(ioq->buf) - ioq->fifo_end;
623 if (bytes + nmax / 2 < needed) {
625 bcopy(ioq->buf + ioq->fifo_beg,
629 ioq->fifo_cdx -= ioq->fifo_beg;
631 if (ioq->fifo_cdn < ioq->fifo_end) {
632 bcopy(ioq->buf + ioq->fifo_cdn,
633 ioq->buf + ioq->fifo_cdx,
634 ioq->fifo_end - ioq->fifo_cdn);
636 ioq->fifo_end -= ioq->fifo_cdn - ioq->fifo_cdx;
637 ioq->fifo_cdn = ioq->fifo_cdx;
638 nmax = sizeof(ioq->buf) - ioq->fifo_end;
644 * Read the next ready message from the ioq, issuing I/O if needed.
645 * Caller should retry on a read-event when NULL is returned.
647 * If an error occurs during reception a DMSG_LNK_ERROR msg will
648 * be returned for each open transaction, then the ioq and iocom
649 * will be errored out and a non-transactional DMSG_LNK_ERROR
650 * msg will be returned as the final message. The caller should not call
651 * us again after the final message is returned.
653 * Thread localized, iocom->mtx not held.
/*
 * Receive-side state machine: HEADER1 (core header) -> HEADER2
 * (extended header + hdr_crc) -> AUXDATA1/AUXDATA2 (payload + aux_crc)
 * -> routing via dmsg_state_msgrx().  ERROR drains open transactions
 * with simulated LNK_ERROR messages.  Many interior lines (returns,
 * declarations, some branch bodies) are missing from this extraction.
 */
656 dmsg_ioq_read(dmsg_iocom_t *iocom)
658 dmsg_ioq_t *ioq = &iocom->ioq_rx;
671 * If a message is already pending we can just remove and
672 * return it. Message state has already been processed.
673 * (currently not implemented)
675 if ((msg = TAILQ_FIRST(&ioq->msgq)) != NULL) {
676 TAILQ_REMOVE(&ioq->msgq, msg, qentry);
679 atomic_clear_int(&iocom->flags, DMSG_IOCOMF_RREQ | DMSG_IOCOMF_RWORK);
682 * If the stream is errored out we stop processing it.
688 * Message read in-progress (msg is NULL at the moment). We don't
689 * allocate a msg until we have its core header.
691 nmax = sizeof(ioq->buf) - ioq->fifo_end;
692 bytes = ioq->fifo_cdx - ioq->fifo_beg; /* already decrypted */
696 case DMSG_MSGQ_STATE_HEADER1:
698 * Load the primary header, fail on any non-trivial read
699 * error or on EOF. Since the primary header is the same
700 * size is the message alignment it will never straddle
701 * the end of the buffer.
703 nmax = dmsg_ioq_makeroom(ioq, sizeof(msg->any.head));
704 if (bytes < sizeof(msg->any.head)) {
705 n = read(iocom->sock_fd,
706 ioq->buf + ioq->fifo_end,
710 ioq->error = DMSG_IOQ_ERROR_EOF;
/* EINTR/EINPROGRESS/EAGAIN-style soft errors are retried, not fatal. */
713 if (errno != EINTR &&
714 errno != EINPROGRESS &&
716 ioq->error = DMSG_IOQ_ERROR_SOCK;
722 ioq->fifo_end += (size_t)n;
727 * Decrypt data received so far. Data will be decrypted
728 * in-place but might create gaps in the FIFO. Partial
729 * blocks are not immediately decrypted.
731 * WARNING! The header might be in the wrong endian, we
732 * do not fix it up until we get the entire
735 if (iocom->flags & DMSG_IOCOMF_CRYPTED) {
736 dmsg_crypto_decrypt(iocom, ioq);
738 ioq->fifo_cdx = ioq->fifo_end;
739 ioq->fifo_cdn = ioq->fifo_end;
741 bytes = ioq->fifo_cdx - ioq->fifo_beg;
744 * Insufficient data accumulated (msg is NULL, caller will
748 if (bytes < sizeof(msg->any.head))
752 * Check and fixup the core header. Note that the icrc
753 * has to be calculated before any fixups, but the crc
754 * fields in the msg may have to be swapped like everything
757 head = (void *)(ioq->buf + ioq->fifo_beg);
758 if (head->magic != DMSG_HDR_MAGIC &&
759 head->magic != DMSG_HDR_MAGIC_REV) {
760 fprintf(stderr, "%s: head->magic is bad %02x\n",
761 iocom->label, head->magic);
762 if (iocom->flags & DMSG_IOCOMF_CRYPTED)
763 fprintf(stderr, "(on encrypted link)\n");
764 ioq->error = DMSG_IOQ_ERROR_SYNC;
769 * Calculate the full header size and aux data size
/* MAGIC_REV means the peer is opposite-endian; sizes must be swapped. */
771 if (head->magic == DMSG_HDR_MAGIC_REV) {
772 ioq->hbytes = (bswap32(head->cmd) & DMSGF_SIZE) *
774 aux_size = bswap32(head->aux_bytes);
776 ioq->hbytes = (head->cmd & DMSGF_SIZE) *
778 aux_size = head->aux_bytes;
780 ioq->abytes = DMSG_DOALIGN(aux_size);
781 ioq->unaligned_aux_size = aux_size;
/* Sanity-bound header and aux sizes before allocating anything. */
782 if (ioq->hbytes < sizeof(msg->any.head) ||
783 ioq->hbytes > sizeof(msg->any) ||
784 ioq->abytes > DMSG_AUX_MAX) {
785 ioq->error = DMSG_IOQ_ERROR_FIELD;
790 * Allocate the message, the next state will fill it in.
792 * NOTE: The aux_data buffer will be sized to an aligned
793 * value and the aligned remainder zero'd for
796 * NOTE: Supply dummy state and a degenerate cmd without
797 * CREATE set. The message will temporarily be
798 * associated with state0 until later post-processing.
800 msg = dmsg_msg_alloc(&iocom->state0, aux_size,
801 ioq->hbytes / DMSG_ALIGN,
806 * Fall through to the next state. Make sure that the
807 * extended header does not straddle the end of the buffer.
808 * We still want to issue larger reads into our buffer,
809 * book-keeping is easier if we don't bcopy() yet.
811 * Make sure there is enough room for bloated encrypt data.
813 nmax = dmsg_ioq_makeroom(ioq, ioq->hbytes);
814 ioq->state = DMSG_MSGQ_STATE_HEADER2;
/* fallthrough */
816 case DMSG_MSGQ_STATE_HEADER2:
818 * Fill out the extended header.
821 if (bytes < ioq->hbytes) {
822 n = read(iocom->sock_fd,
823 ioq->buf + ioq->fifo_end,
827 ioq->error = DMSG_IOQ_ERROR_EOF;
830 if (errno != EINTR &&
831 errno != EINPROGRESS &&
833 ioq->error = DMSG_IOQ_ERROR_SOCK;
839 ioq->fifo_end += (size_t)n;
843 if (iocom->flags & DMSG_IOCOMF_CRYPTED) {
844 dmsg_crypto_decrypt(iocom, ioq);
846 ioq->fifo_cdx = ioq->fifo_end;
847 ioq->fifo_cdn = ioq->fifo_end;
849 bytes = ioq->fifo_cdx - ioq->fifo_beg;
852 * Insufficient data accumulated (set msg NULL so caller will
855 if (bytes < ioq->hbytes) {
861 * Calculate the extended header, decrypt data received
862 * so far. Handle endian-conversion for the entire extended
865 head = (void *)(ioq->buf + ioq->fifo_beg);
/* hdr_crc is verified before any endian fixup of the header proper. */
870 if (head->magic == DMSG_HDR_MAGIC_REV)
871 xcrc32 = bswap32(head->hdr_crc);
873 xcrc32 = head->hdr_crc;
875 if (dmsg_icrc32(head, ioq->hbytes) != xcrc32) {
876 ioq->error = DMSG_IOQ_ERROR_XCRC;
877 fprintf(stderr, "BAD-XCRC(%08x,%08x) %s\n",
878 xcrc32, dmsg_icrc32(head, ioq->hbytes),
883 head->hdr_crc = xcrc32;
885 if (head->magic == DMSG_HDR_MAGIC_REV) {
886 dmsg_bswap_head(head);
890 * Copy the extended header into the msg and adjust the
893 bcopy(head, &msg->any, ioq->hbytes);
896 * We are either done or we fall-through.
898 if (ioq->abytes == 0) {
899 ioq->fifo_beg += ioq->hbytes;
904 * Must adjust bytes (and the state) when falling through.
905 * nmax doesn't change.
907 ioq->fifo_beg += ioq->hbytes;
908 bytes -= ioq->hbytes;
909 ioq->state = DMSG_MSGQ_STATE_AUXDATA1;
/* fallthrough */
911 case DMSG_MSGQ_STATE_AUXDATA1:
913 * Copy the partial or complete [decrypted] payload from
914 * remaining bytes in the FIFO in order to optimize the
915 * makeroom call in the AUXDATA2 state. We have to
916 * fall-through either way so we can check the crc.
918 * msg->aux_size tracks our aux data.
920 * (Lets not complicate matters if the data is encrypted,
921 * since the data in-stream is not the same size as the
924 if (bytes >= ioq->abytes) {
925 bcopy(ioq->buf + ioq->fifo_beg, msg->aux_data,
927 msg->aux_size = ioq->abytes;
928 ioq->fifo_beg += ioq->abytes;
929 assert(ioq->fifo_beg <= ioq->fifo_cdx);
930 assert(ioq->fifo_cdx <= ioq->fifo_cdn);
931 bytes -= ioq->abytes;
933 bcopy(ioq->buf + ioq->fifo_beg, msg->aux_data,
935 msg->aux_size = bytes;
936 ioq->fifo_beg += bytes;
937 if (ioq->fifo_cdx < ioq->fifo_beg)
938 ioq->fifo_cdx = ioq->fifo_beg;
939 assert(ioq->fifo_beg <= ioq->fifo_cdx);
940 assert(ioq->fifo_cdx <= ioq->fifo_cdn);
945 ioq->state = DMSG_MSGQ_STATE_AUXDATA2;
/* fallthrough */
947 case DMSG_MSGQ_STATE_AUXDATA2:
949 * Make sure there is enough room for more data.
952 nmax = dmsg_ioq_makeroom(ioq, ioq->abytes - msg->aux_size);
955 * Read and decrypt more of the payload.
957 if (msg->aux_size < ioq->abytes) {
959 n = read(iocom->sock_fd,
960 ioq->buf + ioq->fifo_end,
964 ioq->error = DMSG_IOQ_ERROR_EOF;
967 if (errno != EINTR &&
968 errno != EINPROGRESS &&
970 ioq->error = DMSG_IOQ_ERROR_SOCK;
976 ioq->fifo_end += (size_t)n;
980 if (iocom->flags & DMSG_IOCOMF_CRYPTED) {
981 dmsg_crypto_decrypt(iocom, ioq);
983 ioq->fifo_cdx = ioq->fifo_end;
984 ioq->fifo_cdn = ioq->fifo_end;
986 bytes = ioq->fifo_cdx - ioq->fifo_beg;
988 if (bytes > ioq->abytes - msg->aux_size)
989 bytes = ioq->abytes - msg->aux_size;
992 bcopy(ioq->buf + ioq->fifo_beg,
993 msg->aux_data + msg->aux_size,
995 msg->aux_size += bytes;
996 ioq->fifo_beg += bytes;
1000 * Insufficient data accumulated (set msg NULL so caller will
1003 * Assert the auxillary data size is correct, then record the
1004 * original unaligned size from the message header.
1006 if (msg->aux_size < ioq->abytes) {
1010 assert(msg->aux_size == ioq->abytes);
1011 msg->aux_size = ioq->unaligned_aux_size;
1014 * Check aux_crc, then we are done. Note that the crc
1015 * is calculated over the aligned size, not the actual
1018 xcrc32 = dmsg_icrc32(msg->aux_data, ioq->abytes);
1019 if (xcrc32 != msg->any.head.aux_crc) {
1020 ioq->error = DMSG_IOQ_ERROR_ACRC;
1022 "iocom: ACRC error %08x vs %08x "
1023 "msgid %016jx msgcmd %08x auxsize %d\n",
1025 msg->any.head.aux_crc,
1026 (intmax_t)msg->any.head.msgid,
1028 msg->any.head.aux_bytes);
1032 case DMSG_MSGQ_STATE_ERROR:
1034 * Continued calls to drain recorded transactions (returning
1035 * a LNK_ERROR for each one), before we return the final
1038 assert(msg == NULL);
1042 * We don't double-return errors, the caller should not
1043 * have called us again after getting an error msg.
1050 * Check the message sequence. The iv[] should prevent any
1051 * possibility of a replay but we add this check anyway.
1053 if (msg && ioq->error == 0) {
/* Low 8 bits of the salt double as a per-link sequence check. */
1054 if ((msg->any.head.salt & 255) != (ioq->seq & 255)) {
1055 ioq->error = DMSG_IOQ_ERROR_MSGSEQ;
1062 * Handle error, RREQ, or completion
1064 * NOTE: nmax and bytes are invalid at this point, we don't bother
1065 * to update them when breaking out.
1068 dmsg_state_t *tmp_state;
1070 fprintf(stderr, "IOQ ERROR %d\n", ioq->error);
1072 * An unrecoverable error causes all active receive
1073 * transactions to be terminated with a LNK_ERROR message.
1075 * Once all active transactions are exhausted we set the
1076 * iocom ERROR flag and return a non-transactional LNK_ERROR
1077 * message, which should cause master processing loops to
1080 assert(ioq->msg == msg);
1088 * No more I/O read processing
1090 ioq->state = DMSG_MSGQ_STATE_ERROR;
1093 * Simulate a remote LNK_ERROR DELETE msg for any open
1094 * transactions, ending with a final non-transactional
1095 * LNK_ERROR (that the session can detect) when no
1096 * transactions remain.
1098 * NOTE: Temporarily supply state0 and a degenerate cmd
1099 * without CREATE set. The real state will be
1100 * assigned in the loop.
1102 * NOTE: We are simulating a received message using our
1103 * side of the state, so the DMSGF_REV* bits have
1106 pthread_mutex_lock(&iocom->mtx);
1107 dmsg_iocom_drain(iocom);
/* Pick a leaf state (empty subq) to fail first; mark everything DYING. */
1110 RB_FOREACH(state, dmsg_state_tree, &iocom->staterd_tree) {
1111 atomic_set_int(&state->flags, DMSG_STATE_DYING);
1112 if (tmp_state == NULL && TAILQ_EMPTY(&state->subq))
1115 RB_FOREACH(state, dmsg_state_tree, &iocom->statewr_tree) {
1116 atomic_set_int(&state->flags, DMSG_STATE_DYING);
1117 if (tmp_state == NULL && TAILQ_EMPTY(&state->subq))
1122 dmsg_msg_simulate_failure(tmp_state, ioq->error);
1124 dmsg_msg_simulate_failure(&iocom->state0, ioq->error);
1126 pthread_mutex_unlock(&iocom->mtx);
1127 if (TAILQ_FIRST(&ioq->msgq))
1132 * For the iocom error case we want to set RWORK to indicate
1133 * that more messages might be pending.
1135 * It is possible to return NULL when there is more work to
1136 * do because each message has to be DELETEd in both
1137 * directions before we continue on with the next (though
1138 * this could be optimized). The transmit direction will
1142 atomic_set_int(&iocom->flags, DMSG_IOCOMF_RWORK);
1144 } else if (msg == NULL) {
1146 * Insufficient data received to finish building the message,
1147 * set RREQ and return NULL.
1149 * Leave ioq->msg intact.
1150 * Leave the FIFO intact.
1152 atomic_set_int(&iocom->flags, DMSG_IOCOMF_RREQ);
1155 * Continue processing msg.
1157 * The fifo has already been advanced past the message.
1158 * Trivially reset the FIFO indices if possible.
1160 * clear the FIFO if it is now empty and set RREQ to wait
1161 * for more from the socket. If the FIFO is not empty set
1162 * TWORK to bypass the poll so we loop immediately.
1164 if (ioq->fifo_beg == ioq->fifo_cdx &&
1165 ioq->fifo_cdn == ioq->fifo_end) {
1166 atomic_set_int(&iocom->flags, DMSG_IOCOMF_RREQ);
1172 atomic_set_int(&iocom->flags, DMSG_IOCOMF_RWORK);
1174 ioq->state = DMSG_MSGQ_STATE_HEADER1;
1178 * Handle message routing. Validates non-zero sources
1179 * and routes message. Error will be 0 if the message is
1182 * State processing only occurs for messages destined for us.
1184 if (DMsgDebugOpt >= 5) {
1186 "rxmsg cmd=%08x msgid=%016jx circ=%016jx\n",
1188 (intmax_t)msg->any.head.msgid,
1189 (intmax_t)msg->any.head.circuit);
1191 error = dmsg_state_msgrx(msg);
1195 * Abort-after-closure, throw message away and
1196 * start reading another.
1198 if (error == DMSG_IOQ_ERROR_EALREADY) {
1204 * Process real error and throw away message.
1209 /* no error, not routed. Fall through and return msg */
1215 * Calculate the header and data crc's and write a low-level message to
1216 * the connection. If aux_crc is non-zero the aux_data crc is already
1217 * assumed to have been set.
1219 * A non-NULL msg is added to the queue but not necessarily flushed.
1220 * Calling this function with msg == NULL will get a flush going.
1222 * (called from iocom_core only)
/*
 * Stage queued tx messages: move iocom->txmsgq onto a local list under
 * the mutex, then (lock dropped) stamp magic/salt, compute aux and
 * header CRCs, and append each message to ioq_tx.msgq before invoking
 * dmsg_iocom_flush2() to perform the actual write.
 */
1225 dmsg_iocom_flush1(dmsg_iocom_t *iocom)
1227 dmsg_ioq_t *ioq = &iocom->ioq_tx;
1232 dmsg_msg_queue_t tmpq;
1234 atomic_clear_int(&iocom->flags, DMSG_IOCOMF_WREQ | DMSG_IOCOMF_WWORK);
/* Grab the whole txmsgq in one short critical section. */
1236 pthread_mutex_lock(&iocom->mtx);
1237 while ((msg = TAILQ_FIRST(&iocom->txmsgq)) != NULL) {
1238 TAILQ_REMOVE(&iocom->txmsgq, msg, qentry);
1239 TAILQ_INSERT_TAIL(&tmpq, msg, qentry);
1241 pthread_mutex_unlock(&iocom->mtx);
1243 while ((msg = TAILQ_FIRST(&tmpq)) != NULL) {
1245 * Process terminal connection errors.
1247 TAILQ_REMOVE(&tmpq, msg, qentry);
1249 TAILQ_INSERT_TAIL(&ioq->msgq, msg, qentry);
1255 * Finish populating the msg fields. The salt ensures that
1256 * the iv[] array is ridiculously randomized and we also
1257 * re-seed our PRNG every 32768 messages just to be sure.
1259 msg->any.head.magic = DMSG_HDR_MAGIC;
/* Low 8 salt bits carry the tx sequence (checked by the receiver). */
1260 msg->any.head.salt = (random() << 8) | (ioq->seq & 255);
1262 if ((ioq->seq & 32767) == 0)
1266 * Calculate aux_crc if 0, then calculate hdr_crc.
1268 if (msg->aux_size && msg->any.head.aux_crc == 0) {
1269 abytes = DMSG_DOALIGN(msg->aux_size);
1270 xcrc32 = dmsg_icrc32(msg->aux_data, abytes);
1271 msg->any.head.aux_crc = xcrc32;
1273 msg->any.head.aux_bytes = msg->aux_size;
1275 hbytes = (msg->any.head.cmd & DMSGF_SIZE) *
/* hdr_crc is computed with the field itself zeroed. */
1277 msg->any.head.hdr_crc = 0;
1278 msg->any.head.hdr_crc = dmsg_icrc32(&msg->any.head, hbytes);
1281 * Enqueue the message (the flush codes handles stream
1284 TAILQ_INSERT_TAIL(&ioq->msgq, msg, qentry);
1287 dmsg_iocom_flush2(iocom);
1291 * Thread localized, iocom->mtx not held by caller.
1293 * (called from iocom_core via iocom_flush1 only)
/*
 * Low-level transmit: build an iovec over the queued messages' headers
 * and aux buffers (resuming mid-message via ioq->hbytes/abytes), either
 * encrypt-and-stage through the FIFO or writev() directly, then retire
 * fully-sent messages and translate write errors/EAGAIN into WREQ or a
 * drained, errored ioq.  Interior lines are missing from this
 * extraction; comments describe only what is visible.
 */
1296 dmsg_iocom_flush2(dmsg_iocom_t *iocom)
1298 dmsg_ioq_t *ioq = &iocom->ioq_tx;
1301 struct iovec iov[DMSG_IOQ_MAXIOVEC];
1310 dmsg_iocom_drain(iocom);
1315 * Pump messages out the connection by building an iovec.
1317 * ioq->hbytes/ioq->abytes tracks how much of the first message
1318 * in the queue has been successfully written out, so we can
1326 TAILQ_FOREACH(msg, &ioq->msgq, qentry) {
1327 hbytes = (msg->any.head.cmd & DMSGF_SIZE) *
1329 abytes = DMSG_DOALIGN(msg->aux_size);
1330 assert(hoff <= hbytes && aoff <= abytes);
/* Unsent portion of the header, capped to half the FIFO. */
1332 if (hoff < hbytes) {
1333 size_t maxlen = hbytes - hoff;
1334 if (maxlen > sizeof(ioq->buf) / 2)
1335 maxlen = sizeof(ioq->buf) / 2;
1336 iov[iovcnt].iov_base = (char *)&msg->any.head + hoff;
1337 iov[iovcnt].iov_len = maxlen;
1340 if (iovcnt == DMSG_IOQ_MAXIOVEC ||
1341 maxlen != hbytes - hoff) {
/* Unsent portion of the aux data, same cap. */
1345 if (aoff < abytes) {
1346 size_t maxlen = abytes - aoff;
1347 if (maxlen > sizeof(ioq->buf) / 2)
1348 maxlen = sizeof(ioq->buf) / 2;
1350 assert(msg->aux_data != NULL);
1351 iov[iovcnt].iov_base = (char *)msg->aux_data + aoff;
1352 iov[iovcnt].iov_len = maxlen;
1355 if (iovcnt == DMSG_IOQ_MAXIOVEC ||
1356 maxlen != abytes - aoff) {
1367 * Encrypt and write the data. The crypto code will move the
1368 * data into the fifo and adjust the iov as necessary. If
1369 * encryption is disabled the iov is left alone.
1371 * May return a smaller iov (thus a smaller n), with aggregated
1372 * chunks. May reduce nmax to what fits in the FIFO.
1374 * This function sets nact to the number of original bytes now
1375 * encrypted, adding to the FIFO some number of bytes that might
1376 * be greater depending on the crypto mechanic. iov[] is adjusted
1377 * to point at the FIFO if necessary.
1379 * NOTE: nact is the number of bytes eaten from the message. For
1380 * encrypted data this is the number of bytes processed for
1381 * encryption and not necessarily the number of bytes writable.
1382 * The return value from the writev() is the post-encrypted
1383 * byte count which might be larger.
1385 * NOTE: For direct writes, nact is the return value from the writev().
1387 if (iocom->flags & DMSG_IOCOMF_CRYPTED) {
1389 * Make sure the FIFO has a reasonable amount of space
1390 * left (if not completely full).
1392 * In this situation we are staging the encrypted message
1393 * data in the FIFO. (nact) represents how much plaintext
1394 * has been staged, (n) represents how much encrypted data
1395 * has been flushed. The two are independent of each other.
1397 if (ioq->fifo_beg > sizeof(ioq->buf) / 2 &&
1398 sizeof(ioq->buf) - ioq->fifo_end < DMSG_ALIGN * 2) {
1399 bcopy(ioq->buf + ioq->fifo_beg, ioq->buf,
1400 ioq->fifo_end - ioq->fifo_beg);
1401 ioq->fifo_cdx -= ioq->fifo_beg;
1402 ioq->fifo_cdn -= ioq->fifo_beg;
1403 ioq->fifo_end -= ioq->fifo_beg;
1408 * beg .... cdx ............ cdn ............. end
1409 * [WRITABLE] [PARTIALENCRYPT] [NOTYETENCRYPTED]
1411 * Advance fifo_beg on a successful write.
1413 iovcnt = dmsg_crypto_encrypt(iocom, ioq, iov, iovcnt, &nact);
1414 n = writev(iocom->sock_fd, iov, iovcnt);
1417 if (ioq->fifo_beg == ioq->fifo_end) {
1425 * We don't mess with the nact returned by the crypto_encrypt
1426 * call, which represents the filling of the FIFO. (n) tells
1427 * us how much we were able to write from the FIFO. The two
1428 * are different beasts when encrypting.
1432 * In this situation we are not staging the messages to the
1433 * FIFO but instead writing them directly from the msg
1434 * structure(s) unencrypted, so (nact) is basically (n).
1436 n = writev(iocom->sock_fd, iov, iovcnt);
1444 * Clean out the transmit queue based on what we successfully
1445 * sent (nact is the plaintext count). ioq->hbytes/abytes
1446 * represents the portion of the first message previously sent.
1448 while ((msg = TAILQ_FIRST(&ioq->msgq)) != NULL) {
1449 hbytes = (msg->any.head.cmd & DMSGF_SIZE) *
1451 abytes = DMSG_DOALIGN(msg->aux_size);
/* Partial header sent: record progress and stop retiring. */
1453 if ((size_t)nact < hbytes - ioq->hbytes) {
1454 ioq->hbytes += nact;
1458 nact -= hbytes - ioq->hbytes;
1459 ioq->hbytes = hbytes;
1460 if ((size_t)nact < abytes - ioq->abytes) {
1461 ioq->abytes += nact;
1465 nact -= abytes - ioq->abytes;
1466 /* ioq->abytes = abytes; optimized out */
1470 "txmsg cmd=%08x msgid=%016jx circ=%016jx\n",
1472 (intmax_t)msg->any.head.msgid,
1473 (intmax_t)msg->any.head.circuit);
1476 TAILQ_REMOVE(&ioq->msgq, msg, qentry);
1485 * Process the return value from the write w/regards to blocking.
/* Hard socket errors drain the iocom; soft ones just re-arm WREQ. */
1488 if (errno != EINTR &&
1489 errno != EINPROGRESS &&
1494 ioq->error = DMSG_IOQ_ERROR_SOCK;
1495 dmsg_iocom_drain(iocom);
1498 * Wait for socket buffer space
1500 atomic_set_int(&iocom->flags, DMSG_IOCOMF_WREQ);
1503 atomic_set_int(&iocom->flags, DMSG_IOCOMF_WREQ);
1506 dmsg_iocom_drain(iocom);
1511 * Kill pending msgs on ioq_tx and adjust the flags such that no more
1512 * write events will occur. We don't kill read msgs because we want
1513 * the caller to pull off our contrived terminal error msg to detect
1514 * the connection failure.
1516 * Localized to iocom_core thread, iocom->mtx not held by caller.
dmsg_iocom_drain(dmsg_iocom_t *iocom)
	dmsg_ioq_t *ioq = &iocom->ioq_tx;	/* transmit-side ioq only */

	/*
	 * Stop generating further write events for this iocom by clearing
	 * both the write-request and write-work flags.
	 */
	atomic_clear_int(&iocom->flags, DMSG_IOCOMF_WREQ | DMSG_IOCOMF_WWORK);

	/*
	 * Dequeue every message still pending on the transmit queue and
	 * dispose of it.
	 */
	while ((msg = TAILQ_FIRST(&ioq->msgq)) != NULL) {
		TAILQ_REMOVE(&ioq->msgq, msg, qentry);
1536 * Write a message to an iocom, with additional state processing.
dmsg_msg_write(dmsg_msg_t *msg)
	dmsg_iocom_t *iocom = msg->state->iocom;
	dmsg_state_t *state;	/* NOTE(review): presumably set to msg->state -- confirm */

	pthread_mutex_lock(&iocom->mtx);

	/*
	 * Make sure the parent transaction is still open in the transmit
	 * direction.  If it isn't the message is dead and we have to
	 * potentially simulate a rxmsg terminating the transaction.
	 */
	if (state->parent->txcmd & DMSGF_DELETE) {
		fprintf(stderr, "dmsg_msg_write: EARLY TERMINATION\n");
		dmsg_msg_simulate_failure(state, DMSG_ERR_LOSTLINK);
		dmsg_state_cleanuptx(iocom, msg);
		pthread_mutex_unlock(&iocom->mtx);

	/*
	 * Process state data into the message as needed, then update the
	 * state based on the message.
	 */
	if ((state->flags & DMSG_STATE_ROOT) == 0) {
		/*
		 * Existing transaction (could be reply).  It is also
		 * possible for this to be the first reply (CREATE is set),
		 * in which case we populate state->txcmd.
		 *
		 * state->txcmd is adjusted to hold the final message cmd,
		 * and we also be sure to set the CREATE bit here.  We did
		 * not set it in dmsg_msg_alloc() because that would have
		 * not been serialized (state could have gotten ripped out
		 * from under the message prior to it being transmitted).
		 */
		if ((msg->any.head.cmd & (DMSGF_CREATE | DMSGF_REPLY)) ==
			state->txcmd = msg->any.head.cmd & ~DMSGF_DELETE;
			state->icmd = state->txcmd & DMSGF_BASECMDMASK;
		/* stamp the message with the state's msgid */
		msg->any.head.msgid = state->msgid;

		if (msg->any.head.cmd & DMSGF_CREATE) {
			state->txcmd = msg->any.head.cmd & ~DMSGF_DELETE;
	/* update tx-side transaction state (may retire the state on DELETE) */
	dmsg_state_cleanuptx(iocom, msg);
		"MSGWRITE %016jx %08x\n",
		msg->any.head.msgid, msg->any.head.cmd);

	/*
	 * Queue it for output, wake up the I/O pthread.  Note that the
	 * I/O thread is responsible for generating the CRCs and encryption.
	 */
	TAILQ_INSERT_TAIL(&iocom->txmsgq, msg, qentry);
	write(iocom->wakeupfds[1], &dummy, 1);	/* XXX optimize me */
	pthread_mutex_unlock(&iocom->mtx);
1608 * iocom->mtx must be held by caller.
dmsg_msg_simulate_failure(dmsg_state_t *state, int error)
	dmsg_iocom_t *iocom = state->iocom;

	if (state == &iocom->state0) {
		/*
		 * No active local or remote transactions remain.
		 * Generate a final LNK_ERROR and flag EOF.
		 */
		msg = dmsg_msg_alloc_locked(&iocom->state0, 0,
		msg->any.head.error = error;
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		fprintf(stderr, "EOF ON SOCKET %d\n", iocom->sock_fd);
	} else if (state->flags & DMSG_STATE_OPPOSITE) {
		/*
		 * Active remote transactions are still present.
		 * Simulate the other end sending us a DELETE.
		 */
		if (state->rxcmd & DMSGF_DELETE) {
			/* rx side already deleted: log, back off, retry */
				"iocom: ioq error(rd) %d sleeping "
				"state %p rxcmd %08x txcmd %08x "
				error, state, state->rxcmd,
				state->txcmd, state->func);
			usleep(100000);	/* XXX */
			atomic_set_int(&iocom->flags,
			fprintf(stderr, "SIMULATE ERROR1\n");
			/* fabricate a received DELETE on this transaction */
			msg = dmsg_msg_alloc_locked(&iocom->state0, 0,
			/*state->txcmd |= DMSGF_DELETE;*/
			msg->any.head.error = error;
			msg->any.head.msgid = state->msgid;
			msg->any.head.circuit = state->parent->msgid;
			msg->any.head.cmd |= DMSGF_ABORT |
			if ((state->parent->flags &
			     DMSG_STATE_OPPOSITE) == 0) {
				msg->any.head.cmd |= DMSGF_REVCIRC;
		/*
		 * Active local transactions are still present.
		 * Simulate the other end sending us a DELETE.
		 */
		if (state->rxcmd & DMSGF_DELETE) {
			/* rx side already deleted: log, back off, retry */
				"iocom: ioq error(wr) %d sleeping "
				"state %p rxcmd %08x txcmd %08x "
				error, state, state->rxcmd,
				state->txcmd, state->func);
			usleep(100000);	/* XXX */
			atomic_set_int(&iocom->flags,
			fprintf(stderr, "SIMULATE ERROR1\n");
			/* fabricate a received DELETE on this transaction */
			msg = dmsg_msg_alloc_locked(&iocom->state0, 0,
			msg->any.head.error = error;
			msg->any.head.msgid = state->msgid;
			msg->any.head.circuit = state->parent->msgid;
			msg->any.head.cmd |= DMSGF_ABORT |
			if ((state->parent->flags &
			     DMSG_STATE_OPPOSITE) == 0) {
				msg->any.head.cmd |= DMSGF_REVCIRC;
			/* simulated rx must carry CREATE if none was seen */
			if ((state->rxcmd & DMSGF_CREATE) == 0)
				msg->any.head.cmd |= DMSGF_CREATE;
	/* queue the fabricated message on the receive side and wake it */
	TAILQ_INSERT_TAIL(&iocom->ioq_rx.msgq, msg, qentry);
	atomic_set_int(&iocom->flags, DMSG_IOCOMF_RWORK);
1705 * This is a shortcut to formulate a reply to msg with a simple error code,
 * It can reply to and terminate a transaction, or it can reply to a one-way
 * message.  A DMSG_LNK_ERROR command code is utilized to encode
1708 * the error code (which can be 0). Not all transactions are terminated
1709 * with DMSG_LNK_ERROR status (the low level only cares about the
 * DMSGF_DELETE flag), but most are.
1712 * Replies to one-way messages are a bit of an oxymoron but the feature
1713 * is used by the debug (DBG) protocol.
1715 * The reply contains no extended data.
dmsg_msg_reply(dmsg_msg_t *msg, uint32_t error)
	dmsg_state_t *state = msg->state;

	/*
	 * Reply with a simple error code and terminate the transaction
	 * (DMSGF_DELETE is added below).
	 */
	cmd = DMSG_LNK_ERROR;

	/*
	 * Check if our direction has even been initiated yet, set CREATE.
	 *
	 * Check what direction this is (command or reply direction).  Note
	 * that txcmd might not have been initiated yet.
	 *
	 * If our direction has already been closed we just return without
	 * doing anything.
	 */
	if ((state->flags & DMSG_STATE_ROOT) == 0) {
		if (state->txcmd & DMSGF_DELETE)
		if (state->txcmd & DMSGF_REPLY)
		cmd |= DMSGF_DELETE;
		/* one-off: replying to a command makes this a reply */
		if ((msg->any.head.cmd & DMSGF_REPLY) == 0)

	/*
	 * Allocate the message and associate it with the existing state.
	 * We cannot pass DMSGF_CREATE to msg_alloc() because that may
	 * allocate new state.  We have our state already.
	 */
	nmsg = dmsg_msg_alloc(state, 0, cmd, NULL, NULL);
	if ((state->flags & DMSG_STATE_ROOT) == 0) {
		/* first message in our direction must carry CREATE */
		if ((state->txcmd & DMSGF_CREATE) == 0)
			nmsg->any.head.cmd |= DMSGF_CREATE;
	nmsg->any.head.error = error;

	dmsg_msg_write(nmsg);
1765 * Similar to dmsg_msg_reply() but leave the transaction open. That is,
1766 * we are generating a streaming reply or an intermediate acknowledgement
1767 * of some sort as part of the higher level protocol, with more to come
dmsg_msg_result(dmsg_msg_t *msg, uint32_t error)
	dmsg_state_t *state = msg->state;

	/*
	 * Reply with a simple error code.  Unlike dmsg_msg_reply() the
	 * transaction is left open: no DMSGF_DELETE is set here.
	 */
	cmd = DMSG_LNK_ERROR;

	/*
	 * Check if our direction has even been initiated yet, set CREATE.
	 *
	 * Check what direction this is (command or reply direction).  Note
	 * that txcmd might not have been initiated yet.
	 *
	 * If our direction has already been closed we just return without
	 * doing anything.
	 */
	if ((state->flags & DMSG_STATE_ROOT) == 0) {
		if (state->txcmd & DMSGF_DELETE)
		if (state->txcmd & DMSGF_REPLY)
		/* continuing transaction, do not set DMSGF_DELETE */
		if ((msg->any.head.cmd & DMSGF_REPLY) == 0)

	/* associate the reply with the existing state; see dmsg_msg_reply() */
	nmsg = dmsg_msg_alloc(state, 0, cmd, NULL, NULL);
	if ((state->flags & DMSG_STATE_ROOT) == 0) {
		/* first message in our direction must carry CREATE */
		if ((state->txcmd & DMSGF_CREATE) == 0)
			nmsg->any.head.cmd |= DMSGF_CREATE;
	nmsg->any.head.error = error;

	dmsg_msg_write(nmsg);
1813 * Terminate a transaction given a state structure by issuing a DELETE.
1814 * (the state structure must not be &iocom->state0)
dmsg_state_reply(dmsg_state_t *state, uint32_t error)
	/* terminating reply: DELETE is part of the command from the start */
	uint32_t cmd = DMSG_LNK_ERROR | DMSGF_DELETE;

	/*
	 * Nothing to do if we already transmitted a delete
	 */
	if (state->txcmd & DMSGF_DELETE)

	/*
	 * Set REPLY if the other end initiated the command.  Otherwise
	 * we are the command direction.
	 */
	if (state->txcmd & DMSGF_REPLY)

	nmsg = dmsg_msg_alloc(state, 0, cmd, NULL, NULL);
	if ((state->flags & DMSG_STATE_ROOT) == 0) {
		/* first message in our direction must carry CREATE */
		if ((state->txcmd & DMSGF_CREATE) == 0)
			nmsg->any.head.cmd |= DMSGF_CREATE;
	nmsg->any.head.error = error;
	dmsg_msg_write(nmsg);
 * Post a result code on a transaction given a state structure, without
 * issuing a DELETE (the transaction is left open).
1846 * (the state structure must not be &iocom->state0)
dmsg_state_result(dmsg_state_t *state, uint32_t error)
	/* no DMSGF_DELETE: the transaction remains open after this result */
	uint32_t cmd = DMSG_LNK_ERROR;

	/*
	 * Nothing to do if we already transmitted a delete
	 */
	if (state->txcmd & DMSGF_DELETE)

	/*
	 * Set REPLY if the other end initiated the command.  Otherwise
	 * we are the command direction.
	 */
	if (state->txcmd & DMSGF_REPLY)

	nmsg = dmsg_msg_alloc(state, 0, cmd, NULL, NULL);
	if ((state->flags & DMSG_STATE_ROOT) == 0) {
		/* first message in our direction must carry CREATE */
		if ((state->txcmd & DMSGF_CREATE) == 0)
			nmsg->any.head.cmd |= DMSGF_CREATE;
	nmsg->any.head.error = error;
	dmsg_msg_write(nmsg);
1876 /************************************************************************
1877 * TRANSACTION STATE HANDLING *
1878 ************************************************************************
1883 * Process state tracking for a message after reception, prior to execution.
1884 * Possibly route the message (consuming it).
1886 * Called with msglk held and the msg dequeued.
1888 * All messages are called with dummy state and return actual state.
1889 * (One-off messages often just return the same dummy state).
1891 * May request that caller discard the message by setting *discardp to 1.
1892 * The returned state is not used in this case and is allowed to be NULL.
1896 * These routines handle persistent and command/reply message state via the
1897 * CREATE and DELETE flags. The first message in a command or reply sequence
1898 * sets CREATE, the last message in a command or reply sequence sets DELETE.
1900 * There can be any number of intermediate messages belonging to the same
 * sequence sent in between the CREATE message and the DELETE message,
1902 * which set neither flag. This represents a streaming command or reply.
1904 * Any command message received with CREATE set expects a reply sequence to
1905 * be returned. Reply sequences work the same as command sequences except the
1906 * REPLY bit is also sent. Both the command side and reply side can
1907 * degenerate into a single message with both CREATE and DELETE set. Note
1908 * that one side can be streaming and the other side not, or neither, or both.
1910 * The msgid is unique for the initiator. That is, two sides sending a new
1911 * message can use the same msgid without colliding.
1915 * ABORT sequences work by setting the ABORT flag along with normal message
1916 * state. However, ABORTs can also be sent on half-closed messages, that is
1917 * even if the command or reply side has already sent a DELETE, as long as
1918 * the message has not been fully closed it can still send an ABORT+DELETE
1919 * to terminate the half-closed message state.
1921 * Since ABORT+DELETEs can race we silently discard ABORT's for message
1922 * state which has already been fully closed. REPLY+ABORT+DELETEs can
1923 * also race, and in this situation the other side might have already
1924 * initiated a new unrelated command with the same message id. Since
1925 * the abort has not set the CREATE flag the situation can be detected
1926 * and the message will also be discarded.
1928 * Non-blocking requests can be initiated with ABORT+CREATE[+DELETE].
1929 * The ABORT request is essentially integrated into the command instead
1930 * of being sent later on. In this situation the command implementation
1931 * detects that CREATE and ABORT are both set (vs ABORT alone) and can
1932 * special-case non-blocking operation for the command.
1934 * NOTE! Messages with ABORT set without CREATE or DELETE are considered
1935 * to be mid-stream aborts for command/reply sequences. ABORTs on
1936 * one-way messages are not supported.
1938 * NOTE! If a command sequence does not support aborts the ABORT flag is
1943 * One-off messages (no reply expected) are sent without an established
1944 * transaction. CREATE and DELETE are left clear and the msgid is usually 0.
1945 * For one-off messages sent over circuits msgid generally MUST be 0.
1947 * One-off messages cannot be aborted and typically aren't processed
1948 * by these routines. Order is still guaranteed for messages sent over
1949 * the same circuit. The REPLY bit can be used to distinguish whether
1950 * a one-off message is a command or reply. For example, one-off replies
1951 * will typically just contain status updates.
dmsg_state_msgrx(dmsg_msg_t *msg)
	dmsg_iocom_t *iocom = msg->state->iocom;
	dmsg_state_t *state;		/* transaction state for this msgid */
	dmsg_state_t *pstate;		/* parent (circuit) state */
	dmsg_state_t sdummy;		/* stack-local key for RB_FIND */

	pthread_mutex_lock(&iocom->mtx);

	/*
	 * Lookup the circuit (pstate).  The circuit will be an open
	 * transaction.  The REVCIRC bit in the message tells us which side
	 * of the iocom's state trees (statewr vs staterd) to search.
	 */
	if (msg->any.head.circuit) {
		sdummy.msgid = msg->any.head.circuit;

		if (msg->any.head.cmd & DMSGF_REVCIRC) {
			pstate = RB_FIND(dmsg_state_tree,
					 &iocom->statewr_tree,
			pstate = RB_FIND(dmsg_state_tree,
					 &iocom->staterd_tree,
		if (pstate == NULL) {
			/* circuit id matches no open transaction: reject */
				"missing parent in stacked trans %s\n",
			error = DMSG_IOQ_ERROR_TRANS;
			pthread_mutex_unlock(&iocom->mtx);
		/* no circuit specified: parent is the iocom root state */
		pstate = &iocom->state0;

	/*
	 * Lookup the transaction state for this msgid.
	 *
	 * If received msg is a command state is on staterd_tree.
	 * If received msg is a reply state is on statewr_tree.
	 * Otherwise there is no state (retain &iocom->state0)
	 */
	sdummy.msgid = msg->any.head.msgid;
	if (msg->any.head.cmd & DMSGF_REVTRANS)
		state = RB_FIND(dmsg_state_tree, &iocom->statewr_tree, &sdummy);
		state = RB_FIND(dmsg_state_tree, &iocom->staterd_tree, &sdummy);

	/*
	 * Message over an existing transaction (CREATE should not
	 * be set): the found state must hang off the located circuit.
	 */
		assert(pstate == state->parent);
	/*
	 * Either a new transaction (if CREATE set) or a one-off.
	 */
	pthread_mutex_unlock(&iocom->mtx);

	/*
	 * Switch on CREATE, DELETE, REPLY, and also handle ABORT from
	 * inside the case statements.
	 *
	 * Construct new state as necessary.
	 */
	switch(msg->any.head.cmd & (DMSGF_CREATE | DMSGF_DELETE |
	case DMSGF_CREATE | DMSGF_DELETE:
		/*
		 * Create new sub-transaction under pstate.
		 * (any DELETE is handled in post-processing of msg).
		 *
		 * (During routing the msgid was made unique for this
		 * direction over the comlink, so our RB trees can be
		 * iocom-based instead of state-based).
		 */
		if (state != pstate) {
			/* msgid collides with an existing transaction */
				"duplicate transaction %s\n",
			error = DMSG_IOQ_ERROR_TRANS;

		/*
		 * Allocate the new state.
		 */
		state = malloc(sizeof(*state));
		atomic_add_int(&dmsg_state_count, 1);
		bzero(state, sizeof(*state));
		TAILQ_INIT(&state->subq);
		dmsg_state_hold(pstate);	/* ref held by state->parent */
		state->parent = pstate;
		state->iocom = iocom;
		/* OPPOSITE: this transaction was initiated by the remote end */
		state->flags = DMSG_STATE_DYNAMIC |
			       DMSG_STATE_OPPOSITE;
		state->msgid = msg->any.head.msgid;
		state->txcmd = DMSGF_REPLY;
		state->rxcmd = msg->any.head.cmd & ~DMSGF_DELETE;
		state->icmd = state->rxcmd & DMSGF_BASECMDMASK;
		pthread_mutex_lock(&iocom->mtx);
		RB_INSERT(dmsg_state_tree, &iocom->staterd_tree, state);
		TAILQ_INSERT_TAIL(&pstate->subq, state, entry);
		state->flags |= DMSG_STATE_INSERTED;

		/*
		 * If the parent is a relay set up the state handler to
		 * automatically route the message.  Local processing will
		 * be bypassed in that case.
		 *
		 * (state relays are seeded by SPAN processing)
		 */
			state->func = dmsg_state_relay;
		pthread_mutex_unlock(&iocom->mtx);
			"create state %p id=%08x on iocom staterd %p\n",
			state, (uint32_t)state->msgid, iocom);
		/*
		 * Persistent state is expected but might not exist if an
		 * ABORT+DELETE races the close.
		 *
		 * (any DELETE is handled in post-processing of msg).
		 */
		if (state == pstate) {
			if (msg->any.head.cmd & DMSGF_ABORT) {
				/* benign race: already closed, discard */
				error = DMSG_IOQ_ERROR_EALREADY;
				fprintf(stderr, "missing-state %s\n",
				error = DMSG_IOQ_ERROR_TRANS;

		/*
		 * Handle another ABORT+DELETE case if the msgid has already
		 * been reused (CREATE never seen on this state's rx side).
		 */
		if ((state->rxcmd & DMSGF_CREATE) == 0) {
			if (msg->any.head.cmd & DMSGF_ABORT) {
				error = DMSG_IOQ_ERROR_EALREADY;
				fprintf(stderr, "reused-state %s\n",
				error = DMSG_IOQ_ERROR_TRANS;
		/*
		 * Check for mid-stream ABORT command received, otherwise
		 * allow the message through.
		 */
		if (msg->any.head.cmd & DMSGF_ABORT) {
			if ((state == pstate) ||
			    (state->rxcmd & DMSGF_CREATE) == 0) {
				error = DMSG_IOQ_ERROR_EALREADY;
	case DMSGF_REPLY | DMSGF_CREATE:
	case DMSGF_REPLY | DMSGF_CREATE | DMSGF_DELETE:
		/*
		 * When receiving a reply with CREATE set the original
		 * persistent state message should already exist.
		 */
		if (state == pstate) {
			fprintf(stderr, "no-state(r) %s\n",
			error = DMSG_IOQ_ERROR_TRANS;
		/* rx direction of a reply must agree with recorded REPLY bit */
		assert(((state->rxcmd ^ msg->any.head.cmd) & DMSGF_REPLY) == 0);
		state->rxcmd = msg->any.head.cmd & ~DMSGF_DELETE;
	case DMSGF_REPLY | DMSGF_DELETE:
		/*
		 * Received REPLY+ABORT+DELETE in case where msgid has
		 * already been fully closed, ignore the message.
		 */
		if (state == pstate) {
			if (msg->any.head.cmd & DMSGF_ABORT) {
				error = DMSG_IOQ_ERROR_EALREADY;
				fprintf(stderr, "no-state(r,d) %s\n",
				error = DMSG_IOQ_ERROR_TRANS;

		/*
		 * Received REPLY+ABORT+DELETE in case where msgid has
		 * already been reused for an unrelated message,
		 * ignore the message.
		 */
		if ((state->rxcmd & DMSGF_CREATE) == 0) {
			if (msg->any.head.cmd & DMSGF_ABORT) {
				error = DMSG_IOQ_ERROR_EALREADY;
				fprintf(stderr, "reused-state(r,d) %s\n",
				error = DMSG_IOQ_ERROR_TRANS;
		/*
		 * Check for mid-stream ABORT reply received to sent command.
		 */
		if (msg->any.head.cmd & DMSGF_ABORT) {
			if (state == pstate ||
			    (state->rxcmd & DMSGF_CREATE) == 0) {
				error = DMSG_IOQ_ERROR_EALREADY;

	/*
	 * Calculate the easy-switch() transactional command.  Represents
	 * the outer-transaction command for any transaction-create or
	 * transaction-delete, and the inner message command for any
	 * non-transaction or inside-transaction command.  tcmd will be
	 * set to 0 for any messaging error condition.
	 *
	 * The two can be told apart because outer-transaction commands
	 * always have a DMSGF_CREATE and/or DMSGF_DELETE flag.
	 */
	if (msg->any.head.cmd & (DMSGF_CREATE | DMSGF_DELETE)) {
		if ((state->flags & DMSG_STATE_ROOT) == 0) {
			msg->tcmd = (msg->state->icmd & DMSGF_BASECMDMASK) |
				    (msg->any.head.cmd & (DMSGF_CREATE |
		msg->tcmd = msg->any.head.cmd & DMSGF_CMDSWMASK;
2233 * Route the message and handle pair-state processing.
dmsg_state_relay(dmsg_msg_t *lmsg)
	dmsg_state_t *lpstate;	/* local parent (circuit) state */
	dmsg_state_t *rpstate;	/* relayed parent state on the other iocom */
	dmsg_state_t *lstate;	/* local transaction state */
	dmsg_state_t *rstate;	/* relayed transaction state */

	if ((lmsg->any.head.cmd & (DMSGF_CREATE | DMSGF_REPLY)) ==
		/*
		 * New sub-transaction, establish new state and relay.
		 */
		lstate = lmsg->state;
		lpstate = lstate->parent;
		rpstate = lpstate->relay;
		assert(lstate->relay == NULL);
		assert(rpstate != NULL);

		rmsg = dmsg_msg_alloc(rpstate, 0,
				      dmsg_state_relay, NULL);
		rstate = rmsg->state;

		/* cross-link the two states and take a ref on each side */
		rstate->relay = lstate;
		lstate->relay = rstate;
		dmsg_state_hold(lstate);
		dmsg_state_hold(rstate);
		/*
		 * State & relay already established
		 */
		lstate = lmsg->state;
		rstate = lstate->relay;
		assert(rstate != NULL);

		rmsg = dmsg_msg_alloc(rstate, 0,
				      dmsg_state_relay, NULL);
	/* copy any extended header bytes past the core header */
	if (lmsg->hdr_size > sizeof(lmsg->any.head)) {
		bcopy(&lmsg->any.head + 1, &rmsg->any.head + 1,
		      lmsg->hdr_size - sizeof(lmsg->any.head));
	rmsg->any.head.error = lmsg->any.head.error;
	rmsg->any.head.reserved02 = lmsg->any.head.reserved02;
	rmsg->any.head.reserved18 = lmsg->any.head.reserved18;
	/* transfer ownership of the aux buffer from lmsg to rmsg */
	rmsg->aux_size = lmsg->aux_size;
	rmsg->aux_data = lmsg->aux_data;
	lmsg->aux_data = NULL;
	fprintf(stderr, "RELAY %08x\n", rmsg->any.head.cmd);
	dmsg_msg_write(rmsg);
2292 * Cleanup and retire msg after processing
dmsg_state_cleanuprx(dmsg_iocom_t *iocom, dmsg_msg_t *msg)
	dmsg_state_t *state;	/* NOTE(review): presumably msg->state -- confirm */
	dmsg_state_t *pstate;

	assert(msg->state->iocom == iocom);
	if (state->flags & DMSG_STATE_ROOT) {
		/*
		 * Free a non-transactional message, there is no state
		 * to clean up.
		 */
	} else if (msg->any.head.cmd & DMSGF_DELETE) {
		/*
		 * Message terminating transaction, destroy the related
		 * state, the original message, and this message (if it
		 * isn't the original message due to a CREATE|DELETE).
		 *
		 * It's possible for governing state to terminate while
		 * sub-transactions still exist.  This is allowed but
		 * will cause sub-transactions to recursively fail.
		 * Further reception of sub-transaction messages will be
		 * impossible because the circuit will no longer exist.
		 * (XXX need code to make sure that happens properly).
		 */
		pthread_mutex_lock(&iocom->mtx);
		state->rxcmd |= DMSGF_DELETE;

		if (state->txcmd & DMSGF_DELETE) {
			/* both directions closed: unlink and retire state */
			assert(state->flags & DMSG_STATE_INSERTED);
			if (state->rxcmd & DMSGF_REPLY) {
				assert(msg->any.head.cmd & DMSGF_REPLY);
				RB_REMOVE(dmsg_state_tree,
					  &iocom->statewr_tree, state);
				assert((msg->any.head.cmd & DMSGF_REPLY) == 0);
				RB_REMOVE(dmsg_state_tree,
					  &iocom->staterd_tree, state);
			pstate = state->parent;
			TAILQ_REMOVE(&pstate->subq, state, entry);
			state->flags &= ~DMSG_STATE_INSERTED;
			state->parent = NULL;
			dmsg_state_drop(pstate);	/* ref from parent link */

				/* break the relay cross-link, if any */
				dmsg_state_drop(state->relay);
				state->relay = NULL;
			dmsg_state_drop(state);
		pthread_mutex_unlock(&iocom->mtx);
		/*
		 * Message not terminating transaction, leave state intact
		 * and free message if it isn't the CREATE message.
		 */
2361 * Clean up the state after pulling out needed fields and queueing the
2362 * message for transmission. This occurs in dmsg_msg_write().
dmsg_state_cleanuptx(dmsg_iocom_t *iocom, dmsg_msg_t *msg)
	dmsg_state_t *state;	/* NOTE(review): presumably msg->state -- confirm */
	dmsg_state_t *pstate;

	assert(iocom == msg->state->iocom);
	if (state->flags & DMSG_STATE_ROOT) {
	} else if (msg->any.head.cmd & DMSGF_DELETE) {
		/*
		 * Message terminating transaction, destroy the related
		 * state, the original message, and this message (if it
		 * isn't the original message due to a CREATE|DELETE).
		 *
		 * It's possible for governing state to terminate while
		 * sub-transactions still exist.  This is allowed but
		 * will cause sub-transactions to recursively fail.
		 * Further reception of sub-transaction messages will be
		 * impossible because the circuit will no longer exist.
		 * (XXX need code to make sure that happens properly).
		 */
		pthread_mutex_lock(&iocom->mtx);
		assert((state->txcmd & DMSGF_DELETE) == 0);
		state->txcmd |= DMSGF_DELETE;

		if (state->rxcmd & DMSGF_DELETE) {
			/* both directions closed: unlink and retire state */
			assert(state->flags & DMSG_STATE_INSERTED);
			if (state->txcmd & DMSGF_REPLY) {
				assert(msg->any.head.cmd & DMSGF_REPLY);
				RB_REMOVE(dmsg_state_tree,
					  &iocom->staterd_tree, state);
				assert((msg->any.head.cmd & DMSGF_REPLY) == 0);
				RB_REMOVE(dmsg_state_tree,
					  &iocom->statewr_tree, state);
			pstate = state->parent;
			TAILQ_REMOVE(&pstate->subq, state, entry);
			state->flags &= ~DMSG_STATE_INSERTED;
			state->parent = NULL;
			dmsg_state_drop(pstate);	/* ref from parent link */

				/* break the relay cross-link, if any */
				dmsg_state_drop(state->relay);
				state->relay = NULL;
			dmsg_state_drop(state);	/* usually the last drop */
		pthread_mutex_unlock(&iocom->mtx);
2418 * Called with or without locks
dmsg_state_hold(dmsg_state_t *state)
	/* unconditional reference-count increment (lock-free) */
	atomic_add_int(&state->refs, 1);
dmsg_state_drop(dmsg_state_t *state)
	/*
	 * Drop a reference; the caller that transitions refs 1 -> 0
	 * (atomic_fetchadd returns the prior value) frees the state.
	 */
	if (atomic_fetchadd_int(&state->refs, -1) == 1)
		dmsg_state_free(state);
2434 * Called with iocom locked
dmsg_state_free(dmsg_state_t *state)
	atomic_add_int(&dmsg_state_count, -1);
	fprintf(stderr, "terminate state %p id=%08x\n",
		state, (uint32_t)state->msgid);
	/* state must already be unlinked from trees/subq and unreferenced */
	assert((state->flags & (DMSG_STATE_ROOT | DMSG_STATE_INSERTED)) == 0);
	assert(TAILQ_EMPTY(&state->subq));
	assert(state->refs == 0);
	if (state->any.any != NULL)	/* XXX avoid deadlock w/exit & kernel */
	assert(state->any.any == NULL);
 * This swaps endian for a dmsg_hdr_t.  Note that the extended
2455 * header is not adjusted, just the core header.
dmsg_bswap_head(dmsg_hdr_t *head)
	/* 16/32-bit leading fields */
	head->magic = bswap16(head->magic);
	head->reserved02 = bswap16(head->reserved02);
	head->salt = bswap32(head->salt);

	/* 64-bit identifiers */
	head->msgid = bswap64(head->msgid);
	head->circuit = bswap64(head->circuit);
	head->reserved18= bswap64(head->reserved18);

	/* command, crcs, aux descriptors */
	head->cmd = bswap32(head->cmd);
	head->aux_crc = bswap32(head->aux_crc);
	head->aux_bytes = bswap32(head->aux_bytes);
	head->error = bswap32(head->error);
	head->aux_descr = bswap64(head->aux_descr);
	head->reserved38= bswap32(head->reserved38);
	head->hdr_crc = bswap32(head->hdr_crc);