/*
 * Copyright (c) 2011-2012 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
36 #include "dmsg_local.h"
/*
 * File-local helpers: received-message state processing, transmit-side
 * state cleanup, locked message free, state free, and simulated failure
 * injection.  RB_GENERATE emits the red-black tree implementation used
 * for the per-iocom transaction state trees, keyed via dmsg_state_cmp.
 *
 * NOTE(review): this extraction has dropped interior lines throughout the
 * file and fused original line numbers into the text; comments below
 * describe only what the visible code demonstrates.
 */
41 static int dmsg_state_msgrx(dmsg_msg_t *msg);
42 static void dmsg_state_cleanuptx(dmsg_iocom_t *iocom, dmsg_msg_t *msg);
43 static void dmsg_msg_free_locked(dmsg_msg_t *msg);
44 static void dmsg_state_free(dmsg_state_t *state);
45 static void dmsg_msg_simulate_failure(dmsg_state_t *state, int error);
47 RB_GENERATE(dmsg_state_tree, dmsg_state, rbnode, dmsg_state_cmp);
50 * STATE TREE - Represents open transactions which are indexed by their
51 * { msgid } relative to the governing iocom.
/*
 * RB-tree comparator: orders states by msgid.  The visible comparisons
 * test msgid ascending; the return statements and braces were lost in
 * this extraction (presumably the usual -1/1/0 comparator pattern —
 * TODO confirm against the full source).
 */
54 dmsg_state_cmp(dmsg_state_t *state1, dmsg_state_t *state2)
56 if (state1->msgid < state2->msgid)
58 if (state1->msgid > state2->msgid)
64 * Initialize a low-level ioq
/*
 * Zero the ioq, start the rx/tx state machine in HEADER1 (waiting for a
 * primary message header), and initialize the pending-message queue.
 * The iocom argument is unused here (marked __unused).
 */
67 dmsg_ioq_init(dmsg_iocom_t *iocom __unused, dmsg_ioq_t *ioq)
69 bzero(ioq, sizeof(*ioq));
70 ioq->state = DMSG_MSGQ_STATE_HEADER1;
71 TAILQ_INIT(&ioq->msgq);
77 * caller holds iocom->mtx.
/*
 * Tear down an ioq.  The message queue is expected to already be empty
 * (the assert(0) fires if anything remains); any partially-built
 * in-progress message (ioq->msg) is also cleaned up.  Caller holds
 * iocom->mtx per the comment above.
 */
80 dmsg_ioq_done(dmsg_iocom_t *iocom __unused, dmsg_ioq_t *ioq)
84 while ((msg = TAILQ_FIRST(&ioq->msgq)) != NULL) {
85 assert(0); /* shouldn't happen */
86 TAILQ_REMOVE(&ioq->msgq, msg, qentry);
89 if ((msg = ioq->msg) != NULL) {
96 * Initialize a low-level communications channel.
98 * NOTE: The signal_func() is called at least once from the loop and can be
99 * re-armed via dmsg_iocom_restate().
/*
 * Zero and populate the iocom: default label, the four callbacks, mutex,
 * the two state RB-trees, three message queues, fds, initial flags
 * (RREQ so the core polls for input; SWORK presumably conditional on
 * signal_func — the guarding if was dropped by this extraction), both
 * ioqs, and the embedded root state0 (ref'd once so it is never freed).
 * A non-blocking wakeup pipe is created for cross-thread notification.
 * Crypto is negotiated synchronously only when sock_fd is a socket
 * (pipes are trusted local linkages).  All fds end up non-blocking for
 * the iocom core.
 */
102 dmsg_iocom_init(dmsg_iocom_t *iocom, int sock_fd, int alt_fd,
103 void (*signal_func)(dmsg_iocom_t *iocom),
104 void (*rcvmsg_func)(dmsg_msg_t *msg),
105 void (*usrmsg_func)(dmsg_msg_t *msg, int unmanaged),
106 void (*altmsg_func)(dmsg_iocom_t *iocom))
110 bzero(iocom, sizeof(*iocom));
112 asprintf(&iocom->label, "iocom-%p", iocom);
113 iocom->signal_callback = signal_func;
114 iocom->rcvmsg_callback = rcvmsg_func;
115 iocom->altmsg_callback = altmsg_func;
116 iocom->usrmsg_callback = usrmsg_func;
118 pthread_mutex_init(&iocom->mtx, NULL);
119 RB_INIT(&iocom->staterd_tree);
120 RB_INIT(&iocom->statewr_tree);
121 TAILQ_INIT(&iocom->freeq);
122 TAILQ_INIT(&iocom->freeq_aux);
123 TAILQ_INIT(&iocom->txmsgq);
124 iocom->sock_fd = sock_fd;
125 iocom->alt_fd = alt_fd;
126 iocom->flags = DMSG_IOCOMF_RREQ | DMSG_IOCOMF_CLOSEALT;
128 iocom->flags |= DMSG_IOCOMF_SWORK;
129 dmsg_ioq_init(iocom, &iocom->ioq_rx);
130 dmsg_ioq_init(iocom, &iocom->ioq_tx);
131 iocom->state0.refs = 1; /* should never trigger a free */
132 iocom->state0.iocom = iocom;
133 iocom->state0.parent = &iocom->state0;
134 iocom->state0.flags = DMSG_STATE_ROOT;
135 TAILQ_INIT(&iocom->state0.subq);
/* wakeup pipe: both ends non-blocking so core never stalls on it */
137 if (pipe(iocom->wakeupfds) < 0)
139 fcntl(iocom->wakeupfds[0], F_SETFL, O_NONBLOCK);
140 fcntl(iocom->wakeupfds[1], F_SETFL, O_NONBLOCK);
143 * Negotiate session crypto synchronously. This will mark the
144 * connection as error'd if it fails. If this is a pipe it's
145 * a linkage that we set up ourselves to the filesystem and there
148 if (fstat(sock_fd, &st) < 0)
150 if (S_ISSOCK(st.st_mode))
151 dmsg_crypto_negotiate(iocom);
154 * Make sure our fds are set to non-blocking for the iocom core.
157 fcntl(sock_fd, F_SETFL, O_NONBLOCK);
159 /* if line buffered our single fgets() should be fine */
161 fcntl(alt_fd, F_SETFL, O_NONBLOCK);
/*
 * Replace the iocom's debug label with a printf-formatted string.
 * NOTE(review): the va_start/va_end bracketing and the free of the old
 * label (if any) fall in lines dropped by this extraction — confirm
 * against the full source.
 */
166 dmsg_iocom_label(dmsg_iocom_t *iocom, const char *ctl, ...)
173 vasprintf(&iocom->label, ctl, va);
180 * May only be called from a callback from iocom_core.
182 * Adjust state machine functions, set flags to guarantee that both
183 * the recevmsg_func and the sendmsg_func is called at least once.
/*
 * Re-arm the iocom with new signal/receive callbacks under iocom->mtx.
 * SWORK is set or cleared depending on whether a signal callback is
 * installed (the guarding if/else lines were dropped by this
 * extraction), so the core loop will invoke the new signal_func at
 * least once.
 */
186 dmsg_iocom_restate(dmsg_iocom_t *iocom,
187 void (*signal_func)(dmsg_iocom_t *),
188 void (*rcvmsg_func)(dmsg_msg_t *msg))
190 pthread_mutex_lock(&iocom->mtx);
191 iocom->signal_callback = signal_func;
192 iocom->rcvmsg_callback = rcvmsg_func;
194 atomic_set_int(&iocom->flags, DMSG_IOCOMF_SWORK);
196 atomic_clear_int(&iocom->flags, DMSG_IOCOMF_SWORK);
197 pthread_mutex_unlock(&iocom->mtx);
/*
 * Request that the core loop run the signal callback: sets SWORK (under
 * the mutex) only if a signal callback is installed.
 */
201 dmsg_iocom_signal(dmsg_iocom_t *iocom)
203 pthread_mutex_lock(&iocom->mtx);
204 if (iocom->signal_callback)
205 atomic_set_int(&iocom->flags, DMSG_IOCOMF_SWORK);
206 pthread_mutex_unlock(&iocom->mtx);
210 * Cleanup a terminating iocom.
212 * Caller should not hold iocom->mtx. The iocom has already been disconnected
213 * from all possible references to it.
/*
 * Release everything the iocom owns: the socket, the alt fd (only when
 * CLOSEALT is set), both ioqs, every cached message on the two free
 * queues (aux buffers on freeq_aux are cleared before release — the
 * free() calls themselves fall in dropped lines), both ends of the
 * wakeup pipe (reset to -1 to make double-done safe), and the mutex.
 */
216 dmsg_iocom_done(dmsg_iocom_t *iocom)
220 if (iocom->sock_fd >= 0) {
221 close(iocom->sock_fd);
224 if (iocom->alt_fd >= 0 && (iocom->flags & DMSG_IOCOMF_CLOSEALT)) {
225 close(iocom->alt_fd);
228 dmsg_ioq_done(iocom, &iocom->ioq_rx);
229 dmsg_ioq_done(iocom, &iocom->ioq_tx);
230 while ((msg = TAILQ_FIRST(&iocom->freeq)) != NULL) {
231 TAILQ_REMOVE(&iocom->freeq, msg, qentry);
234 while ((msg = TAILQ_FIRST(&iocom->freeq_aux)) != NULL) {
235 TAILQ_REMOVE(&iocom->freeq_aux, msg, qentry);
237 msg->aux_data = NULL;
240 if (iocom->wakeupfds[0] >= 0) {
241 close(iocom->wakeupfds[0]);
242 iocom->wakeupfds[0] = -1;
244 if (iocom->wakeupfds[1] >= 0) {
245 close(iocom->wakeupfds[1]);
246 iocom->wakeupfds[1] = -1;
248 pthread_mutex_destroy(&iocom->mtx);
252 * Allocate a new message using the specified transaction state.
254 * If CREATE is set a new transaction is allocated relative to the passed-in
255 * transaction (the 'state' argument becomes pstate).
257 * If CREATE is not set the message is associated with the passed-in
/*
 * Locking wrapper: acquires iocom->mtx around dmsg_msg_alloc_locked()
 * and returns the resulting message (the return fell in a dropped line).
 */
261 dmsg_msg_alloc(dmsg_state_t *state,
262 size_t aux_size, uint32_t cmd,
263 void (*func)(dmsg_msg_t *), void *data)
265 dmsg_iocom_t *iocom = state->iocom;
268 pthread_mutex_lock(&iocom->mtx);
269 msg = dmsg_msg_alloc_locked(state, aux_size, cmd, func, data);
270 pthread_mutex_unlock(&iocom->mtx);
/*
 * Allocate a message with iocom->mtx already held.
 *
 * Recycles a cached msg from freeq_aux (has an aux buffer) or freeq,
 * falling back to malloc.  When CREATE is set without REPLY a brand-new
 * dynamic transaction state is allocated under the passed-in state
 * (which becomes the parent): msgid is derived from the state's own
 * address, rxcmd primed with DMSGF_REPLY, and the state is inserted
 * into the statewr tree and the parent's subq.  Otherwise the message
 * rides the existing transaction and pstate is its parent.
 *
 * The aux buffer is (re)allocated to the aligned size with the aligned
 * tail zeroed so callers never have to.  REVTRANS/REVCIRC are set from
 * the OPPOSITE flags of the state/parent so the wire direction bits are
 * correct.  Finally the header is populated: magic, cmd, zeroed crc
 * fields, msgid and the parent's msgid as the circuit.
 *
 * In debug builds (visible guard-check writes) a sentinel int is placed
 * just past the variable-size header to catch overruns at free time.
 */
276 dmsg_msg_alloc_locked(dmsg_state_t *state,
277 size_t aux_size, uint32_t cmd,
278 void (*func)(dmsg_msg_t *), void *data)
280 dmsg_iocom_t *iocom = state->iocom;
281 dmsg_state_t *pstate;
288 aligned_size = DMSG_DOALIGN(aux_size);
289 if ((msg = TAILQ_FIRST(&iocom->freeq_aux)) != NULL)
290 TAILQ_REMOVE(&iocom->freeq_aux, msg, qentry);
293 if ((msg = TAILQ_FIRST(&iocom->freeq)) != NULL)
294 TAILQ_REMOVE(&iocom->freeq, msg, qentry);
297 aligned_size = DMSG_DOALIGN(aux_size);
299 if ((cmd & (DMSGF_CREATE | DMSGF_REPLY)) == DMSGF_CREATE) {
301 * When CREATE is set without REPLY the caller is
302 * initiating a new transaction stacked under the specified
305 * NOTE: CREATE in txcmd handled by dmsg_msg_write()
306 * NOTE: DELETE in txcmd handled by dmsg_state_cleanuptx()
309 state = malloc(sizeof(*state));
310 atomic_add_int(&dmsg_state_count, 1);
311 bzero(state, sizeof(*state));
312 TAILQ_INIT(&state->subq);
313 dmsg_state_hold(pstate);
315 state->parent = pstate;
316 state->iocom = iocom;
317 state->flags = DMSG_STATE_DYNAMIC;
318 state->msgid = (uint64_t)(uintptr_t)state;
319 state->txcmd = cmd & ~(DMSGF_CREATE | DMSGF_DELETE);
320 state->rxcmd = DMSGF_REPLY;
321 state->icmd = state->txcmd & DMSGF_BASECMDMASK;
323 state->any.any = data;
325 RB_INSERT(dmsg_state_tree, &iocom->statewr_tree, state);
326 TAILQ_INSERT_TAIL(&pstate->subq, state, entry);
327 state->flags |= DMSG_STATE_INSERTED;
330 * Otherwise the message is transmitted over the existing
333 pstate = state->parent;
336 /* XXX SMP race for state */
337 hbytes = (cmd & DMSGF_SIZE) * DMSG_ALIGN;
/* +4: room for the overrun-detect sentinel written below */
339 msg = malloc(offsetof(struct dmsg_msg, any.head) + hbytes + 4);
340 bzero(msg, offsetof(struct dmsg_msg, any.head));
341 *(int *)((char *)msg +
342 offsetof(struct dmsg_msg, any.head) + hbytes) =
345 msg = malloc(sizeof(*msg));
346 bzero(msg, sizeof(*msg));
351 * [re]allocate the auxillary data buffer. The caller knows that
352 * a size-aligned buffer will be allocated but we do not want to
353 * force the caller to zero any tail piece, so we do that ourself.
355 if (msg->aux_size != aux_size) {
358 msg->aux_data = NULL;
362 msg->aux_data = malloc(aligned_size);
363 msg->aux_size = aux_size;
364 if (aux_size != aligned_size) {
365 bzero(msg->aux_data + aux_size,
366 aligned_size - aux_size);
372 * Set REVTRANS if the transaction was remotely initiated
373 * Set REVCIRC if the circuit was remotely initiated
375 if (state->flags & DMSG_STATE_OPPOSITE)
376 cmd |= DMSGF_REVTRANS;
377 if (pstate->flags & DMSG_STATE_OPPOSITE)
378 cmd |= DMSGF_REVCIRC;
381 * Finish filling out the header.
384 bzero(&msg->any.head, hbytes);
385 msg->hdr_size = hbytes;
386 msg->any.head.magic = DMSG_HDR_MAGIC;
387 msg->any.head.cmd = cmd;
388 msg->any.head.aux_descr = 0;
389 msg->any.head.aux_crc = 0;
390 msg->any.head.msgid = state->msgid;
391 msg->any.head.circuit = pstate->msgid;
398 * Free a message so it can be reused afresh.
400 * NOTE: aux_size can be 0 with a non-NULL aux_data.
/*
 * Return a message to the appropriate free queue (freeq_aux when it
 * still owns an aux buffer, plain freeq otherwise) with iocom->mtx
 * held.  In debug builds the sentinel int placed past the header at
 * allocation time is re-checked to detect header overruns.
 */
404 dmsg_msg_free_locked(dmsg_msg_t *msg)
406 /*dmsg_iocom_t *iocom = msg->iocom;*/
408 int hbytes = (msg->any.head.cmd & DMSGF_SIZE) * DMSG_ALIGN;
409 if (*(int *)((char *)msg +
410 offsetof(struct dmsg_msg, any.head) + hbytes) !=
412 fprintf(stderr, "MSGFREE FAILED CMD %08x\n", msg->any.head.cmd);
416 msg->state = NULL; /* safety */
419 msg->aux_data = NULL;
425 TAILQ_INSERT_TAIL(&iocom->freeq_aux, msg, qentry);
427 TAILQ_INSERT_TAIL(&iocom->freeq, msg, qentry);
/*
 * Locking wrapper around dmsg_msg_free_locked().
 */
432 dmsg_msg_free(dmsg_msg_t *msg)
434 dmsg_iocom_t *iocom = msg->state->iocom;
436 pthread_mutex_lock(&iocom->mtx);
437 dmsg_msg_free_locked(msg);
438 pthread_mutex_unlock(&iocom->mtx);
442 * I/O core loop for an iocom.
444 * Thread localized, iocom->mtx not held.
/*
 * Event loop: runs until EOF is flagged on the iocom.  Each iteration
 * polls (only when no *WORK flag is already pending) up to three fds —
 * the wakeup pipe, the main socket (POLLIN/POLLOUT per RREQ/WREQ), and
 * the optional alt fd — then converts poll revents into the various
 * DMSG_IOCOMF_*WORK flags and dispatches: signal callback (SWORK),
 * wakeup-pipe drain + tx queue flush (PWORK), write flush (WWORK),
 * message reads via dmsg_ioq_read() + rcvmsg callback + rx state
 * cleanup (RWORK), and the alt-fd callback (ARWORK).
 */
447 dmsg_iocom_core(dmsg_iocom_t *iocom)
449 struct pollfd fds[3];
454 int wi; /* wakeup pipe */
456 int ai; /* alt bulk path socket */
458 while ((iocom->flags & DMSG_IOCOMF_EOF) == 0) {
460 * These iocom->flags are only manipulated within the
461 * context of the current thread. However, modifications
462 * still require atomic ops.
464 if ((iocom->flags & (DMSG_IOCOMF_RWORK |
469 DMSG_IOCOMF_AWWORK)) == 0) {
471 * Only poll if no immediate work is pending.
472 * Otherwise we are just wasting our time calling
483 * Always check the inter-thread pipe, e.g.
484 * for iocom->txmsgq work.
487 fds[wi].fd = iocom->wakeupfds[0];
488 fds[wi].events = POLLIN;
492 * Check the socket input/output direction as
495 if (iocom->flags & (DMSG_IOCOMF_RREQ |
498 fds[si].fd = iocom->sock_fd;
502 if (iocom->flags & DMSG_IOCOMF_RREQ)
503 fds[si].events |= POLLIN;
504 if (iocom->flags & DMSG_IOCOMF_WREQ)
505 fds[si].events |= POLLOUT;
509 * Check the alternative fd for work.
511 if (iocom->alt_fd >= 0) {
513 fds[ai].fd = iocom->alt_fd;
514 fds[ai].events = POLLIN;
517 poll(fds, count, timeout);
/* translate poll results into WORK flags */
519 if (wi >= 0 && (fds[wi].revents & POLLIN))
520 atomic_set_int(&iocom->flags,
522 if (si >= 0 && (fds[si].revents & POLLIN))
523 atomic_set_int(&iocom->flags,
525 if (si >= 0 && (fds[si].revents & POLLOUT))
526 atomic_set_int(&iocom->flags,
528 if (wi >= 0 && (fds[wi].revents & POLLOUT))
529 atomic_set_int(&iocom->flags,
531 if (ai >= 0 && (fds[ai].revents & POLLIN))
532 atomic_set_int(&iocom->flags,
536 * Always check the pipe
538 atomic_set_int(&iocom->flags, DMSG_IOCOMF_PWORK);
541 if (iocom->flags & DMSG_IOCOMF_SWORK) {
542 atomic_clear_int(&iocom->flags, DMSG_IOCOMF_SWORK);
543 iocom->signal_callback(iocom);
547 * Pending message queues from other threads wake us up
548 * with a write to the wakeupfds[] pipe. We have to clear
549 * the pipe with a dummy read.
551 if (iocom->flags & DMSG_IOCOMF_PWORK) {
552 atomic_clear_int(&iocom->flags, DMSG_IOCOMF_PWORK);
553 read(iocom->wakeupfds[0], dummybuf, sizeof(dummybuf));
554 atomic_set_int(&iocom->flags, DMSG_IOCOMF_RWORK);
555 atomic_set_int(&iocom->flags, DMSG_IOCOMF_WWORK);
556 if (TAILQ_FIRST(&iocom->txmsgq))
557 dmsg_iocom_flush1(iocom);
561 * Message write sequencing
563 if (iocom->flags & DMSG_IOCOMF_WWORK)
564 dmsg_iocom_flush1(iocom);
567 * Message read sequencing. Run this after the write
568 * sequencing in case the write sequencing allowed another
569 * auto-DELETE to occur on the read side.
571 if (iocom->flags & DMSG_IOCOMF_RWORK) {
572 while ((iocom->flags & DMSG_IOCOMF_EOF) == 0 &&
573 (msg = dmsg_ioq_read(iocom)) != NULL) {
575 fprintf(stderr, "receive %s\n",
578 iocom->rcvmsg_callback(msg);
579 dmsg_state_cleanuprx(iocom, msg);
583 if (iocom->flags & DMSG_IOCOMF_ARWORK) {
584 atomic_clear_int(&iocom->flags, DMSG_IOCOMF_ARWORK);
585 iocom->altmsg_callback(iocom);
591 * Make sure there's enough room in the FIFO to hold the
594 * Assume worst case encrypted form is 2x the size of the
595 * plaintext equivalent.
/*
 * Compact the rx FIFO when the free tail (nmax) plus buffered decrypted
 * bytes cannot cover the worst-case (2x) need: slide the decrypted
 * region [fifo_beg, fifo_cdx) to the front, then slide any not-yet-
 * decrypted region [fifo_cdn, fifo_end) down behind it.  Returns the
 * recomputed free space (return fell in a dropped line).
 */
599 dmsg_ioq_makeroom(dmsg_ioq_t *ioq, size_t needed)
604 bytes = ioq->fifo_cdx - ioq->fifo_beg;
605 nmax = sizeof(ioq->buf) - ioq->fifo_end;
606 if (bytes + nmax / 2 < needed) {
608 bcopy(ioq->buf + ioq->fifo_beg,
612 ioq->fifo_cdx -= ioq->fifo_beg;
614 if (ioq->fifo_cdn < ioq->fifo_end) {
615 bcopy(ioq->buf + ioq->fifo_cdn,
616 ioq->buf + ioq->fifo_cdx,
617 ioq->fifo_end - ioq->fifo_cdn);
619 ioq->fifo_end -= ioq->fifo_cdn - ioq->fifo_cdx;
620 ioq->fifo_cdn = ioq->fifo_cdx;
621 nmax = sizeof(ioq->buf) - ioq->fifo_end;
627 * Read the next ready message from the ioq, issuing I/O if needed.
628 * Caller should retry on a read-event when NULL is returned.
630 * If an error occurs during reception a DMSG_LNK_ERROR msg will
631 * be returned for each open transaction, then the ioq and iocom
632 * will be errored out and a non-transactional DMSG_LNK_ERROR
633 * msg will be returned as the final message. The caller should not call
634 * us again after the final message is returned.
636 * Thread localized, iocom->mtx not held.
/*
 * Receive-side state machine driving the ioq_rx FIFO through:
 *   HEADER1  - read/decrypt until the fixed-size core header is present,
 *              validate magic, derive hbytes/abytes, allocate the msg;
 *   HEADER2  - read the variable-size extended header, verify hdr_crc
 *              (byte-swapping for reversed-endian peers), copy into msg;
 *   AUXDATA1 - copy whatever decrypted payload is already in the FIFO;
 *   AUXDATA2 - read/decrypt the remainder, then verify aux_crc;
 *   ERROR    - drain simulated LNK_ERROR messages after a failure.
 * On socket error/EOF the ioq is errored out, all open transactions in
 * both state trees are marked DYING and fed simulated failures, and a
 * final non-transactional LNK_ERROR is produced.  On success the msg is
 * routed through dmsg_state_msgrx(); EALREADY (abort-after-closure)
 * discards the message and restarts.
 */
639 dmsg_ioq_read(dmsg_iocom_t *iocom)
641 dmsg_ioq_t *ioq = &iocom->ioq_rx;
654 * If a message is already pending we can just remove and
655 * return it. Message state has already been processed.
656 * (currently not implemented)
658 if ((msg = TAILQ_FIRST(&ioq->msgq)) != NULL) {
659 TAILQ_REMOVE(&ioq->msgq, msg, qentry);
662 atomic_clear_int(&iocom->flags, DMSG_IOCOMF_RREQ | DMSG_IOCOMF_RWORK);
665 * If the stream is errored out we stop processing it.
671 * Message read in-progress (msg is NULL at the moment). We don't
672 * allocate a msg until we have its core header.
674 nmax = sizeof(ioq->buf) - ioq->fifo_end;
675 bytes = ioq->fifo_cdx - ioq->fifo_beg; /* already decrypted */
679 case DMSG_MSGQ_STATE_HEADER1:
681 * Load the primary header, fail on any non-trivial read
682 * error or on EOF. Since the primary header is the same
683 * size is the message alignment it will never straddle
684 * the end of the buffer.
686 nmax = dmsg_ioq_makeroom(ioq, sizeof(msg->any.head));
687 if (bytes < sizeof(msg->any.head)) {
688 n = read(iocom->sock_fd,
689 ioq->buf + ioq->fifo_end,
693 ioq->error = DMSG_IOQ_ERROR_EOF;
696 if (errno != EINTR &&
697 errno != EINPROGRESS &&
699 ioq->error = DMSG_IOQ_ERROR_SOCK;
705 ioq->fifo_end += (size_t)n;
710 * Decrypt data received so far. Data will be decrypted
711 * in-place but might create gaps in the FIFO. Partial
712 * blocks are not immediately decrypted.
714 * WARNING! The header might be in the wrong endian, we
715 * do not fix it up until we get the entire
718 if (iocom->flags & DMSG_IOCOMF_CRYPTED) {
719 dmsg_crypto_decrypt(iocom, ioq);
721 ioq->fifo_cdx = ioq->fifo_end;
722 ioq->fifo_cdn = ioq->fifo_end;
724 bytes = ioq->fifo_cdx - ioq->fifo_beg;
727 * Insufficient data accumulated (msg is NULL, caller will
731 if (bytes < sizeof(msg->any.head))
735 * Check and fixup the core header. Note that the icrc
736 * has to be calculated before any fixups, but the crc
737 * fields in the msg may have to be swapped like everything
740 head = (void *)(ioq->buf + ioq->fifo_beg);
741 if (head->magic != DMSG_HDR_MAGIC &&
742 head->magic != DMSG_HDR_MAGIC_REV) {
743 fprintf(stderr, "%s: head->magic is bad %02x\n",
744 iocom->label, head->magic);
745 if (iocom->flags & DMSG_IOCOMF_CRYPTED)
746 fprintf(stderr, "(on encrypted link)\n");
747 ioq->error = DMSG_IOQ_ERROR_SYNC;
752 * Calculate the full header size and aux data size
754 if (head->magic == DMSG_HDR_MAGIC_REV) {
755 ioq->hbytes = (bswap32(head->cmd) & DMSGF_SIZE) *
757 aux_size = bswap32(head->aux_bytes);
759 ioq->hbytes = (head->cmd & DMSGF_SIZE) *
761 aux_size = head->aux_bytes;
763 ioq->abytes = DMSG_DOALIGN(aux_size);
764 ioq->unaligned_aux_size = aux_size;
765 if (ioq->hbytes < sizeof(msg->any.head) ||
766 ioq->hbytes > sizeof(msg->any) ||
767 ioq->abytes > DMSG_AUX_MAX) {
768 ioq->error = DMSG_IOQ_ERROR_FIELD;
773 * Allocate the message, the next state will fill it in.
775 * NOTE: The aux_data buffer will be sized to an aligned
776 * value and the aligned remainder zero'd for
779 * NOTE: Supply dummy state and a degenerate cmd without
780 * CREATE set. The message will temporarily be
781 * associated with state0 until later post-processing.
783 msg = dmsg_msg_alloc(&iocom->state0, aux_size,
784 ioq->hbytes / DMSG_ALIGN,
789 * Fall through to the next state. Make sure that the
790 * extended header does not straddle the end of the buffer.
791 * We still want to issue larger reads into our buffer,
792 * book-keeping is easier if we don't bcopy() yet.
794 * Make sure there is enough room for bloated encrypt data.
796 nmax = dmsg_ioq_makeroom(ioq, ioq->hbytes);
797 ioq->state = DMSG_MSGQ_STATE_HEADER2;
/* fallthrough */
799 case DMSG_MSGQ_STATE_HEADER2:
801 * Fill out the extended header.
804 if (bytes < ioq->hbytes) {
805 n = read(iocom->sock_fd,
806 ioq->buf + ioq->fifo_end,
810 ioq->error = DMSG_IOQ_ERROR_EOF;
813 if (errno != EINTR &&
814 errno != EINPROGRESS &&
816 ioq->error = DMSG_IOQ_ERROR_SOCK;
822 ioq->fifo_end += (size_t)n;
826 if (iocom->flags & DMSG_IOCOMF_CRYPTED) {
827 dmsg_crypto_decrypt(iocom, ioq);
829 ioq->fifo_cdx = ioq->fifo_end;
830 ioq->fifo_cdn = ioq->fifo_end;
832 bytes = ioq->fifo_cdx - ioq->fifo_beg;
835 * Insufficient data accumulated (set msg NULL so caller will
838 if (bytes < ioq->hbytes) {
844 * Calculate the extended header, decrypt data received
845 * so far. Handle endian-conversion for the entire extended
848 head = (void *)(ioq->buf + ioq->fifo_beg);
853 if (head->magic == DMSG_HDR_MAGIC_REV)
854 xcrc32 = bswap32(head->hdr_crc);
856 xcrc32 = head->hdr_crc;
/* crc is computed with hdr_crc zeroed; restore it afterwards */
858 if (dmsg_icrc32(head, ioq->hbytes) != xcrc32) {
859 ioq->error = DMSG_IOQ_ERROR_XCRC;
860 fprintf(stderr, "BAD-XCRC(%08x,%08x) %s\n",
861 xcrc32, dmsg_icrc32(head, ioq->hbytes),
866 head->hdr_crc = xcrc32;
868 if (head->magic == DMSG_HDR_MAGIC_REV) {
869 dmsg_bswap_head(head);
873 * Copy the extended header into the msg and adjust the
876 bcopy(head, &msg->any, ioq->hbytes);
879 * We are either done or we fall-through.
881 if (ioq->abytes == 0) {
882 ioq->fifo_beg += ioq->hbytes;
887 * Must adjust bytes (and the state) when falling through.
888 * nmax doesn't change.
890 ioq->fifo_beg += ioq->hbytes;
891 bytes -= ioq->hbytes;
892 ioq->state = DMSG_MSGQ_STATE_AUXDATA1;
/* fallthrough */
894 case DMSG_MSGQ_STATE_AUXDATA1:
896 * Copy the partial or complete [decrypted] payload from
897 * remaining bytes in the FIFO in order to optimize the
898 * makeroom call in the AUXDATA2 state. We have to
899 * fall-through either way so we can check the crc.
901 * msg->aux_size tracks our aux data.
903 * (Lets not complicate matters if the data is encrypted,
904 * since the data in-stream is not the same size as the
907 if (bytes >= ioq->abytes) {
908 bcopy(ioq->buf + ioq->fifo_beg, msg->aux_data,
910 msg->aux_size = ioq->abytes;
911 ioq->fifo_beg += ioq->abytes;
912 assert(ioq->fifo_beg <= ioq->fifo_cdx);
913 assert(ioq->fifo_cdx <= ioq->fifo_cdn);
914 bytes -= ioq->abytes;
916 bcopy(ioq->buf + ioq->fifo_beg, msg->aux_data,
918 msg->aux_size = bytes;
919 ioq->fifo_beg += bytes;
920 if (ioq->fifo_cdx < ioq->fifo_beg)
921 ioq->fifo_cdx = ioq->fifo_beg;
922 assert(ioq->fifo_beg <= ioq->fifo_cdx);
923 assert(ioq->fifo_cdx <= ioq->fifo_cdn);
928 ioq->state = DMSG_MSGQ_STATE_AUXDATA2;
/* fallthrough */
930 case DMSG_MSGQ_STATE_AUXDATA2:
932 * Make sure there is enough room for more data.
935 nmax = dmsg_ioq_makeroom(ioq, ioq->abytes - msg->aux_size);
938 * Read and decrypt more of the payload.
940 if (msg->aux_size < ioq->abytes) {
942 n = read(iocom->sock_fd,
943 ioq->buf + ioq->fifo_end,
947 ioq->error = DMSG_IOQ_ERROR_EOF;
950 if (errno != EINTR &&
951 errno != EINPROGRESS &&
953 ioq->error = DMSG_IOQ_ERROR_SOCK;
959 ioq->fifo_end += (size_t)n;
963 if (iocom->flags & DMSG_IOCOMF_CRYPTED) {
964 dmsg_crypto_decrypt(iocom, ioq);
966 ioq->fifo_cdx = ioq->fifo_end;
967 ioq->fifo_cdn = ioq->fifo_end;
969 bytes = ioq->fifo_cdx - ioq->fifo_beg;
971 if (bytes > ioq->abytes - msg->aux_size)
972 bytes = ioq->abytes - msg->aux_size;
975 bcopy(ioq->buf + ioq->fifo_beg,
976 msg->aux_data + msg->aux_size,
978 msg->aux_size += bytes;
979 ioq->fifo_beg += bytes;
983 * Insufficient data accumulated (set msg NULL so caller will
986 * Assert the auxillary data size is correct, then record the
987 * original unaligned size from the message header.
989 if (msg->aux_size < ioq->abytes) {
993 assert(msg->aux_size == ioq->abytes);
994 msg->aux_size = ioq->unaligned_aux_size;
997 * Check aux_crc, then we are done. Note that the crc
998 * is calculated over the aligned size, not the actual
1001 xcrc32 = dmsg_icrc32(msg->aux_data, ioq->abytes);
1002 if (xcrc32 != msg->any.head.aux_crc) {
1003 ioq->error = DMSG_IOQ_ERROR_ACRC;
1005 "iocom: ACRC error %08x vs %08x "
1006 "msgid %016jx msgcmd %08x auxsize %d\n",
1008 msg->any.head.aux_crc,
1009 (intmax_t)msg->any.head.msgid,
1011 msg->any.head.aux_bytes);
1015 case DMSG_MSGQ_STATE_ERROR:
1017 * Continued calls to drain recorded transactions (returning
1018 * a LNK_ERROR for each one), before we return the final
1021 assert(msg == NULL);
1025 * We don't double-return errors, the caller should not
1026 * have called us again after getting an error msg.
1033 * Check the message sequence. The iv[] should prevent any
1034 * possibility of a replay but we add this check anyway.
1036 if (msg && ioq->error == 0) {
1037 if ((msg->any.head.salt & 255) != (ioq->seq & 255)) {
1038 ioq->error = DMSG_IOQ_ERROR_MSGSEQ;
1045 * Handle error, RREQ, or completion
1047 * NOTE: nmax and bytes are invalid at this point, we don't bother
1048 * to update them when breaking out.
1051 dmsg_state_t *tmp_state;
1053 fprintf(stderr, "IOQ ERROR %d\n", ioq->error);
1055 * An unrecoverable error causes all active receive
1056 * transactions to be terminated with a LNK_ERROR message.
1058 * Once all active transactions are exhausted we set the
1059 * iocom ERROR flag and return a non-transactional LNK_ERROR
1060 * message, which should cause master processing loops to
1063 assert(ioq->msg == msg);
1071 * No more I/O read processing
1073 ioq->state = DMSG_MSGQ_STATE_ERROR;
1076 * Simulate a remote LNK_ERROR DELETE msg for any open
1077 * transactions, ending with a final non-transactional
1078 * LNK_ERROR (that the session can detect) when no
1079 * transactions remain.
1081 * NOTE: Temporarily supply state0 and a degenerate cmd
1082 * without CREATE set. The real state will be
1083 * assigned in the loop.
1085 * NOTE: We are simulating a received message using our
1086 * side of the state, so the DMSGF_REV* bits have
1089 pthread_mutex_lock(&iocom->mtx);
1090 dmsg_iocom_drain(iocom);
/* pick a leaf state (empty subq) to fail first, if any */
1093 RB_FOREACH(state, dmsg_state_tree, &iocom->staterd_tree) {
1094 atomic_set_int(&state->flags, DMSG_STATE_DYING);
1095 if (tmp_state == NULL && TAILQ_EMPTY(&state->subq))
1098 RB_FOREACH(state, dmsg_state_tree, &iocom->statewr_tree) {
1099 atomic_set_int(&state->flags, DMSG_STATE_DYING);
1100 if (tmp_state == NULL && TAILQ_EMPTY(&state->subq))
1105 dmsg_msg_simulate_failure(tmp_state, ioq->error);
1107 dmsg_msg_simulate_failure(&iocom->state0, ioq->error);
1109 pthread_mutex_unlock(&iocom->mtx);
1110 if (TAILQ_FIRST(&ioq->msgq))
1115 * For the iocom error case we want to set RWORK to indicate
1116 * that more messages might be pending.
1118 * It is possible to return NULL when there is more work to
1119 * do because each message has to be DELETEd in both
1120 * directions before we continue on with the next (though
1121 * this could be optimized). The transmit direction will
1125 atomic_set_int(&iocom->flags, DMSG_IOCOMF_RWORK);
1127 } else if (msg == NULL) {
1129 * Insufficient data received to finish building the message,
1130 * set RREQ and return NULL.
1132 * Leave ioq->msg intact.
1133 * Leave the FIFO intact.
1135 atomic_set_int(&iocom->flags, DMSG_IOCOMF_RREQ);
1138 * Continue processing msg.
1140 * The fifo has already been advanced past the message.
1141 * Trivially reset the FIFO indices if possible.
1143 * clear the FIFO if it is now empty and set RREQ to wait
1144 * for more from the socket. If the FIFO is not empty set
1145 * TWORK to bypass the poll so we loop immediately.
1147 if (ioq->fifo_beg == ioq->fifo_cdx &&
1148 ioq->fifo_cdn == ioq->fifo_end) {
1149 atomic_set_int(&iocom->flags, DMSG_IOCOMF_RREQ);
1155 atomic_set_int(&iocom->flags, DMSG_IOCOMF_RWORK);
1157 ioq->state = DMSG_MSGQ_STATE_HEADER1;
1161 * Handle message routing. Validates non-zero sources
1162 * and routes message. Error will be 0 if the message is
1165 * State processing only occurs for messages destined for us.
1167 if (DMsgDebugOpt >= 5) {
1169 "rxmsg cmd=%08x msgid=%016jx circ=%016jx\n",
1171 (intmax_t)msg->any.head.msgid,
1172 (intmax_t)msg->any.head.circuit);
1174 error = dmsg_state_msgrx(msg);
1178 * Abort-after-closure, throw message away and
1179 * start reading another.
1181 if (error == DMSG_IOQ_ERROR_EALREADY) {
1187 * Process real error and throw away message.
1192 /* no error, not routed. Fall through and return msg */
1198 * Calculate the header and data crc's and write a low-level message to
1199 * the connection. If aux_crc is non-zero the aux_data crc is already
1200 * assumed to have been set.
1202 * A non-NULL msg is added to the queue but not necessarily flushed.
1203 * Calling this function with msg == NULL will get a flush going.
1205 * (called from iocom_core only)
/*
 * Stage 1 of transmit: move queued messages from the cross-thread
 * txmsgq (under iocom->mtx) to a local queue, then for each message
 * fill in magic and salt (salt's low byte carries the tx sequence so
 * the receiver's replay check can match), compute aux_crc if not
 * pre-set, compute hdr_crc over the variable-size header with the
 * hdr_crc field zeroed, and append to ioq_tx.  Finally kicks stage 2
 * (dmsg_iocom_flush2) to do the actual writev.
 */
1208 dmsg_iocom_flush1(dmsg_iocom_t *iocom)
1210 dmsg_ioq_t *ioq = &iocom->ioq_tx;
1215 dmsg_msg_queue_t tmpq;
1217 atomic_clear_int(&iocom->flags, DMSG_IOCOMF_WREQ | DMSG_IOCOMF_WWORK);
1219 pthread_mutex_lock(&iocom->mtx);
1220 while ((msg = TAILQ_FIRST(&iocom->txmsgq)) != NULL) {
1221 TAILQ_REMOVE(&iocom->txmsgq, msg, qentry);
1222 TAILQ_INSERT_TAIL(&tmpq, msg, qentry);
1224 pthread_mutex_unlock(&iocom->mtx);
1226 while ((msg = TAILQ_FIRST(&tmpq)) != NULL) {
1228 * Process terminal connection errors.
1230 TAILQ_REMOVE(&tmpq, msg, qentry);
1232 TAILQ_INSERT_TAIL(&ioq->msgq, msg, qentry);
1238 * Finish populating the msg fields. The salt ensures that
1239 * the iv[] array is ridiculously randomized and we also
1240 * re-seed our PRNG every 32768 messages just to be sure.
1242 msg->any.head.magic = DMSG_HDR_MAGIC;
1243 msg->any.head.salt = (random() << 8) | (ioq->seq & 255);
1245 if ((ioq->seq & 32767) == 0)
1249 * Calculate aux_crc if 0, then calculate hdr_crc.
1251 if (msg->aux_size && msg->any.head.aux_crc == 0) {
1252 abytes = DMSG_DOALIGN(msg->aux_size);
1253 xcrc32 = dmsg_icrc32(msg->aux_data, abytes);
1254 msg->any.head.aux_crc = xcrc32;
1256 msg->any.head.aux_bytes = msg->aux_size;
1258 hbytes = (msg->any.head.cmd & DMSGF_SIZE) *
1260 msg->any.head.hdr_crc = 0;
1261 msg->any.head.hdr_crc = dmsg_icrc32(&msg->any.head, hbytes);
1264 * Enqueue the message (the flush codes handles stream
1267 TAILQ_INSERT_TAIL(&ioq->msgq, msg, qentry);
1270 dmsg_iocom_flush2(iocom);
1274 * Thread localized, iocom->mtx not held by caller.
1276 * (called from iocom_core via iocom_flush1 only)
/*
 * Stage 2 of transmit: build an iovec over the queued messages' headers
 * and (aligned) aux payloads, honoring the partial-send offsets
 * recorded in ioq->hbytes/ioq->abytes for the first message.  On a
 * crypted link the plaintext is first encrypted into the tx FIFO (with
 * compaction to keep tail space) and the writev sends from the FIFO;
 * otherwise the writev sends straight from the msg structures and
 * nact == n.  After the write, fully-sent messages are popped off
 * ioq->msgq and cleaned up; a short write leaves the partial offsets
 * in hbytes/abytes.  EINTR/EINPROGRESS (and presumably EWOULDBLOCK —
 * the third errno test fell in a dropped line) re-arm WREQ; any other
 * errno errors the ioq and drains the iocom.
 */
1279 dmsg_iocom_flush2(dmsg_iocom_t *iocom)
1281 dmsg_ioq_t *ioq = &iocom->ioq_tx;
1284 struct iovec iov[DMSG_IOQ_MAXIOVEC];
1293 dmsg_iocom_drain(iocom);
1298 * Pump messages out the connection by building an iovec.
1300 * ioq->hbytes/ioq->abytes tracks how much of the first message
1301 * in the queue has been successfully written out, so we can
1309 TAILQ_FOREACH(msg, &ioq->msgq, qentry) {
1310 hbytes = (msg->any.head.cmd & DMSGF_SIZE) *
1312 abytes = DMSG_DOALIGN(msg->aux_size);
1313 assert(hoff <= hbytes && aoff <= abytes);
1315 if (hoff < hbytes) {
1316 size_t maxlen = hbytes - hoff;
1317 if (maxlen > sizeof(ioq->buf) / 2)
1318 maxlen = sizeof(ioq->buf) / 2;
1319 iov[iovcnt].iov_base = (char *)&msg->any.head + hoff;
1320 iov[iovcnt].iov_len = maxlen;
1323 if (iovcnt == DMSG_IOQ_MAXIOVEC ||
1324 maxlen != hbytes - hoff) {
1328 if (aoff < abytes) {
1329 size_t maxlen = abytes - aoff;
1330 if (maxlen > sizeof(ioq->buf) / 2)
1331 maxlen = sizeof(ioq->buf) / 2;
1333 assert(msg->aux_data != NULL);
1334 iov[iovcnt].iov_base = (char *)msg->aux_data + aoff;
1335 iov[iovcnt].iov_len = maxlen;
1338 if (iovcnt == DMSG_IOQ_MAXIOVEC ||
1339 maxlen != abytes - aoff) {
1350 * Encrypt and write the data. The crypto code will move the
1351 * data into the fifo and adjust the iov as necessary. If
1352 * encryption is disabled the iov is left alone.
1354 * May return a smaller iov (thus a smaller n), with aggregated
1355 * chunks. May reduce nmax to what fits in the FIFO.
1357 * This function sets nact to the number of original bytes now
1358 * encrypted, adding to the FIFO some number of bytes that might
1359 * be greater depending on the crypto mechanic. iov[] is adjusted
1360 * to point at the FIFO if necessary.
1362 * NOTE: The return value from the writev() is the post-encrypted
1363 * byte count, not the plaintext count.
1365 if (iocom->flags & DMSG_IOCOMF_CRYPTED) {
1367 * Make sure the FIFO has a reasonable amount of space
1368 * left (if not completely full).
1370 * In this situation we are staging the encrypted message
1371 * data in the FIFO. (nact) represents how much plaintext
1372 * has been staged, (n) represents how much encrypted data
1373 * has been flushed. The two are independent of each other.
1375 if (ioq->fifo_beg > sizeof(ioq->buf) / 2 &&
1376 sizeof(ioq->buf) - ioq->fifo_end < DMSG_ALIGN * 2) {
1377 bcopy(ioq->buf + ioq->fifo_beg, ioq->buf,
1378 ioq->fifo_end - ioq->fifo_beg);
1379 ioq->fifo_cdx -= ioq->fifo_beg;
1380 ioq->fifo_cdn -= ioq->fifo_beg;
1381 ioq->fifo_end -= ioq->fifo_beg;
1385 iovcnt = dmsg_crypto_encrypt(iocom, ioq, iov, iovcnt, &nact);
1386 n = writev(iocom->sock_fd, iov, iovcnt);
1391 if (ioq->fifo_beg == ioq->fifo_end) {
1399 * We don't mess with the nact returned by the crypto_encrypt
1400 * call, which represents the filling of the FIFO. (n) tells
1401 * us how much we were able to write from the FIFO. The two
1402 * are different beasts when encrypting.
1406 * In this situation we are not staging the messages to the
1407 * FIFO but instead writing them directly from the msg
1408 * structure(s), so (nact) is basically (n).
1410 n = writev(iocom->sock_fd, iov, iovcnt);
1418 * Clean out the transmit queue based on what we successfully
1419 * sent (nact is the plaintext count). ioq->hbytes/abytes
1420 * represents the portion of the first message previously sent.
1422 while ((msg = TAILQ_FIRST(&ioq->msgq)) != NULL) {
1423 hbytes = (msg->any.head.cmd & DMSGF_SIZE) *
1425 abytes = DMSG_DOALIGN(msg->aux_size);
1427 if ((size_t)nact < hbytes - ioq->hbytes) {
1428 ioq->hbytes += nact;
1432 nact -= hbytes - ioq->hbytes;
1433 ioq->hbytes = hbytes;
1434 if ((size_t)nact < abytes - ioq->abytes) {
1435 ioq->abytes += nact;
1439 nact -= abytes - ioq->abytes;
1440 /* ioq->abytes = abytes; optimized out */
1444 "txmsg cmd=%08x msgid=%016jx circ=%016jx\n",
1446 (intmax_t)msg->any.head.msgid,
1447 (intmax_t)msg->any.head.circuit);
1450 TAILQ_REMOVE(&ioq->msgq, msg, qentry);
1459 * Process the return value from the write w/regards to blocking.
1462 if (errno != EINTR &&
1463 errno != EINPROGRESS &&
1468 ioq->error = DMSG_IOQ_ERROR_SOCK;
1469 dmsg_iocom_drain(iocom);
1472 * Wait for socket buffer space
1474 atomic_set_int(&iocom->flags, DMSG_IOCOMF_WREQ);
1477 atomic_set_int(&iocom->flags, DMSG_IOCOMF_WREQ);
1480 dmsg_iocom_drain(iocom);
1485 * Kill pending msgs on ioq_tx and adjust the flags such that no more
1486 * write events will occur. We don't kill read msgs because we want
1487 * the caller to pull off our contrived terminal error msg to detect
1488 * the connection failure.
1490 * Localized to iocom_core thread, iocom->mtx not held by caller.
/*
 * Flush-kill the transmit queue: suppress further write events and
 * discard every message still queued for transmission on ioq_tx.
 * Read-side messages are deliberately left alone (see block comment above).
 */
1493 dmsg_iocom_drain(dmsg_iocom_t *iocom)
1495 dmsg_ioq_t *ioq = &iocom->ioq_tx;
/* No more write events: clear both wait-for-space (WREQ) and work-pending (WWORK) */
1498 atomic_clear_int(&iocom->flags, DMSG_IOCOMF_WREQ | DMSG_IOCOMF_WWORK);
/* Unlink every pending tx message (disposal of each msg is elided from this view) */
1502 while ((msg = TAILQ_FIRST(&ioq->msgq)) != NULL) {
1503 TAILQ_REMOVE(&ioq->msgq, msg, qentry);
1510 * Write a message to an iocom, with additional state processing.
/*
 * Queue a message for transmission on its iocom, updating transaction
 * state (state->txcmd/icmd, msgid) under iocom->mtx, then wake the I/O
 * thread via the wakeup pipe.  The I/O thread performs CRC generation
 * and encryption, not this function.
 */
1513 dmsg_msg_write(dmsg_msg_t *msg)
1515 dmsg_iocom_t *iocom = msg->state->iocom;
1516 dmsg_state_t *state;
1519 pthread_mutex_lock(&iocom->mtx);
1523 * Make sure the parent transaction is still open in the transmit
1524 * direction. If it isn't the message is dead and we have to
1525 * potentially simulate a rxmsg terminating the transaction.
1527 if (state->parent->txcmd & DMSGF_DELETE) {
1528 fprintf(stderr, "dmsg_msg_write: EARLY TERMINATION\n");
1529 dmsg_msg_simulate_failure(state, DMSG_ERR_LOSTLINK);
1530 dmsg_state_cleanuptx(iocom, msg);
1532 pthread_mutex_unlock(&iocom->mtx);
1537 * Process state data into the message as needed, then update the
1538 * state based on the message.
1540 if ((state->flags & DMSG_STATE_ROOT) == 0) {
1542 * Existing transaction (could be reply). It is also
1543 * possible for this to be the first reply (CREATE is set),
1544 * in which case we populate state->txcmd.
1546 * state->txcmd is adjusted to hold the final message cmd,
1547 * and we also make sure to set the CREATE bit here. We did
1548 * not set it in dmsg_msg_alloc() because that would have
1549 * not been serialized (state could have gotten ripped out
1550 * from under the message prior to it being transmitted).
1552 if ((msg->any.head.cmd & (DMSGF_CREATE | DMSGF_REPLY)) ==
1554 state->txcmd = msg->any.head.cmd & ~DMSGF_DELETE;
1555 state->icmd = state->txcmd & DMSGF_BASECMDMASK;
/* Transactional messages always carry the state's msgid */
1557 msg->any.head.msgid = state->msgid;
1559 if (msg->any.head.cmd & DMSGF_CREATE) {
1560 state->txcmd = msg->any.head.cmd & ~DMSGF_DELETE;
/* Handles DELETE post-processing; may be the state's final tx reference */
1563 dmsg_state_cleanuptx(iocom, msg);
1567 "MSGWRITE %016jx %08x\n",
1568 msg->any.head.msgid, msg->any.head.cmd);
1572 * Queue it for output, wake up the I/O pthread. Note that the
1573 * I/O thread is responsible for generating the CRCs and encryption.
1575 TAILQ_INSERT_TAIL(&iocom->txmsgq, msg, qentry);
1577 write(iocom->wakeupfds[1], &dummy, 1); /* XXX optimize me */
1578 pthread_mutex_unlock(&iocom->mtx);
1582 * iocom->mtx must be held by caller.
/*
 * Fabricate a received message reporting a failure on (state) and queue
 * it on the receive ioq so normal rx processing tears the transaction
 * down.  Three cases: state0 (whole-link EOF + final LNK_ERROR), a
 * remotely-initiated transaction, or a locally-initiated transaction
 * (the latter two simulate the peer sending us a DELETE).
 * iocom->mtx must be held by the caller (see comment above).
 */
1586 dmsg_msg_simulate_failure(dmsg_state_t *state, int error)
1588 dmsg_iocom_t *iocom = state->iocom;
1593 if (state == &iocom->state0) {
1595 * No active local or remote transactions remain.
1596 * Generate a final LNK_ERROR and flag EOF.
1598 msg = dmsg_msg_alloc_locked(&iocom->state0, 0,
1601 msg->any.head.error = error;
1602 atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
1603 fprintf(stderr, "EOF ON SOCKET %d\n", iocom->sock_fd);
1604 } else if (state->flags & DMSG_STATE_OPPOSITE) {
1606 * Active remote transactions are still present.
1607 * Simulate the other end sending us a DELETE.
1609 if (state->rxcmd & DMSGF_DELETE) {
/* rx side already closed: log, stall briefly, and retry later (XXX) */
1611 "iocom: ioq error(rd) %d sleeping "
1612 "state %p rxcmd %08x txcmd %08x "
1614 error, state, state->rxcmd,
1615 state->txcmd, state->func);
1616 usleep(100000); /* XXX */
1617 atomic_set_int(&iocom->flags,
1620 fprintf(stderr, "SIMULATE ERROR1\n");
1621 msg = dmsg_msg_alloc_locked(&iocom->state0, 0,
1624 /*state->txcmd |= DMSGF_DELETE;*/
1626 msg->any.head.error = error;
/* Address the simulated message at this transaction and its circuit */
1627 msg->any.head.msgid = state->msgid;
1628 msg->any.head.circuit = state->parent->msgid;
1629 msg->any.head.cmd |= DMSGF_ABORT |
1631 if ((state->parent->flags &
1632 DMSG_STATE_OPPOSITE) == 0) {
1633 msg->any.head.cmd |= DMSGF_REVCIRC;
1638 * Active local transactions are still present.
1639 * Simulate the other end sending us a DELETE.
1641 if (state->rxcmd & DMSGF_DELETE) {
1643 "iocom: ioq error(wr) %d sleeping "
1644 "state %p rxcmd %08x txcmd %08x "
1646 error, state, state->rxcmd,
1647 state->txcmd, state->func);
1648 usleep(100000); /* XXX */
1649 atomic_set_int(&iocom->flags,
1652 fprintf(stderr, "SIMULATE ERROR1\n");
1653 msg = dmsg_msg_alloc_locked(&iocom->state0, 0,
1657 msg->any.head.error = error;
1658 msg->any.head.msgid = state->msgid;
1659 msg->any.head.circuit = state->parent->msgid;
1660 msg->any.head.cmd |= DMSGF_ABORT |
1664 if ((state->parent->flags &
1665 DMSG_STATE_OPPOSITE) == 0) {
1666 msg->any.head.cmd |= DMSGF_REVCIRC;
/* If our rx side never saw CREATE, supply it so the simulated DELETE matches */
1668 if ((state->rxcmd & DMSGF_CREATE) == 0)
1669 msg->any.head.cmd |= DMSGF_CREATE;
/* Hand the fabricated msg to the receive path and flag read-work pending */
1673 TAILQ_INSERT_TAIL(&iocom->ioq_rx.msgq, msg, qentry);
1674 atomic_set_int(&iocom->flags, DMSG_IOCOMF_RWORK);
1679 * This is a shortcut to formulate a reply to msg with a simple error code,
1680 * It can reply to and terminate a transaction, or it can reply to a one-way
1681 * messages. A DMSG_LNK_ERROR command code is utilized to encode
1682 * the error code (which can be 0). Not all transactions are terminated
1683 * with DMSG_LNK_ERROR status (the low level only cares about the
1684 * MSGF_DELETE flag), but most are.
1686 * Replies to one-way messages are a bit of an oxymoron but the feature
1687 * is used by the debug (DBG) protocol.
1689 * The reply contains no extended data.
/*
 * Reply to (msg) with a DMSG_LNK_ERROR carrying (error), terminating
 * our transmit direction with DELETE.  Also handles replies to one-off
 * messages.  Returns silently if our direction is already closed.
 */
1692 dmsg_msg_reply(dmsg_msg_t *msg, uint32_t error)
1694 dmsg_state_t *state = msg->state;
1699 * Reply with a simple error code and terminate the transaction.
1701 cmd = DMSG_LNK_ERROR;
1704 * Check if our direction has even been initiated yet, set CREATE.
1706 * Check what direction this is (command or reply direction). Note
1707 * that txcmd might not have been initiated yet.
1709 * If our direction has already been closed we just return without
1712 if ((state->flags & DMSG_STATE_ROOT) == 0) {
1713 if (state->txcmd & DMSGF_DELETE)
1715 if (state->txcmd & DMSGF_REPLY)
/* Terminating reply: close our transmit direction */
1717 cmd |= DMSGF_DELETE;
1719 if ((msg->any.head.cmd & DMSGF_REPLY) == 0)
1724 * Allocate the message and associate it with the existing state.
1725 * We cannot pass DMSGF_CREATE to msg_alloc() because that may
1726 * allocate new state. We have our state already.
1728 nmsg = dmsg_msg_alloc(state, 0, cmd, NULL, NULL);
1729 if ((state->flags & DMSG_STATE_ROOT) == 0) {
/* First message in our direction must carry CREATE */
1730 if ((state->txcmd & DMSGF_CREATE) == 0)
1731 nmsg->any.head.cmd |= DMSGF_CREATE;
1733 nmsg->any.head.error = error;
1735 dmsg_msg_write(nmsg);
1739 * Similar to dmsg_msg_reply() but leave the transaction open. That is,
1740 * we are generating a streaming reply or an intermediate acknowledgement
1741 * of some sort as part of the higher level protocol, with more to come
/*
 * Like dmsg_msg_reply() but does NOT set DELETE, leaving the
 * transaction open for a streaming/intermediate reply (see comment
 * above).  Returns silently if our direction is already closed.
 */
1745 dmsg_msg_result(dmsg_msg_t *msg, uint32_t error)
1747 dmsg_state_t *state = msg->state;
1753 * Reply with a simple error code and terminate the transaction.
1755 cmd = DMSG_LNK_ERROR;
1758 * Check if our direction has even been initiated yet, set CREATE.
1760 * Check what direction this is (command or reply direction). Note
1761 * that txcmd might not have been initiated yet.
1763 * If our direction has already been closed we just return without
1766 if ((state->flags & DMSG_STATE_ROOT) == 0) {
1767 if (state->txcmd & DMSGF_DELETE)
1769 if (state->txcmd & DMSGF_REPLY)
1771 /* continuing transaction, do not set MSGF_DELETE */
1773 if ((msg->any.head.cmd & DMSGF_REPLY) == 0)
1776 nmsg = dmsg_msg_alloc(state, 0, cmd, NULL, NULL);
1777 if ((state->flags & DMSG_STATE_ROOT) == 0) {
/* First message in our direction must carry CREATE */
1778 if ((state->txcmd & DMSGF_CREATE) == 0)
1779 nmsg->any.head.cmd |= DMSGF_CREATE;
1781 nmsg->any.head.error = error;
1783 dmsg_msg_write(nmsg);
1787 * Terminate a transaction given a state structure by issuing a DELETE.
1788 * (the state structure must not be &iocom->state0)
/*
 * Terminate (state) by issuing a LNK_ERROR with DELETE set.
 * No-op if we already transmitted a DELETE.  (state) must not be
 * &iocom->state0 (see comment above).
 */
1791 dmsg_state_reply(dmsg_state_t *state, uint32_t error)
1794 uint32_t cmd = DMSG_LNK_ERROR | DMSGF_DELETE;
1797 * Nothing to do if we already transmitted a delete
1799 if (state->txcmd & DMSGF_DELETE)
1803 * Set REPLY if the other end initiated the command. Otherwise
1804 * we are the command direction.
1806 if (state->txcmd & DMSGF_REPLY)
1809 nmsg = dmsg_msg_alloc(state, 0, cmd, NULL, NULL);
1810 if ((state->flags & DMSG_STATE_ROOT) == 0) {
/* First message in our direction must carry CREATE */
1811 if ((state->txcmd & DMSGF_CREATE) == 0)
1812 nmsg->any.head.cmd |= DMSGF_CREATE;
1814 nmsg->any.head.error = error;
1815 dmsg_msg_write(nmsg);
1819 * Terminate a transaction given a state structure by issuing a DELETE.
1820 * (the state structure must not be &iocom->state0)
/*
 * Like dmsg_state_reply() but without DELETE: send a LNK_ERROR status
 * on (state) while leaving the transaction open.  No-op if we already
 * transmitted a DELETE.  (state) must not be &iocom->state0.
 */
1823 dmsg_state_result(dmsg_state_t *state, uint32_t error)
1826 uint32_t cmd = DMSG_LNK_ERROR;
1829 * Nothing to do if we already transmitted a delete
1831 if (state->txcmd & DMSGF_DELETE)
1835 * Set REPLY if the other end initiated the command. Otherwise
1836 * we are the command direction.
1838 if (state->txcmd & DMSGF_REPLY)
1841 nmsg = dmsg_msg_alloc(state, 0, cmd, NULL, NULL);
1842 if ((state->flags & DMSG_STATE_ROOT) == 0) {
/* First message in our direction must carry CREATE */
1843 if ((state->txcmd & DMSGF_CREATE) == 0)
1844 nmsg->any.head.cmd |= DMSGF_CREATE;
1846 nmsg->any.head.error = error;
1847 dmsg_msg_write(nmsg);
1850 /************************************************************************
1851 * TRANSACTION STATE HANDLING *
1852 ************************************************************************
1857 * Process state tracking for a message after reception, prior to execution.
1858 * Possibly route the message (consuming it).
1860 * Called with msglk held and the msg dequeued.
1862 * All messages are called with dummy state and return actual state.
1863 * (One-off messages often just return the same dummy state).
1865 * May request that caller discard the message by setting *discardp to 1.
1866 * The returned state is not used in this case and is allowed to be NULL.
1870 * These routines handle persistent and command/reply message state via the
1871 * CREATE and DELETE flags. The first message in a command or reply sequence
1872 * sets CREATE, the last message in a command or reply sequence sets DELETE.
1874 * There can be any number of intermediate messages belonging to the same
1875 * sequence sent in between the CREATE message and the DELETE message,
1876 * which set neither flag. This represents a streaming command or reply.
1878 * Any command message received with CREATE set expects a reply sequence to
1879 * be returned. Reply sequences work the same as command sequences except the
1880 * REPLY bit is also sent. Both the command side and reply side can
1881 * degenerate into a single message with both CREATE and DELETE set. Note
1882 * that one side can be streaming and the other side not, or neither, or both.
1884 * The msgid is unique for the initiator. That is, two sides sending a new
1885 * message can use the same msgid without colliding.
1889 * ABORT sequences work by setting the ABORT flag along with normal message
1890 * state. However, ABORTs can also be sent on half-closed messages, that is
1891 * even if the command or reply side has already sent a DELETE, as long as
1892 * the message has not been fully closed it can still send an ABORT+DELETE
1893 * to terminate the half-closed message state.
1895 * Since ABORT+DELETEs can race we silently discard ABORT's for message
1896 * state which has already been fully closed. REPLY+ABORT+DELETEs can
1897 * also race, and in this situation the other side might have already
1898 * initiated a new unrelated command with the same message id. Since
1899 * the abort has not set the CREATE flag the situation can be detected
1900 * and the message will also be discarded.
1902 * Non-blocking requests can be initiated with ABORT+CREATE[+DELETE].
1903 * The ABORT request is essentially integrated into the command instead
1904 * of being sent later on. In this situation the command implementation
1905 * detects that CREATE and ABORT are both set (vs ABORT alone) and can
1906 * special-case non-blocking operation for the command.
1908 * NOTE! Messages with ABORT set without CREATE or DELETE are considered
1909 * to be mid-stream aborts for command/reply sequences. ABORTs on
1910 * one-way messages are not supported.
1912 * NOTE! If a command sequence does not support aborts the ABORT flag is
1917 * One-off messages (no reply expected) are sent without an established
1918 * transaction. CREATE and DELETE are left clear and the msgid is usually 0.
1919 * For one-off messages sent over circuits msgid generally MUST be 0.
1921 * One-off messages cannot be aborted and typically aren't processed
1922 * by these routines. Order is still guaranteed for messages sent over
1923 * the same circuit. The REPLY bit can be used to distinguish whether
1924 * a one-off message is a command or reply. For example, one-off replies
1925 * will typically just contain status updates.
/*
 * Per-message receive-side state tracking (see the large block comment
 * above for the CREATE/DELETE/ABORT protocol).  Looks up the circuit
 * (pstate) and transaction (state) in the iocom RB trees, creates new
 * state on CREATE, detects duplicate/missing/reused state, and finally
 * computes msg->tcmd for the easy-switch dispatch.
 */
1928 dmsg_state_msgrx(dmsg_msg_t *msg)
1930 dmsg_iocom_t *iocom = msg->state->iocom;
1931 dmsg_state_t *state;
1932 dmsg_state_t *pstate;
1933 dmsg_state_t sdummy;
1936 pthread_mutex_lock(&iocom->mtx);
1939 * Lookup the circuit (pstate). The circuit will be an open
1940 * transaction. The REVCIRC bit in the message tells us which side
1943 if (msg->any.head.circuit) {
/* sdummy is an on-stack key for the RB_FIND by msgid */
1944 sdummy.msgid = msg->any.head.circuit;
1946 if (msg->any.head.cmd & DMSGF_REVCIRC) {
1947 pstate = RB_FIND(dmsg_state_tree,
1948 &iocom->statewr_tree,
1951 pstate = RB_FIND(dmsg_state_tree,
1952 &iocom->staterd_tree,
1955 if (pstate == NULL) {
1957 "missing parent in stacked trans %s\n",
1959 error = DMSG_IOQ_ERROR_TRANS;
1960 pthread_mutex_unlock(&iocom->mtx);
/* circuit 0: parent is the iocom's root state */
1964 pstate = &iocom->state0;
1970 * If received msg is a command state is on staterd_tree.
1971 * If received msg is a reply state is on statewr_tree.
1972 * Otherwise there is no state (retain &iocom->state0)
1974 sdummy.msgid = msg->any.head.msgid;
1975 if (msg->any.head.cmd & DMSGF_REVTRANS)
1976 state = RB_FIND(dmsg_state_tree, &iocom->statewr_tree, &sdummy);
1978 state = RB_FIND(dmsg_state_tree, &iocom->staterd_tree, &sdummy);
1982 * Message over an existing transaction (CREATE should not
1986 assert(pstate == state->parent);
1989 * Either a new transaction (if CREATE set) or a one-off.
1994 pthread_mutex_unlock(&iocom->mtx);
1997 * Switch on CREATE, DELETE, REPLY, and also handle ABORT from
1998 * inside the case statements.
2000 * Construct new state as necessary.
2002 switch(msg->any.head.cmd & (DMSGF_CREATE | DMSGF_DELETE |
2005 case DMSGF_CREATE | DMSGF_DELETE:
2007 * Create new sub-transaction under pstate.
2008 * (any DELETE is handled in post-processing of msg).
2010 * (During routing the msgid was made unique for this
2011 * direction over the comlink, so our RB trees can be
2012 * iocom-based instead of state-based).
2014 if (state != pstate) {
/* msgid already present in tree: protocol violation */
2016 "duplicate transaction %s\n",
2018 error = DMSG_IOQ_ERROR_TRANS;
2024 * Allocate the new state.
2026 state = malloc(sizeof(*state));
2027 atomic_add_int(&dmsg_state_count, 1);
2028 bzero(state, sizeof(*state));
2029 TAILQ_INIT(&state->subq);
/* parent keeps a ref for as long as this sub-state points at it */
2030 dmsg_state_hold(pstate);
2032 state->parent = pstate;
2033 state->iocom = iocom;
/* OPPOSITE: this transaction was initiated by the remote end */
2034 state->flags = DMSG_STATE_DYNAMIC |
2035 DMSG_STATE_OPPOSITE;
2036 state->msgid = msg->any.head.msgid;
2037 state->txcmd = DMSGF_REPLY;
2038 state->rxcmd = msg->any.head.cmd & ~DMSGF_DELETE;
2039 state->icmd = state->rxcmd & DMSGF_BASECMDMASK;
2041 pthread_mutex_lock(&iocom->mtx);
2042 RB_INSERT(dmsg_state_tree, &iocom->staterd_tree, state);
2043 TAILQ_INSERT_TAIL(&pstate->subq, state, entry);
2044 state->flags |= DMSG_STATE_INSERTED;
2047 * If the parent is a relay set up the state handler to
2048 * automatically route the message. Local processing will
2051 * (state relays are seeded by SPAN processing)
2054 state->func = dmsg_state_relay;
2055 pthread_mutex_unlock(&iocom->mtx);
2060 "create state %p id=%08x on iocom staterd %p\n",
2061 state, (uint32_t)state->msgid, iocom);
2066 * Persistent state is expected but might not exist if an
2067 * ABORT+DELETE races the close.
2069 * (any DELETE is handled in post-processing of msg).
2071 if (state == pstate) {
/* state not found: tolerate racing ABORT, otherwise protocol error */
2072 if (msg->any.head.cmd & DMSGF_ABORT) {
2073 error = DMSG_IOQ_ERROR_EALREADY;
2075 fprintf(stderr, "missing-state %s\n",
2077 error = DMSG_IOQ_ERROR_TRANS;
2084 * Handle another ABORT+DELETE case if the msgid has already
2087 if ((state->rxcmd & DMSGF_CREATE) == 0) {
2088 if (msg->any.head.cmd & DMSGF_ABORT) {
2089 error = DMSG_IOQ_ERROR_EALREADY;
2091 fprintf(stderr, "reused-state %s\n",
2093 error = DMSG_IOQ_ERROR_TRANS;
2102 * Check for mid-stream ABORT command received, otherwise
2105 if (msg->any.head.cmd & DMSGF_ABORT) {
2106 if ((state == pstate) ||
2107 (state->rxcmd & DMSGF_CREATE) == 0) {
2108 error = DMSG_IOQ_ERROR_EALREADY;
2114 case DMSGF_REPLY | DMSGF_CREATE:
2115 case DMSGF_REPLY | DMSGF_CREATE | DMSGF_DELETE:
2117 * When receiving a reply with CREATE set the original
2118 * persistent state message should already exist.
2120 if (state == pstate) {
2121 fprintf(stderr, "no-state(r) %s\n",
2123 error = DMSG_IOQ_ERROR_TRANS;
/* reply direction must match what the state expects */
2127 assert(((state->rxcmd ^ msg->any.head.cmd) & DMSGF_REPLY) == 0);
2128 state->rxcmd = msg->any.head.cmd & ~DMSGF_DELETE;
2131 case DMSGF_REPLY | DMSGF_DELETE:
2133 * Received REPLY+ABORT+DELETE in case where msgid has
2134 * already been fully closed, ignore the message.
2136 if (state == pstate) {
2137 if (msg->any.head.cmd & DMSGF_ABORT) {
2138 error = DMSG_IOQ_ERROR_EALREADY;
2140 fprintf(stderr, "no-state(r,d) %s\n",
2142 error = DMSG_IOQ_ERROR_TRANS;
2149 * Received REPLY+ABORT+DELETE in case where msgid has
2150 * already been reused for an unrelated message,
2151 * ignore the message.
2153 if ((state->rxcmd & DMSGF_CREATE) == 0) {
2154 if (msg->any.head.cmd & DMSGF_ABORT) {
2155 error = DMSG_IOQ_ERROR_EALREADY;
2157 fprintf(stderr, "reused-state(r,d) %s\n",
2159 error = DMSG_IOQ_ERROR_TRANS;
2168 * Check for mid-stream ABORT reply received to sent command.
2170 if (msg->any.head.cmd & DMSGF_ABORT) {
2171 if (state == pstate ||
2172 (state->rxcmd & DMSGF_CREATE) == 0) {
2173 error = DMSG_IOQ_ERROR_EALREADY;
2182 * Calculate the easy-switch() transactional command. Represents
2183 * the outer-transaction command for any transaction-create or
2184 * transaction-delete, and the inner message command for any
2185 * non-transaction or inside-transaction command. tcmd will be
2186 * set to 0 for any messaging error condition.
2188 * The two can be told apart because outer-transaction commands
2189 * always have a DMSGF_CREATE and/or DMSGF_DELETE flag.
2191 if (msg->any.head.cmd & (DMSGF_CREATE | DMSGF_DELETE)) {
2192 if ((state->flags & DMSG_STATE_ROOT) == 0) {
2193 msg->tcmd = (msg->state->icmd & DMSGF_BASECMDMASK) |
2194 (msg->any.head.cmd & (DMSGF_CREATE |
2201 msg->tcmd = msg->any.head.cmd & DMSGF_CMDSWMASK;
2207 * Route the message and handle pair-state processing.
/*
 * Relay a message from a local transaction (lmsg) to its paired remote
 * transaction.  On the first CREATE (non-REPLY) a new remote state is
 * allocated and cross-linked via the ->relay pointers; subsequent
 * messages reuse the established pairing.  Header extension and aux
 * data are carried over; aux ownership moves to the relayed message.
 */
2210 dmsg_state_relay(dmsg_msg_t *lmsg)
2212 dmsg_state_t *lpstate;
2213 dmsg_state_t *rpstate;
2214 dmsg_state_t *lstate;
2215 dmsg_state_t *rstate;
2218 if ((lmsg->any.head.cmd & (DMSGF_CREATE | DMSGF_REPLY)) ==
2221 * New sub-transaction, establish new state and relay.
2223 lstate = lmsg->state;
2224 lpstate = lstate->parent;
2225 rpstate = lpstate->relay;
2226 assert(lstate->relay == NULL);
2227 assert(rpstate != NULL);
2229 rmsg = dmsg_msg_alloc(rpstate,
2232 dmsg_state_relay, NULL);
2233 rstate = rmsg->state;
/* Cross-link the two sides and hold a ref on each for the pairing */
2234 rstate->relay = lstate;
2235 lstate->relay = rstate;
2236 dmsg_state_hold(lstate);
2237 dmsg_state_hold(rstate);
2240 * State & relay already established
2242 lstate = lmsg->state;
2243 rstate = lstate->relay;
2244 assert(rstate != NULL);
2246 rmsg = dmsg_msg_alloc(rstate,
2249 dmsg_state_relay, NULL);
/* Copy any extended header bytes beyond the core header */
2251 if (lmsg->hdr_size > sizeof(lmsg->any.head)) {
2252 bcopy(&lmsg->any.head + 1, &rmsg->any.head + 1,
2253 lmsg->hdr_size - sizeof(lmsg->any.head));
2255 rmsg->any.head.error = lmsg->any.head.error;
2256 rmsg->any.head.reserved02 = lmsg->any.head.reserved02;
2257 rmsg->any.head.reserved18 = lmsg->any.head.reserved18;
/* Transfer aux-data ownership to the relayed message */
2258 rmsg->aux_data = lmsg->aux_data;
2259 lmsg->aux_data = NULL;
2261 fprintf(stderr, "RELAY %08x\n", rmsg->any.head.cmd);
2263 dmsg_msg_write(rmsg);
2267 * Cleanup and retire msg after processing
/*
 * Receive-side post-processing: retire (msg) and, when it carries the
 * terminating DELETE and our transmit side is also closed, tear the
 * transaction state out of the RB tree and parent subq.
 * Mirror image of dmsg_state_cleanuptx().
 */
2270 dmsg_state_cleanuprx(dmsg_iocom_t *iocom, dmsg_msg_t *msg)
2272 dmsg_state_t *state;
2273 dmsg_state_t *pstate;
2275 assert(msg->state->iocom == iocom);
2277 if (state->flags & DMSG_STATE_ROOT) {
2279 * Free a non-transactional message, there is no state
2283 } else if (msg->any.head.cmd & DMSGF_DELETE) {
2285 * Message terminating transaction, destroy the related
2286 * state, the original message, and this message (if it
2287 * isn't the original message due to a CREATE|DELETE).
2289 * It's possible for governing state to terminate while
2290 * sub-transactions still exist. This is allowed but
2291 * will cause sub-transactions to recursively fail.
2292 * Further reception of sub-transaction messages will be
2293 * impossible because the circuit will no longer exist.
2294 * (XXX need code to make sure that happens properly).
2296 pthread_mutex_lock(&iocom->mtx);
2297 state->rxcmd |= DMSGF_DELETE;
/* Fully closed only when BOTH directions have sent DELETE */
2299 if (state->txcmd & DMSGF_DELETE) {
2300 assert(state->flags & DMSG_STATE_INSERTED);
/* Received replies live on statewr_tree, received commands on staterd_tree */
2301 if (state->rxcmd & DMSGF_REPLY) {
2302 assert(msg->any.head.cmd & DMSGF_REPLY);
2303 RB_REMOVE(dmsg_state_tree,
2304 &iocom->statewr_tree, state);
2306 assert((msg->any.head.cmd & DMSGF_REPLY) == 0);
2307 RB_REMOVE(dmsg_state_tree,
2308 &iocom->staterd_tree, state);
/* Detach from parent and drop the ref the parent link held */
2310 pstate = state->parent;
2311 TAILQ_REMOVE(&pstate->subq, state, entry);
2312 state->flags &= ~DMSG_STATE_INSERTED;
2313 state->parent = NULL;
2314 dmsg_state_drop(pstate);
2317 dmsg_state_drop(state->relay);
2318 state->relay = NULL;
2321 dmsg_state_drop(state);
2325 pthread_mutex_unlock(&iocom->mtx);
2328 * Message not terminating transaction, leave state intact
2329 * and free message if it isn't the CREATE message.
2336 * Clean up the state after pulling out needed fields and queueing the
2337 * message for transmission. This occurs in dmsg_msg_write().
/*
 * Transmit-side post-processing (called from dmsg_msg_write()): mark
 * our DELETE sent and, if the receive side has also sent DELETE, tear
 * the state out of the RB tree and parent subq.
 * Mirror image of dmsg_state_cleanuprx().
 */
2340 dmsg_state_cleanuptx(dmsg_iocom_t *iocom, dmsg_msg_t *msg)
2342 dmsg_state_t *state;
2343 dmsg_state_t *pstate;
2345 assert(iocom == msg->state->iocom);
2347 if (state->flags & DMSG_STATE_ROOT) {
2349 } else if (msg->any.head.cmd & DMSGF_DELETE) {
2351 * Message terminating transaction, destroy the related
2352 * state, the original message, and this message (if it
2353 * isn't the original message due to a CREATE|DELETE).
2355 * It's possible for governing state to terminate while
2356 * sub-transactions still exist. This is allowed but
2357 * will cause sub-transactions to recursively fail.
2358 * Further reception of sub-transaction messages will be
2359 * impossible because the circuit will no longer exist.
2360 * (XXX need code to make sure that happens properly).
2362 pthread_mutex_lock(&iocom->mtx);
/* A DELETE may only be transmitted once per direction */
2363 assert((state->txcmd & DMSGF_DELETE) == 0);
2364 state->txcmd |= DMSGF_DELETE;
/* Fully closed only when BOTH directions have sent DELETE */
2365 if (state->rxcmd & DMSGF_DELETE) {
2366 assert(state->flags & DMSG_STATE_INSERTED);
/* Transmitted replies live on staterd_tree, transmitted commands on statewr_tree */
2367 if (state->txcmd & DMSGF_REPLY) {
2368 assert(msg->any.head.cmd & DMSGF_REPLY);
2369 RB_REMOVE(dmsg_state_tree,
2370 &iocom->staterd_tree, state);
2372 assert((msg->any.head.cmd & DMSGF_REPLY) == 0);
2373 RB_REMOVE(dmsg_state_tree,
2374 &iocom->statewr_tree, state);
/* Detach from parent and drop the ref the parent link held */
2376 pstate = state->parent;
2377 TAILQ_REMOVE(&pstate->subq, state, entry);
2378 state->flags &= ~DMSG_STATE_INSERTED;
2379 state->parent = NULL;
2380 dmsg_state_drop(pstate);
2383 dmsg_state_drop(state->relay);
2384 state->relay = NULL;
2386 dmsg_state_drop(state); /* usually the last drop */
2388 pthread_mutex_unlock(&iocom->mtx);
2393 * Called with or without locks
/*
 * Acquire a reference on (state).  Atomic; callable with or without locks.
 */
2396 dmsg_state_hold(dmsg_state_t *state)
2398 atomic_add_int(&state->refs, 1);
/*
 * Release a reference on (state); frees it when the last ref goes away
 * (atomic_fetchadd_int returns the pre-decrement value).
 */
2402 dmsg_state_drop(dmsg_state_t *state)
2404 if (atomic_fetchadd_int(&state->refs, -1) == 1)
2405 dmsg_state_free(state);
2409 * Called with iocom locked
/*
 * Final destruction of (state) once its refcount reaches zero.  Must
 * already be unlinked (not ROOT, not INSERTED) with no sub-transactions.
 * Called with the iocom locked (see comment above).
 */
2412 dmsg_state_free(dmsg_state_t *state)
2414 atomic_add_int(&dmsg_state_count, -1);
2416 fprintf(stderr, "terminate state %p id=%08x\n",
2417 state, (uint32_t)state->msgid);
2419 assert((state->flags & (DMSG_STATE_ROOT | DMSG_STATE_INSERTED)) == 0);
2420 assert(TAILQ_EMPTY(&state->subq));
2421 assert(state->refs == 0);
2423 if (state->any.any != NULL) /* XXX avoid deadlock w/exit & kernel */
2424 assert(state->any.any == NULL);
2429 * This swaps endian for a hammer2_msg_hdr. Note that the extended
2430 * header is not adjusted, just the core header.
2433 dmsg_bswap_head(dmsg_hdr_t *head)
2435 head->magic = bswap16(head->magic);
2436 head->reserved02 = bswap16(head->reserved02);
2437 head->salt = bswap32(head->salt);
2439 head->msgid = bswap64(head->msgid);
2440 head->circuit = bswap64(head->circuit);
2441 head->reserved18= bswap64(head->reserved18);
2443 head->cmd = bswap32(head->cmd);
2444 head->aux_crc = bswap32(head->aux_crc);
2445 head->aux_bytes = bswap32(head->aux_bytes);
2446 head->error = bswap32(head->error);
2447 head->aux_descr = bswap64(head->aux_descr);
2448 head->reserved38= bswap32(head->reserved38);
2449 head->hdr_crc = bswap32(head->hdr_crc);