/* $KAME: sctp_indata.c,v 1.35 2004/08/17 04:06:17 itojun Exp $ */

/*
 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#if !(defined(__OpenBSD__) || defined(__APPLE__))
#include "opt_ipsec.h"
#endif
#if defined(__FreeBSD__) || defined(__DragonFly__)
#include "opt_inet6.h"
#include "opt_inet.h"
#endif
#if defined(__NetBSD__)
#include "opt_inet.h"
#elif !defined(__OpenBSD__)
#include "opt_inet.h"
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/route.h>

#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
#include <sys/limits.h>
#else
#include <machine/limits.h>
#endif
#include <machine/cpu.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_hashdriver.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netproto/key/key.h>
#endif /* IPSEC */

#include <net/net_osdep.h>
extern u_int32_t sctp_debug_on;

/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it, for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */

extern int sctp_strict_sacks;
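/*
 * Sketch of the receive-window math in sctp_set_rwnd() below (explanatory
 * note, not from the original source): the advertised rwnd is the socket
 * buffer space left after subtracting everything we have accepted but not
 * yet handed to the application, roughly
 *
 *	rwnd = sbspace(so_rcv)
 *	     - size_on_delivery_queue
 *	     - size_on_reasm_queue
 *	     - size_on_all_streams
 *
 * e.g. with 64k of buffer space and 16k parked on the reassembly queue we
 * would advertise about 48k. The result is then clamped for silly-window
 * avoidance: a small nonzero window below the receiver SWS threshold is
 * advertised as 1 byte so the peer effectively stops sending tiny chunks.
 */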
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	u_int32_t calc, calc_w_oh;

	if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
		kprintf("cc:%lu hiwat:%lu lowat:%lu mbcnt:%lu mbmax:%lu\n",
		    (u_long)stcb->sctp_socket->so_rcv.ssb_cc,
		    (u_long)stcb->sctp_socket->so_rcv.ssb_hiwat,
		    (u_long)stcb->sctp_socket->so_rcv.ssb_lowat,
		    (u_long)stcb->sctp_socket->so_rcv.ssb_mbcnt,
		    (u_long)stcb->sctp_socket->so_rcv.ssb_mbmax);
		kprintf("Setting rwnd to: sb:%ld - (del:%d + reasm:%d str:%d)\n",
		    sctp_sbspace(&stcb->sctp_socket->so_rcv),
		    asoc->size_on_delivery_queue,
		    asoc->size_on_reasm_queue,
		    asoc->size_on_all_streams);
	}
	if (stcb->sctp_socket->so_rcv.ssb_cc == 0 &&
	    asoc->size_on_delivery_queue == 0 &&
	    asoc->size_on_reasm_queue == 0 &&
	    asoc->size_on_all_streams == 0) {
		/* Full rwnd granted */
		asoc->my_rwnd = max(stcb->sctp_socket->so_rcv.ssb_hiwat,
		    SCTP_MINIMAL_RWND);
		return;
	}
	/* get actual space */
	calc = (u_int32_t)sctp_sbspace(&stcb->sctp_socket->so_rcv);

	/*
	 * Subtract what has NOT yet been put on the socket queue but
	 * that we still hold for delivery.
	 */
	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_delivery_queue);
	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_reasm_queue);
	calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_all_streams);

	/* what is the overhead of all these rwnd's */
	calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);

	asoc->my_rwnd = calc;
	if (calc_w_oh == 0) {
		/*
		 * If our overhead is greater than the advertised
		 * rwnd, we clamp the rwnd to 1. This lets us
		 * still accept inbound segments, but hopefully will
		 * shut the sender down when he finally gets the message.
		 */
		asoc->my_rwnd = 1;
	}
	if ((asoc->my_rwnd > 0) &&
	    (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
		/* SWS engaged, tell peer none left */
		asoc->my_rwnd = 1;
		if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
			kprintf(" - SWS zeros\n");
		}
	} else {
		if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
			/* ... */
		}
	}
}
/*
 * Take a chk structure and build it into an mbuf. Hmm, should we change
 * things so that instead we store the data side in a chunk?
 */
static struct mbuf *
sctp_build_ctl_nchunk(struct sctp_tcb *stcb, uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no, uint16_t stream_seq, uint8_t flags)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;

	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) == 0) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	MGETHDR(ret, MB_DONTWAIT, MT_CONTROL);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	cmh->cmsg_type = SCTP_SNDRCV;
	cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	outinfo->sinfo_stream = stream_no;
	outinfo->sinfo_ssn = stream_seq;
	if (flags & SCTP_DATA_UNORDERED) {
		outinfo->sinfo_flags = MSG_UNORDERED;
	} else {
		outinfo->sinfo_flags = 0;
	}
	outinfo->sinfo_ppid = ppid;
	outinfo->sinfo_context = context;
	outinfo->sinfo_assoc_id = sctp_get_associd(stcb);
	outinfo->sinfo_tsn = tsn;
	outinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
	ret->m_len = cmh->cmsg_len;
	ret->m_pkthdr.len = ret->m_len;
	/*
	 * We track how many control len's have gone upon the sb
	 * and do not count these in the rwnd calculation.
	 */
	stcb->asoc.my_rwnd_control_len +=
	    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	return (ret);
}
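/*
 * Illustrative sketch (not part of the original source): a receiving
 * process sees the control message built above via recvmsg(), along the
 * lines of
 *
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
 *	if (c && c->cmsg_level == IPPROTO_SCTP && c->cmsg_type == SCTP_SNDRCV) {
 *		struct sctp_sndrcvinfo *s =
 *		    (struct sctp_sndrcvinfo *)CMSG_DATA(c);
 *		... use s->sinfo_stream, s->sinfo_ssn, s->sinfo_tsn ...
 *	}
 *
 * where msg is the struct msghdr handed to recvmsg() with msg_control
 * set; CMSG_FIRSTHDR/CMSG_DATA are the standard cmsg accessors.
 */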
/*
 * Take a chk structure and build it into an mbuf. Should we change things
 * so that instead we store the data side in a chunk?
 */
static struct mbuf *
sctp_build_ctl(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk)
{
	struct sctp_sndrcvinfo *outinfo;
	struct cmsghdr *cmh;
	struct mbuf *ret;

	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) == 0) {
		/* user does not want the sndrcv ctl */
		return (NULL);
	}
	MGET(ret, MB_DONTWAIT, MT_CONTROL);
	if (ret == NULL) {
		/* No space */
		return (ret);
	}
	/* We need a CMSG header followed by the struct */
	cmh = mtod(ret, struct cmsghdr *);
	outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
	cmh->cmsg_level = IPPROTO_SCTP;
	cmh->cmsg_type = SCTP_SNDRCV;
	cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	outinfo->sinfo_stream = chk->rec.data.stream_number;
	outinfo->sinfo_ssn = chk->rec.data.stream_seq;
	if (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
		outinfo->sinfo_flags = MSG_UNORDERED;
	} else {
		outinfo->sinfo_flags = 0;
	}
	outinfo->sinfo_ppid = chk->rec.data.payloadtype;
	outinfo->sinfo_context = chk->rec.data.context;
	outinfo->sinfo_assoc_id = sctp_get_associd(stcb);
	outinfo->sinfo_tsn = chk->rec.data.TSN_seq;
	outinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
	ret->m_len = cmh->cmsg_len;
	stcb->asoc.my_rwnd_control_len +=
	    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
	return (ret);
}
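/*
 * Note (explanatory, not from the original): my_rwnd_control_len tracks
 * how many bytes of cmsg/control overhead have been pushed onto the
 * socket buffer. sctp_set_rwnd() subtracts it, so the rwnd we advertise
 * reflects space available for actual user data rather than mbufs spent
 * on sndrcvinfo headers.
 */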
static int
sctp_deliver_data(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_locks)
{
	struct mbuf *control, *m;
	int free_it;
	struct sockaddr_in6 sin6;
	struct sockaddr *to;

	free_it = 0;
	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
		kprintf("I am now in Deliver data! (%p)\n", chk);
	}
	/* get a write lock on the inp if not already */
	if (hold_locks == 0) {
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
	}
	/* We always add it to the queue */
	if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* socket above is long gone */
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("gone is gone!\n");
		}
		if (chk != NULL) {
			if (chk->data)
				sctp_m_freem(chk->data);
			chk->data = NULL;
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
		TAILQ_FOREACH(chk, &asoc->delivery_queue, sctp_next) {
			asoc->size_on_delivery_queue -= chk->send_size;
			asoc->cnt_on_delivery_queue--;
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data)
				sctp_m_freem(chk->data);
			chk->data = NULL;
			/* Now free the address and data */
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
		if (hold_locks == 0)
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return (0);
	}
	if (chk != NULL) {
		TAILQ_INSERT_TAIL(&asoc->delivery_queue, chk, sctp_next);
		asoc->size_on_delivery_queue += chk->send_size;
		asoc->cnt_on_delivery_queue++;
	}
	if (asoc->fragmented_delivery_inprogress) {
		/*
		 * oh oh, fragmented delivery in progress;
		 * return out of here.
		 */
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("Fragmented delivery in progress?\n");
		}
		if (hold_locks == 0)
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return (0);
	}
	/* Now grab the first one */
	chk = TAILQ_FIRST(&asoc->delivery_queue);
	if (chk == NULL) {
		/* Nothing in queue */
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("Nothing in queue?\n");
		}
		asoc->size_on_delivery_queue = 0;
		asoc->cnt_on_delivery_queue = 0;
		if (hold_locks == 0)
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return (0);
	}
	if (stcb->sctp_socket->so_rcv.ssb_cc >= stcb->sctp_socket->so_rcv.ssb_hiwat) {
		/* Boy, there really is NO room */
		if (hold_locks == 0)
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return (0);
	}
	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
		kprintf("Now to the delivery with chk(%p)!\n", chk);
	}
	/* XXX need to append PKTHDR to the socket buffer first */
	if ((chk->data->m_flags & M_PKTHDR) == 0) {
		MGETHDR(m, MB_DONTWAIT, MT_DATA);
		if (m == NULL) {
			/* no room! */
			if (hold_locks == 0)
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			return (0);
		}
		m->m_pkthdr.len = chk->send_size;
		m->m_len = 0;
		m->m_next = chk->data;
		chk->data = m;
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
		if (chk->data->m_next == NULL) {
			/* hopefully we hit here most of the time */
			chk->data->m_flags |= M_EOR;
		} else {
			/* Add the flag to the LAST mbuf in the chain */
			m = chk->data;
			while (m->m_next != NULL) {
				m = m->m_next;
			}
			m->m_flags |= M_EOR;
		}
	}
	if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
		struct sockaddr_in6 lsa6;

		control = sctp_build_ctl(stcb, chk);
		to = (struct sockaddr *)&chk->whoTo->ro._l_addr;
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
		    to->sa_family == AF_INET) {
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)to;
			bzero(&sin6, sizeof(sin6));
			sin6.sin6_family = AF_INET6;
			sin6.sin6_len = sizeof(struct sockaddr_in6);
			sin6.sin6_addr.s6_addr16[2] = 0xffff;
			bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
			    sizeof(sin6.sin6_addr.s6_addr16[3]));
			sin6.sin6_port = sin->sin_port;
			to = (struct sockaddr *)&sin6;
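			/*
			 * Note (explanatory, not from the original): the
			 * intent of the block above is to hand the
			 * application an IPv4-mapped IPv6 address
			 * (::ffff:a.b.c.d) when a v4 peer address must be
			 * reported on a v6 socket; sin6 is filled from the
			 * peer's sockaddr_in and substituted for 'to'.
			 */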
		}
		/* check and strip embedded scope junk */
		to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
		    &lsa6);
		if (((struct sockaddr_in *)to)->sin_port == 0) {
			kprintf("Huh a, port is %d not net:%p %d?\n",
			    ((struct sockaddr_in *)to)->sin_port,
			    chk->whoTo,
			    (int)(ntohs(stcb->rport)));
			((struct sockaddr_in *)to)->sin_port = stcb->rport;
		}
		if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < (long)chk->send_size) {
			/* Gak not enough room */
			if (control) {
				sctp_m_freem(control);
				stcb->asoc.my_rwnd_control_len -=
				    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			}
			if (hold_locks == 0)
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			return (0);
		}
		lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
		if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
		    to, chk->data, control, stcb->asoc.my_vtag,
		    stcb->sctp_ep)) {
			/* Gak not enough room */
			if (control) {
				sctp_m_freem(control);
				stcb->asoc.my_rwnd_control_len -=
				    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			}
		} else {
			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
				if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
					stcb->asoc.my_rwnd_control_len +=
					    sizeof(struct mbuf);
				}
			} else {
				stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
			}
			free_it = 1;
		}
		lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
	} else {
		/* append to an already started message */
		lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
		if (sctp_sbspace(&stcb->sctp_socket->so_rcv) >=
		    (long)chk->send_size) {
			ssb_append(&stcb->sctp_socket->so_rcv, chk->data);
			free_it = 1;
		}
		lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
	}
	if (hold_locks == 0)
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
	/* free up the one we inserted */
	if (free_it) {
		/* Pull it off the queue */
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("Free_it true, doing tickle wakeup\n");
		}
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		TAILQ_REMOVE(&asoc->delivery_queue, chk, sctp_next);
		asoc->size_on_delivery_queue -= chk->send_size;
		asoc->cnt_on_delivery_queue--;
		/* Lose the data pointer, since it's in the socket buffer */
		chk->data = NULL;
		/* Now free the address and data */
		sctp_free_remote_addr(chk->whoTo);
		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
		sctppcbinfo.ipi_count_chunk--;
		if ((int)sctppcbinfo.ipi_count_chunk < 0) {
			panic("Chunk count is negative");
		}
		sctppcbinfo.ipi_gencnt_chunk++;
	}
	return (free_it);
}
/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either:
 * 1) run out of space.
 * 2) run out of sequential TSN's
 * 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
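/*
 * Explanatory sketch (not from the original source): this routine backs
 * the partial-delivery API. Once fragmented_delivery_inprogress is set,
 * pieces of one (and only one) partially received message are pushed up
 * the socket buffer as they arrive in TSN order; e.g. a message split as
 * FIRST(tsn 10), MIDDLE(tsn 11), LAST(tsn 12) can start being delivered
 * when tsn 10 arrives, with 11 and 12 appended later. Delivery stops at
 * any hole (tsn_last_delivered + 1 missing) or when the buffer fills.
 */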
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc, int hold_locks)
{
	struct sockaddr_in6 sin6;
	struct sctp_tmit_chunk *chk, *at;
	struct mbuf *control, *m;
	u_int16_t nxt_todel;
	u_int16_t stream_no;
	int cntDel;
	struct sockaddr *to;

	cntDel = stream_no = 0;
	if (hold_locks == 0) {
		/*
		 * you always have the TCB lock, we need
		 * to have the inp write lock as well.
		 */
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
	}
	if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* socket above is long gone */
		asoc->fragmented_delivery_inprogress = 0;
		TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
			asoc->size_on_delivery_queue -= chk->send_size;
			asoc->cnt_on_delivery_queue--;
			/*
			 * Lose the data pointer, since it's in the socket
			 * buffer
			 */
			if (chk->data)
				sctp_m_freem(chk->data);
			chk->data = NULL;
			/* Now free the address and data */
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
		}
		if (hold_locks == 0)
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
		return;
	}
	do {
		if (stcb->sctp_socket->so_rcv.ssb_cc >=
		    stcb->sctp_socket->so_rcv.ssb_hiwat) {
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0)
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			return;
		}
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		if (chk == NULL) {
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0)
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			return;
		}
		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
			/* Can't deliver more :< */
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0)
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			return;
		}
		stream_no = chk->rec.data.stream_number;
		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
		if (nxt_todel != chk->rec.data.stream_seq &&
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
			/*
			 * Not the next sequence to deliver in its stream OR
			 * unordered
			 */
			if (cntDel) {
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
			}
			if (hold_locks == 0)
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			return;
		}
		if ((chk->data->m_flags & M_PKTHDR) == 0) {
			MGETHDR(m, MB_DONTWAIT, MT_DATA);
			if (m == NULL) {
				/* no room! */
				if (hold_locks == 0)
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				return;
			}
			m->m_pkthdr.len = chk->send_size;
			m->m_len = 0;
			m->m_next = chk->data;
			chk->data = m;
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			if (chk->data->m_next == NULL) {
				/* hopefully we hit here most of the time */
				chk->data->m_flags |= M_EOR;
			} else {
				/* Add the flag to the LAST mbuf in the chain */
				m = chk->data;
				while (m->m_next != NULL) {
					m = m->m_next;
				}
				m->m_flags |= M_EOR;
			}
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			struct sockaddr_in6 lsa6;

			control = sctp_build_ctl(stcb, chk);
			to = (struct sockaddr *)&chk->whoTo->ro._l_addr;
			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
			    to->sa_family == AF_INET) {
				struct sockaddr_in *sin;

				sin = (struct sockaddr_in *)to;
				bzero(&sin6, sizeof(sin6));
				sin6.sin6_family = AF_INET6;
				sin6.sin6_len = sizeof(struct sockaddr_in6);
				sin6.sin6_addr.s6_addr16[2] = 0xffff;
				bcopy(&sin->sin_addr,
				    &sin6.sin6_addr.s6_addr16[3],
				    sizeof(sin6.sin6_addr.s6_addr16[3]));
				sin6.sin6_port = sin->sin_port;
				to = (struct sockaddr *)&sin6;
			}
			/* check and strip embedded scope junk */
			to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
			    &lsa6);
			if (((struct sockaddr_in *)to)->sin_port == 0) {
				kprintf("Huh b, port is %d not net:%p %d?\n",
				    ((struct sockaddr_in *)to)->sin_port,
				    chk->whoTo,
				    (int)(ntohs(stcb->rport)));
				((struct sockaddr_in *)to)->sin_port = stcb->rport;
			}
			if (sctp_sbspace(&stcb->sctp_socket->so_rcv) <
			    (long)chk->send_size) {
				if (control) {
					sctp_m_freem(control);
					stcb->asoc.my_rwnd_control_len -=
					    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
				}
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
				if (hold_locks == 0)
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				return;
			}
			lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
			if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
			    to, chk->data, control, stcb->asoc.my_vtag,
			    stcb->sctp_ep)) {
				/* Gak not enough room */
				if (control) {
					sctp_m_freem(control);
					stcb->asoc.my_rwnd_control_len -=
					    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
				}
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
				if (hold_locks == 0)
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
				return;
			}
			lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
				if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
					stcb->asoc.my_rwnd_control_len +=
					    sizeof(struct mbuf);
				}
			} else {
				stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
			}
		} else {
			/* append to an already started message */
			if (sctp_sbspace(&stcb->sctp_socket->so_rcv) >=
			    (long)chk->send_size) {
				lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
				ssb_append(&stcb->sctp_socket->so_rcv, chk->data);
				lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
			} else {
				/* out of space in the sb */
				sctp_sorwakeup(stcb->sctp_ep,
				    stcb->sctp_socket);
				if (hold_locks == 0)
					SCTP_INP_WUNLOCK(stcb->sctp_ep);
				return;
			}
		}
		cntDel++;
		/* pull it off the queue; we delivered it */
		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			asoc->fragmented_delivery_inprogress = 0;
			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
				asoc->strmin[stream_no].last_sequence_delivered++;
			}
		}
		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
		asoc->size_on_reasm_queue -= chk->send_size;
		asoc->cnt_on_reasm_queue--;
		/* free up the chk */
		sctp_free_remote_addr(chk->whoTo);
		chk->data = NULL;
		SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
		sctppcbinfo.ipi_count_chunk--;
		if ((int)sctppcbinfo.ipi_count_chunk < 0) {
			panic("Chunk count is negative");
		}
		sctppcbinfo.ipi_gencnt_chunk++;
		if (asoc->fragmented_delivery_inprogress == 0) {
			/*
			 * Now let's see if we can deliver the next one on
			 * the stream
			 */
			struct sctp_stream_in *strm;

			strm = &asoc->strmin[stream_no];
			nxt_todel = strm->last_sequence_delivered + 1;
			chk = TAILQ_FIRST(&strm->inqueue);
			if (chk && (nxt_todel == chk->rec.data.stream_seq)) {
				while (chk != NULL) {
					if (nxt_todel ==
					    chk->rec.data.stream_seq) {
						at = TAILQ_NEXT(chk, sctp_next);
						TAILQ_REMOVE(&strm->inqueue,
						    chk, sctp_next);
						asoc->size_on_all_streams -=
						    chk->send_size;
						asoc->cnt_on_all_streams--;
						strm->last_sequence_delivered++;
						/*
						 * We ignore the return of
						 * deliver_data here since we
						 * always can hold the chunk on
						 * the d-queue. And we have a
						 * finite number that can be
						 * delivered from the strq.
						 */
						sctp_deliver_data(stcb, asoc, chk, 1);
						chk = at;
					} else {
						break;
					}
					nxt_todel =
					    strm->last_sequence_delivered + 1;
				}
			}
			if (!TAILQ_EMPTY(&asoc->delivery_queue)) {
				/* Here if deliver_data fails, we must break */
				if (sctp_deliver_data(stcb, asoc, NULL, 1) == 0)
					break;
			}
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
			if (hold_locks == 0)
				SCTP_INP_WUNLOCK(stcb->sctp_ep);
			return;
		}
		chk = TAILQ_FIRST(&asoc->reasmqueue);
	} while (chk != NULL);
	if (cntDel) {
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
	}
	if (hold_locks == 0)
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
}
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order.
 * One big question still remains: what to do when the socket buffer is FULL?
 */
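/*
 * Explanatory note (not from the original): stream sequence numbers are
 * 16 bits and wrap, so ordering tests go through compare_with_wrap()
 * rather than a plain '<'. E.g. with last_sequence_delivered == 65535
 * the next deliverable SSN is 0; compare_with_wrap(65535, 0, MAX_SEQ)
 * is the "already delivered" test below, and it is false in that case
 * because 0 counts as one ahead of 65535 under mod-2^16 arithmetic.
 */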
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct sctp_stream_in *strm;
	struct sctp_tmit_chunk *at;
	struct mbuf *oper;
	int queue_needed;
	u_int16_t nxt_todel;

	/*
	 * Need to add code to deal with 16 bit seq wrap
	 * without a TSN wrap for ordered delivery (maybe).
	 */
	queue_needed = 1;
	asoc->size_on_all_streams += chk->send_size;
	asoc->cnt_on_all_streams++;
	strm = &asoc->strmin[chk->rec.data.stream_number];
	nxt_todel = strm->last_sequence_delivered + 1;
#ifdef SCTP_STR_LOGGING
	sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
#endif
	if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
		kprintf("queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
		    (u_int)chk->rec.data.stream_seq,
		    (u_int)strm->last_sequence_delivered, (u_int)nxt_todel);
	}
	if (compare_with_wrap(strm->last_sequence_delivered,
	    chk->rec.data.stream_seq, MAX_SEQ) ||
	    (strm->last_sequence_delivered == chk->rec.data.stream_seq)) {
		/* The incoming sseq is behind where we last delivered? */
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
			    chk->rec.data.stream_seq,
			    strm->last_sequence_delivered);
		}
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		TAILQ_INSERT_HEAD(&strm->inqueue, chk, sctp_next);
		MGET(oper, MB_DONTWAIT, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			u_int32_t *ippp;

			oper->m_len = sizeof(struct sctp_paramhdr) +
			    sizeof(*ippp);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(oper->m_len);
			ippp = (u_int32_t *)(ph + 1);
			*ippp = htonl(0x00000001);
		}
		sctp_abort_an_association(stcb->sctp_ep, stcb,
		    SCTP_PEER_FAULTY, oper);
		*abort_flag = 1;
		return;
	}
	if (nxt_todel == chk->rec.data.stream_seq) {
		/* can be delivered right away */
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("It's NEXT!\n");
		}
#ifdef SCTP_STR_LOGGING
		sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
		queue_needed = 0;
		asoc->size_on_all_streams -= chk->send_size;
		asoc->cnt_on_all_streams--;
		strm->last_sequence_delivered++;
		sctp_deliver_data(stcb, asoc, chk, 0);
		chk = TAILQ_FIRST(&strm->inqueue);
		while (chk != NULL) {
			/* all delivered */
			nxt_todel = strm->last_sequence_delivered + 1;
			if (nxt_todel == chk->rec.data.stream_seq) {
				at = TAILQ_NEXT(chk, sctp_next);
				TAILQ_REMOVE(&strm->inqueue, chk, sctp_next);
				asoc->size_on_all_streams -= chk->send_size;
				asoc->cnt_on_all_streams--;
				strm->last_sequence_delivered++;
				/*
				 * We ignore the return of deliver_data here
				 * since we always can hold the chunk on the
				 * d-queue. And we have a finite number that
				 * can be delivered from the strq.
				 */
#ifdef SCTP_STR_LOGGING
				sctp_log_strm_del(chk, NULL,
				    SCTP_STR_LOG_FROM_IMMED_DEL);
#endif
				sctp_deliver_data(stcb, asoc, chk, 0);
				chk = at;
				continue;
			}
			break;
		}
	}
	if (queue_needed) {
		/*
		 * Ok, we did not deliver this guy, find
		 * the correct place to put it on the queue.
		 */
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("Queue Needed!\n");
		}
		if (TAILQ_EMPTY(&strm->inqueue)) {
			/* Empty queue */
#ifdef SCTP_STR_LOGGING
			sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
#endif
			TAILQ_INSERT_HEAD(&strm->inqueue, chk, sctp_next);
		} else {
			TAILQ_FOREACH(at, &strm->inqueue, sctp_next) {
				if (compare_with_wrap(at->rec.data.stream_seq,
				    chk->rec.data.stream_seq, MAX_SEQ)) {
					/*
					 * one in queue is bigger than the new
					 * one, insert before this one
					 */
#ifdef SCTP_STR_LOGGING
					sctp_log_strm_del(chk, at,
					    SCTP_STR_LOG_FROM_INSERT_MD);
#endif
					TAILQ_INSERT_BEFORE(at, chk, sctp_next);
					break;
				} else if (at->rec.data.stream_seq ==
				    chk->rec.data.stream_seq) {
					/*
					 * Gak, he sent me a duplicate str seq
					 * number; foo bar, I guess I will just
					 * free this new guy, should we abort
					 * too? FIX ME MAYBE? Or it COULD be
					 * that the SSN's have wrapped. Maybe I
					 * should compare to TSN somehow...
					 * sigh, for now just blow away the
					 * chunk!
					 */
					if (chk->data)
						sctp_m_freem(chk->data);
					chk->data = NULL;
					asoc->size_on_all_streams -= chk->send_size;
					asoc->cnt_on_all_streams--;
					sctp_pegs[SCTP_DUP_SSN_RCVD]++;
					sctp_free_remote_addr(chk->whoTo);
					SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
					sctppcbinfo.ipi_count_chunk--;
					if ((int)sctppcbinfo.ipi_count_chunk <
					    0) {
						panic("Chunk count is negative");
					}
					sctppcbinfo.ipi_gencnt_chunk++;
					return;
				} else {
					if (TAILQ_NEXT(at, sctp_next) == NULL) {
						/*
						 * We are at the end, insert it
						 * after this one
						 */
#ifdef SCTP_STR_LOGGING
						sctp_log_strm_del(chk, at,
						    SCTP_STR_LOG_FROM_INSERT_TL);
#endif
						TAILQ_INSERT_AFTER(&strm->inqueue,
						    at, chk, sctp_next);
						break;
					}
				}
			}
		}
	} else {
		/* We delivered some chunks, wake them up */
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("Doing WAKEUP!\n");
		}
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
	}
}
/*
 * Returns two things: You get the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue. And you get a 1 back if
 * all of the message is ready or a 0 back if the message is still incomplete.
 */
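/*
 * Illustrative example (not from the original): a 3000-byte message
 * fragmented as FIRST(tsn 10, 1200 bytes), MIDDLE(tsn 11, 1200 bytes),
 * LAST(tsn 12, 600 bytes) yields *t_size = 3000 and a return of 1 once
 * all three sit on the queue in consecutive TSN order; if tsn 12 has not
 * arrived yet, the walk stops and returns 0 with *t_size = 2400.
 */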
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, int *t_size)
{
	struct sctp_tmit_chunk *chk;
	u_int32_t tsn;

	*t_size = 0;
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* nothing on the queue */
		return (0);
	}
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
		/* Not a first on the queue */
		return (0);
	}
	tsn = chk->rec.data.TSN_seq;
	while (chk) {
		if (tsn != chk->rec.data.TSN_seq) {
			return (0);
		}
		*t_size += chk->send_size;
		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			return (1);
		}
		tsn++;
		chk = TAILQ_NEXT(chk, sctp_next);
	}
	return (0);
}
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
	struct mbuf *oper;
	u_int16_t nxt_todel;
	u_int32_t cum_ackp1, prev_tsn, post_tsn;
	int tsize;
	struct sctp_tmit_chunk *at, *prev, *next;

	prev = next = NULL;
	cum_ackp1 = asoc->tsn_last_delivered + 1;

	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* This is the first one on the queue */
		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
		/*
		 * we do not check for delivery of anything when
		 * only one fragment is here
		 */
		asoc->size_on_reasm_queue = chk->send_size;
		asoc->cnt_on_reasm_queue++;
		if (chk->rec.data.TSN_seq == cum_ackp1) {
			if (asoc->fragmented_delivery_inprogress == 0 &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
			    SCTP_DATA_FIRST_FRAG) {
				/*
				 * An empty queue, no delivery inprogress, we
				 * hit the next one and it does NOT have a
				 * FIRST fragment mark.
				 */
				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
					kprintf("Gak, Evil plot, its not first, no fragmented delivery in progress\n");
				}
				MGET(oper, MB_DONTWAIT, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					u_int32_t *ippp;

					oper->m_len =
					    sizeof(struct sctp_paramhdr) +
					    sizeof(*ippp);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(oper->m_len);
					ippp = (u_int32_t *)(ph + 1);
					*ippp = htonl(0x10000001);
				}
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress &&
			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
				/*
				 * We are doing a partial delivery and the NEXT
				 * chunk MUST be either the LAST or MIDDLE
				 * fragment NOT a FIRST
				 */
				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
					kprintf("Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
				}
				MGET(oper, MB_DONTWAIT, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					u_int32_t *ippp;

					oper->m_len =
					    sizeof(struct sctp_paramhdr) +
					    sizeof(*ippp);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(oper->m_len);
					ippp = (u_int32_t *)(ph + 1);
					*ippp = htonl(0x10000002);
				}
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
			} else if (asoc->fragmented_delivery_inprogress) {
				/* Here we are ok with a MIDDLE or LAST piece */
				if (chk->rec.data.stream_number !=
				    asoc->str_of_pdapi) {
					/* Got to be the right STR No */
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Gak, Evil plot, it IS not same stream number %d vs %d\n",
						    chk->rec.data.stream_number,
						    asoc->str_of_pdapi);
					}
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000003);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    chk->rec.data.stream_seq !=
				    asoc->ssn_of_pdapi) {
					/* Got to be the right STR Seq */
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Gak, Evil plot, it IS not same stream seq %d vs %d\n",
						    chk->rec.data.stream_seq,
						    asoc->ssn_of_pdapi);
					}
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000004);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
				}
			}
		}
		return;
	}
	/* Find its place */
	at = TAILQ_FIRST(&asoc->reasmqueue);
	/* Grab the top flags */
	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(at->rec.data.TSN_seq,
		    chk->rec.data.TSN_seq, MAX_TSN)) {
			/*
			 * one in queue is bigger than the new one, insert
			 * before this one
			 */
			asoc->size_on_reasm_queue += chk->send_size;
			asoc->cnt_on_reasm_queue++;
			next = at;
			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
			break;
		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
			/* Gak, he sent me a duplicate str seq number */
			/*
			 * foo bar, I guess I will just free this new guy,
			 * should we abort too? FIX ME MAYBE? Or it COULD be
			 * that the SSN's have wrapped. Maybe I should compare
			 * to TSN somehow... sigh, for now just blow away the
			 * chunk!
			 */
			if (chk->data)
				sctp_m_freem(chk->data);
			chk->data = NULL;
			sctp_free_remote_addr(chk->whoTo);
			SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
			sctppcbinfo.ipi_count_chunk--;
			if ((int)sctppcbinfo.ipi_count_chunk < 0) {
				panic("Chunk count is negative");
			}
			sctppcbinfo.ipi_gencnt_chunk++;
			return;
		} else {
			prev = at;
			if (TAILQ_NEXT(at, sctp_next) == NULL) {
				/*
				 * We are at the end, insert it after this one
				 */
				/* check it first */
				asoc->size_on_reasm_queue += chk->send_size;
				asoc->cnt_on_reasm_queue++;
				TAILQ_INSERT_AFTER(&asoc->reasmqueue,
				    at, chk, sctp_next);
				break;
			}
		}
	}
	/* Now the audits */
	if (prev) {
		prev_tsn = chk->rec.data.TSN_seq - 1;
		if (prev_tsn == prev->rec.data.TSN_seq) {
			/*
			 * Ok the one I am dropping onto the end
			 * is the NEXT. A bit of validation here.
			 */
			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_FIRST_FRAG ||
			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG) {
				/*
				 * Insert chk MUST be a MIDDLE or LAST fragment
				 */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_FIRST_FRAG) {
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Prev check - It can be a middle or last but not a first\n");
						kprintf("Gak, Evil plot, it's a FIRST!\n");
					}
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000005);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    prev->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
						    chk->rec.data.stream_number,
						    prev->rec.data.stream_number);
					}
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000006);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    prev->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
						    chk->rec.data.stream_seq,
						    prev->rec.data.stream_seq);
					}
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000007);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk MUST be a FIRST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Prev check - Gak, evil plot, its not FIRST and it must be!\n");
					}
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000008);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	if (next) {
		post_tsn = chk->rec.data.TSN_seq + 1;
		if (post_tsn == next->rec.data.TSN_seq) {
			/*
			 * Ok the one I am inserting ahead of
			 * is my NEXT one. A bit of validation here.
			 */
			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
				/* Insert chk MUST be a last fragment */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
				    != SCTP_DATA_LAST_FRAG) {
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Next chk - Next is FIRST, we must be LAST\n");
						kprintf("Gak, Evil plot, its not a last!\n");
					}
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x10000009);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_MIDDLE_FRAG ||
			    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
			    SCTP_DATA_LAST_FRAG) {
				/* Insert chk CAN be MIDDLE or FIRST NOT LAST */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
				    SCTP_DATA_LAST_FRAG) {
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Next chk - Next is a MIDDLE/LAST\n");
						kprintf("Gak, Evil plot, new prev chunk is a LAST\n");
					}
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x1000000a);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
				if (chk->rec.data.stream_number !=
				    next->rec.data.stream_number) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
						    chk->rec.data.stream_number,
						    next->rec.data.stream_number);
					}
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x1000000b);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
				    chk->rec.data.stream_seq !=
				    next->rec.data.stream_seq) {
					/*
					 * Huh, need the correct STR here,
					 * they must be the same.
					 */
					if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
						kprintf("Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
						    chk->rec.data.stream_seq,
						    next->rec.data.stream_seq);
					}
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x1000000c);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					return;
				}
			}
		}
	}
	/*
	 * Now that we have all in their place we must check a number of
	 * things to see if we can send data to the ULP.
	 */
	/* we need to do some delivery, if we can */
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	if (chk == NULL) {
		/* Huh? */
		asoc->size_on_reasm_queue = 0;
		asoc->cnt_on_reasm_queue = 0;
		return;
	}
	if (asoc->fragmented_delivery_inprogress == 0) {
		nxt_todel =
		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
		    (nxt_todel == chk->rec.data.stream_seq ||
		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
			/*
			 * Yep the first one is here and its
			 * ok to deliver but should we?
			 */
			if (TAILQ_EMPTY(&asoc->delivery_queue) &&
			    (sctp_is_all_msg_on_reasm(asoc, &tsize) ||
			    (asoc->size_on_reasm_queue >=
			    (stcb->sctp_socket->so_rcv.ssb_hiwat >> 2) &&
			    tsize))) {
				/*
				 * Yes, we setup to start reception, by
				 * backing down the TSN just in case we
				 * can't deliver.
				 */
				asoc->fragmented_delivery_inprogress = 1;
				asoc->tsn_last_delivered =
				    chk->rec.data.TSN_seq - 1;
				asoc->str_of_pdapi =
				    chk->rec.data.stream_number;
				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
				asoc->fragment_flags = chk->rec.data.rcv_flags;
				sctp_service_reassembly(stcb, asoc, 0);
			}
		}
	} else {
		sctp_service_reassembly(stcb, asoc, 0);
	}
}
/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this
 * but this is doubtful. It is too bad I must worry about evil crackers, sigh.
 */
static int
sctp_does_chk_belong_to_reasm(struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk)
{
	struct sctp_tmit_chunk *at;
	u_int32_t tsn_est;

	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
		if (compare_with_wrap(chk->rec.data.TSN_seq,
		    at->rec.data.TSN_seq, MAX_TSN)) {
			/* is it one bigger? */
			tsn_est = at->rec.data.TSN_seq + 1;
			if (tsn_est == chk->rec.data.TSN_seq) {
				/* yep. It better be a last then */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_LAST_FRAG) {
					/*
					 * Ok this guy belongs next to a guy
					 * that is NOT last, it should be a
					 * middle/last, not a complete chunk.
					 */
					return (1);
				} else {
					/*
					 * This guy is ok since its a LAST and
					 * the new chunk is a fully self-
					 * contained one.
					 */
					return (0);
				}
			}
		} else if (chk->rec.data.TSN_seq == at->rec.data.TSN_seq) {
			/* Software error since I have a dup? */
			return (1);
		} else {
			/*
			 * Ok, 'at' is larger than new chunk but does it
			 * need to be right before it?
			 */
			tsn_est = chk->rec.data.TSN_seq + 1;
			if (tsn_est == at->rec.data.TSN_seq) {
				/* Yep, It better be a first */
				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
				    SCTP_DATA_FIRST_FRAG) {
					return (1);
				} else {
					return (0);
				}
			}
		}
	}
	return (0);
}

extern unsigned int sctp_max_chunks_on_queue;
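/*
 * Overview sketch of sctp_process_a_data_chunk() below (explanatory, not
 * from the original source). In rough order it:
 *	1. drops duplicate TSNs (at/below the cum-ack or already marked in
 *	   the mapping array), recording them for the next SACK;
 *	2. checks rwnd/queue limits and the stream id;
 *	3. validates the stream sequence number of a FIRST fragment;
 *	4. copies or steals the payload mbufs;
 *	5. tries an "express" append straight into the socket buffer, else
 *	   builds a sctp_tmit_chunk and queues it to the stream or
 *	   reassembly queue;
 *	6. finally marks the TSN present in the mapping array.
 */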
static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, u_int32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
	/* Process a data chunk */
	/* struct sctp_tmit_chunk *chk;*/
	struct sctp_tmit_chunk *chk;
	u_int32_t tsn, gap;
	struct mbuf *dmbuf;
	int the_len;
	u_int16_t strmno, strmseq;
	struct mbuf *oper;

	chk = NULL;
	tsn = ntohl(ch->dp.tsn);
#ifdef SCTP_MAP_LOGGING
	sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE);
#endif
	if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
	    asoc->cumulative_tsn == tsn) {
		/* It is a duplicate */
		sctp_pegs[SCTP_DUPTSN_RECVD]++;
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		return (0);
	}
	/* Calculate the number of TSN's between the base and this TSN */
	if (tsn >= asoc->mapping_array_base_tsn) {
		gap = tsn - asoc->mapping_array_base_tsn;
	} else {
		gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
	}
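	/*
	 * Worked example (explanatory, not from the original): gap is the
	 * bit index of this TSN relative to mapping_array_base_tsn, modulo
	 * 2^32. With base 0xfffffff0 and tsn 5 the else-branch gives
	 * gap = (0xffffffff - 0xfffffff0) + 5 + 1 = 21, i.e. tsn 5 is the
	 * 22nd TSN at or above the base once the TSN space wraps.
	 */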
	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
		/* Can't hold the bit in the mapping at max array, toss it */
		return (0);
	}
	if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
		if (sctp_expand_mapping_array(asoc)) {
			/* Can't expand, drop it */
			return (0);
		}
	}
	if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
		*high_tsn = tsn;
	}
	/* See if we have received this one already */
	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
		sctp_pegs[SCTP_DUPTSN_RECVD]++;
		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
			/* Record a dup for the next outbound sack */
			asoc->dup_tsns[asoc->numduptsns] = tsn;
			asoc->numduptsns++;
		}
		if (!callout_pending(&asoc->dack_timer.timer)) {
			/*
			 * By starting the timer we assure that we
			 * WILL sack at the end of the packet
			 * when sctp_sack_check gets called.
			 */
			sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep,
			    stcb, NULL);
		}
		return (0);
	}
	/*
	 * Check to see about the GONE flag, duplicates would cause
	 * a sack to be sent up above
	 */
	if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/*
		 * wait a minute, this guy is gone, there is no
		 * longer a receiver. Send peer an ABORT!
		 */
		struct mbuf *op_err;

		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
		*abort_flag = 1;
		return (0);
	}
	/*
	 * Now before going further we see if there is room. If NOT then
	 * we MAY let one through only IF this TSN is the one we are
	 * waiting for on a partial delivery API.
	 */
	/* now do the tests */
	if (((asoc->cnt_on_all_streams +
	    asoc->cnt_on_delivery_queue +
	    asoc->cnt_on_reasm_queue +
	    asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
	    (((int)asoc->my_rwnd) <= 0)) {
		/*
		 * When we have NO room in the rwnd we check
		 * to make sure the reader is doing its job...
		 */
		if (stcb->sctp_socket->so_rcv.ssb_cc) {
			/* some to read, wake-up */
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		}
		/* now is it in the mapping array of what we have accepted? */
		if (compare_with_wrap(tsn,
		    asoc->highest_tsn_inside_map, MAX_TSN)) {
			/* Nope not in the valid range dump it */
			if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
				kprintf("My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld delq:%d!\n",
				    (u_long)tsn, (u_long)asoc->my_rwnd,
				    sctp_sbspace(&stcb->sctp_socket->so_rcv),
				    stcb->asoc.cnt_on_delivery_queue);
			}
			sctp_set_rwnd(stcb, asoc);
			if ((asoc->cnt_on_all_streams +
			    asoc->cnt_on_delivery_queue +
			    asoc->cnt_on_reasm_queue +
			    asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
				sctp_pegs[SCTP_MSGC_DROP]++;
			} else {
				sctp_pegs[SCTP_RWND_DROPS]++;
			}
			*break_flag = 1;
			return (0);
		}
	}
	strmno = ntohs(ch->dp.stream_id);
	if (strmno >= asoc->streamincnt) {
		struct sctp_paramhdr *phdr;
		struct mbuf *mb;

		MGETHDR(mb, MB_DONTWAIT, MT_DATA);
		if (mb != NULL) {
			/* add some space up front so prepend will work well */
			mb->m_data += sizeof(struct sctp_chunkhdr);
			phdr = mtod(mb, struct sctp_paramhdr *);
			/*
			 * Error causes are just param's and this one has
			 * two back to back phdr, one with the error type
			 * and size, the other with the streamid and a rsvd
			 */
			mb->m_pkthdr.len = mb->m_len =
			    (sizeof(struct sctp_paramhdr) * 2);
			phdr->param_type = htons(SCTP_CAUSE_INV_STRM);
			phdr->param_length =
			    htons(sizeof(struct sctp_paramhdr) * 2);
			phdr++;
			/* We insert the stream in the type field */
			phdr->param_type = ch->dp.stream_id;
			/* And set the length to 0 for the rsvd field */
			phdr->param_length = 0;
			sctp_queue_op_err(stcb, mb);
		}
		sctp_pegs[SCTP_BAD_STRMNO]++;
		return (0);
	}
	/*
	 * Before we continue lets validate that we are not
	 * being fooled by an evil attacker. We can only
	 * have 4k chunks based on our TSN spread allowed
	 * by the mapping array 512 * 8 bits, so there is
	 * no way our stream sequence numbers could have wrapped.
	 * We of course only validate the FIRST fragment, so the
	 * check applies when that bit is set.
	 */
	strmseq = ntohs(ch->dp.stream_sequence);
	if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) &&
	    (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
	    (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
	    strmseq, MAX_SEQ) ||
	    asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
		/* The incoming sseq is behind where we last delivered? */
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
			    strmseq,
			    asoc->strmin[strmno].last_sequence_delivered);
		}
		/*
		 * throw it in the stream so it gets cleaned up in
		 * association destruction
		 */
		MGET(oper, MB_DONTWAIT, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			u_int32_t *ippp;

			oper->m_len = sizeof(struct sctp_paramhdr) +
			    sizeof(*ippp);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(oper->m_len);
			ippp = (u_int32_t *)(ph + 1);
			*ippp = htonl(0x20000001);
		}
		sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY,
		    oper);
		sctp_pegs[SCTP_BAD_SSN_WRAP]++;
		*abort_flag = 1;
		return (0);
	}
	the_len = (chk_length - sizeof(struct sctp_data_chunk));
	if (last_chunk == 0) {
		dmbuf = sctp_m_copym(*m,
		    (offset + sizeof(struct sctp_data_chunk)),
		    the_len, MB_DONTWAIT);
	} else {
		/* We can steal the last chunk */
		dmbuf = *m;
		/* lop off the top part */
		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
		if (dmbuf->m_pkthdr.len > the_len) {
			/* Trim the end round bytes off too */
			m_adj(dmbuf, -(dmbuf->m_pkthdr.len - the_len));
		}
		sctp_pegs[SCTP_NO_COPY_IN]++;
	}
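	/*
	 * Note (explanatory, not from the original): when this DATA chunk
	 * is the last thing in the packet we can take over the received
	 * mbuf chain (trimmed with m_adj above) instead of copying the
	 * payload, which is why last_chunk selects the sctp_m_copym()
	 * path only for chunks in the middle of a packet.
	 */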
	if (dmbuf == NULL) {
		sctp_pegs[SCTP_DROP_NOMEMORY]++;
		return (0);
	}
	if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
	    asoc->fragmented_delivery_inprogress == 0 &&
	    TAILQ_EMPTY(&asoc->delivery_queue) &&
	    ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) ||
	    ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue))) &&
	    ((long)(stcb->sctp_socket->so_rcv.ssb_hiwat -
	    stcb->sctp_socket->so_rcv.ssb_cc) >= (long)the_len)) {
		/* Candidate for express delivery */
		/*
		 * It's not fragmented,
		 * no fragmented delivery is in progress,
		 * nothing is in the delivery queue,
		 * it's un-ordered OR ordered and the next to deliver AND
		 * nothing else is stuck on the stream queue,
		 * and there is room for it in the socket buffer.
		 * Let's just stuff it up the buffer....
		 */
		struct mbuf *control, *mmm;
		struct sockaddr_in6 sin6;
		struct sockaddr_in6 lsa6;
		struct sockaddr *to;

		/* It would be nice to avoid this copy if we could :< */
		control = sctp_build_ctl_nchunk(stcb, tsn,
		    ch->dp.protocol_id, 0, strmno, strmseq,
		    ch->ch.chunk_flags);
		/* XXX need to append PKTHDR to the socket buffer first */
		if ((dmbuf->m_flags & M_PKTHDR) == 0) {
			struct mbuf *tmp;

			MGETHDR(tmp, MB_DONTWAIT, MT_DATA);
			if (tmp == NULL) {
				/* no room! */
				if (control) {
					sctp_m_freem(control);
					stcb->asoc.my_rwnd_control_len -=
					    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
					control = NULL;
				}
				goto failed_express_del;
			}
			tmp->m_pkthdr.len = the_len;
			tmp->m_len = 0;
			tmp->m_next = dmbuf;
			dmbuf = tmp;
		}
		to = (struct sockaddr *)&net->ro._l_addr;
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
		    to->sa_family == AF_INET) {
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)to;
			bzero(&sin6, sizeof(sin6));
			sin6.sin6_family = AF_INET6;
			sin6.sin6_len = sizeof(struct sockaddr_in6);
			sin6.sin6_addr.s6_addr16[2] = 0xffff;
			bcopy(&sin->sin_addr,
			    &sin6.sin6_addr.s6_addr16[3],
			    sizeof(sin6.sin6_addr.s6_addr16[3]));
			sin6.sin6_port = sin->sin_port;
			to = (struct sockaddr *)&sin6;
		}
		/* check and strip embedded scope junk */
		to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
		    &lsa6);
		if (((struct sockaddr_in *)to)->sin_port == 0) {
			kprintf("Huh c, port is %d not net:%p %d?\n",
			    ((struct sockaddr_in *)to)->sin_port,
			    net,
			    (int)(ntohs(stcb->rport)));
			((struct sockaddr_in *)to)->sin_port = stcb->rport;
		}
		mmm = dmbuf;
		/* Mark the EOR on the last mbuf */
		while (mmm->m_next != NULL) {
			mmm = mmm->m_next;
		}
		mmm->m_flags |= M_EOR;
		if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
			/* we have a new high score */
			asoc->highest_tsn_inside_map = tsn;
#ifdef SCTP_MAP_LOGGING
			sctp_log_map(0, 1, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
#endif
		}
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
		lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
		if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to, dmbuf,
		    control, stcb->asoc.my_vtag, stcb->sctp_ep)) {
			if (control) {
				sctp_m_freem(control);
				stcb->asoc.my_rwnd_control_len -=
				    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
			}
			sctp_m_freem(dmbuf);
			lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
			SCTP_INP_WUNLOCK(stcb->sctp_ep);
			goto failed_express_del;
		}
		lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
			if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
				stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
			}
		} else {
			stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
		}
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
			/* for ordered, bump what we delivered */
			asoc->strmin[strmno].last_sequence_delivered++;
		}
		sctp_pegs[SCTP_EXPRESS_ROUTE]++;
#ifdef SCTP_STR_LOGGING
		sctp_log_strm_del_alt(tsn, strmseq,
		    SCTP_STR_LOG_FROM_EXPRS_DEL);
#endif
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("Express Delivery succeeds\n");
		}
		goto finish_express_del;
	}

failed_express_del:
	/* If we reach here this is a new chunk */
	chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
	if (chk == NULL) {
		/* No memory so we drop the chunk */
		sctp_pegs[SCTP_DROP_NOMEMORY]++;
		if (last_chunk == 0) {
			/* we copied it, free the copy */
			sctp_m_freem(dmbuf);
		}
		return (0);
	}
	sctppcbinfo.ipi_count_chunk++;
	sctppcbinfo.ipi_gencnt_chunk++;
	chk->rec.data.TSN_seq = tsn;
	chk->rec.data.stream_seq = strmseq;
	chk->rec.data.stream_number = strmno;
	chk->rec.data.payloadtype = ch->dp.protocol_id;
	chk->rec.data.context = 0;
	chk->rec.data.doing_fast_retransmit = 0;
	chk->rec.data.rcv_flags = ch->ch.chunk_flags;
	chk->asoc = asoc;
	chk->send_size = the_len;
	chk->whoTo = net;
	net->ref_count++;
	chk->data = dmbuf;

	/* Mark it as received */
	/* Now queue it where it belongs */
	if ((chk->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
	    SCTP_DATA_NOT_FRAG) {
		/* First a sanity check */
		if (asoc->fragmented_delivery_inprogress) {
			/*
			 * Ok, we have a fragmented delivery in progress;
			 * if this chunk is next to deliver OR belongs in
			 * our view to the reassembly, the peer is evil
			 * or broken.
			 */
			u_int32_t estimate_tsn;

			estimate_tsn = asoc->tsn_last_delivered + 1;
			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
			    (estimate_tsn == chk->rec.data.TSN_seq)) {
				/* Evil/Broke peer */
				MGET(oper, MB_DONTWAIT, MT_DATA);
				if (oper) {
					struct sctp_paramhdr *ph;
					u_int32_t *ippp;

					oper->m_len =
					    sizeof(struct sctp_paramhdr) +
					    sizeof(*ippp);
					ph = mtod(oper, struct sctp_paramhdr *);
					ph->param_type =
					    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
					ph->param_length = htons(oper->m_len);
					ippp = (u_int32_t *)(ph + 1);
					*ippp = htonl(0x20000002);
				}
				sctp_abort_an_association(stcb->sctp_ep, stcb,
				    SCTP_PEER_FAULTY, oper);
				*abort_flag = 1;
				sctp_pegs[SCTP_DROP_FRAG]++;
				return (0);
			} else {
				if (sctp_does_chk_belong_to_reasm(asoc, chk)) {
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x20000003);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					sctp_pegs[SCTP_DROP_FRAG]++;
					return (0);
				}
			}
		} else {
			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
				/*
				 * Reassembly queue is NOT empty;
				 * validate that this chk does not need to
				 * be in the reassembly queue. If it does then
				 * our peer is broken or evil.
				 */
				if (sctp_does_chk_belong_to_reasm(asoc, chk)) {
					MGET(oper, MB_DONTWAIT, MT_DATA);
					if (oper) {
						struct sctp_paramhdr *ph;
						u_int32_t *ippp;

						oper->m_len =
						    sizeof(struct sctp_paramhdr) +
						    sizeof(*ippp);
						ph = mtod(oper,
						    struct sctp_paramhdr *);
						ph->param_type =
						    htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
						ph->param_length =
						    htons(oper->m_len);
						ippp = (u_int32_t *)(ph + 1);
						*ippp = htonl(0x20000004);
					}
					sctp_abort_an_association(stcb->sctp_ep,
					    stcb, SCTP_PEER_FAULTY, oper);
					*abort_flag = 1;
					sctp_pegs[SCTP_DROP_FRAG]++;
					return (0);
				}
			}
		}
		if (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
			/* queue directly into socket buffer */
			sctp_deliver_data(stcb, asoc, chk, 0);
			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
		} else {
			/*
			 * Special check for when streams are resetting.
			 * We could be more smart about this and check the
			 * actual stream to see if it is not being reset..
			 * that way we would not create a HOLB when amongst
			 * streams being reset and those not being reset.
			 *
			 * We take complete messages that have a stream reset
			 * intervening (aka the TSN is after where our cum-ack
			 * needs to be) off and put them on a
			 * pending_reply_queue. The reassembly ones we do not
			 * have to worry about since they are all sorted and
			 * processed by TSN order. It is only the singletons
			 * I must worry about.
			 */
			if ((asoc->pending_reply) &&
			    ((compare_with_wrap(tsn, ntohl(asoc->pending_reply->reset_at_tsn), MAX_TSN)) ||
			    (tsn == ntohl(asoc->pending_reply->reset_at_tsn)))) {
				/*
				 * yep, it's past where we need to reset... go
				 * ahead and queue it.
				 */
				TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, chk, sctp_next);
			} else {
				sctp_queue_data_to_stream(stcb, asoc, chk, abort_flag);
			}
		}
	} else {
		/* Into the re-assembly queue */
		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
		if (*abort_flag) {
			sctp_pegs[SCTP_DROP_FRAG]++;
			return (0);
		}
	}
	if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
		/* we have a new high score */
		asoc->highest_tsn_inside_map = tsn;
#ifdef SCTP_MAP_LOGGING
		sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
#endif
	}
finish_express_del:
	if (last_chunk) {
		*m = NULL;
	}
	sctp_pegs[SCTP_PEG_TSNS_RCVD]++;
	/* Set it present please */
#ifdef SCTP_STR_LOGGING
	sctp_log_strm_del_alt(tsn, strmseq, SCTP_STR_LOG_FROM_MARK_TSN);
#endif
#ifdef SCTP_MAP_LOGGING
	sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
#endif
	SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
	return (1);
}
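/*
 * Explanatory note (not from the original): the mapping array is a
 * bitmap of received TSNs. Bit 0 of byte 0 corresponds to
 * mapping_array_base_tsn, and bit (gap & 7) of byte (gap >> 3) to
 * base + gap. SCTP_IS_TSN_PRESENT/SCTP_SET_TSN_PRESENT are the usual
 * test/set macros over that bitmap, and sctp_sack_check() below scans
 * it to advance the cumulative ack and to build gap-ack blocks.
 */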
2234 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2237 * Now we also need to check the mapping array in a couple of ways.
2238 * 1) Did we move the cum-ack point?
2240 struct sctp_association *asoc;
2242 int m_size, all_ones;
2243 int slide_from, slide_end, lgap, distance;
2244 #ifdef SCTP_MAP_LOGGING
2245 uint32_t old_cumack, old_base, old_highest;
2246 unsigned char aux_array[64];
2252 #ifdef SCTP_MAP_LOGGING
2253 old_cumack = asoc->cumulative_tsn;
2254 old_base = asoc->mapping_array_base_tsn;
2255 old_highest = asoc->highest_tsn_inside_map;
2256 if (asoc->mapping_array_size < 64)
2257 memcpy(aux_array, asoc->mapping_array,
2258 asoc->mapping_array_size);
2260 memcpy(aux_array, asoc->mapping_array, 64);
2264 * We could probably improve this a small bit by calculating the
2265 * offset of the current cum-ack as the starting point.
2268 m_size = stcb->asoc.mapping_array_size << 3;
2269 for (i = 0; i < m_size; i++) {
2270 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i)) {
2272 * Ok we found the first place that we are
2277 asoc->cumulative_tsn = asoc->mapping_array_base_tsn +
2282 if (compare_with_wrap(asoc->cumulative_tsn,
2283 asoc->highest_tsn_inside_map,
2285 panic("huh, cumack greater than high-tsn in map");
2288 (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) {
2289 /* The complete array was completed by a single FR */
2290 /* highest becomes the cum-ack */
2292 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2293 /* clear the array */
2295 clr = asoc->mapping_array_size;
2297 clr = (at >> 3) + 1;
2299 * this should be the all-ones case,
2300 * but just in case :>
2302 if (clr > asoc->mapping_array_size)
2303 clr = asoc->mapping_array_size;
2305 memset(asoc->mapping_array, 0, clr);
2306 /* base becomes one ahead of the cum-ack */
2307 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2308 #ifdef SCTP_MAP_LOGGING
2309 sctp_log_map(old_base, old_cumack, old_highest,
2310 SCTP_MAP_PREPARE_SLIDE);
2311 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2312 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2314 } else if (at >= 8) {
2315 /* we can slide the mapping array down */
2316 /* Calculate the new byte position we can move down */
2317 slide_from = at >> 3;
2318 /* now calculate the ceiling of the move using our highest TSN value */
2319 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2320 lgap = asoc->highest_tsn_inside_map -
2321 asoc->mapping_array_base_tsn;
2323 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2324 asoc->highest_tsn_inside_map + 1;
2326 slide_end = lgap >> 3;
2327 if (slide_end < slide_from) {
2328 panic("impossible slide");
2330 distance = (slide_end-slide_from) + 1;
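/*
 * Worked example: first missing TSN at bit offset at = 19 gives
 * slide_from = 2; with highest - base = 30, lgap = 30 and
 * slide_end = 3, so distance = 2 bytes get copied to the front
 * of the map and base_tsn advances by slide_from << 3 = 16 TSNs.
 */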
2331 #ifdef SCTP_MAP_LOGGING
2332 sctp_log_map(old_base, old_cumack, old_highest,
2333 SCTP_MAP_PREPARE_SLIDE);
2334 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2335 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2337 if (distance + slide_from > asoc->mapping_array_size ||
2340 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
2341 kprintf("Ugh bad addition.. you can't hrumpp!\n");
2345 * Here we do NOT slide forward the array so that
2346 * hopefully when more data comes in to fill it up
2347 * we will be able to slide it forward. Really
2348 * I don't think this should happen :-0
2351 #ifdef SCTP_MAP_LOGGING
2352 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2353 (uint32_t)asoc->mapping_array_size,
2354 SCTP_MAP_SLIDE_NONE);
2358 for (ii = 0; ii < distance; ii++) {
2359 asoc->mapping_array[ii] =
2360 asoc->mapping_array[slide_from + ii];
2362 for (ii = distance; ii <= slide_end; ii++) {
2363 asoc->mapping_array[ii] = 0;
2365 asoc->mapping_array_base_tsn += (slide_from << 3);
2366 #ifdef SCTP_MAP_LOGGING
2367 sctp_log_map(asoc->mapping_array_base_tsn,
2368 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2369 SCTP_MAP_SLIDE_RESULT);
2374 /* check the special flag for stream resets */
2375 if ((asoc->pending_reply) &&
2376 ((compare_with_wrap((asoc->cumulative_tsn+1), ntohl(asoc->pending_reply->reset_at_tsn), MAX_TSN)) ||
2377 ((asoc->cumulative_tsn+1) == ntohl(asoc->pending_reply->reset_at_tsn)))
2379 /* we have finished working through the backlogged TSNs;
2380 * now it is time to reset streams.
2381 * 1: call reset function.
2382 * 2: free pending_reply space
2383 * 3: distribute any chunks in pending_reply_queue.
2385 struct sctp_tmit_chunk *chk;
2386 sctp_handle_stream_reset_response(stcb, asoc->pending_reply);
2387 kfree(asoc->pending_reply, M_PCB);
2388 asoc->pending_reply = NULL;
2389 chk = TAILQ_FIRST(&asoc->pending_reply_queue);
2391 TAILQ_REMOVE(&asoc->pending_reply_queue, chk, sctp_next);
2392 sctp_queue_data_to_stream(stcb, asoc, chk, abort_flag);
2396 chk = TAILQ_FIRST(&asoc->pending_reply_queue);
2400 * Now we need to see if we need to queue a sack or just start
2401 * the timer (if allowed).
2404 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2406 * Ok special case, in SHUTDOWN-SENT case.
2407 * here we make sure the SACK timer is off and
2408 * instead send a SHUTDOWN and a SACK
2410 if (callout_pending(&stcb->asoc.dack_timer.timer)) {
2411 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2412 stcb->sctp_ep, stcb, NULL);
2415 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
2416 kprintf("%s:%d sends a shutdown\n",
2422 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2423 sctp_send_sack(stcb);
2426 /* is there a gap now ? */
2427 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2428 stcb->asoc.cumulative_tsn, MAX_TSN);
2429 if ((stcb->asoc.first_ack_sent == 0) || /* First time we send a sack */
2430 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no longer is one */
2431 (stcb->asoc.numduptsns) || /* we have dup's */
2432 (is_a_gap) || /* is still a gap */
2433 (callout_pending(&stcb->asoc.dack_timer.timer)) /* timer was up, i.e. second packet */
2436 * Ok we must build a SACK since the timer
2437 * is pending, we got our first packet OR
2438 * there are gaps or duplicates.
2440 stcb->asoc.first_ack_sent = 1;
2441 sctp_send_sack(stcb);
2442 /* The sending will stop the timer */
2444 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2445 stcb->sctp_ep, stcb, NULL);
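/*
 * In short: a SACK goes out immediately for the very first
 * packet, for duplicates, while a gap exists (or one just
 * closed), or when the delayed-ack timer was already pending
 * (i.e. every second packet); otherwise we just arm the
 * delayed-SACK timer.
 */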
2452 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc, int hold_locks)
2454 struct sctp_tmit_chunk *chk;
2456 u_int16_t nxt_todel;
2459 if (asoc->fragmented_delivery_inprogress) {
2460 sctp_service_reassembly(stcb, asoc, hold_locks);
2462 /* Can we proceed further, i.e. the PD-API is complete */
2463 if (asoc->fragmented_delivery_inprogress) {
2469 * Yes, reassembly delivery no longer in progress see if we
2470 * have some on the sb hold queue.
2473 if (stcb->sctp_socket->so_rcv.ssb_cc >= stcb->sctp_socket->so_rcv.ssb_hiwat) {
2475 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2478 /* If deliver_data says no we must stop */
2479 if (sctp_deliver_data(stcb, asoc, NULL, hold_locks) == 0)
2482 chk = TAILQ_FIRST(&asoc->delivery_queue);
2485 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2488 * Now is there some other chunk I can deliver
2489 * from the reassembly queue.
2491 chk = TAILQ_FIRST(&asoc->reasmqueue);
2493 asoc->size_on_reasm_queue = 0;
2494 asoc->cnt_on_reasm_queue = 0;
2497 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2498 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2499 ((nxt_todel == chk->rec.data.stream_seq) ||
2500 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2502 * Yep the first one is here. We setup to
2503 * start reception, by backing down the TSN
2504 * just in case we can't deliver.
2508 * Before we start though either all of the
2509 * message should be here or 1/4 the socket buffer
2510 * max or nothing on the delivery queue and something
2513 if (TAILQ_EMPTY(&asoc->delivery_queue) &&
2514 (sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2515 (asoc->size_on_reasm_queue >=
2516 (stcb->sctp_socket->so_rcv.ssb_hiwat >> 2) && tsize))) {
2517 asoc->fragmented_delivery_inprogress = 1;
2518 asoc->tsn_last_delivered = chk->rec.data.TSN_seq-1;
2519 asoc->str_of_pdapi = chk->rec.data.stream_number;
2520 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2521 asoc->fragment_flags = chk->rec.data.rcv_flags;
2522 sctp_service_reassembly(stcb, asoc, hold_locks);
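/*
 * Note the partial-delivery API only engages when the delivery
 * queue is empty and either the whole message is on the
 * reassembly queue or at least a quarter of the receive buffer
 * is tied up in fragments; e.g. with ssb_hiwat = 64k, more than
 * 16k of a partial message must be queued before delivery starts.
 */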
2528 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2529 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2530 struct sctp_nets *net, u_int32_t *high_tsn)
2532 struct sctp_data_chunk *ch, chunk_buf;
2533 struct sctp_association *asoc;
2534 int num_chunks = 0; /* number of DATA chunks processed */
2535 int chk_length, break_flag, last_chunk;
2536 int abort_flag = 0, was_a_gap = 0;
2540 sctp_set_rwnd(stcb, &stcb->asoc);
2544 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2545 stcb->asoc.cumulative_tsn, MAX_TSN)) {
2546 /* there was a gap before this data was processed */
2550 * setup where we got the last DATA packet from for
2551 * any SACK that may need to go out. Don't bump
2552 * the net. This is done ONLY when a chunk
2555 asoc->last_data_chunk_from = net;
2558 * Now before we proceed we must figure out if this
2559 * is a wasted cluster... i.e. it is a small packet
2560 * sent in and yet the driver underneath allocated a
2561 * full cluster for it. If so we must copy it to a
2562 * smaller mbuf and free up the cluster mbuf. This
2563 * will help with cluster starvation.
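/*
 * e.g. a 60-byte DATA packet received into a 2k cluster
 * (MCLBYTES is typically 2048 here) would otherwise pin the
 * whole cluster; copying it into a plain mbuf (< MHLEN bytes of
 * storage) returns the cluster to the pool.
 */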
2565 if (m->m_len < (long)MHLEN && m->m_next == NULL) {
2566 /* we only handle mbufs that are singletons.. not chains */
2567 #ifdef __DragonFly__
2568 if ((*mm)->m_flags & M_PKTHDR)
2569 MGETHDR(m, MB_DONTWAIT, MT_HEADER);
2572 MGET(m, MB_DONTWAIT, MT_DATA);
2574 /* ok lets see if we can copy the data up */
2577 if ((*mm)->m_flags & M_PKTHDR) {
2578 /* got to copy the header first */
2580 M_COPY_PKTHDR(m, (*mm));
2582 M_MOVE_PKTHDR(m, (*mm));
2585 /* get the pointers and copy */
2586 to = mtod(m, caddr_t *);
2587 from = mtod((*mm), caddr_t *);
2588 memcpy(to, from, (*mm)->m_len);
2589 /* copy the length and free up the old */
2590 m->m_len = (*mm)->m_len;
2592 /* success, back copy */
2595 /* We are in trouble in the mbuf world .. yikes */
2599 /* get pointer to the first chunk header */
2600 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2601 sizeof(chunk_buf), (u_int8_t *)&chunk_buf);
2603 kprintf(" ... its short\n");
2607 * process all DATA chunks...
2611 if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2612 kprintf("In process data off:%d length:%d iphlen:%d ch->type:%d\n",
2613 *offset, length, iphlen, (int)ch->ch.chunk_type);
2617 *high_tsn = asoc->cumulative_tsn;
2619 while (ch->ch.chunk_type == SCTP_DATA) {
2620 /* validate chunk length */
2621 chk_length = ntohs(ch->ch.chunk_length);
2622 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1 ||
2623 length - *offset < chk_length) {
2625 * Need to send an abort since we had an invalid
2628 struct mbuf *op_err;
2629 MGET(op_err, MB_DONTWAIT, MT_DATA);
2631 struct sctp_paramhdr *ph;
2634 op_err->m_len = sizeof(struct sctp_paramhdr) +
2636 ph = mtod(op_err, struct sctp_paramhdr *);
2638 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2639 ph->param_length = htons(op_err->m_len);
2640 ippp = (u_int32_t *)(ph + 1);
2641 *ippp = htonl(0x30000001);
2643 sctp_abort_association(inp, stcb, m, iphlen, sh,
2648 if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2649 kprintf("A chunk of len:%d to process (tot:%d)\n",
2650 chk_length, length - *offset);
2654 #ifdef SCTP_AUDITING_ENABLED
2655 sctp_audit_log(0xB1, 0);
2657 if (SCTP_SIZE32(chk_length) == length - *offset) {
2662 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2663 chk_length, net, high_tsn, &abort_flag, &break_flag,
2667 if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2668 kprintf("Now incr num_chunks to %d\n",
2678 * Set because we ran out of rwnd space and no drop rep
2684 *offset += SCTP_SIZE32(chk_length);
2685 if (*offset >= length) {
2686 /* no more data left in the mbuf chain */
2689 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2690 sizeof(chunk_buf), (u_int8_t *)&chunk_buf);
2698 * we need to report rwnd overrun drops.
2700 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2704 * Did we get data?  If so, update the time for
2705 * auto-close and give the peer credit for being
2708 sctp_pegs[SCTP_DATA_DG_RECV]++;
2709 stcb->asoc.overall_error_count = 0;
2710 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2712 /* now service all of the reassm queue and delivery queue */
2713 sctp_service_queues(stcb, asoc, 0);
2714 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2716 * Assure that we ack right away by making
2717 * sure that a d-ack timer is running. So the
2718 * sack_check will send a sack.
2720 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb,
2723 /* Start a sack timer or QUEUE a SACK for sending */
2724 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
2732 sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc,
2733 struct sctp_sack_chunk *ch, u_long last_tsn, u_long *biggest_tsn_acked,
2734 u_long *biggest_newly_acked_tsn, int num_seg, int *ecn_seg_sums)
2736 /************************************************/
2737 /* process fragments and update sendqueue */
2738 /************************************************/
2739 struct sctp_sack *sack;
2740 struct sctp_gap_ack_block *frag;
2741 struct sctp_tmit_chunk *tp1;
2744 #ifdef SCTP_FR_LOGGING
2747 uint16_t frag_strt, frag_end, primary_flag_set;
2748 u_long last_frag_high;
2750 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2751 primary_flag_set = 1;
2753 primary_flag_set = 0;
2757 frag = (struct sctp_gap_ack_block *)((caddr_t)sack +
2758 sizeof(struct sctp_sack));
2761 for (i = 0; i < num_seg; i++) {
2762 frag_strt = ntohs(frag->start);
2763 frag_end = ntohs(frag->end);
2764 /* some sanity checks on the fragment offsets */
2765 if (frag_strt > frag_end) {
2766 /* this one is malformed, skip */
2770 if (compare_with_wrap((frag_end+last_tsn), *biggest_tsn_acked,
2772 *biggest_tsn_acked = frag_end+last_tsn;
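/*
 * Gap ack blocks are 16-bit offsets relative to the cumulative
 * TSN ack, e.g. cum-ack 1000 with a block of start 2, end 4
 * reports TSNs 1002 through 1004 as received.
 */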
2774 /* mark acked dgs and find out the highestTSN being acked */
2776 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2778 /* save the locations of the last frags */
2779 last_frag_high = frag_end + last_tsn;
2782 * now let's see if we need to reset the queue
2783 * due to an out-of-order SACK fragment
2785 if (compare_with_wrap(frag_strt+last_tsn,
2786 last_frag_high, MAX_TSN)) {
2788 * if the new frag starts after the last TSN
2789 * frag covered, we are ok
2790 * and this one is beyond the last one
2795 * ok, they have reset us, so we need to reset the
2796 * queue; this will cause extra hunting but
2797 * hey, they chose the performance
2798 * hit when they failed to order their gaps..
2800 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2802 last_frag_high = frag_end + last_tsn;
2804 for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) {
2806 #ifdef SCTP_FR_LOGGING
2807 if (tp1->rec.data.doing_fast_retransmit)
2811 if (tp1->rec.data.TSN_seq == j) {
2812 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2813 /* must be held until cum-ack passes */
2814 /* ECN Nonce: Add the nonce value to the sender's nonce sum */
2815 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
2817 * If it is less than
2819 * no-longer in flight.
2821 * already be set via
2824 * i.e. ACKED or MARKED.
2826 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2827 *biggest_newly_acked_tsn,
2829 *biggest_newly_acked_tsn =
2830 tp1->rec.data.TSN_seq;
2832 tp1->whoTo->flight_size -= tp1->book_size;
2833 if (tp1->whoTo->flight_size < 0) {
2834 tp1->whoTo->flight_size = 0;
2836 asoc->total_flight -=
2839 if (asoc->total_flight < 0) {
2840 asoc->total_flight = 0;
2843 asoc->total_flight_count--;
2844 if (asoc->total_flight_count < 0) {
2845 asoc->total_flight_count = 0;
2848 if (tp1->snd_count < 2) {
2849 /* True non-retransmitted chunk */
2850 tp1->whoTo->net_ack2 +=
2853 /* update RTO too? */
2856 sctp_calculate_rto(stcb,
2859 &tp1->sent_rcv_time);
2860 tp1->whoTo->rto_pending = 0;
2865 if (tp1->sent <= SCTP_DATAGRAM_RESEND &&
2866 tp1->sent != SCTP_DATAGRAM_UNSENT &&
2867 compare_with_wrap(tp1->rec.data.TSN_seq,
2868 asoc->this_sack_highest_gap,
2870 asoc->this_sack_highest_gap =
2871 tp1->rec.data.TSN_seq;
2872 if (primary_flag_set) {
2873 tp1->whoTo->cacc_saw_newack = 1;
2876 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2879 SCTP_DEBUG_INDATA3) {
2880 kprintf("Hmm. one that is in RESEND that is now ACKED\n");
2883 asoc->sent_queue_retran_cnt--;
2884 #ifdef SCTP_AUDITING_ENABLED
2885 sctp_audit_log(0xB2,
2886 (asoc->sent_queue_retran_cnt & 0x000000ff));
2889 if (asoc->sent_queue_retran_cnt < 0) {
2890 kprintf("huh3 retran went negative?\n");
2891 #ifdef SCTP_AUDITING_ENABLED
2896 asoc->sent_queue_retran_cnt = 0;
2900 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
2901 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
2902 tp1->sent = SCTP_DATAGRAM_MARKED;
2905 } /* if (tp1->TSN_seq == j) */
2906 if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
2909 tp1 = TAILQ_NEXT(tp1, sctp_next);
2910 }/* end while (tp1) */
2911 } /* end for (j = fragStart */
2912 frag++; /* next one */
2914 #ifdef SCTP_FR_LOGGING
2916 sctp_log_fr(*biggest_tsn_acked, *biggest_newly_acked_tsn,
2917 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2922 sctp_check_for_revoked(struct sctp_association *asoc, u_long cum_ack,
2923 u_long biggest_tsn_acked)
2925 struct sctp_tmit_chunk *tp1;
2928 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2930 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
2933 * ok this guy is either ACK or MARKED. If it is ACKED
2934 * it has been previously acked but not this time i.e.
2935 * revoked. If it is MARKED it was ACK'ed again.
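/*
 * e.g. TSN 1005 was gap-acked earlier (ACKED); a later SACK
 * whose blocks no longer cover 1005 means the peer reneged, so
 * the chunk drops back to SENT and is again eligible for
 * retransmission.
 */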
2937 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2938 /* it has been revoked */
2940 * We do NOT add back to flight size here since
2941 * it is really NOT in flight. A resend (when/if
2942 * it occurs) will add to flight size.
2944 tp1->sent = SCTP_DATAGRAM_SENT;
2946 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2947 /* it has been re-acked in this SACK */
2948 tp1->sent = SCTP_DATAGRAM_ACKED;
2951 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
2953 /* above the sack */
2956 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2958 tp1 = TAILQ_NEXT(tp1, sctp_next);
2960 if (tot_revoked > 0) {
2961 /* Setup the ecn nonce re-sync point. We
2962 * do this since once data is revoked
2963 * we begin to retransmit things, which
2964 * do NOT have the ECN bits set. This means
2965 * we are now out of sync and must wait until
2966 * we get back in sync with the peer to
2969 tp1 = TAILQ_FIRST(&asoc->send_queue);
2971 asoc->nonce_resync_tsn = asoc->sending_seq;
2973 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
2975 asoc->nonce_wait_for_ecne = 0;
2976 asoc->nonce_sum_check = 0;
2981 extern int sctp_peer_chunk_oh;
2984 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2985 u_long biggest_tsn_acked, int strike_enabled,
2986 u_long biggest_tsn_newly_acked, int accum_moved)
2988 struct sctp_tmit_chunk *tp1;
2992 u_int32_t sending_seq;
2993 int primary_switch_active = 0;
2994 int double_switch_active = 0;
2996 /* select the sending_seq, this is
2997 * either the next thing ready to
2998 * be sent but not transmitted, OR,
2999 * the next seq we assign.
3001 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3003 sending_seq = asoc->sending_seq;
3005 sending_seq = tp1->rec.data.TSN_seq;
3008 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3009 primary_switch_active = 1;
3011 if (asoc->primary_destination->dest_state & SCTP_ADDR_DOUBLE_SWITCH) {
3012 double_switch_active = 1;
3014 if (stcb->asoc.peer_supports_prsctp ) {
3015 SCTP_GETTIME_TIMEVAL(&now);
3017 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3020 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3022 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3026 if ((tp1->flags & (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) ==
3027 SCTP_PR_SCTP_ENABLED &&
3028 tp1->sent < SCTP_DATAGRAM_ACKED) {
3029 /* Is it expired? */
3031 if (timercmp(&now, &tp1->rec.data.timetodrop, >))
3033 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3036 /* Yes so drop it */
3037 if (tp1->data != NULL) {
3038 sctp_release_pr_sctp_chunk(stcb, tp1,
3039 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3042 tp1 = TAILQ_NEXT(tp1, sctp_next);
3047 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3048 asoc->this_sack_highest_gap, MAX_TSN)) {
3049 /* we are beyond the tsn in the sack */
3052 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3053 /* either a RESEND, ACKED, or MARKED */
3055 tp1 = TAILQ_NEXT(tp1, sctp_next);
3058 if (primary_switch_active && (strike_enabled == 0)) {
3059 if (tp1->whoTo != asoc->primary_destination) {
3061 * We can only strike things on the primary if
3062 * the strike_enabled flag is clear
3064 tp1 = TAILQ_NEXT(tp1, sctp_next);
3067 } else if (primary_switch_active) {
3068 if (tp1->whoTo->cacc_saw_newack == 0) {
3070 * Only one was received but it was NOT
3073 tp1 = TAILQ_NEXT(tp1, sctp_next);
3077 if (double_switch_active &&
3078 (compare_with_wrap(asoc->primary_destination->next_tsn_at_change,
3079 tp1->rec.data.TSN_seq, MAX_TSN))) {
3081 * With a double switch we do NOT mark unless we
3082 * are beyond the switch point.
3084 tp1 = TAILQ_NEXT(tp1, sctp_next);
3088 * Here we check to see if we were have already done a FR
3089 * and if so we see if the biggest TSN we saw in the sack is
3090 * smaller than the recovery point. If so we don't strike the
3091 * tsn... otherwise we CAN strike the TSN.
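/*
 * A "strike" below bumps tp1->sent one step; when it reaches
 * SCTP_DATAGRAM_RESEND the chunk is queued for fast retransmit.
 */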
3093 if (accum_moved && asoc->fast_retran_loss_recovery) {
3095 * Strike the TSN if in fast-recovery and
3099 } else if (tp1->rec.data.doing_fast_retransmit) {
3101 * For those that have done a FR we must
3102 * take special consideration if we strike, i.e.
3103 * the biggest_newly_acked must be higher
3104 * than the sending_seq at the time we did
3107 #ifdef SCTP_FR_TO_ALTERNATE
3109 * If FR's go to new networks, then we
3110 * must only do this for singly homed asoc's. However
3111 * if the FR's go to the same network (Armando's work)
3112 * then it's ok to FR multiple times.
3114 if (asoc->numnets < 2)
3119 if ((compare_with_wrap(biggest_tsn_newly_acked,
3120 tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3121 (biggest_tsn_newly_acked ==
3122 tp1->rec.data.fast_retran_tsn)) {
3124 * Strike the TSN, since this ack is
3125 * beyond where things were when we did
3128 #ifdef SCTP_FR_LOGGING
3129 sctp_log_fr(biggest_tsn_newly_acked,
3130 tp1->rec.data.TSN_seq,
3131 tp1->rec.data.fast_retran_tsn,
3132 SCTP_FR_LOG_STRIKE_CHUNK);
3138 } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3139 biggest_tsn_newly_acked, MAX_TSN)) {
3141 * We don't strike these:
3142 * This is the HTNA algorithm i.e. we don't strike
3143 * If our TSN is larger than the Highest TSN Newly
3148 /* Strike the TSN */
3151 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3152 /* Increment the count to resend */
3153 struct sctp_nets *alt;
3155 #ifdef SCTP_FR_LOGGING
3156 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3160 /* This is a subsequent FR */
3161 sctp_pegs[SCTP_DUP_FR]++;
3163 asoc->sent_queue_retran_cnt++;
3164 #ifdef SCTP_FR_TO_ALTERNATE
3165 /* Can we find an alternate? */
3166 alt = sctp_find_alternate_net(stcb, tp1->whoTo);
3169 * default behavior is to NOT retransmit FR's
3170 * to an alternate. Armando Caro's paper details
3175 tp1->rec.data.doing_fast_retransmit = 1;
3177 /* mark the sending seq for possible subsequent FR's */
3178 if (TAILQ_EMPTY(&asoc->send_queue)) {
3180 * If the queue of send is empty then its the
3181 * next sequence number that will be assigned so
3182 * we subtract one from this to get the one we
3185 tp1->rec.data.fast_retran_tsn = sending_seq - 1;
3188 * If there are chunks on the send queue
3189 * (unsent data that has made it from the
3190 * stream queues but not out the door), we take
3191 * the first one (which will have the lowest
3192 * TSN) and subtract one to get the one we last
3195 struct sctp_tmit_chunk *ttt;
3196 ttt = TAILQ_FIRST(&asoc->send_queue);
3197 tp1->rec.data.fast_retran_tsn =
3198 ttt->rec.data.TSN_seq - 1;
3202 * this guy had an RTO calculation pending on it,
3205 tp1->whoTo->rto_pending = 0;
3208 /* fix counts and things */
3210 tp1->whoTo->net_ack++;
3211 tp1->whoTo->flight_size -= tp1->book_size;
3212 if (tp1->whoTo->flight_size < 0) {
3213 tp1->whoTo->flight_size = 0;
3215 #ifdef SCTP_LOG_RWND
3216 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3217 asoc->peers_rwnd , tp1->send_size, sctp_peer_chunk_oh);
3219 /* add back to the rwnd */
3220 asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh);
3222 /* remove from the total flight */
3223 asoc->total_flight -= tp1->book_size;
3224 if (asoc->total_flight < 0) {
3225 asoc->total_flight = 0;
3227 asoc->total_flight_count--;
3228 if (asoc->total_flight_count < 0) {
3229 asoc->total_flight_count = 0;
3231 if (alt != tp1->whoTo) {
3232 /* yes, there is an alternate. */
3233 sctp_free_remote_addr(tp1->whoTo);
3238 tp1 = TAILQ_NEXT(tp1, sctp_next);
3241 if (tot_retrans > 0) {
3242 /* Setup the ecn nonce re-sync point. We
3243 * do this since once we go to FR something
3244 * we introduce a Karn's rule scenario and
3245 * won't know the totals for the ECN bits.
3247 asoc->nonce_resync_tsn = sending_seq;
3248 asoc->nonce_wait_for_ecne = 0;
3249 asoc->nonce_sum_check = 0;
3254 struct sctp_tmit_chunk *
3255 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3256 struct sctp_association *asoc)
3258 struct sctp_tmit_chunk *tp1, *tp2, *a_adv=NULL;
3262 if (asoc->peer_supports_prsctp == 0) {
3265 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3267 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3268 tp1->sent != SCTP_DATAGRAM_RESEND) {
3269 /* no chance to advance, out of here */
3272 if ((tp1->flags & SCTP_PR_SCTP_ENABLED) == 0) {
3274 * We can't fwd-tsn past any that are reliable
3275 * aka retransmitted until the asoc fails.
3280 SCTP_GETTIME_TIMEVAL(&now);
3283 tp2 = TAILQ_NEXT(tp1, sctp_next);
3285 * now we got a chunk which is marked for another
3286 * retransmission to a PR-stream but has run
3287 * out its chances already maybe OR has been
3288 * marked to skip now. Can we skip it if its a
3291 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3292 (tp1->flags & SCTP_PR_SCTP_BUFFER) == 0) {
3294 * Now is this one marked for resend and its time
3298 if (timercmp(&now, &tp1->rec.data.timetodrop, >))
3300 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3303 /* Yes so drop it */
3305 sctp_release_pr_sctp_chunk(stcb, tp1,
3306 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3311 * No, we are done when we hit one marked for resend
3312 * whose time has not expired.
3318 * Ok now if this chunk is marked to drop it
3319 * we can clean up the chunk, advance our peer ack point
3320 * and we can check the next chunk.
3322 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3323 /* advance PeerAckPoint goes forward */
3324 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3327 * we don't want to de-queue it here. Just wait for the
3328 * next peer SACK to come with a new cumTSN and then
3329 * the chunk will be dropped in the normal fashion.
3332 sctp_free_bufspace(stcb, asoc, tp1);
3334 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
3335 kprintf("--total out:%lu total_mbuf_out:%lu\n",
3336 (u_long)asoc->total_output_queue_size,
3337 (u_long)asoc->total_output_mbuf_queue_size);
3341 * Maybe there should be another notification
3344 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3345 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3347 sctp_m_freem(tp1->data);
3349 sctp_sowwakeup(stcb->sctp_ep,
3353 /* If it is still in RESEND we can advance no further */
3357 * If we hit here we just dumped tp1, move to next
3358 * tsn on sent queue.
3365 #ifdef SCTP_HIGH_SPEED
3366 struct sctp_hs_raise_drop {
3369 int32_t drop_percent;
3372 #define SCTP_HS_TABLE_SIZE 73
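/*
 * The table below is in the style of the HighSpeed TCP response
 * function (cf. RFC 3649): each row is { cwnd threshold in KB,
 * additive increase in KB, drop percent }.  Lookup converts cwnd
 * to KB (cwnd >> 10) and finds the applicable row, so larger
 * windows grow faster per SACK (increase << 10 bytes) and back
 * off by less than the standard 50% on loss.
 */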
3374 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
3382 {1058,8,33}, /* 7 */
3383 {1284,9,32}, /* 8 */
3384 {1529,10,31}, /* 9 */
3385 {1793,11,30}, /* 10 */
3386 {2076,12,29}, /* 11 */
3387 {2378,13,28}, /* 12 */
3388 {2699,14,28}, /* 13 */
3389 {3039,15,27}, /* 14 */
3390 {3399,16,27}, /* 15 */
3391 {3778,17,26}, /* 16 */
3392 {4177,18,26}, /* 17 */
3393 {4596,19,25}, /* 18 */
3394 {5036,20,25}, /* 19 */
3395 {5497,21,24}, /* 20 */
3396 {5979,22,24}, /* 21 */
3397 {6483,23,23}, /* 22 */
3398 {7009,24,23}, /* 23 */
3399 {7558,25,22}, /* 24 */
3400 {8130,26,22}, /* 25 */
3401 {8726,27,22}, /* 26 */
3402 {9346,28,21}, /* 27 */
3403 {9991,29,21}, /* 28 */
3404 {10661,30,21}, /* 29 */
3405 {11358,31,20}, /* 30 */
3406 {12082,32,20}, /* 31 */
3407 {12834,33,20}, /* 32 */
3408 {13614,34,19}, /* 33 */
3409 {14424,35,19}, /* 34 */
3410 {15265,36,19}, /* 35 */
3411 {16137,37,19}, /* 36 */
3412 {17042,38,18}, /* 37 */
3413 {17981,39,18}, /* 38 */
3414 {18955,40,18}, /* 39 */
3415 {19965,41,17}, /* 40 */
3416 {21013,42,17}, /* 41 */
3417 {22101,43,17}, /* 42 */
3418 {23230,44,17}, /* 43 */
3419 {24402,45,16}, /* 44 */
3420 {25618,46,16}, /* 45 */
3421 {26881,47,16}, /* 46 */
3422 {28193,48,16}, /* 47 */
3423 {29557,49,15}, /* 48 */
3424 {30975,50,15}, /* 49 */
3425 {32450,51,15}, /* 50 */
3426 {33986,52,15}, /* 51 */
3427 {35586,53,14}, /* 52 */
3428 {37253,54,14}, /* 53 */
3429 {38992,55,14}, /* 54 */
3430 {40808,56,14}, /* 55 */
3431 {42707,57,13}, /* 56 */
3432 {44694,58,13}, /* 57 */
3433 {46776,59,13}, /* 58 */
3434 {48961,60,13}, /* 59 */
3435 {51258,61,13}, /* 60 */
3436 {53677,62,12}, /* 61 */
3437 {56230,63,12}, /* 62 */
3438 {58932,64,12}, /* 63 */
3439 {61799,65,12}, /* 64 */
3440 {64851,66,11}, /* 65 */
3441 {68113,67,11}, /* 66 */
3442 {71617,68,11}, /* 67 */
3443 {75401,69,10}, /* 68 */
3444 {79517,70,10}, /* 69 */
3445 {84035,71,10}, /* 70 */
3446 {89053,72,10}, /* 71 */
3447 {94717,73,9} /* 72 */
3451 sctp_hs_cwnd_increase(struct sctp_nets *net)
3453 int cur_val, i, indx, incr;
3455 cur_val = net->cwnd >> 10;
3456 indx = SCTP_HS_TABLE_SIZE - 1;
3458 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3460 if (net->net_ack > net->mtu) {
3461 net->cwnd += net->mtu;
3462 #ifdef SCTP_CWND_LOGGING
3463 sctp_log_cwnd(net, net->mtu, SCTP_CWND_LOG_FROM_SS);
3466 net->cwnd += net->net_ack;
3467 #ifdef SCTP_CWND_LOGGING
3468 sctp_log_cwnd(net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
3472 for (i=net->last_hs_used; i<SCTP_HS_TABLE_SIZE; i++) {
3473 if (cur_val < sctp_cwnd_adjust[i].cwnd) {
3478 net->last_hs_used = indx;
3479 incr = ((sctp_cwnd_adjust[indx].increase) << 10);
3481 #ifdef SCTP_CWND_LOGGING
3482 sctp_log_cwnd(net, incr, SCTP_CWND_LOG_FROM_SS);
3488 sctp_hs_cwnd_decrease(struct sctp_nets *net)
3490 int cur_val, i, indx;
3491 #ifdef SCTP_CWND_LOGGING
3492 int old_cwnd = net->cwnd;
3495 cur_val = net->cwnd >> 10;
3496 indx = net->last_hs_used;
3497 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3499 net->ssthresh = net->cwnd / 2;
3500 if (net->ssthresh < (net->mtu*2)) {
3501 net->ssthresh = 2 * net->mtu;
3503 net->cwnd = net->ssthresh;
3504 #ifdef SCTP_CWND_LOGGING
3505 sctp_log_cwnd(net, (net->cwnd-old_cwnd), SCTP_CWND_LOG_FROM_FR);
3508 /* drop by the proper amount */
3509 net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
3510 sctp_cwnd_adjust[net->last_hs_used].drop_percent);
3511 net->cwnd = net->ssthresh;
3512 /* now where are we */
3513 indx = net->last_hs_used;
3514 cur_val = net->cwnd >> 10;
3515 /* reset where we are in the table */
3516 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3517 /* fell out of HS */
3518 net->last_hs_used = 0;
3520 for (i = indx; i >= 1; i--) {
3521 if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
3525 net->last_hs_used = indx;
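/*
 * i.e. below the first table threshold we fall back to the
 * standard cwnd/2 halving; above it cwnd shrinks by the current
 * row's drop percent and we re-locate our row in the table for
 * subsequent increases.
 */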
3532 sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
3533 struct sctp_nets *net_from, int *abort_now)
3535 struct sctp_association *asoc;
3536 struct sctp_sack *sack;
3537 struct sctp_tmit_chunk *tp1, *tp2;
3538 u_long cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked;
3540 unsigned int sack_length;
3542 int some_on_streamwheel;
3543 int strike_enabled = 0, cnt_of_cacc = 0;
3544 int accum_moved = 0;
3545 int marking_allowed = 1;
3546 int will_exit_fast_recovery=0;
3548 struct sctp_nets *net = NULL;
3549 int nonce_sum_flag, ecn_seg_sums=0;
3553 * Handle the incoming sack on data I have been sending.
3557 * we take any chance we can to service our queues since we
3558 * cannot get awoken when the socket is read from :<
3560 asoc->overall_error_count = 0;
3562 if (asoc->sent_queue_retran_cnt) {
3564 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3565 kprintf("Handling SACK for asoc:%p retran:%d\n",
3566 asoc, asoc->sent_queue_retran_cnt);
3571 sctp_service_queues(stcb, asoc, 0);
3574 * Now perform the actual SACK handling:
3575 * 1) Verify that it is not an old sack, if so discard.
3576 * 2) If there is nothing left in the send queue (cum-ack is equal
3577 * to last acked) then you have a duplicate too, update any rwnd
3578 * change and verify no timers are running. then return.
3579 * 3) Process any new consecutive data, i.e. cum-ack moved;
3580 * process these first and note that it moved.
3581 * 4) Process any sack blocks.
3582 * 5) Drop any acked from the queue.
3583 * 6) Check for any revoked blocks and mark.
3584 * 7) Update the cwnd.
3585 * 8) Nothing left, sync up flightsizes and things, stop all timers
3586 * and also check for shutdown_pending state. If so then go ahead
3587 * and send off the shutdown. If in shutdown recv, send off the
3588 * shutdown-ack and start that timer, Ret.
3589 * 9) Strike any non-acked things and do FR procedure if needed being
3590 * sure to set the FR flag.
3591 * 10) Do pr-sctp procedures.
3592 * 11) Apply any FR penalties.
3593 * 12) Assure we will SACK if in shutdown_recv state.
3596 sack_length = ntohs(ch->ch.chunk_length);
3597 if (sack_length < sizeof(struct sctp_sack_chunk)) {
3599 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3600 kprintf("Bad size on sack chunk .. to small\n");
3606 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
3608 cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
3609 num_seg = ntohs(sack->num_gap_ack_blks);
3612 if (TAILQ_EMPTY(&asoc->send_queue)) {
3613 send_s = asoc->sending_seq;
3615 tp1 = TAILQ_FIRST(&asoc->send_queue);
3616 send_s = tp1->rec.data.TSN_seq;
3619 if (sctp_strict_sacks) {
3620 if (cum_ack == send_s ||
3621 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
3624 * no way, we have not even sent this TSN out yet.
3625 * Peer is hopelessly messed up with us.
3630 MGET(oper, MB_DONTWAIT, MT_DATA);
3632 struct sctp_paramhdr *ph;
3635 oper->m_len = sizeof(struct sctp_paramhdr) +
3637 ph = mtod(oper, struct sctp_paramhdr *);
3638 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3639 ph->param_length = htons(oper->m_len);
3640 ippp = (u_int32_t *)(ph + 1);
3641 *ippp = htonl(0x30000002);
3643 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper);
3647 /* update the Rwnd of the peer */
3648 a_rwnd = (u_int32_t)ntohl(sack->a_rwnd);
3649 if (asoc->sent_queue_retran_cnt) {
3651 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3652 kprintf("cum_ack:%lx num_seg:%u last_acked_seq:%x\n",
3653 cum_ack, (u_int)num_seg, asoc->last_acked_seq);
3657 if (compare_with_wrap(asoc->t3timeout_highest_marked, cum_ack, MAX_TSN)) {
3658 /* we are not allowed to mark for FR */
3659 marking_allowed = 0;
3661 /**********************/
3662 /* 1) check the range */
3663 /**********************/
3664 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
3665 /* acking something behind */
3666 if (asoc->sent_queue_retran_cnt) {
3668 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3669 kprintf("The cum-ack is behind us\n");
3676 if (TAILQ_EMPTY(&asoc->sent_queue)) {
3677 /* nothing left on sendqueue.. consider done */
3678 #ifdef SCTP_LOG_RWND
3679 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
3680 asoc->peers_rwnd, 0, 0, a_rwnd);
3682 asoc->peers_rwnd = a_rwnd;
3683 if (asoc->sent_queue_retran_cnt) {
3685 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3686 kprintf("Huh? retran set but none on queue\n");
3689 asoc->sent_queue_retran_cnt = 0;
3691 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3692 /* SWS sender side engages */
3693 asoc->peers_rwnd = 0;
3695 /* stop any timers */
3696 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3697 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3699 net->partial_bytes_acked = 0;
3700 net->flight_size = 0;
3702 asoc->total_flight = 0;
3703 asoc->total_flight_count = 0;
3707 * We init netAckSz and netAckSz2 to 0. These are used to track 2
3708 * things. The total byte count acked is tracked in netAckSz AND
3709 * netAck2 is used to track the total bytes acked that are un-
3710 * ambiguous and were never retransmitted. We track these on a
3711 * per destination address basis.
3713 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3714 net->prev_cwnd = net->cwnd;
3718 /* process the new consecutive TSN first */
3719 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3721 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
3723 last_tsn == tp1->rec.data.TSN_seq) {
3724 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3725 /* ECN Nonce: Add the nonce to the sender's nonce sum */
3726 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
3728 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3730 * If it is less than ACKED, it is now
3731 * no-longer in flight. Higher values
3732 * may occur during marking
3734 if ((tp1->whoTo->dest_state &
3735 SCTP_ADDR_UNCONFIRMED) &&
3736 (tp1->snd_count < 2) ) {
3738 * If there was no retran and
3739 * the address is un-confirmed
3740 * and we sent there and are
3741 * now sacked.. its confirmed,
3744 tp1->whoTo->dest_state &=
3745 ~SCTP_ADDR_UNCONFIRMED;
3747 tp1->whoTo->flight_size -=
3749 if (tp1->whoTo->flight_size < 0) {
3750 tp1->whoTo->flight_size = 0;
3752 asoc->total_flight -= tp1->book_size;
3753 if (asoc->total_flight < 0) {
3754 asoc->total_flight = 0;
3756 asoc->total_flight_count--;
3757 if (asoc->total_flight_count < 0) {
3758 asoc->total_flight_count = 0;
3760 tp1->whoTo->net_ack += tp1->send_size;
3761 if (tp1->snd_count < 2) {
3762 /* True non-retransmited chunk */
3763 tp1->whoTo->net_ack2 +=
3765 /* update RTO too? */
3768 sctp_calculate_rto(stcb,
3770 &tp1->sent_rcv_time);
3771 tp1->whoTo->rto_pending = 0;
3776 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3778 if (sctp_debug_on & SCTP_DEBUG_INDATA3) {
3779 kprintf("Hmm. one that is in RESEND that is now ACKED\n");
3782 asoc->sent_queue_retran_cnt--;
3783 #ifdef SCTP_AUDITING_ENABLED
3784 sctp_audit_log(0xB3,
3785 (asoc->sent_queue_retran_cnt & 0x000000ff));
3787 if (asoc->sent_queue_retran_cnt < 0) {
3788 kprintf("huh4 retran went negative?\n");
3789 #ifdef SCTP_AUDITING_ENABLED
3790 sctp_auditing(31, inp, tcb,
3793 asoc->sent_queue_retran_cnt = 0;
3798 tp1->sent = SCTP_DATAGRAM_ACKED;
3803 tp1 = TAILQ_NEXT(tp1, sctp_next);
3805 /*******************************************/
3806 /* cancel ALL T3-send timer if accum moved */
3807 /*******************************************/
3809 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3810 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3814 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
3815 /* always set this up to cum-ack */
3816 asoc->this_sack_highest_gap = last_tsn;
3818 if (((num_seg * sizeof(struct sctp_gap_ack_block)) + sizeof(struct sctp_sack_chunk)) > sack_length) {
3819 /* skip corrupt segments */
3825 if (asoc->primary_destination->dest_state &
3826 SCTP_ADDR_SWITCH_PRIMARY) {
3827 /* clear the nets CACC flags */
3828 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3829 net->cacc_saw_newack = 0;
3833 * thisSackHighestGap will increase while handling NEW segments
3836 sctp_handle_segments(stcb, asoc, ch, last_tsn,
3837 &biggest_tsn_acked, &biggest_tsn_newly_acked,
3838 num_seg, &ecn_seg_sums);
3840 if (sctp_strict_sacks) {
3841 /* validate the biggest_tsn_acked in the gap acks
3842 * if strict adherence is wanted.
3844 if ((biggest_tsn_acked == send_s) ||
3845 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
3847 * peer is either confused or we are under
3848 * attack. We must abort.
3854 if (asoc->primary_destination->dest_state &
3855 SCTP_ADDR_SWITCH_PRIMARY) {
3856 /* clear the nets CACC flags */
3857 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3858 if (net->cacc_saw_newack) {
3866 if (cnt_of_cacc < 2) {
3872 /********************************************/
3873 /* drop the acked chunks from the sendqueue */
3874 /********************************************/
3875 asoc->last_acked_seq = cum_ack;
3876 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3877 if ((cum_ack == asoc->primary_destination->next_tsn_at_change) ||
3878 (compare_with_wrap(cum_ack,
3879 asoc->primary_destination->next_tsn_at_change, MAX_TSN))) {
3880 struct sctp_nets *lnet;
3881 /* Turn off the switch flag for ALL addresses */
3882 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
3883 asoc->primary_destination->dest_state &=
3884 ~(SCTP_ADDR_SWITCH_PRIMARY|SCTP_ADDR_DOUBLE_SWITCH);
3888 /* Drag along the t3 timeout point so we don't have a problem at wrap */
3889 if (marking_allowed) {
3890 asoc->t3timeout_highest_marked = cum_ack;
3892 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3894 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
3898 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3899 /* no more sent on list */
3902 tp2 = TAILQ_NEXT(tp1, sctp_next);
3903 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3905 sctp_free_bufspace(stcb, asoc, tp1);
3907 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
3908 kprintf("--total out:%lu total_mbuf_out:%lu\n",
3909 (u_long)asoc->total_output_queue_size,
3910 (u_long)asoc->total_output_mbuf_queue_size);
3914 sctp_m_freem(tp1->data);
3915 if (tp1->flags & SCTP_PR_SCTP_BUFFER) {
3916 asoc->sent_queue_cnt_removeable--;
3921 asoc->sent_queue_cnt--;
3922 sctp_free_remote_addr(tp1->whoTo);
3923 sctppcbinfo.ipi_count_chunk--;
3924 asoc->chunks_on_out_queue--;
3926 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3927 panic("Chunk count is going negative");
3929 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, tp1);
3930 sctppcbinfo.ipi_gencnt_chunk++;
3931 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
3933 } while (tp1 != NULL);
3936 if (asoc->fast_retran_loss_recovery && accum_moved) {
3937 if (compare_with_wrap(asoc->last_acked_seq,
3938 asoc->fast_recovery_tsn, MAX_TSN) ||
3939 asoc->last_acked_seq == asoc->fast_recovery_tsn) {
3940 /* Setup so we will exit RFC2582 fast recovery */
3941 will_exit_fast_recovery = 1;
3945 /* Check for revoked fragments if we had
3946 * fragments in a previous segment. If we
3947 * had no previous fragments we cannot have
3950 if (asoc->saw_sack_with_frags)
3951 sctp_check_for_revoked(asoc, cum_ack, biggest_tsn_acked);
3954 asoc->saw_sack_with_frags = 1;
3956 asoc->saw_sack_with_frags = 0;
3958 /******************************/
3960 /******************************/
3961 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3962 /* if nothing was acked on this destination skip it */
3963 if (net->net_ack == 0)
3966 if (net->net_ack2 > 0) {
3968 * Karn's rule applies to clearing error count,
3971 net->error_count = 0;
3972 if ((net->dest_state&SCTP_ADDR_NOT_REACHABLE) ==
3973 SCTP_ADDR_NOT_REACHABLE) {
3974 /* addr came good */
3975 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
3976 net->dest_state |= SCTP_ADDR_REACHABLE;
3977 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3978 SCTP_RECEIVED_SACK, (void *)net);
3979 /* now was it the primary? if so restore */
3980 if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
3981 sctp_set_primary_addr(stcb, NULL, net);
3986 if (asoc->fast_retran_loss_recovery &&
3987 will_exit_fast_recovery == 0) {
3988 /* If we are in loss recovery we skip any cwnd update */
3989 sctp_pegs[SCTP_CWND_SKIP]++;
3990 goto skip_cwnd_update;
3993 /* If the cumulative ack moved we can proceed */
3994 if (net->cwnd <= net->ssthresh) {
3995 /* We are in slow start */
3996 if (net->flight_size + net->net_ack >=
3998 #ifdef SCTP_HIGH_SPEED
3999 sctp_hs_cwnd_increase(net);
4001 if (net->net_ack > net->mtu) {
4002 net->cwnd += net->mtu;
4003 #ifdef SCTP_CWND_LOGGING
4004 sctp_log_cwnd(net, net->mtu,
4005 SCTP_CWND_LOG_FROM_SS);
4009 net->cwnd += net->net_ack;
4010 #ifdef SCTP_CWND_LOGGING
4011 sctp_log_cwnd(net, net->net_ack,
4012 SCTP_CWND_LOG_FROM_SS);
4017 sctp_pegs[SCTP_CWND_SS]++;
4020 sctp_pegs[SCTP_CWND_NOUSE_SS]++;
4021 dif = net->cwnd - (net->flight_size +
4023 #ifdef SCTP_CWND_LOGGING
4024 /* sctp_log_cwnd(net, net->net_ack,
4025 SCTP_CWND_LOG_NOADV_SS);*/
4027 if (dif > sctp_pegs[SCTP_CWND_DIFF_SA]) {
4028 sctp_pegs[SCTP_CWND_DIFF_SA] =
4030 sctp_pegs[SCTP_OQS_AT_SS] =
4031 asoc->total_output_queue_size;
4032 sctp_pegs[SCTP_SQQ_AT_SS] =
4033 asoc->sent_queue_cnt;
4034 sctp_pegs[SCTP_SQC_AT_SS] =
4035 asoc->send_queue_cnt;
4039 /* We are in congestion avoidance */
4040 if (net->flight_size + net->net_ack >=
4043 * add to pba only if we had a cwnd's
4044 * worth (or so) in flight OR the
4045 * burst limit was applied.
4047 net->partial_bytes_acked +=
4051 * Do we need to increase
4052 * (if pba is > cwnd)?
4054 if (net->partial_bytes_acked >=
4057 net->partial_bytes_acked) {
4058 net->partial_bytes_acked -=
4061 net->partial_bytes_acked =
4064 net->cwnd += net->mtu;
4065 #ifdef SCTP_CWND_LOGGING
4066 sctp_log_cwnd(net, net->mtu,
4067 SCTP_CWND_LOG_FROM_CA);
4069 sctp_pegs[SCTP_CWND_CA]++;
4073 sctp_pegs[SCTP_CWND_NOUSE_CA]++;
4074 #ifdef SCTP_CWND_LOGGING
4075 /* sctp_log_cwnd(net, net->net_ack,
4076 SCTP_CWND_LOG_NOADV_CA);
4079 dif = net->cwnd - (net->flight_size +
4081 if (dif > sctp_pegs[SCTP_CWND_DIFF_CA]) {
4082 sctp_pegs[SCTP_CWND_DIFF_CA] =
4084 sctp_pegs[SCTP_OQS_AT_CA] =
4085 asoc->total_output_queue_size;
4086 sctp_pegs[SCTP_SQQ_AT_CA] =
4087 asoc->sent_queue_cnt;
4088 sctp_pegs[SCTP_SQC_AT_CA] =
4089 asoc->send_queue_cnt;
4096 sctp_pegs[SCTP_CWND_NOCUM]++;
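/*
 * Net effect: slow start grows cwnd by up to one MTU per SACK,
 * while congestion avoidance grows it by at most one MTU per
 * cwnd's worth of bytes acked (roughly once per RTT) via
 * partial_bytes_acked.
 */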
4100 * NOW, according to Karn's rule, do we need to restore the
4101 * RTO timer? Check our net_ack2. If not set then we
4102 * have an ambiguity, i.e. all data ack'd was sent to more
4106 if (net->net_ack2) {
4107 /* restore any doubled timers */
4108 net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
4109 if (net->RTO < stcb->asoc.minrto) {
4110 net->RTO = stcb->asoc.minrto;
4112 if (net->RTO > stcb->asoc.maxrto) {
4113 net->RTO = stcb->asoc.maxrto;
4116 if (net->cwnd > sctp_pegs[SCTP_MAX_CWND]) {
4117 sctp_pegs[SCTP_MAX_CWND] = net->cwnd;
4120 /**********************************/
4121 /* Now what about shutdown issues */
4122 /**********************************/
4123 some_on_streamwheel = 0;
4124 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
4125 /* Check to see if some data queued */
4126 struct sctp_stream_out *outs;
4127 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
4128 if (!TAILQ_EMPTY(&outs->outqueue)) {
4129 some_on_streamwheel = 1;
4134 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) &&
4135 some_on_streamwheel == 0) {
4136 /* nothing left on sendqueue.. consider done */
4137 /* stop all timers */
4138 #ifdef SCTP_LOG_RWND
4139 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4140 asoc->peers_rwnd, 0, 0, a_rwnd);
4142 asoc->peers_rwnd = a_rwnd;
4143 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4144 /* SWS sender side engages */
4145 asoc->peers_rwnd = 0;
4147 /* stop any timers */
4148 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4149 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4151 net->flight_size = 0;
4152 net->partial_bytes_acked = 0;
4154 asoc->total_flight = 0;
4155 asoc->total_flight_count = 0;
4157 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
4158 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
4160 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
4161 kprintf("%s:%d sends a shutdown\n",
4167 sctp_send_shutdown(stcb,
4168 stcb->asoc.primary_destination);
4169 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4170 stcb->sctp_ep, stcb, asoc->primary_destination);
4171 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4172 stcb->sctp_ep, stcb, asoc->primary_destination);
4173 } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) {
4174 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
4176 sctp_send_shutdown_ack(stcb,
4177 stcb->asoc.primary_destination);
4179 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4180 stcb->sctp_ep, stcb, asoc->primary_destination);
4185 * Now here we are going to recycle net_ack for a different
4188 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4191 if ((num_seg > 0) && marking_allowed) {
4192 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4193 strike_enabled, biggest_tsn_newly_acked, accum_moved);
4196 /*********************************************/
4197 /* Here we perform PR-SCTP procedures */
4199 /*********************************************/
4200 /* C1. update advancedPeerAckPoint */
4201 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4202 asoc->advanced_peer_ack_point = cum_ack;
4204 /* C2. try to further move advancedPeerAckPoint ahead */
4205 if (asoc->peer_supports_prsctp) {
4206 struct sctp_tmit_chunk *lchk;
4207 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4208 /* C3. See if we need to send a Fwd-TSN */
4209 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
4212 * ISSUE with ECN, see FWD-TSN processing for notes
4213 * on issues that will occur when the ECN NONCE stuff
4214 * is put into SCTP for cross checking.
4216 send_forward_tsn(stcb, asoc);
4218 /* ECN Nonce: Disable Nonce Sum check when FWD TSN is sent and store resync tsn*/
4219 asoc->nonce_sum_check = 0;
4220 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4222 /* Assure a timer is up */
4223 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4224 stcb->sctp_ep, stcb, lchk->whoTo);
4228 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4229 if (asoc->fast_retran_loss_recovery == 0) {
4230 /* out of a RFC2582 Fast recovery window? */
4231 if (net->net_ack > 0) {
4233 * per section 7.2.3, are there
4234 * any destinations that had a fast
4235 * retransmit to them. If so what we
4236 * need to do is adjust ssthresh and
4239 struct sctp_tmit_chunk *lchk;
4240 #ifdef SCTP_HIGH_SPEED
4241 sctp_hs_cwnd_decrease(net);
4243 #ifdef SCTP_CWND_LOGGING
4244 int old_cwnd = net->cwnd;
4246 net->ssthresh = net->cwnd / 2;
4247 if (net->ssthresh < (net->mtu*2)) {
4248 net->ssthresh = 2 * net->mtu;
4250 net->cwnd = net->ssthresh;
4251 #ifdef SCTP_CWND_LOGGING
4252 sctp_log_cwnd(net, (net->cwnd-old_cwnd),
4253 SCTP_CWND_LOG_FROM_FR);
4257 lchk = TAILQ_FIRST(&asoc->send_queue);
4259 net->partial_bytes_acked = 0;
4260 /* Turn on fast recovery window */
4261 asoc->fast_retran_loss_recovery = 1;
4263 /* Mark end of the window */
4264 asoc->fast_recovery_tsn = asoc->sending_seq - 1;
4266 asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
4270 /* Disable Nonce Sum Checking and store the resync tsn*/
4271 asoc->nonce_sum_check = 0;
4272 asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
4274 sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
4275 stcb->sctp_ep, stcb, net);
4276 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4277 stcb->sctp_ep, stcb, net);
4279 } else if (net->net_ack > 0) {
4281 * Mark a peg that we WOULD have done a cwnd reduction
4282 * but RFC2582 prevented this action.
4284 sctp_pegs[SCTP_FR_INAWINDOW]++;
4289 /******************************************************************
4290 * Here we do the stuff with ECN Nonce checking.
4291 * We basically check to see if the nonce sum flag was incorrect
4292 * or if resynchronization needs to be done. Also if we catch a
4293 * misbehaving receiver we give him the kick.
4294 ******************************************************************/
4296 if (asoc->ecn_nonce_allowed) {
4297 if (asoc->nonce_sum_check) {
4298 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
4299 if (asoc->nonce_wait_for_ecne == 0) {
4300 struct sctp_tmit_chunk *lchk;
4301 lchk = TAILQ_FIRST(&asoc->send_queue);
4302 asoc->nonce_wait_for_ecne = 1;
4304 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4306 asoc->nonce_wait_tsn = asoc->sending_seq;
4309 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4310 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4311 /* Misbehaving peer. We need to react to this guy */
4312 kprintf("Mis-behaving peer detected\n");
4313 asoc->ecn_allowed = 0;
4314 asoc->ecn_nonce_allowed = 0;
4319 /* See if Resynchronization Possible */
4320 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4321 asoc->nonce_sum_check = 1;
4322 /* now we must calculate what the base
4323 * is. We do this based on two things: we know
4324 * the totals for all the segments gap-acked
4325 * in the SACK, stored in ecn_seg_sums,
4326 * and we know the SACK's nonce sum, carried
4327 * in nonce_sum_flag. So we can build a truth
4328 * table to back-calculate the new value of asoc->nonce_sum_expect_base:
4330 * SACK-flag-Value Seg-Sums Base
*        0          0       0
*        0          1       1
*        1          0       1
*        1          1       0
4336 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4340 /* Now are we exiting loss recovery ? */
4341 if (will_exit_fast_recovery) {
4342 /* Ok, we must exit fast recovery */
4343 asoc->fast_retran_loss_recovery = 0;
4345 if ((asoc->sat_t3_loss_recovery) &&
4346 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
4348 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
4349 /* end satellite t3 loss recovery */
4350 asoc->sat_t3_loss_recovery = 0;
4352 /* Adjust and set the new rwnd value */
4353 #ifdef SCTP_LOG_RWND
4354 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4355 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd);
4358 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4359 (u_int32_t)(asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
4360 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4361 /* SWS sender side engages */
4362 asoc->peers_rwnd = 0;
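/*
 * e.g. a_rwnd = 64000 with 12000 bytes still in flight and N
 * chunks on the sent queue yields a usable window of
 * 64000 - 12000 - N * sctp_peer_chunk_oh, floored at zero by
 * sctp_sbspace_sub; anything below the SWS threshold is treated
 * as zero to avoid silly-window dribble.
 */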
4365 * Now we must setup so we have a timer up for anyone with
4368 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4369 struct sctp_tmit_chunk *chk;
4370 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4371 if (chk->whoTo == net &&
4372 (chk->sent < SCTP_DATAGRAM_ACKED ||
4373 chk->sent == SCTP_FORWARD_TSN_SKIP)) {
4375 * Not ack'ed and still outstanding to this
4376 * destination or marked and must be
4377 * sacked after fwd-tsn sent.
4379 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4380 stcb->sctp_ep, stcb, net);
4388 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
4389 struct sctp_nets *netp, int *abort_flag)
4391 /* Mutate a shutdown into a SACK */
4392 struct sctp_sack_chunk sack;
4395 sack.sack.cum_tsn_ack = cp->cumulative_tsn_ack;
4396 /* Arrange so a_rwnd does NOT change */
4397 sack.ch.chunk_type = SCTP_SELECTIVE_ACK;
4398 sack.ch.chunk_flags = 0;
4399 sack.ch.chunk_length = htons(sizeof(struct sctp_sack_chunk));
4401 htonl(stcb->asoc.peers_rwnd + stcb->asoc.total_flight);
4403 * no gaps in this one. This may give a temporary appearance of
4404 * reneging, but hopefully the second chunk in the packet is a true
4405 * SACK and will correct this view. One will come soon after no matter what
4408 sack.sack.num_gap_ack_blks = 0;
4409 sack.sack.num_dup_tsns = 0;
4410 /* Now call the SACK processor */
4411 sctp_handle_sack(&sack, stcb, netp, abort_flag);
4415 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4416 struct sctp_stream_in *strmin)
4418 struct sctp_tmit_chunk *chk, *nchk;
4419 struct sctp_association *asoc;
4423 tt = strmin->last_sequence_delivered;
4425 * First deliver anything prior to and including the stream no that
4428 chk = TAILQ_FIRST(&strmin->inqueue);
4430 nchk = TAILQ_NEXT(chk, sctp_next);
4431 if (compare_with_wrap(tt, chk->rec.data.stream_seq, MAX_SEQ) ||
4432 (tt == chk->rec.data.stream_seq)) {
4433 /* this is deliverable now */
4434 TAILQ_REMOVE(&strmin->inqueue, chk, sctp_next);
4435 /* subtract pending on streams */
4436 asoc->size_on_all_streams -= chk->send_size;
4437 asoc->cnt_on_all_streams--;
4438 /* deliver it to at least the delivery-q */
4439 sctp_deliver_data(stcb, &stcb->asoc, chk, 0);
4441 /* no more delivery now. */
4447 * now we must deliver things in queue the normal way if any
4450 tt = strmin->last_sequence_delivered + 1;
4451 chk = TAILQ_FIRST(&strmin->inqueue);
4453 nchk = TAILQ_NEXT(chk, sctp_next);
4454 if (tt == chk->rec.data.stream_seq) {
4455 /* this is deliverable now */
4456 TAILQ_REMOVE(&strmin->inqueue, chk, sctp_next);
4457 /* subtract pending on streams */
4458 asoc->size_on_all_streams -= chk->send_size;
4459 asoc->cnt_on_all_streams--;
4460 /* deliver it to at least the delivery-q */
4461 strmin->last_sequence_delivered =
4462 chk->rec.data.stream_seq;
4463 sctp_deliver_data(stcb, &stcb->asoc, chk, 0);
4464 tt = strmin->last_sequence_delivered + 1;
4474 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
4475 struct sctp_forward_tsn_chunk *fwd, int *abort_flag)
4478 * ISSUES that MUST be fixed for ECN! When we are the
4479 * sender of the forward TSN, when the SACK comes back
4480 * that acknowledges the FWD-TSN we must reset the
4481 * NONCE sum to match correctly. This will get quite
4482 * tricky since we may have sent more intervening data and
4483 * must carefully account for what the SACK says on the
4484 * nonce and any gaps that are reported. This work
4485 * will NOT be done here, but I note it here since
4486 * it is really related to PR-SCTP and FWD-TSN's
4489 /* The pr-sctp fwd tsn */
4491 * here we will perform all the data receiver side steps for
4492 * processing FwdTSN, as required by the pr-sctp draft:
4494 * Assume we get FwdTSN(x):
4496 * 1) update local cumTSN to x
4497 * 2) try to further advance cumTSN to x + others we have
4498 * 3) examine and update re-ordering queue on pr-in-streams
4499 * 4) clean up re-assembly queue
4500 * 5) Send a sack to report where we are.
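/*
 * On the wire a FORWARD-TSN chunk carries the new cumulative TSN
 * followed by (stream number, stream sequence) pairs (the struct
 * sctp_strseq entries) naming the skipped messages in each
 * affected stream.
 */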
	struct sctp_strseq *stseq;
	struct sctp_association *asoc;
	u_int32_t new_cum_tsn, gap, back_out_htsn;
	unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
	struct sctp_stream_in *strm;
	struct sctp_tmit_chunk *chk, *at;

	cumack_set_flag = 0;
	asoc = &stcb->asoc;
	cnt_gone = 0;
	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("Bad size too small/big fwd-tsn\n");
		}
#endif
		return;
	}
	m_size = (stcb->asoc.mapping_array_size << 3);
	/*************************************************************/
	/* 1. Here we update local cumTSN and shift the bitmap array */
	/*************************************************************/
	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

	if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
	    asoc->cumulative_tsn == new_cum_tsn) {
		/* Already at or beyond the FWD-TSN point; nothing to do. */
		return;
	}

	back_out_htsn = asoc->highest_tsn_inside_map;
	if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
	    MAX_TSN)) {
		asoc->highest_tsn_inside_map = new_cum_tsn;
#ifdef SCTP_MAP_LOGGING
		sctp_log_map(0, 0, asoc->highest_tsn_inside_map,
		    SCTP_MAP_SLIDE_RESULT);
#endif
	}
	/*
	 * Now we know the new TSN is more advanced; find the actual gap
	 * between it and the base of the mapping array.
	 */
	if (compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
	    MAX_TSN) ||
	    (new_cum_tsn == asoc->mapping_array_base_tsn)) {
		gap = new_cum_tsn - asoc->mapping_array_base_tsn;
	} else {
		/* try to prevent underflow here */
		gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
	}
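
	/*
	 * Worked example of the gap arithmetic (made-up numbers): with
	 * mapping_array_base_tsn = 100 and new_cum_tsn = 103 the first
	 * branch gives gap = 3, and the loop below marks bitmap slots
	 * 0..3, i.e. TSNs 100..103. Across a wrap, with base =
	 * 0xfffffffe and new_cum_tsn = 1, the second branch gives
	 * gap = 1 + (0xffffffff - 0xfffffffe) + 1 = 3, again covering
	 * the four TSNs 0xfffffffe, 0xffffffff, 0 and 1.
	 */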
	if (gap > m_size) {
		asoc->highest_tsn_inside_map = back_out_htsn;
		if ((long)gap > sctp_sbspace(&stcb->sctp_socket->so_rcv)) {
			/*
			 * Out of range of even the single-byte chunks that
			 * fit in the rwnd we give out; too questionable.
			 * Better to drop it silently.
			 */
			return;
		}
		if (asoc->highest_tsn_inside_map >
		    asoc->mapping_array_base_tsn) {
			gap = asoc->highest_tsn_inside_map -
			    asoc->mapping_array_base_tsn;
		} else {
			gap = asoc->highest_tsn_inside_map +
			    (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
		}
		cumack_set_flag = 1;
	}
	for (i = 0; i <= gap; i++) {
		SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
	}
	/*
	 * Now, after marking all TSNs up to the gap as present, slide the
	 * map forward, but do not emit a SACK yet.
	 */
	sctp_sack_check(stcb, 0, 0, abort_flag);
	if (*abort_flag)
		return;
	if (cumack_set_flag) {
		/*
		 * The fwd-tsn went outside my gap array - not a common
		 * occurrence. Do the same thing we do when a cookie-echo
		 * arrives: restart the map at the new cumulative point.
		 */
		asoc->highest_tsn_inside_map = new_cum_tsn - 1;
		asoc->mapping_array_base_tsn = new_cum_tsn;
		asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
#ifdef SCTP_MAP_LOGGING
		sctp_log_map(0, 3, asoc->highest_tsn_inside_map,
		    SCTP_MAP_SLIDE_RESULT);
#endif
		asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
	}
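
	/*
	 * Sketch of that reset with made-up numbers: if the FWD-TSN
	 * jumped to new_cum_tsn = 5000 while the map only covered TSNs
	 * near 4000, the old map is abandoned wholesale: the map base
	 * becomes 5000 and both the highest in-map TSN and the
	 * cumulative TSN become 4999, as if the association had just
	 * been (re)established at that point.
	 */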
	/*************************************************************/
	/* 2. Clear up re-assembly queue                             */
	/*************************************************************/

	/*
	 * First service it in case the pd-api is up, just in case we can
	 * progress it forward.
	 */
	if (asoc->fragmented_delivery_inprogress) {
		sctp_service_reassembly(stcb, asoc, 0);
	}
	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
		/* For each one on here see if we need to toss it */
		/*
		 * For now, large messages held on the reasmqueue that are
		 * complete will be tossed too. We could in theory do more
		 * work to spin through and stop after dumping one msg,
		 * aka seeing the start of a new msg at the head, and call
		 * the delivery function to see if it can be delivered.
		 * But for now we just dump everything on the queue.
		 */
		chk = TAILQ_FIRST(&asoc->reasmqueue);
		while (chk) {
			at = TAILQ_NEXT(chk, sctp_next);
			if (compare_with_wrap(asoc->cumulative_tsn,
			    chk->rec.data.TSN_seq, MAX_TSN) ||
			    asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
				/* It needs to be tossed */
				TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
				if (compare_with_wrap(chk->rec.data.TSN_seq,
				    asoc->tsn_last_delivered, MAX_TSN)) {
					asoc->tsn_last_delivered =
					    chk->rec.data.TSN_seq;
					asoc->str_of_pdapi =
					    chk->rec.data.stream_number;
					asoc->ssn_of_pdapi =
					    chk->rec.data.stream_seq;
					asoc->fragment_flags =
					    chk->rec.data.rcv_flags;
				}
				asoc->size_on_reasm_queue -= chk->send_size;
				asoc->cnt_on_reasm_queue--;
				cnt_gone++;

				/* Clear up any stream problem */
				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
				    SCTP_DATA_UNORDERED &&
				    compare_with_wrap(chk->rec.data.stream_seq,
				    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
				    MAX_SEQ)) {
					/*
					 * We must bump forward this stream's
					 * sequence number when an ordered
					 * chunk is being skipped. There is a
					 * chance that, if the peer does not
					 * include the last fragment in its
					 * FWD-TSN, we WILL have a problem:
					 * a partial chunk would sit in the
					 * queue that may never become
					 * deliverable. Also, if a partial
					 * delivery API has started, the user
					 * may get a partial chunk, with the
					 * next read returning a new chunk.
					 * Really ugly, but I see no way
					 * around it! Maybe a notify??
					 */
					asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
					    chk->rec.data.stream_seq;
				}
				if (chk->data) {
					sctp_m_freem(chk->data);
					chk->data = NULL;
				}
				sctp_free_remote_addr(chk->whoTo);
				SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
				sctppcbinfo.ipi_count_chunk--;
				if ((int)sctppcbinfo.ipi_count_chunk < 0) {
					panic("Chunk count is negative");
				}
				sctppcbinfo.ipi_gencnt_chunk++;
			} else {
				/*
				 * Ok, we have gone beyond the end of the
				 * fwd-tsn's mark. Some checks...
				 */
				if ((asoc->fragmented_delivery_inprogress) &&
				    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
					/*
					 * Special case: the PD-API is up and
					 * what we fwd-tsn'd over includes the
					 * chunk that had the LAST_FRAG. We no
					 * longer need to do the PD-API.
					 */
					asoc->fragmented_delivery_inprogress = 0;
					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
					    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, NULL);
				}
				break;
			}
			chk = at;
		}
	}
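
	/*
	 * Example of the PD-API special case above (hypothetical TSNs):
	 * suppose message A spans TSNs 10 (FIRST_FRAG) through 12
	 * (LAST_FRAG) and the partial delivery API has already handed
	 * fragments 10 and 11 to the user. If a FWD-TSN skips through 12,
	 * the toss loop discards A's remaining fragment and the queue
	 * head becomes the FIRST_FRAG of some later message B, so the
	 * PD-API for A is aborted and the user is notified rather than
	 * having B's data spliced onto A's partial message.
	 */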
	if (asoc->fragmented_delivery_inprogress) {
		/*
		 * We removed cnt_gone chunks in the PD-API queue that
		 * were being delivered, so now we must turn off the flag
		 * and tell the user the delivery was aborted.
		 */
		sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
		    stcb, SCTP_PARTIAL_DELIVERY_ABORTED, NULL);
		asoc->fragmented_delivery_inprogress = 0;
	}
	/*************************************************************/
	/* 3. Update the PR-stream re-ordering queues                */
	/*************************************************************/
	stseq = (struct sctp_strseq *)((caddr_t)fwd + sizeof(*fwd));
	fwd_sz -= sizeof(*fwd);
	{
		/* New method: walk the (stream, sequence) pairs. */
		int num_str;

		num_str = fwd_sz / sizeof(struct sctp_strseq);
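
		/*
		 * Layout sketch of a FORWARD-TSN chunk (per the PR-SCTP
		 * draft): a fixed header carrying the new cumulative TSN,
		 * followed by zero or more (stream, sequence) pairs:
		 *
		 *	+------+-------+--------+
		 *	| type | flags | length |  struct sctp_forward_tsn_chunk
		 *	|  new_cumulative_tsn   |  (8 bytes)
		 *	+-----------+-----------+
		 *	| stream #0 | seq #0    |  struct sctp_strseq
		 *	|   ...     |  ...      |  (4 bytes each)
		 *	+-----------+-----------+
		 *
		 * e.g. a 16-byte chunk (8-byte header plus two 4-byte
		 * pairs) yields num_str = 2 here.
		 */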
#ifdef SCTP_DEBUG
		if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
			kprintf("Using NEW method, %d strseq's reported in FWD-TSN\n",
			    num_str);
		}
#endif
		for (i = 0; i < num_str; i++) {
			u_int16_t st;

			/* Convert the on-wire values to host order */
			st = ntohs(stseq[i].stream);
			stseq[i].stream = st;
			st = ntohs(stseq[i].sequence);
			stseq[i].sequence = st;
			/* now process */
			if (stseq[i].stream >= asoc->streamincnt) {
#ifdef SCTP_DEBUG
				if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
					kprintf("Bogus stream number %d "
					    "streamincnt is %d\n",
					    stseq[i].stream, asoc->streamincnt);
				}
#endif
				/*
				 * It is arguable whether we should continue.
				 * Since the peer sent bogus stream info, we
				 * may be in deep trouble; a return may be a
				 * better choice?
				 */
				continue;
			}
			strm = &asoc->strmin[stseq[i].stream];
			if (compare_with_wrap(stseq[i].sequence,
			    strm->last_sequence_delivered, MAX_SEQ)) {
				/* Update the sequence number */
				strm->last_sequence_delivered =
				    stseq[i].sequence;
			}
			/* now kick the stream the new way */
			sctp_kick_prsctp_reorder_queue(stcb, strm);
		}
	}
}