1 /* $KAME: sctp_indata.c,v 1.35 2004/08/17 04:06:17 itojun Exp $ */
2 /* $DragonFly: src/sys/netinet/sctp_indata.c,v 1.8 2008/06/05 18:06:32 swildner Exp $ */
5 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #if !(defined(__OpenBSD__) || defined(__APPLE__))
34 #include "opt_ipsec.h"
36 #if defined(__FreeBSD__) || defined(__DragonFly__)
37 #include "opt_inet6.h"
40 #if defined(__NetBSD__)
46 #elif !defined(__OpenBSD__)
50 #include <sys/param.h>
51 #include <sys/systm.h>
53 #include <sys/malloc.h>
54 #include <sys/socket.h>
55 #include <sys/socketvar.h>
56 #include <sys/sysctl.h>
59 #include <net/route.h>
62 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
63 #include <sys/limits.h>
65 #include <machine/limits.h>
67 #include <machine/cpu.h>
69 #include <netinet/in.h>
70 #include <netinet/in_systm.h>
71 #include <netinet/ip.h>
73 #include <netinet/ip6.h>
75 #include <netinet/in_pcb.h>
76 #include <netinet/in_var.h>
77 #include <netinet/ip_var.h>
79 #include <netinet6/ip6_var.h>
81 #include <netinet/ip_icmp.h>
82 #include <netinet/icmp_var.h>
83 #include <netinet/sctp_var.h>
84 #include <netinet/sctp_pcb.h>
85 #include <netinet/sctp_header.h>
86 #include <netinet/sctputil.h>
87 #include <netinet/sctp_output.h>
88 #include <netinet/sctp_input.h>
89 #include <netinet/sctp_hashdriver.h>
90 #include <netinet/sctp_indata.h>
91 #include <netinet/sctp_uio.h>
92 #include <netinet/sctp_timer.h>
95 #include <netinet6/ipsec.h>
96 #include <netproto/key/key.h>
102 #include <net/net_osdep.h>
105 extern u_int32_t sctp_debug_on;
109 * NOTES: On the outbound side of things I need to check the sack timer to
110 * see if I should generate a sack into the chunk queue (if I have data to
111 * send that is and will be sending it .. for bundling.
113 * The callback in sctp_usrreq.c will get called when the socket is read
114 * from. This will cause sctp_service_queues() to get called on the top
118 extern int sctp_strict_sacks;
/*
 * Recompute the association's advertised receive window (asoc->my_rwnd)
 * from the free space in the socket receive buffer, minus the bytes we
 * still hold privately on the delivery, reassembly and per-stream queues.
 * Also applies an SWS (silly-window-syndrome) clamp against the endpoint's
 * configured receiver threshold.
 *
 * NOTE(review): this excerpt omits interior lines (the embedded original
 * line numbering skips), so several else-arms and closing braces of this
 * function are not visible here.
 */
121 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
123 u_int32_t calc, calc_w_oh;
/* Debug dump of the raw socket-buffer accounting fields. */
126 if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
127 kprintf("cc:%lu hiwat:%lu lowat:%lu mbcnt:%lu mbmax:%lu\n",
128 (u_long)stcb->sctp_socket->so_rcv.ssb_cc,
129 (u_long)stcb->sctp_socket->so_rcv.ssb_hiwat,
130 (u_long)stcb->sctp_socket->so_rcv.ssb_lowat,
131 (u_long)stcb->sctp_socket->so_rcv.ssb_mbcnt,
132 (u_long)stcb->sctp_socket->so_rcv.ssb_mbmax);
133 kprintf("Setting rwnd to: sb:%ld - (del:%d + reasm:%d str:%d)\n",
134 sctp_sbspace(&stcb->sctp_socket->so_rcv),
135 asoc->size_on_delivery_queue,
136 asoc->size_on_reasm_queue,
137 asoc->size_on_all_streams);
/*
 * Nothing buffered anywhere: grant the full window (at least the
 * socket high-water mark; max() second argument is elided here).
 */
140 if (stcb->sctp_socket->so_rcv.ssb_cc == 0 &&
141 asoc->size_on_delivery_queue == 0 &&
142 asoc->size_on_reasm_queue == 0 &&
143 asoc->size_on_all_streams == 0) {
144 /* Full rwnd granted */
145 asoc->my_rwnd = max(stcb->sctp_socket->so_rcv.ssb_hiwat,
149 /* get actual space */
150 calc = (u_int32_t)sctp_sbspace(&stcb->sctp_socket->so_rcv);
/*
 * sctp_sbspace_sub() presumably saturates at zero rather than
 * wrapping — TODO confirm against sctputil.
 */
152 /* take out what has NOT been put on socket queue and
153 * we yet hold for putting up.
155 calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_delivery_queue);
156 calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_reasm_queue);
157 calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_all_streams);
159 /* what is the overhead of all these rwnd's */
160 calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
162 asoc->my_rwnd = calc;
163 if (calc_w_oh == 0) {
164 /* If our overhead is greater than the advertised
165 * rwnd, we clamp the rwnd to 1. This lets us
166 * still accept inbound segments, but hopefully will
167 * shut the sender down when he finally gets the message.
/* SWS avoidance: advertise zero when below the receiver threshold. */
173 (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
174 /* SWS engaged, tell peer none left */
177 if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
178 kprintf(" - SWS zeros\n");
181 if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
190 * Take a chk structure and build it into an mbuf. Hmm should we change things
191 * so that instead we store the data side in a chunk?
/*
 * Build an MT_CONTROL mbuf carrying a cmsg (IPPROTO_SCTP/SCTP_SNDRCV)
 * with a struct sctp_sndrcvinfo filled from the explicit scalar
 * arguments (no chunk structure involved — cf. sctp_build_ctl()).
 * Returns NULL when the user has not requested SCTP_PCB_FLAGS_RECVDATAIOEVNT
 * (return statements are elided in this excerpt).
 * The cmsg length is charged to asoc->my_rwnd_control_len so control
 * data does not count against the advertised rwnd.
 *
 * NOTE(review): interior lines (declarations of `ret`/`cmh`, returns,
 * closing braces) are omitted by the excerpt's sampling.
 */
194 sctp_build_ctl_nchunk(struct sctp_tcb *stcb, uint32_t tsn, uint32_t ppid,
195 uint32_t context, uint16_t stream_no, uint16_t stream_seq, uint8_t flags)
197 struct sctp_sndrcvinfo *outinfo;
201 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) == 0) {
202 /* user does not want the sndrcv ctl */
/* Allocate a packet-header mbuf for the control message; may fail. */
206 MGETHDR(ret, MB_DONTWAIT, MT_CONTROL);
211 /* We need a CMSG header followed by the struct */
212 cmh = mtod(ret, struct cmsghdr *);
213 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
214 cmh->cmsg_level = IPPROTO_SCTP;
215 cmh->cmsg_type = SCTP_SNDRCV;
216 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
217 outinfo->sinfo_stream = stream_no;
218 outinfo->sinfo_ssn = stream_seq;
219 if (flags & SCTP_DATA_UNORDERED) {
220 outinfo->sinfo_flags = MSG_UNORDERED;
222 outinfo->sinfo_flags = 0;
224 outinfo->sinfo_ppid = ppid;
225 outinfo->sinfo_context = context;
226 outinfo->sinfo_assoc_id = sctp_get_associd(stcb);
227 outinfo->sinfo_tsn = tsn;
228 outinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
229 ret->m_len = cmh->cmsg_len;
230 ret->m_pkthdr.len = ret->m_len;
232 * We track how many control len's have gone upon the sb
233 * and do not count these in the rwnd calculation.
235 stcb->asoc.my_rwnd_control_len +=
236 CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
242 * Take a chk structure and build it into an mbuf. Should we change things
243 * so that instead we store the data side in a chunk?
/*
 * Chunk-based twin of sctp_build_ctl_nchunk(): build an MT_CONTROL mbuf
 * with an SCTP_SNDRCV cmsg populated from chk->rec.data instead of
 * scalar arguments. Uses MGET (no pkthdr) and does not set m_pkthdr.len,
 * unlike the nchunk variant. Likewise charges CMSG_LEN(...) to
 * asoc->my_rwnd_control_len.
 *
 * NOTE(review): declarations of `ret`/`cmh`, the else branch and returns
 * are elided by this excerpt's sampling.
 */
247 sctp_build_ctl(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk)
249 struct sctp_sndrcvinfo *outinfo;
252 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) == 0) {
253 /* user does not want the sndrcv ctl */
256 MGET(ret, MB_DONTWAIT, MT_CONTROL);
262 /* We need a CMSG header followed by the struct */
263 cmh = mtod(ret, struct cmsghdr *);
264 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
265 cmh->cmsg_level = IPPROTO_SCTP;
266 cmh->cmsg_type = SCTP_SNDRCV;
267 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
268 outinfo->sinfo_stream = chk->rec.data.stream_number;
269 outinfo->sinfo_ssn = chk->rec.data.stream_seq;
270 if (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
271 outinfo->sinfo_flags = MSG_UNORDERED;
273 outinfo->sinfo_flags = 0;
275 outinfo->sinfo_ppid = chk->rec.data.payloadtype;
276 outinfo->sinfo_context = chk->rec.data.context;
277 outinfo->sinfo_assoc_id = sctp_get_associd(stcb);
278 outinfo->sinfo_tsn = chk->rec.data.TSN_seq;
279 outinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
280 ret->m_len = cmh->cmsg_len;
/* Control bytes are excluded from the rwnd calculation (see sctp_set_rwnd). */
281 stcb->asoc.my_rwnd_control_len +=
282 CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
/*
 * Deliver one chunk (or the head of the delivery queue when chk == NULL —
 * inferred from the TAILQ_FIRST path below; confirm against callers) to
 * the socket receive buffer.
 *
 * Locking: when hold_locks == 0 the TCB lock is dropped and the inp
 * write-lock is taken (and released on every exit path visible here);
 * when hold_locks != 0 the caller already holds the needed locks.
 *
 * Handles: a gone socket (drains and frees the whole delivery queue),
 * fragmented-delivery-in-progress (defers), a full receive buffer,
 * prepending an M_PKTHDR mbuf when the chunk data lacks one, tagging
 * M_EOR on the last fragment, building the sndrcv cmsg and a (possibly
 * v4-mapped) source address for the first fragment, and the rwnd
 * control-length accounting on append failure.
 *
 * NOTE(review): this excerpt omits many interior lines (returns, else
 * arms, the free_it flag handling); treat the flow below as indicative,
 * not complete.
 */
288 sctp_deliver_data(struct sctp_tcb *stcb, struct sctp_association *asoc,
289 struct sctp_tmit_chunk *chk, int hold_locks)
291 struct mbuf *control, *m;
293 struct sockaddr_in6 sin6;
297 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
298 kprintf("I am now in Deliver data! (%p)\n", chk);
301 /* get a write lock on the inp if not already */
302 if (hold_locks == 0) {
303 SCTP_TCB_UNLOCK(stcb);
304 SCTP_INP_WLOCK(stcb->sctp_ep);
308 /* We always add it to the queue */
309 if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
310 /* socket above is long gone */
312 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
313 kprintf("gone is gone!\n");
/* Free the passed-in chunk: data, remote addr, then the zone entry. */
318 sctp_m_freem(chk->data);
320 sctp_free_remote_addr(chk->whoTo);
321 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
322 sctppcbinfo.ipi_count_chunk--;
323 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
324 panic("Chunk count is negative");
326 sctppcbinfo.ipi_gencnt_chunk++;
/* Drain everything still queued for delivery; socket can't take it. */
328 TAILQ_FOREACH(chk, &asoc->delivery_queue, sctp_next) {
329 asoc->size_on_delivery_queue -= chk->send_size;
330 asoc->cnt_on_delivery_queue--;
332 * Lose the data pointer, since its in the socket buffer
335 sctp_m_freem(chk->data);
337 /* Now free the address and data */
338 sctp_free_remote_addr(chk->whoTo);
339 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
340 sctppcbinfo.ipi_count_chunk--;
341 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
342 panic("Chunk count is negative");
344 sctppcbinfo.ipi_gencnt_chunk++;
347 SCTP_INP_WUNLOCK(stcb->sctp_ep);
/* Normal path: enqueue the chunk, then try to deliver the queue head. */
351 TAILQ_INSERT_TAIL(&asoc->delivery_queue, chk, sctp_next);
352 asoc->size_on_delivery_queue += chk->send_size;
353 asoc->cnt_on_delivery_queue++;
355 if (asoc->fragmented_delivery_inprogress) {
357 * oh oh, fragmented delivery in progress
358 * return out of here.
361 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
362 kprintf("Fragmented delivery in progress?\n");
366 SCTP_INP_WUNLOCK(stcb->sctp_ep);
369 /* Now grab the first one */
370 chk = TAILQ_FIRST(&asoc->delivery_queue);
372 /* Nothing in queue */
374 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
375 kprintf("Nothing in queue?\n");
/* Queue empty: reset the accounting so it can't drift negative. */
378 asoc->size_on_delivery_queue = 0;
379 asoc->cnt_on_delivery_queue = 0;
381 SCTP_INP_WUNLOCK(stcb->sctp_ep);
385 if (stcb->sctp_socket->so_rcv.ssb_cc >= stcb->sctp_socket->so_rcv.ssb_hiwat) {
386 /* Boy, there really is NO room */
388 SCTP_INP_WUNLOCK(stcb->sctp_ep);
392 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
393 kprintf("Now to the delivery with chk(%p)!\n", chk);
396 /* XXX need to append PKTHDR to the socket buffer first */
397 if ((chk->data->m_flags & M_PKTHDR) == 0) {
398 MGETHDR(m, MB_DONTWAIT, MT_DATA);
402 SCTP_INP_WUNLOCK(stcb->sctp_ep);
405 m->m_pkthdr.len = chk->send_size;
407 m->m_next = chk->data;
/* Mark end-of-record on the last mbuf of the final fragment. */
410 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
411 if (chk->data->m_next == NULL) {
412 /* hopefully we hit here most of the time */
413 chk->data->m_flags |= M_EOR;
415 /* Add the flag to the LAST mbuf in the chain */
417 while (m->m_next != NULL) {
/* First fragment: attach cmsg + source address for sbappendaddr. */
424 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
425 struct sockaddr_in6 lsa6;
427 control = sctp_build_ctl(stcb, chk);
428 to = (struct sockaddr *)&chk->whoTo->ro._l_addr;
429 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
430 to->sa_family == AF_INET) {
/* Convert the v4 peer address to a v4-mapped IPv6 address. */
431 struct sockaddr_in *sin;
433 sin = (struct sockaddr_in *)to;
434 bzero(&sin6, sizeof(sin6));
435 sin6.sin6_family = AF_INET6;
436 sin6.sin6_len = sizeof(struct sockaddr_in6);
437 sin6.sin6_addr.s6_addr16[2] = 0xffff;
438 bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
439 sizeof(sin6.sin6_addr.s6_addr16[3]));
440 sin6.sin6_port = sin->sin_port;
441 to = (struct sockaddr *)&sin6;
443 /* check and strip embedded scope junk */
444 to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
/* Defensive: a zero port should not happen; patch it with rport. */
446 if (((struct sockaddr_in *)to)->sin_port == 0) {
447 kprintf("Huh a, port is %d not net:%x %d?\n",
448 ((struct sockaddr_in *)to)->sin_port,
450 (int)(ntohs(stcb->rport)));
451 ((struct sockaddr_in *)to)->sin_port = stcb->rport;
453 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < (long)chk->send_size) {
454 /* Gak not enough room */
/* Undo the control-mbuf rwnd accounting done by sctp_build_ctl(). */
456 sctp_m_freem(control);
457 stcb->asoc.my_rwnd_control_len -=
458 CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
/* Append data+control+addr under the recv-buffer token. */
462 lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
463 if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
464 to, chk->data, control, stcb->asoc.my_vtag,
466 /* Gak not enough room */
468 sctp_m_freem(control);
469 stcb->asoc.my_rwnd_control_len -=
470 CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
473 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
474 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
475 stcb->asoc.my_rwnd_control_len +=
479 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
483 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
485 /* append to a already started message. */
486 lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
487 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) >=
488 (long)chk->send_size) {
489 ssb_append(&stcb->sctp_socket->so_rcv, chk->data);
492 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
496 SCTP_INP_WUNLOCK(stcb->sctp_ep);
497 /* free up the one we inserted */
499 /* Pull it off the queue */
501 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
502 kprintf("Free_it true, doing tickle wakeup\n");
505 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
506 TAILQ_REMOVE(&asoc->delivery_queue, chk, sctp_next);
507 asoc->size_on_delivery_queue -= chk->send_size;
508 asoc->cnt_on_delivery_queue--;
509 /* Lose the data pointer, since its in the socket buffer */
511 /* Now free the address and data */
512 sctp_free_remote_addr(chk->whoTo);
513 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
514 sctppcbinfo.ipi_count_chunk--;
515 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
516 panic("Chunk count is negative");
518 sctppcbinfo.ipi_gencnt_chunk++;
524 * We are delivering currently from the reassembly queue. We must continue to
525 * deliver until we either:
526 * 1) run out of space.
527 * 2) run out of sequential TSN's
528 * 3) hit the SCTP_DATA_LAST_FRAG flag.
/*
 * Service the reassembly queue: deliver fragments of the in-progress
 * partially-delivered message to the socket, in TSN order, until we run
 * out of space, out of sequential TSNs, or hit SCTP_DATA_LAST_FRAG
 * (see the comment block above this function).
 *
 * Same locking convention as sctp_deliver_data(): hold_locks == 0 means
 * drop the TCB lock and take the inp write-lock here.
 *
 * NOTE(review): this excerpt omits interior lines (returns, else arms,
 * loop closers); the visible flow mirrors sctp_deliver_data's first-frag
 * cmsg/address handling and per-chunk free sequence.
 */
531 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc, int hold_locks)
534 struct sockaddr_in6 sin6;
535 struct sctp_tmit_chunk *chk, *at;
536 struct mbuf *control, *m;
540 cntDel = stream_no = 0;
541 if (hold_locks == 0) {
543 * you always have the TCB lock, we need
544 * to have the inp write lock as well.
546 SCTP_TCB_UNLOCK(stcb);
547 SCTP_INP_WLOCK(stcb->sctp_ep);
550 if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
551 /* socket above is long gone */
552 asoc->fragmented_delivery_inprogress = 0;
/* Socket is gone: drain and free the entire reassembly queue.
 * NOTE(review): the loop decrements *delivery*-queue counters while
 * walking the *reasm* queue — looks suspicious, but matches the
 * shipped code; verify against upstream before changing. */
553 TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
554 asoc->size_on_delivery_queue -= chk->send_size;
555 asoc->cnt_on_delivery_queue--;
557 * Lose the data pointer, since its in the socket buffer
560 sctp_m_freem(chk->data);
562 /* Now free the address and data */
563 sctp_free_remote_addr(chk->whoTo);
564 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
565 sctppcbinfo.ipi_count_chunk--;
566 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
567 panic("Chunk count is negative");
569 sctppcbinfo.ipi_gencnt_chunk++;
572 SCTP_INP_WUNLOCK(stcb->sctp_ep);
/* Receive buffer already full: wake the reader and bail. */
576 if (stcb->sctp_socket->so_rcv.ssb_cc >=
577 stcb->sctp_socket->so_rcv.ssb_hiwat) {
579 sctp_sorwakeup(stcb->sctp_ep,
583 SCTP_INP_WUNLOCK(stcb->sctp_ep);
586 chk = TAILQ_FIRST(&asoc->reasmqueue);
589 sctp_sorwakeup(stcb->sctp_ep,
593 SCTP_INP_WUNLOCK(stcb->sctp_ep);
/* Next chunk must be exactly tsn_last_delivered + 1 to continue. */
596 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
597 /* Can't deliver more :< */
599 sctp_sorwakeup(stcb->sctp_ep,
603 SCTP_INP_WUNLOCK(stcb->sctp_ep);
/* For ordered data, the stream sequence must also be in order. */
606 stream_no = chk->rec.data.stream_number;
607 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
608 if (nxt_todel != chk->rec.data.stream_seq &&
609 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
611 * Not the next sequence to deliver in its stream OR
615 sctp_sorwakeup(stcb->sctp_ep,
619 SCTP_INP_WUNLOCK(stcb->sctp_ep);
/* Prepend a pkthdr mbuf if the chunk's data chain lacks one. */
623 if ((chk->data->m_flags & M_PKTHDR) == 0) {
624 MGETHDR(m, MB_DONTWAIT, MT_DATA);
628 SCTP_INP_WUNLOCK(stcb->sctp_ep);
631 m->m_pkthdr.len = chk->send_size;
633 m->m_next = chk->data;
636 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
637 if (chk->data->m_next == NULL) {
638 /* hopefully we hit here most of the time */
639 chk->data->m_flags |= M_EOR;
641 /* Add the flag to the LAST mbuf in the chain */
643 while (m->m_next != NULL) {
/* First fragment: build cmsg and the (possibly v4-mapped) peer addr. */
649 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
650 struct sockaddr_in6 lsa6;
652 control = sctp_build_ctl(stcb, chk);
653 to = (struct sockaddr *)&chk->whoTo->ro._l_addr;
654 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
655 to->sa_family == AF_INET) {
656 struct sockaddr_in *sin;
657 sin = (struct sockaddr_in *)to;
658 bzero(&sin6, sizeof(sin6));
659 sin6.sin6_family = AF_INET6;
660 sin6.sin6_len = sizeof(struct sockaddr_in6);
661 sin6.sin6_addr.s6_addr16[2] = 0xffff;
662 bcopy(&sin->sin_addr,
663 &sin6.sin6_addr.s6_addr16[3],
664 sizeof(sin6.sin6_addr.s6_addr16[3]));
665 sin6.sin6_port = sin->sin_port;
666 to = (struct sockaddr *)&sin6;
668 /* check and strip embedded scope junk */
669 to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
671 if (((struct sockaddr_in *)to)->sin_port == 0) {
672 kprintf("Huh b, port is %d not net:%x %d?\n",
673 ((struct sockaddr_in *)to)->sin_port,
675 (int)(ntohs(stcb->rport)));
676 ((struct sockaddr_in *)to)->sin_port = stcb->rport;
678 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) <
679 (long)chk->send_size) {
/* No room: drop the control mbuf and back out its rwnd charge. */
681 sctp_m_freem(control);
682 stcb->asoc.my_rwnd_control_len -=
683 CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
685 sctp_sorwakeup(stcb->sctp_ep,
688 SCTP_INP_WUNLOCK(stcb->sctp_ep);
691 lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
692 if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
693 to, chk->data, control, stcb->asoc.my_vtag,
695 /* Gak not enough room */
697 sctp_m_freem(control);
698 stcb->asoc.my_rwnd_control_len -=
699 CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
701 sctp_sorwakeup(stcb->sctp_ep,
704 SCTP_INP_WUNLOCK(stcb->sctp_ep);
705 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
708 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
709 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
710 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
711 stcb->asoc.my_rwnd_control_len +=
715 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
/* Non-first fragments just append data to the started record. */
719 if (sctp_sbspace(&stcb->sctp_socket->so_rcv) >=
720 (long)chk->send_size) {
721 lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
722 ssb_append(&stcb->sctp_socket->so_rcv, chk->data);
723 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
726 /* out of space in the sb */
727 sctp_sorwakeup(stcb->sctp_ep,
730 SCTP_INP_WUNLOCK(stcb->sctp_ep);
734 /* pull it we did it */
735 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
736 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/* Last fragment delivered: the PD-API message is complete. */
737 asoc->fragmented_delivery_inprogress = 0;
738 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
739 asoc->strmin[stream_no].last_sequence_delivered++;
742 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
743 asoc->size_on_reasm_queue -= chk->send_size;
744 asoc->cnt_on_reasm_queue--;
745 /* free up the chk */
746 sctp_free_remote_addr(chk->whoTo);
748 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
749 sctppcbinfo.ipi_count_chunk--;
750 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
751 panic("Chunk count is negative");
753 sctppcbinfo.ipi_gencnt_chunk++;
754 if (asoc->fragmented_delivery_inprogress == 0) {
756 * Now lets see if we can deliver the next one on the
/* Message finished: drain any now-deliverable ordered chunks
 * queued on the same stream. */
760 struct sctp_stream_in *strm;
762 strm = &asoc->strmin[stream_no];
763 nxt_todel = strm->last_sequence_delivered + 1;
764 chk = TAILQ_FIRST(&strm->inqueue);
765 if (chk && (nxt_todel == chk->rec.data.stream_seq)) {
766 while (chk != NULL) {
769 chk->rec.data.stream_seq) {
770 at = TAILQ_NEXT(chk, sctp_next);
771 TAILQ_REMOVE(&strm->inqueue,
773 asoc->size_on_all_streams -=
775 asoc->cnt_on_all_streams--;
776 strm->last_sequence_delivered++;
778 * We ignore the return of
779 * deliver_data here since we
780 * always can hold the chunk on
781 * the d-queue. And we have a
782 * finite number that can be
783 * delivered from the strq.
785 sctp_deliver_data(stcb, asoc, chk, 1);
791 strm->last_sequence_delivered + 1;
794 if (!TAILQ_EMPTY(&asoc->delivery_queue)) {
795 /* Here if deliver_data fails, we must break */
796 if (sctp_deliver_data(stcb, asoc, NULL, 1) == 0)
799 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
801 SCTP_INP_WUNLOCK(stcb->sctp_ep);
804 chk = TAILQ_FIRST(&asoc->reasmqueue);
807 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
810 SCTP_INP_WUNLOCK(stcb->sctp_ep);
814 * Queue the chunk either right into the socket buffer if it is the next one
815 * to go OR put it in the correct place in the delivery queue. If we do
816 * append to the so_buf, keep doing so until we are out of order.
817 * One big question still remains, what to do when the socket buffer is FULL??
/*
 * Place an ordered DATA chunk on its stream's in-queue, or deliver it
 * (and any chunks it unblocks) immediately when it is the next expected
 * stream sequence number. A stream sequence at-or-behind
 * last_sequence_delivered is a protocol violation: the association is
 * aborted with SCTP_CAUSE_PROTOCOL_VIOLATION and *abort_flag set
 * (abort_flag assignment itself is among the elided lines).
 *
 * NOTE(review): this excerpt omits interior lines (returns, else arms,
 * the final wakeup guard), so the visible flow is incomplete.
 */
820 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
821 struct sctp_tmit_chunk *chk, int *abort_flag)
823 struct sctp_stream_in *strm;
824 struct sctp_tmit_chunk *at;
830 * Need to add code to deal with 16 bit seq wrap
831 * without a TSN wrap for ordered delivery (maybe).
/* Charge the chunk against the all-streams accounting up front. */
835 asoc->size_on_all_streams += chk->send_size;
836 asoc->cnt_on_all_streams++;
837 strm = &asoc->strmin[chk->rec.data.stream_number];
838 nxt_todel = strm->last_sequence_delivered + 1;
839 #ifdef SCTP_STR_LOGGING
840 sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
843 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
844 kprintf("queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
845 (u_int)chk->rec.data.stream_seq,
846 (u_int)strm->last_sequence_delivered, (u_int)nxt_todel);
/* SSN already delivered (wrap-aware compare): peer violated protocol. */
849 if (compare_with_wrap(strm->last_sequence_delivered,
850 chk->rec.data.stream_seq, MAX_SEQ) ||
851 (strm->last_sequence_delivered == chk->rec.data.stream_seq)) {
852 /* The incoming sseq is behind where we last delivered? */
854 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
855 kprintf("Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
856 chk->rec.data.stream_seq,
857 strm->last_sequence_delivered);
861 * throw it in the stream so it gets cleaned up in
862 * association destruction
864 TAILQ_INSERT_HEAD(&strm->inqueue, chk, sctp_next);
/* Build an operational-error cause carrying code 0x00000001. */
865 MGET(oper, MB_DONTWAIT, MT_DATA);
867 struct sctp_paramhdr *ph;
870 oper->m_len = sizeof(struct sctp_paramhdr) +
872 ph = mtod(oper, struct sctp_paramhdr *);
873 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
874 ph->param_length = htons(oper->m_len);
875 ippp = (u_int32_t *)(ph + 1);
876 *ippp = htonl(0x00000001);
878 sctp_abort_an_association(stcb->sctp_ep, stcb,
879 SCTP_PEER_FAULTY, oper);
885 if (nxt_todel == chk->rec.data.stream_seq) {
886 /* can be delivered right away */
888 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
889 kprintf("It's NEXT!\n");
892 #ifdef SCTP_STR_LOGGING
893 sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
/* Deliver this chunk, then keep draining the in-queue while the
 * next queued chunk remains in sequence. */
896 asoc->size_on_all_streams -= chk->send_size;
897 asoc->cnt_on_all_streams--;
898 strm->last_sequence_delivered++;
899 sctp_deliver_data(stcb, asoc, chk, 0);
900 chk = TAILQ_FIRST(&strm->inqueue);
901 while (chk != NULL) {
903 nxt_todel = strm->last_sequence_delivered + 1;
904 if (nxt_todel == chk->rec.data.stream_seq) {
905 at = TAILQ_NEXT(chk, sctp_next);
906 TAILQ_REMOVE(&strm->inqueue, chk, sctp_next);
907 asoc->size_on_all_streams -= chk->send_size;
908 asoc->cnt_on_all_streams--;
909 strm->last_sequence_delivered++;
911 * We ignore the return of deliver_data here
912 * since we always can hold the chunk on the
913 * d-queue. And we have a finite number that
914 * can be delivered from the strq.
916 #ifdef SCTP_STR_LOGGING
917 sctp_log_strm_del(chk, NULL,
918 SCTP_STR_LOG_FROM_IMMED_DEL);
920 sctp_deliver_data(stcb, asoc, chk, 0);
929 * Ok, we did not deliver this guy, find
930 * the correct place to put it on the queue.
933 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
934 kprintf("Queue Needed!\n");
937 if (TAILQ_EMPTY(&strm->inqueue)) {
939 #ifdef SCTP_STR_LOGGING
940 sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
942 TAILQ_INSERT_HEAD(&strm->inqueue, chk, sctp_next);
/* Walk the in-queue to insert in SSN order (wrap-aware). */
944 TAILQ_FOREACH(at, &strm->inqueue, sctp_next) {
945 if (compare_with_wrap(at->rec.data.stream_seq,
946 chk->rec.data.stream_seq, MAX_SEQ)) {
948 * one in queue is bigger than the new
949 * one, insert before this one
951 #ifdef SCTP_STR_LOGGING
952 sctp_log_strm_del(chk, at,
953 SCTP_STR_LOG_FROM_INSERT_MD);
955 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
957 } else if (at->rec.data.stream_seq ==
958 chk->rec.data.stream_seq) {
960 * Gak, He sent me a duplicate str seq
964 * foo bar, I guess I will just free
965 * this new guy, should we abort too?
966 * FIX ME MAYBE? Or it COULD be that
967 * the SSN's have wrapped. Maybe I
968 * should compare to TSN somehow...
969 * sigh for now just blow away the
/* Duplicate SSN: drop the new chunk and undo its accounting. */
974 sctp_m_freem(chk->data);
976 asoc->size_on_all_streams -= chk->send_size;
977 asoc->cnt_on_all_streams--;
978 sctp_pegs[SCTP_DUP_SSN_RCVD]++;
979 sctp_free_remote_addr(chk->whoTo);
980 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
981 sctppcbinfo.ipi_count_chunk--;
982 if ((int)sctppcbinfo.ipi_count_chunk <
984 panic("Chunk count is negative");
986 sctppcbinfo.ipi_gencnt_chunk++;
989 if (TAILQ_NEXT(at, sctp_next) == NULL) {
991 * We are at the end, insert it
994 #ifdef SCTP_STR_LOGGING
995 sctp_log_strm_del(chk, at,
996 SCTP_STR_LOG_FROM_INSERT_TL);
998 TAILQ_INSERT_AFTER(&strm->inqueue,
1006 /* We delivered some chunks, wake them up */
1009 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1010 kprintf("Doing WAKEUP!\n");
1013 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1018 * Returns two things: You get the total size of the deliverable parts of the
1019 * first fragmented message on the reassembly queue. And you get a 1 back if
1020 * all of the message is ready or a 0 back if the message is still incomplete
1023 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, int *t_size)
1025 struct sctp_tmit_chunk *chk;
1029 chk = TAILQ_FIRST(&asoc->reasmqueue);
1031 /* nothing on the queue */
1034 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
1035 /* Not a first on the queue */
1038 tsn = chk->rec.data.TSN_seq;
1040 if (tsn != chk->rec.data.TSN_seq) {
1043 *t_size += chk->send_size;
1044 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
1048 chk = TAILQ_NEXT(chk, sctp_next);
1054 * Dump onto the re-assembly queue, in its proper place. After dumping on
1055 * the queue, see if anthing can be delivered. If so pull it off (or as much
1056 * as we can. If we run out of space then we must dump what we can and set
1057 * the appropriate flag to say we queued what we could.
1060 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
1061 struct sctp_tmit_chunk *chk, int *abort_flag)
1064 u_int16_t nxt_todel;
1065 u_int32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
1068 struct sctp_tmit_chunk *at, *prev, *next;
1071 cum_ackp1 = asoc->tsn_last_delivered + 1;
1073 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
1074 /* This is the first one on the queue */
1075 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
1077 * we do not check for delivery of anything when
1078 * only one fragment is here
1080 asoc->size_on_reasm_queue = chk->send_size;
1081 asoc->cnt_on_reasm_queue++;
1082 if (chk->rec.data.TSN_seq == cum_ackp1) {
1083 if (asoc->fragmented_delivery_inprogress == 0 &&
1084 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
1085 SCTP_DATA_FIRST_FRAG) {
1087 * An empty queue, no delivery inprogress, we
1088 * hit the next one and it does NOT have a
1089 * FIRST fragment mark.
1092 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1093 kprintf("Gak, Evil plot, its not first, no fragmented delivery in progress\n");
1096 MGET(oper, MB_DONTWAIT, MT_DATA);
1098 struct sctp_paramhdr *ph;
1102 sizeof(struct sctp_paramhdr) +
1104 ph = mtod(oper, struct sctp_paramhdr *);
1106 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1107 ph->param_length = htons(oper->m_len);
1108 ippp = (u_int32_t *)(ph + 1);
1109 *ippp = htonl(0x10000001);
1111 sctp_abort_an_association(stcb->sctp_ep, stcb,
1112 SCTP_PEER_FAULTY, oper);
1114 } else if (asoc->fragmented_delivery_inprogress &&
1115 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1117 * We are doing a partial delivery and the NEXT
1118 * chunk MUST be either the LAST or MIDDLE
1119 * fragment NOT a FIRST
1122 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1123 kprintf("Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
1126 MGET(oper, MB_DONTWAIT, MT_DATA);
1128 struct sctp_paramhdr *ph;
1132 sizeof(struct sctp_paramhdr) +
1134 ph = mtod(oper, struct sctp_paramhdr *);
1136 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1137 ph->param_length = htons(oper->m_len);
1138 ippp = (u_int32_t *)(ph + 1);
1139 *ippp = htonl(0x10000002);
1141 sctp_abort_an_association(stcb->sctp_ep, stcb,
1142 SCTP_PEER_FAULTY, oper);
1144 } else if (asoc->fragmented_delivery_inprogress) {
1145 /* Here we are ok with a MIDDLE or LAST piece */
1146 if (chk->rec.data.stream_number !=
1147 asoc->str_of_pdapi) {
1148 /* Got to be the right STR No */
1150 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1151 kprintf("Gak, Evil plot, it IS not same stream number %d vs %d\n",
1152 chk->rec.data.stream_number,
1153 asoc->str_of_pdapi);
1156 MGET(oper, MB_DONTWAIT, MT_DATA);
1158 struct sctp_paramhdr *ph;
1161 sizeof(struct sctp_paramhdr) +
1164 struct sctp_paramhdr *);
1166 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1169 ippp = (u_int32_t *)(ph + 1);
1170 *ippp = htonl(0x10000003);
1172 sctp_abort_an_association(stcb->sctp_ep,
1173 stcb, SCTP_PEER_FAULTY, oper);
1175 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
1176 SCTP_DATA_UNORDERED &&
1177 chk->rec.data.stream_seq !=
1178 asoc->ssn_of_pdapi) {
1179 /* Got to be the right STR Seq */
1181 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1182 kprintf("Gak, Evil plot, it IS not same stream seq %d vs %d\n",
1183 chk->rec.data.stream_seq,
1184 asoc->ssn_of_pdapi);
1187 MGET(oper, MB_DONTWAIT, MT_DATA);
1189 struct sctp_paramhdr *ph;
1192 sizeof(struct sctp_paramhdr) +
1195 struct sctp_paramhdr *);
1197 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1200 ippp = (u_int32_t *)(ph + 1);
1201 *ippp = htonl(0x10000004);
1203 sctp_abort_an_association(stcb->sctp_ep,
1204 stcb, SCTP_PEER_FAULTY, oper);
1211 /* Find its place */
1212 at = TAILQ_FIRST(&asoc->reasmqueue);
1214 /* Grab the top flags */
1215 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1216 if (compare_with_wrap(at->rec.data.TSN_seq,
1217 chk->rec.data.TSN_seq, MAX_TSN)) {
1219 * one in queue is bigger than the new one, insert
1223 asoc->size_on_reasm_queue += chk->send_size;
1224 asoc->cnt_on_reasm_queue++;
1226 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1228 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1229 /* Gak, He sent me a duplicate str seq number */
1231 * foo bar, I guess I will just free this new guy,
1232 * should we abort too? FIX ME MAYBE? Or it COULD be
1233 * that the SSN's have wrapped. Maybe I should compare
1234 * to TSN somehow... sigh for now just blow away the
1238 sctp_m_freem(chk->data);
1240 sctp_free_remote_addr(chk->whoTo);
1241 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
1242 sctppcbinfo.ipi_count_chunk--;
1243 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
1244 panic("Chunk count is negative");
1246 sctppcbinfo.ipi_gencnt_chunk++;
1249 last_flags = at->rec.data.rcv_flags;
1250 last_tsn = at->rec.data.TSN_seq;
1252 if (TAILQ_NEXT(at, sctp_next) == NULL) {
1254 * We are at the end, insert it after this one
1256 /* check it first */
1257 asoc->size_on_reasm_queue += chk->send_size;
1258 asoc->cnt_on_reasm_queue++;
1259 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1264 /* Now the audits */
1266 prev_tsn = chk->rec.data.TSN_seq - 1;
1267 if (prev_tsn == prev->rec.data.TSN_seq) {
1269 * Ok the one I am dropping onto the end
1270 * is the NEXT. A bit of validation here.
1272 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1273 SCTP_DATA_FIRST_FRAG ||
1274 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1275 SCTP_DATA_MIDDLE_FRAG) {
1277 * Insert chk MUST be a MIDDLE or LAST fragment
1279 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1280 SCTP_DATA_FIRST_FRAG) {
1282 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1283 kprintf("Prev check - It can be a midlle or last but not a first\n");
1284 kprintf("Gak, Evil plot, it's a FIRST!\n");
1287 MGET(oper, MB_DONTWAIT, MT_DATA);
1289 struct sctp_paramhdr *ph;
1293 sizeof(struct sctp_paramhdr) +
1296 struct sctp_paramhdr *);
1298 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1302 ippp = (u_int32_t *)(ph + 1);
1303 *ippp = htonl(0x10000005);
1305 sctp_abort_an_association(stcb->sctp_ep,
1306 stcb, SCTP_PEER_FAULTY, oper);
1310 if (chk->rec.data.stream_number !=
1311 prev->rec.data.stream_number) {
1313 * Huh, need the correct STR here, they
1317 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1318 kprintf("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1319 chk->rec.data.stream_number,
1320 prev->rec.data.stream_number);
1323 MGET(oper, MB_DONTWAIT, MT_DATA);
1325 struct sctp_paramhdr *ph;
1329 sizeof(struct sctp_paramhdr) +
1332 struct sctp_paramhdr *);
1334 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1337 ippp = (u_int32_t *)(ph + 1);
1338 *ippp = htonl(0x10000006);
1341 sctp_abort_an_association(stcb->sctp_ep,
1342 stcb, SCTP_PEER_FAULTY, oper);
1347 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1348 chk->rec.data.stream_seq !=
1349 prev->rec.data.stream_seq) {
1351 * Huh, need the correct STR here, they
1355 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1356 kprintf("Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1357 chk->rec.data.stream_seq,
1358 prev->rec.data.stream_seq);
1361 MGET(oper, MB_DONTWAIT, MT_DATA);
1363 struct sctp_paramhdr *ph;
1367 sizeof(struct sctp_paramhdr) +
1370 struct sctp_paramhdr *);
1372 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1375 ippp = (u_int32_t *)(ph + 1);
1376 *ippp = htonl(0x10000007);
1379 sctp_abort_an_association(stcb->sctp_ep,
1380 stcb, SCTP_PEER_FAULTY, oper);
1385 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1386 SCTP_DATA_LAST_FRAG) {
1387 /* Insert chk MUST be a FIRST */
1388 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1389 SCTP_DATA_FIRST_FRAG) {
1391 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1392 kprintf("Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1395 MGET(oper, MB_DONTWAIT, MT_DATA);
1397 struct sctp_paramhdr *ph;
1401 sizeof(struct sctp_paramhdr) +
1404 struct sctp_paramhdr *);
1406 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1409 ippp = (u_int32_t *)(ph + 1);
1410 *ippp = htonl(0x10000008);
1413 sctp_abort_an_association(stcb->sctp_ep,
1414 stcb, SCTP_PEER_FAULTY, oper);
1424 post_tsn = chk->rec.data.TSN_seq + 1;
1425 if (post_tsn == next->rec.data.TSN_seq) {
1427 * Ok the one I am inserting ahead of
1428 * is my NEXT one. A bit of validation here.
1430 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1431 /* Insert chk MUST be a last fragment */
1432 if ((chk->rec.data.rcv_flags&SCTP_DATA_FRAG_MASK)
1433 != SCTP_DATA_LAST_FRAG) {
1435 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1436 kprintf("Next chk - Next is FIRST, we must be LAST\n");
1437 kprintf("Gak, Evil plot, its not a last!\n");
1440 MGET(oper, MB_DONTWAIT, MT_DATA);
1442 struct sctp_paramhdr *ph;
1446 sizeof(struct sctp_paramhdr) +
1449 struct sctp_paramhdr *);
1451 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1454 ippp = (u_int32_t *)(ph + 1);
1455 *ippp = htonl(0x10000009);
1458 sctp_abort_an_association(stcb->sctp_ep,
1459 stcb, SCTP_PEER_FAULTY, oper);
1464 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1465 SCTP_DATA_MIDDLE_FRAG ||
1466 (next->rec.data.rcv_flags&SCTP_DATA_FRAG_MASK) ==
1467 SCTP_DATA_LAST_FRAG) {
1468 /* Insert chk CAN be MIDDLE or FIRST NOT LAST */
1469 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1470 SCTP_DATA_LAST_FRAG) {
1472 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1473 kprintf("Next chk - Next is a MIDDLE/LAST\n");
1474 kprintf("Gak, Evil plot, new prev chunk is a LAST\n");
1477 MGET(oper, MB_DONTWAIT, MT_DATA);
1479 struct sctp_paramhdr *ph;
1483 sizeof(struct sctp_paramhdr) +
1486 struct sctp_paramhdr *);
1488 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1491 ippp = (u_int32_t *)(ph + 1);
1492 *ippp = htonl(0x1000000a);
1494 sctp_abort_an_association(stcb->sctp_ep,
1495 stcb, SCTP_PEER_FAULTY, oper);
1500 if (chk->rec.data.stream_number !=
1501 next->rec.data.stream_number) {
1503 * Huh, need the correct STR here, they
1507 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1508 kprintf("Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1509 chk->rec.data.stream_number,
1510 next->rec.data.stream_number);
1513 MGET(oper, MB_DONTWAIT, MT_DATA);
1515 struct sctp_paramhdr *ph;
1519 sizeof(struct sctp_paramhdr) +
1522 struct sctp_paramhdr *);
1524 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1527 ippp = (u_int32_t *)(ph + 1);
1528 *ippp = htonl(0x1000000b);
1531 sctp_abort_an_association(stcb->sctp_ep,
1532 stcb, SCTP_PEER_FAULTY, oper);
1537 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1538 chk->rec.data.stream_seq !=
1539 next->rec.data.stream_seq) {
1541 * Huh, need the correct STR here, they
1545 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1546 kprintf("Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1547 chk->rec.data.stream_seq,
1548 next->rec.data.stream_seq);
1551 MGET(oper, MB_DONTWAIT, MT_DATA);
1553 struct sctp_paramhdr *ph;
1557 sizeof(struct sctp_paramhdr) +
1560 struct sctp_paramhdr *);
1562 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1565 ippp = (u_int32_t *)(ph + 1);
1566 *ippp = htonl(0x1000000c);
1569 sctp_abort_an_association(stcb->sctp_ep,
1570 stcb, SCTP_PEER_FAULTY, oper);
1580 * now that we have all in there place we must check a number of
1581 * things to see if we can send data to the ULP.
1583 /* we need to do some delivery, if we can */
1584 chk = TAILQ_FIRST(&asoc->reasmqueue);
1587 asoc->size_on_reasm_queue = 0;
1588 asoc->cnt_on_reasm_queue = 0;
1591 if (asoc->fragmented_delivery_inprogress == 0) {
1593 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
1594 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
1595 (nxt_todel == chk->rec.data.stream_seq ||
1596 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
1598 * Yep the first one is here and its
1599 * ok to deliver but should we?
1601 if (TAILQ_EMPTY(&asoc->delivery_queue) &&
1602 (sctp_is_all_msg_on_reasm(asoc, &tsize) ||
1603 (asoc->size_on_reasm_queue >=
1604 (stcb->sctp_socket->so_rcv.ssb_hiwat >> 2) &&
1608 * start reception, by backing down the TSN
1609 * just in case we can't deliver. If we
1611 asoc->fragmented_delivery_inprogress = 1;
1612 asoc->tsn_last_delivered =
1613 chk->rec.data.TSN_seq - 1;
1614 asoc->str_of_pdapi =
1615 chk->rec.data.stream_number;
1616 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
1617 asoc->fragment_flags = chk->rec.data.rcv_flags;
1618 sctp_service_reassembly(stcb, asoc, 0);
1622 sctp_service_reassembly(stcb, asoc, 0);
1627 * This is an unfortunate routine. It checks to make sure an evil guy is not
1628 * stuffing us full of bad packet fragments. A broken peer could also do this
1629 * but this is doubtful. It is too bad I must worry about evil crackers, sigh.
1633 sctp_does_chk_belong_to_reasm(struct sctp_association *asoc,
1634 struct sctp_tmit_chunk *chk)
1636 struct sctp_tmit_chunk *at;
1639 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1640 if (compare_with_wrap(chk->rec.data.TSN_seq,
1641 at->rec.data.TSN_seq, MAX_TSN)) {
1642 /* is it one bigger? */
1643 tsn_est = at->rec.data.TSN_seq + 1;
1644 if (tsn_est == chk->rec.data.TSN_seq) {
1645 /* yep. It better be a last then*/
1646 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1647 SCTP_DATA_LAST_FRAG) {
1649 * Ok this guy belongs next to a guy
1650 * that is NOT last, it should be a
1651 * middle/last, not a complete chunk.
1656 * This guy is ok since its a LAST and
1657 * the new chunk is a fully self-
1663 } else if (chk->rec.data.TSN_seq == at->rec.data.TSN_seq) {
1664 /* Software error since I have a dup? */
1668 * Ok, 'at' is larger than new chunk but does it
1669 * need to be right before it.
1671 tsn_est = chk->rec.data.TSN_seq + 1;
1672 if (tsn_est == at->rec.data.TSN_seq) {
1673 /* Yep, It better be a first */
1674 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1675 SCTP_DATA_FIRST_FRAG) {
1686 extern unsigned int sctp_max_chunks_on_queue;
1688 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1689 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1690 struct sctp_nets *net, u_int32_t *high_tsn, int *abort_flag,
1691 int *break_flag, int last_chunk)
1693 /* Process a data chunk */
1694 /* struct sctp_tmit_chunk *chk;*/
1695 struct sctp_tmit_chunk *chk;
1699 u_int16_t strmno, strmseq;
1703 tsn = ntohl(ch->dp.tsn);
1704 #ifdef SCTP_MAP_LOGGING
1705 sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE);
1707 if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
1708 asoc->cumulative_tsn == tsn) {
1709 /* It is a duplicate */
1710 sctp_pegs[SCTP_DUPTSN_RECVD]++;
1711 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1712 /* Record a dup for the next outbound sack */
1713 asoc->dup_tsns[asoc->numduptsns] = tsn;
1718 /* Calculate the number of TSN's between the base and this TSN */
1719 if (tsn >= asoc->mapping_array_base_tsn) {
1720 gap = tsn - asoc->mapping_array_base_tsn;
1722 gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
1724 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1725 /* Can't hold the bit in the mapping at max array, toss it */
1728 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1729 if (sctp_expand_mapping_array(asoc)) {
1730 /* Can't expand, drop it */
1734 if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
1737 /* See if we have received this one already */
1738 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
1739 sctp_pegs[SCTP_DUPTSN_RECVD]++;
1740 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1741 /* Record a dup for the next outbound sack */
1742 asoc->dup_tsns[asoc->numduptsns] = tsn;
1745 if (!callout_pending(&asoc->dack_timer.timer)) {
1747 * By starting the timer we assure that we
1748 * WILL sack at the end of the packet
1749 * when sctp_sack_check gets called.
1751 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep,
1757 * Check to see about the GONE flag, duplicates would cause
1758 * a sack to be sent up above
1760 if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
1762 * wait a minute, this guy is gone, there is no
1763 * longer a receiver. Send peer an ABORT!
1765 struct mbuf *op_err;
1766 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1767 sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
1772 * Now before going further we see if there is room. If NOT then
1773 * we MAY let one through only IF this TSN is the one we are
1774 * waiting for on a partial delivery API.
1777 /* now do the tests */
1778 if (((asoc->cnt_on_all_streams +
1779 asoc->cnt_on_delivery_queue +
1780 asoc->cnt_on_reasm_queue +
1781 asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
1782 (((int)asoc->my_rwnd) <= 0)) {
1784 * When we have NO room in the rwnd we check
1785 * to make sure the reader is doing its job...
1787 if (stcb->sctp_socket->so_rcv.ssb_cc) {
1788 /* some to read, wake-up */
1789 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1791 /* now is it in the mapping array of what we have accepted? */
1792 if (compare_with_wrap(tsn,
1793 asoc->highest_tsn_inside_map, MAX_TSN)) {
1795 /* Nope not in the valid range dump it */
1797 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1798 kprintf("My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld delq:%d!\n",
1799 (u_long)tsn, (u_long)asoc->my_rwnd,
1800 sctp_sbspace(&stcb->sctp_socket->so_rcv),
1801 stcb->asoc.cnt_on_delivery_queue);
1804 sctp_set_rwnd(stcb, asoc);
1805 if ((asoc->cnt_on_all_streams +
1806 asoc->cnt_on_delivery_queue +
1807 asoc->cnt_on_reasm_queue +
1808 asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
1809 sctp_pegs[SCTP_MSGC_DROP]++;
1811 sctp_pegs[SCTP_RWND_DROPS]++;
1818 strmno = ntohs(ch->dp.stream_id);
1819 if (strmno >= asoc->streamincnt) {
1820 struct sctp_paramhdr *phdr;
1823 MGETHDR(mb, MB_DONTWAIT, MT_DATA);
1825 /* add some space up front so prepend will work well */
1826 mb->m_data += sizeof(struct sctp_chunkhdr);
1827 phdr = mtod(mb, struct sctp_paramhdr *);
1829 * Error causes are just param's and this one has
1830 * two back to back phdr, one with the error type
1831 * and size, the other with the streamid and a rsvd
1833 mb->m_pkthdr.len = mb->m_len =
1834 (sizeof(struct sctp_paramhdr) * 2);
1835 phdr->param_type = htons(SCTP_CAUSE_INV_STRM);
1836 phdr->param_length =
1837 htons(sizeof(struct sctp_paramhdr) * 2);
1839 /* We insert the stream in the type field */
1840 phdr->param_type = ch->dp.stream_id;
1841 /* And set the length to 0 for the rsvd field */
1842 phdr->param_length = 0;
1843 sctp_queue_op_err(stcb, mb);
1845 sctp_pegs[SCTP_BAD_STRMNO]++;
1849 * Before we continue lets validate that we are not
1850 * being fooled by an evil attacker. We can only
1851 * have 4k chunks based on our TSN spread allowed
1852 * by the mapping array 512 * 8 bits, so there is
1853 * no way our stream sequence numbers could have wrapped.
1854 * We of course only validate the FIRST fragment so the
1857 strmseq = ntohs(ch->dp.stream_sequence);
1858 if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1859 (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1860 (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
1861 strmseq, MAX_SEQ) ||
1862 asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
1863 /* The incoming sseq is behind where we last delivered? */
1865 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
1866 kprintf("EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1868 asoc->strmin[strmno].last_sequence_delivered);
1872 * throw it in the stream so it gets cleaned up in
1873 * association destruction
1875 MGET(oper, MB_DONTWAIT, MT_DATA);
1877 struct sctp_paramhdr *ph;
1880 oper->m_len = sizeof(struct sctp_paramhdr) +
1882 ph = mtod(oper, struct sctp_paramhdr *);
1883 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1884 ph->param_length = htons(oper->m_len);
1885 ippp = (u_int32_t *)(ph + 1);
1886 *ippp = htonl(0x20000001);
1888 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY,
1890 sctp_pegs[SCTP_BAD_SSN_WRAP]++;
1895 the_len = (chk_length-sizeof(struct sctp_data_chunk));
1896 if (last_chunk == 0) {
1897 dmbuf = sctp_m_copym(*m,
1898 (offset + sizeof(struct sctp_data_chunk)),
1899 the_len, MB_DONTWAIT);
1901 /* We can steal the last chunk */
1903 /* lop off the top part */
1904 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1905 if (dmbuf->m_pkthdr.len > the_len) {
1906 /* Trim the end round bytes off too */
1907 m_adj(dmbuf, -(dmbuf->m_pkthdr.len-the_len));
1909 sctp_pegs[SCTP_NO_COPY_IN]++;
1911 if (dmbuf == NULL) {
1912 sctp_pegs[SCTP_DROP_NOMEMORY]++;
1915 if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1916 asoc->fragmented_delivery_inprogress == 0 &&
1917 TAILQ_EMPTY(&asoc->delivery_queue) &&
1918 ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) ||
1919 ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1920 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue))) &&
1921 ((long)(stcb->sctp_socket->so_rcv.ssb_hiwat -
1922 stcb->sctp_socket->so_rcv.ssb_cc) >= (long)the_len)) {
1923 /* Candidate for express delivery */
1925 * Its not fragmented,
1927 * Nothing in the delivery queue,
1928 * Its un-ordered OR ordered and the next to deliver AND
1929 * nothing else is stuck on the stream queue,
1930 * And there is room for it in the socket buffer.
1931 * Lets just stuff it up the buffer....
1934 struct mbuf *control, *mmm;
1935 struct sockaddr_in6 sin6;
1936 struct sockaddr_in6 lsa6;
1937 struct sockaddr *to;
1939 /* It would be nice to avoid this copy if we could :< */
1940 control = sctp_build_ctl_nchunk(stcb, tsn,
1941 ch->dp.protocol_id, 0, strmno, strmseq,
1942 ch->ch.chunk_flags);
1943 /* XXX need to append PKTHDR to the socket buffer first */
1945 if ((dmbuf->m_flags & M_PKTHDR) == 0) {
1947 MGETHDR(tmp, MB_DONTWAIT, MT_DATA);
1952 sctp_m_freem(control);
1953 stcb->asoc.my_rwnd_control_len -=
1954 CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
1957 goto failed_express_del;
1959 tmp->m_pkthdr.len = the_len;
1961 tmp->m_next = dmbuf;
1964 to = (struct sockaddr *)&net->ro._l_addr;
1965 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
1966 to->sa_family == AF_INET) {
1967 struct sockaddr_in *sin;
1969 sin = (struct sockaddr_in *)to;
1970 bzero(&sin6, sizeof(sin6));
1971 sin6.sin6_family = AF_INET6;
1972 sin6.sin6_len = sizeof(struct sockaddr_in6);
1973 sin6.sin6_addr.s6_addr16[2] = 0xffff;
1974 bcopy(&sin->sin_addr,
1975 &sin6.sin6_addr.s6_addr16[3],
1976 sizeof(sin6.sin6_addr.s6_addr16[3]));
1977 sin6.sin6_port = sin->sin_port;
1978 to = (struct sockaddr *)&sin6;
1981 /* check and strip embedded scope junk */
1982 to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
1984 if (((struct sockaddr_in *)to)->sin_port == 0) {
1985 kprintf("Huh c, port is %d not net:%x %d?\n",
1986 ((struct sockaddr_in *)to)->sin_port,
1988 (int)(ntohs(stcb->rport)));
1989 ((struct sockaddr_in *)to)->sin_port = stcb->rport;
1994 while (mmm->m_next != NULL) {
1997 mmm->m_flags |= M_EOR;
1998 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
1999 /* we have a new high score */
2000 asoc->highest_tsn_inside_map = tsn;
2001 #ifdef SCTP_MAP_LOGGING
2002 sctp_log_map(0, 1, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2005 SCTP_TCB_UNLOCK(stcb);
2006 SCTP_INP_WLOCK(stcb->sctp_ep);
2007 SCTP_TCB_LOCK(stcb);
2008 lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
2009 if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to, dmbuf,
2010 control, stcb->asoc.my_vtag, stcb->sctp_ep)) {
2012 sctp_m_freem(control);
2013 stcb->asoc.my_rwnd_control_len -=
2014 CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
2016 sctp_m_freem(dmbuf);
2017 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2018 goto failed_express_del;
2020 lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
2021 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
2022 if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
2023 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2026 stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
2028 SCTP_INP_WUNLOCK(stcb->sctp_ep);
2029 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2030 if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
2032 /* for ordered, bump what we delivered */
2033 asoc->strmin[strmno].last_sequence_delivered++;
2035 sctp_pegs[SCTP_EXPRESS_ROUTE]++;
2036 #ifdef SCTP_STR_LOGGING
2037 sctp_log_strm_del_alt(tsn, strmseq,
2038 SCTP_STR_LOG_FROM_EXPRS_DEL);
2041 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
2042 kprintf("Express Delivery succeeds\n");
2045 goto finish_express_del;
2049 /* If we reach here this is a new chunk */
2050 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
2052 /* No memory so we drop the chunk */
2053 sctp_pegs[SCTP_DROP_NOMEMORY]++;
2054 if (last_chunk == 0) {
2055 /* we copied it, free the copy */
2056 sctp_m_freem(dmbuf);
2060 sctppcbinfo.ipi_count_chunk++;
2061 sctppcbinfo.ipi_gencnt_chunk++;
2062 chk->rec.data.TSN_seq = tsn;
2063 chk->rec.data.stream_seq = strmseq;
2064 chk->rec.data.stream_number = strmno;
2065 chk->rec.data.payloadtype = ch->dp.protocol_id;
2066 chk->rec.data.context = 0;
2067 chk->rec.data.doing_fast_retransmit = 0;
2068 chk->rec.data.rcv_flags = ch->ch.chunk_flags;
2070 chk->send_size = the_len;
2076 /* Mark it as received */
2077 /* Now queue it where it belongs */
2078 if ((chk->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
2079 SCTP_DATA_NOT_FRAG) {
2080 /* First a sanity check */
2081 if (asoc->fragmented_delivery_inprogress) {
2083 * Ok, we have a fragmented delivery in progress
2084 * if this chunk is next to deliver OR belongs in
2085 * our view to the reassembly, the peer is evil
2088 u_int32_t estimate_tsn;
2089 estimate_tsn = asoc->tsn_last_delivered + 1;
2090 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
2091 (estimate_tsn == chk->rec.data.TSN_seq)) {
2092 /* Evil/Broke peer */
2093 MGET(oper, MB_DONTWAIT, MT_DATA);
2095 struct sctp_paramhdr *ph;
2099 sizeof(struct sctp_paramhdr) +
2101 ph = mtod(oper, struct sctp_paramhdr *);
2103 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2104 ph->param_length = htons(oper->m_len);
2105 ippp = (u_int32_t *)(ph + 1);
2106 *ippp = htonl(0x20000002);
2108 sctp_abort_an_association(stcb->sctp_ep, stcb,
2109 SCTP_PEER_FAULTY, oper);
2112 sctp_pegs[SCTP_DROP_FRAG]++;
2115 if (sctp_does_chk_belong_to_reasm(asoc, chk)) {
2116 MGET(oper, MB_DONTWAIT, MT_DATA);
2118 struct sctp_paramhdr *ph;
2122 sizeof(struct sctp_paramhdr) +
2125 struct sctp_paramhdr *);
2127 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2130 ippp = (u_int32_t *)(ph + 1);
2131 *ippp = htonl(0x20000003);
2133 sctp_abort_an_association(stcb->sctp_ep,
2134 stcb, SCTP_PEER_FAULTY, oper);
2137 sctp_pegs[SCTP_DROP_FRAG]++;
2142 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
2144 * Reassembly queue is NOT empty
2145 * validate that this chk does not need to
2146 * be in reasembly queue. If it does then
2147 * our peer is broken or evil.
2149 if (sctp_does_chk_belong_to_reasm(asoc, chk)) {
2150 MGET(oper, MB_DONTWAIT, MT_DATA);
2152 struct sctp_paramhdr *ph;
2156 sizeof(struct sctp_paramhdr) +
2159 struct sctp_paramhdr *);
2161 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2164 ippp = (u_int32_t *)(ph + 1);
2165 *ippp = htonl(0x20000004);
2167 sctp_abort_an_association(stcb->sctp_ep,
2168 stcb, SCTP_PEER_FAULTY, oper);
2171 sctp_pegs[SCTP_DROP_FRAG]++;
2176 if (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
2177 /* queue directly into socket buffer */
2178 sctp_deliver_data(stcb, asoc, chk, 0);
2179 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2181 /* Special check for when streams are resetting.
2182 * We could be more smart about this and check the
2183 * actual stream to see if it is not being reset.. that
2184 * way we would not create a HOLB when amongst streams
2185 * being reset and those not being reset.
2187 * We take complete messages that have a stream reset
2188 * intervening (aka the TSN is after where our cum-ack needs
2189 * to be) off and put them on a pending_reply_queue. The
2190 * reassembly ones we do not have to worry about since
2191 * they are all sorted and proceessed by TSN order. It
2192 * is only the singletons I must worry about.
2194 if ((asoc->pending_reply) &&
2195 ((compare_with_wrap(tsn, ntohl(asoc->pending_reply->reset_at_tsn), MAX_TSN)) ||
2196 (tsn == ntohl(asoc->pending_reply->reset_at_tsn)))
2198 /* yep its past where we need to reset... go ahead and
2201 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue , chk, sctp_next);
2203 sctp_queue_data_to_stream(stcb, asoc, chk, abort_flag);
2207 /* Into the re-assembly queue */
2208 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2210 sctp_pegs[SCTP_DROP_FRAG]++;
2214 if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
2215 /* we have a new high score */
2216 asoc->highest_tsn_inside_map = tsn;
2217 #ifdef SCTP_MAP_LOGGING
2218 sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2225 sctp_pegs[SCTP_PEG_TSNS_RCVD]++;
2226 /* Set it present please */
2227 #ifdef SCTP_STR_LOGGING
2228 sctp_log_strm_del_alt(tsn, strmseq, SCTP_STR_LOG_FROM_MARK_TSN);
2230 #ifdef SCTP_MAP_LOGGING
2231 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2232 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2234 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2239 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2242 * Now we also need to check the mapping array in a couple of ways.
2243 * 1) Did we move the cum-ack point?
2245 struct sctp_association *asoc;
2247 int m_size, all_ones;
2248 int slide_from, slide_end, lgap, distance;
2249 #ifdef SCTP_MAP_LOGGING
2250 uint32_t old_cumack, old_base, old_highest;
2251 unsigned char aux_array[64];
2257 #ifdef SCTP_MAP_LOGGING
2258 old_cumack = asoc->cumulative_tsn;
2259 old_base = asoc->mapping_array_base_tsn;
2260 old_highest = asoc->highest_tsn_inside_map;
2261 if (asoc->mapping_array_size < 64)
2262 memcpy(aux_array, asoc->mapping_array,
2263 asoc->mapping_array_size);
2265 memcpy(aux_array, asoc->mapping_array, 64);
2269 * We could probably improve this a small bit by calculating the
2270 * offset of the current cum-ack as the starting point.
2273 m_size = stcb->asoc.mapping_array_size << 3;
2274 for (i = 0; i < m_size; i++) {
2275 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i)) {
2277 * Ok we found the first place that we are
2282 asoc->cumulative_tsn = asoc->mapping_array_base_tsn +
2287 if (compare_with_wrap(asoc->cumulative_tsn,
2288 asoc->highest_tsn_inside_map,
2290 panic("huh, cumack greater than high-tsn in map");
2293 (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) {
2294 /* The complete array was completed by a single FR */
2295 /* higest becomes the cum-ack */
2297 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2298 /* clear the array */
2300 clr = asoc->mapping_array_size;
2302 clr = (at >> 3) + 1;
2304 * this should be the allones case
2305 * but just in case :>
2307 if (clr > asoc->mapping_array_size)
2308 clr = asoc->mapping_array_size;
2310 memset(asoc->mapping_array, 0, clr);
2311 /* base becomes one ahead of the cum-ack */
2312 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2313 #ifdef SCTP_MAP_LOGGING
2314 sctp_log_map(old_base, old_cumack, old_highest,
2315 SCTP_MAP_PREPARE_SLIDE);
2316 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2317 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2319 } else if (at >= 8) {
2320 /* we can slide the mapping array down */
2321 /* Calculate the new byte postion we can move down */
2322 slide_from = at >> 3;
2323 /* now calculate the ceiling of the move using our highest TSN value */
2324 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2325 lgap = asoc->highest_tsn_inside_map -
2326 asoc->mapping_array_base_tsn;
2328 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2329 asoc->highest_tsn_inside_map + 1;
2331 slide_end = lgap >> 3;
2332 if (slide_end < slide_from) {
2333 panic("impossible slide");
2335 distance = (slide_end-slide_from) + 1;
2336 #ifdef SCTP_MAP_LOGGING
2337 sctp_log_map(old_base, old_cumack, old_highest,
2338 SCTP_MAP_PREPARE_SLIDE);
2339 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2340 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2342 if (distance + slide_from > asoc->mapping_array_size ||
2345 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
2346 kprintf("Ugh bad addition.. you can't hrumpp!\n");
2350 * Here we do NOT slide forward the array so that
2351 * hopefully when more data comes in to fill it up
2352 * we will be able to slide it forward. Really
2353 * I don't think this should happen :-0
2356 #ifdef SCTP_MAP_LOGGING
2357 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2358 (uint32_t)asoc->mapping_array_size,
2359 SCTP_MAP_SLIDE_NONE);
2363 for (ii = 0; ii < distance; ii++) {
2364 asoc->mapping_array[ii] =
2365 asoc->mapping_array[slide_from + ii];
2367 for (ii = distance;ii <= slide_end; ii++) {
2368 asoc->mapping_array[ii] = 0;
2370 asoc->mapping_array_base_tsn += (slide_from << 3);
2371 #ifdef SCTP_MAP_LOGGING
2372 sctp_log_map(asoc->mapping_array_base_tsn,
2373 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2374 SCTP_MAP_SLIDE_RESULT);
2379 /* check the special flag for stream resets */
2380 if ((asoc->pending_reply) &&
2381 ((compare_with_wrap((asoc->cumulative_tsn+1), ntohl(asoc->pending_reply->reset_at_tsn), MAX_TSN)) ||
2382 ((asoc->cumulative_tsn+1) == ntohl(asoc->pending_reply->reset_at_tsn)))
2384 /* we have finished working through the backlogged TSN's now
2385 * time to reset streams.
2386 * 1: call reset function.
2387 * 2: free pending_reply space
2388 * 3: distribute any chunks in pending_reply_queue.
2390 struct sctp_tmit_chunk *chk;
2391 sctp_handle_stream_reset_response(stcb, asoc->pending_reply);
2392 FREE(asoc->pending_reply, M_PCB);
2393 asoc->pending_reply = NULL;
2394 chk = TAILQ_FIRST(&asoc->pending_reply_queue);
2396 TAILQ_REMOVE(&asoc->pending_reply_queue, chk, sctp_next);
2397 sctp_queue_data_to_stream(stcb, asoc, chk, abort_flag);
2401 chk = TAILQ_FIRST(&asoc->pending_reply_queue);
2405 * Now we need to see if we need to queue a sack or just start
2406 * the timer (if allowed).
2409 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2411 * Ok special case, in SHUTDOWN-SENT case.
2412 * here we maker sure SACK timer is off and
2413 * instead send a SHUTDOWN and a SACK
2415 if (callout_pending(&stcb->asoc.dack_timer.timer)) {
2416 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2417 stcb->sctp_ep, stcb, NULL);
2420 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
2421 kprintf("%s:%d sends a shutdown\n",
2427 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2428 sctp_send_sack(stcb);
2431 /* is there a gap now ? */
2432 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2433 stcb->asoc.cumulative_tsn, MAX_TSN);
2434 if ((stcb->asoc.first_ack_sent == 0) || /* First time we send a sack */
2435 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no longer is one */
2436 (stcb->asoc.numduptsns) || /* we have dup's */
2437 (is_a_gap) || /* is still a gap */
2438 (callout_pending(&stcb->asoc.dack_timer.timer)) /* timer was up . second packet */
2441 * Ok we must build a SACK since the timer
2442 * is pending, we got our first packet OR
2443 * there are gaps or duplicates.
2445 stcb->asoc.first_ack_sent = 1;
2446 sctp_send_sack(stcb);
2447 /* The sending will stop the timer */
2449 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2450 stcb->sctp_ep, stcb, NULL);
/*
 * Drain the reassembly and ordered-delivery queues toward the socket
 * receive buffer.  An in-progress partial delivery (PD-API) is serviced
 * first; otherwise complete messages are handed up via sctp_deliver_data()
 * until it refuses, and then the reassembly queue is probed to see whether
 * the first fragment of the next message can start a new partial delivery.
 * hold_locks is passed through to the helpers so the caller can indicate
 * it already holds the needed locks.
 * NOTE(review): source line numbers jump in this listing, so some
 * intervening statements/braces are elided and not visible here.
 */
2457 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc, int hold_locks)
2459 struct sctp_tmit_chunk *chk;
2461 u_int16_t nxt_todel;
/* Continue an already-running fragmented (partial) delivery first. */
2464 if (asoc->fragmented_delivery_inprogress) {
2465 sctp_service_reassembly(stcb, asoc, hold_locks);
2467 /* Can we proceed further, i.e. the PD-API is complete */
2468 if (asoc->fragmented_delivery_inprogress) {
2474 * Yes, reassembly delivery no longer in progress see if we
2475 * have some on the sb hold queue.
/* Receive buffer already full: just wake the reader, nothing to push. */
2478 if (stcb->sctp_socket->so_rcv.ssb_cc >= stcb->sctp_socket->so_rcv.ssb_hiwat) {
2480 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2483 /* If deliver_data says no we must stop */
2484 if (sctp_deliver_data(stcb, asoc, NULL, hold_locks) == 0)
2487 chk = TAILQ_FIRST(&asoc->delivery_queue);
2490 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2493 * Now is there some other chunk I can deliver
2494 * from the reassembly queue.
2496 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Empty reassembly queue: reset the book-keeping counters. */
2498 asoc->size_on_reasm_queue = 0;
2499 asoc->cnt_on_reasm_queue = 0;
/* Next expected stream sequence number for this chunk's stream. */
2502 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
/* Head of reasm queue is a FIRST_FRAG that is in order (or unordered). */
2503 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2504 ((nxt_todel == chk->rec.data.stream_seq) ||
2505 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2507 * Yep the first one is here. We setup to
2508 * start reception, by backing down the TSN
2509 * just in case we can't deliver.
2513 * Before we start though either all of the
2514 * message should be here or 1/4 the socket buffer
2515 * max or nothing on the delivery queue and something
/* Start PD-API only if the whole message is queued, or >= 1/4 of
 * the socket buffer's high-water mark is already on the queue. */
2518 if (TAILQ_EMPTY(&asoc->delivery_queue) &&
2519 (sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2520 (asoc->size_on_reasm_queue >=
2521 (stcb->sctp_socket->so_rcv.ssb_hiwat >> 2) && tsize))) {
/* Record the stream/SSN/TSN context of the new partial delivery;
 * tsn_last_delivered is backed down by one so redelivery can restart. */
2522 asoc->fragmented_delivery_inprogress = 1;
2523 asoc->tsn_last_delivered = chk->rec.data.TSN_seq-1;
2524 asoc->str_of_pdapi = chk->rec.data.stream_number;
2525 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2526 asoc->fragment_flags = chk->rec.data.rcv_flags;
2527 sctp_service_reassembly(stcb, asoc, hold_locks);
/*
 * Process all DATA chunks in an inbound SCTP packet.
 * Walks the chunk list starting at *offset in mbuf chain *mm, validates
 * each chunk header, hands each DATA chunk to sctp_process_a_data_chunk(),
 * then services the delivery/reassembly queues and schedules a SACK.
 * *high_tsn is updated to the highest TSN seen; *offset is advanced as
 * chunks are consumed.  May abort the association on a malformed chunk.
 * NOTE(review): source line numbers jump in this listing; declarations
 * (e.g. of m, to, from, tsize) and several braces are elided.
 */
2533 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2534 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2535 struct sctp_nets *net, u_int32_t *high_tsn)
2537 struct sctp_data_chunk *ch, chunk_buf;
2538 struct sctp_association *asoc;
2539 int num_chunks = 0; /* number of control chunks processed */
2540 int chk_length, break_flag, last_chunk;
2541 int abort_flag = 0, was_a_gap = 0;
/* Refresh our advertised receive window before processing. */
2545 sctp_set_rwnd(stcb, &stcb->asoc);
/* A mapping-array high TSN beyond the cumulative TSN means there was
 * already a gap before this packet arrived. */
2549 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2550 stcb->asoc.cumulative_tsn, MAX_TSN)) {
2551 /* there was a gap before this data was processed */
2555 * setup where we got the last DATA packet from for
2556 * any SACK that may need to go out. Don't bump
2557 * the net. This is done ONLY when a chunk
2560 asoc->last_data_chunk_from = net;
2563 * Now before we proceed we must figure out if this
2564 * is a wasted cluster... i.e. it is a small packet
2565 * sent in and yet the driver underneath allocated a
2566 * full cluster for it. If so we must copy it to a
2567 * smaller mbuf and free up the cluster mbuf. This
2568 * will help with cluster starvation.
2570 if (m->m_len < (long)MHLEN && m->m_next == NULL) {
2571 /* we only handle mbufs that are singletons.. not chains */
2572 #ifdef __DragonFly__
2573 if ((*mm)->m_flags & M_PKTHDR)
2574 MGETHDR(m, MB_DONTWAIT, MT_HEADER)
2577 MGET(m, MB_DONTWAIT, MT_DATA);
2579 /* ok lets see if we can copy the data up */
2582 if ((*mm)->m_flags & M_PKTHDR) {
2583 /* got to copy the header first */
/* Older API copies the pkthdr; newer one moves (steals) it. */
2585 M_COPY_PKTHDR(m, (*mm));
2587 M_MOVE_PKTHDR(m, (*mm));
2590 /* get the pointers and copy */
/* NOTE(review): mtod(m, caddr_t *) looks like it should be
 * mtod(m, caddr_t) — the declarations of to/from are elided, so
 * verify against upstream before assuming a bug. */
2591 to = mtod(m, caddr_t *);
2592 from = mtod((*mm), caddr_t *);
2593 memcpy(to, from, (*mm)->m_len);
2594 /* copy the length and free up the old */
2595 m->m_len = (*mm)->m_len;
2597 /* sucess, back copy */
2600 /* We are in trouble in the mbuf world .. yikes */
2604 /* get pointer to the first chunk header */
2605 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2606 sizeof(chunk_buf), (u_int8_t *)&chunk_buf);
2608 kprintf(" ... its short\n");
2612 * process all DATA chunks...
2616 if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2617 kprintf("In process data off:%d length:%d iphlen:%d ch->type:%d\n",
2618 *offset, length, iphlen, (int)ch->ch.chunk_type);
2622 *high_tsn = asoc->cumulative_tsn;
2624 while (ch->ch.chunk_type == SCTP_DATA) {
2625 /* validate chunk length */
2626 chk_length = ntohs(ch->ch.chunk_length);
/* A DATA chunk must carry at least one byte of payload and must fit
 * in the remaining packet, else abort with PROTOCOL_VIOLATION. */
2627 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1 ||
2628 length - *offset < chk_length) {
2630 * Need to send an abort since we had a invalid
2633 struct mbuf *op_err;
2634 MGET(op_err, MB_DONTWAIT, MT_DATA);
2636 struct sctp_paramhdr *ph;
2639 op_err->m_len = sizeof(struct sctp_paramhdr) +
2641 ph = mtod(op_err, struct sctp_paramhdr *);
2643 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2644 ph->param_length = htons(op_err->m_len);
2645 ippp = (u_int32_t *)(ph + 1);
/* 0x30000001 tags this abort site for debugging. */
2646 *ippp = htonl(0x30000001);
2648 sctp_abort_association(inp, stcb, m, iphlen, sh,
2653 if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2654 kprintf("A chunk of len:%d to process (tot:%d)\n",
2655 chk_length, length - *offset);
2659 #ifdef SCTP_AUDITING_ENABLED
2660 sctp_audit_log(0xB1, 0);
/* NOTE(review): this "is it the last chunk" test compares against
 * *offset - length (negative while offset < length); upstream
 * variants use (length - *offset).  Looks inverted — confirm
 * against the KAME/FreeBSD history before relying on last_chunk. */
2662 if (SCTP_SIZE32(chk_length) == *offset - length) {
2667 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2668 chk_length, net, high_tsn, &abort_flag, &break_flag,
2672 if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2673 kprintf("Now incr num_chunks to %d\n",
2683 * Set because of out of rwnd space and no drop rep
/* Advance past this chunk (padded to a 4-byte boundary). */
2689 *offset += SCTP_SIZE32(chk_length);
2690 if (*offset >= length) {
2691 /* no more data left in the mbuf chain */
2694 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2695 sizeof(chunk_buf), (u_int8_t *)&chunk_buf);
2703 * we need to report rwnd overrun drops.
2705 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2709 * Did we get data, if so update the time for
2710 * auto-close and give peer credit for being
/* Receiving data clears the error count and refreshes last-rcvd time. */
2713 sctp_pegs[SCTP_DATA_DG_RECV]++;
2714 stcb->asoc.overall_error_count = 0;
2715 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2717 /* now service all of the reassm queue and delivery queue */
2718 sctp_service_queues(stcb, asoc, 0);
2719 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2721 * Assure that we ack right away by making
2722 * sure that a d-ack timer is running. So the
2723 * sack_check will send a sack.
2725 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb,
2728 /* Start a sack timer or QUEUE a SACK for sending */
2729 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
/*
 * Walk the gap-ack blocks of a received SACK and mark the corresponding
 * chunks on the sent queue as acked (SCTP_DATAGRAM_MARKED), adjusting
 * flight sizes, RTO measurements, retransmit counts and the ECN nonce
 * sum as each newly-acked TSN is found.  frag_strt/frag_end are offsets
 * relative to last_tsn (the SACK's cumulative TSN ack).  Outputs:
 * *biggest_tsn_acked / *biggest_newly_acked_tsn are pushed up as higher
 * acked TSNs are discovered; *ecn_seg_sums accumulates nonce bits.
 * NOTE(review): source line numbers jump in this listing, so some
 * statements/braces are elided and not visible here.
 */
2737 sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc,
2738 struct sctp_sack_chunk *ch, u_long last_tsn, u_long *biggest_tsn_acked,
2739 u_long *biggest_newly_acked_tsn, int num_seg, int *ecn_seg_sums)
2741 /************************************************/
2742 /* process fragments and update sendqueue */
2743 /************************************************/
2744 struct sctp_sack *sack;
2745 struct sctp_gap_ack_block *frag;
2746 struct sctp_tmit_chunk *tp1;
2749 #ifdef SCTP_FR_LOGGING
2752 uint16_t frag_strt, frag_end, primary_flag_set;
2753 u_long last_frag_high;
/* CACC: remember whether a primary-address switch is pending. */
2755 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2756 primary_flag_set = 1;
2758 primary_flag_set = 0;
/* Gap-ack blocks follow the fixed sack header inside the chunk. */
2762 frag = (struct sctp_gap_ack_block *)((caddr_t)sack +
2763 sizeof(struct sctp_sack));
2766 for (i = 0; i < num_seg; i++) {
2767 frag_strt = ntohs(frag->start);
2768 frag_end = ntohs(frag->end);
2769 /* some sanity checks on the fargment offsets */
2770 if (frag_strt > frag_end) {
2771 /* this one is malformed, skip */
2775 if (compare_with_wrap((frag_end+last_tsn), *biggest_tsn_acked,
2777 *biggest_tsn_acked = frag_end+last_tsn;
2779 /* mark acked dgs and find out the highestTSN being acked */
/* First block: start scanning from the head of the sent queue. */
2781 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2783 /* save the locations of the last frags */
2784 last_frag_high = frag_end + last_tsn;
2787 * now lets see if we need to reset the queue
2788 * due to a out-of-order SACK fragment
2790 if (compare_with_wrap(frag_strt+last_tsn,
2791 last_frag_high, MAX_TSN)) {
2793 * if the new frag starts after the last TSN
2794 * frag covered, we are ok
2795 * and this one is beyond the last one
2800 * ok, they have reset us, so we need to reset
2801 * the queue this will cause extra hunting but
2802 * hey, they chose the performance
2803 * hit when they failed to order there gaps..
2805 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2807 last_frag_high = frag_end + last_tsn;
/* Mark every TSN covered by this gap-ack block. */
2809 for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) {
2811 #ifdef SCTP_FR_LOGGING
2812 if (tp1->rec.data.doing_fast_retransmit)
2816 if (tp1->rec.data.TSN_seq == j) {
2817 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2818 /* must be held until cum-ack passes */
2819 /* ECN Nonce: Add the nonce value to the sender's nonce sum */
2820 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
2822 * If it is less than
2824 * no-longer in flight.
2826 * already be set via
2829 * i.e. ACKED or MARKED.
2831 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2832 *biggest_newly_acked_tsn,
2834 *biggest_newly_acked_tsn =
2835 tp1->rec.data.TSN_seq;
/* Newly acked: remove from per-net and per-assoc flight,
 * clamping all counters at zero. */
2837 tp1->whoTo->flight_size -= tp1->book_size;
2838 if (tp1->whoTo->flight_size < 0) {
2839 tp1->whoTo->flight_size = 0;
2841 asoc->total_flight -=
2844 if (asoc->total_flight < 0) {
2845 asoc->total_flight = 0;
2848 asoc->total_flight_count--;
2849 if (asoc->total_flight_count < 0) {
2850 asoc->total_flight_count = 0;
/* snd_count < 2 means never retransmitted, so the sample is
 * unambiguous (Karn's rule) and may feed net_ack2 / RTO. */
2853 if (tp1->snd_count < 2) {
2854 /* True non-retransmited chunk */
2855 tp1->whoTo->net_ack2 +=
2858 /* update RTO too? */
2861 sctp_calculate_rto(stcb,
2864 &tp1->sent_rcv_time);
2865 tp1->whoTo->rto_pending = 0;
/* Track the highest gap-acked TSN for the strike logic later. */
2870 if (tp1->sent <= SCTP_DATAGRAM_RESEND &&
2871 tp1->sent != SCTP_DATAGRAM_UNSENT &&
2872 compare_with_wrap(tp1->rec.data.TSN_seq,
2873 asoc->this_sack_highest_gap,
2875 asoc->this_sack_highest_gap =
2876 tp1->rec.data.TSN_seq;
2877 if (primary_flag_set) {
2878 tp1->whoTo->cacc_saw_newack = 1;
/* A chunk queued for retransmit got acked after all: undo the
 * retransmit count (clamping at zero with a diagnostic). */
2881 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2884 SCTP_DEBUG_INDATA3) {
2885 kprintf("Hmm. one that is in RESEND that is now ACKED\n");
2888 asoc->sent_queue_retran_cnt--;
2889 #ifdef SCTP_AUDITING_ENABLED
2890 sctp_audit_log(0xB2,
2891 (asoc->sent_queue_retran_cnt & 0x000000ff));
2894 if (asoc->sent_queue_retran_cnt < 0) {
2895 kprintf("huh3 retran went negative?\n");
2896 #ifdef SCTP_AUDITING_ENABLED
2901 asoc->sent_queue_retran_cnt = 0;
/* Fold this chunk's ECN nonce bit into the segment sum and mark. */
2905 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
2906 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
2907 tp1->sent = SCTP_DATAGRAM_MARKED;
2910 } /* if (tp1->TSN_seq == j) */
2911 if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
2914 tp1 = TAILQ_NEXT(tp1, sctp_next);
2915 }/* end while (tp1) */
2916 } /* end for (j = fragStart */
2917 frag++; /* next one */
2919 #ifdef SCTP_FR_LOGGING
2921 sctp_log_fr(*biggest_tsn_acked, *biggest_newly_acked_tsn,
2922 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
/*
 * Scan the sent queue between cum_ack and biggest_tsn_acked for chunks
 * the peer previously gap-acked but did not re-ack in this SACK: those
 * are "revoked" and are demoted from ACKED back to SENT (eligible for
 * retransmission again).  Chunks re-acked this round (MARKED) are
 * promoted to ACKED.  If anything was revoked, reset the ECN nonce
 * sync point since upcoming retransmissions carry no nonce bits.
 * NOTE(review): source line numbers jump in this listing, so some
 * statements/braces are elided and not visible here.
 */
2927 sctp_check_for_revoked(struct sctp_association *asoc, u_long cum_ack,
2928 u_long biggest_tsn_acked)
2930 struct sctp_tmit_chunk *tp1;
2933 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Only chunks strictly above the cumulative ack can be revoked. */
2935 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
2938 * ok this guy is either ACK or MARKED. If it is ACKED
2939 * it has been previously acked but not this time i.e.
2940 * revoked. If it is MARKED it was ACK'ed again.
2942 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2943 /* it has been revoked */
2945 * We do NOT add back to flight size here since
2946 * it is really NOT in flight. Resend (when/if
2947 * it occurs will add to flight size
2949 tp1->sent = SCTP_DATAGRAM_SENT;
2951 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2952 /* it has been re-acked in this SACK */
2953 tp1->sent = SCTP_DATAGRAM_ACKED;
/* Stop once past the highest TSN this SACK covered. */
2956 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
2958 /* above the sack */
2961 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2963 tp1 = TAILQ_NEXT(tp1, sctp_next);
2965 if (tot_revoked > 0) {
2966 /* Setup the ecn nonce re-sync point. We
2967 * do this since once data is revoked
2968 * we begin to retransmit things, which
2969 * do NOT have the ECN bits set. This means
2970 * we are now out of sync and must wait until
2971 * we get back in sync with the peer to
/* Resync at the next TSN to be sent: either the head of the send
 * queue or, if empty, the next sequence number to be assigned. */
2974 tp1 = TAILQ_FIRST(&asoc->send_queue);
2976 asoc->nonce_resync_tsn = asoc->sending_seq;
2978 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
2980 asoc->nonce_wait_for_ecne = 0;
2981 asoc->nonce_sum_check = 0;
2986 extern int sctp_peer_chunk_oh;
/*
 * Apply "strikes" (missing-report increments leading to fast retransmit)
 * to un-acked chunks on the sent queue after SACK gap processing.
 * Honors: PR-SCTP lifetime expiry (drops timed-out chunks), CACC rules
 * around a primary-address switch (strike_enabled / double switch),
 * HTNA (don't strike TSNs above the highest newly acked), and the
 * fast-recovery rule (while in FR, only re-strike a chunk if this SACK
 * acks beyond the point recorded when its FR was done).  Chunks reaching
 * RESEND have their flight/rwnd accounting unwound and may be moved to
 * an alternate net.  Resets the ECN nonce sync point if anything was
 * marked for retransmit.
 * NOTE(review): source line numbers jump in this listing, so some
 * statements/braces are elided and not visible here.
 */
2989 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2990 u_long biggest_tsn_acked, int strike_enabled,
2991 u_long biggest_tsn_newly_acked, int accum_moved)
2993 struct sctp_tmit_chunk *tp1;
2997 u_int32_t sending_seq;
2998 int primary_switch_active = 0;
2999 int double_switch_active = 0;
3001 /* select the sending_seq, this is
3002 * either the next thing ready to
3003 * be sent but not transmitted, OR,
3004 * the next seq we assign.
3006 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3008 sending_seq = asoc->sending_seq;
3010 sending_seq = tp1->rec.data.TSN_seq;
/* CACC state: single vs double primary switch in progress. */
3013 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3014 primary_switch_active = 1;
3016 if (asoc->primary_destination->dest_state & SCTP_ADDR_DOUBLE_SWITCH) {
3017 double_switch_active = 1;
/* Only need the clock if PR-SCTP lifetimes may expire. */
3019 if (stcb->asoc.peer_supports_prsctp ) {
3020 SCTP_GETTIME_TIMEVAL(&now);
3022 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Past the SACK's highest acked TSN (or unsent): nothing further
 * down the queue can be struck. */
3025 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3027 tp1->sent == SCTP_DATAGRAM_UNSENT) {
/* PR-SCTP (timed reliability, not buffer-bounded): drop chunks
 * whose lifetime has expired instead of striking them. */
3031 if ((tp1->flags & (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) ==
3032 SCTP_PR_SCTP_ENABLED &&
3033 tp1->sent < SCTP_DATAGRAM_ACKED) {
3034 /* Is it expired? */
3036 if (timercmp(&now, &tp1->rec.data.timetodrop, >))
3038 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3041 /* Yes so drop it */
3042 if (tp1->data != NULL) {
3043 sctp_release_pr_sctp_chunk(stcb, tp1,
3044 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3047 tp1 = TAILQ_NEXT(tp1, sctp_next);
3052 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3053 asoc->this_sack_highest_gap, MAX_TSN)) {
3054 /* we are beyond the tsn in the sack */
3057 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3058 /* either a RESEND, ACKED, or MARKED */
3060 tp1 = TAILQ_NEXT(tp1, sctp_next);
/* CACC restrictions while a primary switch is pending. */
3063 if (primary_switch_active && (strike_enabled == 0)) {
3064 if (tp1->whoTo != asoc->primary_destination) {
3066 * We can only strike things on the primary if
3067 * the strike_enabled flag is clear
3069 tp1 = TAILQ_NEXT(tp1, sctp_next);
3072 } else if (primary_switch_active) {
3073 if (tp1->whoTo->cacc_saw_newack == 0) {
3075 * Only one was received but it was NOT
3078 tp1 = TAILQ_NEXT(tp1, sctp_next);
3082 if (double_switch_active &&
3083 (compare_with_wrap(asoc->primary_destination->next_tsn_at_change,
3084 tp1->rec.data.TSN_seq, MAX_TSN))) {
3086 * With a double switch we do NOT mark unless we
3087 * are beyond the switch point.
3089 tp1 = TAILQ_NEXT(tp1, sctp_next);
3093 * Here we check to see if we were have already done a FR
3094 * and if so we see if the biggest TSN we saw in the sack is
3095 * smaller than the recovery point. If so we don't strike the
3096 * tsn... otherwise we CAN strike the TSN.
3098 if (accum_moved && asoc->fast_retran_loss_recovery) {
3100 * Strike the TSN if in fast-recovery and
3104 } else if (tp1->rec.data.doing_fast_retransmit) {
3106 * For those that have done a FR we must
3107 * take special consideration if we strike. I.e
3108 * the biggest_newly_acked must be higher
3109 * than the sending_seq at the time we did
3112 #ifdef SCTP_FR_TO_ALTERNATE
3114 * If FR's go to new networks, then we
3115 * must only do this for singly homed asoc's. However
3116 * if the FR's go to the same network (Armando's work)
3117 * then its ok to FR multiple times.
3119 if (asoc->numnets < 2)
3124 if ((compare_with_wrap(biggest_tsn_newly_acked,
3125 tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
3126 (biggest_tsn_newly_acked ==
3127 tp1->rec.data.fast_retran_tsn)) {
3129 * Strike the TSN, since this ack is
3130 * beyond where things were when we did
3133 #ifdef SCTP_FR_LOGGING
3134 sctp_log_fr(biggest_tsn_newly_acked,
3135 tp1->rec.data.TSN_seq,
3136 tp1->rec.data.fast_retran_tsn,
3137 SCTP_FR_LOG_STRIKE_CHUNK);
/* HTNA: never strike a TSN larger than the highest newly acked. */
3143 } else if (compare_with_wrap(tp1->rec.data.TSN_seq,
3144 biggest_tsn_newly_acked, MAX_TSN)) {
3146 * We don't strike these:
3147 * This is the HTNA algorithm i.e. we don't strike
3148 * If our TSN is larger than the Highest TSN Newly
3153 /* Strike the TSN */
/* Chunk just crossed the strike threshold into RESEND: account
 * for the retransmission and pick where it will go. */
3156 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3157 /* Increment the count to resend */
3158 struct sctp_nets *alt;
3160 #ifdef SCTP_FR_LOGGING
3161 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3165 /* This is a subsequent FR */
3166 sctp_pegs[SCTP_DUP_FR]++;
3168 asoc->sent_queue_retran_cnt++;
3169 #ifdef SCTP_FR_TO_ALTERNATE
3170 /* Can we find an alternate? */
3171 alt = sctp_find_alternate_net(stcb, tp1->whoTo);
3174 * default behavior is to NOT retransmit FR's
3175 * to an alternate. Armando Caro's paper details
3180 tp1->rec.data.doing_fast_retransmit = 1;
3182 /* mark the sending seq for possible subsequent FR's */
3183 if (TAILQ_EMPTY(&asoc->send_queue)) {
3185 * If the queue of send is empty then its the
3186 * next sequence number that will be assigned so
3187 * we subtract one from this to get the one we
3190 tp1->rec.data.fast_retran_tsn = sending_seq - 1;
3193 * If there are chunks on the send queue
3194 * (unsent data that has made it from the
3195 * stream queues but not out the door, we take
3196 * the first one (which will have the lowest
3197 * TSN) and subtract one to get the one we last
3200 struct sctp_tmit_chunk *ttt;
3201 ttt = TAILQ_FIRST(&asoc->send_queue);
3202 tp1->rec.data.fast_retran_tsn =
3203 ttt->rec.data.TSN_seq - 1;
3207 * this guy had a RTO calculation pending on it,
/* Retransmission invalidates the RTT sample (Karn's rule). */
3210 tp1->whoTo->rto_pending = 0;
3213 /* fix counts and things */
3215 tp1->whoTo->net_ack++;
3216 tp1->whoTo->flight_size -= tp1->book_size;
3217 if (tp1->whoTo->flight_size < 0) {
3218 tp1->whoTo->flight_size = 0;
3220 #ifdef SCTP_LOG_RWND
3221 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3222 asoc->peers_rwnd , tp1->send_size, sctp_peer_chunk_oh);
3224 /* add back to the rwnd */
3225 asoc->peers_rwnd += (tp1->send_size + sctp_peer_chunk_oh);
3227 /* remove from the total flight */
3228 asoc->total_flight -= tp1->book_size;
3229 if (asoc->total_flight < 0) {
3230 asoc->total_flight = 0;
3232 asoc->total_flight_count--;
3233 if (asoc->total_flight_count < 0) {
3234 asoc->total_flight_count = 0;
3236 if (alt != tp1->whoTo) {
3237 /* yes, there is an alternate. */
3238 sctp_free_remote_addr(tp1->whoTo);
3243 tp1 = TAILQ_NEXT(tp1, sctp_next);
3246 if (tot_retrans > 0) {
3247 /* Setup the ecn nonce re-sync point. We
3248 * do this since once we go to FR something
3249 * we introduce a Karn's rule scenario and
3250 * won't know the totals for the ECN bits.
3252 asoc->nonce_resync_tsn = sending_seq;
3253 asoc->nonce_wait_for_ecne = 0;
3254 asoc->nonce_sum_check = 0;
/*
 * PR-SCTP: try to advance the "advanced peer ack point" past chunks
 * that have been abandoned (FORWARD_TSN_SKIP) or whose PR lifetime has
 * expired while awaiting retransmission.  Walks the sent queue from the
 * front; stops at the first chunk that is reliable or still within its
 * lifetime.  Abandoned chunks have their data freed and the user is
 * notified, but they stay queued so the normal cum-ack path dequeues
 * them later.  Returns a chunk pointer — presumably the last one the
 * ack point advanced over (a_adv); the return statements are elided
 * here, so verify against upstream.
 * NOTE(review): source line numbers jump in this listing, so some
 * statements/braces are elided and not visible here.
 */
3259 struct sctp_tmit_chunk *
3260 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3261 struct sctp_association *asoc)
3263 struct sctp_tmit_chunk *tp1, *tp2, *a_adv=NULL;
/* Nothing to do unless the peer negotiated PR-SCTP. */
3267 if (asoc->peer_supports_prsctp == 0) {
3270 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3272 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3273 tp1->sent != SCTP_DATAGRAM_RESEND) {
3274 /* no chance to advance, out of here */
3277 if ((tp1->flags & SCTP_PR_SCTP_ENABLED) == 0) {
3279 * We can't fwd-tsn past any that are reliable
3280 * aka retransmitted until the asoc fails.
3285 SCTP_GETTIME_TIMEVAL(&now);
/* Save the next link before tp1's data may be released. */
3288 tp2 = TAILQ_NEXT(tp1, sctp_next);
3290 * now we got a chunk which is marked for another
3291 * retransmission to a PR-stream but has run
3292 * out its chances already maybe OR has been
3293 * marked to skip now. Can we skip it if its a
3296 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3297 (tp1->flags & SCTP_PR_SCTP_BUFFER) == 0) {
3299 * Now is this one marked for resend and its time
/* timercmp vs timevalcmp: platform-dependent spelling of the
 * same "now > timetodrop" expiry test. */
3303 if (timercmp(&now, &tp1->rec.data.timetodrop, >))
3305 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3308 /* Yes so drop it */
3310 sctp_release_pr_sctp_chunk(stcb, tp1,
3311 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3316 * No, we are done when hit one for resend whos
3317 * time as not expired.
3323 * Ok now if this chunk is marked to drop it
3324 * we can clean up the chunk, advance our peer ack point
3325 * and we can check the next chunk.
3327 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3328 /* advance PeerAckPoint goes forward */
3329 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3332 * we don't want to de-queue it here. Just wait for the
3333 * next peer SACK to come with a new cumTSN and then
3334 * the chunk will be droped in the normal fashion.
3337 sctp_free_bufspace(stcb, asoc, tp1);
3339 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
3340 kprintf("--total out:%lu total_mbuf_out:%lu\n",
3341 (u_long)asoc->total_output_queue_size,
3342 (u_long)asoc->total_output_mbuf_queue_size);
3346 * Maybe there should be another notification
3349 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3350 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3352 sctp_m_freem(tp1->data);
/* Freeing queued data may unblock a writer. */
3354 sctp_sowwakeup(stcb->sctp_ep,
3358 /* If it is still in RESEND we can advance no further */
3362 * If we hit here we just dumped tp1, move to next
3363 * tsn on sent queue.
3370 #ifdef SCTP_HIGH_SPEED
/*
 * HighSpeed SCTP (per RFC 3649-style HighSpeed TCP) congestion-control
 * lookup table.  Each row gives: the cwnd threshold in KB at which the
 * row applies, the additive increase (in MTU-scaled units, shifted <<10
 * by sctp_hs_cwnd_increase), and the multiplicative-decrease percentage
 * used by sctp_hs_cwnd_decrease.  Rows 0-6 are elided in this listing.
 */
3371 struct sctp_hs_raise_drop {
3374 int32_t drop_percent;
/* Number of rows in sctp_cwnd_adjust below. */
3377 #define SCTP_HS_TABLE_SIZE 73
3379 struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
3387 {1058,8,33}, /* 7 */
3388 {1284,9,32}, /* 8 */
3389 {1529,10,31}, /* 9 */
3390 {1793,11,30}, /* 10 */
3391 {2076,12,29}, /* 11 */
3392 {2378,13,28}, /* 12 */
3393 {2699,14,28}, /* 13 */
3394 {3039,15,27}, /* 14 */
3395 {3399,16,27}, /* 15 */
3396 {3778,17,26}, /* 16 */
3397 {4177,18,26}, /* 17 */
3398 {4596,19,25}, /* 18 */
3399 {5036,20,25}, /* 19 */
3400 {5497,21,24}, /* 20 */
3401 {5979,22,24}, /* 21 */
3402 {6483,23,23}, /* 22 */
3403 {7009,24,23}, /* 23 */
3404 {7558,25,22}, /* 24 */
3405 {8130,26,22}, /* 25 */
3406 {8726,27,22}, /* 26 */
3407 {9346,28,21}, /* 27 */
3408 {9991,29,21}, /* 28 */
3409 {10661,30,21}, /* 29 */
3410 {11358,31,20}, /* 30 */
3411 {12082,32,20}, /* 31 */
3412 {12834,33,20}, /* 32 */
3413 {13614,34,19}, /* 33 */
3414 {14424,35,19}, /* 34 */
3415 {15265,36,19}, /* 35 */
3416 {16137,37,19}, /* 36 */
3417 {17042,38,18}, /* 37 */
3418 {17981,39,18}, /* 38 */
3419 {18955,40,18}, /* 39 */
3420 {19965,41,17}, /* 40 */
3421 {21013,42,17}, /* 41 */
3422 {22101,43,17}, /* 42 */
3423 {23230,44,17}, /* 43 */
3424 {24402,45,16}, /* 44 */
3425 {25618,46,16}, /* 45 */
3426 {26881,47,16}, /* 46 */
3427 {28193,48,16}, /* 47 */
3428 {29557,49,15}, /* 48 */
3429 {30975,50,15}, /* 49 */
3430 {32450,51,15}, /* 50 */
3431 {33986,52,15}, /* 51 */
3432 {35586,53,14}, /* 52 */
3433 {37253,54,14}, /* 53 */
3434 {38992,55,14}, /* 54 */
3435 {40808,56,14}, /* 55 */
3436 {42707,57,13}, /* 56 */
3437 {44694,58,13}, /* 57 */
3438 {46776,59,13}, /* 58 */
3439 {48961,60,13}, /* 59 */
3440 {51258,61,13}, /* 60 */
3441 {53677,62,12}, /* 61 */
3442 {56230,63,12}, /* 62 */
3443 {58932,64,12}, /* 63 */
3444 {61799,65,12}, /* 64 */
3445 {64851,66,11}, /* 65 */
3446 {68113,67,11}, /* 66 */
3447 {71617,68,11}, /* 67 */
3448 {75401,69,10}, /* 68 */
3449 {79517,70,10}, /* 69 */
3450 {84035,71,10}, /* 70 */
3451 {89053,72,10}, /* 71 */
3452 {94717,73,9} /* 72 */
/*
 * HighSpeed variant of congestion-window increase for one net.
 * Below the first table threshold, behave like standard slow start
 * (grow by min(net_ack, mtu) per the two branches); above it, look up
 * the row matching the current cwnd (in KB) and apply that row's
 * additive increase, remembering the row in net->last_hs_used so the
 * next search can start there.
 * NOTE(review): source line numbers jump; some lines are elided.
 */
3456 sctp_hs_cwnd_increase(struct sctp_nets *net)
3458 int cur_val, i, indx, incr;
/* cwnd expressed in kilobytes to index the HS table. */
3460 cur_val = net->cwnd >> 10;
3461 indx = SCTP_HS_TABLE_SIZE - 1;
3463 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
/* Standard slow-start region: at most one MTU per SACK. */
3465 if (net->net_ack > net->mtu) {
3466 net->cwnd += net->mtu;
3467 #ifdef SCTP_CWND_LOGGING
3468 sctp_log_cwnd(net, net->mtu, SCTP_CWND_LOG_FROM_SS);
3471 net->cwnd += net->net_ack;
3472 #ifdef SCTP_CWND_LOGGING
3473 sctp_log_cwnd(net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
/* HS region: find the first row whose threshold exceeds cur_val,
 * starting from the last row used. */
3477 for (i=net->last_hs_used; i<SCTP_HS_TABLE_SIZE; i++) {
3478 if (cur_val < sctp_cwnd_adjust[i].cwnd) {
3483 net->last_hs_used = indx;
/* Table increase value is in KB; scale back to bytes. */
3484 incr = ((sctp_cwnd_adjust[indx].increase) << 10);
3486 #ifdef SCTP_CWND_LOGGING
3487 sctp_log_cwnd(net, incr, SCTP_CWND_LOG_FROM_SS);
/*
 * HighSpeed variant of congestion-window decrease for one net.
 * Below the first table threshold, apply the standard halving
 * (ssthresh = cwnd/2, floored at 2*MTU); otherwise drop cwnd by the
 * table row's drop_percent, then walk net->last_hs_used back down to
 * the row matching the new (smaller) cwnd.
 * NOTE(review): source line numbers jump; some lines are elided.
 */
3493 sctp_hs_cwnd_decrease(struct sctp_nets *net)
3495 int cur_val, i, indx;
3496 #ifdef SCTP_CWND_LOGGING
3497 int old_cwnd = net->cwnd;
/* cwnd expressed in kilobytes to index the HS table. */
3500 cur_val = net->cwnd >> 10;
3501 indx = net->last_hs_used;
3502 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
/* Standard AIMD halving with a 2*MTU floor. */
3504 net->ssthresh = net->cwnd / 2;
3505 if (net->ssthresh < (net->mtu*2)) {
3506 net->ssthresh = 2 * net->mtu;
3508 net->cwnd = net->ssthresh;
3509 #ifdef SCTP_CWND_LOGGING
3510 sctp_log_cwnd(net, (net->cwnd-old_cwnd), SCTP_CWND_LOG_FROM_FR);
3513 /* drop by the proper amount */
3514 net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
3515 sctp_cwnd_adjust[net->last_hs_used].drop_percent);
3516 net->cwnd = net->ssthresh;
3517 /* now where are we */
3518 indx = net->last_hs_used;
3519 cur_val = net->cwnd >> 10;
3520 /* reset where we are in the table */
3521 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
3522 /* feel out of hs */
3523 net->last_hs_used = 0;
/* Still in HS range: search downward for the matching row. */
3525 for (i = indx; i >= 1; i--) {
3526 if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
3530 net->last_hs_used = indx;
3537 sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
3538 struct sctp_nets *net_from, int *abort_now)
3540 struct sctp_association *asoc;
3541 struct sctp_sack *sack;
3542 struct sctp_tmit_chunk *tp1, *tp2;
3543 u_long cum_ack, last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked;
3545 unsigned int sack_length;
3547 int some_on_streamwheel;
3549 int strike_enabled = 0, cnt_of_cacc = 0;
3550 int accum_moved = 0;
3551 int marking_allowed = 1;
3552 int will_exit_fast_recovery=0;
3554 struct sctp_nets *net = NULL;
3555 int nonce_sum_flag, ecn_seg_sums=0;
3559 * Handle the incoming sack on data I have been sending.
3563 * we take any chance we can to service our queues since we
3564 * cannot get awoken when the socket is read from :<
3566 asoc->overall_error_count = 0;
3568 if (asoc->sent_queue_retran_cnt) {
3570 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3571 kprintf("Handling SACK for asoc:%p retran:%d\n",
3572 asoc, asoc->sent_queue_retran_cnt);
3577 sctp_service_queues(stcb, asoc, 0);
3580 * Now perform the actual SACK handling:
3581 * 1) Verify that it is not an old sack, if so discard.
3582 * 2) If there is nothing left in the send queue (cum-ack is equal
3583 * to last acked) then you have a duplicate too, update any rwnd
3584 * change and verify no timers are running. then return.
3585 * 3) Process any new consequtive data i.e. cum-ack moved
3586 * process these first and note that it moved.
3587 * 4) Process any sack blocks.
3588 * 5) Drop any acked from the queue.
3589 * 6) Check for any revoked blocks and mark.
3590 * 7) Update the cwnd.
3591 * 8) Nothing left, sync up flightsizes and things, stop all timers
3592 * and also check for shutdown_pending state. If so then go ahead
3593 * and send off the shutdown. If in shutdown recv, send off the
3594 * shutdown-ack and start that timer, Ret.
3595 * 9) Strike any non-acked things and do FR procedure if needed being
3596 * sure to set the FR flag.
3597 * 10) Do pr-sctp procedures.
3598 * 11) Apply any FR penalties.
3599 * 12) Assure we will SACK if in shutdown_recv state.
3603 sack_length = ntohs(ch->ch.chunk_length);
3604 if (sack_length < sizeof(struct sctp_sack_chunk)) {
3606 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3607 kprintf("Bad size on sack chunk .. to small\n");
3613 nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
3615 cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
3616 num_seg = ntohs(sack->num_gap_ack_blks);
3619 if (TAILQ_EMPTY(&asoc->send_queue)) {
3620 send_s = asoc->sending_seq;
3622 tp1 = TAILQ_FIRST(&asoc->send_queue);
3623 send_s = tp1->rec.data.TSN_seq;
3626 if (sctp_strict_sacks) {
3627 if (cum_ack == send_s ||
3628 compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
3631 * no way, we have not even sent this TSN out yet.
3632 * Peer is hopelessly messed up with us.
3637 MGET(oper, MB_DONTWAIT, MT_DATA);
3639 struct sctp_paramhdr *ph;
3642 oper->m_len = sizeof(struct sctp_paramhdr) +
3644 ph = mtod(oper, struct sctp_paramhdr *);
3645 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3646 ph->param_length = htons(oper->m_len);
3647 ippp = (u_int32_t *)(ph + 1);
3648 *ippp = htonl(0x30000002);
3650 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper);
3654 /* update the Rwnd of the peer */
3655 a_rwnd = (u_int32_t)ntohl(sack->a_rwnd);
3656 if (asoc->sent_queue_retran_cnt) {
3658 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3659 kprintf("cum_ack:%lx num_seg:%u last_acked_seq:%x\n",
3660 cum_ack, (u_int)num_seg, asoc->last_acked_seq);
3664 if (compare_with_wrap(asoc->t3timeout_highest_marked, cum_ack, MAX_TSN)) {
3665 /* we are not allowed to mark for FR */
3666 marking_allowed = 0;
3668 /**********************/
3669 /* 1) check the range */
3670 /**********************/
3671 if (compare_with_wrap(asoc->last_acked_seq, last_tsn, MAX_TSN)) {
3672 /* acking something behind */
3673 if (asoc->sent_queue_retran_cnt) {
3675 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3676 kprintf("The cum-ack is behind us\n");
3683 if (TAILQ_EMPTY(&asoc->sent_queue)) {
3684 /* nothing left on sendqueue.. consider done */
3685 #ifdef SCTP_LOG_RWND
3686 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
3687 asoc->peers_rwnd, 0, 0, a_rwnd);
3689 asoc->peers_rwnd = a_rwnd;
3690 if (asoc->sent_queue_retran_cnt) {
3692 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
3693 kprintf("Huh? retran set but none on queue\n");
3696 asoc->sent_queue_retran_cnt = 0;
3698 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3699 /* SWS sender side engages */
3700 asoc->peers_rwnd = 0;
3702 /* stop any timers */
3703 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3704 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3706 net->partial_bytes_acked = 0;
3707 net->flight_size = 0;
3709 asoc->total_flight = 0;
3710 asoc->total_flight_count = 0;
3714 * We init netAckSz and netAckSz2 to 0. These are used to track 2
3715 * things. The total byte count acked is tracked in netAckSz AND
3716 * netAck2 is used to track the total bytes acked that are un-
3717 * amibguious and were never retransmitted. We track these on a
3718 * per destination address basis.
3720 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3721 net->prev_cwnd = net->cwnd;
3725 /* process the new consecutive TSN first */
3726 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3728 if (compare_with_wrap(last_tsn, tp1->rec.data.TSN_seq,
3730 last_tsn == tp1->rec.data.TSN_seq) {
3731 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
3732 /* ECN Nonce: Add the nonce to the sender's nonce sum */
3733 asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
3735 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3737 * If it is less than ACKED, it is now
3738 * no-longer in flight. Higher values
3739 * may occur during marking
3741 if ((tp1->whoTo->dest_state &
3742 SCTP_ADDR_UNCONFIRMED) &&
3743 (tp1->snd_count < 2) ) {
3745 * If there was no retran and
3746 * the address is un-confirmed
3747 * and we sent there and are
3748 * now sacked.. its confirmed,
3751 tp1->whoTo->dest_state &=
3752 ~SCTP_ADDR_UNCONFIRMED;
3754 tp1->whoTo->flight_size -=
3756 if (tp1->whoTo->flight_size < 0) {
3757 tp1->whoTo->flight_size = 0;
3759 asoc->total_flight -= tp1->book_size;
3760 if (asoc->total_flight < 0) {
3761 asoc->total_flight = 0;
3763 asoc->total_flight_count--;
3764 if (asoc->total_flight_count < 0) {
3765 asoc->total_flight_count = 0;
3767 tp1->whoTo->net_ack += tp1->send_size;
3768 if (tp1->snd_count < 2) {
3769 /* True non-retransmited chunk */
3770 tp1->whoTo->net_ack2 +=
3772 /* update RTO too? */
3775 sctp_calculate_rto(stcb,
3777 &tp1->sent_rcv_time);
3778 tp1->whoTo->rto_pending = 0;
3783 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3785 if (sctp_debug_on & SCTP_DEBUG_INDATA3) {
3786 kprintf("Hmm. one that is in RESEND that is now ACKED\n");
3789 asoc->sent_queue_retran_cnt--;
3790 #ifdef SCTP_AUDITING_ENABLED
3791 sctp_audit_log(0xB3,
3792 (asoc->sent_queue_retran_cnt & 0x000000ff));
3794 if (asoc->sent_queue_retran_cnt < 0) {
3795 kprintf("huh4 retran went negative?\n");
3796 #ifdef SCTP_AUDITING_ENABLED
3797 sctp_auditing(31, inp, tcb,
3800 asoc->sent_queue_retran_cnt = 0;
3805 tp1->sent = SCTP_DATAGRAM_ACKED;
3810 tp1 = TAILQ_NEXT(tp1, sctp_next);
3812 /*******************************************/
3813 /* cancel ALL T3-send timer if accum moved */
3814 /*******************************************/
3816 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3817 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3821 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
3822 /* always set this up to cum-ack */
3823 asoc->this_sack_highest_gap = last_tsn;
3825 if (((num_seg * sizeof (sizeof(struct sctp_gap_ack_block))) + sizeof(struct sctp_sack_chunk)) > sack_length) {
3826 /* skip corrupt segments */
3832 if (asoc->primary_destination->dest_state &
3833 SCTP_ADDR_SWITCH_PRIMARY) {
3834 /* clear the nets CACC flags */
3835 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3836 net->cacc_saw_newack = 0;
3840 * thisSackHighestGap will increase while handling NEW segments
3843 sctp_handle_segments(stcb, asoc, ch, last_tsn,
3844 &biggest_tsn_acked, &biggest_tsn_newly_acked,
3845 num_seg, &ecn_seg_sums);
3847 if (sctp_strict_sacks) {
3848 /* validate the biggest_tsn_acked in the gap acks
3849 * if strict adherence is wanted.
3851 if ((biggest_tsn_acked == send_s) ||
3852 (compare_with_wrap(biggest_tsn_acked, send_s, MAX_TSN))) {
3854 * peer is either confused or we are under
3855 * attack. We must abort.
3861 if (asoc->primary_destination->dest_state &
3862 SCTP_ADDR_SWITCH_PRIMARY) {
3863 /* clear the nets CACC flags */
3864 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3865 if (net->cacc_saw_newack) {
3873 if (cnt_of_cacc < 2) {
3879 /********************************************/
3880 /* drop the acked chunks from the sendqueue */
3881 /********************************************/
3882 asoc->last_acked_seq = cum_ack;
3883 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3884 if ((cum_ack == asoc->primary_destination->next_tsn_at_change) ||
3885 (compare_with_wrap(cum_ack,
3886 asoc->primary_destination->next_tsn_at_change, MAX_TSN))) {
3887 struct sctp_nets *lnet;
3888 /* Turn off the switch flag for ALL addresses */
3889 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
3890 asoc->primary_destination->dest_state &=
3891 ~(SCTP_ADDR_SWITCH_PRIMARY|SCTP_ADDR_DOUBLE_SWITCH);
3895 /* Drag along the t3 timeout point so we don't have a problem at wrap */
3896 if (marking_allowed) {
3897 asoc->t3timeout_highest_marked = cum_ack;
3899 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3901 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
3905 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3906 /* no more sent on list */
3909 tp2 = TAILQ_NEXT(tp1, sctp_next);
3910 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3912 sctp_free_bufspace(stcb, asoc, tp1);
3914 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
3915 kprintf("--total out:%lu total_mbuf_out:%lu\n",
3916 (u_long)asoc->total_output_queue_size,
3917 (u_long)asoc->total_output_mbuf_queue_size);
3921 sctp_m_freem(tp1->data);
3922 if (tp1->flags & SCTP_PR_SCTP_BUFFER) {
3923 asoc->sent_queue_cnt_removeable--;
3928 asoc->sent_queue_cnt--;
3929 sctp_free_remote_addr(tp1->whoTo);
3930 sctppcbinfo.ipi_count_chunk--;
3931 asoc->chunks_on_out_queue--;
3933 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
3934 panic("Chunk count is going negative");
3936 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, tp1);
3937 sctppcbinfo.ipi_gencnt_chunk++;
3938 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
3940 } while (tp1 != NULL);
3943 if (asoc->fast_retran_loss_recovery && accum_moved) {
3944 if (compare_with_wrap(asoc->last_acked_seq,
3945 asoc->fast_recovery_tsn, MAX_TSN) ||
3946 asoc->last_acked_seq == asoc->fast_recovery_tsn) {
3947 /* Setup so we will exit RFC2582 fast recovery */
3948 will_exit_fast_recovery = 1;
3952 /* Check for revoked fragments if we hand
3953 * fragments in a previous segment. If we
3954 * had no previous fragments we cannot have
3957 if (asoc->saw_sack_with_frags)
3958 sctp_check_for_revoked(asoc, cum_ack, biggest_tsn_acked);
3961 asoc->saw_sack_with_frags = 1;
3963 asoc->saw_sack_with_frags = 0;
3965 /******************************/
3967 /******************************/
3968 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3969 /* if nothing was acked on this destination skip it */
3970 if (net->net_ack == 0)
3973 if (net->net_ack2 > 0) {
3975 * Karn's rule applies to clearing error count,
3978 net->error_count = 0;
3979 if ((net->dest_state&SCTP_ADDR_NOT_REACHABLE) ==
3980 SCTP_ADDR_NOT_REACHABLE) {
3981 /* addr came good */
3982 net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
3983 net->dest_state |= SCTP_ADDR_REACHABLE;
3984 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3985 SCTP_RECEIVED_SACK, (void *)net);
3986 /* now was it the primary? if so restore */
3987 if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
3988 sctp_set_primary_addr(stcb, NULL, net);
3993 if (asoc->fast_retran_loss_recovery &&
3994 will_exit_fast_recovery == 0) {
3995 /* If we are in loss recovery we skip any cwnd update */
3996 sctp_pegs[SCTP_CWND_SKIP]++;
3997 goto skip_cwnd_update;
4000 /* If the cumulative ack moved we can proceed */
4001 if (net->cwnd <= net->ssthresh) {
4002 /* We are in slow start */
4003 if (net->flight_size + net->net_ack >=
4005 #ifdef SCTP_HIGH_SPEED
4006 sctp_hs_cwnd_increase(net);
4008 if (net->net_ack > net->mtu) {
4009 net->cwnd += net->mtu;
4010 #ifdef SCTP_CWND_LOGGING
4011 sctp_log_cwnd(net, net->mtu,
4012 SCTP_CWND_LOG_FROM_SS);
4016 net->cwnd += net->net_ack;
4017 #ifdef SCTP_CWND_LOGGING
4018 sctp_log_cwnd(net, net->net_ack,
4019 SCTP_CWND_LOG_FROM_SS);
4024 sctp_pegs[SCTP_CWND_SS]++;
4027 sctp_pegs[SCTP_CWND_NOUSE_SS]++;
4028 dif = net->cwnd - (net->flight_size +
4030 #ifdef SCTP_CWND_LOGGING
4031 /* sctp_log_cwnd(net, net->net_ack,
4032 SCTP_CWND_LOG_NOADV_SS);*/
4034 if (dif > sctp_pegs[SCTP_CWND_DIFF_SA]) {
4035 sctp_pegs[SCTP_CWND_DIFF_SA] =
4037 sctp_pegs[SCTP_OQS_AT_SS] =
4038 asoc->total_output_queue_size;
4039 sctp_pegs[SCTP_SQQ_AT_SS] =
4040 asoc->sent_queue_cnt;
4041 sctp_pegs[SCTP_SQC_AT_SS] =
4042 asoc->send_queue_cnt;
4046 /* We are in congestion avoidance */
4047 if (net->flight_size + net->net_ack >=
4050 * add to pba only if we had a cwnd's
4051 * worth (or so) in flight OR the
4052 * burst limit was applied.
4054 net->partial_bytes_acked +=
4058 * Do we need to increase
4059 * (if pba is > cwnd)?
4061 if (net->partial_bytes_acked >=
4064 net->partial_bytes_acked) {
4065 net->partial_bytes_acked -=
4068 net->partial_bytes_acked =
4071 net->cwnd += net->mtu;
4072 #ifdef SCTP_CWND_LOGGING
4073 sctp_log_cwnd(net, net->mtu,
4074 SCTP_CWND_LOG_FROM_CA);
4076 sctp_pegs[SCTP_CWND_CA]++;
4080 sctp_pegs[SCTP_CWND_NOUSE_CA]++;
4081 #ifdef SCTP_CWND_LOGGING
4082 /* sctp_log_cwnd(net, net->net_ack,
4083 SCTP_CWND_LOG_NOADV_CA);
4086 dif = net->cwnd - (net->flight_size +
4088 if (dif > sctp_pegs[SCTP_CWND_DIFF_CA]) {
4089 sctp_pegs[SCTP_CWND_DIFF_CA] =
4091 sctp_pegs[SCTP_OQS_AT_CA] =
4092 asoc->total_output_queue_size;
4093 sctp_pegs[SCTP_SQQ_AT_CA] =
4094 asoc->sent_queue_cnt;
4095 sctp_pegs[SCTP_SQC_AT_CA] =
4096 asoc->send_queue_cnt;
4103 sctp_pegs[SCTP_CWND_NOCUM]++;
4107 * NOW, according to Karn's rule do we need to restore the
4108 * RTO timer back? Check our net_ack2. If not set then we
4109 * have a ambiguity.. i.e. all data ack'd was sent to more
4113 if (net->net_ack2) {
4114 /* restore any doubled timers */
4115 net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
4116 if (net->RTO < stcb->asoc.minrto) {
4117 net->RTO = stcb->asoc.minrto;
4119 if (net->RTO > stcb->asoc.maxrto) {
4120 net->RTO = stcb->asoc.maxrto;
4123 if (net->cwnd > sctp_pegs[SCTP_MAX_CWND]) {
4124 sctp_pegs[SCTP_MAX_CWND] = net->cwnd;
4127 /**********************************/
4128 /* Now what about shutdown issues */
4129 /**********************************/
4130 some_on_streamwheel = 0;
4131 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
4132 /* Check to see if some data queued */
4133 struct sctp_stream_out *outs;
4134 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
4135 if (!TAILQ_EMPTY(&outs->outqueue)) {
4136 some_on_streamwheel = 1;
4141 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue) &&
4142 some_on_streamwheel == 0) {
4143 /* nothing left on sendqueue.. consider done */
4144 /* stop all timers */
4145 #ifdef SCTP_LOG_RWND
4146 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4147 asoc->peers_rwnd, 0, 0, a_rwnd);
4149 asoc->peers_rwnd = a_rwnd;
4150 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4151 /* SWS sender side engages */
4152 asoc->peers_rwnd = 0;
4154 /* stop any timers */
4155 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4156 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4158 net->flight_size = 0;
4159 net->partial_bytes_acked = 0;
4161 asoc->total_flight = 0;
4162 asoc->total_flight_count = 0;
4164 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
4165 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
4167 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
4168 kprintf("%s:%d sends a shutdown\n",
4174 sctp_send_shutdown(stcb,
4175 stcb->asoc.primary_destination);
4176 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4177 stcb->sctp_ep, stcb, asoc->primary_destination);
4178 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4179 stcb->sctp_ep, stcb, asoc->primary_destination);
4180 } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) {
4181 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;
4183 sctp_send_shutdown_ack(stcb,
4184 stcb->asoc.primary_destination);
4186 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4187 stcb->sctp_ep, stcb, asoc->primary_destination);
4192 * Now here we are going to recycle net_ack for a different
4195 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4198 if ((num_seg > 0) && marking_allowed) {
4199 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4200 strike_enabled, biggest_tsn_newly_acked, accum_moved);
4203 /*********************************************/
4204 /* Here we perform PR-SCTP procedures */
4206 /*********************************************/
4207 /* C1. update advancedPeerAckPoint */
4208 if (compare_with_wrap(cum_ack, asoc->advanced_peer_ack_point, MAX_TSN)) {
4209 asoc->advanced_peer_ack_point = cum_ack;
4211 /* C2. try to further move advancedPeerAckPoint ahead */
4212 if (asoc->peer_supports_prsctp) {
4213 struct sctp_tmit_chunk *lchk;
4214 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4215 /* C3. See if we need to send a Fwd-TSN */
4216 if (compare_with_wrap(asoc->advanced_peer_ack_point, cum_ack,
4219 * ISSUE with ECN, see FWD-TSN processing for notes
4220 * on issues that will occur when the ECN NONCE stuff
4221 * is put into SCTP for cross checking.
4223 send_forward_tsn(stcb, asoc);
4225 /* ECN Nonce: Disable Nonce Sum check when FWD TSN is sent and store resync tsn*/
4226 asoc->nonce_sum_check = 0;
4227 asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
4229 /* Assure a timer is up */
4230 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4231 stcb->sctp_ep, stcb, lchk->whoTo);
4235 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4236 if (asoc->fast_retran_loss_recovery == 0) {
4237 /* out of a RFC2582 Fast recovery window? */
4238 if (net->net_ack > 0) {
4240 * per section 7.2.3, are there
4241 * any destinations that had a fast
4242 * retransmit to them. If so what we
4243 * need to do is adjust ssthresh and
4246 struct sctp_tmit_chunk *lchk;
4247 #ifdef SCTP_HIGH_SPEED
4248 sctp_hs_cwnd_decrease(net);
4250 #ifdef SCTP_CWND_LOGGING
4251 int old_cwnd = net->cwnd;
4253 net->ssthresh = net->cwnd / 2;
4254 if (net->ssthresh < (net->mtu*2)) {
4255 net->ssthresh = 2 * net->mtu;
4257 net->cwnd = net->ssthresh;
4258 #ifdef SCTP_CWND_LOGGING
4259 sctp_log_cwnd(net, (net->cwnd-old_cwnd),
4260 SCTP_CWND_LOG_FROM_FR);
4264 lchk = TAILQ_FIRST(&asoc->send_queue);
4266 net->partial_bytes_acked = 0;
4267 /* Turn on fast recovery window */
4268 asoc->fast_retran_loss_recovery = 1;
4270 /* Mark end of the window */
4271 asoc->fast_recovery_tsn = asoc->sending_seq - 1;
4273 asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
4277 /* Disable Nonce Sum Checking and store the resync tsn*/
4278 asoc->nonce_sum_check = 0;
4279 asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;
4281 sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
4282 stcb->sctp_ep, stcb, net);
4283 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4284 stcb->sctp_ep, stcb, net);
4286 } else if (net->net_ack > 0) {
4288 * Mark a peg that we WOULD have done a cwnd reduction
4289 * but RFC2582 prevented this action.
4291 sctp_pegs[SCTP_FR_INAWINDOW]++;
4296 /******************************************************************
4297 * Here we do the stuff with ECN Nonce checking.
4298 * We basically check to see if the nonce sum flag was incorrect
4299 * or if resynchronization needs to be done. Also if we catch a
4300 * misbehaving receiver we give him the kick.
4301 ******************************************************************/
4303 if (asoc->ecn_nonce_allowed) {
4304 if (asoc->nonce_sum_check) {
4305 if (nonce_sum_flag != ((asoc->nonce_sum_expect_base + ecn_seg_sums) & SCTP_SACK_NONCE_SUM)) {
4306 if (asoc->nonce_wait_for_ecne == 0) {
4307 struct sctp_tmit_chunk *lchk;
4308 lchk = TAILQ_FIRST(&asoc->send_queue);
4309 asoc->nonce_wait_for_ecne = 1;
4311 asoc->nonce_wait_tsn = lchk->rec.data.TSN_seq;
4313 asoc->nonce_wait_tsn = asoc->sending_seq;
4316 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_wait_tsn, MAX_TSN) ||
4317 (asoc->last_acked_seq == asoc->nonce_wait_tsn)) {
4318 /* Misbehaving peer. We need to react to this guy */
4319 kprintf("Mis-behaving peer detected\n");
4320 asoc->ecn_allowed = 0;
4321 asoc->ecn_nonce_allowed = 0;
4326 /* See if Resynchronization Possible */
4327 if (compare_with_wrap(asoc->last_acked_seq, asoc->nonce_resync_tsn, MAX_TSN)) {
4328 asoc->nonce_sum_check = 1;
4329 /* now we must calculate what the base
4330 * is. We do this based on two things, we know
4331 * the total's for all the segments gap-acked
4332 * in the SACK, its stored in ecn_seg_sums.
4333 * We also know the SACK's nonce sum, its
4334 * in nonce_sum_flag. So we can build a truth
4335 * table to back-calculate the new value of asoc->nonce_sum_expect_base:
4337 * SACK-flag-Value Seg-Sums Base
4343 asoc->nonce_sum_expect_base = (ecn_seg_sums ^ nonce_sum_flag) & SCTP_SACK_NONCE_SUM;
4347 /* Now are we exiting loss recovery ? */
4348 if (will_exit_fast_recovery) {
4349 /* Ok, we must exit fast recovery */
4350 asoc->fast_retran_loss_recovery = 0;
4352 if ((asoc->sat_t3_loss_recovery) &&
4353 ((compare_with_wrap(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn,
4355 (asoc->last_acked_seq == asoc->sat_t3_recovery_tsn)))) {
4356 /* end satellite t3 loss recovery */
4357 asoc->sat_t3_loss_recovery = 0;
4359 /* Adjust and set the new rwnd value */
4360 #ifdef SCTP_LOG_RWND
4361 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4362 asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * sctp_peer_chunk_oh), a_rwnd);
4365 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4366 (u_int32_t)(asoc->total_flight + (asoc->sent_queue_cnt * sctp_peer_chunk_oh)));
4367 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4368 /* SWS sender side engages */
4369 asoc->peers_rwnd = 0;
4372 * Now we must setup so we have a timer up for anyone with
4375 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4376 struct sctp_tmit_chunk *chk;
4377 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4378 if (chk->whoTo == net &&
4379 (chk->sent < SCTP_DATAGRAM_ACKED ||
4380 chk->sent == SCTP_FORWARD_TSN_SKIP)) {
4382 * Not ack'ed and still outstanding to this
4383 * destination or marked and must be
4384 * sacked after fwd-tsn sent.
4386 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4387 stcb->sctp_ep, stcb, net);
4395 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp,
4396 struct sctp_nets *netp, int *abort_flag)
4398 /* Mutate a shutdown into a SACK */
4399 struct sctp_sack_chunk sack;
4402 sack.sack.cum_tsn_ack = cp->cumulative_tsn_ack;
4403 /* Arrange so a_rwnd does NOT change */
4404 sack.ch.chunk_type = SCTP_SELECTIVE_ACK;
4405 sack.ch.chunk_flags = 0;
4406 sack.ch.chunk_length = ntohs(sizeof(struct sctp_sack_chunk));
4408 htonl(stcb->asoc.peers_rwnd + stcb->asoc.total_flight);
4410 * no gaps in this one. This may cause a temporal view to reneging,
4411 * but hopefully the second chunk is a true SACK in the packet and
4412 * will correct this view. One will come soon after no matter what
4415 sack.sack.num_gap_ack_blks = 0;
4416 sack.sack.num_dup_tsns = 0;
4417 /* Now call the SACK processor */
4418 sctp_handle_sack(&sack, stcb, netp, abort_flag);
/*
 * sctp_kick_prsctp_reorder_queue() -- after a FWD-TSN has advanced a
 * stream's delivery point, flush that stream's re-ordering queue:
 * first hand up every chunk at or before the new last-delivered
 * sequence, then continue delivering any chunks that have become
 * exactly in-order as a result.
 *
 * NOTE(review): this excerpt is a sampled listing -- the while-loop /
 * else / brace scaffolding of both delivery loops is not visible here,
 * so the comments below annotate only the visible statements.
 */
4422 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4423 struct sctp_stream_in *strmin)
4425 struct sctp_tmit_chunk *chk, *nchk;
4426 struct sctp_association *asoc;
/* start from the stream's (already advanced) delivery point */
4430 tt = strmin->last_sequence_delivered;
4432 * First deliver anything prior to and including the stream no that
4435 chk = TAILQ_FIRST(&strmin->inqueue);
/* remember the successor before the current chunk is unlinked */
4437 nchk = TAILQ_NEXT(chk, sctp_next);
/* deliverable if its stream seq is at or before tt (seq-wrap aware) */
4438 if (compare_with_wrap(tt, chk->rec.data.stream_seq, MAX_SEQ) ||
4439 (tt == chk->rec.data.stream_seq)) {
4440 /* this is deliverable now */
4441 TAILQ_REMOVE(&strmin->inqueue, chk, sctp_next);
4442 /* subtract pending on streams */
4443 asoc->size_on_all_streams -= chk->send_size;
4444 asoc->cnt_on_all_streams--;
4445 /* deliver it to at least the delivery-q */
4446 sctp_deliver_data(stcb, &stcb->asoc, chk, 0);
/* queue is ordered, so the first non-deliverable chunk ends pass one */
4448 /* no more delivery now. */
4454 * now we must deliver things in queue the normal way if any
/* second pass: expect the very next in-order sequence number */
4457 tt = strmin->last_sequence_delivered + 1;
4458 chk = TAILQ_FIRST(&strmin->inqueue);
4460 nchk = TAILQ_NEXT(chk, sctp_next);
/* only an exact sequence match is deliverable in this pass */
4461 if (tt == chk->rec.data.stream_seq) {
4462 /* this is deliverable now */
4463 TAILQ_REMOVE(&strmin->inqueue, chk, sctp_next);
4464 /* subtract pending on streams */
4465 asoc->size_on_all_streams -= chk->send_size;
4466 asoc->cnt_on_all_streams--;
4467 /* deliver it to at least the delivery-q */
/* advance the stream's delivery point to the chunk just handed up */
4468 strmin->last_sequence_delivered =
4469 chk->rec.data.stream_seq;
4470 sctp_deliver_data(stcb, &stcb->asoc, chk, 0);
/* recompute the next expected sequence and keep draining */
4471 tt = strmin->last_sequence_delivered + 1;
4481 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
4482 struct sctp_forward_tsn_chunk *fwd, int *abort_flag)
4485 * ISSUES that MUST be fixed for ECN! When we are the
4486 * sender of the forward TSN, when the SACK comes back
4487 * that acknowledges the FWD-TSN we must reset the
4488 * NONCE sum to match correctly. This will get quite
4489 * tricky since we may have sent more data interveneing and
4490 * must carefully account for what the SACK says on the
4491 * nonce and any gaps that are reported. This work
4492 * will NOT be done here, but I note it here since
4493 * it is really related to PR-SCTP and FWD-TSN's
4496 /* The pr-sctp fwd tsn */
4498 * here we will perform all the data receiver side steps for
4499 * processing FwdTSN, as required in by pr-sctp draft:
4501 * Assume we get FwdTSN(x):
4503 * 1) update local cumTSN to x
4504 * 2) try to further advance cumTSN to x + others we have
4505 * 3) examine and update re-ordering queue on pr-in-streams
4506 * 4) clean up re-assembly queue
4507 * 5) Send a sack to report where we are.
4509 struct sctp_strseq *stseq;
4510 struct sctp_association *asoc;
4511 u_int32_t new_cum_tsn, gap, back_out_htsn;
4512 unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
4513 struct sctp_stream_in *strm;
4514 struct sctp_tmit_chunk *chk, *at;
4516 cumack_set_flag = 0;
4519 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
4521 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4522 kprintf("Bad size too small/big fwd-tsn\n");
4527 m_size = (stcb->asoc.mapping_array_size << 3);
4528 /*************************************************************/
4529 /* 1. Here we update local cumTSN and shift the bitmap array */
4530 /*************************************************************/
4531 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
4533 if (compare_with_wrap(asoc->cumulative_tsn, new_cum_tsn, MAX_TSN) ||
4534 asoc->cumulative_tsn == new_cum_tsn) {
4535 /* Already got there ... */
4539 back_out_htsn = asoc->highest_tsn_inside_map;
4540 if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map,
4542 asoc->highest_tsn_inside_map = new_cum_tsn;
4543 #ifdef SCTP_MAP_LOGGING
4544 sctp_log_map(0, 0, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
4548 * now we know the new TSN is more advanced, let's find the
4551 if ((compare_with_wrap(new_cum_tsn, asoc->mapping_array_base_tsn,
4553 (new_cum_tsn == asoc->mapping_array_base_tsn)) {
4554 gap = new_cum_tsn - asoc->mapping_array_base_tsn;
4556 /* try to prevent underflow here */
4557 gap = new_cum_tsn + (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
4560 if (gap > m_size || gap < 0) {
4561 asoc->highest_tsn_inside_map = back_out_htsn;
4562 if ((long)gap > sctp_sbspace(&stcb->sctp_socket->so_rcv)) {
4564 * out of range (of single byte chunks in the rwnd I
4566 * too questionable. better to drop it silently
4570 if (asoc->highest_tsn_inside_map >
4571 asoc->mapping_array_base_tsn) {
4572 gap = asoc->highest_tsn_inside_map -
4573 asoc->mapping_array_base_tsn;
4575 gap = asoc->highest_tsn_inside_map +
4576 (MAX_TSN - asoc->mapping_array_base_tsn) + 1;
4578 cumack_set_flag = 1;
4580 for (i = 0; i <= gap; i++) {
4581 SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
4584 * Now after marking all, slide thing forward but no
4587 sctp_sack_check(stcb, 0, 0, abort_flag);
4591 if (cumack_set_flag) {
4593 * fwd-tsn went outside my gap array - not a
4594 * common occurance. Do the same thing we
4595 * do when a cookie-echo arrives.
4597 asoc->highest_tsn_inside_map = new_cum_tsn - 1;
4598 asoc->mapping_array_base_tsn = new_cum_tsn;
4599 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
4600 #ifdef SCTP_MAP_LOGGING
4601 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
4603 asoc->last_echo_tsn = asoc->highest_tsn_inside_map;
4605 /*************************************************************/
4606 /* 2. Clear up re-assembly queue */
4607 /*************************************************************/
4610 * First service it if pd-api is up, just in case we can
4611 * progress it forward
4613 if (asoc->fragmented_delivery_inprogress) {
4614 sctp_service_reassembly(stcb, asoc, 0);
4616 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
4617 /* For each one on here see if we need to toss it */
4619 * For now large messages held on the reasmqueue that are
4620 * complete will be tossed too. We could in theory do more
4621 * work to spin through and stop after dumping one msg
4622 * aka seeing the start of a new msg at the head, and call
4623 * the delivery function... to see if it can be delivered...
4624 * But for now we just dump everything on the queue.
4626 chk = TAILQ_FIRST(&asoc->reasmqueue);
4628 at = TAILQ_NEXT(chk, sctp_next);
4629 if (compare_with_wrap(asoc->cumulative_tsn,
4630 chk->rec.data.TSN_seq, MAX_TSN) ||
4631 asoc->cumulative_tsn == chk->rec.data.TSN_seq) {
4632 /* It needs to be tossed */
4633 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
4634 if (compare_with_wrap(chk->rec.data.TSN_seq,
4635 asoc->tsn_last_delivered, MAX_TSN)) {
4636 asoc->tsn_last_delivered =
4637 chk->rec.data.TSN_seq;
4638 asoc->str_of_pdapi =
4639 chk->rec.data.stream_number;
4640 asoc->ssn_of_pdapi =
4641 chk->rec.data.stream_seq;
4642 asoc->fragment_flags =
4643 chk->rec.data.rcv_flags;
4645 asoc->size_on_reasm_queue -= chk->send_size;
4646 asoc->cnt_on_reasm_queue--;
4649 /* Clear up any stream problem */
4650 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
4651 SCTP_DATA_UNORDERED &&
4652 (compare_with_wrap(chk->rec.data.stream_seq,
4653 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
4656 * We must dump forward this streams
4657 * sequence number if the chunk is not
4658 * unordered that is being skipped.
4659 * There is a chance that if the peer
4660 * does not include the last fragment
4661 * in its FWD-TSN we WILL have a problem
4662 * here since you would have a partial
4663 * chunk in queue that may not be
4665 * Also if a Partial delivery API as
4666 * started the user may get a partial
4667 * chunk. The next read returning a new
4668 * chunk... really ugly but I see no way
4669 * around it! Maybe a notify??
4671 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
4672 chk->rec.data.stream_seq;
4675 sctp_m_freem(chk->data);
4678 sctp_free_remote_addr(chk->whoTo);
4679 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
4680 sctppcbinfo.ipi_count_chunk--;
4681 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
4682 panic("Chunk count is negative");
4684 sctppcbinfo.ipi_gencnt_chunk++;
4687 * Ok we have gone beyond the end of the
4688 * fwd-tsn's mark. Some checks...
4690 if ((asoc->fragmented_delivery_inprogress) &&
4691 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
4692 /* Special case PD-API is up and what we fwd-tsn'
4693 * over includes one that had the LAST_FRAG. We
4694 * no longer need to do the PD-API.
4696 asoc->fragmented_delivery_inprogress = 0;
4697 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
4698 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, NULL);
4706 if (asoc->fragmented_delivery_inprogress) {
4708 * Ok we removed cnt_gone chunks in the PD-API queue that
4709 * were being delivered. So now we must turn off the
4712 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
4713 stcb, SCTP_PARTIAL_DELIVERY_ABORTED, NULL);
4714 asoc->fragmented_delivery_inprogress = 0;
4716 /*************************************************************/
4717 /* 3. Update the PR-stream re-ordering queues */
4718 /*************************************************************/
4719 stseq = (struct sctp_strseq *)((caddr_t)fwd + sizeof(*fwd));
4720 fwd_sz -= sizeof(*fwd);
4724 num_str = fwd_sz/sizeof(struct sctp_strseq);
4726 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4727 kprintf("Using NEW method, %d strseq's reported in FWD-TSN\n",
4731 for (i = 0; i < num_str; i++) {
4735 xx = (unsigned char *)&stseq[i];
4736 st = ntohs(stseq[i].stream);
4737 stseq[i].stream = st;
4738 st = ntohs(stseq[i].sequence);
4739 stseq[i].sequence = st;
4741 if (stseq[i].stream > asoc->streamincnt) {
4743 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
4744 kprintf("Bogus stream number %d "
4745 "streamincnt is %d\n",
4746 stseq[i].stream, asoc->streamincnt);
4750 * It is arguable if we should continue. Since
4751 * the peer sent bogus stream info we may be in
4753 * a return may be a better choice?
4757 strm = &asoc->strmin[stseq[i].stream];
4758 if (compare_with_wrap(stseq[i].sequence,
4759 strm->last_sequence_delivered, MAX_SEQ)) {
4760 /* Update the sequence number */
4761 strm->last_sequence_delivered =
4764 /* now kick the stream the new way */
4765 sctp_kick_prsctp_reorder_queue(stcb, strm);