/* $KAME: sctp_indata.c,v 1.35 2004/08/17 04:06:17 itojun Exp $ */

/*
 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#if !(defined(__OpenBSD__) || defined(__APPLE__))
#include "opt_ipsec.h"
#if defined(__FreeBSD__) || defined(__DragonFly__)
#include "opt_inet6.h"
#if defined(__NetBSD__)
#elif !defined(__OpenBSD__)
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <net/route.h>
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
#include <sys/limits.h>
#include <machine/limits.h>
#include <machine/cpu.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp_var.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_hashdriver.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet6/ipsec.h>
#include <netproto/key/key.h>
#include <net/net_osdep.h>
extern u_int32_t sctp_debug_on;

/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is, and will be sending it ... for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */

extern int sctp_strict_sacks;
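/*
 * Descriptive note (not in the original source): sctp_set_rwnd recomputes
 * our advertised receive window.  Judging from the code below, roughly:
 *
 *	my_rwnd = sbspace(so_rcv)
 *	        - size_on_delivery_queue
 *	        - size_on_reasm_queue
 *	        - size_on_all_streams
 *
 * i.e. socket-buffer space minus everything we hold but have not yet handed
 * to the application, clamped to 0 when the SWS receiver threshold engages.
 */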
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
    u_int32_t calc, calc_w_oh;

    if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
        kprintf("cc:%lu hiwat:%lu lowat:%lu mbcnt:%lu mbmax:%lu\n",
            (u_long)stcb->sctp_socket->so_rcv.ssb_cc,
            (u_long)stcb->sctp_socket->so_rcv.ssb_hiwat,
            (u_long)stcb->sctp_socket->so_rcv.ssb_lowat,
            (u_long)stcb->sctp_socket->so_rcv.ssb_mbcnt,
            (u_long)stcb->sctp_socket->so_rcv.ssb_mbmax);
        kprintf("Setting rwnd to: sb:%ld - (del:%d + reasm:%d str:%d)\n",
            sctp_sbspace(&stcb->sctp_socket->so_rcv),
            asoc->size_on_delivery_queue,
            asoc->size_on_reasm_queue,
            asoc->size_on_all_streams);
    if (stcb->sctp_socket->so_rcv.ssb_cc == 0 &&
        asoc->size_on_delivery_queue == 0 &&
        asoc->size_on_reasm_queue == 0 &&
        asoc->size_on_all_streams == 0) {
        /* Full rwnd granted */
        asoc->my_rwnd = max(stcb->sctp_socket->so_rcv.ssb_hiwat,
    /* get actual space */
    calc = (u_int32_t)sctp_sbspace(&stcb->sctp_socket->so_rcv);
    /*
     * take out what has NOT been put on socket queue and
     * we yet hold for putting up.
     */
    calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_delivery_queue);
    calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_reasm_queue);
    calc = sctp_sbspace_sub(calc, (u_int32_t)asoc->size_on_all_streams);
    /* what is the overhead of all these rwnd's */
    calc_w_oh = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
    asoc->my_rwnd = calc;
    if (calc_w_oh == 0) {
        /*
         * If our overhead is greater than the advertised
         * rwnd, we clamp the rwnd to 1. This lets us
         * still accept inbound segments, but hopefully will
         * shut the sender down when he finally gets the message.
         */
        (asoc->my_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_receiver)) {
        /* SWS engaged, tell peer none left */
        if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
            kprintf(" - SWS zeros\n");
    if (sctp_debug_on & SCTP_DEBUG_INDATA4) {
/*
 * Take a chk structure and build it into an mbuf. Hmm, should we change
 * things so that instead we store the data side in a chunk?
 */
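/*
 * Illustrative sketch (not part of the original source): the cmsg built
 * below is what a userland receiver sees via recvmsg().  A hypothetical
 * consumer would walk the ancillary data like this:
 *
 *	struct msghdr msg;		// filled in and passed to recvmsg()
 *	struct cmsghdr *c;
 *	struct sctp_sndrcvinfo *info;
 *
 *	for (c = CMSG_FIRSTHDR(&msg); c != NULL; c = CMSG_NXTHDR(&msg, c)) {
 *		if (c->cmsg_level == IPPROTO_SCTP &&
 *		    c->cmsg_type == SCTP_SNDRCV) {
 *			info = (struct sctp_sndrcvinfo *)CMSG_DATA(c);
 *			// info->sinfo_stream, info->sinfo_ssn, info->sinfo_tsn ...
 *		}
 *	}
 */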
sctp_build_ctl_nchunk(struct sctp_tcb *stcb, uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no, uint16_t stream_seq, uint8_t flags)
    struct sctp_sndrcvinfo *outinfo;

    if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) == 0) {
        /* user does not want the sndrcv ctl */
    MGETHDR(ret, MB_DONTWAIT, MT_CONTROL);
    /* We need a CMSG header followed by the struct */
    cmh = mtod(ret, struct cmsghdr *);
    outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
    cmh->cmsg_level = IPPROTO_SCTP;
    cmh->cmsg_type = SCTP_SNDRCV;
    cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
    outinfo->sinfo_stream = stream_no;
    outinfo->sinfo_ssn = stream_seq;
    if (flags & SCTP_DATA_UNORDERED) {
        outinfo->sinfo_flags = MSG_UNORDERED;
        outinfo->sinfo_flags = 0;
    outinfo->sinfo_ppid = ppid;
    outinfo->sinfo_context = context;
    outinfo->sinfo_assoc_id = sctp_get_associd(stcb);
    outinfo->sinfo_tsn = tsn;
    outinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
    ret->m_len = cmh->cmsg_len;
    ret->m_pkthdr.len = ret->m_len;
    /*
     * We track how many control len's have gone upon the sb
     * and do not count these in the rwnd calculation.
     */
    stcb->asoc.my_rwnd_control_len +=
        CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
/*
 * Take a chk structure and build it into an mbuf. Should we change things
 * so that instead we store the data side in a chunk?
 */
sctp_build_ctl(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk)
    struct sctp_sndrcvinfo *outinfo;

    if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) == 0) {
        /* user does not want the sndrcv ctl */
    MGET(ret, MB_DONTWAIT, MT_CONTROL);
    /* We need a CMSG header followed by the struct */
    cmh = mtod(ret, struct cmsghdr *);
    outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
    cmh->cmsg_level = IPPROTO_SCTP;
    cmh->cmsg_type = SCTP_SNDRCV;
    cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
    outinfo->sinfo_stream = chk->rec.data.stream_number;
    outinfo->sinfo_ssn = chk->rec.data.stream_seq;
    if (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
        outinfo->sinfo_flags = MSG_UNORDERED;
        outinfo->sinfo_flags = 0;
    outinfo->sinfo_ppid = chk->rec.data.payloadtype;
    outinfo->sinfo_context = chk->rec.data.context;
    outinfo->sinfo_assoc_id = sctp_get_associd(stcb);
    outinfo->sinfo_tsn = chk->rec.data.TSN_seq;
    outinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
    ret->m_len = cmh->cmsg_len;
    stcb->asoc.my_rwnd_control_len +=
        CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
sctp_deliver_data(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_locks)
    struct mbuf *control, *m;
    struct sockaddr_in6 sin6;

    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
        kprintf("I am now in Deliver data! (%p)\n", chk);
    /* get a write lock on the inp if not already */
    if (hold_locks == 0) {
        SCTP_TCB_UNLOCK(stcb);
        SCTP_INP_WLOCK(stcb->sctp_ep);
    /* We always add it to the queue */
    if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
        /* socket above is long gone */
        if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
            kprintf("gone is gone!\n");
            sctp_m_freem(chk->data);
            sctp_free_remote_addr(chk->whoTo);
            SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
            sctppcbinfo.ipi_count_chunk--;
            if ((int)sctppcbinfo.ipi_count_chunk < 0) {
                panic("Chunk count is negative");
            sctppcbinfo.ipi_gencnt_chunk++;
        TAILQ_FOREACH(chk, &asoc->delivery_queue, sctp_next) {
            asoc->size_on_delivery_queue -= chk->send_size;
            asoc->cnt_on_delivery_queue--;
            /*
             * Lose the data pointer, since it's in the socket buffer
             */
            sctp_m_freem(chk->data);
            /* Now free the address and data */
            sctp_free_remote_addr(chk->whoTo);
            SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
            sctppcbinfo.ipi_count_chunk--;
            if ((int)sctppcbinfo.ipi_count_chunk < 0) {
                panic("Chunk count is negative");
            sctppcbinfo.ipi_gencnt_chunk++;
        SCTP_INP_WUNLOCK(stcb->sctp_ep);
    TAILQ_INSERT_TAIL(&asoc->delivery_queue, chk, sctp_next);
    asoc->size_on_delivery_queue += chk->send_size;
    asoc->cnt_on_delivery_queue++;
    if (asoc->fragmented_delivery_inprogress) {
        /*
         * oh oh, fragmented delivery in progress
         * return out of here.
         */
        if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
            kprintf("Fragmented delivery in progress?\n");
        SCTP_INP_WUNLOCK(stcb->sctp_ep);
    /* Now grab the first one */
    chk = TAILQ_FIRST(&asoc->delivery_queue);
        /* Nothing in queue */
        if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
            kprintf("Nothing in queue?\n");
        asoc->size_on_delivery_queue = 0;
        asoc->cnt_on_delivery_queue = 0;
        SCTP_INP_WUNLOCK(stcb->sctp_ep);
    if (stcb->sctp_socket->so_rcv.ssb_cc >= stcb->sctp_socket->so_rcv.ssb_hiwat) {
        /* Boy, there really is NO room */
        SCTP_INP_WUNLOCK(stcb->sctp_ep);
    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
        kprintf("Now to the delivery with chk(%p)!\n", chk);
    /* XXX need to append PKTHDR to the socket buffer first */
    if ((chk->data->m_flags & M_PKTHDR) == 0) {
        MGETHDR(m, MB_DONTWAIT, MT_DATA);
            SCTP_INP_WUNLOCK(stcb->sctp_ep);
        m->m_pkthdr.len = chk->send_size;
        m->m_next = chk->data;
    if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
        if (chk->data->m_next == NULL) {
            /* hopefully we hit here most of the time */
            chk->data->m_flags |= M_EOR;
            /* Add the flag to the LAST mbuf in the chain */
            while (m->m_next != NULL) {
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
        struct sockaddr_in6 lsa6;

        control = sctp_build_ctl(stcb, chk);
        to = (struct sockaddr *)&chk->whoTo->ro._l_addr;
        if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
            to->sa_family == AF_INET) {
            struct sockaddr_in *sin;

            sin = (struct sockaddr_in *)to;
            bzero(&sin6, sizeof(sin6));
            sin6.sin6_family = AF_INET6;
            sin6.sin6_len = sizeof(struct sockaddr_in6);
            sin6.sin6_addr.s6_addr16[2] = 0xffff;
            bcopy(&sin->sin_addr, &sin6.sin6_addr.s6_addr16[3],
                sizeof(sin6.sin6_addr.s6_addr16[3]));
            sin6.sin6_port = sin->sin_port;
            to = (struct sockaddr *)&sin6;
        /* check and strip embedded scope junk */
        to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
        if (((struct sockaddr_in *)to)->sin_port == 0) {
            kprintf("Huh a, port is %d not net:%p %d?\n",
                ((struct sockaddr_in *)to)->sin_port,
                (int)(ntohs(stcb->rport)));
            ((struct sockaddr_in *)to)->sin_port = stcb->rport;
        if (sctp_sbspace(&stcb->sctp_socket->so_rcv) < (long)chk->send_size) {
            /* Gak not enough room */
                sctp_m_freem(control);
                stcb->asoc.my_rwnd_control_len -=
                    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
        lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
        if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
            to, chk->data, control, stcb->asoc.my_vtag,
            /* Gak not enough room */
                sctp_m_freem(control);
                stcb->asoc.my_rwnd_control_len -=
                    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
        if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
            if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
                stcb->asoc.my_rwnd_control_len +=
            stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
        lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
        /* append to an already started message. */
        lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
        if (sctp_sbspace(&stcb->sctp_socket->so_rcv) >=
            (long)chk->send_size) {
            ssb_append(&stcb->sctp_socket->so_rcv, chk->data);
        lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
    SCTP_INP_WUNLOCK(stcb->sctp_ep);
    /* free up the one we inserted */
    /* Pull it off the queue */
    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
        kprintf("Free_it true, doing tickle wakeup\n");
    sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
    TAILQ_REMOVE(&asoc->delivery_queue, chk, sctp_next);
    asoc->size_on_delivery_queue -= chk->send_size;
    asoc->cnt_on_delivery_queue--;
    /* Lose the data pointer, since it's in the socket buffer */
    /* Now free the address and data */
    sctp_free_remote_addr(chk->whoTo);
    SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
    sctppcbinfo.ipi_count_chunk--;
    if ((int)sctppcbinfo.ipi_count_chunk < 0) {
        panic("Chunk count is negative");
    sctppcbinfo.ipi_gencnt_chunk++;
/*
 * We are delivering currently from the reassembly queue. We must continue to
 * deliver until we either:
 * 1) run out of space.
 * 2) run out of sequential TSN's
 * 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
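/*
 * Note (added): partial ("fragmented") delivery state lives in the
 * association: fragmented_delivery_inprogress, str_of_pdapi, ssn_of_pdapi
 * and tsn_last_delivered.  This routine keeps pulling chunks whose TSN is
 * exactly tsn_last_delivered + 1 until one of the three stop conditions
 * above is hit.
 */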
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc, int hold_locks)
    struct sockaddr_in6 sin6;
    struct sctp_tmit_chunk *chk, *at;
    struct mbuf *control, *m;

    cntDel = stream_no = 0;
    if (hold_locks == 0) {
        /*
         * you always have the TCB lock, we need
         * to have the inp write lock as well.
         */
        SCTP_TCB_UNLOCK(stcb);
        SCTP_INP_WLOCK(stcb->sctp_ep);
    if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
        /* socket above is long gone */
        asoc->fragmented_delivery_inprogress = 0;
        TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
            asoc->size_on_delivery_queue -= chk->send_size;
            asoc->cnt_on_delivery_queue--;
            /*
             * Lose the data pointer, since it's in the socket buffer
             */
            sctp_m_freem(chk->data);
            /* Now free the address and data */
            sctp_free_remote_addr(chk->whoTo);
            SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
            sctppcbinfo.ipi_count_chunk--;
            if ((int)sctppcbinfo.ipi_count_chunk < 0) {
                panic("Chunk count is negative");
            sctppcbinfo.ipi_gencnt_chunk++;
        SCTP_INP_WUNLOCK(stcb->sctp_ep);
    if (stcb->sctp_socket->so_rcv.ssb_cc >=
        stcb->sctp_socket->so_rcv.ssb_hiwat) {
        sctp_sorwakeup(stcb->sctp_ep,
        SCTP_INP_WUNLOCK(stcb->sctp_ep);
    chk = TAILQ_FIRST(&asoc->reasmqueue);
        sctp_sorwakeup(stcb->sctp_ep,
        SCTP_INP_WUNLOCK(stcb->sctp_ep);
    if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
        /* Can't deliver more :< */
        sctp_sorwakeup(stcb->sctp_ep,
        SCTP_INP_WUNLOCK(stcb->sctp_ep);
    stream_no = chk->rec.data.stream_number;
    nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
    if (nxt_todel != chk->rec.data.stream_seq &&
        (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
        /*
         * Not the next sequence to deliver in its stream OR
         */
        sctp_sorwakeup(stcb->sctp_ep,
        SCTP_INP_WUNLOCK(stcb->sctp_ep);
    if ((chk->data->m_flags & M_PKTHDR) == 0) {
        MGETHDR(m, MB_DONTWAIT, MT_DATA);
            SCTP_INP_WUNLOCK(stcb->sctp_ep);
        m->m_pkthdr.len = chk->send_size;
        m->m_next = chk->data;
    if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
        if (chk->data->m_next == NULL) {
            /* hopefully we hit here most of the time */
            chk->data->m_flags |= M_EOR;
            /* Add the flag to the LAST mbuf in the chain */
            while (m->m_next != NULL) {
    if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
        struct sockaddr_in6 lsa6;

        control = sctp_build_ctl(stcb, chk);
        to = (struct sockaddr *)&chk->whoTo->ro._l_addr;
        if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
            to->sa_family == AF_INET) {
            struct sockaddr_in *sin;

            sin = (struct sockaddr_in *)to;
            bzero(&sin6, sizeof(sin6));
            sin6.sin6_family = AF_INET6;
            sin6.sin6_len = sizeof(struct sockaddr_in6);
            sin6.sin6_addr.s6_addr16[2] = 0xffff;
            bcopy(&sin->sin_addr,
                &sin6.sin6_addr.s6_addr16[3],
                sizeof(sin6.sin6_addr.s6_addr16[3]));
            sin6.sin6_port = sin->sin_port;
            to = (struct sockaddr *)&sin6;
        /* check and strip embedded scope junk */
        to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
        if (((struct sockaddr_in *)to)->sin_port == 0) {
            kprintf("Huh b, port is %d not net:%p %d?\n",
                ((struct sockaddr_in *)to)->sin_port,
                (int)(ntohs(stcb->rport)));
            ((struct sockaddr_in *)to)->sin_port = stcb->rport;
        if (sctp_sbspace(&stcb->sctp_socket->so_rcv) <
            (long)chk->send_size) {
            sctp_m_freem(control);
            stcb->asoc.my_rwnd_control_len -=
                CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
            sctp_sorwakeup(stcb->sctp_ep,
            SCTP_INP_WUNLOCK(stcb->sctp_ep);
        lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
        if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv,
            to, chk->data, control, stcb->asoc.my_vtag,
            /* Gak not enough room */
            sctp_m_freem(control);
            stcb->asoc.my_rwnd_control_len -=
                CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
            sctp_sorwakeup(stcb->sctp_ep,
            SCTP_INP_WUNLOCK(stcb->sctp_ep);
            lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
        lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
        if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
            if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
                stcb->asoc.my_rwnd_control_len +=
            stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
        if (sctp_sbspace(&stcb->sctp_socket->so_rcv) >=
            (long)chk->send_size) {
            lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
            ssb_append(&stcb->sctp_socket->so_rcv, chk->data);
            lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
            /* out of space in the sb */
            sctp_sorwakeup(stcb->sctp_ep,
            SCTP_INP_WUNLOCK(stcb->sctp_ep);
    /* pull it we did it */
    TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
    if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
        asoc->fragmented_delivery_inprogress = 0;
        if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
            asoc->strmin[stream_no].last_sequence_delivered++;
    asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
    asoc->size_on_reasm_queue -= chk->send_size;
    asoc->cnt_on_reasm_queue--;
    /* free up the chk */
    sctp_free_remote_addr(chk->whoTo);
    SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
    sctppcbinfo.ipi_count_chunk--;
    if ((int)sctppcbinfo.ipi_count_chunk < 0) {
        panic("Chunk count is negative");
    sctppcbinfo.ipi_gencnt_chunk++;
    if (asoc->fragmented_delivery_inprogress == 0) {
        /*
         * Now let's see if we can deliver the next one on the
         */
        struct sctp_stream_in *strm;

        strm = &asoc->strmin[stream_no];
        nxt_todel = strm->last_sequence_delivered + 1;
        chk = TAILQ_FIRST(&strm->inqueue);
        if (chk && (nxt_todel == chk->rec.data.stream_seq)) {
            while (chk != NULL) {
                chk->rec.data.stream_seq) {
                at = TAILQ_NEXT(chk, sctp_next);
                TAILQ_REMOVE(&strm->inqueue,
                asoc->size_on_all_streams -=
                asoc->cnt_on_all_streams--;
                strm->last_sequence_delivered++;
                /*
                 * We ignore the return of
                 * deliver_data here since we
                 * always can hold the chunk on
                 * the d-queue. And we have a
                 * finite number that can be
                 * delivered from the strq.
                 */
                sctp_deliver_data(stcb, asoc, chk, 1);
                strm->last_sequence_delivered + 1;
        if (!TAILQ_EMPTY(&asoc->delivery_queue)) {
            /* Here if deliver_data fails, we must break */
            if (sctp_deliver_data(stcb, asoc, NULL, 1) == 0)
        sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
        SCTP_INP_WUNLOCK(stcb->sctp_ep);
    chk = TAILQ_FIRST(&asoc->reasmqueue);
        sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
        SCTP_INP_WUNLOCK(stcb->sctp_ep);
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order.
 * One big question still remains: what to do when the socket buffer is FULL?
 */
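/*
 * Note (added): ordering below relies on compare_with_wrap(a, b, MAX),
 * serial-number arithmetic in the style of RFC 1982: it answers "is a
 * newer than b?" modulo the sequence space.  For example, with 16-bit
 * SSNs, 2 is "greater than" 65534 because the counter has wrapped.
 */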
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
    struct sctp_stream_in *strm;
    struct sctp_tmit_chunk *at;

    /*
     * Need to add code to deal with 16 bit seq wrap
     * without a TSN wrap for ordered delivery (maybe).
     */
    asoc->size_on_all_streams += chk->send_size;
    asoc->cnt_on_all_streams++;
    strm = &asoc->strmin[chk->rec.data.stream_number];
    nxt_todel = strm->last_sequence_delivered + 1;
#ifdef SCTP_STR_LOGGING
    sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
        kprintf("queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
            (u_int)chk->rec.data.stream_seq,
            (u_int)strm->last_sequence_delivered, (u_int)nxt_todel);
    if (compare_with_wrap(strm->last_sequence_delivered,
        chk->rec.data.stream_seq, MAX_SEQ) ||
        (strm->last_sequence_delivered == chk->rec.data.stream_seq)) {
        /* The incoming sseq is behind where we last delivered? */
        if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
            kprintf("Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
                chk->rec.data.stream_seq,
                strm->last_sequence_delivered);
        /*
         * throw it in the stream so it gets cleaned up in
         * association destruction
         */
        TAILQ_INSERT_HEAD(&strm->inqueue, chk, sctp_next);
        MGET(oper, MB_DONTWAIT, MT_DATA);
            struct sctp_paramhdr *ph;

            oper->m_len = sizeof(struct sctp_paramhdr) +
            ph = mtod(oper, struct sctp_paramhdr *);
            ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
            ph->param_length = htons(oper->m_len);
            ippp = (u_int32_t *)(ph + 1);
            *ippp = htonl(0x00000001);
        sctp_abort_an_association(stcb->sctp_ep, stcb,
            SCTP_PEER_FAULTY, oper);
    if (nxt_todel == chk->rec.data.stream_seq) {
        /* can be delivered right away */
        if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
            kprintf("It's NEXT!\n");
#ifdef SCTP_STR_LOGGING
        sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
        asoc->size_on_all_streams -= chk->send_size;
        asoc->cnt_on_all_streams--;
        strm->last_sequence_delivered++;
        sctp_deliver_data(stcb, asoc, chk, 0);
        chk = TAILQ_FIRST(&strm->inqueue);
        while (chk != NULL) {
            nxt_todel = strm->last_sequence_delivered + 1;
            if (nxt_todel == chk->rec.data.stream_seq) {
                at = TAILQ_NEXT(chk, sctp_next);
                TAILQ_REMOVE(&strm->inqueue, chk, sctp_next);
                asoc->size_on_all_streams -= chk->send_size;
                asoc->cnt_on_all_streams--;
                strm->last_sequence_delivered++;
                /*
                 * We ignore the return of deliver_data here
                 * since we always can hold the chunk on the
                 * d-queue. And we have a finite number that
                 * can be delivered from the strq.
                 */
#ifdef SCTP_STR_LOGGING
                sctp_log_strm_del(chk, NULL,
                    SCTP_STR_LOG_FROM_IMMED_DEL);
                sctp_deliver_data(stcb, asoc, chk, 0);
        /*
         * Ok, we did not deliver this guy, find
         * the correct place to put it on the queue.
         */
        if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
            kprintf("Queue Needed!\n");
        if (TAILQ_EMPTY(&strm->inqueue)) {
#ifdef SCTP_STR_LOGGING
            sctp_log_strm_del(chk, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
            TAILQ_INSERT_HEAD(&strm->inqueue, chk, sctp_next);
            TAILQ_FOREACH(at, &strm->inqueue, sctp_next) {
                if (compare_with_wrap(at->rec.data.stream_seq,
                    chk->rec.data.stream_seq, MAX_SEQ)) {
                    /*
                     * one in queue is bigger than the new
                     * one, insert before this one
                     */
#ifdef SCTP_STR_LOGGING
                    sctp_log_strm_del(chk, at,
                        SCTP_STR_LOG_FROM_INSERT_MD);
                    TAILQ_INSERT_BEFORE(at, chk, sctp_next);
                } else if (at->rec.data.stream_seq ==
                    chk->rec.data.stream_seq) {
                    /*
                     * Gak, He sent me a duplicate str seq
                     */
                    /*
                     * foo bar, I guess I will just free
                     * this new guy, should we abort too?
                     * FIX ME MAYBE? Or it COULD be that
                     * the SSN's have wrapped. Maybe I
                     * should compare to TSN somehow...
                     * sigh for now just blow away the
                     */
                    sctp_m_freem(chk->data);
                    asoc->size_on_all_streams -= chk->send_size;
                    asoc->cnt_on_all_streams--;
                    sctp_pegs[SCTP_DUP_SSN_RCVD]++;
                    sctp_free_remote_addr(chk->whoTo);
                    SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
                    sctppcbinfo.ipi_count_chunk--;
                    if ((int)sctppcbinfo.ipi_count_chunk <
                        panic("Chunk count is negative");
                    sctppcbinfo.ipi_gencnt_chunk++;
                    if (TAILQ_NEXT(at, sctp_next) == NULL) {
                        /*
                         * We are at the end, insert it
                         */
#ifdef SCTP_STR_LOGGING
                        sctp_log_strm_del(chk, at,
                            SCTP_STR_LOG_FROM_INSERT_TL);
                        TAILQ_INSERT_AFTER(&strm->inqueue,
        /* We delivered some chunks, wake them up */
        if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
            kprintf("Doing WAKEUP!\n");
        sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
/*
 * Returns two things: you get the total size of the deliverable parts of the
 * first fragmented message on the reassembly queue, and you get a 1 back if
 * all of the message is ready or a 0 back if the message is still incomplete.
 */
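/*
 * Note (added): the caller (sctp_queue_data_for_reasm) uses this to decide
 * whether to start a partial delivery: either the whole message is present
 * (return 1), or enough of it (*t_size) is queued to be worth pushing up
 * anyway.
 */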
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, int *t_size)
    struct sctp_tmit_chunk *chk;

    chk = TAILQ_FIRST(&asoc->reasmqueue);
        /* nothing on the queue */
    if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
        /* Not a first on the queue */
    tsn = chk->rec.data.TSN_seq;
        if (tsn != chk->rec.data.TSN_seq) {
        *t_size += chk->send_size;
        if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
        chk = TAILQ_NEXT(chk, sctp_next);
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as much
 * as we can). If we run out of space then we must dump what we can and set
 * the appropriate flag to say we queued what we could.
 */
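/*
 * Note (added): the audits below enforce the fragment-flag grammar of a
 * TSN-adjacent run: a FIRST or MIDDLE may only be followed by a MIDDLE or
 * LAST, a LAST may only be followed by a new FIRST, and ordered fragments
 * of one message must share a stream number and stream sequence.  Any
 * violation is treated as a protocol violation and aborts the association.
 */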
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
    u_int16_t nxt_todel;
    u_int32_t cum_ackp1, last_tsn, prev_tsn, post_tsn;
    struct sctp_tmit_chunk *at, *prev, *next;

    cum_ackp1 = asoc->tsn_last_delivered + 1;
    if (TAILQ_EMPTY(&asoc->reasmqueue)) {
        /* This is the first one on the queue */
        TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
        /*
         * we do not check for delivery of anything when
         * only one fragment is here
         */
        asoc->size_on_reasm_queue = chk->send_size;
        asoc->cnt_on_reasm_queue++;
        if (chk->rec.data.TSN_seq == cum_ackp1) {
            if (asoc->fragmented_delivery_inprogress == 0 &&
                (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
                SCTP_DATA_FIRST_FRAG) {
                /*
                 * An empty queue, no delivery inprogress, we
                 * hit the next one and it does NOT have a
                 * FIRST fragment mark.
                 */
                if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                    kprintf("Gak, Evil plot, its not first, no fragmented delivery in progress\n");
                MGET(oper, MB_DONTWAIT, MT_DATA);
                    struct sctp_paramhdr *ph;

                        sizeof(struct sctp_paramhdr) +
                    ph = mtod(oper, struct sctp_paramhdr *);
                        htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ph->param_length = htons(oper->m_len);
                    ippp = (u_int32_t *)(ph + 1);
                    *ippp = htonl(0x10000001);
                sctp_abort_an_association(stcb->sctp_ep, stcb,
                    SCTP_PEER_FAULTY, oper);
            } else if (asoc->fragmented_delivery_inprogress &&
                (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
                /*
                 * We are doing a partial delivery and the NEXT
                 * chunk MUST be either the LAST or MIDDLE
                 * fragment NOT a FIRST
                 */
                if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                    kprintf("Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
                MGET(oper, MB_DONTWAIT, MT_DATA);
                    struct sctp_paramhdr *ph;

                        sizeof(struct sctp_paramhdr) +
                    ph = mtod(oper, struct sctp_paramhdr *);
                        htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ph->param_length = htons(oper->m_len);
                    ippp = (u_int32_t *)(ph + 1);
                    *ippp = htonl(0x10000002);
                sctp_abort_an_association(stcb->sctp_ep, stcb,
                    SCTP_PEER_FAULTY, oper);
            } else if (asoc->fragmented_delivery_inprogress) {
                /* Here we are ok with a MIDDLE or LAST piece */
                if (chk->rec.data.stream_number !=
                    asoc->str_of_pdapi) {
                    /* Got to be the right STR No */
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        kprintf("Gak, Evil plot, it IS not same stream number %d vs %d\n",
                            chk->rec.data.stream_number,
                            asoc->str_of_pdapi);
                    MGET(oper, MB_DONTWAIT, MT_DATA);
                        struct sctp_paramhdr *ph;

                            sizeof(struct sctp_paramhdr) +
                            struct sctp_paramhdr *);
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ippp = (u_int32_t *)(ph + 1);
                        *ippp = htonl(0x10000003);
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
                    SCTP_DATA_UNORDERED &&
                    chk->rec.data.stream_seq !=
                    asoc->ssn_of_pdapi) {
                    /* Got to be the right STR Seq */
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        kprintf("Gak, Evil plot, it IS not same stream seq %d vs %d\n",
                            chk->rec.data.stream_seq,
                            asoc->ssn_of_pdapi);
                    MGET(oper, MB_DONTWAIT, MT_DATA);
                        struct sctp_paramhdr *ph;

                            sizeof(struct sctp_paramhdr) +
                            struct sctp_paramhdr *);
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ippp = (u_int32_t *)(ph + 1);
                        *ippp = htonl(0x10000004);
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
    /* Find its place */
    at = TAILQ_FIRST(&asoc->reasmqueue);
    /* Grab the top flags */
    TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
        if (compare_with_wrap(at->rec.data.TSN_seq,
            chk->rec.data.TSN_seq, MAX_TSN)) {
            /*
             * one in queue is bigger than the new one, insert
             */
            asoc->size_on_reasm_queue += chk->send_size;
            asoc->cnt_on_reasm_queue++;
            TAILQ_INSERT_BEFORE(at, chk, sctp_next);
        } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
            /* Gak, He sent me a duplicate str seq number */
            /*
             * foo bar, I guess I will just free this new guy,
             * should we abort too? FIX ME MAYBE? Or it COULD be
             * that the SSN's have wrapped. Maybe I should compare
             * to TSN somehow... sigh for now just blow away the
             */
            sctp_m_freem(chk->data);
            sctp_free_remote_addr(chk->whoTo);
            SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
            sctppcbinfo.ipi_count_chunk--;
            if ((int)sctppcbinfo.ipi_count_chunk < 0) {
                panic("Chunk count is negative");
            sctppcbinfo.ipi_gencnt_chunk++;
            last_flags = at->rec.data.rcv_flags;
            last_tsn = at->rec.data.TSN_seq;
            if (TAILQ_NEXT(at, sctp_next) == NULL) {
                /*
                 * We are at the end, insert it after this one
                 */
                /* check it first */
                asoc->size_on_reasm_queue += chk->send_size;
                asoc->cnt_on_reasm_queue++;
                TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
    /* Now the audits */
        prev_tsn = chk->rec.data.TSN_seq - 1;
        if (prev_tsn == prev->rec.data.TSN_seq) {
            /*
             * Ok the one I am dropping onto the end
             * is the NEXT. A bit of validation here.
             */
            if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_FIRST_FRAG ||
                (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_MIDDLE_FRAG) {
                /*
                 * Insert chk MUST be a MIDDLE or LAST fragment
                 */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                    SCTP_DATA_FIRST_FRAG) {
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        kprintf("Prev check - It can be a middle or last but not a first\n");
                        kprintf("Gak, Evil plot, it's a FIRST!\n");
                    MGET(oper, MB_DONTWAIT, MT_DATA);
                        struct sctp_paramhdr *ph;

                            sizeof(struct sctp_paramhdr) +
                            struct sctp_paramhdr *);
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ippp = (u_int32_t *)(ph + 1);
                        *ippp = htonl(0x10000005);
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                if (chk->rec.data.stream_number !=
                    prev->rec.data.stream_number) {
                    /*
                     * Huh, need the correct STR here, they
                     */
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        kprintf("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
                            chk->rec.data.stream_number,
                            prev->rec.data.stream_number);
                    MGET(oper, MB_DONTWAIT, MT_DATA);
                        struct sctp_paramhdr *ph;

                            sizeof(struct sctp_paramhdr) +
                            struct sctp_paramhdr *);
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ippp = (u_int32_t *)(ph + 1);
                        *ippp = htonl(0x10000006);
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
                    chk->rec.data.stream_seq !=
                    prev->rec.data.stream_seq) {
                    /*
                     * Huh, need the correct STR here, they
                     */
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        kprintf("Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
                            chk->rec.data.stream_seq,
                            prev->rec.data.stream_seq);
                    MGET(oper, MB_DONTWAIT, MT_DATA);
                        struct sctp_paramhdr *ph;

                            sizeof(struct sctp_paramhdr) +
                            struct sctp_paramhdr *);
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ippp = (u_int32_t *)(ph + 1);
                        *ippp = htonl(0x10000007);
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
            } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_LAST_FRAG) {
                /* Insert chk MUST be a FIRST */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
                    SCTP_DATA_FIRST_FRAG) {
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        kprintf("Prev check - Gak, evil plot, its not FIRST and it must be!\n");
                    MGET(oper, MB_DONTWAIT, MT_DATA);
                        struct sctp_paramhdr *ph;

                            sizeof(struct sctp_paramhdr) +
                            struct sctp_paramhdr *);
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ippp = (u_int32_t *)(ph + 1);
                        *ippp = htonl(0x10000008);
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
        post_tsn = chk->rec.data.TSN_seq + 1;
        if (post_tsn == next->rec.data.TSN_seq) {
            /*
             * Ok the one I am inserting ahead of
             * is my NEXT one. A bit of validation here.
             */
            if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
                /* Insert chk MUST be a last fragment */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
                    != SCTP_DATA_LAST_FRAG) {
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        kprintf("Next chk - Next is FIRST, we must be LAST\n");
                        kprintf("Gak, Evil plot, its not a last!\n");
                    MGET(oper, MB_DONTWAIT, MT_DATA);
                        struct sctp_paramhdr *ph;

                            sizeof(struct sctp_paramhdr) +
                            struct sctp_paramhdr *);
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ippp = (u_int32_t *)(ph + 1);
                        *ippp = htonl(0x10000009);
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
            } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_MIDDLE_FRAG ||
                (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_LAST_FRAG) {
                /* Insert chk CAN be MIDDLE or FIRST NOT LAST */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                    SCTP_DATA_LAST_FRAG) {
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        kprintf("Next chk - Next is a MIDDLE/LAST\n");
                        kprintf("Gak, Evil plot, new prev chunk is a LAST\n");
                    MGET(oper, MB_DONTWAIT, MT_DATA);
                        struct sctp_paramhdr *ph;

                            sizeof(struct sctp_paramhdr) +
                            struct sctp_paramhdr *);
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ippp = (u_int32_t *)(ph + 1);
                        *ippp = htonl(0x1000000a);
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                if (chk->rec.data.stream_number !=
                    next->rec.data.stream_number) {
                    /*
                     * Huh, need the correct STR here, they
                     */
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        kprintf("Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
                            chk->rec.data.stream_number,
                            next->rec.data.stream_number);
                    MGET(oper, MB_DONTWAIT, MT_DATA);
                        struct sctp_paramhdr *ph;

                            sizeof(struct sctp_paramhdr) +
                            struct sctp_paramhdr *);
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ippp = (u_int32_t *)(ph + 1);
                        *ippp = htonl(0x1000000b);
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
                if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
                    chk->rec.data.stream_seq !=
                    next->rec.data.stream_seq) {
                    /*
                     * Huh, need the correct STR here, they
                     */
                    if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                        kprintf("Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
                            chk->rec.data.stream_seq,
                            next->rec.data.stream_seq);
                    MGET(oper, MB_DONTWAIT, MT_DATA);
                        struct sctp_paramhdr *ph;

                            sizeof(struct sctp_paramhdr) +
                            struct sctp_paramhdr *);
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ippp = (u_int32_t *)(ph + 1);
                        *ippp = htonl(0x1000000c);
                    sctp_abort_an_association(stcb->sctp_ep,
                        stcb, SCTP_PEER_FAULTY, oper);
    /*
     * now that we have it all in place we must check a number of
     * things to see if we can send data to the ULP.
     */
    /* we need to do some delivery, if we can */
    chk = TAILQ_FIRST(&asoc->reasmqueue);
        asoc->size_on_reasm_queue = 0;
        asoc->cnt_on_reasm_queue = 0;
    if (asoc->fragmented_delivery_inprogress == 0) {
        asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
        if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
            (nxt_todel == chk->rec.data.stream_seq ||
            (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
            /*
             * Yep the first one is here and it's
             * ok to deliver but should we?
             */
            if (TAILQ_EMPTY(&asoc->delivery_queue) &&
                (sctp_is_all_msg_on_reasm(asoc, &tsize) ||
                (asoc->size_on_reasm_queue >=
                (stcb->sctp_socket->so_rcv.ssb_hiwat >> 2) &&
                /*
                 * start reception, by backing down the TSN
                 * just in case we can't deliver. If we
                 */
                asoc->fragmented_delivery_inprogress = 1;
                asoc->tsn_last_delivered =
                    chk->rec.data.TSN_seq - 1;
                asoc->str_of_pdapi =
                    chk->rec.data.stream_number;
                asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
                asoc->fragment_flags = chk->rec.data.rcv_flags;
                sctp_service_reassembly(stcb, asoc, 0);
        sctp_service_reassembly(stcb, asoc, 0);
/*
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this,
 * but this is doubtful. It is too bad I must worry about evil crackers, sigh.
 */
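/*
 * Note (added): returns non-zero when, judged purely by TSN adjacency and
 * fragment flags, this supposedly self-contained chunk would have to be
 * part of a message already on the reassembly queue; the caller treats
 * that as a broken or malicious peer and aborts the association.
 */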
sctp_does_chk_belong_to_reasm(struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk)
    struct sctp_tmit_chunk *at;

    TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
        if (compare_with_wrap(chk->rec.data.TSN_seq,
            at->rec.data.TSN_seq, MAX_TSN)) {
            /* is it one bigger? */
            tsn_est = at->rec.data.TSN_seq + 1;
            if (tsn_est == chk->rec.data.TSN_seq) {
                /* yep. It better be a LAST then */
                if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
                    SCTP_DATA_LAST_FRAG) {
                    /*
                     * Ok this guy belongs next to a guy
                     * that is NOT last, it should be a
                     * middle/last, not a complete chunk.
                     */
                    /*
                     * This guy is ok since it's a LAST and
                     * the new chunk is a fully self-
                     * contained one.
                     */
        } else if (chk->rec.data.TSN_seq == at->rec.data.TSN_seq) {
            /* Software error since I have a dup? */
            /*
             * Ok, 'at' is larger than new chunk but does it
             * need to be right before it.
             */
            tsn_est = chk->rec.data.TSN_seq + 1;
            if (tsn_est == at->rec.data.TSN_seq) {
                /* Yep, it better be a FIRST */
                if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
                    SCTP_DATA_FIRST_FRAG) {
extern unsigned int sctp_max_chunks_on_queue;

sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, u_int32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
    /* Process a data chunk */
    /* struct sctp_tmit_chunk *chk; */
    struct sctp_tmit_chunk *chk;
    u_int16_t strmno, strmseq;

    tsn = ntohl(ch->dp.tsn);
#ifdef SCTP_MAP_LOGGING
    sctp_log_map(0, tsn, asoc->cumulative_tsn, SCTP_MAP_PREPARE_SLIDE);
    if (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN) ||
        asoc->cumulative_tsn == tsn) {
        /* It is a duplicate */
        sctp_pegs[SCTP_DUPTSN_RECVD]++;
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
    /* Calculate the number of TSN's between the base and this TSN */
    if (tsn >= asoc->mapping_array_base_tsn) {
        gap = tsn - asoc->mapping_array_base_tsn;
        gap = (MAX_TSN - asoc->mapping_array_base_tsn) + tsn + 1;
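    /*
     * Worked example (added): with mapping_array_base_tsn = 100, an
     * arriving tsn = 103 gives gap = 3.  If the TSN space has wrapped,
     * e.g. base = 0xfffffffe and tsn = 1, the second form gives
     * gap = (MAX_TSN - 0xfffffffe) + 1 + 1 = 3 as well.
     */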
    if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
        /* Can't hold the bit in the mapping at max array, toss it */
    if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
        if (sctp_expand_mapping_array(asoc)) {
            /* Can't expand, drop it */
    if (compare_with_wrap(tsn, *high_tsn, MAX_TSN)) {
    /* See if we have received this one already */
    if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
        sctp_pegs[SCTP_DUPTSN_RECVD]++;
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
        if (!callout_pending(&asoc->dack_timer.timer)) {
            /*
             * By starting the timer we assure that we
             * WILL sack at the end of the packet
             * when sctp_sack_check gets called.
             */
            sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep,
    /*
     * Check to see about the GONE flag, duplicates would cause
     * a sack to be sent up above
     */
    if (stcb && (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
        /*
         * wait a minute, this guy is gone, there is no
         * longer a receiver. Send peer an ABORT!
         */
        struct mbuf *op_err;

        op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
        sctp_abort_an_association(stcb->sctp_ep, stcb, 0, op_err);
    /*
     * Now before going further we see if there is room. If NOT then
     * we MAY let one through only IF this TSN is the one we are
     * waiting for on a partial delivery API.
     */
    /* now do the tests */
    if (((asoc->cnt_on_all_streams +
        asoc->cnt_on_delivery_queue +
        asoc->cnt_on_reasm_queue +
        asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) ||
        (((int)asoc->my_rwnd) <= 0)) {
        /*
         * When we have NO room in the rwnd we check
         * to make sure the reader is doing its job...
         */
        if (stcb->sctp_socket->so_rcv.ssb_cc) {
            /* some to read, wake-up */
            sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
        /* now is it in the mapping array of what we have accepted? */
        if (compare_with_wrap(tsn,
            asoc->highest_tsn_inside_map, MAX_TSN)) {
            /* Nope not in the valid range dump it */
            if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
                kprintf("My rwnd overrun1:tsn:%lx rwnd %lu sbspace:%ld delq:%d!\n",
                    (u_long)tsn, (u_long)asoc->my_rwnd,
                    sctp_sbspace(&stcb->sctp_socket->so_rcv),
                    stcb->asoc.cnt_on_delivery_queue);
            sctp_set_rwnd(stcb, asoc);
            if ((asoc->cnt_on_all_streams +
                asoc->cnt_on_delivery_queue +
                asoc->cnt_on_reasm_queue +
                asoc->cnt_msg_on_sb) > sctp_max_chunks_on_queue) {
                sctp_pegs[SCTP_MSGC_DROP]++;
                sctp_pegs[SCTP_RWND_DROPS]++;
    strmno = ntohs(ch->dp.stream_id);
    if (strmno >= asoc->streamincnt) {
        struct sctp_paramhdr *phdr;

        MGETHDR(mb, MB_DONTWAIT, MT_DATA);
            /* add some space up front so prepend will work well */
            mb->m_data += sizeof(struct sctp_chunkhdr);
            phdr = mtod(mb, struct sctp_paramhdr *);
            /*
             * Error causes are just param's and this one has
             * two back to back phdr, one with the error type
             * and size, the other with the streamid and a rsvd
             */
            mb->m_pkthdr.len = mb->m_len =
                (sizeof(struct sctp_paramhdr) * 2);
            phdr->param_type = htons(SCTP_CAUSE_INV_STRM);
            phdr->param_length =
                htons(sizeof(struct sctp_paramhdr) * 2);
            /* We insert the stream in the type field */
            phdr->param_type = ch->dp.stream_id;
            /* And set the length to 0 for the rsvd field */
            phdr->param_length = 0;
            sctp_queue_op_err(stcb, mb);
        sctp_pegs[SCTP_BAD_STRMNO]++;
    /*
     * Before we continue let's validate that we are not
     * being fooled by an evil attacker. We can only
     * have 4k chunks based on our TSN spread allowed
     * by the mapping array 512 * 8 bits, so there is
     * no way our stream sequence numbers could have wrapped.
     * We of course only validate the FIRST fragment so the
     */
    strmseq = ntohs(ch->dp.stream_sequence);
    if ((ch->ch.chunk_flags & SCTP_DATA_FIRST_FRAG) &&
        (ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
        (compare_with_wrap(asoc->strmin[strmno].last_sequence_delivered,
        strmseq, MAX_SEQ) ||
        asoc->strmin[strmno].last_sequence_delivered == strmseq)) {
        /* The incoming sseq is behind where we last delivered? */
        if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
            kprintf("EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
                asoc->strmin[strmno].last_sequence_delivered);
        /*
         * throw it in the stream so it gets cleaned up in
         * association destruction
         */
        MGET(oper, MB_DONTWAIT, MT_DATA);
            struct sctp_paramhdr *ph;

            oper->m_len = sizeof(struct sctp_paramhdr) +
            ph = mtod(oper, struct sctp_paramhdr *);
            ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
            ph->param_length = htons(oper->m_len);
            ippp = (u_int32_t *)(ph + 1);
            *ippp = htonl(0x20000001);
        sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY,
        sctp_pegs[SCTP_BAD_SSN_WRAP]++;
    the_len = (chk_length - sizeof(struct sctp_data_chunk));
    if (last_chunk == 0) {
        dmbuf = sctp_m_copym(*m,
            (offset + sizeof(struct sctp_data_chunk)),
            the_len, MB_DONTWAIT);
        /* We can steal the last chunk */
        /* lop off the top part */
        m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
        if (dmbuf->m_pkthdr.len > the_len) {
            /* Trim the end round bytes off too */
            m_adj(dmbuf, -(dmbuf->m_pkthdr.len - the_len));
        sctp_pegs[SCTP_NO_COPY_IN]++;
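    /*
     * Note (added): when this DATA chunk is the last one in the packet
     * we can take over the received mbuf chain directly (m_adj trims the
     * headers off) instead of paying for sctp_m_copym(); the peg above
     * counts how often that zero-copy path is taken.
     */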
    if (dmbuf == NULL) {
        sctp_pegs[SCTP_DROP_NOMEMORY]++;
    if ((ch->ch.chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
        asoc->fragmented_delivery_inprogress == 0 &&
        TAILQ_EMPTY(&asoc->delivery_queue) &&
        ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) ||
        ((asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
        TAILQ_EMPTY(&asoc->strmin[strmno].inqueue))) &&
        ((long)(stcb->sctp_socket->so_rcv.ssb_hiwat -
        stcb->sctp_socket->so_rcv.ssb_cc) >= (long)the_len)) {
        /* Candidate for express delivery */
        /*
         * It's not fragmented,
         * Nothing in the delivery queue,
         * It's un-ordered OR ordered and the next to deliver AND
         * nothing else is stuck on the stream queue,
         * And there is room for it in the socket buffer.
         * Let's just stuff it up the buffer....
         */
        struct mbuf *control, *mmm;
        struct sockaddr_in6 sin6;
        struct sockaddr_in6 lsa6;
        struct sockaddr *to;

        /* It would be nice to avoid this copy if we could :< */
        control = sctp_build_ctl_nchunk(stcb, tsn,
            ch->dp.protocol_id, 0, strmno, strmseq,
            ch->ch.chunk_flags);
        /* XXX need to append PKTHDR to the socket buffer first */
        if ((dmbuf->m_flags & M_PKTHDR) == 0) {
            MGETHDR(tmp, MB_DONTWAIT, MT_DATA);
                sctp_m_freem(control);
                stcb->asoc.my_rwnd_control_len -=
                    CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
                goto failed_express_del;
            tmp->m_pkthdr.len = the_len;
            tmp->m_next = dmbuf;
        to = (struct sockaddr *)&net->ro._l_addr;
        if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
            to->sa_family == AF_INET) {
            struct sockaddr_in *sin;

            sin = (struct sockaddr_in *)to;
            bzero(&sin6, sizeof(sin6));
            sin6.sin6_family = AF_INET6;
            sin6.sin6_len = sizeof(struct sockaddr_in6);
            sin6.sin6_addr.s6_addr16[2] = 0xffff;
            bcopy(&sin->sin_addr,
                &sin6.sin6_addr.s6_addr16[3],
                sizeof(sin6.sin6_addr.s6_addr16[3]));
            sin6.sin6_port = sin->sin_port;
            to = (struct sockaddr *)&sin6;
        /* check and strip embedded scope junk */
        to = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)to,
        if (((struct sockaddr_in *)to)->sin_port == 0) {
            kprintf("Huh c, port is %d not net:%p %d?\n",
                ((struct sockaddr_in *)to)->sin_port,
                (int)(ntohs(stcb->rport)));
            ((struct sockaddr_in *)to)->sin_port = stcb->rport;
        while (mmm->m_next != NULL) {
        mmm->m_flags |= M_EOR;
        if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
            /* we have a new high score */
            asoc->highest_tsn_inside_map = tsn;
#ifdef SCTP_MAP_LOGGING
            sctp_log_map(0, 1, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
        SCTP_TCB_UNLOCK(stcb);
        SCTP_INP_WLOCK(stcb->sctp_ep);
        SCTP_TCB_LOCK(stcb);
        lwkt_gettoken(&stcb->sctp_socket->so_rcv.ssb_token);
        if (!sctp_sbappendaddr_nocheck(&stcb->sctp_socket->so_rcv, to, dmbuf,
            control, stcb->asoc.my_vtag, stcb->sctp_ep)) {
            sctp_m_freem(control);
            stcb->asoc.my_rwnd_control_len -=
                CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
            sctp_m_freem(dmbuf);
            lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
            goto failed_express_del;
        lwkt_reltoken(&stcb->sctp_socket->so_rcv.ssb_token);
        if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0) {
            if (sctp_add_to_socket_q(stcb->sctp_ep, stcb)) {
                stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
            stcb->asoc.my_rwnd_control_len += sizeof(struct mbuf);
        SCTP_INP_WUNLOCK(stcb->sctp_ep);
        sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
        if ((ch->ch.chunk_flags & SCTP_DATA_UNORDERED) == 0) {
            /* for ordered, bump what we delivered */
            asoc->strmin[strmno].last_sequence_delivered++;
        sctp_pegs[SCTP_EXPRESS_ROUTE]++;
#ifdef SCTP_STR_LOGGING
        sctp_log_strm_del_alt(tsn, strmseq,
            SCTP_STR_LOG_FROM_EXPRS_DEL);
        if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
            kprintf("Express Delivery succeeds\n");
        goto finish_express_del;
    /* If we reach here this is a new chunk */
    chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
        /* No memory so we drop the chunk */
        sctp_pegs[SCTP_DROP_NOMEMORY]++;
        if (last_chunk == 0) {
            /* we copied it, free the copy */
            sctp_m_freem(dmbuf);
    sctppcbinfo.ipi_count_chunk++;
    sctppcbinfo.ipi_gencnt_chunk++;
    chk->rec.data.TSN_seq = tsn;
    chk->rec.data.stream_seq = strmseq;
    chk->rec.data.stream_number = strmno;
    chk->rec.data.payloadtype = ch->dp.protocol_id;
    chk->rec.data.context = 0;
    chk->rec.data.doing_fast_retransmit = 0;
    chk->rec.data.rcv_flags = ch->ch.chunk_flags;
    chk->send_size = the_len;
    /* Mark it as received */
    /* Now queue it where it belongs */
    if ((chk->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
        SCTP_DATA_NOT_FRAG) {
        /* First a sanity check */
        if (asoc->fragmented_delivery_inprogress) {
            /*
             * Ok, we have a fragmented delivery in progress;
             * if this chunk is next to deliver OR belongs in
             * our view to the reassembly, the peer is evil
             */
            u_int32_t estimate_tsn;

            estimate_tsn = asoc->tsn_last_delivered + 1;
            if (TAILQ_EMPTY(&asoc->reasmqueue) &&
                (estimate_tsn == chk->rec.data.TSN_seq)) {
                /* Evil/Broke peer */
                MGET(oper, MB_DONTWAIT, MT_DATA);
                    struct sctp_paramhdr *ph;

                        sizeof(struct sctp_paramhdr) +
                    ph = mtod(oper, struct sctp_paramhdr *);
                        htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ph->param_length = htons(oper->m_len);
                    ippp = (u_int32_t *)(ph + 1);
                    *ippp = htonl(0x20000002);
                sctp_abort_an_association(stcb->sctp_ep, stcb,
                    SCTP_PEER_FAULTY, oper);
                sctp_pegs[SCTP_DROP_FRAG]++;
            if (sctp_does_chk_belong_to_reasm(asoc, chk)) {
                MGET(oper, MB_DONTWAIT, MT_DATA);
                    struct sctp_paramhdr *ph;

                        sizeof(struct sctp_paramhdr) +
                        struct sctp_paramhdr *);
                        htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ippp = (u_int32_t *)(ph + 1);
                    *ippp = htonl(0x20000003);
                sctp_abort_an_association(stcb->sctp_ep,
                    stcb, SCTP_PEER_FAULTY, oper);
                sctp_pegs[SCTP_DROP_FRAG]++;
        if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
            /*
             * Reassembly queue is NOT empty;
             * validate that this chk does not need to
             * be in the reassembly queue. If it does then
             * our peer is broken or evil.
             */
            if (sctp_does_chk_belong_to_reasm(asoc, chk)) {
                MGET(oper, MB_DONTWAIT, MT_DATA);
                    struct sctp_paramhdr *ph;

                        sizeof(struct sctp_paramhdr) +
                        struct sctp_paramhdr *);
                        htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ippp = (u_int32_t *)(ph + 1);
                    *ippp = htonl(0x20000004);
                sctp_abort_an_association(stcb->sctp_ep,
                    stcb, SCTP_PEER_FAULTY, oper);
                sctp_pegs[SCTP_DROP_FRAG]++;
        if (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
            /* queue directly into socket buffer */
            sctp_deliver_data(stcb, asoc, chk, 0);
            sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
            /*
             * Special check for when streams are resetting.
             * We could be smarter about this and check the
             * actual stream to see if it is not being reset..
             * that way we would not create a HOLB when amongst
             * streams being reset and those not being reset.
             *
             * We take complete messages that have a stream reset
             * intervening (aka the TSN is after where our cum-ack needs
             * to be) off and put them on a pending_reply_queue. The
             * reassembly ones we do not have to worry about since
             * they are all sorted and processed by TSN order. It
             * is only the singletons I must worry about.
             */
            if ((asoc->pending_reply) &&
                ((compare_with_wrap(tsn, ntohl(asoc->pending_reply->reset_at_tsn), MAX_TSN)) ||
                (tsn == ntohl(asoc->pending_reply->reset_at_tsn)))
                /*
                 * yep it's past where we need to reset... go ahead and
                 */
                TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, chk, sctp_next);
                sctp_queue_data_to_stream(stcb, asoc, chk, abort_flag);
        /* Into the re-assembly queue */
        sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
        sctp_pegs[SCTP_DROP_FRAG]++;
    if (compare_with_wrap(tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
        /* we have a new high score */
        asoc->highest_tsn_inside_map = tsn;
#ifdef SCTP_MAP_LOGGING
        sctp_log_map(0, 2, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
    sctp_pegs[SCTP_PEG_TSNS_RCVD]++;
    /* Set it present please */
#ifdef SCTP_STR_LOGGING
    sctp_log_strm_del_alt(tsn, strmseq, SCTP_STR_LOG_FROM_MARK_TSN);
#ifdef SCTP_MAP_LOGGING
    sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
        asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
    SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2238 sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
2241 * Now we also need to check the mapping array in a couple of ways.
2242 * 1) Did we move the cum-ack point?
2244 struct sctp_association *asoc;
2246 int m_size, all_ones;
2247 int slide_from, slide_end, lgap, distance;
2248 #ifdef SCTP_MAP_LOGGING
2249 uint32_t old_cumack, old_base, old_highest;
2250 unsigned char aux_array[64];
2256 #ifdef SCTP_MAP_LOGGING
2257 old_cumack = asoc->cumulative_tsn;
2258 old_base = asoc->mapping_array_base_tsn;
2259 old_highest = asoc->highest_tsn_inside_map;
2260 if (asoc->mapping_array_size < 64)
2261 memcpy(aux_array, asoc->mapping_array,
2262 asoc->mapping_array_size);
2264 memcpy(aux_array, asoc->mapping_array, 64);
2268 * We could probably improve this a small bit by calculating the
2269 * offset of the current cum-ack as the starting point.
2272 m_size = stcb->asoc.mapping_array_size << 3;
2273 for (i = 0; i < m_size; i++) {
2274 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i)) {
2276 * Ok, we found the first place where we are
2277 * missing a TSN.
2281 asoc->cumulative_tsn = asoc->mapping_array_base_tsn +
2286 if (compare_with_wrap(asoc->cumulative_tsn,
2287 asoc->highest_tsn_inside_map,
2289 panic("huh, cumack greater than high-tsn in map");
2292 (asoc->cumulative_tsn == asoc->highest_tsn_inside_map && at >= 8)) {
2293 /* The complete array was covered by a single FR */
2294 /* highest becomes the cum-ack */
2296 asoc->cumulative_tsn = asoc->highest_tsn_inside_map;
2297 /* clear the array */
2299 clr = asoc->mapping_array_size;
2301 clr = (at >> 3) + 1;
2303 * this should be the all-ones case,
2304 * but just in case :>
2306 if (clr > asoc->mapping_array_size)
2307 clr = asoc->mapping_array_size;
2309 memset(asoc->mapping_array, 0, clr);
2310 /* base becomes one ahead of the cum-ack */
2311 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2312 #ifdef SCTP_MAP_LOGGING
2313 sctp_log_map(old_base, old_cumack, old_highest,
2314 SCTP_MAP_PREPARE_SLIDE);
2315 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2316 asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_CLEARED);
2318 } else if (at >= 8) {
2319 /* we can slide the mapping array down */
2320 /* Calculate the new byte position we can move down */
2321 slide_from = at >> 3;
2322 /* now calculate the ceiling of the move using our highest TSN value */
2323 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
2324 lgap = asoc->highest_tsn_inside_map -
2325 asoc->mapping_array_base_tsn;
2327 lgap = (MAX_TSN - asoc->mapping_array_base_tsn) +
2328 asoc->highest_tsn_inside_map + 1;
2330 slide_end = lgap >> 3;
2331 if (slide_end < slide_from) {
2332 panic("impossible slide");
2334 distance = (slide_end - slide_from) + 1;
2335 #ifdef SCTP_MAP_LOGGING
2336 sctp_log_map(old_base, old_cumack, old_highest,
2337 SCTP_MAP_PREPARE_SLIDE);
2338 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2339 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2341 if (distance + slide_from > asoc->mapping_array_size ||
2344 if (sctp_debug_on & SCTP_DEBUG_INDATA1) {
2345 kprintf("Ugh bad addition.. you can't hrumpp!\n");
2349 * Here we do NOT slide forward the array so that
2350 * hopefully, when more data comes in to fill it up,
2351 * we will be able to slide it forward. Really,
2352 * I don't think this should happen :-0
2355 #ifdef SCTP_MAP_LOGGING
2356 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2357 (uint32_t)asoc->mapping_array_size,
2358 SCTP_MAP_SLIDE_NONE);
2362 for (ii = 0; ii < distance; ii++) {
2363 asoc->mapping_array[ii] =
2364 asoc->mapping_array[slide_from + ii];
2366 for (ii = distance; ii <= slide_end; ii++) {
2367 asoc->mapping_array[ii] = 0;
2369 asoc->mapping_array_base_tsn += (slide_from << 3);
2370 #ifdef SCTP_MAP_LOGGING
2371 sctp_log_map(asoc->mapping_array_base_tsn,
2372 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2373 SCTP_MAP_SLIDE_RESULT);
2378 /* check the special flag for stream resets */
2379 if ((asoc->pending_reply) &&
2380 ((compare_with_wrap((asoc->cumulative_tsn+1), ntohl(asoc->pending_reply->reset_at_tsn), MAX_TSN)) ||
2381 ((asoc->cumulative_tsn+1) == ntohl(asoc->pending_reply->reset_at_tsn)))
2383 /* we have finished working through the backlogged TSNs;
2384 * now it is time to reset streams.
2385 * 1: call reset function.
2386 * 2: free pending_reply space
2387 * 3: distribute any chunks in pending_reply_queue.
2389 struct sctp_tmit_chunk *chk;
2390 sctp_handle_stream_reset_response(stcb, asoc->pending_reply);
2391 FREE(asoc->pending_reply, M_PCB);
2392 asoc->pending_reply = NULL;
2393 chk = TAILQ_FIRST(&asoc->pending_reply_queue);
2395 TAILQ_REMOVE(&asoc->pending_reply_queue, chk, sctp_next);
2396 sctp_queue_data_to_stream(stcb, asoc, chk, abort_flag);
2400 chk = TAILQ_FIRST(&asoc->pending_reply_queue);
2404 * Now we need to see if we need to queue a sack or just start
2405 * the timer (if allowed).
2408 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2410 * Ok, special case: in the SHUTDOWN-SENT state
2411 * we make sure the SACK timer is off and
2412 * instead send a SHUTDOWN and a SACK.
2414 if (callout_pending(&stcb->asoc.dack_timer.timer)) {
2415 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2416 stcb->sctp_ep, stcb, NULL);
2419 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
2420 kprintf("%s:%d sends a shutdown\n",
2426 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
2427 sctp_send_sack(stcb);
2430 /* is there a gap now ? */
2431 is_a_gap = compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2432 stcb->asoc.cumulative_tsn, MAX_TSN);
2433 if ((stcb->asoc.first_ack_sent == 0) || /* First time we send a sack */
2434 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no longer is one */
2435 (stcb->asoc.numduptsns) || /* we have dup's */
2436 (is_a_gap) || /* is still a gap */
2437 (callout_pending(&stcb->asoc.dack_timer.timer)) /* timer was up; second packet */
2440 * Ok, we must build a SACK since the timer
2441 * is pending, we got our first packet, OR
2442 * there are gaps or duplicates.
2444 stcb->asoc.first_ack_sent = 1;
2445 sctp_send_sack(stcb);
2446 /* The sending will stop the timer */
2448 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2449 stcb->sctp_ep, stcb, NULL);
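/*
 * Aside (hypothetical helper, for exposition only): the immediate-
 * SACK decision made above, folded into a single predicate using the
 * same association fields the code tests.
 */
static int
ex_must_sack_now(struct sctp_tcb *stcb, int was_a_gap, int is_a_gap)
{
	return ((stcb->asoc.first_ack_sent == 0) ||	/* very first SACK */
	    (was_a_gap && is_a_gap == 0) ||		/* a gap just closed */
	    (stcb->asoc.numduptsns != 0) ||		/* duplicates to report */
	    is_a_gap ||					/* a gap is still open */
	    callout_pending(&stcb->asoc.dack_timer.timer)); /* 2nd packet */
}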
2456 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc, int hold_locks)
2458 struct sctp_tmit_chunk *chk;
2460 u_int16_t nxt_todel;
2463 if (asoc->fragmented_delivery_inprogress) {
2464 sctp_service_reassembly(stcb, asoc, hold_locks);
2466 /* Can we proceed further, i.e. the PD-API is complete */
2467 if (asoc->fragmented_delivery_inprogress) {
2473 * Yes, reassembly delivery is no longer in progress; see if
2474 * we have some on the sb hold queue.
2477 if (stcb->sctp_socket->so_rcv.ssb_cc >= stcb->sctp_socket->so_rcv.ssb_hiwat) {
2479 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2482 /* If deliver_data says no we must stop */
2483 if (sctp_deliver_data(stcb, asoc, NULL, hold_locks) == 0)
2486 chk = TAILQ_FIRST(&asoc->delivery_queue);
2489 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
2492 * Now, is there some other chunk I can deliver
2493 * from the reassembly queue?
2495 chk = TAILQ_FIRST(&asoc->reasmqueue);
2497 asoc->size_on_reasm_queue = 0;
2498 asoc->cnt_on_reasm_queue = 0;
2501 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2502 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2503 ((nxt_todel == chk->rec.data.stream_seq) ||
2504 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2506 * Yep, the first one is here. We set up to
2507 * start reception by backing down the TSN,
2508 * just in case we can't deliver.
2512 * Before we start, though, either all of the
2513 * message should be here, or 1/4 the socket buffer
2514 * max, or nothing on the delivery queue and something
2515 * can be delivered.
2517 if (TAILQ_EMPTY(&asoc->delivery_queue) &&
2518 (sctp_is_all_msg_on_reasm(asoc, &tsize) ||
2519 (asoc->size_on_reasm_queue >=
2520 (stcb->sctp_socket->so_rcv.ssb_hiwat >> 2) && tsize))) {
2521 asoc->fragmented_delivery_inprogress = 1;
2522 asoc->tsn_last_delivered = chk->rec.data.TSN_seq-1;
2523 asoc->str_of_pdapi = chk->rec.data.stream_number;
2524 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2525 asoc->fragment_flags = chk->rec.data.rcv_flags;
2526 sctp_service_reassembly(stcb, asoc, hold_locks);
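/*
 * Aside (hypothetical helper summarizing the rule above): partial
 * delivery starts only when the ordinary delivery queue is empty and
 * either the whole message has arrived or the queued fragments fill
 * at least a quarter of the receive buffer.
 */
static int
ex_can_start_pdapi(struct sctp_tcb *stcb, struct sctp_association *asoc,
    int whole_msg, int tsize)
{
	if (!TAILQ_EMPTY(&asoc->delivery_queue))
		return (0);
	return (whole_msg || (tsize &&
	    asoc->size_on_reasm_queue >=
	    (stcb->sctp_socket->so_rcv.ssb_hiwat >> 2)));
}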
2532 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2533 struct sctphdr *sh, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2534 struct sctp_nets *net, u_int32_t *high_tsn)
2536 struct sctp_data_chunk *ch, chunk_buf;
2537 struct sctp_association *asoc;
2538 int num_chunks = 0; /* number of DATA chunks processed */
2539 int chk_length, break_flag, last_chunk;
2540 int abort_flag = 0, was_a_gap = 0;
2544 sctp_set_rwnd(stcb, &stcb->asoc);
2548 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
2549 stcb->asoc.cumulative_tsn, MAX_TSN)) {
2550 /* there was a gap before this data was processed */
2554 * setup where we got the last DATA packet from for
2555 * any SACK that may need to go out. Don't bump
2556 * the net. This is done ONLY when a chunk
2557 * is assigned.
2559 asoc->last_data_chunk_from = net;
2562 * Now before we proceed we must figure out if this
2563 * is a wasted cluster... i.e. it is a small packet
2564 * sent in and yet the driver underneath allocated a
2565 * full cluster for it. If so we must copy it to a
2566 * smaller mbuf and free up the cluster mbuf. This
2567 * will help with cluster starvation.
2569 if (m->m_len < (long)MHLEN && m->m_next == NULL) {
2570 /* we only handle mbufs that are singletons.. not chains */
2571 #ifdef __DragonFly__
2572 if ((*mm)->m_flags & M_PKTHDR)
2573 MGETHDR(m, MB_DONTWAIT, MT_HEADER);
2576 MGET(m, MB_DONTWAIT, MT_DATA);
2578 /* ok, let's see if we can copy the data up */
2581 if ((*mm)->m_flags & M_PKTHDR) {
2582 /* got to copy the header first */
2584 M_COPY_PKTHDR(m, (*mm));
2586 M_MOVE_PKTHDR(m, (*mm));
2589 /* get the pointers and copy */
2590 to = mtod(m, caddr_t *);
2591 from = mtod((*mm), caddr_t *);
2592 memcpy(to, from, (*mm)->m_len);
2593 /* copy the length and free up the old */
2594 m->m_len = (*mm)->m_len;
2596 /* success, copy back */
2599 /* We are in trouble in the mbuf world .. yikes */
2603 /* get pointer to the first chunk header */
2604 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2605 sizeof(chunk_buf), (u_int8_t *)&chunk_buf);
2607 kprintf(" ... its short\n");
2611 * process all DATA chunks...
2615 if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2616 kprintf("In process data off:%d length:%d iphlen:%d ch->type:%d\n",
2617 *offset, length, iphlen, (int)ch->ch.chunk_type);
2621 *high_tsn = asoc->cumulative_tsn;
2623 while (ch->ch.chunk_type == SCTP_DATA) {
2624 /* validate chunk length */
2625 chk_length = ntohs(ch->ch.chunk_length);
2626 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1 ||
2627 length - *offset < chk_length) {
2629 * Need to send an abort since we had an invalid
2630 * data chunk.
2632 struct mbuf *op_err;
2633 MGET(op_err, MB_DONTWAIT, MT_DATA);
2635 struct sctp_paramhdr *ph;
2638 op_err->m_len = sizeof(struct sctp_paramhdr) +
2640 ph = mtod(op_err, struct sctp_paramhdr *);
2642 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2643 ph->param_length = htons(op_err->m_len);
2644 ippp = (u_int32_t *)(ph + 1);
2645 *ippp = htonl(0x30000001);
2647 sctp_abort_association(inp, stcb, m, iphlen, sh,
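/*
 * Aside (hypothetical helper restating the length check above): a
 * DATA chunk must be large enough for its header plus at least one
 * byte of user data, and must fit in what remains of the packet.
 */
static int
ex_data_chunk_len_ok(size_t chk_length, int off, int len)
{
	return (chk_length >= sizeof(struct sctp_data_chunk) + 1 &&
	    (int)chk_length <= len - off);
}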
2652 if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2653 kprintf("A chunk of len:%d to process (tot:%d)\n",
2654 chk_length, length - *offset);
2658 #ifdef SCTP_AUDITING_ENABLED
2659 sctp_audit_log(0xB1, 0);
2661 if (SCTP_SIZE32(chk_length) == length - *offset) {
2666 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2667 chk_length, net, high_tsn, &abort_flag, &break_flag,
2671 if (sctp_debug_on & SCTP_DEBUG_INPUT1) {
2672 kprintf("Now incr num_chunks to %d\n",
2682 * Set because of out of rwnd space and no drop rep
2683 * space left.
2688 *offset += SCTP_SIZE32(chk_length);
2689 if (*offset >= length) {
2690 /* no more data left in the mbuf chain */
2693 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2694 sizeof(chunk_buf), (u_int8_t *)&chunk_buf);
2702 * we need to report rwnd overrun drops.
2704 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2708 * Did we get data? If so, update the time for
2709 * auto-close and give the peer credit for being
2710 * alive.
2712 sctp_pegs[SCTP_DATA_DG_RECV]++;
2713 stcb->asoc.overall_error_count = 0;
2714 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2716 /* now service all of the reassm queue and delivery queue */
2717 sctp_service_queues(stcb, asoc, 0);
2718 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2720 * Ensure that we ACK right away by making
2721 * sure that a d-ack timer is running, so
2722 * the sack_check will send a SACK.
2724 sctp_timer_start(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb,
2727 /* Start a sack timer or QUEUE a SACK for sending */
2728 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
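/*
 * Aside (illustration only): chunks are TLVs padded to 32-bit
 * boundaries on the wire, so the walk above advances the offset by
 * SCTP_SIZE32(chk_length) each pass until the packet is exhausted.
 * EX_PAD4 is a stand-in showing the rounding SCTP_SIZE32 performs:
 *
 *	while (*offset < length) {
 *		... validate the chunk header at *offset ...
 *		*offset += EX_PAD4(chk_length);
 *	}
 */
#define EX_PAD4(len)	(((len) + 3) & ~3)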
2736 sctp_handle_segments(struct sctp_tcb *stcb, struct sctp_association *asoc,
2737 struct sctp_sack_chunk *ch, u_long last_tsn, u_long *biggest_tsn_acked,
2738 u_long *biggest_newly_acked_tsn, int num_seg, int *ecn_seg_sums)
2740 /************************************************/
2741 /* process fragments and update sendqueue */
2742 /************************************************/
2743 struct sctp_sack *sack;
2744 struct sctp_gap_ack_block *frag;
2745 struct sctp_tmit_chunk *tp1;
2748 #ifdef SCTP_FR_LOGGING
2751 uint16_t frag_strt, frag_end, primary_flag_set;
2752 u_long last_frag_high;
2754 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
2755 primary_flag_set = 1;
2757 primary_flag_set = 0;
2761 frag = (struct sctp_gap_ack_block *)((caddr_t)sack +
2762 sizeof(struct sctp_sack));
2765 for (i = 0; i < num_seg; i++) {
2766 frag_strt = ntohs(frag->start);
2767 frag_end = ntohs(frag->end);
2768 /* some sanity checks on the fragment offsets */
2769 if (frag_strt > frag_end) {
2770 /* this one is malformed, skip */
2774 if (compare_with_wrap((frag_end+last_tsn), *biggest_tsn_acked,
2776 *biggest_tsn_acked = frag_end+last_tsn;
2778 /* mark acked dgs and find out the highest TSN being acked */
2780 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2782 /* save the locations of the last frags */
2783 last_frag_high = frag_end + last_tsn;
2786 * now let's see if we need to reset the queue
2787 * due to an out-of-order SACK fragment
2789 if (compare_with_wrap(frag_strt+last_tsn,
2790 last_frag_high, MAX_TSN)) {
2792 * if the new frag starts after the last TSN
2793 * frag covered, we are ok
2794 * and this one is beyond the last one
2799 * ok, they have reset us, so we need to reset
2800 * the queue; this will cause extra hunting, but
2801 * hey, they chose the performance
2802 * hit when they failed to order their gaps.
2804 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2806 last_frag_high = frag_end + last_tsn;
2808 for (j = frag_strt + last_tsn; j <= frag_end + last_tsn; j++) {
2810 #ifdef SCTP_FR_LOGGING
2811 if (tp1->rec.data.doing_fast_retransmit)
2815 if (tp1->rec.data.TSN_seq == j) {
2816 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2817 /* must be held until cum-ack passes */
2818 /* ECN Nonce: Add the nonce value to the sender's nonce sum */
2819 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
2821 * If it is less than
2822 * ACKED, it is now
2823 * no longer in flight.
2824 * Higher values may
2825 * already be set via
2826 * previous Gap Ack
2827 * Blocks...
2828 * i.e. ACKED or MARKED.
2830 if (compare_with_wrap(tp1->rec.data.TSN_seq,
2831 *biggest_newly_acked_tsn,
2833 *biggest_newly_acked_tsn =
2834 tp1->rec.data.TSN_seq;
2836 tp1->whoTo->flight_size -= tp1->book_size;
2837 if (tp1->whoTo->flight_size < 0) {
2838 tp1->whoTo->flight_size = 0;
2840 asoc->total_flight -=
2843 if (asoc->total_flight < 0) {
2844 asoc->total_flight = 0;
2847 asoc->total_flight_count--;
2848 if (asoc->total_flight_count < 0) {
2849 asoc->total_flight_count = 0;
2852 if (tp1->snd_count < 2) {
2853 /* True non-retransmitted chunk */
2854 tp1->whoTo->net_ack2 +=
2857 /* update RTO too? */
2860 sctp_calculate_rto(stcb,
2863 &tp1->sent_rcv_time);
2864 tp1->whoTo->rto_pending = 0;
2869 if (tp1->sent <= SCTP_DATAGRAM_RESEND &&
2870 tp1->sent != SCTP_DATAGRAM_UNSENT &&
2871 compare_with_wrap(tp1->rec.data.TSN_seq,
2872 asoc->this_sack_highest_gap,
2874 asoc->this_sack_highest_gap =
2875 tp1->rec.data.TSN_seq;
2876 if (primary_flag_set) {
2877 tp1->whoTo->cacc_saw_newack = 1;
2880 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2883 SCTP_DEBUG_INDATA3) {
2884 kprintf("Hmm. one that is in RESEND that is now ACKED\n");
2887 asoc->sent_queue_retran_cnt--;
2888 #ifdef SCTP_AUDITING_ENABLED
2889 sctp_audit_log(0xB2,
2890 (asoc->sent_queue_retran_cnt & 0x000000ff));
2893 if (asoc->sent_queue_retran_cnt < 0) {
2894 kprintf("huh3 retran went negative?\n");
2895 #ifdef SCTP_AUDITING_ENABLED
2900 asoc->sent_queue_retran_cnt = 0;
2904 (*ecn_seg_sums) += tp1->rec.data.ect_nonce;
2905 (*ecn_seg_sums) &= SCTP_SACK_NONCE_SUM;
2906 tp1->sent = SCTP_DATAGRAM_MARKED;
2909 } /* if (tp1->TSN_seq == j) */
2910 if (compare_with_wrap(tp1->rec.data.TSN_seq, j,
2913 tp1 = TAILQ_NEXT(tp1, sctp_next);
2914 } /* end while (tp1) */
2915 } /* end for (j = fragStart */
2916 frag++; /* next one */
2918 #ifdef SCTP_FR_LOGGING
2920 sctp_log_fr(*biggest_tsn_acked, *biggest_newly_acked_tsn,
2921 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
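/*
 * Aside (hypothetical helper): gap-ack blocks carry 16-bit start/end
 * offsets relative to the SACK's cumulative TSN ack, so the span a
 * block covers is recovered by adding them back onto last_tsn (the
 * cum-ack), just as the loop above does.
 */
static void
ex_gap_block_span(u_long cum_ack, u_int16_t frag_strt, u_int16_t frag_end,
    u_long *first, u_long *last)
{
	*first = cum_ack + frag_strt;	/* first TSN this block acks */
	*last = cum_ack + frag_end;	/* last TSN this block acks */
}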
2926 sctp_check_for_revoked(struct sctp_association *asoc, u_long cum_ack,
2927 u_long biggest_tsn_acked)
2929 struct sctp_tmit_chunk *tp1;
2932 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2934 if (compare_with_wrap(tp1->rec.data.TSN_seq, cum_ack,
2937 * ok, this guy is either ACKED or MARKED. If it is ACKED
2938 * it has been previously acked but not this time, i.e.
2939 * revoked. If it is MARKED it was ACKed again.
2941 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2942 /* it has been revoked */
2944 * We do NOT add back to flight size here since
2945 * it is really NOT in flight. Resend (when/if
2946 * it occurs) will add to flight size.
2948 tp1->sent = SCTP_DATAGRAM_SENT;
2950 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2951 /* it has been re-acked in this SACK */
2952 tp1->sent = SCTP_DATAGRAM_ACKED;
2955 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
2957 /* above the sack */
2960 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2962 tp1 = TAILQ_NEXT(tp1, sctp_next);
2964 if (tot_revoked > 0) {
2965 /* Setup the ecn nonce re-sync point. We
2966 * do this since once data is revoked
2967 * we begin to retransmit things, which
2968 * do NOT have the ECN bits set. This means
2969 * we are now out of sync and must wait until
2970 * we get back in sync with the peer to
2971 * check the ECN bits again.
2973 tp1 = TAILQ_FIRST(&asoc->send_queue);
2975 asoc->nonce_resync_tsn = asoc->sending_seq;
2977 asoc->nonce_resync_tsn = tp1->rec.data.TSN_seq;
2979 asoc->nonce_wait_for_ecne = 0;
2980 asoc->nonce_sum_check = 0;
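/*
 * Aside (hypothetical helper): the revocation rule applied above.
 * A chunk a previous SACK gap-acked (ACKED) but this SACK no longer
 * covers reverts to SENT and counts as revoked; one re-covered this
 * time (MARKED) is promoted back to ACKED. Returns 1 when a chunk
 * was revoked.
 */
static int
ex_apply_revoke_rule(struct sctp_tmit_chunk *tp1)
{
	if (tp1->sent == SCTP_DATAGRAM_ACKED) {
		tp1->sent = SCTP_DATAGRAM_SENT;	/* revoked by this SACK */
		return (1);
	}
	if (tp1->sent == SCTP_DATAGRAM_MARKED)
		tp1->sent = SCTP_DATAGRAM_ACKED; /* re-acked this time */
	return (0);
}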
2985 extern int sctp_peer_chunk_oh;
2988 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2989 u_long biggest_tsn_acked, int strike_enabled,
2990 u_long biggest_tsn_newly_acked, int accum_moved)
2992 struct sctp_tmit_chunk *tp1;
2996 u_int32_t sending_seq;
2997 int primary_switch_active = 0;
2998 int double_switch_active = 0;
3000 /* select the sending_seq; this is
3001 * either the next thing ready to
3002 * be sent but not transmitted, OR
3003 * the next seq we will assign.
3005 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3007 sending_seq = asoc->sending_seq;
3009 sending_seq = tp1->rec.data.TSN_seq;
3012 if (asoc->primary_destination->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3013 primary_switch_active = 1;
3015 if (asoc->primary_destination->dest_state & SCTP_ADDR_DOUBLE_SWITCH) {
3016 double_switch_active = 1;
3018 if (stcb->asoc.peer_supports_prsctp) {
3019 SCTP_GETTIME_TIMEVAL(&now);
3021 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3024 if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
3026 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3030 if ((tp1->flags & (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) ==
3031 SCTP_PR_SCTP_ENABLED &&
3032 tp1->sent < SCTP_DATAGRAM_ACKED) {
3033 /* Is it expired? */
3035 if (timercmp(&now, &tp1->rec.data.timetodrop, >))
3037 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >))
3040 /* Yes so drop it */
3041 if (tp1->data != NULL) {
3042 sctp_release_pr_sctp_chunk(stcb, tp1,
3043 (SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT),
3046 tp1 = TAILQ_NEXT(tp1, sctp_next);
3051 if (compare_with_wrap(tp1->rec.data.TSN_seq,
3052 asoc->this_sack_highest_gap, MAX_TSN)) {
3053 /* we are beyond the tsn in the sack */
3056 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3057 /* either a RESEND, ACKED, or MARKED */
3059 tp1 = TAILQ_NEXT(tp1, sctp_next);
3062 if (primary_switch_active && (strike_enabled == 0)) {
3063 if (tp1->whoTo != asoc->primary_destination) {
3065 * We can only strike things on the primary if
3066 * the strike_enabled flag is clear
3068 tp1 = TAILQ_NEXT(tp1, sctp_next);
3071 } else if (primary_switch_active) {
3072 if (tp1->whoTo->cacc_saw_newack == 0) {
3074 * Only one was received but it was NOT
3077 tp1 = TAILQ_NEXT(tp1, sctp_next);
3081 if (double_switch_active &&
3082 (compare_with_wrap(asoc->primary_destination->next_tsn_at_change,
3083 tp1->rec.data.TSN_seq, MAX_TSN))) {
3085 * With a double switch we do NOT mark unless we
3086 * are beyond the switch point.
3088 tp1 = TAILQ_NEXT(tp1, sctp_next);
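/*
 * Aside (hypothetical helper condensing the filters applied so far):
 * before a chunk can be struck toward fast retransmit it must be at
 * or below the highest gap-acked TSN in this SACK and must not
 * already be RESEND, ACKED, or MARKED; the CACC primary/double-
 * switch tests above add further exclusions.
 */
static int
ex_strike_candidate(struct sctp_association *asoc, struct sctp_tmit_chunk *tp1)
{
	if (compare_with_wrap(tp1->rec.data.TSN_seq,
	    asoc->this_sack_highest_gap, MAX_TSN))
		return (0);	/* beyond what this SACK reported */
	if (tp1->sent >= SCTP_DATAGRAM_RESEND)
		return (0);	/* already resending or acked */
	return (1);
}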
3092 * Here we check to see if we have already done a FR
3093 * and if so we see if the biggest TSN we saw in the sack is
3094 * smaller than the recovery point. If so we don't strike the
3095 * tsn... otherwise we CAN strike the TSN.
3097 if (accum_moved && asoc->fast_retran_loss_recovery) {
3099 * Strike the TSN if in fast-recovery and
3103 } else if (tp1->rec.data.doing_fast_retransmit) {
3105 * For those that have done a FR we must
3106 * take special consideration if we strike. I.e.
3107 * the biggest_newly_acked must be higher
3108 * than the sending_seq at the time we did
3109 * the FR.
3111 #ifdef SCTP_FR_TO_ALTERNATE
3113 * If FR's go to new networks, then we
3114 * must only do this for singly homed asoc's. However
3115 * if the FR's go to the same network (Armando's work)
3116 * then it's ok to FR multiple times.
3118 if (asoc->numnets < 2)
3123 if ((compare_with_wrap(biggest_tsn_newly_acked,
3124 tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||