/*
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_crc32.h>
#include <netinet/sctp_lock_bsd.h>
/*
 * NOTES: On the outbound side of things I need to check the SACK timer to
 * see if I should generate a SACK into the chunk queue (if I have data to
 * send, that is), since it will then go out bundled with the data.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */

static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock);

void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}

/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
uint32_t calc = 0;
/*
 * This is really set wrong with respect to a 1-to-many socket, since
 * sb_cc is the count that everyone has put up. When we rewrite
 * sctp_soreceive we will fix this so that ONLY this association's
 * data is taken into account.
 */
if (stcb->sctp_socket == NULL) {
return (calc);
}
KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0,
    ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue));
KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0,
    ("size_on_all_streams is %u", asoc->size_on_all_streams));
if (stcb->asoc.sb_cc == 0 &&
    asoc->cnt_on_reasm_queue == 0 &&
    asoc->cnt_on_all_streams == 0) {
/* Full rwnd granted */
calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
return (calc);
}
/* get actual space */
calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
/*
 * take out what has NOT been put on socket queue and we yet hold
 * for putting up.
 */
calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue +
    asoc->cnt_on_reasm_queue * MSIZE));
calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams +
    asoc->cnt_on_all_streams * MSIZE));
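/*
 * Worked example (numbers illustrative only): with a 64 KB receive
 * buffer and 8 KB of data parked on the reassembly and in-stream
 * queues, the two subtractions above also charge cnt * MSIZE of
 * per-chunk mbuf overhead, so the advertised window only covers
 * what we can actually still queue for the reader.
 */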
/* what is the overhead of all these rwnd's */
calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
/*
 * If the window gets too small due to ctrl-stuff, reduce it to 1,
 * even if it is 0. SWS (silly window syndrome) avoidance is engaged.
 */
if (calc < stcb->asoc.my_rwnd_control_len) {

/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t sid,
    uint32_t mid, uint8_t flags,
    struct mbuf *dm)
{
struct sctp_queued_to_read *read_queue_e = NULL;
sctp_alloc_a_readq(stcb, read_queue_e);
if (read_queue_e == NULL) {
goto failed_build;
}
memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read));
read_queue_e->sinfo_stream = sid;
read_queue_e->sinfo_flags = (flags << 8);
read_queue_e->sinfo_ppid = ppid;
read_queue_e->sinfo_context = context;
read_queue_e->sinfo_tsn = tsn;
read_queue_e->sinfo_cumtsn = tsn;
read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
read_queue_e->mid = mid;
read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff;
TAILQ_INIT(&read_queue_e->reasm);
read_queue_e->whoFrom = net;
atomic_add_int(&net->ref_count, 1);
read_queue_e->data = dm;
read_queue_e->stcb = stcb;
read_queue_e->port_from = stcb->rport;
if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
read_queue_e->do_not_ref_stcb = 1;
}
failed_build:
return (read_queue_e);
}

static struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
struct sctp_extrcvinfo *seinfo;
struct sctp_sndrcvinfo *outinfo;
struct sctp_rcvinfo *rcvinfo;
struct sctp_nxtinfo *nxtinfo;
struct cmsghdr *cmh;
struct mbuf *ret;
int len = 0;
if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
/* user does not want any ancillary data */
return (NULL);
}
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
}
seinfo = (struct sctp_extrcvinfo *)sinfo;
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
}
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
} else {
len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
}
}
ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
SCTP_BUF_LEN(ret) = 0;
/* We need a CMSG header followed by the struct */
cmh = mtod(ret, struct cmsghdr *);
/*
 * Make sure that there is no un-initialized padding between the
 * cmsg header and cmsg data and after the cmsg data.
 */
memset(cmh, 0, len);
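/*
 * Layout sketch (sizes illustrative): each ancillary object below
 * is written at the current cmh, its cmsg_len is set with
 * CMSG_LEN(sizeof payload), and cmh then advances by
 * CMSG_SPACE(sizeof payload) so the next header starts at an
 * aligned offset; SCTP_BUF_LEN(ret) grows by the same CMSG_SPACE
 * amount.
 */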
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
cmh->cmsg_level = IPPROTO_SCTP;
cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
cmh->cmsg_type = SCTP_RCVINFO;
rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
rcvinfo->rcv_sid = sinfo->sinfo_stream;
rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
rcvinfo->rcv_flags = sinfo->sinfo_flags;
rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
rcvinfo->rcv_context = sinfo->sinfo_context;
rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
}
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
    (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
cmh->cmsg_level = IPPROTO_SCTP;
cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
cmh->cmsg_type = SCTP_NXTINFO;
nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
nxtinfo->nxt_sid = seinfo->serinfo_next_stream;
nxtinfo->nxt_flags = 0;
if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
nxtinfo->nxt_flags |= SCTP_UNORDERED;
}
if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
}
if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
nxtinfo->nxt_flags |= SCTP_COMPLETE;
}
nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid;
nxtinfo->nxt_length = seinfo->serinfo_next_length;
nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid;
cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
}
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
cmh->cmsg_level = IPPROTO_SCTP;
outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
cmh->cmsg_type = SCTP_EXTRCV;
memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
} else {
cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
cmh->cmsg_type = SCTP_SNDRCV;
memcpy(outinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));

void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
uint32_t gap, i;
int in_r, in_nr;
if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
return;
}
if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
/*
 * This TSN is behind the cum ack and thus we don't need to
 * worry about it being moved from one map to the other.
 */
return;
}
SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap);
in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap);
KASSERT(in_r || in_nr, ("%s: Things are really messed up now", __func__));
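/*
 * Example (illustrative): with mapping_array_base_tsn = 1000 and
 * tsn = 1003 the gap is 3, i.e. bit 3 of the mapping arrays
 * describes this TSN; the calculation is done modulo 2^32, so it
 * stays correct across a TSN wrap.
 */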
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
asoc->highest_tsn_inside_nr_map = tsn;
}
SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
if (tsn == asoc->highest_tsn_inside_map) {
/* We must back down to see what the new highest is. */
for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
asoc->highest_tsn_inside_map = i;
break;
}
}
if (!SCTP_TSN_GE(i, asoc->mapping_array_base_tsn)) {
asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
}
}

static int
sctp_place_control_in_stream(struct sctp_stream_in *strm,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control)
{
struct sctp_queued_to_read *at;
struct sctp_readhead *q;
uint8_t flags, unordered;
flags = (control->sinfo_flags >> 8);
unordered = flags & SCTP_DATA_UNORDERED;
if (unordered) {
q = &strm->uno_inqueue;
if (asoc->idata_supported == 0) {
if (!TAILQ_EMPTY(q)) {
/*
 * Only one stream can be here in old style -- abort.
 */
return (-1);
}
TAILQ_INSERT_TAIL(q, control, next_instrm);
control->on_strm_q = SCTP_ON_UNORDERED;
return (0);
}
}
if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
control->end_added = 1;
control->first_frag_seen = 1;
control->last_frag_seen = 1;
}
if (TAILQ_EMPTY(q)) {
TAILQ_INSERT_HEAD(q, control, next_instrm);
if (unordered) {
control->on_strm_q = SCTP_ON_UNORDERED;
} else {
control->on_strm_q = SCTP_ON_ORDERED;
}
} else {
TAILQ_FOREACH(at, q, next_instrm) {
if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) {
/*
 * one in queue is bigger than the new one,
 * insert before this one
 */
TAILQ_INSERT_BEFORE(at, control, next_instrm);
if (unordered) {
control->on_strm_q = SCTP_ON_UNORDERED;
} else {
control->on_strm_q = SCTP_ON_ORDERED;
}
break;
} else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) {
/*
 * Gak, he sent me a duplicate msg id
 * number?? Return -1 to abort.
 */
return (-1);
} else {
if (TAILQ_NEXT(at, next_instrm) == NULL) {
/*
 * We are at the end, insert it
 * after this one
 */
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del(control, at,
    SCTP_STR_LOG_FROM_INSERT_TL);
}
TAILQ_INSERT_AFTER(q, at, control, next_instrm);
if (unordered) {
control->on_strm_q = SCTP_ON_UNORDERED;
} else {
control->on_strm_q = SCTP_ON_ORDERED;
}
break;

static void
sctp_abort_in_reasm(struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag, int opspot)
{
char msg[SCTP_DIAG_INFO_LEN];
struct mbuf *oper;
if (stcb->asoc.idata_supported) {
SCTP_SNPRINTF(msg, sizeof(msg),
    "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x",
    opspot,
    control->fsn_included,
    chk->rec.data.tsn,
    chk->rec.data.sid,
    chk->rec.data.fsn, chk->rec.data.mid);
} else {
SCTP_SNPRINTF(msg, sizeof(msg),
    "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x",
    opspot,
    control->fsn_included,
    chk->rec.data.tsn,
    chk->rec.data.sid,
    (uint16_t)chk->rec.data.fsn,
    (uint16_t)chk->rec.data.mid);
}
oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
sctp_m_freem(chk->data);
chk->data = NULL;
sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
sctp_abort_an_association(stcb->sctp_ep, stcb, oper, false, SCTP_SO_NOT_LOCKED);
*abort_flag = 1;

static void
sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control)
{
/*
 * The control could not be placed and must be cleaned.
 */
struct sctp_tmit_chunk *chk, *nchk;
TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
TAILQ_REMOVE(&control->reasm, chk, sctp_next);
sctp_m_freem(chk->data);
chk->data = NULL;
sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
}
sctp_free_remote_addr(control->whoFrom);
if (control->data) {
sctp_m_freem(control->data);
control->data = NULL;
}
sctp_free_a_readq(stcb, control);

/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order, as long
 * as the controls entered are non-fragmented.
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm)
{
/*
 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
 * all the data in one stream this could happen quite rapidly. One
 * could use the TSN to keep track of things, but this scheme breaks
 * down in the other type of stream usage that could occur. Send a
 * single msg to stream 0, send 4 billion messages to stream 1, now
 * send a message to stream 0. You have a situation where the TSN
 * has wrapped but not in the stream. Is this worth worrying about
 * or should we just change our queue sort at the bottom to be by
 * TSN?
 *
 * Could it also be legal for a peer to send ssn 1 with TSN 2 and
 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN
 * assignment this could happen... and I don't see how this would be
 * a violation. So for now I am undecided and will leave the sort by
 * SSN alone. Maybe a hybrid approach is the answer.
 */
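/*
 * Concrete wrap case from the note above: deliver one message on
 * stream 0, push the 32-bit TSN space all the way around on stream
 * 1, then send on stream 0 again; stream 0's SSNs are still tiny
 * while the TSNs have wrapped, so sorting stream 0 by TSN would
 * mis-order it.
 */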
struct sctp_queued_to_read *at;
uint32_t nxt_todel;
struct mbuf *op_err;
struct sctp_stream_in *strm;
char msg[SCTP_DIAG_INFO_LEN];
strm = &asoc->strmin[control->sinfo_stream];
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
}
if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) {
/* The incoming sseq is behind where we last delivered? */
SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n",
    strm->last_mid_delivered, control->mid);
/*
 * throw it in the stream so it gets cleaned up in
 * association destruction
 */
TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm);
if (asoc->idata_supported) {
SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
    strm->last_mid_delivered, control->sinfo_tsn,
    control->sinfo_stream, control->mid);
} else {
SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
    (uint16_t)strm->last_mid_delivered,
    control->sinfo_tsn,
    control->sinfo_stream,
    (uint16_t)control->mid);
}
op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
*abort_flag = 1;
return;
}
asoc->size_on_all_streams += control->length;
sctp_ucount_incr(asoc->cnt_on_all_streams);
nxt_todel = strm->last_mid_delivered + 1;
if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
/* can be delivered right away? */
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
}
/* EY it won't be queued if it could be delivered directly */
if (asoc->size_on_all_streams >= control->length) {
asoc->size_on_all_streams -= control->length;
} else {
#ifdef INVARIANTS
panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
asoc->size_on_all_streams = 0;
#endif
}
sctp_ucount_decr(asoc->cnt_on_all_streams);
strm->last_mid_delivered++;
sctp_mark_non_revokable(asoc, control->sinfo_tsn);
sctp_add_to_readq(stcb->sctp_ep, stcb,
    control,
    &stcb->sctp_socket->so_rcv, 1,
    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) {
nxt_todel = strm->last_mid_delivered + 1;
if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) &&
    (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) {
if (control->on_strm_q == SCTP_ON_ORDERED) {
TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
if (asoc->size_on_all_streams >= control->length) {
asoc->size_on_all_streams -= control->length;
} else {
#ifdef INVARIANTS
panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
asoc->size_on_all_streams = 0;
#endif
}
sctp_ucount_decr(asoc->cnt_on_all_streams);
#ifdef INVARIANTS
} else {
panic("Huh control: %p is on_strm_q: %d",
    control, control->on_strm_q);
#endif
}
control->on_strm_q = 0;
strm->last_mid_delivered++;
/*
 * We ignore the return of deliver_data here
 * since we always can hold the chunk on the
 * d-queue. And we have a finite number that
 * can be delivered from the strq.
 */
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
sctp_log_strm_del(control, NULL,
    SCTP_STR_LOG_FROM_IMMED_DEL);
}
sctp_mark_non_revokable(asoc, control->sinfo_tsn);
sctp_add_to_readq(stcb->sctp_ep, stcb,
    control,
    &stcb->sctp_socket->so_rcv, 1,
    SCTP_READ_LOCK_NOT_HELD,
    SCTP_SO_LOCKED);
continue;
} else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) {
*need_reasm = 1;
}
break;
}
/*
 * Ok, we did not deliver this guy, find the correct place
 * to put it on the queue.
 */
if (sctp_place_control_in_stream(strm, asoc, control)) {
SCTP_SNPRINTF(msg, sizeof(msg),
    "Queue to str MID: %u duplicate", control->mid);
sctp_clean_up_control(stcb, control);
op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
*abort_flag = 1;
}

static void
sctp_setup_tail_pointer(struct sctp_queued_to_read *control)
{
struct mbuf *m, *prev = NULL;
struct sctp_tcb *stcb;
stcb = control->stcb;
control->held_length = 0;
control->length = 0;
m = control->data;
while (m) {
if (SCTP_BUF_LEN(m) == 0) {
/* Skip mbufs with NO length */
if (prev == NULL) {
control->data = sctp_m_free(m);
m = control->data;
} else {
SCTP_BUF_NEXT(prev) = sctp_m_free(m);
m = SCTP_BUF_NEXT(prev);
}
if (m == NULL) {
control->tail_mbuf = prev;
}
continue;
}
prev = m;
atomic_add_int(&control->length, SCTP_BUF_LEN(m));
if (control->on_read_q) {
/*
 * On read queue so we must increment the SB stuff,
 * we assume caller has done any locks of SB.
 */
sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
}
m = SCTP_BUF_NEXT(m);
}
if (prev) {
control->tail_mbuf = prev;
}

static void
sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added)
{
struct mbuf *prev = NULL;
struct sctp_tcb *stcb;
stcb = control->stcb;
if (stcb == NULL) {
#ifdef INVARIANTS
panic("Control broken");
#else
return;
#endif
}
if (control->tail_mbuf == NULL) {
/* TSNH */
sctp_m_freem(control->data);
control->data = m;
sctp_setup_tail_pointer(control);
return;
}
control->tail_mbuf->m_next = m;
while (m) {
if (SCTP_BUF_LEN(m) == 0) {
/* Skip mbufs with NO length */
if (prev == NULL) {
control->tail_mbuf->m_next = sctp_m_free(m);
m = control->tail_mbuf->m_next;
} else {
SCTP_BUF_NEXT(prev) = sctp_m_free(m);
m = SCTP_BUF_NEXT(prev);
}
if (m == NULL) {
control->tail_mbuf = prev;
}
continue;
}
prev = m;
if (control->on_read_q) {
/*
 * On read queue so we must increment the SB stuff,
 * we assume caller has done any locks of SB.
 */
sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m);
}
*added += SCTP_BUF_LEN(m);
atomic_add_int(&control->length, SCTP_BUF_LEN(m));
m = SCTP_BUF_NEXT(m);
}
if (prev) {
control->tail_mbuf = prev;
}

static void
sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control)
{
memset(nc, 0, sizeof(struct sctp_queued_to_read));
nc->sinfo_stream = control->sinfo_stream;
nc->mid = control->mid;
TAILQ_INIT(&nc->reasm);
nc->top_fsn = control->top_fsn;
nc->sinfo_flags = control->sinfo_flags;
nc->sinfo_ppid = control->sinfo_ppid;
nc->sinfo_context = control->sinfo_context;
nc->fsn_included = 0xffffffff;
nc->sinfo_tsn = control->sinfo_tsn;
nc->sinfo_cumtsn = control->sinfo_cumtsn;
nc->sinfo_assoc_id = control->sinfo_assoc_id;
nc->whoFrom = control->whoFrom;
atomic_add_int(&nc->whoFrom->ref_count, 1);
nc->stcb = control->stcb;
nc->port_from = control->port_from;
nc->do_not_ref_stcb = control->do_not_ref_stcb;
}
static void
sctp_reset_a_control(struct sctp_queued_to_read *control,
    struct sctp_inpcb *inp, uint32_t tsn)
{
control->fsn_included = tsn;
if (control->on_read_q) {
/*
 * We have to purge it from there, hopefully this will work.
 */
TAILQ_REMOVE(&inp->read_queue, control, next);
control->on_read_q = 0;
}
}

static int
sctp_handle_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_stream_in *strm,
    struct sctp_queued_to_read *control,
    uint32_t pd_point,
    int inp_read_lock_held)
{
/*
 * Special handling for the old un-ordered data chunk. All the
 * chunks/TSN's go to mid 0. So we have to do the old style watching
 * to see if we have it all. If you return one, no other control
 * entries on the un-ordered queue will be looked at. In theory
 * there should be no other entries in reality, unless the guy is
 * sending both unordered NDATA and unordered DATA...
 */
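/*
 * Put differently: pre-I-DATA unordered fragments carry no usable
 * stream sequence number, so every unordered fragment of the
 * association lands on this single MID-0 control and must be
 * stitched together purely by FSN/TSN order.
 */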
struct sctp_tmit_chunk *chk, *lchk, *tchk;
uint32_t fsn;
struct sctp_queued_to_read *nc;
int cnt_added;
if (control->first_frag_seen == 0) {
/* Nothing we can do, we have not seen the first piece yet */
return (1);
}
/* Collapse any we can */
cnt_added = 0;
restart:
fsn = control->fsn_included + 1;
/* Now what can we add? */
TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) {
if (chk->rec.data.fsn == fsn) {
/* Ok lets add it */
sctp_alloc_a_readq(stcb, nc);
if (nc == NULL) {
break;
}
memset(nc, 0, sizeof(struct sctp_queued_to_read));
TAILQ_REMOVE(&control->reasm, chk, sctp_next);
sctp_add_chk_to_control(control, strm, stcb, asoc, chk, inp_read_lock_held);
fsn++;
cnt_added++;
if (control->end_added) {
if (!TAILQ_EMPTY(&control->reasm)) {
/*
 * Ok we have to move anything left
 * on the control queue to a new
 * control.
 */
sctp_build_readq_entry_from_ctl(nc, control);
tchk = TAILQ_FIRST(&control->reasm);
if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
if (asoc->size_on_reasm_queue >= tchk->send_size) {
asoc->size_on_reasm_queue -= tchk->send_size;
} else {
#ifdef INVARIANTS
panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size);
#else
asoc->size_on_reasm_queue = 0;
#endif
}
sctp_ucount_decr(asoc->cnt_on_reasm_queue);
nc->first_frag_seen = 1;
nc->fsn_included = tchk->rec.data.fsn;
nc->data = tchk->data;
nc->sinfo_ppid = tchk->rec.data.ppid;
nc->sinfo_tsn = tchk->rec.data.tsn;
sctp_mark_non_revokable(asoc, tchk->rec.data.tsn);
tchk->data = NULL;
sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED);
sctp_setup_tail_pointer(nc);
tchk = TAILQ_FIRST(&control->reasm);
}
/* Spin the rest onto the queue */
while (tchk) {
TAILQ_REMOVE(&control->reasm, tchk, sctp_next);
TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next);
tchk = TAILQ_FIRST(&control->reasm);
}
/*
 * Now let's add it to the queue
 * after removing control
 */
TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm);
nc->on_strm_q = SCTP_ON_UNORDERED;
if (control->on_strm_q) {
TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
control->on_strm_q = 0;
}
}
if (control->pdapi_started) {
strm->pd_api_started = 0;
control->pdapi_started = 0;
}
if (control->on_strm_q) {
TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
control->on_strm_q = 0;
SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
}
if (control->on_read_q == 0) {
sctp_add_to_readq(stcb->sctp_ep, stcb, control,
    &stcb->sctp_socket->so_rcv, control->end_added,
    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
}
sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) {
/*
 * Switch to the new guy and
 * continue
 */
control = nc;
goto restart;
} else {
if (nc->on_strm_q == 0) {
sctp_free_a_readq(stcb, nc);
}
}
return (1);
} else {
sctp_free_a_readq(stcb, nc);
}
} else {
/* Can't add more */
break;
}
}
if (cnt_added && strm->pd_api_started) {
sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
}
if ((control->length > pd_point) && (strm->pd_api_started == 0)) {
strm->pd_api_started = 1;
control->pdapi_started = 1;
sctp_add_to_readq(stcb->sctp_ep, stcb, control,
    &stcb->sctp_socket->so_rcv, control->end_added,
    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
return (0);
}
return (1);
}

static void
sctp_inject_old_unordered_data(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int *abort_flag)
{
struct sctp_tmit_chunk *at;
int inserted;
/*
 * Here we need to place the chunk into the control structure sorted
 * in the correct order.
 */
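/*
 * For old DATA chunks the FSN is simply the TSN (see the comments
 * further below), so "sorted" here means sorted by TSN using
 * serial-number arithmetic (SCTP_TSN_GT).
 */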
if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
/* It's the very first one. */
SCTPDBG(SCTP_DEBUG_XXX,
    "chunk is a first fsn: %u becomes fsn_included\n",
    chk->rec.data.fsn);
at = TAILQ_FIRST(&control->reasm);
if (at && SCTP_TSN_GT(chk->rec.data.fsn, at->rec.data.fsn)) {
/*
 * The first chunk in the reassembly is a smaller
 * TSN than this one, even though this has a first,
 * it must be from a subsequent msg.
 */
goto place_chunk;
}
if (control->first_frag_seen) {
/*
 * In old un-ordered we can reassemble multiple
 * messages on one control. As long as the next
 * FIRST is greater than the old first (TSN-, i.e.
 * FSN-wise) we can add it.
 */
struct mbuf *tdata;
uint32_t tmp;
if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) {
/*
 * Easy way the start of a new guy beyond
 * the lowest
 */
goto place_chunk;
}
if ((chk->rec.data.fsn == control->fsn_included) ||
    (control->pdapi_started)) {
/*
 * Ok this should not happen, if it does we
 * started the pd-api on the higher TSN
 * (since the equals part is a TSN failure
 * it must be that).
 *
 * We are completely hosed in that case
 * since I have no way to recover. This
 * really will only happen if we can get
 * more TSN's higher before the
 * pd-api-point.
 */
sctp_abort_in_reasm(stcb, control, chk,
    abort_flag,
    SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
return;
}
/*
 * Ok we have two firsts and the one we just got is
 * smaller than the one we previously placed... yuck!
 * We must swap them out.
 */
/* swap the mbufs */
tdata = control->data;
control->data = chk->data;
chk->data = tdata;
/* Save the lengths */
chk->send_size = control->length;
/* Recompute length of control and tail pointer */
sctp_setup_tail_pointer(control);
/* Fix the FSN included */
tmp = control->fsn_included;
control->fsn_included = chk->rec.data.fsn;
chk->rec.data.fsn = tmp;
/* Fix the TSN included */
tmp = control->sinfo_tsn;
control->sinfo_tsn = chk->rec.data.tsn;
chk->rec.data.tsn = tmp;
/* Fix the PPID included */
tmp = control->sinfo_ppid;
control->sinfo_ppid = chk->rec.data.ppid;
chk->rec.data.ppid = tmp;
/* Fix tail pointer */
goto place_chunk;
}
control->first_frag_seen = 1;
control->fsn_included = chk->rec.data.fsn;
control->top_fsn = chk->rec.data.fsn;
control->sinfo_tsn = chk->rec.data.tsn;
control->sinfo_ppid = chk->rec.data.ppid;
control->data = chk->data;
sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
chk->data = NULL;
sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
sctp_setup_tail_pointer(control);
return;
}
place_chunk:
inserted = 0;
TAILQ_FOREACH(at, &control->reasm, sctp_next) {
if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
/*
 * This one in queue is bigger than the new one,
 * insert the new one before at.
 */
asoc->size_on_reasm_queue += chk->send_size;
sctp_ucount_incr(asoc->cnt_on_reasm_queue);
inserted = 1;
TAILQ_INSERT_BEFORE(at, chk, sctp_next);
break;
} else if (at->rec.data.fsn == chk->rec.data.fsn) {
/*
 * They sent a duplicate fsn number. This really
 * should not happen since the FSN is a TSN and it
 * should have been dropped earlier.
 */
sctp_abort_in_reasm(stcb, control, chk,
    abort_flag,
    SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
return;
}
}
if (inserted == 0) {
/* It's at the end */
asoc->size_on_reasm_queue += chk->send_size;
sctp_ucount_incr(asoc->cnt_on_reasm_queue);
control->top_fsn = chk->rec.data.fsn;
TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
}
}

static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_stream_in *strm, int inp_read_lock_held)
{
/*
 * Given a stream, strm, see if any of the SSN's on it that are
 * fragmented are ready to deliver. If so go ahead and place them on
 * the read queue. In so placing if we have hit the end, then we
 * need to remove them from the stream's queue.
 */
struct sctp_queued_to_read *control, *nctl = NULL;
uint32_t next_to_del;
uint32_t pd_point;
int done;
if (stcb->sctp_socket) {
pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
    stcb->sctp_ep->partial_delivery_point);
} else {
pd_point = stcb->sctp_ep->partial_delivery_point;
}
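/*
 * pd_point is the partial-delivery threshold: the smaller of a
 * fraction of the receive buffer (the SB limit shifted right by
 * SCTP_PARTIAL_DELIVERY_SHIFT) and the endpoint's configured
 * partial_delivery_point; a message must reach this size before a
 * PD-API read is started on it.
 */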
control = TAILQ_FIRST(&strm->uno_inqueue);
if ((control != NULL) &&
    (asoc->idata_supported == 0)) {
/* Special handling needed for "old" data format */
if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) {
goto done_un;
}
}
if (strm->pd_api_started) {
/* Can't add more */
return;
}
while (control) {
SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n",
    control, control->end_added, control->mid, control->top_fsn, control->fsn_included);
nctl = TAILQ_NEXT(control, next_instrm);
if (control->end_added) {
/* We just put the last bit on */
if (control->on_strm_q) {
#ifdef INVARIANTS
if (control->on_strm_q != SCTP_ON_UNORDERED) {
panic("Huh control: %p on_q: %d -- not unordered?",
    control, control->on_strm_q);
}
#endif
SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
if (asoc->size_on_all_streams >= control->length) {
asoc->size_on_all_streams -= control->length;
} else {
#ifdef INVARIANTS
panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
asoc->size_on_all_streams = 0;
#endif
}
sctp_ucount_decr(asoc->cnt_on_all_streams);
control->on_strm_q = 0;
}
if (control->on_read_q == 0) {
sctp_add_to_readq(stcb->sctp_ep, stcb,
    control,
    &stcb->sctp_socket->so_rcv, control->end_added,
    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
}
} else {
/* Can we do a PD-API for this un-ordered guy? */
if ((control->length >= pd_point) && (strm->pd_api_started == 0)) {
strm->pd_api_started = 1;
control->pdapi_started = 1;
sctp_add_to_readq(stcb->sctp_ep, stcb,
    control,
    &stcb->sctp_socket->so_rcv, control->end_added,
    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
break;
}
}
control = nctl;
}
done_un:
control = TAILQ_FIRST(&strm->inqueue);
if (strm->pd_api_started) {
/* Can't add more */
return;
}
if (control == NULL) {
return;
}
if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) {
/*
 * Ok the guy at the top was being partially delivered and
 * has completed, so we remove it. Note the pd_api flag was
 * taken off when the chunk was merged on in
 * sctp_queue_data_for_reasm below.
 */
nctl = TAILQ_NEXT(control, next_instrm);
SCTPDBG(SCTP_DEBUG_XXX,
    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n",
    control, control->end_added, control->mid,
    control->top_fsn, control->fsn_included,
    strm->last_mid_delivered);
if (control->end_added) {
if (control->on_strm_q) {
#ifdef INVARIANTS
if (control->on_strm_q != SCTP_ON_ORDERED) {
panic("Huh control: %p on_q: %d -- not ordered?",
    control, control->on_strm_q);
}
#endif
SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
if (asoc->size_on_all_streams >= control->length) {
asoc->size_on_all_streams -= control->length;
} else {
#ifdef INVARIANTS
panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
asoc->size_on_all_streams = 0;
#endif
}
sctp_ucount_decr(asoc->cnt_on_all_streams);
control->on_strm_q = 0;
}
if (strm->pd_api_started && control->pdapi_started) {
control->pdapi_started = 0;
strm->pd_api_started = 0;
}
if (control->on_read_q == 0) {
sctp_add_to_readq(stcb->sctp_ep, stcb,
    control,
    &stcb->sctp_socket->so_rcv, control->end_added,
    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
}
control = nctl;
}
}
if (strm->pd_api_started) {
/*
 * Can't add more; we must have gotten an un-ordered chunk
 * above that is being partially delivered.
 */
return;
}
deliver_more:
next_to_del = strm->last_mid_delivered + 1;
if (control) {
SCTPDBG(SCTP_DEBUG_XXX,
    "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n",
    control, control->end_added, control->mid, control->top_fsn, control->fsn_included,
    next_to_del);
nctl = TAILQ_NEXT(control, next_instrm);
if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) &&
    (control->first_frag_seen)) {
/* Ok we can deliver it onto the stream. */
if (control->end_added) {
/* We are done with it afterwards */
if (control->on_strm_q) {
#ifdef INVARIANTS
if (control->on_strm_q != SCTP_ON_ORDERED) {
panic("Huh control: %p on_q: %d -- not ordered?",
    control, control->on_strm_q);
}
#endif
SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
if (asoc->size_on_all_streams >= control->length) {
asoc->size_on_all_streams -= control->length;
} else {
#ifdef INVARIANTS
panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
asoc->size_on_all_streams = 0;
#endif
}
sctp_ucount_decr(asoc->cnt_on_all_streams);
control->on_strm_q = 0;
}
}
if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
/*
 * A singleton now slipping through - mark
 * it non-revokable too
 */
sctp_mark_non_revokable(asoc, control->sinfo_tsn);
} else if (control->end_added == 0) {
/*
 * Check if we can defer adding until it's
 * all there
 */
if ((control->length < pd_point) || (strm->pd_api_started)) {
/*
 * Don't need it or cannot add more
 * (one being delivered that way)
 */
goto out;
}
}
done = (control->end_added) && (control->last_frag_seen);
if (control->on_read_q == 0) {
if (!done) {
if (asoc->size_on_all_streams >= control->length) {
asoc->size_on_all_streams -= control->length;
} else {
#ifdef INVARIANTS
panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
asoc->size_on_all_streams = 0;
#endif
}
strm->pd_api_started = 1;
control->pdapi_started = 1;
}
sctp_add_to_readq(stcb->sctp_ep, stcb,
    control,
    &stcb->sctp_socket->so_rcv, control->end_added,
    inp_read_lock_held, SCTP_SO_NOT_LOCKED);
}
strm->last_mid_delivered = next_to_del;
if (done) {
control = nctl;
goto deliver_more;
}
}
}
out:

static uint32_t
sctp_add_chk_to_control(struct sctp_queued_to_read *control,
    struct sctp_stream_in *strm,
    struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int hold_rlock)
{
/*
 * Given a control and a chunk, merge the data from the chk onto the
 * control and free up the chunk resources.
 */
uint32_t added = 0;
int i_locked = 0;
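/*
 * Merge sketch: the chunk's mbuf chain is appended to control->data
 * (or becomes it), the reassembly-queue accounting shrinks by the
 * chunk's send_size, and FIRST/LAST flags drive first_frag_seen and
 * end_added before the emptied chunk shell is freed; the return
 * value is the number of bytes added to the control.
 */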
if (control->on_read_q && (hold_rlock == 0)) {
/*
 * It's being pd-api'd so we must do some locks.
 */
SCTP_INP_READ_LOCK(stcb->sctp_ep);
i_locked = 1;
}
if (control->data == NULL) {
control->data = chk->data;
sctp_setup_tail_pointer(control);
} else {
sctp_add_to_tail_pointer(control, chk->data, &added);
}
control->fsn_included = chk->rec.data.fsn;
asoc->size_on_reasm_queue -= chk->send_size;
sctp_ucount_decr(asoc->cnt_on_reasm_queue);
sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
chk->data = NULL;
if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
control->first_frag_seen = 1;
control->sinfo_tsn = chk->rec.data.tsn;
control->sinfo_ppid = chk->rec.data.ppid;
}
if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
if ((control->on_strm_q) && (control->on_read_q)) {
if (control->pdapi_started) {
control->pdapi_started = 0;
strm->pd_api_started = 0;
}
if (control->on_strm_q == SCTP_ON_UNORDERED) {
/* Unordered */
TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
control->on_strm_q = 0;
} else if (control->on_strm_q == SCTP_ON_ORDERED) {
/* Ordered */
TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
/*
 * Don't need to decrement
 * size_on_all_streams, since control is on
 * the read queue.
 */
sctp_ucount_decr(asoc->cnt_on_all_streams);
control->on_strm_q = 0;
#ifdef INVARIANTS
} else if (control->on_strm_q) {
panic("Unknown state on ctrl: %p on_strm_q: %d", control,
    control->on_strm_q);
#endif
}
}
control->end_added = 1;
control->last_frag_seen = 1;
}
if (i_locked) {
SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
}
sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
return (added);
}

/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control,
    struct sctp_tmit_chunk *chk,
    int created_control,
    int *abort_flag, uint32_t tsn)
{
uint32_t next_fsn;
struct sctp_tmit_chunk *at, *nat;
struct sctp_stream_in *strm;
int do_wakeup, unordered;
uint32_t lenadded;
strm = &asoc->strmin[control->sinfo_stream];
/*
 * For old un-ordered data chunks.
 */
if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
unordered = 1;
} else {
unordered = 0;
}
/* Must be added to the stream-in queue */
if (created_control) {
if ((unordered == 0) || (asoc->idata_supported)) {
sctp_ucount_incr(asoc->cnt_on_all_streams);
}
if (sctp_place_control_in_stream(strm, asoc, control)) {
/* Duplicate SSN? */
sctp_abort_in_reasm(stcb, control, chk,
    abort_flag,
    SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
sctp_clean_up_control(stcb, control);
return;
}
if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) {
/*
 * Ok we created this control and now let's validate
 * that it's legal, i.e. there is a B bit set; if not
 * and we have up to the cum-ack then it's invalid.
 */
if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
sctp_abort_in_reasm(stcb, control, chk,
    abort_flag,
    SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
return;
}
}
}
if ((asoc->idata_supported == 0) && (unordered == 1)) {
sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag);
return;
}
/*
 * Ok we must queue the chunk into the reassembly portion: o if it's
 * the first it goes to the control mbuf. o if it's not first but the
 * next in sequence it goes to the control, and each succeeding one
 * in order also goes. o if it's not in order we place it on the list
 * in its place.
 */
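/*
 * Example: if fsn_included is 4 and the reassembly list holds FSNs
 * 5, 6 and 9, then 5 and 6 get merged into the control below and 9
 * stays queued until 7 and 8 arrive.
 */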
if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
/* It's the very first one. */
SCTPDBG(SCTP_DEBUG_XXX,
    "chunk is a first fsn: %u becomes fsn_included\n",
    chk->rec.data.fsn);
if (control->first_frag_seen) {
/*
 * Error on sender's part, they either sent us two
 * data chunks with FIRST, or they sent two
 * un-ordered chunks that were fragmented at the
 * same time in the same stream.
 */
sctp_abort_in_reasm(stcb, control, chk,
    abort_flag,
    SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
return;
}
control->first_frag_seen = 1;
control->sinfo_ppid = chk->rec.data.ppid;
control->sinfo_tsn = chk->rec.data.tsn;
control->fsn_included = chk->rec.data.fsn;
control->data = chk->data;
sctp_mark_non_revokable(asoc, chk->rec.data.tsn);
chk->data = NULL;
sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
sctp_setup_tail_pointer(control);
asoc->size_on_all_streams += control->length;
} else {
/* Place the chunk in our list */
int inserted = 0;
if (control->last_frag_seen == 0) {
/* Still willing to raise highest FSN seen */
if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
SCTPDBG(SCTP_DEBUG_XXX,
    "We have a new top_fsn: %u\n",
    chk->rec.data.fsn);
control->top_fsn = chk->rec.data.fsn;
}
if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
SCTPDBG(SCTP_DEBUG_XXX,
    "The last fsn is now in place fsn: %u\n",
    chk->rec.data.fsn);
control->last_frag_seen = 1;
if (SCTP_TSN_GT(control->top_fsn, chk->rec.data.fsn)) {
SCTPDBG(SCTP_DEBUG_XXX,
    "New fsn: %u is not at top_fsn: %u -- abort\n",
    chk->rec.data.fsn,
    control->top_fsn);
sctp_abort_in_reasm(stcb, control, chk,
    abort_flag,
    SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
return;
}
}
if (asoc->idata_supported || control->first_frag_seen) {
/*
 * For IDATA we always check since we know
 * that the first fragment is 0. For old
 * DATA we have to receive the first before
 * we know the first FSN (which is the TSN).
 */
if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
/*
 * We have already delivered up to
 * this, so it's a duplicate.
 */
sctp_abort_in_reasm(stcb, control, chk,
    abort_flag,
    SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
return;
}
}
} else {
if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/* Second last? huh? */
SCTPDBG(SCTP_DEBUG_XXX,
    "Duplicate last fsn: %u (top: %u) -- abort\n",
    chk->rec.data.fsn, control->top_fsn);
sctp_abort_in_reasm(stcb, control,
    chk, abort_flag,
    SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
return;
}
if (asoc->idata_supported || control->first_frag_seen) {
/*
 * For IDATA we always check since we know
 * that the first fragment is 0. For old
 * DATA we have to receive the first before
 * we know the first FSN (which is the TSN).
 */
if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) {
/*
 * We have already delivered up to
 * this, so it's a duplicate.
 */
SCTPDBG(SCTP_DEBUG_XXX,
    "New fsn: %u is already seen in included_fsn: %u -- abort\n",
    chk->rec.data.fsn, control->fsn_included);
sctp_abort_in_reasm(stcb, control, chk,
    abort_flag,
    SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
return;
}
}
/*
 * validate not beyond top FSN if we have seen last one
 */
if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) {
SCTPDBG(SCTP_DEBUG_XXX,
    "New fsn: %u is beyond or at top_fsn: %u -- abort\n",
    chk->rec.data.fsn,
    control->top_fsn);
sctp_abort_in_reasm(stcb, control, chk,
    abort_flag,
    SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
return;
}
}
/*
 * If we reach here, we need to place the new chunk in the
 * reassembly for this control.
 */
SCTPDBG(SCTP_DEBUG_XXX,
    "chunk is a not first fsn: %u needs to be inserted\n",
    chk->rec.data.fsn);
TAILQ_FOREACH(at, &control->reasm, sctp_next) {
if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) {
if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/* Last not at the end? huh? */
SCTPDBG(SCTP_DEBUG_XXX,
    "Last fragment not last in list: -- abort\n");
sctp_abort_in_reasm(stcb, control,
    chk, abort_flag,
    SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
return;
}
/*
 * This one in queue is bigger than the new
 * one, insert the new one before at.
 */
SCTPDBG(SCTP_DEBUG_XXX,
    "Insert it before fsn: %u\n",
    at->rec.data.fsn);
asoc->size_on_reasm_queue += chk->send_size;
sctp_ucount_incr(asoc->cnt_on_reasm_queue);
TAILQ_INSERT_BEFORE(at, chk, sctp_next);
inserted = 1;
break;
} else if (at->rec.data.fsn == chk->rec.data.fsn) {
/*
 * Gak, he sent me a duplicate str seq
 * number
 */
/*
 * foo bar, I guess I will just free this
 * new guy, should we abort too? FIX ME
 * MAYBE? Or it COULD be that the SSN's have
 * wrapped. Maybe I should compare to TSN
 * somehow... sigh for now just blow away
 * the chunk!
 */
SCTPDBG(SCTP_DEBUG_XXX,
    "Duplicate to fsn: %u -- abort\n",
    at->rec.data.fsn);
sctp_abort_in_reasm(stcb, control,
    chk, abort_flag,
    SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
return;
}
}
if (inserted == 0) {
/* It's at the end */
SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n",
    chk->rec.data.fsn);
asoc->size_on_reasm_queue += chk->send_size;
sctp_ucount_incr(asoc->cnt_on_reasm_queue);
control->top_fsn = chk->rec.data.fsn;
TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next);
}
}
/*
 * Ok, let's see if we can suck any up into the control structure
 * that are in sequence, if it makes sense.
 */
do_wakeup = 0;
/*
 * If the first fragment has not been seen there is no sense in
 * looking.
 */
1617 next_fsn = control->fsn_included + 1;
1618 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) {
1619 if (at->rec.data.fsn == next_fsn) {
1620 /* We can add this one now to the control */
1621 SCTPDBG(SCTP_DEBUG_XXX,
1622 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n",
1625 next_fsn, control->fsn_included);
1626 TAILQ_REMOVE(&control->reasm, at, sctp_next);
1627 lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD);
1628 if (control->on_read_q) {
1632 * We only add to the
1633 * size-on-all-streams if its not on
1634 * the read q. The read q flag will
1635 * cause a sballoc so its accounted
1638 asoc->size_on_all_streams += lenadded;
1641 if (control->end_added && control->pdapi_started) {
1642 if (strm->pd_api_started) {
1643 strm->pd_api_started = 0;
1644 control->pdapi_started = 0;
1646 if (control->on_read_q == 0) {
1647 sctp_add_to_readq(stcb->sctp_ep, stcb,
1649 &stcb->sctp_socket->so_rcv, control->end_added,
1650 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1660 /* Need to wakeup the reader */
1661 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);

static struct sctp_queued_to_read *
sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported)
{
struct sctp_queued_to_read *control;
if (ordered) {
TAILQ_FOREACH(control, &strm->inqueue, next_instrm) {
if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
break;
}
}
} else if (idata_supported) {
TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) {
if (SCTP_MID_EQ(idata_supported, control->mid, mid)) {
break;
}
}
} else {
control = TAILQ_FIRST(&strm->uno_inqueue);
}
return (control);
}

static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, int chk_length,
    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk, uint8_t chk_type)
{
struct sctp_tmit_chunk *chk = NULL;	/* make gcc happy */
struct sctp_stream_in *strm;
uint32_t tsn, fsn, gap, mid;
struct mbuf *dmbuf;
int the_len;
int need_reasm_check = 0;
uint16_t sid;
struct mbuf *op_err;
char msg[SCTP_DIAG_INFO_LEN];
struct sctp_queued_to_read *control, *ncontrol;
uint32_t ppid;
uint8_t chk_flags;
struct sctp_stream_reset_list *liste;
int ordered;
size_t clen;
int created_control = 0;
if (chk_type == SCTP_IDATA) {
struct sctp_idata_chunk *chunk, chunk_buf;
chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset,
    sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf);
chk_flags = chunk->ch.chunk_flags;
clen = sizeof(struct sctp_idata_chunk);
tsn = ntohl(chunk->dp.tsn);
sid = ntohs(chunk->dp.sid);
mid = ntohl(chunk->dp.mid);
if (chk_flags & SCTP_DATA_FIRST_FRAG) {
fsn = 0;
ppid = chunk->dp.ppid_fsn.ppid;
} else {
fsn = ntohl(chunk->dp.ppid_fsn.fsn);
ppid = 0xffffffff;	/* Use as an invalid value. */
}
} else {
struct sctp_data_chunk *chunk, chunk_buf;
chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset,
    sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf);
chk_flags = chunk->ch.chunk_flags;
clen = sizeof(struct sctp_data_chunk);
tsn = ntohl(chunk->dp.tsn);
sid = ntohs(chunk->dp.sid);
mid = (uint32_t)(ntohs(chunk->dp.ssn));
fsn = tsn;
ppid = chunk->dp.ppid;
}
if ((size_t)chk_length == clen) {
/*
 * Need to send an abort since we had an empty data chunk.
 */
op_err = sctp_generate_no_user_data_cause(tsn);
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
*abort_flag = 1;
return (0);
}
if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
asoc->send_sack = 1;
}
ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0);
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
}
1764 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1765 /* It is a duplicate */
1766 SCTP_STAT_INCR(sctps_recvdupdata);
1767 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1768 /* Record a dup for the next outbound sack */
1769 asoc->dup_tsns[asoc->numduptsns] = tsn;
1772 asoc->send_sack = 1;
1775 /* Calculate the number of TSN's between the base and this TSN */
1776 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1777 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1778 /* Can't hold the bit in the mapping at max array, toss it */
1781 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) {
1782 SCTP_TCB_LOCK_ASSERT(stcb);
1783 if (sctp_expand_mapping_array(asoc, gap)) {
1784 /* Can't expand, drop it */
1788 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1791 /* See if we have received this one already */
1792 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1793 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1794 SCTP_STAT_INCR(sctps_recvdupdata);
1795 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1796 /* Record a dup for the next outbound sack */
1797 asoc->dup_tsns[asoc->numduptsns] = tsn;
1800 asoc->send_sack = 1;
/*
 * Check to see about the GONE flag, duplicates would cause a sack
 * to be sent up above
 */
if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
/*
 * wait a minute, this guy is gone, there is no longer a
 * receiver. Send peer an ABORT!
 */
op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
*abort_flag = 1;
return (0);
}
/*
 * Now before going further we see if there is room. If NOT then we
 * MAY let one through only IF this TSN is the one we are waiting
 * for on a partial delivery API.
 */
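/*
 * Rationale (sketch): delivering the TSN a blocked PD-API read is
 * waiting on moves data into the socket buffer where the reader can
 * drain it, whereas any other TSN would only consume more of the
 * already exhausted reassembly space.
 */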
/* Is the stream valid? */
if (sid >= asoc->streamincnt) {
struct sctp_error_invalid_stream *cause;
op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream),
    0, M_NOWAIT, 1, MT_DATA);
if (op_err != NULL) {
/* add some space up front so prepend will work well */
SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
cause = mtod(op_err, struct sctp_error_invalid_stream *);
/*
 * Error causes are just param's and this one has
 * two back to back phdr, one with the error type
 * and size, the other with the streamid and a rsvd
 */
SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream);
cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM);
cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream));
cause->stream_id = htons(sid);
cause->reserved = htons(0);
sctp_queue_op_err(stcb, op_err);
}
SCTP_STAT_INCR(sctps_badsid);
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
asoc->highest_tsn_inside_nr_map = tsn;
}
if (tsn == (asoc->cumulative_tsn + 1)) {
/* Update cum-ack */
asoc->cumulative_tsn = tsn;
}
return (0);
}
/*
 * If it's a fragmented message, let's see if we can find the control
 * on the reassembly queues.
 */
if ((chk_type == SCTP_IDATA) &&
    ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) &&
    (fsn == 0)) {
/*
 * The first *must* be fsn 0, and other (middle/end) pieces
 * can *not* be fsn 0. XXX: This can happen in case of a
 * wrap around. Ignore this for now.
 */
SCTP_SNPRINTF(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", mid, chk_flags);
goto err_out;
}
control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported);
SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n",
    chk_flags, control);
if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
/* See if we can find the re-assembly entity */
if (control != NULL) {
/* We found something, does it belong? */
if (ordered && (mid != control->mid)) {
SCTP_SNPRINTF(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid);
err_out:
op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
*abort_flag = 1;
return (0);
}
if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) {
/*
 * We can't have a switched order with an
 * unordered chunk
 */
SCTP_SNPRINTF(msg, sizeof(msg),
    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
    tsn);
goto err_out;
}
if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) {
/*
 * We can't have a switched unordered with a
 * ordered chunk
 */
SCTP_SNPRINTF(msg, sizeof(msg),
    "All fragments of a user message must be ordered or unordered (TSN=%8.8x)",
    tsn);
goto err_out;
}
}
} else {
/*
 * It's a complete segment. Let's validate we don't have a
 * re-assembly going on with the same Stream/Seq (for
 * ordered) or in the same Stream for unordered.
 */
if (control != NULL) {
if (ordered || asoc->idata_supported) {
SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n",
    chk_flags, mid);
SCTP_SNPRINTF(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid);
goto err_out;
} else {
if ((tsn == control->fsn_included + 1) &&
    (control->end_added == 0)) {
SCTP_SNPRINTF(msg, sizeof(msg),
    "Illegal message sequence, missing end for MID: %8.8x",
    control->fsn_included);
goto err_out;
} else {
control = NULL;
}
}
}
}
/* now do the tests */
if (((asoc->cnt_on_all_streams +
    asoc->cnt_on_reasm_queue +
    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
    (((int)asoc->my_rwnd) <= 0)) {
/*
 * When we have NO room in the rwnd we check to make sure
 * the reader is doing its job...
 */
if (SCTP_SBAVAIL(&stcb->sctp_socket->so_rcv) > 0) {
/* some to read, wake-up */
sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
}
/* now is it in the mapping array of what we have accepted? */
if (chk_type == SCTP_DATA) {
if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
/* Nope not in the valid range dump it */
dump_packet:
sctp_set_rwnd(stcb, asoc);
if ((asoc->cnt_on_all_streams +
    asoc->cnt_on_reasm_queue +
    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
SCTP_STAT_INCR(sctps_datadropchklmt);
} else {
SCTP_STAT_INCR(sctps_datadroprwnd);
}
*break_flag = 1;
return (0);
}
} else {
if (control == NULL) {
goto dump_packet;
}
if (SCTP_TSN_GT(fsn, control->top_fsn)) {
goto dump_packet;
}
}
}
#ifdef SCTP_ASOCLOG_OF_TSNS
SCTP_TCB_LOCK_ASSERT(stcb);
if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
asoc->tsn_in_at = 0;
asoc->tsn_in_wrapped = 1;
}
asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
asoc->in_tsnlog[asoc->tsn_in_at].strm = sid;
asoc->in_tsnlog[asoc->tsn_in_at].seq = mid;
asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
asoc->in_tsnlog[asoc->tsn_in_at].flgs = chk_flags;
asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
asoc->tsn_in_at++;
#endif
/*
 * Before we continue let's validate that we are not being fooled by
 * an evil attacker. We can only have Nk chunks based on our TSN
 * spread allowed by the mapping array N * 8 bits, so there is no
 * way our stream sequence numbers could have wrapped. We of course
 * only validate the FIRST fragment so the bit must be set.
 */
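/*
 * Bound sketch: the mapping array only spans N * 8 TSNs past the
 * cumulative ack, far fewer than the 2^16 messages a legitimate SSN
 * wrap would require, so a FIRST fragment at or behind
 * last_mid_delivered has to be bogus or a duplicate.
 */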
1998 if ((chk_flags & SCTP_DATA_FIRST_FRAG) &&
1999 (TAILQ_EMPTY(&asoc->resetHead)) &&
2000 (chk_flags & SCTP_DATA_UNORDERED) == 0 &&
2001 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) {
2002 /* The incoming sseq is behind where we last delivered? */
2003 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n",
2004 mid, asoc->strmin[sid].last_mid_delivered);
2006 if (asoc->idata_supported) {
2007 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x",
2008 asoc->strmin[sid].last_mid_delivered,
2013 SCTP_SNPRINTF(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
2014 (uint16_t)asoc->strmin[sid].last_mid_delivered,
2019 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2020 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
2021 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
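/*
 * Illustrative sketch (not part of this file): the SCTP_MID_GE()/
 * SCTP_TSN_GT() style macros used above compare identifiers in
 * serial-number arithmetic, so the check stays correct across
 * wraparound. One equivalent formulation for 32-bit values:
 *
 *	static inline int
 *	serial_gt32(uint32_t a, uint32_t b)
 *	{
 *		return ((a != b) && ((uint32_t)(a - b) < (1U << 31)));
 *	}
 *
 * e.g. serial_gt32(0x00000001, 0xffffffff) is true, so a MID just
 * after the wrap is still considered newer than one just before it.
 */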
2025 if (chk_type == SCTP_IDATA) {
2026 the_len = (chk_length - sizeof(struct sctp_idata_chunk));
2028 the_len = (chk_length - sizeof(struct sctp_data_chunk));
2030 if (last_chunk == 0) {
2031 if (chk_type == SCTP_IDATA) {
2032 dmbuf = SCTP_M_COPYM(*m,
2033 (offset + sizeof(struct sctp_idata_chunk)),
2036 dmbuf = SCTP_M_COPYM(*m,
2037 (offset + sizeof(struct sctp_data_chunk)),
2040 #ifdef SCTP_MBUF_LOGGING
2041 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
2042 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
2046 /* We can steal the last chunk */
2050 /* lop off the top part */
2051 if (chk_type == SCTP_IDATA) {
2052 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk)));
2054 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
2056 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
2057 l_len = SCTP_BUF_LEN(dmbuf);
2060 * need to count up the size; hopefully we do not hit this too often
2066 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
2067 l_len += SCTP_BUF_LEN(lat);
2070 if (l_len > the_len) {
2071 /* Trim the trailing padding bytes off the end too */
2072 m_adj(dmbuf, -(l_len - the_len));
2075 if (dmbuf == NULL) {
2076 SCTP_STAT_INCR(sctps_nomem);
2080 * Now no matter what, we need a control; get one if we don't have
2081 * one (we may have gotten it above when we found the message was fragmented).
2084 if (control == NULL) {
2085 sctp_alloc_a_readq(stcb, control);
2086 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
2091 if (control == NULL) {
2092 SCTP_STAT_INCR(sctps_nomem);
2095 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2098 control->data = dmbuf;
2099 control->tail_mbuf = NULL;
2100 for (mm = control->data; mm; mm = mm->m_next) {
2101 control->length += SCTP_BUF_LEN(mm);
2102 if (SCTP_BUF_NEXT(mm) == NULL) {
2103 control->tail_mbuf = mm;
2106 control->end_added = 1;
2107 control->last_frag_seen = 1;
2108 control->first_frag_seen = 1;
2109 control->fsn_included = fsn;
2110 control->top_fsn = fsn;
2112 created_control = 1;
2114 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x ordered: %d MID: %u control: %p\n",
2115 chk_flags, ordered, mid, control);
2116 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
2117 TAILQ_EMPTY(&asoc->resetHead) &&
2119 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) &&
2120 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) {
2121 /* Candidate for express delivery */
2123 * It's not fragmented, no PD-API is up, nothing is in the
2124 * delivery queue, it's un-ordered OR ordered and the next to
2125 * deliver AND nothing else is stuck on the stream queue,
2126 * and there is room for it in the socket buffer. Let's just
2127 * stuff it up the buffer....
2129 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2130 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2131 asoc->highest_tsn_inside_nr_map = tsn;
2133 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n",
2136 sctp_add_to_readq(stcb->sctp_ep, stcb,
2137 control, &stcb->sctp_socket->so_rcv,
2138 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2140 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) {
2141 /* for ordered, bump what we delivered */
2142 asoc->strmin[sid].last_mid_delivered++;
2144 SCTP_STAT_INCR(sctps_recvexpress);
2145 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2146 sctp_log_strm_del_alt(stcb, tsn, mid, sid,
2147 SCTP_STR_LOG_FROM_EXPRS_DEL);
2150 goto finish_express_del;
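/*
 * Condensed sketch (illustrative only, hypothetical names) of the
 * express-delivery test above: a chunk may bypass the reordering and
 * reassembly machinery iff
 *
 *	not_fragmented && no_stream_reset_pending &&
 *	(unordered ||
 *	 (mid == last_mid_delivered + 1 && stream_inqueue_empty))
 *
 * Anything that fails this predicate falls through to the normal
 * queueing paths below.
 */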
2153 /* Now will we need a chunk too? */
2154 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
2155 sctp_alloc_a_chunk(stcb, chk);
2157 /* No memory so we drop the chunk */
2158 SCTP_STAT_INCR(sctps_nomem);
2159 if (last_chunk == 0) {
2160 /* we copied it, free the copy */
2161 sctp_m_freem(dmbuf);
2165 chk->rec.data.tsn = tsn;
2166 chk->no_fr_allowed = 0;
2167 chk->rec.data.fsn = fsn;
2168 chk->rec.data.mid = mid;
2169 chk->rec.data.sid = sid;
2170 chk->rec.data.ppid = ppid;
2171 chk->rec.data.context = stcb->asoc.context;
2172 chk->rec.data.doing_fast_retransmit = 0;
2173 chk->rec.data.rcv_flags = chk_flags;
2175 chk->send_size = the_len;
2177 SCTPDBG(SCTP_DEBUG_XXX, "Building chk: %p for control: %p to be read (MID: %u)\n",
2180 atomic_add_int(&net->ref_count, 1);
2183 /* Set the appropriate TSN mark */
2184 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
2185 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
2186 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
2187 asoc->highest_tsn_inside_nr_map = tsn;
2190 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
2191 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
2192 asoc->highest_tsn_inside_map = tsn;
2195 /* Now is it complete (i.e. not fragmented)? */
2196 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
2198 * Special check for when streams are resetting. We could be
2199 * smarter about this and check the actual stream to see
2200 * if it is not being reset... that way we would not create a
2201 * HOLB when amongst streams being reset and those not being reset.
2205 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2206 SCTP_TSN_GT(tsn, liste->tsn)) {
2208 * yep, it's past where we need to reset... go ahead and queue it.
2211 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2213 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2215 struct sctp_queued_to_read *lcontrol, *nlcontrol;
2216 unsigned char inserted = 0;
2218 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) {
2219 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) {
2223 TAILQ_INSERT_BEFORE(lcontrol, control, next);
2228 if (inserted == 0) {
2230 * must be put at the end; every entry already
2231 * queued has a smaller TSN.
2234 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2237 goto finish_express_del;
2239 if (chk_flags & SCTP_DATA_UNORDERED) {
2240 /* queue directly into socket buffer */
2241 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n",
2243 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2244 sctp_add_to_readq(stcb->sctp_ep, stcb,
2246 &stcb->sctp_socket->so_rcv, 1,
2247 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2250 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control,
2252 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2260 goto finish_express_del;
2262 /* If we reach here it's a reassembly */
2263 need_reasm_check = 1;
2264 SCTPDBG(SCTP_DEBUG_XXX,
2265 "Queue data to stream for reasm control: %p MID: %u\n",
2267 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn);
2270 * the assoc is now gone and chk was put onto the reasm
2271 * queue, which has all been freed.
2279 /* Here we tidy up things */
2280 if (tsn == (asoc->cumulative_tsn + 1)) {
2281 /* Update cum-ack */
2282 asoc->cumulative_tsn = tsn;
2288 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2290 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2292 SCTP_STAT_INCR(sctps_recvdata);
2293 /* Set it present please */
2294 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2295 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN);
2297 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2298 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2299 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2301 if (need_reasm_check) {
2302 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD);
2303 need_reasm_check = 0;
2305 /* check the special flag for stream resets */
2306 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2307 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2309 * We have finished working through the backlogged TSNs; now it is
2310 * time to reset streams: 1) call the reset function, 2) free the
2311 * pending_reply space, 3) distribute any chunks in the
2312 * pending_reply_queue.
2314 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2315 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2316 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
2317 SCTP_FREE(liste, SCTP_M_STRESET);
2318 /* sa_ignore FREED_MEMORY */
2319 liste = TAILQ_FIRST(&asoc->resetHead);
2320 if (TAILQ_EMPTY(&asoc->resetHead)) {
2321 /* All can be removed */
2322 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2323 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2324 strm = &asoc->strmin[control->sinfo_stream];
2325 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2329 if (need_reasm_check) {
2330 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2331 need_reasm_check = 0;
2335 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) {
2336 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) {
2340 * If control->sinfo_tsn is <= liste->tsn we
2341 * can process it, which is the negation of
2342 * control->sinfo_tsn > liste->tsn.
2344 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next);
2345 strm = &asoc->strmin[control->sinfo_stream];
2346 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check);
2350 if (need_reasm_check) {
2351 (void)sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_NOT_HELD);
2352 need_reasm_check = 0;
2360 static const int8_t sctp_map_lookup_tab[256] = {
2361 0, 1, 0, 2, 0, 1, 0, 3,
2362 0, 1, 0, 2, 0, 1, 0, 4,
2363 0, 1, 0, 2, 0, 1, 0, 3,
2364 0, 1, 0, 2, 0, 1, 0, 5,
2365 0, 1, 0, 2, 0, 1, 0, 3,
2366 0, 1, 0, 2, 0, 1, 0, 4,
2367 0, 1, 0, 2, 0, 1, 0, 3,
2368 0, 1, 0, 2, 0, 1, 0, 6,
2369 0, 1, 0, 2, 0, 1, 0, 3,
2370 0, 1, 0, 2, 0, 1, 0, 4,
2371 0, 1, 0, 2, 0, 1, 0, 3,
2372 0, 1, 0, 2, 0, 1, 0, 5,
2373 0, 1, 0, 2, 0, 1, 0, 3,
2374 0, 1, 0, 2, 0, 1, 0, 4,
2375 0, 1, 0, 2, 0, 1, 0, 3,
2376 0, 1, 0, 2, 0, 1, 0, 7,
2377 0, 1, 0, 2, 0, 1, 0, 3,
2378 0, 1, 0, 2, 0, 1, 0, 4,
2379 0, 1, 0, 2, 0, 1, 0, 3,
2380 0, 1, 0, 2, 0, 1, 0, 5,
2381 0, 1, 0, 2, 0, 1, 0, 3,
2382 0, 1, 0, 2, 0, 1, 0, 4,
2383 0, 1, 0, 2, 0, 1, 0, 3,
2384 0, 1, 0, 2, 0, 1, 0, 6,
2385 0, 1, 0, 2, 0, 1, 0, 3,
2386 0, 1, 0, 2, 0, 1, 0, 4,
2387 0, 1, 0, 2, 0, 1, 0, 3,
2388 0, 1, 0, 2, 0, 1, 0, 5,
2389 0, 1, 0, 2, 0, 1, 0, 3,
2390 0, 1, 0, 2, 0, 1, 0, 4,
2391 0, 1, 0, 2, 0, 1, 0, 3,
2392 0, 1, 0, 2, 0, 1, 0, 8
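/*
 * For reference (an illustrative sketch, not part of the build):
 * sctp_map_lookup_tab[b] is the number of consecutive 1-bits in the
 * byte b counted from the least significant bit, e.g. entry 0x07 is
 * 3 and entry 0xff is 8. An equivalent computation would be:
 */
#if 0
static int8_t
sctp_map_lookup_compute(uint8_t val)
{
	int8_t n = 0;

	/* count the run of 1-bits starting at bit 0 */
	while (val & 1) {
		n++;
		val >>= 1;
	}
	return (n);
}
#endif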
2396 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2399 * Now we also need to check the mapping array in a couple of ways.
2400 * 1) Did we move the cum-ack point?
2402 * When you first glance at this you might think that all entries
2403 * that make up the position of the cum-ack would be in the
2404 * nr-mapping array only.. i.e. things up to the cum-ack are always
2405 * deliverable. That's true with one exception: when it's a fragmented
2406 * message we may not deliver the data until some threshold (or all
2407 * of it) is in place. So we must OR the nr_mapping_array and
2408 * mapping_array to get a true picture of the cum-ack.
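 * Illustrative example (values invented): if for some byte i
 * nr_mapping_array[i] == 0x0f (four TSNs already delivered) and
 * mapping_array[i] == 0x10 (a fifth TSN still held on the reassembly
 * queue), then val = 0x0f | 0x10 = 0x1f and the cum-ack can advance
 * across all five TSNs, even though the fifth has not yet been
 * handed to the application.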
2410 struct sctp_association *asoc;
2413 int slide_from, slide_end, lgap, distance;
2414 uint32_t old_cumack, old_base, old_highest, highest_tsn;
2418 old_cumack = asoc->cumulative_tsn;
2419 old_base = asoc->mapping_array_base_tsn;
2420 old_highest = asoc->highest_tsn_inside_map;
2422 * We could probably improve this a small bit by calculating the
2423 * offset of the current cum-ack as the starting point.
2426 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2427 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2431 /* there is a 0 bit */
2432 at += sctp_map_lookup_tab[val];
2436 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2438 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2439 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2441 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2442 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2444 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2445 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2446 sctp_print_mapping_array(asoc);
2447 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2448 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2450 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2451 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2454 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2455 highest_tsn = asoc->highest_tsn_inside_nr_map;
2457 highest_tsn = asoc->highest_tsn_inside_map;
2459 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2460 /* The complete array was completed by a single FR */
2461 /* highest becomes the cum-ack */
2467 /* clear the array */
2468 clr = ((at + 7) >> 3);
2469 if (clr > asoc->mapping_array_size) {
2470 clr = asoc->mapping_array_size;
2472 memset(asoc->mapping_array, 0, clr);
2473 memset(asoc->nr_mapping_array, 0, clr);
2475 for (i = 0; i < asoc->mapping_array_size; i++) {
2476 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2477 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2478 sctp_print_mapping_array(asoc);
2482 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2483 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2484 } else if (at >= 8) {
2485 /* we can slide the mapping array down */
2486 /* slide_from holds where we hit the first NON 0xff byte */
2489 * now calculate the ceiling of the move using our highest TSN value.
2492 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2493 slide_end = (lgap >> 3);
2494 if (slide_end < slide_from) {
2495 sctp_print_mapping_array(asoc);
2497 panic("impossible slide");
2499 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? at: %d\n",
2500 lgap, slide_end, slide_from, at);
2504 if (slide_end > asoc->mapping_array_size) {
2506 panic("would overrun buffer");
2508 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n",
2509 asoc->mapping_array_size, slide_end);
2510 slide_end = asoc->mapping_array_size;
2513 distance = (slide_end - slide_from) + 1;
2514 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2515 sctp_log_map(old_base, old_cumack, old_highest,
2516 SCTP_MAP_PREPARE_SLIDE);
2517 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end,
2518 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM);
2520 if (distance + slide_from > asoc->mapping_array_size ||
2523 * Here we do NOT slide forward the array so that
2524 * hopefully when more data comes in to fill it up
2525 * we will be able to slide it forward. Really I
2526 * don't think this should happen :-0
2528 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2529 sctp_log_map((uint32_t)distance, (uint32_t)slide_from,
2530 (uint32_t)asoc->mapping_array_size,
2531 SCTP_MAP_SLIDE_NONE);
2536 for (ii = 0; ii < distance; ii++) {
2537 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2538 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2540 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2541 asoc->mapping_array[ii] = 0;
2542 asoc->nr_mapping_array[ii] = 0;
2544 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2545 asoc->highest_tsn_inside_map += (slide_from << 3);
2547 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2548 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2550 asoc->mapping_array_base_tsn += (slide_from << 3);
2551 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2552 sctp_log_map(asoc->mapping_array_base_tsn,
2553 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2554 SCTP_MAP_SLIDE_RESULT);
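/*
 * The copy loop above is equivalent to the following sketch
 * (illustrative only), applied to both mapping_array and
 * nr_mapping_array:
 *
 *	memmove(map, map + slide_from, distance);
 *	memset(map + distance, 0, mapping_array_size - distance);
 *	mapping_array_base_tsn += slide_from * 8;
 *
 * so the bit for any given TSN keeps the same offset relative to the
 * new mapping_array_base_tsn.
 */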
2561 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2563 struct sctp_association *asoc;
2564 uint32_t highest_tsn;
2567 sctp_slide_mapping_arrays(stcb);
2569 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2570 highest_tsn = asoc->highest_tsn_inside_nr_map;
2572 highest_tsn = asoc->highest_tsn_inside_map;
2574 /* Is there a gap now? */
2575 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2578 * Now we need to see if we need to queue a sack or just start the
2579 * timer (if allowed).
2581 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2583 * Ok, special case: in the SHUTDOWN-SENT state we make
2584 * sure the SACK timer is off and instead send a SHUTDOWN and a SACK.
2587 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2588 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2589 stcb->sctp_ep, stcb, NULL,
2590 SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2592 sctp_send_shutdown(stcb,
2593 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2595 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2599 * CMT DAC algorithm: increase the number of packets received since the last ACK.
2602 stcb->asoc.cmt_dac_pkts_rcvd++;
2604 if ((stcb->asoc.send_sack == 1) || /* We need to send a sack */
2606 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no longer is one */
2608 (stcb->asoc.numduptsns) || /* we have dup's */
2609 (is_a_gap) || /* is still a gap */
2610 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2611 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)) { /* hit limit of pkts */
2612 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2613 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2614 (stcb->asoc.send_sack == 0) &&
2615 (stcb->asoc.numduptsns == 0) &&
2616 (stcb->asoc.delayed_ack) &&
2617 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2619 * CMT DAC algorithm: With CMT, delay acks
2620 * even in the face of reordering.
2621 * Therefore, acks that do not have to be
2622 * sent because of the above reasons will
2623 * be delayed. That is, acks that would have
2624 * been sent due to gap reports will be
2625 * delayed with DAC. Start the delayed ack timer.
2628 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2629 stcb->sctp_ep, stcb, NULL);
2632 * Ok, we must build a SACK since the timer
2633 * is pending, we got our first packet, OR
2634 * there are gaps or duplicates.
2636 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL,
2637 SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
2638 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2641 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2642 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2643 stcb->sctp_ep, stcb, NULL);
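/*
 * Boolean sketch (illustrative, hypothetical names) of the decision
 * made above:
 *
 *	send_now = send_sack || (was_a_gap && !is_a_gap) ||
 *	    (numduptsns != 0) || is_a_gap || (delayed_ack == 0) ||
 *	    (data_pkts_seen >= sack_freq);
 *
 * When send_now is false, or the CMT DAC exception applies, the
 * delayed-ack timer is (re)started instead of a SACK being sent
 * right away.
 */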
2650 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2651 struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2652 struct sctp_nets *net, uint32_t *high_tsn)
2654 struct sctp_chunkhdr *ch, chunk_buf;
2655 struct sctp_association *asoc;
2656 int num_chunks = 0; /* number of data chunks processed */
2658 int break_flag, last_chunk;
2659 int abort_flag = 0, was_a_gap;
2661 uint32_t highest_tsn;
2662 uint16_t chk_length;
2665 sctp_set_rwnd(stcb, &stcb->asoc);
2668 SCTP_TCB_LOCK_ASSERT(stcb);
2670 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2671 highest_tsn = asoc->highest_tsn_inside_nr_map;
2673 highest_tsn = asoc->highest_tsn_inside_map;
2675 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2677 * setup where we got the last DATA packet from for any SACK that
2678 * may need to go out. Don't bump the net. This is done ONLY when a
2679 * chunk is assigned.
2681 asoc->last_data_chunk_from = net;
2684 * Now before we proceed we must figure out if this is a wasted
2685 * cluster... i.e. it is a small packet sent in and yet the driver
2686 * underneath allocated a full cluster for it. If so we must copy it
2687 * to a smaller mbuf and free up the cluster mbuf. This will help
2688 * with cluster starvation.
2690 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2691 /* we only handle mbufs that are singletons.. not chains */
2692 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2694 /* ok, let's see if we can copy the data up */
2697 /* get the pointers and copy */
2698 to = mtod(m, caddr_t *);
2699 from = mtod((*mm), caddr_t *);
2700 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2701 /* copy the length and free up the old */
2702 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2704 /* success, back copy */
2707 /* We are in trouble in the mbuf world .. yikes */
2711 /* get pointer to the first chunk header */
2712 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2713 sizeof(struct sctp_chunkhdr),
2714 (uint8_t *)&chunk_buf);
2719 * process all DATA chunks...
2721 *high_tsn = asoc->cumulative_tsn;
2723 asoc->data_pkts_seen++;
2724 while (stop_proc == 0) {
2725 /* validate chunk length */
2726 chk_length = ntohs(ch->chunk_length);
2727 if (length - *offset < chk_length) {
2728 /* all done, mutilated chunk */
2732 if ((asoc->idata_supported == 1) &&
2733 (ch->chunk_type == SCTP_DATA)) {
2734 struct mbuf *op_err;
2735 char msg[SCTP_DIAG_INFO_LEN];
2737 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated");
2738 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2739 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
2740 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2743 if ((asoc->idata_supported == 0) &&
2744 (ch->chunk_type == SCTP_IDATA)) {
2745 struct mbuf *op_err;
2746 char msg[SCTP_DIAG_INFO_LEN];
2748 SCTP_SNPRINTF(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated");
2749 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2750 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
2751 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2754 if ((ch->chunk_type == SCTP_DATA) ||
2755 (ch->chunk_type == SCTP_IDATA)) {
2758 if (ch->chunk_type == SCTP_DATA) {
2759 clen = sizeof(struct sctp_data_chunk);
2761 clen = sizeof(struct sctp_idata_chunk);
2763 if (chk_length < clen) {
2765 * Need to send an abort since we had an
2766 * invalid data chunk.
2768 struct mbuf *op_err;
2769 char msg[SCTP_DIAG_INFO_LEN];
2771 SCTP_SNPRINTF(msg, sizeof(msg), "%s chunk of length %u",
2772 ch->chunk_type == SCTP_DATA ? "DATA" : "I-DATA",
2774 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2775 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
2776 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2779 #ifdef SCTP_AUDITING_ENABLED
2780 sctp_audit_log(0xB1, 0);
2782 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2787 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset,
2788 chk_length, net, high_tsn, &abort_flag, &break_flag,
2789 last_chunk, ch->chunk_type)) {
2797 * Set because we ran out of rwnd space and
2798 * no drop report space is left.
2804 /* not a data chunk in the data region */
2805 switch (ch->chunk_type) {
2806 case SCTP_INITIATION:
2807 case SCTP_INITIATION_ACK:
2808 case SCTP_SELECTIVE_ACK:
2809 case SCTP_NR_SELECTIVE_ACK:
2810 case SCTP_HEARTBEAT_REQUEST:
2811 case SCTP_HEARTBEAT_ACK:
2812 case SCTP_ABORT_ASSOCIATION:
2814 case SCTP_SHUTDOWN_ACK:
2815 case SCTP_OPERATION_ERROR:
2816 case SCTP_COOKIE_ECHO:
2817 case SCTP_COOKIE_ACK:
2820 case SCTP_SHUTDOWN_COMPLETE:
2821 case SCTP_AUTHENTICATION:
2822 case SCTP_ASCONF_ACK:
2823 case SCTP_PACKET_DROPPED:
2824 case SCTP_STREAM_RESET:
2825 case SCTP_FORWARD_CUM_TSN:
2829 * Now, what do we do with KNOWN
2830 * chunks that are NOT in the right place?
2833 * For now, I do nothing but ignore
2834 * them. We may later want to add
2835 * sysctl stuff to switch out and do
2836 * either an ABORT() or possibly process them.
2839 struct mbuf *op_err;
2840 char msg[SCTP_DIAG_INFO_LEN];
2842 SCTP_SNPRINTF(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
2844 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2845 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2850 * Unknown chunk type: use the bit rules after checking the length.
2853 if (chk_length < sizeof(struct sctp_chunkhdr)) {
2855 * Need to send an abort since we
2856 * had an invalid chunk.
2858 struct mbuf *op_err;
2859 char msg[SCTP_DIAG_INFO_LEN];
2861 SCTP_SNPRINTF(msg, sizeof(msg), "Chunk of length %u", chk_length);
2862 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2863 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
2864 sctp_abort_an_association(inp, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
2867 if (ch->chunk_type & 0x40) {
2868 /* Add an error report to the queue */
2869 struct mbuf *op_err;
2870 struct sctp_gen_error_cause *cause;
2872 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause),
2873 0, M_NOWAIT, 1, MT_DATA);
2874 if (op_err != NULL) {
2875 cause = mtod(op_err, struct sctp_gen_error_cause *);
2876 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK);
2877 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause)));
2878 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause);
2879 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2880 if (SCTP_BUF_NEXT(op_err) != NULL) {
2881 sctp_queue_op_err(stcb, op_err);
2883 sctp_m_freem(op_err);
2887 if ((ch->chunk_type & 0x80) == 0) {
2888 /* discard the rest of this packet */
2890 } /* else skip this bad chunk and continue */
2893 } /* switch on chunk type */
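/*
 * The "bit rules" used in the default case above come from the two
 * high-order bits of the chunk type (illustrative summary):
 *
 *	report = (ch->chunk_type & 0x40) != 0;	queue an ERROR cause
 *	skip   = (ch->chunk_type & 0x80) != 0;	continue processing
 *
 * i.e. 00 stops silently, 01 stops and reports, 10 skips silently,
 * and 11 skips and reports the unrecognized chunk.
 */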
2895 *offset += SCTP_SIZE32(chk_length);
2896 if ((*offset >= length) || stop_proc) {
2897 /* no more data left in the mbuf chain */
2901 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
2902 sizeof(struct sctp_chunkhdr),
2903 (uint8_t *)&chunk_buf);
2912 * we need to report rwnd overrun drops.
2914 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2918 * Did we get data? If so, update the time for auto-close and
2919 * give the peer credit for being alive.
2921 SCTP_STAT_INCR(sctps_recvpktwithdata);
2922 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2923 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2924 stcb->asoc.overall_error_count,
2926 SCTP_FROM_SCTP_INDATA,
2929 stcb->asoc.overall_error_count = 0;
2930 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2932 /* now service all of the reassm queue if needed */
2933 if (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_SENT) {
2934 /* Assure that we ack right away */
2935 stcb->asoc.send_sack = 1;
2937 /* Start a sack timer or QUEUE a SACK for sending */
2938 sctp_sack_check(stcb, was_a_gap);
2943 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2944 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2946 uint32_t *biggest_newly_acked_tsn,
2947 uint32_t *this_sack_lowest_newack,
2950 struct sctp_tmit_chunk *tp1;
2951 unsigned int theTSN;
2952 int j, wake_him = 0, circled = 0;
2954 /* Recover the tp1 we last saw */
2957 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2959 for (j = frag_strt; j <= frag_end; j++) {
2960 theTSN = j + last_tsn;
2962 if (tp1->rec.data.doing_fast_retransmit)
2966 * CMT: CUCv2 algorithm. For each TSN being
2967 * processed from the sent queue, track the
2968 * next expected pseudo-cumack, or
2969 * rtx_pseudo_cumack, if required. Separate
2970 * cumack trackers for first transmissions,
2971 * and retransmissions.
2973 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2974 (tp1->whoTo->find_pseudo_cumack == 1) &&
2975 (tp1->snd_count == 1)) {
2976 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn;
2977 tp1->whoTo->find_pseudo_cumack = 0;
2979 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2980 (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2981 (tp1->snd_count > 1)) {
2982 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn;
2983 tp1->whoTo->find_rtx_pseudo_cumack = 0;
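/*
 * Illustrative trace (invented TSNs): suppose TSNs 10-12 were first
 * transmissions to a destination with find_pseudo_cumack set. The
 * first one seen in this walk (TSN 10) becomes that destination's
 * pseudo_cumack; when TSN 10 is later gap-acked, new_pseudo_cumack
 * is set so the CC module may update cwnd, and find_pseudo_cumack
 * re-arms the search for the next expected pseudo-cumack.
 */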
2985 if (tp1->rec.data.tsn == theTSN) {
2986 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2988 * must be held until cum-ack passes.
2991 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2993 * If it is less than RESEND, it is
2994 * now no longer in flight.
2995 * Higher values may already be set
2996 * via previous Gap Ack Blocks...
2997 * i.e. ACKED or RESEND.
2999 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3000 *biggest_newly_acked_tsn)) {
3001 *biggest_newly_acked_tsn = tp1->rec.data.tsn;
3004 * CMT: SFR algo (and HTNA) - set
3005 * saw_newack to 1 for dest being
3006 * newly acked. update
3007 * this_sack_highest_newack if appropriate.
3010 if (tp1->rec.data.chunk_was_revoked == 0)
3011 tp1->whoTo->saw_newack = 1;
3013 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3014 tp1->whoTo->this_sack_highest_newack)) {
3015 tp1->whoTo->this_sack_highest_newack =
3019 * CMT DAC algo: also update
3020 * this_sack_lowest_newack
3022 if (*this_sack_lowest_newack == 0) {
3023 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3024 sctp_log_sack(*this_sack_lowest_newack,
3029 SCTP_LOG_TSN_ACKED);
3031 *this_sack_lowest_newack = tp1->rec.data.tsn;
3034 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
3035 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
3036 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
3037 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
3038 * Separate pseudo_cumack trackers for first transmissions and
3041 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) {
3042 if (tp1->rec.data.chunk_was_revoked == 0) {
3043 tp1->whoTo->new_pseudo_cumack = 1;
3045 tp1->whoTo->find_pseudo_cumack = 1;
3047 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3048 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
3050 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) {
3051 if (tp1->rec.data.chunk_was_revoked == 0) {
3052 tp1->whoTo->new_pseudo_cumack = 1;
3054 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3056 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3057 sctp_log_sack(*biggest_newly_acked_tsn,
3062 SCTP_LOG_TSN_ACKED);
3064 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3065 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
3066 tp1->whoTo->flight_size,
3068 (uint32_t)(uintptr_t)tp1->whoTo,
3071 sctp_flight_size_decrease(tp1);
3072 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3073 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3076 sctp_total_flight_decrease(stcb, tp1);
3078 tp1->whoTo->net_ack += tp1->send_size;
3079 if (tp1->snd_count < 2) {
3081 * True non-retransmitted chunk
3083 tp1->whoTo->net_ack2 += tp1->send_size;
3090 sctp_calculate_rto(stcb,
3093 &tp1->sent_rcv_time,
3094 SCTP_RTT_FROM_DATA)) {
3097 if (tp1->whoTo->rto_needed == 0) {
3098 tp1->whoTo->rto_needed = 1;
3104 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
3105 if (SCTP_TSN_GT(tp1->rec.data.tsn,
3106 stcb->asoc.this_sack_highest_gap)) {
3107 stcb->asoc.this_sack_highest_gap =
3110 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3111 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
3112 #ifdef SCTP_AUDITING_ENABLED
3113 sctp_audit_log(0xB2,
3114 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
3119 * All chunks NOT UNSENT fall through here and are marked
3120 * (leave PR-SCTP ones that are to skip alone though)
3122 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
3123 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3124 tp1->sent = SCTP_DATAGRAM_MARKED;
3126 if (tp1->rec.data.chunk_was_revoked) {
3127 /* deflate the cwnd */
3128 tp1->whoTo->cwnd -= tp1->book_size;
3129 tp1->rec.data.chunk_was_revoked = 0;
3131 /* NR Sack code here */
3133 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
3134 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
3135 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--;
3138 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
3141 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
3142 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
3143 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) {
3144 stcb->asoc.trigger_reset = 1;
3146 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3152 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3153 sctp_m_freem(tp1->data);
3160 } /* if (tp1->tsn == theTSN) */
3161 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) {
3164 tp1 = TAILQ_NEXT(tp1, sctp_next);
3165 if ((tp1 == NULL) && (circled == 0)) {
3167 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3169 } /* end while (tp1) */
3172 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3174 /* In case the fragments were not in order we must reset */
3175 } /* end for (j = fragStart */
3177 return (wake_him); /* Return value only used for nr-sack */
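/*
 * Worked example (illustrative numbers): with last_tsn (the cum-ack)
 * at 1000 and a gap block {frag_strt = 3, frag_end = 5}, the loop
 * above walks theTSN = 1003, 1004 and 1005 and marks those chunks;
 * TSNs 1001 and 1002 remain unacked and stay candidates for strike
 * accounting in sctp_strike_gap_ack_chunks().
 */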
3181 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3182 uint32_t last_tsn, uint32_t *biggest_tsn_acked,
3183 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack,
3184 int num_seg, int num_nr_seg, int *rto_ok)
3186 struct sctp_gap_ack_block *frag, block;
3187 struct sctp_tmit_chunk *tp1;
3192 uint16_t frag_strt, frag_end, prev_frag_end;
3194 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3198 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3201 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3203 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3204 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block);
3205 *offset += sizeof(block);
3207 return (chunk_freed);
3209 frag_strt = ntohs(frag->start);
3210 frag_end = ntohs(frag->end);
3212 if (frag_strt > frag_end) {
3213 /* This gap report is malformed, skip it. */
3216 if (frag_strt <= prev_frag_end) {
3217 /* This gap report is not in order, so restart. */
3218 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3220 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3221 *biggest_tsn_acked = last_tsn + frag_end;
3228 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3229 non_revocable, &num_frs, biggest_newly_acked_tsn,
3230 this_sack_lowest_newack, rto_ok)) {
3233 prev_frag_end = frag_end;
3235 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3237 sctp_log_fr(*biggest_tsn_acked,
3238 *biggest_newly_acked_tsn,
3239 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3241 return (chunk_freed);
3245 sctp_check_for_revoked(struct sctp_tcb *stcb,
3246 struct sctp_association *asoc, uint32_t cumack,
3247 uint32_t biggest_tsn_acked)
3249 struct sctp_tmit_chunk *tp1;
3251 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3252 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) {
3254 * ok this guy is either ACK or MARKED. If it is
3255 * ACKED it has been previously acked but not this
3256 * time, i.e. revoked. If it is MARKED it was ACK'ed again.
3259 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) {
3262 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3263 /* it has been revoked */
3264 tp1->sent = SCTP_DATAGRAM_SENT;
3265 tp1->rec.data.chunk_was_revoked = 1;
3267 * We must add this stuff back in to assure
3268 * timers and such get started.
3270 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3271 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3272 tp1->whoTo->flight_size,
3274 (uint32_t)(uintptr_t)tp1->whoTo,
3277 sctp_flight_size_increase(tp1);
3278 sctp_total_flight_increase(stcb, tp1);
3280 * We inflate the cwnd to compensate for our
3281 * artificial inflation of the flight_size.
3283 tp1->whoTo->cwnd += tp1->book_size;
3284 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3285 sctp_log_sack(asoc->last_acked_seq,
3290 SCTP_LOG_TSN_REVOKED);
3292 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3293 /* it has been re-acked in this SACK */
3294 tp1->sent = SCTP_DATAGRAM_ACKED;
3297 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3303 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3304 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3306 struct sctp_tmit_chunk *tp1;
3307 int strike_flag = 0;
3309 uint32_t sending_seq;
3310 struct sctp_nets *net;
3311 int num_dests_sacked = 0;
3314 * select the sending_seq, this is either the next thing ready to be
3315 * sent but not transmitted, OR the next seq we will assign.
3317 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3319 sending_seq = asoc->sending_seq;
3321 sending_seq = tp1->rec.data.tsn;
3324 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3325 if ((asoc->sctp_cmt_on_off > 0) &&
3326 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3327 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3328 if (net->saw_newack)
3332 if (stcb->asoc.prsctp_supported) {
3333 (void)SCTP_GETTIME_TIMEVAL(&now);
3335 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3337 if (tp1->no_fr_allowed) {
3338 /* this one had a timeout or something */
3341 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3342 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3343 sctp_log_fr(biggest_tsn_newly_acked,
3346 SCTP_FR_LOG_CHECK_STRIKE);
3348 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) ||
3349 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3353 if (stcb->asoc.prsctp_supported) {
3354 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3355 /* Is it expired? */
3356 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3357 /* Yes so drop it */
3358 if (tp1->data != NULL) {
3359 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3360 SCTP_SO_NOT_LOCKED);
3366 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) &&
3367 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3368 /* we are beyond the tsn in the sack */
3371 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3372 /* either a RESEND, ACKED, or MARKED */
3374 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3375 /* Continue striking FWD-TSN chunks */
3376 tp1->rec.data.fwd_tsn_cnt++;
3381 * CMT : SFR algo (covers part of DAC and HTNA as well)
3383 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3385 * No new acks were received for data sent to this
3386 * dest. Therefore, according to the SFR algo for
3387 * CMT, no data sent to this dest can be marked for
3388 * FR using this SACK.
3391 } else if (tp1->whoTo &&
3392 SCTP_TSN_GT(tp1->rec.data.tsn,
3393 tp1->whoTo->this_sack_highest_newack) &&
3394 !(accum_moved && asoc->fast_retran_loss_recovery)) {
3396 * CMT: New acks were received for data sent to this
3397 * dest. But no new acks were seen for data sent
3398 * after tp1. Therefore, according to the SFR algo
3399 * for CMT, tp1 cannot be marked for FR using this
3400 * SACK. This step covers part of the DAC algo and
3401 * the HTNA algo as well.
3406 * Here we check to see if we have already done a FR
3407 * and if so we see if the biggest TSN we saw in the sack is
3408 * smaller than the recovery point. If so we don't strike
3409 * the tsn... otherwise we CAN strike the TSN.
3412 * @@@ JRI: Check for CMT if (accum_moved &&
3413 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3416 if (accum_moved && asoc->fast_retran_loss_recovery) {
3418 * Strike the TSN if in fast-recovery and cum-ack
3421 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3422 sctp_log_fr(biggest_tsn_newly_acked,
3425 SCTP_FR_LOG_STRIKE_CHUNK);
3427 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3430 if ((asoc->sctp_cmt_on_off > 0) &&
3431 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3433 * CMT DAC algorithm: If SACK flag is set to
3434 * 0, then lowest_newack test will not pass
3435 * because it would have been set to the
3436 * cumack earlier. If it is not already to be
3437 * rtx'd, if this is not a mixed sack, and if tp1 is
3438 * not between two sacked TSNs, then mark it by
3439 * one more. NOTE that we are marking by one
3440 * additional time since the SACK DAC flag
3441 * indicates that two packets have been
3442 * received after this missing TSN.
3444 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3445 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3446 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3447 sctp_log_fr(16 + num_dests_sacked,
3450 SCTP_FR_LOG_STRIKE_CHUNK);
3455 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3456 (asoc->sctp_cmt_on_off == 0)) {
3458 * For those that have done a FR we must take
3459 * special consideration if we strike. I.e., the
3460 * biggest_newly_acked must be higher than the
3461 * sending_seq at the time we did the FR.
3464 #ifdef SCTP_FR_TO_ALTERNATE
3466 * If FR's go to new networks, then we must only do
3467 * this for singly homed asoc's. However if the FR's
3468 * go to the same network (Armando's work) then it's
3469 * ok to FR multiple times.
3476 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3477 tp1->rec.data.fast_retran_tsn)) {
3479 * Strike the TSN, since this ack is
3480 * beyond where things were when we did the FR.
3483 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3484 sctp_log_fr(biggest_tsn_newly_acked,
3487 SCTP_FR_LOG_STRIKE_CHUNK);
3489 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3493 if ((asoc->sctp_cmt_on_off > 0) &&
3494 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3496 * CMT DAC algorithm: If
3497 * SACK flag is set to 0,
3498 * then lowest_newack test
3499 * will not pass because it
3500 * would have been set to
3501 * the cumack earlier. If it is
3502 * not already to be rtx'd,
3503 * if this is not a mixed sack and
3504 * if tp1 is not between two
3505 * sacked TSNs, then mark it by
3506 * one more. NOTE that we
3507 * are marking by one
3508 * additional time since the
3509 * SACK DAC flag indicates
3510 * that two packets have
3511 * been received after this missing TSN.
3514 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3515 (num_dests_sacked == 1) &&
3516 SCTP_TSN_GT(this_sack_lowest_newack,
3517 tp1->rec.data.tsn)) {
3518 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3519 sctp_log_fr(32 + num_dests_sacked,
3522 SCTP_FR_LOG_STRIKE_CHUNK);
3524 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3532 * JRI: TODO: remove code for HTNA algo. CMT's SFR algo covers HTNA.
3535 } else if (SCTP_TSN_GT(tp1->rec.data.tsn,
3536 biggest_tsn_newly_acked)) {
3538 * We don't strike these: This is the HTNA
3539 * algorithm, i.e. we don't strike if our TSN is
3540 * larger than the Highest TSN Newly Acked.
3544 /* Strike the TSN */
3545 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3546 sctp_log_fr(biggest_tsn_newly_acked,
3549 SCTP_FR_LOG_STRIKE_CHUNK);
3551 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3554 if ((asoc->sctp_cmt_on_off > 0) &&
3555 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3557 * CMT DAC algorithm: If SACK flag is set to
3558 * 0, then lowest_newack test will not pass
3559 * because it would have been set to the
3560 * cumack earlier. If it is not already to be
3561 * rtx'd, if this is not a mixed sack, and if tp1 is
3562 * not between two sacked TSNs, then mark it by
3563 * one more. NOTE that we are marking by one
3564 * additional time since the SACK DAC flag
3565 * indicates that two packets have been
3566 * received after this missing TSN.
3568 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3569 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) {
3570 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3571 sctp_log_fr(48 + num_dests_sacked,
3574 SCTP_FR_LOG_STRIKE_CHUNK);
3580 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3581 struct sctp_nets *alt;
3583 /* fix counts and things */
3584 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3585 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3586 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3588 (uint32_t)(uintptr_t)tp1->whoTo,
3592 tp1->whoTo->net_ack++;
3593 sctp_flight_size_decrease(tp1);
3594 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3595 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3600 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3601 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3602 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3604 /* add back to the rwnd */
3605 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3607 /* remove from the total flight */
3608 sctp_total_flight_decrease(stcb, tp1);
3610 if ((stcb->asoc.prsctp_supported) &&
3611 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3613 * Has it been retransmitted tv_sec times?
3614 * (We store the retransmission limit in tv_sec.)
3616 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3617 /* Yes, so drop it */
3618 if (tp1->data != NULL) {
3619 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3620 SCTP_SO_NOT_LOCKED);
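/*
 * Illustrative example (invented numbers): under the PR-SCTP RTX
 * policy with timetodrop.tv_sec == 3, a chunk that has already been
 * sent four times (snd_count == 4 > 3) is abandoned here instead of
 * being marked for yet another fast retransmission.
 */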
3622 /* Make sure to flag we had a FR */
3623 if (tp1->whoTo != NULL) {
3624 tp1->whoTo->net_ack++;
3630 * SCTP_PRINTF("OK, we are now ready to FR this guy\n");
3633 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3634 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count,
3638 /* This is a subsequent FR */
3639 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3641 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3642 if (asoc->sctp_cmt_on_off > 0) {
3644 * CMT: Using RTX_SSTHRESH policy for CMT.
3645 * If CMT is being used, then pick dest with
3646 * largest ssthresh for any retransmission.
3648 tp1->no_fr_allowed = 1;
3650 /* sa_ignore NO_NULL_CHK */
3651 if (asoc->sctp_cmt_pf > 0) {
3653 * JRS 5/18/07 - If CMT PF is on,
3654 * use the PF version of sctp_find_alternate_net().
3657 alt = sctp_find_alternate_net(stcb, alt, 2);
3660 * JRS 5/18/07 - If only CMT is on,
3661 * use the CMT version of sctp_find_alternate_net().
3664 /* sa_ignore NO_NULL_CHK */
3665 alt = sctp_find_alternate_net(stcb, alt, 1);
3671 * CUCv2: If a different dest is picked for
3672 * the retransmission, then new
3673 * (rtx-)pseudo_cumack needs to be tracked
3674 * for orig dest. Let CUCv2 track new (rtx-)
3675 * pseudo-cumack always.
3678 tp1->whoTo->find_pseudo_cumack = 1;
3679 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3681 } else { /* CMT is OFF */
3682 #ifdef SCTP_FR_TO_ALTERNATE
3683 /* Can we find an alternate? */
3684 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3687 * default behavior is to NOT retransmit
3688 * FR's to an alternate. Armando Caro's
3689 * paper details why.
3695 tp1->rec.data.doing_fast_retransmit = 1;
3696 /* mark the sending seq for possible subsequent FR's */
3698 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3699 * (uint32_t)tp1->rec.data.tsn);
3701 if (TAILQ_EMPTY(&asoc->send_queue)) {
3703 * If the send queue is empty then it's
3704 * the next sequence number that will be
3705 * assigned so we subtract one from this to
3706 * get the one we last sent.
3708 tp1->rec.data.fast_retran_tsn = sending_seq;
3711 * If there are chunks on the send queue
3712 * (unsent data that has made it from the
3713 * stream queues but not out the door), we
3714 * take the first one (which will have the
3715 * lowest TSN) and subtract one to get the one we last sent.
3718 struct sctp_tmit_chunk *ttt;
3720 ttt = TAILQ_FIRST(&asoc->send_queue);
3721 tp1->rec.data.fast_retran_tsn =
3727 * this guy had an RTO calculation pending on it; cancel it.
3730 if ((tp1->whoTo != NULL) &&
3731 (tp1->whoTo->rto_needed == 0)) {
3732 tp1->whoTo->rto_needed = 1;
3736 if (alt != tp1->whoTo) {
3737 /* yes, there is an alternate. */
3738 sctp_free_remote_addr(tp1->whoTo);
3739 /* sa_ignore FREED_MEMORY */
3741 atomic_add_int(&alt->ref_count, 1);
3747 struct sctp_tmit_chunk *
3748 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3749 struct sctp_association *asoc)
3751 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3755 if (asoc->prsctp_supported == 0) {
3758 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3759 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3760 tp1->sent != SCTP_DATAGRAM_RESEND &&
3761 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3762 /* no chance to advance, out of here */
3765 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3766 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3767 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3768 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3769 asoc->advanced_peer_ack_point,
3770 tp1->rec.data.tsn, 0, 0);
3773 if (!PR_SCTP_ENABLED(tp1->flags)) {
3775 * We can't fwd-tsn past any that are reliable aka
3776 * retransmitted until the asoc fails.
3781 (void)SCTP_GETTIME_TIMEVAL(&now);
3785 * Now we have a chunk which is marked for another
3786 * retransmission to a PR-stream but has run out of its
3787 * chances already, or has been marked to skip now. Can we skip
3788 * it if it's a resend?
3790 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3791 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3793 * Now, is this one marked for resend and its time is now up?
3796 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3797 /* Yes so drop it */
3799 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3800 1, SCTP_SO_NOT_LOCKED);
3804 * No, we are done when we hit one marked for resend
3805 * whose time has not expired.
3811 * Ok now if this chunk is marked to drop it we can clean up
3812 * the chunk, advance our peer ack point, and check the next chunk.
3815 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3816 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3817 /* the advanced peer ack point goes forward */
3818 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) {
3819 asoc->advanced_peer_ack_point = tp1->rec.data.tsn;
3821 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) {
3822 /* No update but we do save the chk */
3827 * If it is still in RESEND we can advance no further.
3837 sctp_fs_audit(struct sctp_association *asoc)
3839 struct sctp_tmit_chunk *chk;
3840 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3843 int entry_flight, entry_cnt;
3848 entry_flight = asoc->total_flight;
3849 entry_cnt = asoc->total_flight_count;
3851 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3854 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3855 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3856 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n",
3861 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3863 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3865 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3872 if ((inflight > 0) || (inbetween > 0)) {
3874 panic("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d",
3875 inflight, inbetween, resend, above, acked);
3877 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n",
3878 entry_flight, entry_cnt);
3879 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n",
3880 inflight, inbetween, resend, above, acked);
3888 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3889 struct sctp_association *asoc,
3890 struct sctp_tmit_chunk *tp1)
3892 tp1->window_probe = 0;
3893 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3894 /* TSNs skipped; we do NOT move back. */
3895 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3896 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3898 (uint32_t)(uintptr_t)tp1->whoTo,
3902 /* First setup this by shrinking flight */
3903 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3904 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3907 sctp_flight_size_decrease(tp1);
3908 sctp_total_flight_decrease(stcb, tp1);
3909 /* Now mark for resend */
3910 tp1->sent = SCTP_DATAGRAM_RESEND;
3911 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3913 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3914 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3915 tp1->whoTo->flight_size,
3917 (uint32_t)(uintptr_t)tp1->whoTo,
3923 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3924 uint32_t rwnd, int *abort_now, int ecne_seen)
3926 struct sctp_nets *net;
3927 struct sctp_association *asoc;
3928 struct sctp_tmit_chunk *tp1, *tp2;
3930 int win_probe_recovery = 0;
3931 int win_probe_recovered = 0;
3932 int j, done_once = 0;
3936 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3937 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3938 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3940 SCTP_TCB_LOCK_ASSERT(stcb);
3941 #ifdef SCTP_ASOCLOG_OF_TSNS
3942 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3943 stcb->asoc.cumack_log_at++;
3944 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3945 stcb->asoc.cumack_log_at = 0;
3949 old_rwnd = asoc->peers_rwnd;
3950 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3953 } else if (asoc->last_acked_seq == cumack) {
3954 /* Window update sack */
3955 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3956 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3957 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3958 /* SWS sender side engages */
3959 asoc->peers_rwnd = 0;
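/*
 * Worked example (invented numbers): if the peer advertises
 * rwnd = 8192 while 6000 bytes are in flight in 4 chunks and
 * sctp_peer_chunk_oh is 256, then
 *
 *	peers_rwnd = 8192 - (6000 + 4 * 256) = 1168
 *
 * and any result below sctp_sws_sender is clamped to 0 so we do not
 * dribble data into a tiny window.
 */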
3961 if (asoc->peers_rwnd > old_rwnd) {
3967 /* First setup for CC stuff */
3968 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3969 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3970 /* Drag along the window_tsn for cwr's */
3971 net->cwr_window_tsn = cumack;
3973 net->prev_cwnd = net->cwnd;
3978 * CMT: Reset CUC and Fast recovery algo variables before SACK processing.
3981 net->new_pseudo_cumack = 0;
3982 net->will_exit_fast_recovery = 0;
3983 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3984 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3987 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3988 tp1 = TAILQ_LAST(&asoc->sent_queue,
3989 sctpchunk_listhead);
3990 send_s = tp1->rec.data.tsn + 1;
3992 send_s = asoc->sending_seq;
3994 if (SCTP_TSN_GE(cumack, send_s)) {
3995 struct mbuf *op_err;
3996 char msg[SCTP_DIAG_INFO_LEN];
4000 SCTP_SNPRINTF(msg, sizeof(msg),
4001 "Cum ack %8.8x greater or equal than TSN %8.8x",
4003 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4004 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4005 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4008 asoc->this_sack_highest_gap = cumack;
4009 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4010 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4011 stcb->asoc.overall_error_count,
4013 SCTP_FROM_SCTP_INDATA,
4016 stcb->asoc.overall_error_count = 0;
4017 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
4018 /* process the new consecutive TSN first */
4019 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4020 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) {
4021 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4022 SCTP_PRINTF("Warning, an unsent is now acked?\n");
4024 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4026 * If it is less than ACKED, it is
4027 * now no longer in flight. Higher
4028 * values may occur during marking
4030 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4031 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4032 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4033 tp1->whoTo->flight_size,
4035 (uint32_t)(uintptr_t)tp1->whoTo,
4038 sctp_flight_size_decrease(tp1);
4039 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4040 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4043 /* sa_ignore NO_NULL_CHK */
4044 sctp_total_flight_decrease(stcb, tp1);
4046 tp1->whoTo->net_ack += tp1->send_size;
4047 if (tp1->snd_count < 2) {
4049 * True non-retransmitted
4052 tp1->whoTo->net_ack2 +=
4055 /* update RTO too? */
4058 sctp_calculate_rto(stcb,
4061 &tp1->sent_rcv_time,
4062 SCTP_RTT_FROM_DATA)) {
4065 if (tp1->whoTo->rto_needed == 0) {
4066 tp1->whoTo->rto_needed = 1;
4072 * CMT: CUCv2 algorithm. From the
4073 * cumack'd TSNs, for each TSN being
4074 * acked for the first time, set the
4075 * following variables for the
4076 * corresp destination.
4077 * new_pseudo_cumack will trigger a cwnd update;
4079 * find_(rtx_)pseudo_cumack will
4080 * trigger search for the next
4081 * expected (rtx-)pseudo-cumack.
4083 tp1->whoTo->new_pseudo_cumack = 1;
4084 tp1->whoTo->find_pseudo_cumack = 1;
4085 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4086 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4087 /* sa_ignore NO_NULL_CHK */
4088 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4091 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4092 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4094 if (tp1->rec.data.chunk_was_revoked) {
4095 /* deflate the cwnd */
4096 tp1->whoTo->cwnd -= tp1->book_size;
4097 tp1->rec.data.chunk_was_revoked = 0;
4098 }
4099 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4100 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4101 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4102 #ifdef INVARIANTS
4103 } else {
4104 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4105 #endif
4106 }
4107 }
4108 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4109 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4110 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4111 asoc->trigger_reset = 1;
4112 }
4113 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4114 if (tp1->data) {
4115 /* sa_ignore NO_NULL_CHK */
4116 sctp_free_bufspace(stcb, asoc, tp1, 1);
4117 sctp_m_freem(tp1->data);
4118 tp1->data = NULL;
4119 }
4120 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4121 sctp_log_sack(asoc->last_acked_seq,
4122 cumack,
4123 tp1->rec.data.tsn,
4124 0,
4125 0,
4126 SCTP_LOG_FREE_SENT);
4127 }
4128 asoc->sent_queue_cnt--;
4129 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4130 } else {
4131 break;
4132 }
4133 }
4134 }
4135 /* sa_ignore NO_NULL_CHK */
4136 if (stcb->sctp_socket) {
4137 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4138 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4139 /* sa_ignore NO_NULL_CHK */
4140 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4141 }
4142 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4143 } else {
4144 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4145 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4146 }
4147 }
4149 /* JRS - Use the congestion control given in the CC module */
4150 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4151 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4152 if (net->net_ack2 > 0) {
4153 /*
4154 * Karn's rule applies to clearing error
4155 * count, this is optional.
4156 */
4157 net->error_count = 0;
4158 if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
4159 /* addr came good */
4160 net->dest_state |= SCTP_ADDR_REACHABLE;
4161 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4162 0, (void *)net, SCTP_SO_NOT_LOCKED);
4163 }
4164 if (net == stcb->asoc.primary_destination) {
4165 if (stcb->asoc.alternate) {
4166 /*
4167 * release the alternate,
4168 * primary is good
4169 */
4170 sctp_free_remote_addr(stcb->asoc.alternate);
4171 stcb->asoc.alternate = NULL;
4172 }
4173 }
4174 if (net->dest_state & SCTP_ADDR_PF) {
4175 net->dest_state &= ~SCTP_ADDR_PF;
4176 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4177 stcb->sctp_ep, stcb, net,
4178 SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4179 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4180 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4181 /* Done with this net */
4182 net->net_ack = 0;
4183 }
4184 /* restore any doubled timers */
4185 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4186 if (net->RTO < stcb->asoc.minrto) {
4187 net->RTO = stcb->asoc.minrto;
4188 }
4189 if (net->RTO > stcb->asoc.maxrto) {
4190 net->RTO = stcb->asoc.maxrto;
4191 }
4192 }
4193 }
4194 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4195 }
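/*
 * Illustrative sketch (hypothetical helper, not from this file): the
 * "restore any doubled timers" step above rebuilds the RTO from the
 * smoothed RTT state (lastsa is kept scaled by SCTP_RTT_SHIFT) and then
 * clamps it into [minrto, maxrto].  Excluded from the build via #if 0.
 */
#if 0
#include <stdint.h>

static uint32_t
restored_rto(uint32_t lastsa, uint32_t lastsv, int rtt_shift,
    uint32_t minrto, uint32_t maxrto)
{
	uint32_t rto = (lastsa >> rtt_shift) + lastsv;

	if (rto < minrto)
		rto = minrto;
	if (rto > maxrto)
		rto = maxrto;
	return (rto);
}
#endif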
4196 asoc->last_acked_seq = cumack;
4198 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4199 /* nothing left in-flight */
4200 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4201 net->flight_size = 0;
4202 net->partial_bytes_acked = 0;
4203 }
4204 asoc->total_flight = 0;
4205 asoc->total_flight_count = 0;
4206 }
4209 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4210 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4211 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4212 /* SWS sender side engages */
4213 asoc->peers_rwnd = 0;
4214 }
4215 if (asoc->peers_rwnd > old_rwnd) {
4216 win_probe_recovery = 1;
4217 }
4218 /* Now assure a timer where data is queued at */
4219 again:
4220 j = 0;
4221 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4222 if (win_probe_recovery && (net->window_probe)) {
4223 win_probe_recovered = 1;
4224 /*
4225 * Find first chunk that was used with window probe
4226 * and clear the sent
4227 */
4228 /* sa_ignore FREED_MEMORY */
4229 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4230 if (tp1->window_probe) {
4231 /* move back to data send queue */
4232 sctp_window_probe_recovery(stcb, asoc, tp1);
4233 break;
4234 }
4235 }
4236 }
4237 if (net->flight_size) {
4238 j++;
4239 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4240 if (net->window_probe) {
4241 net->window_probe = 0;
4242 }
4243 } else {
4244 if (net->window_probe) {
4245 /*
4246 * In window probes we must assure a timer
4247 * is still running there
4248 */
4249 net->window_probe = 0;
4250 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4251 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net);
4252 }
4253 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4254 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4255 stcb, net,
4256 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4257 }
4258 }
4259 }
4260 if ((j == 0) &&
4261 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4262 (asoc->sent_queue_retran_cnt == 0) &&
4263 (win_probe_recovered == 0) &&
4264 (done_once == 0)) {
4265 /*
4266 * huh, this should not happen unless all packets are
4267 * PR-SCTP and marked to skip of course.
4268 */
4269 if (sctp_fs_audit(asoc)) {
4270 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4271 net->flight_size = 0;
4272 }
4273 asoc->total_flight = 0;
4274 asoc->total_flight_count = 0;
4275 asoc->sent_queue_retran_cnt = 0;
4276 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4277 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4278 sctp_flight_size_increase(tp1);
4279 sctp_total_flight_increase(stcb, tp1);
4280 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4281 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4282 }
4283 }
4284 }
4285 done_once = 1;
4286 goto again;
4287 }
4288 /**********************************/
4289 /* Now what about shutdown issues */
4290 /**********************************/
4291 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4292 /* nothing left on sendqueue.. consider done */
4294 if ((asoc->stream_queue_cnt == 1) &&
4295 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4296 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4297 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4298 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4299 }
4300 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4301 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4302 (asoc->stream_queue_cnt == 1) &&
4303 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4304 struct mbuf *op_err;
4306 *abort_now = 1;
4307 /* XXX */
4308 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4309 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_28;
4310 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4311 return;
4312 }
4313 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4314 (asoc->stream_queue_cnt == 0)) {
4315 struct sctp_nets *netp;
4317 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4318 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4319 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4320 }
4321 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4322 sctp_stop_timers_for_shutdown(stcb);
4323 if (asoc->alternate) {
4324 netp = asoc->alternate;
4325 } else {
4326 netp = asoc->primary_destination;
4327 }
4328 sctp_send_shutdown(stcb, netp);
4329 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4330 stcb->sctp_ep, stcb, netp);
4331 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4332 stcb->sctp_ep, stcb, NULL);
4333 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4334 (asoc->stream_queue_cnt == 0)) {
4335 struct sctp_nets *netp;
4337 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4338 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
4339 sctp_stop_timers_for_shutdown(stcb);
4340 if (asoc->alternate) {
4341 netp = asoc->alternate;
4342 } else {
4343 netp = asoc->primary_destination;
4344 }
4345 sctp_send_shutdown_ack(stcb, netp);
4346 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4347 stcb->sctp_ep, stcb, netp);
4348 }
4349 }
4350 /*********************************************/
4351 /* Here we perform PR-SCTP procedures */
4352 /* (section 4.2) */
4353 /*********************************************/
4354 /* C1. update advancedPeerAckPoint */
4355 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4356 asoc->advanced_peer_ack_point = cumack;
4357 }
4358 /* PR-Sctp issues need to be addressed too */
4359 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4360 struct sctp_tmit_chunk *lchk;
4361 uint32_t old_adv_peer_ack_point;
4363 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4364 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4365 /* C3. See if we need to send a Fwd-TSN */
4366 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4367 /*
4368 * ISSUE with ECN, see FWD-TSN processing.
4369 */
4370 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4371 send_forward_tsn(stcb, asoc);
4372 } else if (lchk) {
4373 /* try to FR fwd-tsn's that get lost too */
4374 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4375 send_forward_tsn(stcb, asoc);
4376 }
4377 }
4378 }
4379 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
4380 if (lchk->whoTo != NULL) {
4381 break;
4382 }
4383 }
4384 if (lchk != NULL) {
4385 /* Assure a timer is up */
4386 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4387 stcb->sctp_ep, stcb, lchk->whoTo);
4388 }
4389 }
4390 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4391 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4392 rwnd,
4393 stcb->asoc.peers_rwnd,
4394 stcb->asoc.total_flight,
4395 stcb->asoc.total_output_queue_size);
4396 }
4397 }
4399 void
4400 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4401 struct sctp_tcb *stcb,
4402 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4403 int *abort_now, uint8_t flags,
4404 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4405 {
4406 struct sctp_association *asoc;
4407 struct sctp_tmit_chunk *tp1, *tp2;
4408 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4409 uint16_t wake_him = 0;
4410 uint32_t send_s = 0;
4411 long j;
4412 int accum_moved = 0;
4413 int will_exit_fast_recovery = 0;
4414 uint32_t a_rwnd, old_rwnd;
4415 int win_probe_recovery = 0;
4416 int win_probe_recovered = 0;
4417 struct sctp_nets *net = NULL;
4418 int done_once;
4419 int rto_ok = 1;
4420 uint8_t reneged_all = 0;
4421 uint8_t cmt_dac_flag;
4423 /*
4424 * we take any chance we can to service our queues since we cannot
4425 * get awoken when the socket is read from :<
4426 */
4427 /*
4428 * Now perform the actual SACK handling: 1) Verify that it is not an
4429 * old sack, if so discard. 2) If there is nothing left in the send
4430 * queue (cum-ack is equal to last acked) then you have a duplicate
4431 * too, update any rwnd change and verify no timers are running.
4432 * then return. 3) Process any new consecutive data i.e. cum-ack
4433 * moved process these first and note that it moved. 4) Process any
4434 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4435 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4436 * sync up flightsizes and things, stop all timers and also check
4437 * for shutdown_pending state. If so then go ahead and send off the
4438 * shutdown. If in shutdown recv, send off the shutdown-ack and
4439 * start that timer, Ret. 9) Strike any non-acked things and do FR
4440 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4441 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4442 * if in shutdown_recv state.
4443 */
4444 SCTP_TCB_LOCK_ASSERT(stcb);
4445 /* CMT DAC algo */
4446 this_sack_lowest_newack = 0;
4447 SCTP_STAT_INCR(sctps_slowpath_sack);
4448 last_tsn = cum_ack;
4449 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4450 #ifdef SCTP_ASOCLOG_OF_TSNS
4451 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4452 stcb->asoc.cumack_log_at++;
4453 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4454 stcb->asoc.cumack_log_at = 0;
4455 }
4456 #endif
4457 a_rwnd = rwnd;
4459 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4460 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4461 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4462 }
4464 old_rwnd = stcb->asoc.peers_rwnd;
4465 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4466 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4467 stcb->asoc.overall_error_count,
4468 0,
4469 SCTP_FROM_SCTP_INDATA,
4470 __LINE__);
4471 }
4472 stcb->asoc.overall_error_count = 0;
4473 asoc = &stcb->asoc;
4474 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4475 sctp_log_sack(asoc->last_acked_seq,
4476 cum_ack,
4477 0,
4478 num_seg,
4479 num_dup,
4480 SCTP_LOG_NEW_SACK);
4481 }
4482 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4483 uint16_t i;
4484 uint32_t *dupdata, dblock;
4486 for (i = 0; i < num_dup; i++) {
4487 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4488 sizeof(uint32_t), (uint8_t *)&dblock);
4489 if (dupdata == NULL) {
4490 break;
4491 }
4492 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4493 }
4494 }
4496 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4497 tp1 = TAILQ_LAST(&asoc->sent_queue,
4498 sctpchunk_listhead);
4499 send_s = tp1->rec.data.tsn + 1;
4500 } else {
4501 tp1 = NULL;
4502 send_s = asoc->sending_seq;
4503 }
4504 if (SCTP_TSN_GE(cum_ack, send_s)) {
4505 struct mbuf *op_err;
4506 char msg[SCTP_DIAG_INFO_LEN];
4508 /*
4509 * no way, we have not even sent this TSN out yet. Peer is
4510 * hopelessly messed up with us.
4511 */
4512 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4513 cum_ack, send_s);
4514 if (tp1) {
4515 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n",
4516 tp1->rec.data.tsn, (void *)tp1);
4517 }
4518 hopeless_peer:
4519 *abort_now = 1;
4520 /* XXX */
4521 SCTP_SNPRINTF(msg, sizeof(msg),
4522 "Cum ack %8.8x greater or equal than TSN %8.8x",
4523 cum_ack, send_s);
4524 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4525 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_29;
4526 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4527 return;
4528 }
4529 /**********************/
4530 /* 1) check the range */
4531 /**********************/
4532 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4533 /* acking something behind */
4534 return;
4535 }
4537 /* update the Rwnd of the peer */
4538 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4539 TAILQ_EMPTY(&asoc->send_queue) &&
4540 (asoc->stream_queue_cnt == 0)) {
4541 /* nothing left on send/sent and strmq */
4542 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4543 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4544 asoc->peers_rwnd, 0, 0, a_rwnd);
4545 }
4546 asoc->peers_rwnd = a_rwnd;
4547 if (asoc->sent_queue_retran_cnt) {
4548 asoc->sent_queue_retran_cnt = 0;
4549 }
4550 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4551 /* SWS sender side engages */
4552 asoc->peers_rwnd = 0;
4553 }
4554 /* stop any timers */
4555 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4556 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4557 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4558 net->partial_bytes_acked = 0;
4559 net->flight_size = 0;
4560 }
4561 asoc->total_flight = 0;
4562 asoc->total_flight_count = 0;
4563 return;
4564 }
4565 /*
4566 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4567 * things. The total byte count acked is tracked in netAckSz AND
4568 * netAck2 is used to track the total bytes acked that are un-
4569 * ambiguous and were never retransmitted. We track these on a per
4570 * destination address basis.
4571 */
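/*
 * Illustrative sketch (hypothetical, condensed): per-destination ack
 * accounting as described above.  net_ack counts every newly acked byte;
 * net_ack2 counts only bytes from chunks sent exactly once, which are
 * unambiguous for Karn's rule (RTT sampling and clearing the error
 * count).  Excluded from the build via #if 0.
 */
#if 0
#include <stdint.h>

struct ack_tally {
	uint32_t net_ack;	/* all bytes newly acked on this path */
	uint32_t net_ack2;	/* never-retransmitted (unambiguous) bytes */
};

static void
tally_acked_chunk(struct ack_tally *t, uint32_t send_size, int snd_count)
{
	t->net_ack += send_size;
	if (snd_count < 2)	/* sent exactly once: unambiguous sample */
		t->net_ack2 += send_size;
}
#endif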
4572 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4573 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4574 /* Drag along the window_tsn for cwr's */
4575 net->cwr_window_tsn = cum_ack;
4576 }
4577 net->prev_cwnd = net->cwnd;
4578 net->net_ack = 0;
4579 net->net_ack2 = 0;
4581 /*
4582 * CMT: Reset CUC and Fast recovery algo variables before
4583 * SACK processing
4584 */
4585 net->new_pseudo_cumack = 0;
4586 net->will_exit_fast_recovery = 0;
4587 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4588 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4589 }
4591 /*
4592 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4593 * to be greater than the cumack. Also reset saw_newack to 0
4594 * for all dests.
4595 */
4596 net->saw_newack = 0;
4597 net->this_sack_highest_newack = last_tsn;
4598 }
4599 /* process the new consecutive TSN first */
4600 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4601 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) {
4602 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4603 accum_moved = 1;
4604 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4605 /*
4606 * If it is less than ACKED, it is
4607 * now no-longer in flight. Higher
4608 * values may occur during marking
4609 */
4610 if ((tp1->whoTo->dest_state &
4611 SCTP_ADDR_UNCONFIRMED) &&
4612 (tp1->snd_count < 2)) {
4613 /*
4614 * If there was no retran
4615 * and the address is
4616 * un-confirmed and we sent
4617 * there and are now
4618 * sacked.. its confirmed,
4619 * mark it so.
4620 */
4621 tp1->whoTo->dest_state &=
4622 ~SCTP_ADDR_UNCONFIRMED;
4623 }
4624 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4625 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4626 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4627 tp1->whoTo->flight_size,
4628 tp1->book_size,
4629 (uint32_t)(uintptr_t)tp1->whoTo,
4630 tp1->rec.data.tsn);
4631 }
4632 sctp_flight_size_decrease(tp1);
4633 sctp_total_flight_decrease(stcb, tp1);
4634 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4635 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4636 tp1);
4637 }
4638 }
4639 tp1->whoTo->net_ack += tp1->send_size;
4641 /* CMT SFR and DAC algos */
4642 this_sack_lowest_newack = tp1->rec.data.tsn;
4643 tp1->whoTo->saw_newack = 1;
4645 if (tp1->snd_count < 2) {
4646 /*
4647 * True non-retransmitted
4648 * chunk
4649 */
4650 tp1->whoTo->net_ack2 +=
4651 tp1->send_size;
4653 /* update RTO too? */
4654 if (tp1->do_rtt) {
4655 if (rto_ok &&
4656 sctp_calculate_rto(stcb,
4657 &stcb->asoc,
4658 tp1->whoTo,
4659 &tp1->sent_rcv_time,
4660 SCTP_RTT_FROM_DATA)) {
4661 rto_ok = 0;
4662 }
4663 if (tp1->whoTo->rto_needed == 0) {
4664 tp1->whoTo->rto_needed = 1;
4665 }
4666 tp1->do_rtt = 0;
4667 }
4668 }
4669 /*
4670 * CMT: CUCv2 algorithm. From the
4671 * cumack'd TSNs, for each TSN being
4672 * acked for the first time, set the
4673 * following variables for the
4674 * corresp destination.
4675 * new_pseudo_cumack will trigger a
4676 * cwnd adjustment.
4677 * find_(rtx_)pseudo_cumack will
4678 * trigger search for the next
4679 * expected (rtx-)pseudo-cumack.
4680 */
4681 tp1->whoTo->new_pseudo_cumack = 1;
4682 tp1->whoTo->find_pseudo_cumack = 1;
4683 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4684 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4685 sctp_log_sack(asoc->last_acked_seq,
4686 cum_ack,
4687 tp1->rec.data.tsn,
4688 0,
4689 0,
4690 SCTP_LOG_TSN_ACKED);
4691 }
4692 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4693 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK);
4694 }
4696 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4697 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4698 #ifdef SCTP_AUDITING_ENABLED
4699 sctp_audit_log(0xB3,
4700 (asoc->sent_queue_retran_cnt & 0x000000ff));
4701 #endif
4702 }
4703 if (tp1->rec.data.chunk_was_revoked) {
4704 /* deflate the cwnd */
4705 tp1->whoTo->cwnd -= tp1->book_size;
4706 tp1->rec.data.chunk_was_revoked = 0;
4707 }
4708 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4709 tp1->sent = SCTP_DATAGRAM_ACKED;
4710 }
4711 }
4712 } else {
4713 break;
4714 }
4715 }
4716 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4717 /* always set this up to cum-ack */
4718 asoc->this_sack_highest_gap = last_tsn;
4720 if ((num_seg > 0) || (num_nr_seg > 0)) {
4721 /*
4722 * thisSackHighestGap will increase while handling NEW
4723 * segments this_sack_highest_newack will increase while
4724 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4725 * used for CMT DAC algo. saw_newack will also change.
4726 */
4727 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4728 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4729 num_seg, num_nr_seg, &rto_ok)) {
4730 wake_him++;
4731 }
4732 /*
4733 * validate the biggest_tsn_acked in the gap acks if strict
4734 * adherence is wanted.
4735 */
4736 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4737 /*
4738 * peer is either confused or we are under attack.
4739 * We must abort.
4740 */
4741 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4742 biggest_tsn_acked, send_s);
4743 goto hopeless_peer;
4744 }
4745 }
4746 /*******************************************/
4747 /* cancel ALL T3-send timer if accum moved */
4748 /*******************************************/
4749 if (asoc->sctp_cmt_on_off > 0) {
4750 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4751 if (net->new_pseudo_cumack)
4752 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4753 stcb, net,
4754 SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4755 }
4756 } else {
4757 if (accum_moved) {
4758 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4759 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4760 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4761 }
4762 }
4763 }
4764 /********************************************/
4765 /* drop the acked chunks from the sentqueue */
4766 /********************************************/
4767 asoc->last_acked_seq = cum_ack;
4769 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4770 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) {
4771 break;
4772 }
4773 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4774 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) {
4775 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--;
4776 #ifdef INVARIANTS
4777 } else {
4778 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid);
4779 #endif
4780 }
4781 }
4782 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) &&
4783 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) &&
4784 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) {
4785 asoc->trigger_reset = 1;
4786 }
4787 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4788 if (PR_SCTP_ENABLED(tp1->flags)) {
4789 if (asoc->pr_sctp_cnt != 0)
4790 asoc->pr_sctp_cnt--;
4791 }
4792 asoc->sent_queue_cnt--;
4793 if (tp1->data) {
4794 /* sa_ignore NO_NULL_CHK */
4795 sctp_free_bufspace(stcb, asoc, tp1, 1);
4796 sctp_m_freem(tp1->data);
4797 tp1->data = NULL;
4798 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4799 asoc->sent_queue_cnt_removeable--;
4800 }
4801 }
4802 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4803 sctp_log_sack(asoc->last_acked_seq,
4804 cum_ack,
4805 tp1->rec.data.tsn,
4806 0,
4807 0,
4808 SCTP_LOG_FREE_SENT);
4809 }
4810 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4811 wake_him++;
4812 }
4813 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4814 #ifdef INVARIANTS
4815 panic("Warning flight size is positive and should be 0");
4816 #else
4817 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4818 asoc->total_flight);
4819 #endif
4820 asoc->total_flight = 0;
4821 }
4823 /* sa_ignore NO_NULL_CHK */
4824 if ((wake_him) && (stcb->sctp_socket)) {
4825 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4826 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4827 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4828 }
4829 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4830 } else {
4831 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4832 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4833 }
4834 }
4836 if (asoc->fast_retran_loss_recovery && accum_moved) {
4837 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4838 /* Setup so we will exit RFC2582 fast recovery */
4839 will_exit_fast_recovery = 1;
4840 }
4841 }
4842 /*
4843 * Check for revoked fragments:
4845 * if Previous sack - Had no frags then we can't have any revoked if
4846 * Previous sack - Had frag's then - If we now have frags aka
4847 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4848 * some of them. else - The peer revoked all ACKED fragments, since
4849 * we had some before and now we have NONE.
4850 */
4852 if (num_seg) {
4853 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4854 asoc->saw_sack_with_frags = 1;
4855 } else if (asoc->saw_sack_with_frags) {
4856 int cnt_revoked = 0;
4858 /* Peer revoked all dg's marked or acked */
4859 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4860 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4861 tp1->sent = SCTP_DATAGRAM_SENT;
4862 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4863 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4864 tp1->whoTo->flight_size,
4865 tp1->book_size,
4866 (uint32_t)(uintptr_t)tp1->whoTo,
4867 tp1->rec.data.tsn);
4868 }
4869 sctp_flight_size_increase(tp1);
4870 sctp_total_flight_increase(stcb, tp1);
4871 tp1->rec.data.chunk_was_revoked = 1;
4872 /*
4873 * To ensure that this increase in
4874 * flightsize, which is artificial, does not
4875 * throttle the sender, we also increase the
4876 * cwnd artificially.
4877 */
4878 tp1->whoTo->cwnd += tp1->book_size;
4879 cnt_revoked++;
4880 }
4881 }
4882 if (cnt_revoked) {
4883 reneged_all = 1;
4884 }
4885 asoc->saw_sack_with_frags = 0;
4886 }
4887 if (num_nr_seg > 0)
4888 asoc->saw_sack_with_nr_frags = 1;
4889 else
4890 asoc->saw_sack_with_nr_frags = 0;
4892 /* JRS - Use the congestion control given in the CC module */
4893 if (ecne_seen == 0) {
4894 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4895 if (net->net_ack2 > 0) {
4896 /*
4897 * Karn's rule applies to clearing error
4898 * count, this is optional.
4899 */
4900 net->error_count = 0;
4901 if ((net->dest_state & SCTP_ADDR_REACHABLE) == 0) {
4902 /* addr came good */
4903 net->dest_state |= SCTP_ADDR_REACHABLE;
4904 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4905 0, (void *)net, SCTP_SO_NOT_LOCKED);
4906 }
4908 if (net == stcb->asoc.primary_destination) {
4909 if (stcb->asoc.alternate) {
4910 /*
4911 * release the alternate,
4912 * primary is good
4913 */
4914 sctp_free_remote_addr(stcb->asoc.alternate);
4915 stcb->asoc.alternate = NULL;
4916 }
4917 }
4919 if (net->dest_state & SCTP_ADDR_PF) {
4920 net->dest_state &= ~SCTP_ADDR_PF;
4921 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4922 stcb->sctp_ep, stcb, net,
4923 SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
4924 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4925 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4926 /* Done with this net */
4927 net->net_ack = 0;
4928 }
4929 /* restore any doubled timers */
4930 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4931 if (net->RTO < stcb->asoc.minrto) {
4932 net->RTO = stcb->asoc.minrto;
4933 }
4934 if (net->RTO > stcb->asoc.maxrto) {
4935 net->RTO = stcb->asoc.maxrto;
4936 }
4937 }
4938 }
4939 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4940 }
4942 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4943 /* nothing left in-flight */
4944 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4945 /* stop all timers */
4946 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4947 stcb, net,
4948 SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
4949 net->flight_size = 0;
4950 net->partial_bytes_acked = 0;
4951 }
4952 asoc->total_flight = 0;
4953 asoc->total_flight_count = 0;
4954 }
4956 /**********************************/
4957 /* Now what about shutdown issues */
4958 /**********************************/
4959 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4960 /* nothing left on sendqueue.. consider done */
4961 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4962 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4963 asoc->peers_rwnd, 0, 0, a_rwnd);
4964 }
4965 asoc->peers_rwnd = a_rwnd;
4966 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4967 /* SWS sender side engages */
4968 asoc->peers_rwnd = 0;
4969 }
4971 if ((asoc->stream_queue_cnt == 1) &&
4972 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4973 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4974 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) {
4975 SCTP_ADD_SUBSTATE(stcb, SCTP_STATE_PARTIAL_MSG_LEFT);
4976 }
4977 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4978 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4979 (asoc->stream_queue_cnt == 1) &&
4980 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
4981 struct mbuf *op_err;
4983 *abort_now = 1;
4984 /* XXX */
4985 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4986 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
4987 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
4988 return;
4989 }
4990 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4991 (asoc->stream_queue_cnt == 0)) {
4992 struct sctp_nets *netp;
4994 if ((SCTP_GET_STATE(stcb) == SCTP_STATE_OPEN) ||
4995 (SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4996 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4997 }
4998 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_SENT);
4999 sctp_stop_timers_for_shutdown(stcb);
5000 if (asoc->alternate) {
5001 netp = asoc->alternate;
5002 } else {
5003 netp = asoc->primary_destination;
5004 }
5005 sctp_send_shutdown(stcb, netp);
5006 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
5007 stcb->sctp_ep, stcb, netp);
5008 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
5009 stcb->sctp_ep, stcb, NULL);
5010 return;
5011 } else if ((SCTP_GET_STATE(stcb) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
5012 (asoc->stream_queue_cnt == 0)) {
5013 struct sctp_nets *netp;
5015 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
5016 SCTP_SET_STATE(stcb, SCTP_STATE_SHUTDOWN_ACK_SENT);
5017 sctp_stop_timers_for_shutdown(stcb);
5018 if (asoc->alternate) {
5019 netp = asoc->alternate;
5020 } else {
5021 netp = asoc->primary_destination;
5022 }
5023 sctp_send_shutdown_ack(stcb, netp);
5024 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5025 stcb->sctp_ep, stcb, netp);
5026 return;
5027 }
5028 }
5029 /*
5030 * Now here we are going to recycle net_ack for a different use...
5031 */
5033 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5034 net->net_ack = 0;
5035 }
5037 /*
5038 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5039 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5040 * automatically ensure that.
5041 */
5042 if ((asoc->sctp_cmt_on_off > 0) &&
5043 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5044 (cmt_dac_flag == 0)) {
5045 this_sack_lowest_newack = cum_ack;
5046 }
5047 if ((num_seg > 0) || (num_nr_seg > 0)) {
5048 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5049 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5050 }
5051 /* JRS - Use the congestion control given in the CC module */
5052 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5054 /* Now are we exiting loss recovery ? */
5055 if (will_exit_fast_recovery) {
5056 /* Ok, we must exit fast recovery */
5057 asoc->fast_retran_loss_recovery = 0;
5058 }
5059 if ((asoc->sat_t3_loss_recovery) &&
5060 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5061 /* end satellite t3 loss recovery */
5062 asoc->sat_t3_loss_recovery = 0;
5063 }
5064 /*
5065 * CMT Fast recovery
5066 */
5067 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5068 if (net->will_exit_fast_recovery) {
5069 /* Ok, we must exit fast recovery */
5070 net->fast_retran_loss_recovery = 0;
5071 }
5072 }
5074 /* Adjust and set the new rwnd value */
5075 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5076 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5077 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5078 }
5079 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5080 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5081 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5082 /* SWS sender side engages */
5083 asoc->peers_rwnd = 0;
5084 }
5085 if (asoc->peers_rwnd > old_rwnd) {
5086 win_probe_recovery = 1;
5087 }
5089 /*
5090 * Now we must setup so we have a timer up for anyone with
5091 * outstanding data.
5092 */
5093 done_once = 0;
5094 again:
5095 j = 0;
5096 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5097 if (win_probe_recovery && (net->window_probe)) {
5098 win_probe_recovered = 1;
5099 /*
5100 * Find first chunk that was used with
5101 * window probe and clear the event. Put
5102 * it back into the send queue as if has
5103 * not been sent.
5104 */
5105 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5106 if (tp1->window_probe) {
5107 sctp_window_probe_recovery(stcb, asoc, tp1);
5108 break;
5109 }
5110 }
5111 }
5112 if (net->flight_size) {
5113 j++;
5114 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5115 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5116 stcb->sctp_ep, stcb, net);
5117 }
5118 if (net->window_probe) {
5119 net->window_probe = 0;
5120 }
5121 } else {
5122 if (net->window_probe) {
5123 /*
5124 * In window probes we must assure a timer
5125 * is still running there
5126 */
5127 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5128 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5129 stcb->sctp_ep, stcb, net);
5130 }
5131 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5132 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5133 stcb, net,
5134 SCTP_FROM_SCTP_INDATA + SCTP_LOC_36);
5135 }
5136 }
5137 }
5138 if ((j == 0) &&
5139 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5140 (asoc->sent_queue_retran_cnt == 0) &&
5141 (win_probe_recovered == 0) &&
5142 (done_once == 0)) {
5143 /*
5144 * huh, this should not happen unless all packets are
5145 * PR-SCTP and marked to skip of course.
5146 */
5147 if (sctp_fs_audit(asoc)) {
5148 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5149 net->flight_size = 0;
5150 }
5151 asoc->total_flight = 0;
5152 asoc->total_flight_count = 0;
5153 asoc->sent_queue_retran_cnt = 0;
5154 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5155 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5156 sctp_flight_size_increase(tp1);
5157 sctp_total_flight_increase(stcb, tp1);
5158 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5159 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5160 }
5161 }
5162 }
5163 done_once = 1;
5164 goto again;
5165 }
5166 /*********************************************/
5167 /* Here we perform PR-SCTP procedures */
5168 /* (section 4.2) */
5169 /*********************************************/
5170 /* C1. update advancedPeerAckPoint */
5171 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5172 asoc->advanced_peer_ack_point = cum_ack;
5173 }
5174 /* C2. try to further move advancedPeerAckPoint ahead */
5175 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
5176 struct sctp_tmit_chunk *lchk;
5177 uint32_t old_adv_peer_ack_point;
5179 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5180 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5181 /* C3. See if we need to send a Fwd-TSN */
5182 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5183 /*
5184 * ISSUE with ECN, see FWD-TSN processing.
5185 */
5186 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5187 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5188 0xee, cum_ack, asoc->advanced_peer_ack_point,
5189 old_adv_peer_ack_point);
5190 }
5191 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5192 send_forward_tsn(stcb, asoc);
5193 } else if (lchk) {
5194 /* try to FR fwd-tsn's that get lost too */
5195 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5196 send_forward_tsn(stcb, asoc);
5197 }
5198 }
5199 }
5200 for (; lchk != NULL; lchk = TAILQ_NEXT(lchk, sctp_next)) {
5201 if (lchk->whoTo != NULL) {
5202 break;
5203 }
5204 }
5205 if (lchk != NULL) {
5206 /* Assure a timer is up */
5207 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5208 stcb->sctp_ep, stcb, lchk->whoTo);
5209 }
5210 }
5211 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5212 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5213 a_rwnd,
5214 stcb->asoc.peers_rwnd,
5215 stcb->asoc.total_flight,
5216 stcb->asoc.total_output_queue_size);
5217 }
5218 }
5220 void
5221 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5222 {
5223 /* Copy cum-ack */
5224 uint32_t cum_ack, a_rwnd;
5226 cum_ack = ntohl(cp->cumulative_tsn_ack);
5227 /* Arrange so a_rwnd does NOT change */
5228 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5230 /* Now call the express sack handling */
5231 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5232 }
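/*
 * Illustrative note (not from this file): choosing
 * a_rwnd = peers_rwnd + total_flight means the express handler's later
 * subtraction of the bytes in flight yields the old peers_rwnd again, so
 * processing the SHUTDOWN's cum-ack leaves the window estimate untouched.
 * Excluded from the build via #if 0.
 */
#if 0
#include <stdint.h>

static uint32_t
unchanged_rwnd(uint32_t peers_rwnd, uint32_t total_flight)
{
	uint32_t a_rwnd = peers_rwnd + total_flight;

	/* The handler effectively computes a_rwnd - total_flight. */
	return (a_rwnd - total_flight);	/* == peers_rwnd */
}
#endif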
5234 static void
5235 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5236 struct sctp_stream_in *strmin)
5237 {
5238 struct sctp_queued_to_read *control, *ncontrol;
5239 struct sctp_association *asoc;
5240 uint32_t mid;
5241 int need_reasm_check = 0;
5243 asoc = &stcb->asoc;
5244 mid = strmin->last_mid_delivered;
5245 /*
5246 * First deliver anything prior to and including the stream no that
5247 * came in.
5248 */
5249 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5250 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5251 /* this is deliverable now */
5252 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5253 if (control->on_strm_q) {
5254 if (control->on_strm_q == SCTP_ON_ORDERED) {
5255 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5256 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5257 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5258 #ifdef INVARIANTS
5259 } else {
5260 panic("strmin: %p ctl: %p unknown %d",
5261 strmin, control, control->on_strm_q);
5262 #endif
5263 }
5264 control->on_strm_q = 0;
5265 }
5266 /* subtract pending on streams */
5267 if (asoc->size_on_all_streams >= control->length) {
5268 asoc->size_on_all_streams -= control->length;
5269 } else {
5270 #ifdef INVARIANTS
5271 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5272 #else
5273 asoc->size_on_all_streams = 0;
5274 #endif
5275 }
5276 sctp_ucount_decr(asoc->cnt_on_all_streams);
5277 /* deliver it to at least the delivery-q */
5278 if (stcb->sctp_socket) {
5279 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5280 sctp_add_to_readq(stcb->sctp_ep, stcb,
5281 control,
5282 &stcb->sctp_socket->so_rcv,
5283 1, SCTP_READ_LOCK_HELD,
5284 SCTP_SO_NOT_LOCKED);
5285 }
5286 } else {
5287 /* Its a fragmented message */
5288 if (control->first_frag_seen) {
5289 /*
5290 * Make it so this is next to
5291 * deliver, we restore later
5292 */
5293 strmin->last_mid_delivered = control->mid - 1;
5294 need_reasm_check = 1;
5295 break;
5296 }
5297 }
5298 } else {
5299 /* no more delivery now. */
5300 break;
5301 }
5302 }
5303 if (need_reasm_check) {
5304 int ret;
5306 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5307 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) {
5308 /* Restore the next to deliver unless we are ahead */
5309 strmin->last_mid_delivered = mid;
5310 }
5311 if (ret == 0) {
5312 /* Left the front Partial one on */
5313 strmin->last_mid_delivered = mid;
5314 }
5315 need_reasm_check = 0;
5316 }
5317 /*
5318 * now we must deliver things in queue the normal way if any are
5319 * now ready.
5320 */
5321 mid = strmin->last_mid_delivered + 1;
5322 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
5323 if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
5324 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
5325 /* this is deliverable now */
5326 if (control->on_strm_q) {
5327 if (control->on_strm_q == SCTP_ON_ORDERED) {
5328 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
5329 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5330 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
5331 #ifdef INVARIANTS
5332 } else {
5333 panic("strmin: %p ctl: %p unknown %d",
5334 strmin, control, control->on_strm_q);
5335 #endif
5336 }
5337 control->on_strm_q = 0;
5338 }
5339 /* subtract pending on streams */
5340 if (asoc->size_on_all_streams >= control->length) {
5341 asoc->size_on_all_streams -= control->length;
5342 } else {
5343 #ifdef INVARIANTS
5344 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5345 #else
5346 asoc->size_on_all_streams = 0;
5347 #endif
5348 }
5349 sctp_ucount_decr(asoc->cnt_on_all_streams);
5350 /* deliver it to at least the delivery-q */
5351 strmin->last_mid_delivered = control->mid;
5352 if (stcb->sctp_socket) {
5353 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
5354 sctp_add_to_readq(stcb->sctp_ep, stcb,
5355 control,
5356 &stcb->sctp_socket->so_rcv, 1,
5357 SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5358 }
5359 mid = strmin->last_mid_delivered + 1;
5360 } else {
5361 /* Its a fragmented message */
5362 if (control->first_frag_seen) {
5363 /*
5364 * Make it so this is next to
5365 * deliver
5366 */
5367 strmin->last_mid_delivered = control->mid - 1;
5368 need_reasm_check = 1;
5369 break;
5370 }
5371 }
5372 } else {
5373 break;
5374 }
5375 }
5376 if (need_reasm_check) {
5377 (void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
5378 }
5379 }
5381 static void
5382 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5383 struct sctp_association *asoc, struct sctp_stream_in *strm,
5384 struct sctp_queued_to_read *control, int ordered, uint32_t cumtsn)
5385 {
5386 struct sctp_tmit_chunk *chk, *nchk;
5388 /*
5389 * For now large messages held on the stream reasm that are complete
5390 * will be tossed too. We could in theory do more work to spin
5391 * through and stop after dumping one msg aka seeing the start of a
5392 * new msg at the head, and call the delivery function... to see if
5393 * it can be delivered... But for now we just dump everything on the
5394 * reasm queue.
5395 */
5396 if (!asoc->idata_supported && !ordered &&
5397 control->first_frag_seen &&
5398 SCTP_TSN_GT(control->fsn_included, cumtsn)) {
5399 return;
5400 }
5401 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
5402 /* Purge hanging chunks */
5403 if (!asoc->idata_supported && !ordered) {
5404 if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
5405 break;
5406 }
5407 }
5408 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5409 if (asoc->size_on_reasm_queue >= chk->send_size) {
5410 asoc->size_on_reasm_queue -= chk->send_size;
5411 } else {
5412 #ifdef INVARIANTS
5413 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
5414 #else
5415 asoc->size_on_reasm_queue = 0;
5416 #endif
5417 }
5418 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5419 if (chk->data) {
5420 sctp_m_freem(chk->data);
5421 chk->data = NULL;
5422 }
5423 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5424 }
5425 if (!TAILQ_EMPTY(&control->reasm)) {
5426 /* This has to be old data, unordered */
5427 if (control->data) {
5428 sctp_m_freem(control->data);
5429 control->data = NULL;
5430 }
5431 sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
5432 chk = TAILQ_FIRST(&control->reasm);
5433 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
5434 TAILQ_REMOVE(&control->reasm, chk, sctp_next);
5435 sctp_add_chk_to_control(control, strm, stcb, asoc,
5436 chk, SCTP_READ_LOCK_HELD);
5437 }
5438 sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
5439 return;
5440 }
5441 if (control->on_strm_q == SCTP_ON_ORDERED) {
5442 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5443 if (asoc->size_on_all_streams >= control->length) {
5444 asoc->size_on_all_streams -= control->length;
5445 } else {
5446 #ifdef INVARIANTS
5447 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5448 #else
5449 asoc->size_on_all_streams = 0;
5450 #endif
5451 }
5452 sctp_ucount_decr(asoc->cnt_on_all_streams);
5453 control->on_strm_q = 0;
5454 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5455 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5456 control->on_strm_q = 0;
5457 #ifdef INVARIANTS
5458 } else if (control->on_strm_q) {
5459 panic("strm: %p ctl: %p unknown %d",
5460 strm, control, control->on_strm_q);
5461 #endif
5462 }
5463 control->on_strm_q = 0;
5464 if (control->on_read_q == 0) {
5465 sctp_free_remote_addr(control->whoFrom);
5466 if (control->data) {
5467 sctp_m_freem(control->data);
5468 control->data = NULL;
5469 }
5470 sctp_free_a_readq(stcb, control);
5471 }
5472 }
5474 void
5475 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5476 struct sctp_forward_tsn_chunk *fwd,
5477 int *abort_flag, struct mbuf *m, int offset)
5478 {
5479 /* The pr-sctp fwd tsn */
5480 /*
5481 * here we will perform all the data receiver side steps for
5482 * processing FwdTSN, as required in by pr-sctp draft:
5483 *
5484 * Assume we get FwdTSN(x):
5485 *
5486 * 1) update local cumTSN to x 2) try to further advance cumTSN to x
5487 * + others we have 3) examine and update re-ordering queue on
5488 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5489 * report where we are.
5490 */
5491 struct sctp_association *asoc;
5492 uint32_t new_cum_tsn, gap;
5493 unsigned int i, fwd_sz, m_size;
5494 uint32_t str_seq;
5495 struct sctp_stream_in *strm;
5496 struct sctp_queued_to_read *control, *ncontrol, *sv;
5498 asoc = &stcb->asoc;
5499 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5500 SCTPDBG(SCTP_DEBUG_INDATA1,
5501 "Bad size too small/big fwd-tsn\n");
5502 return;
5503 }
5504 m_size = (stcb->asoc.mapping_array_size << 3);
5505 /*************************************************************/
5506 /* 1. Here we update local cumTSN and shift the bitmap array */
5507 /*************************************************************/
5508 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5510 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5511 /* Already got there ... */
5512 return;
5513 }
5514 /*
5515 * now we know the new TSN is more advanced, let's find the actual
5516 * gap
5517 */
5518 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5519 asoc->cumulative_tsn = new_cum_tsn;
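/*
 * Illustrative sketch (hypothetical helper, not from this file):
 * SCTP_CALC_TSN_TO_GAP() above reduces the new cumulative TSN to a bit
 * offset in the receive mapping array, measured from the array's base
 * TSN; unsigned 32-bit subtraction gives the same result across a TSN
 * wrap.  Excluded from the build via #if 0.
 */
#if 0
#include <stdint.h>

static uint32_t
tsn_to_gap(uint32_t tsn, uint32_t base_tsn)
{
	/* mod-2^32 distance from the bitmap base to 'tsn' */
	return (tsn - base_tsn);
}
#endif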
5520 if (gap >= m_size) {
5521 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5522 struct mbuf *op_err;
5523 char msg[SCTP_DIAG_INFO_LEN];
5525 /*
5526 * out of range (of single byte chunks in the rwnd I
5527 * give out). This must be an attacker.
5528 */
5529 *abort_flag = 1;
5530 SCTP_SNPRINTF(msg, sizeof(msg),
5531 "New cum ack %8.8x too high, highest TSN %8.8x",
5532 new_cum_tsn, asoc->highest_tsn_inside_map);
5533 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5534 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_37;
5535 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, false, SCTP_SO_NOT_LOCKED);
5536 return;
5537 }
5538 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5540 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5541 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5542 asoc->highest_tsn_inside_map = new_cum_tsn;
5544 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5545 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5547 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5548 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5549 }
5550 } else {
5551 SCTP_TCB_LOCK_ASSERT(stcb);
5552 for (i = 0; i <= gap; i++) {
5553 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5554 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5555 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5556 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5557 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5558 }
5559 }
5560 }
5561 }
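/*
 * Illustrative sketch (hypothetical, condensed): the loop above marks
 * every TSN up to the new cumulative point as present in the
 * non-renegable map, so a later SACK cannot renege on data the peer has
 * declared forwarded.  Excluded from the build via #if 0.
 */
#if 0
#include <stdint.h>

static void
mark_up_to_gap(uint8_t *nr_map, uint32_t gap)
{
	uint32_t i;

	for (i = 0; i <= gap; i++)
		nr_map[i >> 3] |= (uint8_t)(1U << (i & 7));
}
#endif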
5562 /*************************************************************/
5563 /* 2. Clear up re-assembly queue */
5564 /*************************************************************/
5566 /* This is now done as part of clearing up the stream/seq */
5567 if (asoc->idata_supported == 0) {
5568 uint16_t sid;
5570 /* Flush all the un-ordered data based on cum-tsn */
5571 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5572 for (sid = 0; sid < asoc->streamincnt; sid++) {
5573 strm = &asoc->strmin[sid];
5574 if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5575 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), 0, new_cum_tsn);
5576 }
5577 }
5578 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5579 }
5580 /*******************************************************/
5581 /* 3. Update the PR-stream re-ordering queues and fix */
5582 /* delivery issues as needed. */
5583 /*******************************************************/
5584 fwd_sz -= sizeof(*fwd);
5585 if (m && fwd_sz) {
5586 /* New method. */
5587 unsigned int num_str;
5588 uint32_t mid;
5589 uint16_t sid;
5590 uint16_t ordered, flags;
5591 struct sctp_strseq *stseq, strseqbuf;
5592 struct sctp_strseq_mid *stseq_m, strseqbuf_m;
5594 offset += sizeof(*fwd);
5596 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5597 if (asoc->idata_supported) {
5598 num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
5599 } else {
5600 num_str = fwd_sz / sizeof(struct sctp_strseq);
5601 }
5602 for (i = 0; i < num_str; i++) {
5603 if (asoc->idata_supported) {
5604 stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
5605 sizeof(struct sctp_strseq_mid),
5606 (uint8_t *)&strseqbuf_m);
5607 offset += sizeof(struct sctp_strseq_mid);
5608 if (stseq_m == NULL) {
5609 break;
5610 }
5611 sid = ntohs(stseq_m->sid);
5612 mid = ntohl(stseq_m->mid);
5613 flags = ntohs(stseq_m->flags);
5614 if (flags & PR_SCTP_UNORDERED_FLAG) {
5615 ordered = 0;
5616 } else {
5617 ordered = 1;
5618 }
5619 } else {
5620 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5621 sizeof(struct sctp_strseq),
5622 (uint8_t *)&strseqbuf);
5623 offset += sizeof(struct sctp_strseq);
5624 if (stseq == NULL) {
5625 break;
5626 }
5627 sid = ntohs(stseq->sid);
5628 mid = (uint32_t)ntohs(stseq->ssn);
5629 ordered = 1;
5630 }
5635 /*
5636 * Ok we now look for the stream/seq on the read
5637 * queue where its not all delivered. If we find it
5638 * we transmute the read entry into a PDI_ABORTED.
5639 */
5640 if (sid >= asoc->streamincnt) {
5641 /* screwed up streams, stop! */
5642 break;
5643 }
5644 if ((asoc->str_of_pdapi == sid) &&
5645 (asoc->ssn_of_pdapi == mid)) {
5646 /*
5647 * If this is the one we were partially
5648 * delivering now then we no longer are.
5649 * Note this will change with the reassembly
5650 * re-write.
5651 */
5652 asoc->fragmented_delivery_inprogress = 0;
5653 }
5654 strm = &asoc->strmin[sid];
5655 if (ordered) {
5656 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, ncontrol) {
5657 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5658 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5659 }
5660 }
5661 } else {
5662 if (asoc->idata_supported) {
5663 TAILQ_FOREACH_SAFE(control, &strm->uno_inqueue, next_instrm, ncontrol) {
5664 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) {
5665 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, control, ordered, new_cum_tsn);
5666 }
5667 }
5668 } else {
5669 if (!TAILQ_EMPTY(&strm->uno_inqueue)) {
5670 sctp_flush_reassm_for_str_seq(stcb, asoc, strm, TAILQ_FIRST(&strm->uno_inqueue), ordered, new_cum_tsn);
5671 }
5672 }
5673 }
5674 TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
5675 if ((control->sinfo_stream == sid) &&
5676 (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
5677 str_seq = (sid << 16) | (0x0000ffff & mid);
5678 control->pdapi_aborted = 1;
5679 sv = stcb->asoc.control_pdapi;
5680 control->end_added = 1;
5681 if (control->on_strm_q == SCTP_ON_ORDERED) {
5682 TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
5683 if (asoc->size_on_all_streams >= control->length) {
5684 asoc->size_on_all_streams -= control->length;
5685 } else {
5686 #ifdef INVARIANTS
5687 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
5688 #else
5689 asoc->size_on_all_streams = 0;
5690 #endif
5691 }
5692 sctp_ucount_decr(asoc->cnt_on_all_streams);
5693 } else if (control->on_strm_q == SCTP_ON_UNORDERED) {
5694 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
5695 #ifdef INVARIANTS
5696 } else if (control->on_strm_q) {
5697 panic("strm: %p ctl: %p unknown %d",
5698 strm, control, control->on_strm_q);
5699 #endif
5700 }
5701 control->on_strm_q = 0;
5702 stcb->asoc.control_pdapi = control;
5703 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5704 stcb,
5705 SCTP_PARTIAL_DELIVERY_ABORTED,
5706 (void *)&str_seq,
5707 SCTP_SO_NOT_LOCKED);
5708 stcb->asoc.control_pdapi = sv;
5709 break;
5710 } else if ((control->sinfo_stream == sid) &&
5711 SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
5712 /* We are past our victim SSN */
5713 break;
5714 }
5715 }
5716 if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
5717 /* Update the sequence number */
5718 strm->last_mid_delivered = mid;
5719 }
5720 /* now kick the stream the new way */
5721 /* sa_ignore NO_NULL_CHK */
5722 sctp_kick_prsctp_reorder_queue(stcb, strm);
5723 }
5724 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5725 }
5726 /*
5727 * Now slide thing forward.
5728 */
5729 sctp_slide_mapping_arrays(stcb);
5730 }