2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
50 * NOTES: On the outbound side of things I need to check the sack timer to
51 * see if I should generate a sack into the chunk queue (if I have data to
52 * send that is and will be sending it .. for bundling).
54 * The callback in sctp_usrreq.c will get called when the socket is read from.
55 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Recompute the association's advertised receive window and cache it in
 * asoc->my_rwnd.  Thin wrapper around sctp_calc_rwnd().
 * NOTE(review): this extraction appears to drop structural lines
 * (return type / braces); code left byte-identical.
 */
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
62 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
65 /* Calculate what the rwnd would be */
/*
 * Compute the receive window to advertise for this association, based on
 * receive-socket-buffer space minus data we already hold on the reassembly
 * and per-stream queues (plus per-chunk mbuf overhead and rwnd control
 * overhead).  Returns 0-ish/clamped values when the socket is gone or the
 * window would shrink below the control-data overhead (SWS avoidance).
 * NOTE(review): extraction drops some lines (returns/braces); code is
 * left byte-identical.
 */
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 * This is really set wrong with respect to a 1-2-m socket. Since
73 * the sb_cc is the count that everyone as put up. When we re-write
74 * sctp_soreceive then we will fix this so that ONLY this
75 * associations data is taken into account.
77 if (stcb->sctp_socket == NULL)
80 if (stcb->asoc.sb_cc == 0 &&
81 asoc->size_on_reasm_queue == 0 &&
82 asoc->size_on_all_streams == 0) {
83 /* Full rwnd granted */
/* Nothing buffered anywhere: grant the full window, floored at the minimal rwnd. */
84 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
87 /* get actual space */
88 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
91 * take out what has NOT been put on socket queue and we yet hold
/* Subtract reassembly-queue bytes plus MSIZE mbuf overhead per queued chunk. */
94 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 asoc->cnt_on_reasm_queue * MSIZE));
/* Same for data still held on the per-stream in-queues. */
96 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 asoc->cnt_on_all_streams * MSIZE));
103 /* what is the overhead of all these rwnd's */
104 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
106 * If the window gets too small due to ctrl-stuff, reduce it to 1,
107 * even it is 0. SWS engaged
109 if (calc < stcb->asoc.my_rwnd_control_len) {
118 * Build out our readq entry based on the incoming packet.
/*
 * Allocate and initialize a read-queue entry (sctp_queued_to_read) for a
 * newly arrived message.  Takes a reference on 'net' (whoFrom); caller
 * supplies the TSN/PPID/stream identifiers and the data mbuf chain ('dm',
 * whose parameter line is missing from this extraction).  Returns NULL if
 * allocation fails.
 */
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122 struct sctp_nets *net,
123 uint32_t tsn, uint32_t ppid,
124 uint32_t context, uint16_t stream_no,
125 uint16_t stream_seq, uint8_t flags,
128 struct sctp_queued_to_read *read_queue_e = NULL;
130 sctp_alloc_a_readq(stcb, read_queue_e);
131 if (read_queue_e == NULL) {
134 read_queue_e->sinfo_stream = stream_no;
135 read_queue_e->sinfo_ssn = stream_seq;
/* Chunk rcv_flags are carried in the upper byte of sinfo_flags. */
136 read_queue_e->sinfo_flags = (flags << 8);
137 read_queue_e->sinfo_ppid = ppid;
138 read_queue_e->sinfo_context = context;
139 read_queue_e->sinfo_timetolive = 0;
140 read_queue_e->sinfo_tsn = tsn;
141 read_queue_e->sinfo_cumtsn = tsn;
142 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 read_queue_e->whoFrom = net;
144 read_queue_e->length = 0;
/* Hold a reference on the source net for the lifetime of this entry. */
145 atomic_add_int(&net->ref_count, 1);
146 read_queue_e->data = dm;
147 read_queue_e->spec_flags = 0;
148 read_queue_e->tail_mbuf = NULL;
149 read_queue_e->aux_data = NULL;
150 read_queue_e->stcb = stcb;
151 read_queue_e->port_from = stcb->rport;
152 read_queue_e->do_not_ref_stcb = 0;
153 read_queue_e->end_added = 0;
154 read_queue_e->some_taken = 0;
155 read_queue_e->pdapi_aborted = 0;
157 return (read_queue_e);
162 * Build out our readq entry based on the incoming packet.
/*
 * Same as sctp_build_readq_entry(), but sourced from a queued transmit
 * chunk (e.g. when starting partial delivery from the reassembly queue):
 * all sinfo fields are copied from chk->rec.data, the data mbuf chain is
 * taken from chk->data, and a reference is taken on chk->whoTo.
 * Returns NULL on allocation failure.
 */
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166 struct sctp_tmit_chunk *chk)
168 struct sctp_queued_to_read *read_queue_e = NULL;
170 sctp_alloc_a_readq(stcb, read_queue_e);
171 if (read_queue_e == NULL) {
174 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
/* Chunk rcv_flags are carried in the upper byte of sinfo_flags. */
176 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 read_queue_e->sinfo_context = stcb->asoc.context;
179 read_queue_e->sinfo_timetolive = 0;
180 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 read_queue_e->whoFrom = chk->whoTo;
184 read_queue_e->aux_data = NULL;
185 read_queue_e->length = 0;
/* Hold a reference on the chunk's destination net while this entry lives. */
186 atomic_add_int(&chk->whoTo->ref_count, 1);
187 read_queue_e->data = chk->data;
188 read_queue_e->tail_mbuf = NULL;
189 read_queue_e->stcb = stcb;
190 read_queue_e->port_from = stcb->rport;
191 read_queue_e->spec_flags = 0;
192 read_queue_e->do_not_ref_stcb = 0;
193 read_queue_e->end_added = 0;
194 read_queue_e->some_taken = 0;
195 read_queue_e->pdapi_aborted = 0;
197 return (read_queue_e);
/*
 * Build the ancillary-data (cmsg) mbuf for a received message, according to
 * which of the RECVRCVINFO / RECVNXTINFO / RECVDATAIOEVNT socket features
 * are enabled on the endpoint: first sizes the needed cmsg space, then
 * allocates one mbuf and fills in SCTP_RCVINFO, SCTP_NXTINFO and
 * SCTP_SNDRCV/SCTP_EXTRCV cmsgs in that order.  Returns NULL when no
 * ancillary data is wanted (lines carrying returns are missing from this
 * extraction).
 */
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
204 struct sctp_extrcvinfo *seinfo;
205 struct sctp_sndrcvinfo *outinfo;
206 struct sctp_rcvinfo *rcvinfo;
207 struct sctp_nxtinfo *nxtinfo;
214 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
215 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
216 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
217 /* user does not want any ancillary data */
221 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
222 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
224 seinfo = (struct sctp_extrcvinfo *)sinfo;
225 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
226 (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
/*
 * NOTE(review): this reserves CMSG_SPACE for sctp_rcvinfo, but the cmsg
 * actually emitted for this feature below is sctp_nxtinfo — verify the
 * struct sizes match or this under/over-sizes the buffer.
 */
228 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
232 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
233 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
235 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
238 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/* One mbuf holds all cmsgs; sized by the feature checks above. */
244 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
249 SCTP_BUF_LEN(ret) = 0;
251 /* We need a CMSG header followed by the struct */
252 cmh = mtod(ret, struct cmsghdr *);
253 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
254 cmh->cmsg_level = IPPROTO_SCTP;
255 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
256 cmh->cmsg_type = SCTP_RCVINFO;
257 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
258 rcvinfo->rcv_sid = sinfo->sinfo_stream;
259 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
260 rcvinfo->rcv_flags = sinfo->sinfo_flags;
261 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
262 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
263 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
264 rcvinfo->rcv_context = sinfo->sinfo_context;
265 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
/* Advance past this cmsg to where the next one starts. */
266 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
267 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
/* SCTP_NXTINFO: describes the *next* message available on the socket. */
270 cmh->cmsg_level = IPPROTO_SCTP;
271 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
272 cmh->cmsg_type = SCTP_NXTINFO;
273 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
274 nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
275 nxtinfo->nxt_flags = 0;
276 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
277 nxtinfo->nxt_flags |= SCTP_UNORDERED;
279 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
280 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
282 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
283 nxtinfo->nxt_flags |= SCTP_COMPLETE;
285 nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
286 nxtinfo->nxt_length = seinfo->sreinfo_next_length;
287 nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
288 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
289 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
291 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
292 cmh->cmsg_level = IPPROTO_SCTP;
293 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
/* EXT_RCVINFO selects the extended (SCTP_EXTRCV) vs. classic (SCTP_SNDRCV) layout. */
295 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
296 cmh->cmsg_type = SCTP_EXTRCV;
297 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
298 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
300 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
301 cmh->cmsg_type = SCTP_SNDRCV;
303 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/*
 * Mark 'tsn' as non-revokable: move its bit from the (renege-able)
 * mapping_array into the nr_mapping_array once the data has been handed to
 * the socket layer.  No-op when draining is disabled or the TSN is already
 * at/behind the cumulative ack.  Also maintains the two highest-TSN
 * watermarks for the maps.
 */
311 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
313 uint32_t gap, i, cumackp1;
/* sctp_do_drain == 0 means we never renege, so the split maps don't matter. */
316 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
319 cumackp1 = asoc->cumulative_tsn + 1;
320 if (SCTP_TSN_GT(cumackp1, tsn)) {
322 * this tsn is behind the cum ack and thus we don't need to
323 * worry about it being moved from one to the other.
327 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
/* The TSN must be present in the revokable map; otherwise state is corrupt. */
328 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
329 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
330 sctp_print_mapping_array(asoc);
332 panic("Things are really messed up now!!");
/* Transfer the bit: set in nr-map, clear in the revokable map. */
335 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
336 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
337 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
338 asoc->highest_tsn_inside_nr_map = tsn;
340 if (tsn == asoc->highest_tsn_inside_map) {
341 /* We must back down to see what the new highest is */
342 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
343 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
344 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
345 asoc->highest_tsn_inside_map = i;
/* No set bit found while scanning back: the revokable map is now empty. */
351 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
358 * We are delivering currently from the reassembly queue. We must continue to
359 * deliver until we either: 1) run out of space. 2) run out of sequential
360 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
/*
 * Drain consecutive-TSN fragments from asoc->reasmqueue into the receive
 * socket buffer (partial-delivery path).  If the socket is gone/closing,
 * instead flushes and frees the whole reassembly queue.  FIRST fragments
 * create a new read-queue entry (control_pdapi); subsequent fragments are
 * appended to it.  On LAST_FRAG, fragmented delivery ends and any now
 * in-order queued stream messages are delivered too.
 * NOTE(review): extraction drops some lines (returns/else/breaks); code
 * left byte-identical.
 */
363 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
365 struct sctp_tmit_chunk *chk, *nchk;
370 struct sctp_queued_to_read *control, *ctl, *nctl;
375 cntDel = stream_no = 0;
376 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
377 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
378 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
379 /* socket above is long gone or going.. */
/* Abandon partial delivery and purge the reassembly queue entirely. */
381 asoc->fragmented_delivery_inprogress = 0;
382 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
383 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
384 asoc->size_on_reasm_queue -= chk->send_size;
385 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
387 * Lose the data pointer, since its in the socket
391 sctp_m_freem(chk->data);
394 /* Now free the address and data */
395 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
396 /* sa_ignore FREED_MEMORY */
400 SCTP_TCB_LOCK_ASSERT(stcb);
401 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
/* Only strictly consecutive TSNs can be delivered; stop at the first gap. */
402 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
403 /* Can't deliver more :< */
406 stream_no = chk->rec.data.stream_number;
407 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
/* Ordered data must also be next-in-stream before it may be delivered. */
408 if (nxt_todel != chk->rec.data.stream_seq &&
409 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
411 * Not the next sequence to deliver in its stream OR
416 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
/* FIRST fragment: start a fresh read-queue entry for this message. */
418 control = sctp_build_readq_entry_chk(stcb, chk);
419 if (control == NULL) {
423 /* save it off for our future deliveries */
424 stcb->asoc.control_pdapi = control;
425 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
429 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
430 sctp_add_to_readq(stcb->sctp_ep,
431 stcb, control, &stcb->sctp_socket->so_rcv, end,
432 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
435 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
439 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
/* Middle/last fragment: append onto the in-progress control_pdapi entry. */
440 if (sctp_append_to_readq(stcb->sctp_ep, stcb,
441 stcb->asoc.control_pdapi,
442 chk->data, end, chk->rec.data.TSN_seq,
443 &stcb->sctp_socket->so_rcv)) {
445 * something is very wrong, either
446 * control_pdapi is NULL, or the tail_mbuf
447 * is corrupt, or there is a EOM already on
450 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
454 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
455 panic("This should not happen control_pdapi NULL?");
457 /* if we did not panic, it was a EOM */
458 panic("Bad chunking ??");
460 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
461 SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
463 SCTP_PRINTF("Bad chunking ??\n");
464 SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
472 /* pull it we did it */
473 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
474 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/* Message complete: leave PD-API mode and bump the stream's delivered SSN. */
475 asoc->fragmented_delivery_inprogress = 0;
476 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
477 asoc->strmin[stream_no].last_sequence_delivered++;
479 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
480 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
482 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
484 * turn the flag back on since we just delivered
487 asoc->fragmented_delivery_inprogress = 1;
/* Record details of what was just delivered (debug/validation state). */
489 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
490 asoc->last_flags_delivered = chk->rec.data.rcv_flags;
491 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
492 asoc->last_strm_no_delivered = chk->rec.data.stream_number;
494 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
495 asoc->size_on_reasm_queue -= chk->send_size;
496 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
497 /* free up the chk */
499 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
501 if (asoc->fragmented_delivery_inprogress == 0) {
503 * Now lets see if we can deliver the next one on
506 struct sctp_stream_in *strm;
508 strm = &asoc->strmin[stream_no];
509 nxt_todel = strm->last_sequence_delivered + 1;
/* Flush any complete, now-in-order messages waiting on this stream. */
510 TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
511 /* Deliver more if we can. */
512 if (nxt_todel == ctl->sinfo_ssn) {
513 TAILQ_REMOVE(&strm->inqueue, ctl, next);
514 asoc->size_on_all_streams -= ctl->length;
515 sctp_ucount_decr(asoc->cnt_on_all_streams);
516 strm->last_sequence_delivered++;
517 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
518 sctp_add_to_readq(stcb->sctp_ep, stcb,
520 &stcb->sctp_socket->so_rcv, 1,
521 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
525 nxt_todel = strm->last_sequence_delivered + 1;
533 * Queue the chunk either right into the socket buffer if it is the next one
534 * to go OR put it in the correct place in the delivery queue. If we do
535 * append to the so_buf, keep doing so until we are out of order. One big
536 * question still remains, what to do when the socket buffer is FULL??
/*
 * Ordered-delivery dispatch for a complete message ('control'):
 *  - SSN behind last delivered  -> protocol violation, abort association
 *    (sets *abort_flag via the abort path).
 *  - SSN == next expected       -> deliver immediately, then drain any
 *    now-in-order messages queued on the stream.
 *  - otherwise                  -> insert into the stream's inqueue in SSN
 *    order; exact duplicates are freed.
 * NOTE(review): extraction drops some lines (braces/returns/abort-flag
 * sets); code left byte-identical.
 */
539 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
540 struct sctp_queued_to_read *control, int *abort_flag)
543 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
544 * all the data in one stream this could happen quite rapidly. One
545 * could use the TSN to keep track of things, but this scheme breaks
546 * down in the other type of stream useage that could occur. Send a
547 * single msg to stream 0, send 4Billion messages to stream 1, now
548 * send a message to stream 0. You have a situation where the TSN
549 * has wrapped but not in the stream. Is this worth worrying about
550 * or should we just change our queue sort at the bottom to be by
553 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
554 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
555 * assignment this could happen... and I don't see how this would be
556 * a violation. So for now I am undecided an will leave the sort by
557 * SSN alone. Maybe a hybred approach is the answer
560 struct sctp_stream_in *strm;
561 struct sctp_queued_to_read *at;
565 char msg[SCTP_DIAG_INFO_LEN];
/* Account the message as queued on a stream; undone below if delivered now. */
568 asoc->size_on_all_streams += control->length;
569 sctp_ucount_incr(asoc->cnt_on_all_streams);
570 strm = &asoc->strmin[control->sinfo_stream];
571 nxt_todel = strm->last_sequence_delivered + 1;
572 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
573 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
575 SCTPDBG(SCTP_DEBUG_INDATA1,
576 "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
577 (uint32_t) control->sinfo_stream,
578 (uint32_t) strm->last_sequence_delivered,
579 (uint32_t) nxt_todel);
580 if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
581 /* The incoming sseq is behind where we last delivered? */
582 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
583 control->sinfo_ssn, strm->last_sequence_delivered);
586 * throw it in the stream so it gets cleaned up in
587 * association destruction
589 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
590 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
591 strm->last_sequence_delivered, control->sinfo_tsn,
592 control->sinfo_stream, control->sinfo_ssn);
593 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
594 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
595 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
600 if (nxt_todel == control->sinfo_ssn) {
601 /* can be delivered right away? */
602 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
603 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
605 /* EY it wont be queued if it could be delivered directly */
/* Reverse the accounting done above — this message goes straight to the socket. */
607 asoc->size_on_all_streams -= control->length;
608 sctp_ucount_decr(asoc->cnt_on_all_streams);
609 strm->last_sequence_delivered++;
611 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
612 sctp_add_to_readq(stcb->sctp_ep, stcb,
614 &stcb->sctp_socket->so_rcv, 1,
615 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* Delivering this one may unblock queued successors — drain while in order. */
616 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
618 nxt_todel = strm->last_sequence_delivered + 1;
619 if (nxt_todel == control->sinfo_ssn) {
620 TAILQ_REMOVE(&strm->inqueue, control, next);
621 asoc->size_on_all_streams -= control->length;
622 sctp_ucount_decr(asoc->cnt_on_all_streams);
623 strm->last_sequence_delivered++;
625 * We ignore the return of deliver_data here
626 * since we always can hold the chunk on the
627 * d-queue. And we have a finite number that
628 * can be delivered from the strq.
630 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
631 sctp_log_strm_del(control, NULL,
632 SCTP_STR_LOG_FROM_IMMED_DEL);
634 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
635 sctp_add_to_readq(stcb->sctp_ep, stcb,
637 &stcb->sctp_socket->so_rcv, 1,
638 SCTP_READ_LOCK_NOT_HELD,
647 * Ok, we did not deliver this guy, find the correct place
648 * to put it on the queue.
650 if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
653 if (TAILQ_EMPTY(&strm->inqueue)) {
655 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
656 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
658 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
/* Non-empty queue: walk it to find the SSN-ordered insertion point. */
660 TAILQ_FOREACH(at, &strm->inqueue, next) {
661 if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
663 * one in queue is bigger than the
664 * new one, insert before this one
666 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
667 sctp_log_strm_del(control, at,
668 SCTP_STR_LOG_FROM_INSERT_MD);
670 TAILQ_INSERT_BEFORE(at, control, next);
672 } else if (at->sinfo_ssn == control->sinfo_ssn) {
674 * Gak, He sent me a duplicate str
678 * foo bar, I guess I will just free
679 * this new guy, should we abort
680 * too? FIX ME MAYBE? Or it COULD be
681 * that the SSN's have wrapped.
682 * Maybe I should compare to TSN
683 * somehow... sigh for now just blow
/* Duplicate SSN: drop the new copy — free data, net ref, and the entry. */
688 sctp_m_freem(control->data);
689 control->data = NULL;
690 asoc->size_on_all_streams -= control->length;
691 sctp_ucount_decr(asoc->cnt_on_all_streams);
692 if (control->whoFrom) {
693 sctp_free_remote_addr(control->whoFrom);
694 control->whoFrom = NULL;
696 sctp_free_a_readq(stcb, control);
699 if (TAILQ_NEXT(at, next) == NULL) {
701 * We are at the end, insert
704 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
705 sctp_log_strm_del(control, at,
706 SCTP_STR_LOG_FROM_INSERT_TL);
708 TAILQ_INSERT_AFTER(&strm->inqueue,
719 * Returns two things: You get the total size of the deliverable parts of the
720 * first fragmented message on the reassembly queue. And you get a 1 back if
721 * all of the message is ready or a 0 back if the message is still incomplete
/*
 * Sums consecutive-TSN fragment sizes of the first message on the
 * reassembly queue into *t_size; result 1 iff a LAST_FRAG is reached
 * (message complete).  Returns 0 early if the queue is empty or its head
 * is not a FIRST fragment.
 * NOTE(review): the loop's TSN advance (tsn++) and the return statements
 * appear to be on lines dropped by this extraction; code left
 * byte-identical.
 */
724 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
726 struct sctp_tmit_chunk *chk;
730 chk = TAILQ_FIRST(&asoc->reasmqueue);
732 /* nothing on the queue */
735 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
736 /* Not a first on the queue */
739 tsn = chk->rec.data.TSN_seq;
740 TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
/* Stop accumulating at the first TSN gap — rest is a different message. */
741 if (tsn != chk->rec.data.TSN_seq) {
744 *t_size += chk->send_size;
745 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/*
 * Decide whether (partial) delivery from the reassembly queue can start or
 * continue.  If no fragmented delivery is in progress: start one when the
 * head of reasmqueue is a FIRST fragment that is deliverable (next SSN in
 * its stream, or unordered) and either the whole message is present or the
 * accumulated size reaches the partial-delivery point.  If one is already
 * in progress, keep servicing it (twice, to pick up stream data released
 * by the end of a fragmented delivery).
 */
754 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
756 struct sctp_tmit_chunk *chk;
758 uint32_t tsize, pd_point;
761 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Empty queue: reset the accounting counters to a known-clean state. */
764 asoc->size_on_reasm_queue = 0;
765 asoc->cnt_on_reasm_queue = 0;
768 if (asoc->fragmented_delivery_inprogress == 0) {
770 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
771 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
772 (nxt_todel == chk->rec.data.stream_seq ||
773 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
775 * Yep the first one is here and its ok to deliver
/* PD point: min of a fraction of the rcv buffer and the endpoint's setting. */
778 if (stcb->sctp_socket) {
779 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
780 stcb->sctp_ep->partial_delivery_point);
782 pd_point = stcb->sctp_ep->partial_delivery_point;
784 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
786 * Yes, we setup to start reception, by
787 * backing down the TSN just in case we
788 * can't deliver. If we
790 asoc->fragmented_delivery_inprogress = 1;
/* tsn_last_delivered is set one behind so the FIRST fragment qualifies. */
791 asoc->tsn_last_delivered =
792 chk->rec.data.TSN_seq - 1;
794 chk->rec.data.stream_number;
795 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
796 asoc->pdapi_ppid = chk->rec.data.payloadtype;
797 asoc->fragment_flags = chk->rec.data.rcv_flags;
798 sctp_service_reassembly(stcb, asoc);
803 * Service re-assembly will deliver stream data queued at
804 * the end of fragmented delivery.. but it wont know to go
805 * back and call itself again... we do that here with the
808 sctp_service_reassembly(stcb, asoc);
809 if (asoc->fragmented_delivery_inprogress == 0) {
811 * finished our Fragmented delivery, could be more
820 * Dump onto the re-assembly queue, in its proper place. After dumping on the
821 * queue, see if anthing can be delivered. If so pull it off (or as much as
822 * we can. If we run out of space then we must dump what we can and set the
823 * appropriate flag to say we queued what we could.
826 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
827 struct sctp_tmit_chunk *chk, int *abort_flag)
830 char msg[SCTP_DIAG_INFO_LEN];
832 uint32_t cum_ackp1, prev_tsn, post_tsn;
833 struct sctp_tmit_chunk *at, *prev, *next;
836 cum_ackp1 = asoc->tsn_last_delivered + 1;
837 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
838 /* This is the first one on the queue */
839 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
841 * we do not check for delivery of anything when only one
844 asoc->size_on_reasm_queue = chk->send_size;
845 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
846 if (chk->rec.data.TSN_seq == cum_ackp1) {
847 if (asoc->fragmented_delivery_inprogress == 0 &&
848 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
849 SCTP_DATA_FIRST_FRAG) {
851 * An empty queue, no delivery inprogress,
852 * we hit the next one and it does NOT have
853 * a FIRST fragment mark.
855 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
856 snprintf(msg, sizeof(msg),
857 "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
858 chk->rec.data.TSN_seq,
859 chk->rec.data.stream_number,
860 chk->rec.data.stream_seq);
861 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
862 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
863 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
865 } else if (asoc->fragmented_delivery_inprogress &&
866 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
868 * We are doing a partial delivery and the
869 * NEXT chunk MUST be either the LAST or
870 * MIDDLE fragment NOT a FIRST
872 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
873 snprintf(msg, sizeof(msg),
874 "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
875 chk->rec.data.TSN_seq,
876 chk->rec.data.stream_number,
877 chk->rec.data.stream_seq);
878 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
879 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
880 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
882 } else if (asoc->fragmented_delivery_inprogress) {
884 * Here we are ok with a MIDDLE or LAST
887 if (chk->rec.data.stream_number !=
888 asoc->str_of_pdapi) {
889 /* Got to be the right STR No */
890 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
891 chk->rec.data.stream_number,
893 snprintf(msg, sizeof(msg),
894 "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
896 chk->rec.data.TSN_seq,
897 chk->rec.data.stream_number,
898 chk->rec.data.stream_seq);
899 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
900 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
901 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
903 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
904 SCTP_DATA_UNORDERED &&
905 chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
906 /* Got to be the right STR Seq */
907 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
908 chk->rec.data.stream_seq,
910 snprintf(msg, sizeof(msg),
911 "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
913 chk->rec.data.TSN_seq,
914 chk->rec.data.stream_number,
915 chk->rec.data.stream_seq);
916 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
917 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
918 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
926 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
927 if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
929 * one in queue is bigger than the new one, insert
933 asoc->size_on_reasm_queue += chk->send_size;
934 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
936 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
938 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
939 /* Gak, He sent me a duplicate str seq number */
941 * foo bar, I guess I will just free this new guy,
942 * should we abort too? FIX ME MAYBE? Or it COULD be
943 * that the SSN's have wrapped. Maybe I should
944 * compare to TSN somehow... sigh for now just blow
948 sctp_m_freem(chk->data);
951 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
955 if (TAILQ_NEXT(at, sctp_next) == NULL) {
957 * We are at the end, insert it after this
961 asoc->size_on_reasm_queue += chk->send_size;
962 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
963 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
970 prev_tsn = chk->rec.data.TSN_seq - 1;
971 if (prev_tsn == prev->rec.data.TSN_seq) {
973 * Ok the one I am dropping onto the end is the
974 * NEXT. A bit of valdiation here.
976 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
977 SCTP_DATA_FIRST_FRAG ||
978 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
979 SCTP_DATA_MIDDLE_FRAG) {
981 * Insert chk MUST be a MIDDLE or LAST
984 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
985 SCTP_DATA_FIRST_FRAG) {
986 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
987 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
988 snprintf(msg, sizeof(msg),
989 "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
990 chk->rec.data.TSN_seq,
991 chk->rec.data.stream_number,
992 chk->rec.data.stream_seq);
993 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
994 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
995 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
999 if (chk->rec.data.stream_number !=
1000 prev->rec.data.stream_number) {
1002 * Huh, need the correct STR here,
1003 * they must be the same.
1005 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
1006 chk->rec.data.stream_number,
1007 prev->rec.data.stream_number);
1008 snprintf(msg, sizeof(msg),
1009 "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1010 prev->rec.data.stream_number,
1011 chk->rec.data.TSN_seq,
1012 chk->rec.data.stream_number,
1013 chk->rec.data.stream_seq);
1014 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1015 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1016 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1020 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1021 chk->rec.data.stream_seq !=
1022 prev->rec.data.stream_seq) {
1024 * Huh, need the correct STR here,
1025 * they must be the same.
1027 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1028 chk->rec.data.stream_seq,
1029 prev->rec.data.stream_seq);
1030 snprintf(msg, sizeof(msg),
1031 "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1032 prev->rec.data.stream_seq,
1033 chk->rec.data.TSN_seq,
1034 chk->rec.data.stream_number,
1035 chk->rec.data.stream_seq);
1036 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1037 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1038 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1042 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1043 SCTP_DATA_LAST_FRAG) {
1044 /* Insert chk MUST be a FIRST */
1045 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1046 SCTP_DATA_FIRST_FRAG) {
1047 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1048 snprintf(msg, sizeof(msg),
1049 "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1050 chk->rec.data.TSN_seq,
1051 chk->rec.data.stream_number,
1052 chk->rec.data.stream_seq);
1053 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1054 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1055 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1063 post_tsn = chk->rec.data.TSN_seq + 1;
1064 if (post_tsn == next->rec.data.TSN_seq) {
1066 * Ok the one I am inserting ahead of is my NEXT
1067 * one. A bit of validation here.
1069 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1070 /* Insert chk MUST be a last fragment */
1071 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1072 != SCTP_DATA_LAST_FRAG) {
1073 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1074 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1075 snprintf(msg, sizeof(msg),
1076 "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1077 chk->rec.data.TSN_seq,
1078 chk->rec.data.stream_number,
1079 chk->rec.data.stream_seq);
1080 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1081 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1082 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1086 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1087 SCTP_DATA_MIDDLE_FRAG ||
1088 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1089 SCTP_DATA_LAST_FRAG) {
1091 * Insert chk CAN be MIDDLE or FIRST NOT
1094 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1095 SCTP_DATA_LAST_FRAG) {
1096 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1097 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1098 snprintf(msg, sizeof(msg),
1099 "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1100 chk->rec.data.TSN_seq,
1101 chk->rec.data.stream_number,
1102 chk->rec.data.stream_seq);
1103 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1104 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1105 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1109 if (chk->rec.data.stream_number !=
1110 next->rec.data.stream_number) {
1112 * Huh, need the correct STR here,
1113 * they must be the same.
1115 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1116 chk->rec.data.stream_number,
1117 next->rec.data.stream_number);
1118 snprintf(msg, sizeof(msg),
1119 "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1120 next->rec.data.stream_number,
1121 chk->rec.data.TSN_seq,
1122 chk->rec.data.stream_number,
1123 chk->rec.data.stream_seq);
1124 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1125 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1126 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1130 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1131 chk->rec.data.stream_seq !=
1132 next->rec.data.stream_seq) {
1134 * Huh, need the correct STR here,
1135 * they must be the same.
1137 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1138 chk->rec.data.stream_seq,
1139 next->rec.data.stream_seq);
1140 snprintf(msg, sizeof(msg),
1141 "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1142 next->rec.data.stream_seq,
1143 chk->rec.data.TSN_seq,
1144 chk->rec.data.stream_number,
1145 chk->rec.data.stream_seq);
1146 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1147 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1148 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1155 /* Do we need to do some delivery? check */
1156 sctp_deliver_reasm_check(stcb, asoc);
1160 * This is an unfortunate routine. It checks to make sure a evil guy is not
1161 * stuffing us full of bad packet fragments. A broken peer could also do this
1162 * but this is doubtful. It is too bad I must worry about evil crackers sigh
1166 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1169 struct sctp_tmit_chunk *at;
1172 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1173 if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
1174 /* is it one bigger? */
1175 tsn_est = at->rec.data.TSN_seq + 1;
1176 if (tsn_est == TSN_seq) {
1177 /* yep. It better be a last then */
1178 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1179 SCTP_DATA_LAST_FRAG) {
1181 * Ok this guy belongs next to a guy
1182 * that is NOT last, it should be a
1183 * middle/last, not a complete
1189 * This guy is ok since its a LAST
1190 * and the new chunk is a fully
1191 * self- contained one.
1196 } else if (TSN_seq == at->rec.data.TSN_seq) {
1197 /* Software error since I have a dup? */
1201 * Ok, 'at' is larger than new chunk but does it
1202 * need to be right before it.
1204 tsn_est = TSN_seq + 1;
1205 if (tsn_est == at->rec.data.TSN_seq) {
1206 /* Yep, It better be a first */
1207 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1208 SCTP_DATA_FIRST_FRAG) {
1220 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1221 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1222 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1223 int *break_flag, int last_chunk)
1225 /* Process a data chunk */
1226 /* struct sctp_tmit_chunk *chk; */
1227 struct sctp_tmit_chunk *chk;
1231 int need_reasm_check = 0;
1232 uint16_t strmno, strmseq;
1233 struct mbuf *op_err;
1234 char msg[SCTP_DIAG_INFO_LEN];
1235 struct sctp_queued_to_read *control;
1237 uint32_t protocol_id;
1238 uint8_t chunk_flags;
1239 struct sctp_stream_reset_list *liste;
1242 tsn = ntohl(ch->dp.tsn);
1243 chunk_flags = ch->ch.chunk_flags;
1244 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1245 asoc->send_sack = 1;
1247 protocol_id = ch->dp.protocol_id;
1248 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1249 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1250 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1255 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1256 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1257 /* It is a duplicate */
1258 SCTP_STAT_INCR(sctps_recvdupdata);
1259 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1260 /* Record a dup for the next outbound sack */
1261 asoc->dup_tsns[asoc->numduptsns] = tsn;
1264 asoc->send_sack = 1;
1267 /* Calculate the number of TSN's between the base and this TSN */
1268 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1269 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1270 /* Can't hold the bit in the mapping at max array, toss it */
1273 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1274 SCTP_TCB_LOCK_ASSERT(stcb);
1275 if (sctp_expand_mapping_array(asoc, gap)) {
1276 /* Can't expand, drop it */
1280 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1283 /* See if we have received this one already */
1284 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1285 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1286 SCTP_STAT_INCR(sctps_recvdupdata);
1287 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1288 /* Record a dup for the next outbound sack */
1289 asoc->dup_tsns[asoc->numduptsns] = tsn;
1292 asoc->send_sack = 1;
1296 * Check to see about the GONE flag, duplicates would cause a sack
1297 * to be sent up above
1299 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1300 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1301 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1303 * wait a minute, this guy is gone, there is no longer a
1304 * receiver. Send peer an ABORT!
1306 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1307 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1312 * Now before going further we see if there is room. If NOT then we
1313 * MAY let one through only IF this TSN is the one we are waiting
1314 * for on a partial delivery API.
1317 /* now do the tests */
1318 if (((asoc->cnt_on_all_streams +
1319 asoc->cnt_on_reasm_queue +
1320 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1321 (((int)asoc->my_rwnd) <= 0)) {
1323 * When we have NO room in the rwnd we check to make sure
1324 * the reader is doing its job...
1326 if (stcb->sctp_socket->so_rcv.sb_cc) {
1327 /* some to read, wake-up */
1328 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1331 so = SCTP_INP_SO(stcb->sctp_ep);
1332 atomic_add_int(&stcb->asoc.refcnt, 1);
1333 SCTP_TCB_UNLOCK(stcb);
1334 SCTP_SOCKET_LOCK(so, 1);
1335 SCTP_TCB_LOCK(stcb);
1336 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1337 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1338 /* assoc was freed while we were unlocked */
1339 SCTP_SOCKET_UNLOCK(so, 1);
1343 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1344 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1345 SCTP_SOCKET_UNLOCK(so, 1);
1348 /* now is it in the mapping array of what we have accepted? */
1349 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1350 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1351 /* Nope not in the valid range dump it */
1352 sctp_set_rwnd(stcb, asoc);
1353 if ((asoc->cnt_on_all_streams +
1354 asoc->cnt_on_reasm_queue +
1355 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1356 SCTP_STAT_INCR(sctps_datadropchklmt);
1358 SCTP_STAT_INCR(sctps_datadroprwnd);
1364 strmno = ntohs(ch->dp.stream_id);
1365 if (strmno >= asoc->streamincnt) {
1366 struct sctp_paramhdr *phdr;
1369 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1370 0, M_NOWAIT, 1, MT_DATA);
1372 /* add some space up front so prepend will work well */
1373 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1374 phdr = mtod(mb, struct sctp_paramhdr *);
1376 * Error causes are just param's and this one has
1377 * two back to back phdr, one with the error type
1378 * and size, the other with the streamid and a rsvd
1380 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1381 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1382 phdr->param_length =
1383 htons(sizeof(struct sctp_paramhdr) * 2);
1385 /* We insert the stream in the type field */
1386 phdr->param_type = ch->dp.stream_id;
1387 /* And set the length to 0 for the rsvd field */
1388 phdr->param_length = 0;
1389 sctp_queue_op_err(stcb, mb);
1391 SCTP_STAT_INCR(sctps_badsid);
1392 SCTP_TCB_LOCK_ASSERT(stcb);
1393 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1394 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1395 asoc->highest_tsn_inside_nr_map = tsn;
1397 if (tsn == (asoc->cumulative_tsn + 1)) {
1398 /* Update cum-ack */
1399 asoc->cumulative_tsn = tsn;
1404 * Before we continue lets validate that we are not being fooled by
1405 * an evil attacker. We can only have 4k chunks based on our TSN
1406 * spread allowed by the mapping array 512 * 8 bits, so there is no
1407 * way our stream sequence numbers could have wrapped. We of course
1408 * only validate the FIRST fragment so the bit must be set.
1410 strmseq = ntohs(ch->dp.stream_sequence);
1411 #ifdef SCTP_ASOCLOG_OF_TSNS
1412 SCTP_TCB_LOCK_ASSERT(stcb);
1413 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1414 asoc->tsn_in_at = 0;
1415 asoc->tsn_in_wrapped = 1;
1417 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1418 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1419 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1420 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1421 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1422 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1423 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1424 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1427 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1428 (TAILQ_EMPTY(&asoc->resetHead)) &&
1429 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1430 SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1431 /* The incoming sseq is behind where we last delivered? */
1432 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1433 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1435 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1436 asoc->strmin[strmno].last_sequence_delivered,
1437 tsn, strmno, strmseq);
1438 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1439 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1440 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1444 /************************************
1445 * From here down we may find ch-> invalid
1446 * so its a good idea NOT to use it.
1447 *************************************/
1449 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1450 if (last_chunk == 0) {
1451 dmbuf = SCTP_M_COPYM(*m,
1452 (offset + sizeof(struct sctp_data_chunk)),
1454 #ifdef SCTP_MBUF_LOGGING
1455 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1458 for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
1459 if (SCTP_BUF_IS_EXTENDED(mat)) {
1460 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1466 /* We can steal the last chunk */
1470 /* lop off the top part */
1471 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1472 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1473 l_len = SCTP_BUF_LEN(dmbuf);
1476 * need to count up the size hopefully does not hit
1482 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1483 l_len += SCTP_BUF_LEN(lat);
1486 if (l_len > the_len) {
1487 /* Trim the end round bytes off too */
1488 m_adj(dmbuf, -(l_len - the_len));
1491 if (dmbuf == NULL) {
1492 SCTP_STAT_INCR(sctps_nomem);
1495 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1496 asoc->fragmented_delivery_inprogress == 0 &&
1497 TAILQ_EMPTY(&asoc->resetHead) &&
1499 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1500 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1501 /* Candidate for express delivery */
1503 * Its not fragmented, No PD-API is up, Nothing in the
1504 * delivery queue, Its un-ordered OR ordered and the next to
1505 * deliver AND nothing else is stuck on the stream queue,
1506 * And there is room for it in the socket buffer. Lets just
1507 * stuff it up the buffer....
1510 /* It would be nice to avoid this copy if we could :< */
1511 sctp_alloc_a_readq(stcb, control);
1512 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1517 if (control == NULL) {
1518 goto failed_express_del;
1520 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1521 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1522 asoc->highest_tsn_inside_nr_map = tsn;
1524 sctp_add_to_readq(stcb->sctp_ep, stcb,
1525 control, &stcb->sctp_socket->so_rcv,
1526 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1528 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1529 /* for ordered, bump what we delivered */
1530 asoc->strmin[strmno].last_sequence_delivered++;
1532 SCTP_STAT_INCR(sctps_recvexpress);
1533 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1534 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1535 SCTP_STR_LOG_FROM_EXPRS_DEL);
1539 goto finish_express_del;
1542 /* If we reach here this is a new chunk */
1545 /* Express for fragmented delivery? */
1546 if ((asoc->fragmented_delivery_inprogress) &&
1547 (stcb->asoc.control_pdapi) &&
1548 (asoc->str_of_pdapi == strmno) &&
1549 (asoc->ssn_of_pdapi == strmseq)
1551 control = stcb->asoc.control_pdapi;
1552 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1553 /* Can't be another first? */
1554 goto failed_pdapi_express_del;
1556 if (tsn == (control->sinfo_tsn + 1)) {
1557 /* Yep, we can add it on */
1560 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1563 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1565 &stcb->sctp_socket->so_rcv)) {
1566 SCTP_PRINTF("Append fails end:%d\n", end);
1567 goto failed_pdapi_express_del;
1569 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1570 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1571 asoc->highest_tsn_inside_nr_map = tsn;
1573 SCTP_STAT_INCR(sctps_recvexpressm);
1574 asoc->tsn_last_delivered = tsn;
1575 asoc->fragment_flags = chunk_flags;
1576 asoc->tsn_of_pdapi_last_delivered = tsn;
1577 asoc->last_flags_delivered = chunk_flags;
1578 asoc->last_strm_seq_delivered = strmseq;
1579 asoc->last_strm_no_delivered = strmno;
1581 /* clean up the flags and such */
1582 asoc->fragmented_delivery_inprogress = 0;
1583 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1584 asoc->strmin[strmno].last_sequence_delivered++;
1586 stcb->asoc.control_pdapi = NULL;
1587 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1589 * There could be another message
1592 need_reasm_check = 1;
1596 goto finish_express_del;
1599 failed_pdapi_express_del:
1601 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1602 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1603 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1604 asoc->highest_tsn_inside_nr_map = tsn;
1607 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1608 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1609 asoc->highest_tsn_inside_map = tsn;
1612 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1613 sctp_alloc_a_chunk(stcb, chk);
1615 /* No memory so we drop the chunk */
1616 SCTP_STAT_INCR(sctps_nomem);
1617 if (last_chunk == 0) {
1618 /* we copied it, free the copy */
1619 sctp_m_freem(dmbuf);
1623 chk->rec.data.TSN_seq = tsn;
1624 chk->no_fr_allowed = 0;
1625 chk->rec.data.stream_seq = strmseq;
1626 chk->rec.data.stream_number = strmno;
1627 chk->rec.data.payloadtype = protocol_id;
1628 chk->rec.data.context = stcb->asoc.context;
1629 chk->rec.data.doing_fast_retransmit = 0;
1630 chk->rec.data.rcv_flags = chunk_flags;
1632 chk->send_size = the_len;
1634 atomic_add_int(&net->ref_count, 1);
1637 sctp_alloc_a_readq(stcb, control);
1638 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1643 if (control == NULL) {
1644 /* No memory so we drop the chunk */
1645 SCTP_STAT_INCR(sctps_nomem);
1646 if (last_chunk == 0) {
1647 /* we copied it, free the copy */
1648 sctp_m_freem(dmbuf);
1652 control->length = the_len;
1655 /* Mark it as received */
1656 /* Now queue it where it belongs */
1657 if (control != NULL) {
1658 /* First a sanity check */
1659 if (asoc->fragmented_delivery_inprogress) {
1661 * Ok, we have a fragmented delivery in progress if
1662 * this chunk is next to deliver OR belongs in our
1663 * view to the reassembly, the peer is evil or
1666 uint32_t estimate_tsn;
1668 estimate_tsn = asoc->tsn_last_delivered + 1;
1669 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1670 (estimate_tsn == control->sinfo_tsn)) {
1671 /* Evil/Broke peer */
1672 sctp_m_freem(control->data);
1673 control->data = NULL;
1674 if (control->whoFrom) {
1675 sctp_free_remote_addr(control->whoFrom);
1676 control->whoFrom = NULL;
1678 sctp_free_a_readq(stcb, control);
1679 snprintf(msg, sizeof(msg), "Reas. queue emtpy, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1680 tsn, strmno, strmseq);
1681 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1682 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1683 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1687 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1688 sctp_m_freem(control->data);
1689 control->data = NULL;
1690 if (control->whoFrom) {
1691 sctp_free_remote_addr(control->whoFrom);
1692 control->whoFrom = NULL;
1694 sctp_free_a_readq(stcb, control);
1695 snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1696 tsn, strmno, strmseq);
1697 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1698 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1699 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1705 /* No PDAPI running */
1706 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1708 * Reassembly queue is NOT empty validate
1709 * that this tsn does not need to be in
1710 * reassembly queue. If it does then our peer
1711 * is broken or evil.
1713 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1714 sctp_m_freem(control->data);
1715 control->data = NULL;
1716 if (control->whoFrom) {
1717 sctp_free_remote_addr(control->whoFrom);
1718 control->whoFrom = NULL;
1720 sctp_free_a_readq(stcb, control);
1721 snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1722 tsn, strmno, strmseq);
1723 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1724 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1725 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1731 /* ok, if we reach here we have passed the sanity checks */
1732 if (chunk_flags & SCTP_DATA_UNORDERED) {
1733 /* queue directly into socket buffer */
1734 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1735 sctp_add_to_readq(stcb->sctp_ep, stcb,
1737 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1740 * Special check for when streams are resetting. We
1741 * could be more smart about this and check the
1742 * actual stream to see if it is not being reset..
1743 * that way we would not create a HOLB when amongst
1744 * streams being reset and those not being reset.
1746 * We take complete messages that have a stream reset
1747 * intervening (aka the TSN is after where our
1748 * cum-ack needs to be) off and put them on a
1749 * pending_reply_queue. The reassembly ones we do
1750 * not have to worry about since they are all sorted
1751 * and processed by TSN order. It is only the
1752 * singletons I must worry about.
1754 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1755 SCTP_TSN_GT(tsn, liste->tsn)) {
1757 * yep its past where we need to reset... go
1758 * ahead and queue it.
1760 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
1762 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1764 struct sctp_queued_to_read *ctlOn,
1766 unsigned char inserted = 0;
1768 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
1769 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
1773 TAILQ_INSERT_BEFORE(ctlOn, control, next);
1778 if (inserted == 0) {
1780 * must be put at end, use
1781 * prevP (all setup from
1782 * loop) to setup nextP.
1784 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1788 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
1795 /* Into the re-assembly queue */
1796 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
1799 * the assoc is now gone and chk was put onto the
1800 * reasm queue, which has all been freed.
1807 if (tsn == (asoc->cumulative_tsn + 1)) {
1808 /* Update cum-ack */
1809 asoc->cumulative_tsn = tsn;
1815 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
1817 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
1819 SCTP_STAT_INCR(sctps_recvdata);
1820 /* Set it present please */
1821 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1822 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
1824 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1825 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
1826 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
1828 /* check the special flag for stream resets */
1829 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1830 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
1832 * we have finished working through the backlogged TSN's now
1833 * time to reset streams. 1: call reset function. 2: free
1834 * pending_reply space 3: distribute any chunks in
1835 * pending_reply_queue.
1837 struct sctp_queued_to_read *ctl, *nctl;
1839 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
1840 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
1841 SCTP_FREE(liste, SCTP_M_STRESET);
1842 /* sa_ignore FREED_MEMORY */
1843 liste = TAILQ_FIRST(&asoc->resetHead);
1844 if (TAILQ_EMPTY(&asoc->resetHead)) {
1845 /* All can be removed */
1846 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1847 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1848 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1854 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1855 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
1859 * if ctl->sinfo_tsn is <= liste->tsn we can
1860 * process it which is the NOT of
1861 * ctl->sinfo_tsn > liste->tsn
1863 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1864 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1871 * Now service re-assembly to pick up anything that has been
1872 * held on reassembly queue?
1874 sctp_deliver_reasm_check(stcb, asoc);
1875 need_reasm_check = 0;
1877 if (need_reasm_check) {
1878 /* Another one waits ? */
1879 sctp_deliver_reasm_check(stcb, asoc);
1884 int8_t sctp_map_lookup_tab[256] = {
1885 0, 1, 0, 2, 0, 1, 0, 3,
1886 0, 1, 0, 2, 0, 1, 0, 4,
1887 0, 1, 0, 2, 0, 1, 0, 3,
1888 0, 1, 0, 2, 0, 1, 0, 5,
1889 0, 1, 0, 2, 0, 1, 0, 3,
1890 0, 1, 0, 2, 0, 1, 0, 4,
1891 0, 1, 0, 2, 0, 1, 0, 3,
1892 0, 1, 0, 2, 0, 1, 0, 6,
1893 0, 1, 0, 2, 0, 1, 0, 3,
1894 0, 1, 0, 2, 0, 1, 0, 4,
1895 0, 1, 0, 2, 0, 1, 0, 3,
1896 0, 1, 0, 2, 0, 1, 0, 5,
1897 0, 1, 0, 2, 0, 1, 0, 3,
1898 0, 1, 0, 2, 0, 1, 0, 4,
1899 0, 1, 0, 2, 0, 1, 0, 3,
1900 0, 1, 0, 2, 0, 1, 0, 7,
1901 0, 1, 0, 2, 0, 1, 0, 3,
1902 0, 1, 0, 2, 0, 1, 0, 4,
1903 0, 1, 0, 2, 0, 1, 0, 3,
1904 0, 1, 0, 2, 0, 1, 0, 5,
1905 0, 1, 0, 2, 0, 1, 0, 3,
1906 0, 1, 0, 2, 0, 1, 0, 4,
1907 0, 1, 0, 2, 0, 1, 0, 3,
1908 0, 1, 0, 2, 0, 1, 0, 6,
1909 0, 1, 0, 2, 0, 1, 0, 3,
1910 0, 1, 0, 2, 0, 1, 0, 4,
1911 0, 1, 0, 2, 0, 1, 0, 3,
1912 0, 1, 0, 2, 0, 1, 0, 5,
1913 0, 1, 0, 2, 0, 1, 0, 3,
1914 0, 1, 0, 2, 0, 1, 0, 4,
1915 0, 1, 0, 2, 0, 1, 0, 3,
1916 0, 1, 0, 2, 0, 1, 0, 8
1921 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
1924 * Now we also need to check the mapping array in a couple of ways.
1925 * 1) Did we move the cum-ack point?
1927 * When you first glance at this you might think that all entries that
1928 * make up the position of the cum-ack would be in the nr-mapping
1929 * array only.. i.e. things up to the cum-ack are always
1930 * deliverable. That's true with one exception, when it's a fragmented
1931 * message we may not deliver the data until some threshold (or all
1932 * of it) is in place. So we must OR the nr_mapping_array and
1933 * mapping_array to get a true picture of the cum-ack.
1935 struct sctp_association *asoc;
1938 int slide_from, slide_end, lgap, distance;
1939 uint32_t old_cumack, old_base, old_highest, highest_tsn;
1943 old_cumack = asoc->cumulative_tsn;
1944 old_base = asoc->mapping_array_base_tsn;
1945 old_highest = asoc->highest_tsn_inside_map;
1947 * We could probably improve this a small bit by calculating the
1948 * offset of the current cum-ack as the starting point.
1951 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
1952 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
1956 /* there is a 0 bit */
1957 at += sctp_map_lookup_tab[val];
1961 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
1963 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
1964 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
1966 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
1967 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
1969 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
1970 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
1971 sctp_print_mapping_array(asoc);
1972 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1973 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
1975 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
1976 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
1979 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
1980 highest_tsn = asoc->highest_tsn_inside_nr_map;
1982 highest_tsn = asoc->highest_tsn_inside_map;
1984 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
1985 /* The complete array was completed by a single FR */
1986 /* highest becomes the cum-ack */
1994 /* clear the array */
1995 clr = ((at + 7) >> 3);
1996 if (clr > asoc->mapping_array_size) {
1997 clr = asoc->mapping_array_size;
1999 memset(asoc->mapping_array, 0, clr);
2000 memset(asoc->nr_mapping_array, 0, clr);
2002 for (i = 0; i < asoc->mapping_array_size; i++) {
2003 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2004 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2005 sctp_print_mapping_array(asoc);
2009 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2010 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2011 } else if (at >= 8) {
2012 /* we can slide the mapping array down */
2013 /* slide_from holds where we hit the first NON 0xff byte */
2016 * now calculate the ceiling of the move using our highest
2019 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2020 slide_end = (lgap >> 3);
2021 if (slide_end < slide_from) {
2022 sctp_print_mapping_array(asoc);
2024 panic("impossible slide");
2026 SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2027 lgap, slide_end, slide_from, at);
2031 if (slide_end > asoc->mapping_array_size) {
2033 panic("would overrun buffer");
2035 SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2036 asoc->mapping_array_size, slide_end);
2037 slide_end = asoc->mapping_array_size;
2040 distance = (slide_end - slide_from) + 1;
2041 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2042 sctp_log_map(old_base, old_cumack, old_highest,
2043 SCTP_MAP_PREPARE_SLIDE);
2044 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2045 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2047 if (distance + slide_from > asoc->mapping_array_size ||
2050 * Here we do NOT slide forward the array so that
2051 * hopefully when more data comes in to fill it up
2052 * we will be able to slide it forward. Really I
2053 * don't think this should happen :-0
2056 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2057 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2058 (uint32_t) asoc->mapping_array_size,
2059 SCTP_MAP_SLIDE_NONE);
2064 for (ii = 0; ii < distance; ii++) {
2065 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2066 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2069 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2070 asoc->mapping_array[ii] = 0;
2071 asoc->nr_mapping_array[ii] = 0;
2073 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2074 asoc->highest_tsn_inside_map += (slide_from << 3);
2076 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2077 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2079 asoc->mapping_array_base_tsn += (slide_from << 3);
2080 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2081 sctp_log_map(asoc->mapping_array_base_tsn,
2082 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2083 SCTP_MAP_SLIDE_RESULT);
/*
 * sctp_sack_check() - decide whether to emit a SACK immediately or to
 * (re)start the delayed-ack (RECV) timer after DATA has been processed.
 * NOTE(review): this extract has elided lines (original line numbers are
 * non-contiguous), so braces/else-arms below are incomplete as shown.
 */
2090 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2092 struct sctp_association *asoc;
2093 uint32_t highest_tsn;
/* Highest seen TSN is the max of the renegable and non-renegable maps. */
2096 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2097 highest_tsn = asoc->highest_tsn_inside_nr_map;
2099 highest_tsn = asoc->highest_tsn_inside_map;
2103 * Now we need to see if we need to queue a sack or just start the
2104 * timer (if allowed).
/*
 * SHUTDOWN-SENT special case: stop any pending delayed-ack timer and
 * send both a SHUTDOWN and a SACK right away.
 */
2106 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2108 * Ok special case, in SHUTDOWN-SENT case. here we maker
2109 * sure SACK timer is off and instead send a SHUTDOWN and a
2112 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2113 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2114 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
/* SHUTDOWN goes to the alternate net if one is set, else the primary. */
2116 sctp_send_shutdown(stcb,
2117 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2118 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2122 /* is there a gap now ? */
2123 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2126 * CMT DAC algorithm: increase number of packets received
2129 stcb->asoc.cmt_dac_pkts_rcvd++;
/*
 * Immediate-SACK triggers: explicit request, a gap just closed, dup
 * TSNs present, an open gap, delayed ack disabled, or packet-count
 * limit (sack_freq) reached. Otherwise fall through to timer start.
 */
2131 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2133 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2135 (stcb->asoc.numduptsns) || /* we have dup's */
2136 (is_a_gap) || /* is still a gap */
2137 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2138 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
/*
 * CMT DAC exception: even though a trigger fired, delay the ack when
 * DAC is enabled and the only reason was a gap report (no explicit
 * request, no dups, delayed ack on, timer not already pending).
 */
2141 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2142 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2143 (stcb->asoc.send_sack == 0) &&
2144 (stcb->asoc.numduptsns == 0) &&
2145 (stcb->asoc.delayed_ack) &&
2146 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2149 * CMT DAC algorithm: With CMT, delay acks
2150 * even in the face of
2152 * reordering. Therefore, if acks that do not
2153 * have to be sent because of the above
2154 * reasons, will be delayed. That is, acks
2155 * that would have been sent due to gap
2156 * reports will be delayed with DAC. Start
2157 * the delayed ack timer.
2159 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2160 stcb->sctp_ep, stcb, NULL);
2163 * Ok we must build a SACK since the timer
2164 * is pending, we got our first packet OR
2165 * there are gaps or duplicates.
2167 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2168 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* No immediate-SACK trigger: just arm the delayed-ack timer if idle. */
2171 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2172 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2173 stcb->sctp_ep, stcb, NULL);
/*
 * sctp_service_queues() - drive delivery from the reassembly queue: finish
 * an in-progress partial-delivery (PD-API) pass, then check whether the
 * first queued fragment can start a new delivery.
 * NOTE(review): lines are elided in this extract (non-contiguous original
 * line numbers), so some bodies/braces are missing as shown.
 */
2180 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2182 struct sctp_tmit_chunk *chk;
2183 uint32_t tsize, pd_point;
/* First, push any ongoing fragmented delivery forward. */
2186 if (asoc->fragmented_delivery_inprogress) {
2187 sctp_service_reassembly(stcb, asoc);
2189 /* Can we proceed further, i.e. the PD-API is complete */
2190 if (asoc->fragmented_delivery_inprogress) {
2195 * Now is there some other chunk I can deliver from the reassembly
2199 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Queue empty: reset the reassembly accounting (presumably defensive). */
2201 asoc->size_on_reasm_queue = 0;
2202 asoc->cnt_on_reasm_queue = 0;
2205 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
/* Deliverable only if it is a FIRST fragment that is next in stream
 * order, or is unordered. */
2206 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2207 ((nxt_todel == chk->rec.data.stream_seq) ||
2208 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2210 * Yep the first one is here. We setup to start reception,
2211 * by backing down the TSN just in case we can't deliver.
2215 * Before we start though either all of the message should
2216 * be here or the socket buffer max or nothing on the
2217 * delivery queue and something can be delivered.
/* pd_point: threshold for starting partial delivery; bounded by a
 * fraction of the socket receive buffer when a socket exists. */
2219 if (stcb->sctp_socket) {
2220 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
2221 stcb->sctp_ep->partial_delivery_point);
2223 pd_point = stcb->sctp_ep->partial_delivery_point;
2225 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
/* Latch PD-API state: stream/ssn/ppid/flags of the message being
 * partially delivered, and the TSN just before its first chunk. */
2226 asoc->fragmented_delivery_inprogress = 1;
2227 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2228 asoc->str_of_pdapi = chk->rec.data.stream_number;
2229 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2230 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2231 asoc->fragment_flags = chk->rec.data.rcv_flags;
2232 sctp_service_reassembly(stcb, asoc);
2233 if (asoc->fragmented_delivery_inprogress == 0) {
/*
 * sctp_process_data() - walk every chunk in an inbound packet's DATA
 * region: validate lengths, hand DATA chunks to
 * sctp_process_a_data_chunk(), queue error reports for unknown chunk
 * types, then service the reassembly queue and trigger SACK handling.
 * NOTE(review): this extract is line-sampled (original line numbers are
 * non-contiguous); several statements/braces are missing as shown.
 */
2241 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2242 struct sockaddr *src, struct sockaddr *dst,
2243 struct sctphdr *sh, struct sctp_inpcb *inp,
2244 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2245 uint8_t use_mflowid, uint32_t mflowid,
2246 uint32_t vrf_id, uint16_t port)
2248 struct sctp_data_chunk *ch, chunk_buf;
2249 struct sctp_association *asoc;
2250 int num_chunks = 0; /* number of control chunks processed */
2252 int chk_length, break_flag, last_chunk;
2253 int abort_flag = 0, was_a_gap;
2255 uint32_t highest_tsn;
/* Refresh our advertised receive window before processing. */
2258 sctp_set_rwnd(stcb, &stcb->asoc);
2261 SCTP_TCB_LOCK_ASSERT(stcb);
/* Record whether a gap existed before this packet (for SACK decision). */
2263 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2264 highest_tsn = asoc->highest_tsn_inside_nr_map;
2266 highest_tsn = asoc->highest_tsn_inside_map;
2268 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2270 * setup where we got the last DATA packet from for any SACK that
2271 * may need to go out. Don't bump the net. This is done ONLY when a
2272 * chunk is assigned.
2274 asoc->last_data_chunk_from = net;
2277 * Now before we proceed we must figure out if this is a wasted
2278 * cluster... i.e. it is a small packet sent in and yet the driver
2279 * underneath allocated a full cluster for it. If so we must copy it
2280 * to a smaller mbuf and free up the cluster mbuf. This will help
2281 * with cluster starvation. Note for __Panda__ we don't do this
2282 * since it has clusters all the way down to 64 bytes.
2284 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2285 /* we only handle mbufs that are singletons.. not chains */
2286 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2288 /* ok lets see if we can copy the data up */
2291 /* get the pointers and copy */
2292 to = mtod(m, caddr_t *);
2293 from = mtod((*mm), caddr_t *);
2294 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2295 /* copy the length and free up the old */
2296 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2298 /* sucess, back copy */
2301 /* We are in trouble in the mbuf world .. yikes */
2305 /* get pointer to the first chunk header */
2306 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2307 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2312 * process all DATA chunks...
2314 *high_tsn = asoc->cumulative_tsn;
2316 asoc->data_pkts_seen++;
/* Main chunk-walk loop over the remainder of the packet. */
2317 while (stop_proc == 0) {
2318 /* validate chunk length */
2319 chk_length = ntohs(ch->ch.chunk_length);
2320 if (length - *offset < chk_length) {
2321 /* all done, mutulated chunk */
2325 if (ch->ch.chunk_type == SCTP_DATA) {
/* DATA shorter than its fixed header: protocol violation, abort. */
2326 if ((size_t)chk_length < sizeof(struct sctp_data_chunk)) {
2328 * Need to send an abort since we had a
2329 * invalid data chunk.
2331 struct mbuf *op_err;
2332 char msg[SCTP_DIAG_INFO_LEN];
2334 snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2336 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2337 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2338 sctp_abort_association(inp, stcb, m, iphlen,
2339 src, dst, sh, op_err,
2340 use_mflowid, mflowid,
/* DATA with header but zero user bytes: abort with "no user data". */
2344 if ((size_t)chk_length == sizeof(struct sctp_data_chunk)) {
2346 * Need to send an abort since we had an
2349 struct mbuf *op_err;
2351 op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
2352 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2353 sctp_abort_association(inp, stcb, m, iphlen,
2354 src, dst, sh, op_err,
2355 use_mflowid, mflowid,
2359 #ifdef SCTP_AUDITING_ENABLED
2360 sctp_audit_log(0xB1, 0);
2362 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2367 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2368 chk_length, net, high_tsn, &abort_flag, &break_flag,
2377 * Set because of out of rwnd space and no
2378 * drop rep space left.
2384 /* not a data chunk in the data region */
2385 switch (ch->ch.chunk_type) {
2386 case SCTP_INITIATION:
2387 case SCTP_INITIATION_ACK:
2388 case SCTP_SELECTIVE_ACK:
2389 case SCTP_NR_SELECTIVE_ACK:
2390 case SCTP_HEARTBEAT_REQUEST:
2391 case SCTP_HEARTBEAT_ACK:
2392 case SCTP_ABORT_ASSOCIATION:
2394 case SCTP_SHUTDOWN_ACK:
2395 case SCTP_OPERATION_ERROR:
2396 case SCTP_COOKIE_ECHO:
2397 case SCTP_COOKIE_ACK:
2400 case SCTP_SHUTDOWN_COMPLETE:
2401 case SCTP_AUTHENTICATION:
2402 case SCTP_ASCONF_ACK:
2403 case SCTP_PACKET_DROPPED:
2404 case SCTP_STREAM_RESET:
2405 case SCTP_FORWARD_CUM_TSN:
2408 * Now, what do we do with KNOWN chunks that
2409 * are NOT in the right place?
2411 * For now, I do nothing but ignore them. We
2412 * may later want to add sysctl stuff to
2413 * switch out and do either an ABORT() or
2414 * possibly process them.
2416 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2417 struct mbuf *op_err;
2419 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "");
2420 sctp_abort_association(inp, stcb,
2424 use_mflowid, mflowid,
2430 /* unknown chunk type, use bit rules */
/* High bit 0x40: report the unrecognized chunk back to the peer. */
2431 if (ch->ch.chunk_type & 0x40) {
2432 /* Add a error report to the queue */
2434 struct sctp_paramhdr *phd;
2436 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
2438 phd = mtod(merr, struct sctp_paramhdr *);
2440 * We cheat and use param
2441 * type since we did not
2442 * bother to define a error
2443 * cause struct. They are
2444 * the same basic format
2445 * with different names.
2448 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2450 htons(chk_length + sizeof(*phd));
2451 SCTP_BUF_LEN(merr) = sizeof(*phd);
2452 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2453 if (SCTP_BUF_NEXT(merr)) {
2454 if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
2457 sctp_queue_op_err(stcb, merr);
/* High bit 0x80 clear: stop processing the rest of this packet. */
2464 if ((ch->ch.chunk_type & 0x80) == 0) {
2465 /* discard the rest of this packet */
2467 } /* else skip this bad chunk and
2470 } /* switch of chunk type */
/* Advance to the next chunk on a 4-byte boundary. */
2472 *offset += SCTP_SIZE32(chk_length);
2473 if ((*offset >= length) || stop_proc) {
2474 /* no more data left in the mbuf chain */
2478 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2479 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2488 * we need to report rwnd overrun drops.
2490 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2494 * Did we get data, if so update the time for auto-close and
2495 * give peer credit for being alive.
2497 SCTP_STAT_INCR(sctps_recvpktwithdata);
2498 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2499 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2500 stcb->asoc.overall_error_count,
2502 SCTP_FROM_SCTP_INDATA,
/* Fresh data from the peer resets the association error count. */
2505 stcb->asoc.overall_error_count = 0;
2506 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2508 /* now service all of the reassm queue if needed */
2509 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2510 sctp_service_queues(stcb, asoc);
2512 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2513 /* Assure that we ack right away */
2514 stcb->asoc.send_sack = 1;
2516 /* Start a sack timer or QUEUE a SACK for sending */
2517 sctp_sack_check(stcb, was_a_gap);
/*
 * sctp_process_segment_range() - mark every chunk on the sent queue whose
 * TSN falls inside one gap-ack block [last_tsn+frag_strt, last_tsn+frag_end]
 * as gap-acked: update flight size, RTO, CMT pseudo-cumack trackers, and
 * (for nr_sacking) release the chunk's data. Returns wake_him, used only
 * for NR-SACK processing (see the return comment below).
 * NOTE(review): this extract has elided lines (non-contiguous original
 * line numbers); some conditions/braces are incomplete as shown.
 */
2522 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2523 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2525 uint32_t * biggest_newly_acked_tsn,
2526 uint32_t * this_sack_lowest_newack,
2529 struct sctp_tmit_chunk *tp1;
2530 unsigned int theTSN;
2531 int j, wake_him = 0, circled = 0;
2533 /* Recover the tp1 we last saw */
2536 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
/* Outer loop: each offset j in the gap block maps to absolute TSN
 * theTSN = last_tsn + j. */
2538 for (j = frag_strt; j <= frag_end; j++) {
2539 theTSN = j + last_tsn;
2541 if (tp1->rec.data.doing_fast_retransmit)
2545 * CMT: CUCv2 algorithm. For each TSN being
2546 * processed from the sent queue, track the
2547 * next expected pseudo-cumack, or
2548 * rtx_pseudo_cumack, if required. Separate
2549 * cumack trackers for first transmissions,
2550 * and retransmissions.
2552 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2553 (tp1->snd_count == 1)) {
2554 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2555 tp1->whoTo->find_pseudo_cumack = 0;
2557 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2558 (tp1->snd_count > 1)) {
2559 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2560 tp1->whoTo->find_rtx_pseudo_cumack = 0;
/* Found the chunk carrying this gap-acked TSN. */
2562 if (tp1->rec.data.TSN_seq == theTSN) {
2563 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2565 * must be held until
2568 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2570 * If it is less than RESEND, it is
2571 * now no-longer in flight.
2572 * Higher values may already be set
2573 * via previous Gap Ack Blocks...
2574 * i.e. ACKED or RESEND.
2576 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2577 *biggest_newly_acked_tsn)) {
2578 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2581 * CMT: SFR algo (and HTNA) - set
2582 * saw_newack to 1 for dest being
2583 * newly acked. update
2584 * this_sack_highest_newack if
2587 if (tp1->rec.data.chunk_was_revoked == 0)
2588 tp1->whoTo->saw_newack = 1;
2590 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2591 tp1->whoTo->this_sack_highest_newack)) {
2592 tp1->whoTo->this_sack_highest_newack =
2593 tp1->rec.data.TSN_seq;
2596 * CMT DAC algo: also update
2597 * this_sack_lowest_newack
2599 if (*this_sack_lowest_newack == 0) {
2600 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2601 sctp_log_sack(*this_sack_lowest_newack,
2603 tp1->rec.data.TSN_seq,
2606 SCTP_LOG_TSN_ACKED);
2608 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2611 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2612 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2613 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2614 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2615 * Separate pseudo_cumack trackers for first transmissions and
2618 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2619 if (tp1->rec.data.chunk_was_revoked == 0) {
2620 tp1->whoTo->new_pseudo_cumack = 1;
2622 tp1->whoTo->find_pseudo_cumack = 1;
2624 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2625 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2627 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2628 if (tp1->rec.data.chunk_was_revoked == 0) {
2629 tp1->whoTo->new_pseudo_cumack = 1;
2631 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2633 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2634 sctp_log_sack(*biggest_newly_acked_tsn,
2636 tp1->rec.data.TSN_seq,
2639 SCTP_LOG_TSN_ACKED);
2641 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2642 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2643 tp1->whoTo->flight_size,
2645 (uintptr_t) tp1->whoTo,
2646 tp1->rec.data.TSN_seq);
/* Chunk leaves flight; credit the destination's net_ack. */
2648 sctp_flight_size_decrease(tp1);
2649 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2650 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2653 sctp_total_flight_decrease(stcb, tp1);
2655 tp1->whoTo->net_ack += tp1->send_size;
2656 if (tp1->snd_count < 2) {
2658 * True non-retransmited chunk
2660 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTT sample from a first-transmission chunk (Karn's rule implied
 * by the snd_count checks around this call — elided here). */
2668 sctp_calculate_rto(stcb,
2671 &tp1->sent_rcv_time,
2672 sctp_align_safe_nocopy,
2673 SCTP_RTT_FROM_DATA);
2676 if (tp1->whoTo->rto_needed == 0) {
2677 tp1->whoTo->rto_needed = 1;
2683 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2684 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2685 stcb->asoc.this_sack_highest_gap)) {
2686 stcb->asoc.this_sack_highest_gap =
2687 tp1->rec.data.TSN_seq;
2689 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2690 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2691 #ifdef SCTP_AUDITING_ENABLED
2692 sctp_audit_log(0xB2,
2693 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2698 * All chunks NOT UNSENT fall through here and are marked
2699 * (leave PR-SCTP ones that are to skip alone though)
2701 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2702 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2703 tp1->sent = SCTP_DATAGRAM_MARKED;
2705 if (tp1->rec.data.chunk_was_revoked) {
2706 /* deflate the cwnd */
2707 tp1->whoTo->cwnd -= tp1->book_size;
2708 tp1->rec.data.chunk_was_revoked = 0;
2710 /* NR Sack code here */
/* Non-renegable ack: the peer will never revoke, so release data. */
2712 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2713 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2714 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2717 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2720 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2726 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2727 sctp_m_freem(tp1->data);
2734 } /* if (tp1->TSN_seq == theTSN) */
/* Sent queue is TSN-ordered: once past theTSN we can stop scanning. */
2735 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2738 tp1 = TAILQ_NEXT(tp1, sctp_next);
/* Wrap to the queue head once if the blocks arrived out of order. */
2739 if ((tp1 == NULL) && (circled == 0)) {
2741 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2743 } /* end while (tp1) */
2746 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2748 /* In case the fragments were not in order we must reset */
2749 } /* end for (j = fragStart */
2751 return (wake_him); /* Return value only used for nr-sack */
/*
 * sctp_handle_segments() - iterate over the (NR-)SACK's gap-ack blocks,
 * validating each (start <= end, in-order restart) and delegating the
 * per-block marking to sctp_process_segment_range(). Returns chunk_freed,
 * non-zero when NR-SACK processing released chunk data.
 * NOTE(review): lines are elided in this extract (non-contiguous original
 * line numbers), so some bodies/braces are missing as shown.
 */
2756 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2757 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2758 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2759 int num_seg, int num_nr_seg, int *rto_ok)
2761 struct sctp_gap_ack_block *frag, block;
2762 struct sctp_tmit_chunk *tp1;
2767 uint16_t frag_strt, frag_end, prev_frag_end;
2769 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Regular gap blocks come first, then NR gap blocks. */
2773 for (i = 0; i < (num_seg + num_nr_seg); i++) {
2776 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Pull the next 4-byte start/end pair out of the SACK chunk. */
2778 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
2779 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
2780 *offset += sizeof(block);
2782 return (chunk_freed);
2784 frag_strt = ntohs(frag->start);
2785 frag_end = ntohs(frag->end);
2787 if (frag_strt > frag_end) {
2788 /* This gap report is malformed, skip it. */
2791 if (frag_strt <= prev_frag_end) {
2792 /* This gap report is not in order, so restart. */
2793 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2795 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
2796 *biggest_tsn_acked = last_tsn + frag_end;
2803 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
2804 non_revocable, &num_frs, biggest_newly_acked_tsn,
2805 this_sack_lowest_newack, rto_ok)) {
2808 prev_frag_end = frag_end;
2810 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2812 sctp_log_fr(*biggest_tsn_acked,
2813 *biggest_newly_acked_tsn,
2814 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2816 return (chunk_freed);
/*
 * sctp_check_for_revoked() - after SACK processing, scan the sent queue
 * for chunks above cumack that were previously gap-acked (ACKED) but are
 * absent from this SACK's gap reports: those have been revoked by the
 * peer and must be put back in flight; chunks re-acked this round are
 * promoted MARKED -> ACKED.
 * NOTE(review): lines are elided in this extract (non-contiguous original
 * line numbers), so some bodies/braces are missing as shown.
 */
2820 sctp_check_for_revoked(struct sctp_tcb *stcb,
2821 struct sctp_association *asoc, uint32_t cumack,
2822 uint32_t biggest_tsn_acked)
2824 struct sctp_tmit_chunk *tp1;
2826 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2827 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
2829 * ok this guy is either ACK or MARKED. If it is
2830 * ACKED it has been previously acked but not this
2831 * time i.e. revoked. If it is MARKED it was ACK'ed
/* Beyond the highest TSN this SACK covers: nothing more to decide. */
2834 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
2837 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2838 /* it has been revoked */
2839 tp1->sent = SCTP_DATAGRAM_SENT;
2840 tp1->rec.data.chunk_was_revoked = 1;
2842 * We must add this stuff back in to assure
2843 * timers and such get started.
2845 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2846 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
2847 tp1->whoTo->flight_size,
2849 (uintptr_t) tp1->whoTo,
2850 tp1->rec.data.TSN_seq);
/* Revoked chunk re-enters flight accounting. */
2852 sctp_flight_size_increase(tp1);
2853 sctp_total_flight_increase(stcb, tp1);
2855 * We inflate the cwnd to compensate for our
2856 * artificial inflation of the flight_size.
2858 tp1->whoTo->cwnd += tp1->book_size;
2859 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2860 sctp_log_sack(asoc->last_acked_seq,
2862 tp1->rec.data.TSN_seq,
2865 SCTP_LOG_TSN_REVOKED);
2867 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2868 /* it has been re-acked in this SACK */
2869 tp1->sent = SCTP_DATAGRAM_ACKED;
/* UNSENT entries mark the end of transmitted data on the queue. */
2872 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
/*
 * sctp_strike_gap_ack_chunks() - walk the sent queue and "strike" (bump
 * the miss count of) chunks implied missing by this SACK's gap reports,
 * applying the HTNA rule, CMT SFR/DAC constraints, and PR-SCTP expiry;
 * chunks that reach RESEND are set up for fast retransmission (alternate
 * destination selection, fast_retran_tsn bookkeeping, rwnd credit).
 * NOTE(review): this extract is line-sampled (non-contiguous original
 * line numbers); many statements/braces are missing as shown.
 */
2879 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2880 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
2882 struct sctp_tmit_chunk *tp1;
2883 int strike_flag = 0;
2885 int tot_retrans = 0;
2886 uint32_t sending_seq;
2887 struct sctp_nets *net;
2888 int num_dests_sacked = 0;
2891 * select the sending_seq, this is either the next thing ready to be
2892 * sent but not transmitted, OR, the next seq we assign.
2894 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2896 sending_seq = asoc->sending_seq;
2898 sending_seq = tp1->rec.data.TSN_seq;
2901 /* CMT DAC algo: finding out if SACK is a mixed SACK */
2902 if ((asoc->sctp_cmt_on_off > 0) &&
2903 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
2904 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
2905 if (net->saw_newack)
/* Snapshot the clock once for all PR-SCTP TTL checks below. */
2909 if (stcb->asoc.peer_supports_prsctp) {
2910 (void)SCTP_GETTIME_TIMEVAL(&now);
2912 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2914 if (tp1->no_fr_allowed) {
2915 /* this one had a timeout or something */
2918 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2919 if (tp1->sent < SCTP_DATAGRAM_RESEND)
2920 sctp_log_fr(biggest_tsn_newly_acked,
2921 tp1->rec.data.TSN_seq,
2923 SCTP_FR_LOG_CHECK_STRIKE);
/* Past the SACK's coverage, or never sent: stop/skip. */
2925 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
2926 tp1->sent == SCTP_DATAGRAM_UNSENT) {
2930 if (stcb->asoc.peer_supports_prsctp) {
2931 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
2932 /* Is it expired? */
2933 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
2934 /* Yes so drop it */
2935 if (tp1->data != NULL) {
2936 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
2937 SCTP_SO_NOT_LOCKED);
2943 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
2944 /* we are beyond the tsn in the sack */
2947 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
2948 /* either a RESEND, ACKED, or MARKED */
2950 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
2951 /* Continue strikin FWD-TSN chunks */
2952 tp1->rec.data.fwd_tsn_cnt++;
2957 * CMT : SFR algo (covers part of DAC and HTNA as well)
2959 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
2961 * No new acks were receieved for data sent to this
2962 * dest. Therefore, according to the SFR algo for
2963 * CMT, no data sent to this dest can be marked for
2964 * FR using this SACK.
2967 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2968 tp1->whoTo->this_sack_highest_newack)) {
2970 * CMT: New acks were receieved for data sent to
2971 * this dest. But no new acks were seen for data
2972 * sent after tp1. Therefore, according to the SFR
2973 * algo for CMT, tp1 cannot be marked for FR using
2974 * this SACK. This step covers part of the DAC algo
2975 * and the HTNA algo as well.
2980 * Here we check to see if we were have already done a FR
2981 * and if so we see if the biggest TSN we saw in the sack is
2982 * smaller than the recovery point. If so we don't strike
2983 * the tsn... otherwise we CAN strike the TSN.
2986 * @@@ JRI: Check for CMT if (accum_moved &&
2987 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
/* Case 1: in fast-recovery with the cum-ack moving — strike. */
2990 if (accum_moved && asoc->fast_retran_loss_recovery) {
2992 * Strike the TSN if in fast-recovery and cum-ack
2995 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2996 sctp_log_fr(biggest_tsn_newly_acked,
2997 tp1->rec.data.TSN_seq,
2999 SCTP_FR_LOG_STRIKE_CHUNK);
3001 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3004 if ((asoc->sctp_cmt_on_off > 0) &&
3005 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3007 * CMT DAC algorithm: If SACK flag is set to
3008 * 0, then lowest_newack test will not pass
3009 * because it would have been set to the
3010 * cumack earlier. If not already to be
3011 * rtx'd, If not a mixed sack and if tp1 is
3012 * not between two sacked TSNs, then mark by
3013 * one more. NOTE that we are marking by one
3014 * additional time since the SACK DAC flag
3015 * indicates that two packets have been
3016 * received after this missing TSN.
3018 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3019 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3020 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3021 sctp_log_fr(16 + num_dests_sacked,
3022 tp1->rec.data.TSN_seq,
3024 SCTP_FR_LOG_STRIKE_CHUNK);
/* Case 2: chunk already fast-retransmitted (non-CMT) — only strike
 * again if the ack is beyond where we stood at FR time. */
3029 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3030 (asoc->sctp_cmt_on_off == 0)) {
3032 * For those that have done a FR we must take
3033 * special consideration if we strike. I.e the
3034 * biggest_newly_acked must be higher than the
3035 * sending_seq at the time we did the FR.
3038 #ifdef SCTP_FR_TO_ALTERNATE
3040 * If FR's go to new networks, then we must only do
3041 * this for singly homed asoc's. However if the FR's
3042 * go to the same network (Armando's work) then its
3043 * ok to FR multiple times.
3051 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3052 tp1->rec.data.fast_retran_tsn)) {
3054 * Strike the TSN, since this ack is
3055 * beyond where things were when we
3058 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3059 sctp_log_fr(biggest_tsn_newly_acked,
3060 tp1->rec.data.TSN_seq,
3062 SCTP_FR_LOG_STRIKE_CHUNK);
3064 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3068 if ((asoc->sctp_cmt_on_off > 0) &&
3069 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3071 * CMT DAC algorithm: If
3072 * SACK flag is set to 0,
3073 * then lowest_newack test
3074 * will not pass because it
3075 * would have been set to
3076 * the cumack earlier. If
3077 * not already to be rtx'd,
3078 * If not a mixed sack and
3079 * if tp1 is not between two
3080 * sacked TSNs, then mark by
3081 * one more. NOTE that we
3082 * are marking by one
3083 * additional time since the
3084 * SACK DAC flag indicates
3085 * that two packets have
3086 * been received after this
3089 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3090 (num_dests_sacked == 1) &&
3091 SCTP_TSN_GT(this_sack_lowest_newack,
3092 tp1->rec.data.TSN_seq)) {
3093 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3094 sctp_log_fr(32 + num_dests_sacked,
3095 tp1->rec.data.TSN_seq,
3097 SCTP_FR_LOG_STRIKE_CHUNK);
3107 * JRI: TODO: remove code for HTNA algo. CMT's SFR
/* Case 3: HTNA — never strike TSNs above the highest newly acked. */
3110 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3111 biggest_tsn_newly_acked)) {
3113 * We don't strike these: This is the HTNA
3114 * algorithm i.e. we don't strike If our TSN is
3115 * larger than the Highest TSN Newly Acked.
/* Case 4: default — strike the TSN. */
3119 /* Strike the TSN */
3120 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3121 sctp_log_fr(biggest_tsn_newly_acked,
3122 tp1->rec.data.TSN_seq,
3124 SCTP_FR_LOG_STRIKE_CHUNK);
3126 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3129 if ((asoc->sctp_cmt_on_off > 0) &&
3130 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3132 * CMT DAC algorithm: If SACK flag is set to
3133 * 0, then lowest_newack test will not pass
3134 * because it would have been set to the
3135 * cumack earlier. If not already to be
3136 * rtx'd, If not a mixed sack and if tp1 is
3137 * not between two sacked TSNs, then mark by
3138 * one more. NOTE that we are marking by one
3139 * additional time since the SACK DAC flag
3140 * indicates that two packets have been
3141 * received after this missing TSN.
3143 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3144 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3145 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3146 sctp_log_fr(48 + num_dests_sacked,
3147 tp1->rec.data.TSN_seq,
3149 SCTP_FR_LOG_STRIKE_CHUNK);
/* Chunk just crossed the strike threshold into RESEND: set up FR. */
3155 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3156 struct sctp_nets *alt;
3158 /* fix counts and things */
3159 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3160 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3161 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3163 (uintptr_t) tp1->whoTo,
3164 tp1->rec.data.TSN_seq);
3167 tp1->whoTo->net_ack++;
3168 sctp_flight_size_decrease(tp1);
3169 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3170 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3174 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3175 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3176 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3178 /* add back to the rwnd */
3179 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3181 /* remove from the total flight */
3182 sctp_total_flight_decrease(stcb, tp1);
/* PR-SCTP RTX policy: retransmission-count limit stored in tv_sec. */
3184 if ((stcb->asoc.peer_supports_prsctp) &&
3185 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3187 * Has it been retransmitted tv_sec times? -
3188 * we store the retran count there.
3190 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3191 /* Yes, so drop it */
3192 if (tp1->data != NULL) {
3193 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3194 SCTP_SO_NOT_LOCKED);
3196 /* Make sure to flag we had a FR */
3197 tp1->whoTo->net_ack++;
3202 * SCTP_PRINTF("OK, we are now ready to FR this
3205 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3206 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3210 /* This is a subsequent FR */
3211 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3213 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
/* Destination choice for the retransmission under CMT. */
3214 if (asoc->sctp_cmt_on_off > 0) {
3216 * CMT: Using RTX_SSTHRESH policy for CMT.
3217 * If CMT is being used, then pick dest with
3218 * largest ssthresh for any retransmission.
3220 tp1->no_fr_allowed = 1;
3222 /* sa_ignore NO_NULL_CHK */
3223 if (asoc->sctp_cmt_pf > 0) {
3225 * JRS 5/18/07 - If CMT PF is on,
3226 * use the PF version of
3229 alt = sctp_find_alternate_net(stcb, alt, 2);
3232 * JRS 5/18/07 - If only CMT is on,
3233 * use the CMT version of
3236 /* sa_ignore NO_NULL_CHK */
3237 alt = sctp_find_alternate_net(stcb, alt, 1);
3243 * CUCv2: If a different dest is picked for
3244 * the retransmission, then new
3245 * (rtx-)pseudo_cumack needs to be tracked
3246 * for orig dest. Let CUCv2 track new (rtx-)
3247 * pseudo-cumack always.
3250 tp1->whoTo->find_pseudo_cumack = 1;
3251 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3253 } else {/* CMT is OFF */
3255 #ifdef SCTP_FR_TO_ALTERNATE
3256 /* Can we find an alternate? */
3257 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3260 * default behavior is to NOT retransmit
3261 * FR's to an alternate. Armando Caro's
3262 * paper details why.
3268 tp1->rec.data.doing_fast_retransmit = 1;
3270 /* mark the sending seq for possible subsequent FR's */
3272 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3273 * (uint32_t)tpi->rec.data.TSN_seq);
3275 if (TAILQ_EMPTY(&asoc->send_queue)) {
3277 * If the queue of send is empty then its
3278 * the next sequence number that will be
3279 * assigned so we subtract one from this to
3280 * get the one we last sent.
3282 tp1->rec.data.fast_retran_tsn = sending_seq;
3285 * If there are chunks on the send queue
3286 * (unsent data that has made it from the
3287 * stream queues but not out the door, we
3288 * take the first one (which will have the
3289 * lowest TSN) and subtract one to get the
3292 struct sctp_tmit_chunk *ttt;
3294 ttt = TAILQ_FIRST(&asoc->send_queue);
3295 tp1->rec.data.fast_retran_tsn =
3296 ttt->rec.data.TSN_seq;
3301 * this guy had a RTO calculation pending on
3304 if ((tp1->whoTo != NULL) &&
3305 (tp1->whoTo->rto_needed == 0)) {
3306 tp1->whoTo->rto_needed = 1;
/* Move the chunk to the chosen alternate destination. */
3310 if (alt != tp1->whoTo) {
3311 /* yes, there is an alternate. */
3312 sctp_free_remote_addr(tp1->whoTo);
3313 /* sa_ignore FREED_MEMORY */
3315 atomic_add_int(&alt->ref_count, 1);
/*
 * sctp_try_advance_peer_ack_point() - PR-SCTP: walk the sent queue and
 * advance asoc->advanced_peer_ack_point over chunks that are marked to be
 * skipped (FORWARD_TSN_SKIP / NR_ACKED) or whose PR-SCTP lifetime has
 * expired, stopping at the first chunk that must still be delivered.
 * Returns the advance-point chunk (a_adv) per the visible declarations.
 * NOTE(review): lines are elided in this extract (non-contiguous original
 * line numbers), so some bodies/braces are missing as shown.
 */
3321 struct sctp_tmit_chunk *
3322 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3323 struct sctp_association *asoc)
3325 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to do unless the peer negotiated PR-SCTP. */
3329 if (asoc->peer_supports_prsctp == 0) {
3332 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3333 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3334 tp1->sent != SCTP_DATAGRAM_RESEND &&
3335 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3336 /* no chance to advance, out of here */
3339 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3340 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3341 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3342 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3343 asoc->advanced_peer_ack_point,
3344 tp1->rec.data.TSN_seq, 0, 0);
3347 if (!PR_SCTP_ENABLED(tp1->flags)) {
3349 * We can't fwd-tsn past any that are reliable aka
3350 * retransmitted until the asoc fails.
3355 (void)SCTP_GETTIME_TIMEVAL(&now);
3359 * now we got a chunk which is marked for another
3360 * retransmission to a PR-stream but has run out its chances
3361 * already maybe OR has been marked to skip now. Can we skip
3362 * it if its a resend?
3364 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3365 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3367 * Now is this one marked for resend and its time is
/* TTL policy: expired RESEND chunks are abandoned and skipped. */
3370 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3371 /* Yes so drop it */
3373 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3374 1, SCTP_SO_NOT_LOCKED);
3378 * No, we are done when hit one for resend
3379 * whos time as not expired.
3385 * Ok now if this chunk is marked to drop it we can clean up
3386 * the chunk, advance our peer ack point and we can check
3389 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3390 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3391 /* advance PeerAckPoint goes forward */
3392 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3393 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3395 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3396 /* No update but we do save the chk */
3401 * If it is still in RESEND we can advance no
/*
 * Audit the association's flight-size bookkeeping against the sent_queue.
 * Counts chunks per sent-state bucket (inflight / resend / inbetween /
 * acked / above) and reports a mismatch via SCTP_PRINTF -- or panic()
 * under the (not visible here, presumably debug) build option guarding
 * line 3444.  The caller uses the return value to decide whether to
 * rebuild flight counters from scratch (see the express-SACK handler).
 *
 * NOTE(review): fragmentary extract -- the early-return for the
 * all-PR-SCTP case (line 3421), the increment statements for
 * resend/inbetween/above/acked, and the final "return (ret)" were
 * dropped by the extraction.  Confirm against the full source.
 */
3411 sctp_fs_audit(struct sctp_association *asoc)
3413 	struct sctp_tmit_chunk *chk;
3414 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3415 	int entry_flight, entry_cnt, ret;
/* Snapshot the counters we are validating, for the error printout. */
3417 	entry_flight = asoc->total_flight;
3418 	entry_cnt = asoc->total_flight_count;
3421 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3424 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3425 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3426 			SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3427 			    chk->rec.data.TSN_seq,
3431 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3433 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3435 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Any chunk still counted as in-flight or in-between means the cached
 * totals disagree with the queue: report (or panic in debug builds). */
3442 	if ((inflight > 0) || (inbetween > 0)) {
3444 		panic("Flight size-express incorrect? \n");
3446 		SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3447 		    entry_flight, entry_cnt);
3449 	SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3450 	    inflight, inbetween, resend, above, acked);
/*
 * Recover a chunk that was sent as a zero-window probe: clear its
 * window_probe flag, remove it from the flight-size accounting, and mark
 * it SCTP_DATAGRAM_RESEND so the normal retransmission path picks it up.
 * Chunks already at/above ACKED, or whose data mbuf is gone, are skipped
 * (logged only) -- those TSNs do not move back.
 *
 * NOTE(review): extraction dropped lines here too (e.g. the return after
 * the skip-log, the closing of the cc_functions call at 3475).  Verify
 * against the full source before editing.
 */
3459 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3460     struct sctp_association *asoc,
3461     struct sctp_tmit_chunk *tp1)
3463 	tp1->window_probe = 0;
3464 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3465 		/* TSN's skipped we do NOT move back. */
3466 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3467 		    tp1->whoTo->flight_size,
3469 		    (uintptr_t) tp1->whoTo,
3470 		    tp1->rec.data.TSN_seq);
3473 	/* First setup this by shrinking flight */
3474 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3475 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3478 	sctp_flight_size_decrease(tp1);
3479 	sctp_total_flight_decrease(stcb, tp1);
3480 	/* Now mark for resend */
3481 	tp1->sent = SCTP_DATAGRAM_RESEND;
3482 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3484 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3485 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3486 		    tp1->whoTo->flight_size,
3488 		    (uintptr_t) tp1->whoTo,
3489 		    tp1->rec.data.TSN_seq);
/*
 * Fast-path ("express") SACK handler: processes a SACK that carries only
 * a cumulative ack (no gap-ack blocks).  In order it: validates the
 * cum-ack against sending_seq (aborting the association on an impossible
 * ack when strict_sacks is set), frees every sent_queue chunk covered by
 * the cum-ack while updating per-net flight/RTO/CC state, wakes the
 * sending socket, updates the peer rwnd with SWS avoidance, recovers
 * window-probe chunks, restarts or stops T3-rxt timers, handles
 * shutdown-pending/-received transitions once both queues drain, and runs
 * the PR-SCTP advanced-peer-ack-point / FORWARD-TSN procedure.
 * *abort_now is the out-parameter the caller checks after an abort.
 *
 * NOTE(review): this extract lost many lines (embedded original line
 * numbers skip throughout, e.g. 3495->3497, 3865->3869); #else/#endif
 * arms, closing braces, returns, and several condition lines are absent.
 * Treat every comment below as a map of the visible code only.
 */
3494 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3495     uint32_t rwnd, int *abort_now, int ecne_seen)
3497 	struct sctp_nets *net;
3498 	struct sctp_association *asoc;
3499 	struct sctp_tmit_chunk *tp1, *tp2;
3501 	int win_probe_recovery = 0;
3502 	int win_probe_recovered = 0;
3503 	int j, done_once = 0;
3506 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3507 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3508 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3510 	SCTP_TCB_LOCK_ASSERT(stcb);
3511 #ifdef SCTP_ASOCLOG_OF_TSNS
3512 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3513 	stcb->asoc.cumack_log_at++;
3514 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3515 		stcb->asoc.cumack_log_at = 0;
3519 	old_rwnd = asoc->peers_rwnd;
/* Old (behind last_acked_seq) SACKs are discarded; an equal cum-ack is a
 * pure window-update: recompute peers_rwnd and bail (dropped lines). */
3520 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3523 	} else if (asoc->last_acked_seq == cumack) {
3524 		/* Window update sack */
3525 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3526 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3527 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3528 			/* SWS sender side engages */
3529 			asoc->peers_rwnd = 0;
3531 		if (asoc->peers_rwnd > old_rwnd) {
3536 	/* First setup for CC stuff */
3537 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3538 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3539 			/* Drag along the window_tsn for cwr's */
3540 			net->cwr_window_tsn = cumack;
3542 		net->prev_cwnd = net->cwnd;
3547 		 * CMT: Reset CUC and Fast recovery algo variables before
3550 		net->new_pseudo_cumack = 0;
3551 		net->will_exit_fast_recovery = 0;
3552 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3553 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* Sanity check: a cum-ack at/above sending_seq acks data never sent --
 * protocol violation, abort the association (strict_sacks only). */
3556 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3559 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3560 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3561 			    sctpchunk_listhead);
3562 			send_s = tp1->rec.data.TSN_seq + 1;
3564 			send_s = asoc->sending_seq;
3566 		if (SCTP_TSN_GE(cumack, send_s)) {
3568 			struct mbuf *op_err;
3569 			char msg[SCTP_DIAG_INFO_LEN];
3573 			panic("Impossible sack 1");
3578 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal then TSN %8.8x",
3580 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3581 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3582 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3587 	asoc->this_sack_highest_gap = cumack;
3588 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3589 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3590 		    stcb->asoc.overall_error_count,
3592 		    SCTP_FROM_SCTP_INDATA,
3595 	stcb->asoc.overall_error_count = 0;
/* Drop every chunk newly covered by the cum-ack, fixing up flight size,
 * RTO sampling (first transmissions only), CUCv2 pseudo-cumack flags,
 * retran counts, revoked-chunk cwnd deflation, and per-stream counters. */
3596 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3597 		/* process the new consecutive TSN first */
3598 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3599 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3600 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3601 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3603 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3605 					 * If it is less than ACKED, it is
3606 					 * now no-longer in flight. Higher
3607 					 * values may occur during marking
3609 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3610 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3611 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3612 							    tp1->whoTo->flight_size,
3614 							    (uintptr_t) tp1->whoTo,
3615 							    tp1->rec.data.TSN_seq);
3617 						sctp_flight_size_decrease(tp1);
3618 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3619 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3622 						/* sa_ignore NO_NULL_CHK */
3623 						sctp_total_flight_decrease(stcb, tp1);
3625 					tp1->whoTo->net_ack += tp1->send_size;
3626 					if (tp1->snd_count < 2) {
3628 						 * True non-retransmited
3631 						tp1->whoTo->net_ack2 +=
3634 						/* update RTO too? */
3643 							sctp_calculate_rto(stcb,
3645 							    &tp1->sent_rcv_time,
3646 							    sctp_align_safe_nocopy,
3647 							    SCTP_RTT_FROM_DATA);
3650 							if (tp1->whoTo->rto_needed == 0) {
3651 								tp1->whoTo->rto_needed = 1;
3657 					 * CMT: CUCv2 algorithm. From the
3658 					 * cumack'd TSNs, for each TSN being
3659 					 * acked for the first time, set the
3660 					 * following variables for the
3661 					 * corresp destination.
3662 					 * new_pseudo_cumack will trigger a
3664 					 * find_(rtx_)pseudo_cumack will
3665 					 * trigger search for the next
3666 					 * expected (rtx-)pseudo-cumack.
3668 					tp1->whoTo->new_pseudo_cumack = 1;
3669 					tp1->whoTo->find_pseudo_cumack = 1;
3670 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3672 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3673 						/* sa_ignore NO_NULL_CHK */
3674 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3677 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3678 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3680 				if (tp1->rec.data.chunk_was_revoked) {
3681 					/* deflate the cwnd */
3682 					tp1->whoTo->cwnd -= tp1->book_size;
3683 					tp1->rec.data.chunk_was_revoked = 0;
3685 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3686 					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3687 						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3690 						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3694 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3696 					/* sa_ignore NO_NULL_CHK */
3697 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3698 					sctp_m_freem(tp1->data);
3701 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3702 						sctp_log_sack(asoc->last_acked_seq,
3704 						    tp1->rec.data.TSN_seq,
3707 						    SCTP_LOG_FREE_SENT);
3709 				asoc->sent_queue_cnt--;
3710 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/* Wake the writer: on Apple / SO_LOCK_TESTING builds this requires
 * dropping the TCB lock to take the socket lock, so we re-validate the
 * association state afterwards before waking. */
3717 	/* sa_ignore NO_NULL_CHK */
3718 	if (stcb->sctp_socket) {
3719 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3723 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3724 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3725 			/* sa_ignore NO_NULL_CHK */
3726 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3728 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3729 		so = SCTP_INP_SO(stcb->sctp_ep);
3730 		atomic_add_int(&stcb->asoc.refcnt, 1);
3731 		SCTP_TCB_UNLOCK(stcb);
3732 		SCTP_SOCKET_LOCK(so, 1);
3733 		SCTP_TCB_LOCK(stcb);
3734 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3735 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3736 			/* assoc was freed while we were unlocked */
3737 			SCTP_SOCKET_UNLOCK(so, 1);
3741 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3742 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3743 		SCTP_SOCKET_UNLOCK(so, 1);
3746 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3747 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3751 	/* JRS - Use the congestion control given in the CC module */
3752 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3753 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3754 			if (net->net_ack2 > 0) {
3756 				 * Karn's rule applies to clearing error
3757 				 * count, this is optional.
3759 				net->error_count = 0;
3760 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
3761 					/* addr came good */
3762 					net->dest_state |= SCTP_ADDR_REACHABLE;
3763 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3764 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
3766 				if (net == stcb->asoc.primary_destination) {
3767 					if (stcb->asoc.alternate) {
3769 						 * release the alternate,
3772 						sctp_free_remote_addr(stcb->asoc.alternate);
3773 						stcb->asoc.alternate = NULL;
3776 				if (net->dest_state & SCTP_ADDR_PF) {
3777 					net->dest_state &= ~SCTP_ADDR_PF;
3778 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
3779 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
3780 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
3781 					/* Done with this net */
3784 				/* restore any doubled timers */
3785 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
3786 				if (net->RTO < stcb->asoc.minrto) {
3787 					net->RTO = stcb->asoc.minrto;
3789 				if (net->RTO > stcb->asoc.maxrto) {
3790 					net->RTO = stcb->asoc.maxrto;
3794 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
3796 	asoc->last_acked_seq = cumack;
3798 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
3799 		/* nothing left in-flight */
3800 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3801 			net->flight_size = 0;
3802 			net->partial_bytes_acked = 0;
3804 		asoc->total_flight = 0;
3805 		asoc->total_flight_count = 0;
/* Recompute peers_rwnd from the advertised rwnd minus outstanding data
 * (plus per-chunk overhead), with sender-side SWS avoidance. */
3808 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3809 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3810 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3811 		/* SWS sender side engages */
3812 		asoc->peers_rwnd = 0;
3814 	if (asoc->peers_rwnd > old_rwnd) {
3815 		win_probe_recovery = 1;
3817 	/* Now assure a timer where data is queued at */
3820 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3823 		if (win_probe_recovery && (net->window_probe)) {
3824 			win_probe_recovered = 1;
3826 			 * Find first chunk that was used with window probe
3827 			 * and clear the sent
3829 			/* sa_ignore FREED_MEMORY */
3830 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3831 				if (tp1->window_probe) {
3832 					/* move back to data send queue */
3833 					sctp_window_probe_recovery(stcb, asoc, tp1);
3838 		if (net->RTO == 0) {
3839 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
3841 			to_ticks = MSEC_TO_TICKS(net->RTO);
3843 		if (net->flight_size) {
3845 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3846 			    sctp_timeout_handler, &net->rxt_timer);
3847 			if (net->window_probe) {
3848 				net->window_probe = 0;
3851 			if (net->window_probe) {
3853 				 * In window probes we must assure a timer
3854 				 * is still running there
3856 				net->window_probe = 0;
3857 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3858 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3859 					    sctp_timeout_handler, &net->rxt_timer);
3861 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3862 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3864 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
/* Flight-size audit fallback: if totals look wrong (condition lines
 * partially dropped here) rebuild flight/retran counters from the queue. */
3869 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
3870 	    (asoc->sent_queue_retran_cnt == 0) &&
3871 	    (win_probe_recovered == 0) &&
3874 		 * huh, this should not happen unless all packets are
3875 		 * PR-SCTP and marked to skip of course.
3877 		if (sctp_fs_audit(asoc)) {
3878 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3879 				net->flight_size = 0;
3881 			asoc->total_flight = 0;
3882 			asoc->total_flight_count = 0;
3883 			asoc->sent_queue_retran_cnt = 0;
3884 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3885 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3886 					sctp_flight_size_increase(tp1);
3887 					sctp_total_flight_increase(stcb, tp1);
3888 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3889 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3896 	/**********************************/
3897 	/* Now what about shutdown issues */
3898 	/**********************************/
3899 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
3900 		/* nothing left on sendqueue.. consider done */
3902 		if ((asoc->stream_queue_cnt == 1) &&
3903 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
3904 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
3905 		    (asoc->locked_on_sending)
3907 			struct sctp_stream_queue_pending *sp;
3910 			 * I may be in a state where we got all across.. but
3911 			 * cannot write more due to a shutdown... we abort
3912 			 * since the user did not indicate EOR in this case.
3913 			 * The sp will be cleaned during free of the asoc.
3915 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
3917 			if ((sp) && (sp->length == 0)) {
3918 				/* Let cleanup code purge it */
3919 				if (sp->msg_is_complete) {
3920 					asoc->stream_queue_cnt--;
3922 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
3923 					asoc->locked_on_sending = NULL;
3924 					asoc->stream_queue_cnt--;
3928 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
3929 		    (asoc->stream_queue_cnt == 0)) {
3930 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
3931 				/* Need to abort here */
3932 				struct mbuf *op_err;
3937 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
3938 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
3939 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3941 				struct sctp_nets *netp;
3943 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
3944 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3945 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3947 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
3948 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
3949 				sctp_stop_timers_for_shutdown(stcb);
3950 				if (asoc->alternate) {
3951 					netp = asoc->alternate;
3953 					netp = asoc->primary_destination;
3955 				sctp_send_shutdown(stcb, netp);
3956 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
3957 				    stcb->sctp_ep, stcb, netp);
3958 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
3959 				    stcb->sctp_ep, stcb, netp);
3961 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
3962 		    (asoc->stream_queue_cnt == 0)) {
3963 			struct sctp_nets *netp;
3965 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
3968 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3969 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
3970 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
3971 			sctp_stop_timers_for_shutdown(stcb);
3972 			if (asoc->alternate) {
3973 				netp = asoc->alternate;
3975 				netp = asoc->primary_destination;
3977 			sctp_send_shutdown_ack(stcb, netp);
3978 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
3979 			    stcb->sctp_ep, stcb, netp);
3982 	/*********************************************/
3983 	/* Here we perform PR-SCTP procedures        */
3985 	/*********************************************/
3986 	/* C1. update advancedPeerAckPoint */
3987 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
3988 		asoc->advanced_peer_ack_point = cumack;
3990 	/* PR-Sctp issues need to be addressed too */
3991 	if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
3992 		struct sctp_tmit_chunk *lchk;
3993 		uint32_t old_adv_peer_ack_point;
3995 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
3996 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
3997 		/* C3. See if we need to send a Fwd-TSN */
3998 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4000 			 * ISSUE with ECN, see FWD-TSN processing.
4002 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4003 				send_forward_tsn(stcb, asoc);
4005 				/* try to FR fwd-tsn's that get lost too */
4006 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4007 					send_forward_tsn(stcb, asoc);
4012 			/* Assure a timer is up */
4013 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4014 			    stcb->sctp_ep, stcb, lchk->whoTo);
4017 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4018 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4020 		    stcb->asoc.peers_rwnd,
4021 		    stcb->asoc.total_flight,
4022 		    stcb->asoc.total_output_queue_size);
4027 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4028 struct sctp_tcb *stcb,
4029 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4030 int *abort_now, uint8_t flags,
4031 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4033 struct sctp_association *asoc;
4034 struct sctp_tmit_chunk *tp1, *tp2;
4035 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4036 uint16_t wake_him = 0;
4037 uint32_t send_s = 0;
4039 int accum_moved = 0;
4040 int will_exit_fast_recovery = 0;
4041 uint32_t a_rwnd, old_rwnd;
4042 int win_probe_recovery = 0;
4043 int win_probe_recovered = 0;
4044 struct sctp_nets *net = NULL;
4047 uint8_t reneged_all = 0;
4048 uint8_t cmt_dac_flag;
4051 * we take any chance we can to service our queues since we cannot
4052 * get awoken when the socket is read from :<
4055 * Now perform the actual SACK handling: 1) Verify that it is not an
4056 * old sack, if so discard. 2) If there is nothing left in the send
4057 * queue (cum-ack is equal to last acked) then you have a duplicate
4058 * too, update any rwnd change and verify no timers are running.
4059 * then return. 3) Process any new consequtive data i.e. cum-ack
4060 * moved process these first and note that it moved. 4) Process any
4061 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4062 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4063 * sync up flightsizes and things, stop all timers and also check
4064 * for shutdown_pending state. If so then go ahead and send off the
4065 * shutdown. If in shutdown recv, send off the shutdown-ack and
4066 * start that timer, Ret. 9) Strike any non-acked things and do FR
4067 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4068 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4069 * if in shutdown_recv state.
4071 SCTP_TCB_LOCK_ASSERT(stcb);
4073 this_sack_lowest_newack = 0;
4074 SCTP_STAT_INCR(sctps_slowpath_sack);
4076 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4077 #ifdef SCTP_ASOCLOG_OF_TSNS
4078 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4079 stcb->asoc.cumack_log_at++;
4080 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4081 stcb->asoc.cumack_log_at = 0;
4086 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4087 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4088 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4090 old_rwnd = stcb->asoc.peers_rwnd;
4091 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4092 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4093 stcb->asoc.overall_error_count,
4095 SCTP_FROM_SCTP_INDATA,
4098 stcb->asoc.overall_error_count = 0;
4100 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4101 sctp_log_sack(asoc->last_acked_seq,
4108 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4110 uint32_t *dupdata, dblock;
4112 for (i = 0; i < num_dup; i++) {
4113 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4114 sizeof(uint32_t), (uint8_t *) & dblock);
4115 if (dupdata == NULL) {
4118 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4121 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4123 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4124 tp1 = TAILQ_LAST(&asoc->sent_queue,
4125 sctpchunk_listhead);
4126 send_s = tp1->rec.data.TSN_seq + 1;
4129 send_s = asoc->sending_seq;
4131 if (SCTP_TSN_GE(cum_ack, send_s)) {
4132 struct mbuf *op_err;
4133 char msg[SCTP_DIAG_INFO_LEN];
4136 * no way, we have not even sent this TSN out yet.
4137 * Peer is hopelessly messed up with us.
4139 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4142 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4143 tp1->rec.data.TSN_seq, (void *)tp1);
4148 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal then TSN %8.8x",
4150 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4151 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4152 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4156 /**********************/
4157 /* 1) check the range */
4158 /**********************/
4159 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4160 /* acking something behind */
4163 /* update the Rwnd of the peer */
4164 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4165 TAILQ_EMPTY(&asoc->send_queue) &&
4166 (asoc->stream_queue_cnt == 0)) {
4167 /* nothing left on send/sent and strmq */
4168 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4169 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4170 asoc->peers_rwnd, 0, 0, a_rwnd);
4172 asoc->peers_rwnd = a_rwnd;
4173 if (asoc->sent_queue_retran_cnt) {
4174 asoc->sent_queue_retran_cnt = 0;
4176 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4177 /* SWS sender side engages */
4178 asoc->peers_rwnd = 0;
4180 /* stop any timers */
4181 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4182 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4183 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4184 net->partial_bytes_acked = 0;
4185 net->flight_size = 0;
4187 asoc->total_flight = 0;
4188 asoc->total_flight_count = 0;
4192 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4193 * things. The total byte count acked is tracked in netAckSz AND
4194 * netAck2 is used to track the total bytes acked that are un-
4195 * amibguious and were never retransmitted. We track these on a per
4196 * destination address basis.
4198 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4199 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4200 /* Drag along the window_tsn for cwr's */
4201 net->cwr_window_tsn = cum_ack;
4203 net->prev_cwnd = net->cwnd;
4208 * CMT: Reset CUC and Fast recovery algo variables before
4211 net->new_pseudo_cumack = 0;
4212 net->will_exit_fast_recovery = 0;
4213 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4214 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4217 /* process the new consecutive TSN first */
4218 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4219 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4220 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4222 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4224 * If it is less than ACKED, it is
4225 * now no-longer in flight. Higher
4226 * values may occur during marking
4228 if ((tp1->whoTo->dest_state &
4229 SCTP_ADDR_UNCONFIRMED) &&
4230 (tp1->snd_count < 2)) {
4232 * If there was no retran
4233 * and the address is
4234 * un-confirmed and we sent
4236 * sacked.. its confirmed,
4239 tp1->whoTo->dest_state &=
4240 ~SCTP_ADDR_UNCONFIRMED;
4242 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4243 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4244 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4245 tp1->whoTo->flight_size,
4247 (uintptr_t) tp1->whoTo,
4248 tp1->rec.data.TSN_seq);
4250 sctp_flight_size_decrease(tp1);
4251 sctp_total_flight_decrease(stcb, tp1);
4252 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4253 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4257 tp1->whoTo->net_ack += tp1->send_size;
4259 /* CMT SFR and DAC algos */
4260 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4261 tp1->whoTo->saw_newack = 1;
4263 if (tp1->snd_count < 2) {
4265 * True non-retransmited
4268 tp1->whoTo->net_ack2 +=
4271 /* update RTO too? */
4275 sctp_calculate_rto(stcb,
4277 &tp1->sent_rcv_time,
4278 sctp_align_safe_nocopy,
4279 SCTP_RTT_FROM_DATA);
4282 if (tp1->whoTo->rto_needed == 0) {
4283 tp1->whoTo->rto_needed = 1;
4289 * CMT: CUCv2 algorithm. From the
4290 * cumack'd TSNs, for each TSN being
4291 * acked for the first time, set the
4292 * following variables for the
4293 * corresp destination.
4294 * new_pseudo_cumack will trigger a
4296 * find_(rtx_)pseudo_cumack will
4297 * trigger search for the next
4298 * expected (rtx-)pseudo-cumack.
4300 tp1->whoTo->new_pseudo_cumack = 1;
4301 tp1->whoTo->find_pseudo_cumack = 1;
4302 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4305 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4306 sctp_log_sack(asoc->last_acked_seq,
4308 tp1->rec.data.TSN_seq,
4311 SCTP_LOG_TSN_ACKED);
4313 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4314 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4317 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4318 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4319 #ifdef SCTP_AUDITING_ENABLED
4320 sctp_audit_log(0xB3,
4321 (asoc->sent_queue_retran_cnt & 0x000000ff));
4324 if (tp1->rec.data.chunk_was_revoked) {
4325 /* deflate the cwnd */
4326 tp1->whoTo->cwnd -= tp1->book_size;
4327 tp1->rec.data.chunk_was_revoked = 0;
4329 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4330 tp1->sent = SCTP_DATAGRAM_ACKED;
4337 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4338 /* always set this up to cum-ack */
4339 asoc->this_sack_highest_gap = last_tsn;
4341 if ((num_seg > 0) || (num_nr_seg > 0)) {
4344 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4345 * to be greater than the cumack. Also reset saw_newack to 0
4348 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4349 net->saw_newack = 0;
4350 net->this_sack_highest_newack = last_tsn;
4354 * thisSackHighestGap will increase while handling NEW
4355 * segments this_sack_highest_newack will increase while
4356 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4357 * used for CMT DAC algo. saw_newack will also change.
4359 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4360 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4361 num_seg, num_nr_seg, &rto_ok)) {
4364 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4366 * validate the biggest_tsn_acked in the gap acks if
4367 * strict adherence is wanted.
4369 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4371 * peer is either confused or we are under
4372 * attack. We must abort.
4374 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4375 biggest_tsn_acked, send_s);
4380 /*******************************************/
4381 /* cancel ALL T3-send timer if accum moved */
4382 /*******************************************/
4383 if (asoc->sctp_cmt_on_off > 0) {
4384 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4385 if (net->new_pseudo_cumack)
4386 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4388 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4393 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4394 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4395 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4399 /********************************************/
4400 /* drop the acked chunks from the sentqueue */
4401 /********************************************/
4402 asoc->last_acked_seq = cum_ack;
4404 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4405 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4408 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4409 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4410 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4413 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4417 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4418 if (PR_SCTP_ENABLED(tp1->flags)) {
4419 if (asoc->pr_sctp_cnt != 0)
4420 asoc->pr_sctp_cnt--;
4422 asoc->sent_queue_cnt--;
4424 /* sa_ignore NO_NULL_CHK */
4425 sctp_free_bufspace(stcb, asoc, tp1, 1);
4426 sctp_m_freem(tp1->data);
4428 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4429 asoc->sent_queue_cnt_removeable--;
4432 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4433 sctp_log_sack(asoc->last_acked_seq,
4435 tp1->rec.data.TSN_seq,
4438 SCTP_LOG_FREE_SENT);
4440 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4443 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4445 panic("Warning flight size is postive and should be 0");
4447 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4448 asoc->total_flight);
4450 asoc->total_flight = 0;
4452 /* sa_ignore NO_NULL_CHK */
4453 if ((wake_him) && (stcb->sctp_socket)) {
4454 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4458 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4459 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4460 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4462 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4463 so = SCTP_INP_SO(stcb->sctp_ep);
4464 atomic_add_int(&stcb->asoc.refcnt, 1);
4465 SCTP_TCB_UNLOCK(stcb);
4466 SCTP_SOCKET_LOCK(so, 1);
4467 SCTP_TCB_LOCK(stcb);
4468 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4469 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4470 /* assoc was freed while we were unlocked */
4471 SCTP_SOCKET_UNLOCK(so, 1);
4475 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4476 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4477 SCTP_SOCKET_UNLOCK(so, 1);
4480 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4481 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4485 if (asoc->fast_retran_loss_recovery && accum_moved) {
4486 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4487 /* Setup so we will exit RFC2582 fast recovery */
4488 will_exit_fast_recovery = 1;
4492 * Check for revoked fragments:
4494 * if Previous sack - Had no frags then we can't have any revoked if
4495 * Previous sack - Had frag's then - If we now have frags aka
4496 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4497 * some of them. else - The peer revoked all ACKED fragments, since
4498 * we had some before and now we have NONE.
4502 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4503 asoc->saw_sack_with_frags = 1;
4504 } else if (asoc->saw_sack_with_frags) {
4505 int cnt_revoked = 0;
4507 /* Peer revoked all dg's marked or acked */
4508 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4509 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4510 tp1->sent = SCTP_DATAGRAM_SENT;
4511 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4512 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4513 tp1->whoTo->flight_size,
4515 (uintptr_t) tp1->whoTo,
4516 tp1->rec.data.TSN_seq);
4518 sctp_flight_size_increase(tp1);
4519 sctp_total_flight_increase(stcb, tp1);
4520 tp1->rec.data.chunk_was_revoked = 1;
4522 * To ensure that this increase in
4523 * flightsize, which is artificial, does not
4524 * throttle the sender, we also increase the
4525 * cwnd artificially.
4527 tp1->whoTo->cwnd += tp1->book_size;
4534 asoc->saw_sack_with_frags = 0;
4537 asoc->saw_sack_with_nr_frags = 1;
4539 asoc->saw_sack_with_nr_frags = 0;
4541 /* JRS - Use the congestion control given in the CC module */
4542 if (ecne_seen == 0) {
4543 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4544 if (net->net_ack2 > 0) {
4546 * Karn's rule applies to clearing error
4547 * count, this is optional.
4549 net->error_count = 0;
4550 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4551 /* addr came good */
4552 net->dest_state |= SCTP_ADDR_REACHABLE;
4553 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4554 0, (void *)net, SCTP_SO_NOT_LOCKED);
4556 if (net == stcb->asoc.primary_destination) {
4557 if (stcb->asoc.alternate) {
4559 * release the alternate,
4562 sctp_free_remote_addr(stcb->asoc.alternate);
4563 stcb->asoc.alternate = NULL;
4566 if (net->dest_state & SCTP_ADDR_PF) {
4567 net->dest_state &= ~SCTP_ADDR_PF;
4568 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4569 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4570 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4571 /* Done with this net */
4574 /* restore any doubled timers */
4575 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4576 if (net->RTO < stcb->asoc.minrto) {
4577 net->RTO = stcb->asoc.minrto;
4579 if (net->RTO > stcb->asoc.maxrto) {
4580 net->RTO = stcb->asoc.maxrto;
4584 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4586 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4587 /* nothing left in-flight */
4588 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4589 /* stop all timers */
4590 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4591 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4592 net->flight_size = 0;
4593 net->partial_bytes_acked = 0;
4595 asoc->total_flight = 0;
4596 asoc->total_flight_count = 0;
4598 /**********************************/
4599 /* Now what about shutdown issues */
4600 /**********************************/
4601 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4602 /* nothing left on sendqueue.. consider done */
4603 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4604 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4605 asoc->peers_rwnd, 0, 0, a_rwnd);
4607 asoc->peers_rwnd = a_rwnd;
4608 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4609 /* SWS sender side engages */
4610 asoc->peers_rwnd = 0;
4613 if ((asoc->stream_queue_cnt == 1) &&
4614 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4615 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4616 (asoc->locked_on_sending)
4618 struct sctp_stream_queue_pending *sp;
4621 * I may be in a state where we got all across.. but
4622 * cannot write more due to a shutdown... we abort
4623 * since the user did not indicate EOR in this case.
4625 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4627 if ((sp) && (sp->length == 0)) {
4628 asoc->locked_on_sending = NULL;
4629 if (sp->msg_is_complete) {
4630 asoc->stream_queue_cnt--;
4632 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4633 asoc->stream_queue_cnt--;
4637 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4638 (asoc->stream_queue_cnt == 0)) {
4639 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4640 /* Need to abort here */
4641 struct mbuf *op_err;
4646 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4647 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4648 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4651 struct sctp_nets *netp;
4653 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4654 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4655 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4657 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4658 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4659 sctp_stop_timers_for_shutdown(stcb);
4660 if (asoc->alternate) {
4661 netp = asoc->alternate;
4663 netp = asoc->primary_destination;
4665 sctp_send_shutdown(stcb, netp);
4666 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4667 stcb->sctp_ep, stcb, netp);
4668 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4669 stcb->sctp_ep, stcb, netp);
4672 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4673 (asoc->stream_queue_cnt == 0)) {
4674 struct sctp_nets *netp;
4676 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4679 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4680 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4681 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4682 sctp_stop_timers_for_shutdown(stcb);
4683 if (asoc->alternate) {
4684 netp = asoc->alternate;
4686 netp = asoc->primary_destination;
4688 sctp_send_shutdown_ack(stcb, netp);
4689 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4690 stcb->sctp_ep, stcb, netp);
4695 * Now here we are going to recycle net_ack for a different use...
4698 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4703 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4704 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4705 * automatically ensure that.
4707 if ((asoc->sctp_cmt_on_off > 0) &&
4708 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4709 (cmt_dac_flag == 0)) {
4710 this_sack_lowest_newack = cum_ack;
4712 if ((num_seg > 0) || (num_nr_seg > 0)) {
4713 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4714 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4716 /* JRS - Use the congestion control given in the CC module */
4717 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4719 /* Now are we exiting loss recovery ? */
4720 if (will_exit_fast_recovery) {
4721 /* Ok, we must exit fast recovery */
4722 asoc->fast_retran_loss_recovery = 0;
4724 if ((asoc->sat_t3_loss_recovery) &&
4725 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4726 /* end satellite t3 loss recovery */
4727 asoc->sat_t3_loss_recovery = 0;
4732 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4733 if (net->will_exit_fast_recovery) {
4734 /* Ok, we must exit fast recovery */
4735 net->fast_retran_loss_recovery = 0;
4739 /* Adjust and set the new rwnd value */
4740 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4741 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4742 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4744 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4745 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4746 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4747 /* SWS sender side engages */
4748 asoc->peers_rwnd = 0;
4750 if (asoc->peers_rwnd > old_rwnd) {
4751 win_probe_recovery = 1;
4754 * Now we must setup so we have a timer up for anyone with
4760 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4761 if (win_probe_recovery && (net->window_probe)) {
4762 win_probe_recovered = 1;
4764 * Find first chunk that was used with
4765 * window probe and clear the event. Put
4766 * it back into the send queue as if has
4769 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4770 if (tp1->window_probe) {
4771 sctp_window_probe_recovery(stcb, asoc, tp1);
4776 if (net->flight_size) {
4778 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4779 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4780 stcb->sctp_ep, stcb, net);
4782 if (net->window_probe) {
4783 net->window_probe = 0;
4786 if (net->window_probe) {
4788 * In window probes we must assure a timer
4789 * is still running there
4791 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4792 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4793 stcb->sctp_ep, stcb, net);
4796 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4797 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4799 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4804 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4805 (asoc->sent_queue_retran_cnt == 0) &&
4806 (win_probe_recovered == 0) &&
4809 * huh, this should not happen unless all packets are
4810 * PR-SCTP and marked to skip of course.
4812 if (sctp_fs_audit(asoc)) {
4813 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4814 net->flight_size = 0;
4816 asoc->total_flight = 0;
4817 asoc->total_flight_count = 0;
4818 asoc->sent_queue_retran_cnt = 0;
4819 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4820 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4821 sctp_flight_size_increase(tp1);
4822 sctp_total_flight_increase(stcb, tp1);
4823 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4824 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4831 /*********************************************/
4832 /* Here we perform PR-SCTP procedures */
4834 /*********************************************/
4835 /* C1. update advancedPeerAckPoint */
4836 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
4837 asoc->advanced_peer_ack_point = cum_ack;
4839 /* C2. try to further move advancedPeerAckPoint ahead */
4840 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4841 struct sctp_tmit_chunk *lchk;
4842 uint32_t old_adv_peer_ack_point;
4844 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4845 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4846 /* C3. See if we need to send a Fwd-TSN */
4847 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
4849 * ISSUE with ECN, see FWD-TSN processing.
4851 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4852 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4853 0xee, cum_ack, asoc->advanced_peer_ack_point,
4854 old_adv_peer_ack_point);
4856 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4857 send_forward_tsn(stcb, asoc);
4859 /* try to FR fwd-tsn's that get lost too */
4860 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4861 send_forward_tsn(stcb, asoc);
4866 /* Assure a timer is up */
4867 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4868 stcb->sctp_ep, stcb, lchk->whoTo);
4871 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4872 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4874 stcb->asoc.peers_rwnd,
4875 stcb->asoc.total_flight,
4876 stcb->asoc.total_output_queue_size);
4881 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
4884 uint32_t cum_ack, a_rwnd;
4886 cum_ack = ntohl(cp->cumulative_tsn_ack);
4887 /* Arrange so a_rwnd does NOT change */
4888 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
4890 /* Now call the express sack handling */
4891 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
4895 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4896 struct sctp_stream_in *strmin)
4898 struct sctp_queued_to_read *ctl, *nctl;
4899 struct sctp_association *asoc;
4903 tt = strmin->last_sequence_delivered;
4905 * First deliver anything prior to and including the stream no that
4908 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4909 if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
4910 /* this is deliverable now */
4911 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4912 /* subtract pending on streams */
4913 asoc->size_on_all_streams -= ctl->length;
4914 sctp_ucount_decr(asoc->cnt_on_all_streams);
4915 /* deliver it to at least the delivery-q */
4916 if (stcb->sctp_socket) {
4917 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4918 sctp_add_to_readq(stcb->sctp_ep, stcb,
4920 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4923 /* no more delivery now. */
4928 * now we must deliver things in queue the normal way if any are
4931 tt = strmin->last_sequence_delivered + 1;
4932 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4933 if (tt == ctl->sinfo_ssn) {
4934 /* this is deliverable now */
4935 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4936 /* subtract pending on streams */
4937 asoc->size_on_all_streams -= ctl->length;
4938 sctp_ucount_decr(asoc->cnt_on_all_streams);
4939 /* deliver it to at least the delivery-q */
4940 strmin->last_sequence_delivered = ctl->sinfo_ssn;
4941 if (stcb->sctp_socket) {
4942 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4943 sctp_add_to_readq(stcb->sctp_ep, stcb,
4945 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4948 tt = strmin->last_sequence_delivered + 1;
4956 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
4957 struct sctp_association *asoc,
4958 uint16_t stream, uint16_t seq)
4960 struct sctp_tmit_chunk *chk, *nchk;
4962 /* For each one on here see if we need to toss it */
4964 * For now large messages held on the reasmqueue that are complete
4965 * will be tossed too. We could in theory do more work to spin
4966 * through and stop after dumping one msg aka seeing the start of a
4967 * new msg at the head, and call the delivery function... to see if
4968 * it can be delivered... But for now we just dump everything on the
4971 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
4973 * Do not toss it if on a different stream or marked for
4974 * unordered delivery in which case the stream sequence
4975 * number has no meaning.
4977 if ((chk->rec.data.stream_number != stream) ||
4978 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
4981 if (chk->rec.data.stream_seq == seq) {
4982 /* It needs to be tossed */
4983 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
4984 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
4985 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
4986 asoc->str_of_pdapi = chk->rec.data.stream_number;
4987 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
4988 asoc->fragment_flags = chk->rec.data.rcv_flags;
4990 asoc->size_on_reasm_queue -= chk->send_size;
4991 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
4993 /* Clear up any stream problem */
4994 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
4995 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
4997 * We must dump forward this streams
4998 * sequence number if the chunk is not
4999 * unordered that is being skipped. There is
5000 * a chance that if the peer does not
5001 * include the last fragment in its FWD-TSN
5002 * we WILL have a problem here since you
5003 * would have a partial chunk in queue that
5004 * may not be deliverable. Also if a Partial
5005 * delivery API as started the user may get
5006 * a partial chunk. The next read returning
5007 * a new chunk... really ugly but I see no
5008 * way around it! Maybe a notify??
5010 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5013 sctp_m_freem(chk->data);
5016 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5017 } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5019 * If the stream_seq is > than the purging one, we
5029 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5030 struct sctp_forward_tsn_chunk *fwd,
5031 int *abort_flag, struct mbuf *m, int offset)
5033 /* The pr-sctp fwd tsn */
5035 * here we will perform all the data receiver side steps for
5036 * processing FwdTSN, as required in by pr-sctp draft:
5038 * Assume we get FwdTSN(x):
5040 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5041 * others we have 3) examine and update re-ordering queue on
5042 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5043 * report where we are.
5045 struct sctp_association *asoc;
5046 uint32_t new_cum_tsn, gap;
5047 unsigned int i, fwd_sz, m_size;
5049 struct sctp_stream_in *strm;
5050 struct sctp_tmit_chunk *chk, *nchk;
5051 struct sctp_queued_to_read *ctl, *sv;
5054 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5055 SCTPDBG(SCTP_DEBUG_INDATA1,
5056 "Bad size too small/big fwd-tsn\n");
5059 m_size = (stcb->asoc.mapping_array_size << 3);
5060 /*************************************************************/
5061 /* 1. Here we update local cumTSN and shift the bitmap array */
5062 /*************************************************************/
5063 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5065 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5066 /* Already got there ... */
5070 * now we know the new TSN is more advanced, let's find the actual
5073 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5074 asoc->cumulative_tsn = new_cum_tsn;
5075 if (gap >= m_size) {
5076 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5077 struct mbuf *op_err;
5078 char msg[SCTP_DIAG_INFO_LEN];
5081 * out of range (of single byte chunks in the rwnd I
5082 * give out). This must be an attacker.
5085 snprintf(msg, sizeof(msg),
5086 "New cum ack %8.8x too high, highest TSN %8.8x",
5087 new_cum_tsn, asoc->highest_tsn_inside_map);
5088 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5089 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5090 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5093 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5095 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5096 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5097 asoc->highest_tsn_inside_map = new_cum_tsn;
5099 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5100 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5102 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5103 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5106 SCTP_TCB_LOCK_ASSERT(stcb);
5107 for (i = 0; i <= gap; i++) {
5108 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5109 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5110 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5111 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5112 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5117 /*************************************************************/
5118 /* 2. Clear up re-assembly queue */
5119 /*************************************************************/
5121 * First service it if pd-api is up, just in case we can progress it
5124 if (asoc->fragmented_delivery_inprogress) {
5125 sctp_service_reassembly(stcb, asoc);
5127 /* For each one on here see if we need to toss it */
5129 * For now large messages held on the reasmqueue that are complete
5130 * will be tossed too. We could in theory do more work to spin
5131 * through and stop after dumping one msg aka seeing the start of a
5132 * new msg at the head, and call the delivery function... to see if
5133 * it can be delivered... But for now we just dump everything on the
5136 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5137 if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5138 /* It needs to be tossed */
5139 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5140 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5141 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5142 asoc->str_of_pdapi = chk->rec.data.stream_number;
5143 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5144 asoc->fragment_flags = chk->rec.data.rcv_flags;
5146 asoc->size_on_reasm_queue -= chk->send_size;
5147 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5149 /* Clear up any stream problem */
5150 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5151 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5153 * We must dump forward this streams
5154 * sequence number if the chunk is not
5155 * unordered that is being skipped. There is
5156 * a chance that if the peer does not
5157 * include the last fragment in its FWD-TSN
5158 * we WILL have a problem here since you
5159 * would have a partial chunk in queue that
5160 * may not be deliverable. Also if a Partial
5161 * delivery API as started the user may get
5162 * a partial chunk. The next read returning
5163 * a new chunk... really ugly but I see no
5164 * way around it! Maybe a notify??
5166 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5169 sctp_m_freem(chk->data);
5172 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5175 * Ok we have gone beyond the end of the fwd-tsn's
5181 /*******************************************************/
5182 /* 3. Update the PR-stream re-ordering queues and fix */
5183 /* delivery issues as needed. */
5184 /*******************************************************/
5185 fwd_sz -= sizeof(*fwd);
5188 unsigned int num_str;
5189 struct sctp_strseq *stseq, strseqbuf;
5191 offset += sizeof(*fwd);
5193 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5194 num_str = fwd_sz / sizeof(struct sctp_strseq);
5195 for (i = 0; i < num_str; i++) {
5198 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5199 sizeof(struct sctp_strseq),
5200 (uint8_t *) & strseqbuf);
5201 offset += sizeof(struct sctp_strseq);
5202 if (stseq == NULL) {
5206 st = ntohs(stseq->stream);
5208 st = ntohs(stseq->sequence);
5209 stseq->sequence = st;
5214 * Ok we now look for the stream/seq on the read
5215 * queue where its not all delivered. If we find it
5216 * we transmute the read entry into a PDI_ABORTED.
5218 if (stseq->stream >= asoc->streamincnt) {
5219 /* screwed up streams, stop! */
5222 if ((asoc->str_of_pdapi == stseq->stream) &&
5223 (asoc->ssn_of_pdapi == stseq->sequence)) {
5225 * If this is the one we were partially
5226 * delivering now then we no longer are.
5227 * Note this will change with the reassembly
5230 asoc->fragmented_delivery_inprogress = 0;
5232 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5233 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5234 if ((ctl->sinfo_stream == stseq->stream) &&
5235 (ctl->sinfo_ssn == stseq->sequence)) {
5236 str_seq = (stseq->stream << 16) | stseq->sequence;
5238 ctl->pdapi_aborted = 1;
5239 sv = stcb->asoc.control_pdapi;
5240 stcb->asoc.control_pdapi = ctl;
5241 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5243 SCTP_PARTIAL_DELIVERY_ABORTED,
5245 SCTP_SO_NOT_LOCKED);
5246 stcb->asoc.control_pdapi = sv;
5248 } else if ((ctl->sinfo_stream == stseq->stream) &&
5249 SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5250 /* We are past our victim SSN */
5254 strm = &asoc->strmin[stseq->stream];
5255 if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5256 /* Update the sequence number */
5257 strm->last_sequence_delivered = stseq->sequence;
5259 /* now kick the stream the new way */
5260 /* sa_ignore NO_NULL_CHK */
5261 sctp_kick_prsctp_reorder_queue(stcb, strm);
5263 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5266 * Now slide thing forward.
5268 sctp_slide_mapping_arrays(stcb);
5270 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5271 /* now lets kick out and check for more fragmented delivery */
5272 /* sa_ignore NO_NULL_CHK */
5273 sctp_deliver_reasm_check(stcb, &stcb->asoc);