 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 * This code is derived from software contributed to The DragonFly Project
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * $DragonFly: src/sys/netinet/tcp_sack.c,v 1.8 2008/08/15 21:37:16 nth Exp $
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/thread.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
	TAILQ_ENTRY(sackblock) sblk_list;

#define	MAXSAVEDBLOCKS	8			/* per connection limit */

static int	insert_block(struct scoreboard *scb,
		    const struct raw_sackblock *raw_sb, boolean_t *update);

static MALLOC_DEFINE(M_SACKBLOCK, "sblk", "sackblock struct");
 * Per-tcpcb initialization.
tcp_sack_tcpcb_init(struct tcpcb *tp)
	struct scoreboard *scb = &tp->scb;

	TAILQ_INIT(&scb->sackblocks);
	scb->lastfound = NULL;
 * Find the SACK block containing or immediately preceding "seq".
 * The boolean result indicates whether the sequence is actually
 * contained in the SACK block.
sack_block_lookup(struct scoreboard *scb, tcp_seq seq, struct sackblock **sb)
	struct sackblock *hint = scb->lastfound;
	struct sackblock *cur, *last, *prev;

	if (TAILQ_EMPTY(&scb->sackblocks)) {
		/* No hint.  Search from start to end. */
		cur = TAILQ_FIRST(&scb->sackblocks);
		prev = TAILQ_LAST(&scb->sackblocks, sackblock_list);
		if (SEQ_GEQ(seq, hint->sblk_start)) {
			/* Search from hint to end of list. */
			prev = TAILQ_LAST(&scb->sackblocks, sackblock_list);
			/* Search from front of list to hint. */
			cur = TAILQ_FIRST(&scb->sackblocks);
			prev = TAILQ_PREV(hint, sackblock_list, sblk_list);
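	/*
	 * Linear scan from "cur" toward "last": the first block whose end
	 * lies beyond "seq" either contains "seq" or immediately follows
	 * it, in which case the preceding block is the answer.
	 */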
		if (SEQ_GT(cur->sblk_end, seq)) {
			if (SEQ_GEQ(seq, cur->sblk_start)) {
				*sb = scb->lastfound = cur;
				*sb = scb->lastfound =
				    TAILQ_PREV(cur, sackblock_list, sblk_list);
		cur = TAILQ_NEXT(cur, sblk_list);
	} while (cur != last);

	*sb = scb->lastfound = prev;
 * Allocate a SACK block.
static __inline struct sackblock *
alloc_sackblock(struct scoreboard *scb, const struct raw_sackblock *raw_sb)
	struct sackblock *sb;
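	/*
	 * The scoreboard keeps a one-entry cache of a previously freed
	 * block; reusing it avoids a kmalloc() in the common free/alloc
	 * ping-pong during loss recovery.
	 */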
	if (scb->freecache != NULL) {
		scb->freecache = NULL;
		tcpstat.tcps_sacksbfast++;
		sb = kmalloc(sizeof(struct sackblock), M_SACKBLOCK, M_NOWAIT);
			tcpstat.tcps_sacksbfailed++;
	sb->sblk_start = raw_sb->rblk_start;
	sb->sblk_end = raw_sb->rblk_end;
static __inline struct sackblock *
alloc_sackblock_limit(struct scoreboard *scb,
    const struct raw_sackblock *raw_sb)
	if (scb->nblocks == MAXSAVEDBLOCKS) {
		 * Should try to kick out older blocks XXX JH
		 * May be able to coalesce with existing block.
		 * Or, go other way and free all blocks if we hit
		tcpstat.tcps_sacksboverflow++;
	return alloc_sackblock(scb, raw_sb);
free_sackblock(struct scoreboard *scb, struct sackblock *s)
	if (scb->freecache == NULL) {
		/* YYY Maybe use the latest freed block? */
	kfree(s, M_SACKBLOCK);
 * Free up SACK blocks for data that's been acked.
tcp_sack_ack_blocks(struct scoreboard *scb, tcp_seq th_ack)
	struct sackblock *sb, *nb;

	sb = TAILQ_FIRST(&scb->sackblocks);
	while (sb && SEQ_LEQ(sb->sblk_end, th_ack)) {
		nb = TAILQ_NEXT(sb, sblk_list);
		if (scb->lastfound == sb)
			scb->lastfound = NULL;
		TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
		free_sackblock(scb, sb);
		KASSERT(scb->nblocks >= 0,
		    ("SACK block count underflow: %d < 0", scb->nblocks));
	if (sb && SEQ_GT(th_ack, sb->sblk_start))
		sb->sblk_start = th_ack;	/* other side reneged? XXX */
 * Delete and free SACK blocks saved in scoreboard.
tcp_sack_cleanup(struct scoreboard *scb)
	struct sackblock *sb, *nb;

	TAILQ_FOREACH_MUTABLE(sb, &scb->sackblocks, sblk_list, nb) {
		free_sackblock(scb, sb);
	KASSERT(scb->nblocks == 0,
237 ("SACK block %d count not zero", scb->nblocks));
	TAILQ_INIT(&scb->sackblocks);
	scb->lastfound = NULL;
 * Delete and free SACK blocks saved in scoreboard.
 * Also delete the one-slot block cache.
tcp_sack_destroy(struct scoreboard *scb)
	tcp_sack_cleanup(scb);
	if (scb->freecache != NULL) {
		kfree(scb->freecache, M_SACKBLOCK);
		scb->freecache = NULL;
 * Clean up the reported SACK block information.
tcp_sack_report_cleanup(struct tcpcb *tp)
	    ~(TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG | TSACK_F_SACKLEFT);
	tp->reportblk.rblk_start = tp->reportblk.rblk_end;
 * Returns	0 if not D-SACK block,
 *		2 if duplicate of out-of-order D-SACK block.
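 *
 * E.g. a first block that starts below snd_una reports data the cumulative
 * ACK already covers, i.e. the RFC 2883 D-SACK case.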
tcp_sack_ndsack_blocks(struct raw_sackblock *blocks, const int numblocks,
	if (SEQ_LT(blocks[0].rblk_start, snd_una))

	/* block 0 inside block 1 */
	    SEQ_GEQ(blocks[0].rblk_start, blocks[1].rblk_start) &&
	    SEQ_LEQ(blocks[0].rblk_end, blocks[1].rblk_end))
 * Update scoreboard on new incoming ACK.
tcp_sack_add_blocks(struct tcpcb *tp, struct tcpopt *to)
	const int numblocks = to->to_nsackblocks;
	struct raw_sackblock *blocks = to->to_sackblocks;
	struct scoreboard *scb = &tp->scb;

	if (tcp_sack_ndsack_blocks(blocks, numblocks, tp->snd_una) > 0)

	to->to_flags |= TOF_SACK_REDUNDANT;
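	/*
	 * Walk the reported blocks; each must lie within the currently
	 * outstanding data [snd_una, snd_max] before it is folded into
	 * the scoreboard.
	 */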
	for (i = startblock; i < numblocks; i++) {
		struct raw_sackblock *newsackblock = &blocks[i];

		/* Guard against ACK reordering */
		if (SEQ_LT(newsackblock->rblk_start, tp->snd_una))

		/* Don't accept bad SACK blocks */
		if (SEQ_GT(newsackblock->rblk_end, tp->snd_max)) {
			tcpstat.tcps_rcvbadsackopt++;
			break;		/* skip all other blocks */
		tcpstat.tcps_sacksbupdate++;

		error = insert_block(scb, newsackblock, &update);
			to->to_flags &= ~TOF_SACK_REDUNDANT;
tcp_sack_update_scoreboard(struct tcpcb *tp, struct tcpopt *to)
	struct scoreboard *scb = &tp->scb;
	int rexmt_high_update = 0;
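	/*
	 * Drop scoreboard blocks now covered by the cumulative ACK, merge
	 * in the newly reported blocks, then recompute the lost-sequence
	 * estimate used for SACK-based loss detection.
	 */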
	tcp_sack_ack_blocks(scb, tp->snd_una);
	tcp_sack_add_blocks(tp, to);
	tcp_sack_update_lostseq(scb, tp->snd_una, tp->t_maxseg,
	if (SEQ_LT(tp->rexmt_high, tp->snd_una)) {
		tp->rexmt_high = tp->snd_una;
		rexmt_high_update = 1;
	if (tp->sack_flags & TSACK_F_SACKRESCUED) {
		if (SEQ_LT(tp->rexmt_rescue, tp->snd_una)) {
			tp->sack_flags &= ~TSACK_F_SACKRESCUED;
		} else if (tcp_aggressive_rescuesack && rexmt_high_update &&
		    SEQ_LT(tp->rexmt_rescue, tp->rexmt_high)) {
			/* Drag RescueRxt along with HighRxt */
			tp->rexmt_rescue = tp->rexmt_high;
 * Insert SACK block into sender's scoreboard.
insert_block(struct scoreboard *scb, const struct raw_sackblock *raw_sb,
	struct sackblock *sb, *workingblock;
	boolean_t overlap_front;

	if (TAILQ_EMPTY(&scb->sackblocks)) {
		struct sackblock *newblock;
		KASSERT(scb->nblocks == 0, ("empty scb w/ blocks"));
		newblock = alloc_sackblock(scb, raw_sb);
		if (newblock == NULL)
		TAILQ_INSERT_HEAD(&scb->sackblocks, newblock, sblk_list);

	KASSERT(scb->nblocks > 0, ("insert_block() called w/ no blocks"));
	KASSERT(scb->nblocks <= MAXSAVEDBLOCKS,
	    ("too many SACK blocks %d", scb->nblocks));

	overlap_front = sack_block_lookup(scb, raw_sb->rblk_start, &sb);
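	/*
	 * "sb" is now the scoreboard block containing or immediately
	 * preceding the start of the new block (NULL if the new block
	 * precedes every existing one); overlap_front tells whether the
	 * start actually falls inside "sb".
	 */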
		workingblock = alloc_sackblock_limit(scb, raw_sb);
		if (workingblock == NULL)
		TAILQ_INSERT_HEAD(&scb->sackblocks, workingblock, sblk_list);
		if (overlap_front || sb->sblk_end == raw_sb->rblk_start) {
			tcpstat.tcps_sacksbreused++;

			/* Extend old block */
			if (SEQ_GT(raw_sb->rblk_end, sb->sblk_end)) {
				sb->sblk_end = raw_sb->rblk_end;
				/* Exact match, nothing to consolidate */
			workingblock = alloc_sackblock_limit(scb, raw_sb);
			if (workingblock == NULL)
			TAILQ_INSERT_AFTER(&scb->sackblocks, sb, workingblock,

	/* Consolidate right-hand side. */
	sb = TAILQ_NEXT(workingblock, sblk_list);
	    SEQ_GEQ(workingblock->sblk_end, sb->sblk_end)) {
		struct sackblock *nextblock;

		nextblock = TAILQ_NEXT(sb, sblk_list);
		if (scb->lastfound == sb)
			scb->lastfound = NULL;
		/* Remove completely overlapped block */
		TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
		free_sackblock(scb, sb);
		KASSERT(scb->nblocks > 0,
		    ("removed overlapped block: %d blocks left", scb->nblocks));
	    SEQ_GEQ(workingblock->sblk_end, sb->sblk_start)) {
		/* Extend new block to cover partially overlapped old block. */
		workingblock->sblk_end = sb->sblk_end;
		if (scb->lastfound == sb)
			scb->lastfound = NULL;
		TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
		free_sackblock(scb, sb);
		KASSERT(scb->nblocks > 0,
		    ("removed partial right: %d blocks left", scb->nblocks));
#ifdef DEBUG_SACK_BLOCKS
tcp_sack_dump_blocks(struct scoreboard *scb)
	struct sackblock *sb;

	kprintf("%d blocks:", scb->nblocks);
	TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list)
		kprintf(" [%u, %u)", sb->sblk_start, sb->sblk_end);

tcp_sack_dump_blocks(struct scoreboard *scb)
 * Optimization to quickly determine which packets are lost.
tcp_sack_update_lostseq(struct scoreboard *scb, tcp_seq snd_una, u_int maxseg,
	struct sackblock *sb;
	int bytes_sacked = 0;
	 * RFC 3517bis recommends reducing the byte threshold.
	 * However, doing so causes extra spurious retransmits when
	 * segments are reordered.  Until a DupThresh adaptation
	 * algorithm is implemented, we don't reduce the byte
	 * threshold (tcp_rfc3517bis_rxt is off by default).
	if (tcp_do_rfc3517bis && tcp_rfc3517bis_rxt)
		rxtthresh_bytes = (rxtthresh - 1) * maxseg;
		rxtthresh_bytes = rxtthresh * maxseg;
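	/*
	 * Walk the scoreboard from the highest block downward; once the
	 * blocks seen so far account for DupThresh segments, or the
	 * equivalent in bytes, everything below that block's start is
	 * considered lost.
	 */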
	sb = TAILQ_LAST(&scb->sackblocks, sackblock_list);
		bytes_sacked += sb->sblk_end - sb->sblk_start;
		if (nsackblocks == rxtthresh ||
		    bytes_sacked >= rxtthresh_bytes) {
			scb->lostseq = sb->sblk_start;
		sb = TAILQ_PREV(sb, sackblock_list, sblk_list);
	scb->lostseq = snd_una;
 * Return whether the given sequence number is considered lost.
tcp_sack_islost(struct scoreboard *scb, tcp_seq seqnum)
	return SEQ_LT(seqnum, scb->lostseq);
 * True if at least "amount" has been SACKed.  Used by Early Retransmit.
tcp_sack_has_sacked(struct scoreboard *scb, u_int amount)
	struct sackblock *sb;
	int bytes_sacked = 0;

	TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list) {
		bytes_sacked += sb->sblk_end - sb->sblk_start;
		if (bytes_sacked >= amount)
 * Number of bytes SACKed below seq.
tcp_sack_bytes_below(struct scoreboard *scb, tcp_seq seq)
	struct sackblock *sb;
	int bytes_sacked = 0;

	sb = TAILQ_FIRST(&scb->sackblocks);
	while (sb && SEQ_GT(seq, sb->sblk_start)) {
		bytes_sacked += seq_min(seq, sb->sblk_end) - sb->sblk_start;
		sb = TAILQ_NEXT(sb, sblk_list);
 * Return estimate of the number of bytes outstanding in the network.
tcp_sack_compute_pipe(struct tcpcb *tp)
	struct scoreboard *scb = &tp->scb;
	struct sackblock *sb;
	int nlost, nretransmitted;

	nlost = tp->snd_max - scb->lostseq;
	nretransmitted = tp->rexmt_high - tp->snd_una;
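	/*
	 * Start from the worst-case estimates and subtract whatever the
	 * scoreboard shows was actually received (SACKed) in those ranges.
	 */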
	TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list) {
		if (SEQ_LT(sb->sblk_start, tp->rexmt_high)) {
			end = seq_min(sb->sblk_end, tp->rexmt_high);
			nretransmitted -= end - sb->sblk_start;
		if (SEQ_GEQ(sb->sblk_start, scb->lostseq))
			nlost -= sb->sblk_end - sb->sblk_start;

	return (nlost + nretransmitted);
 * Return the sequence number and length of the next segment to transmit
 * when in Fast Recovery.
tcp_sack_nextseg(struct tcpcb *tp, tcp_seq *nextrexmt, uint32_t *plen,
	struct scoreboard *scb = &tp->scb;
	struct socket *so = tp->t_inpcb->inp_socket;
	struct sackblock *sb;
	const struct sackblock *lastblock =
	    TAILQ_LAST(&scb->sackblocks, sackblock_list);

	/* skip SACKed data */
	tcp_sack_skip_sacked(scb, &tp->rexmt_high);

	/* Look for lost data. */
	torexmt = tp->rexmt_high;
	if (lastblock != NULL) {
		if (SEQ_LT(torexmt, lastblock->sblk_end) &&
		    tcp_sack_islost(scb, torexmt)) {
			*nextrexmt = torexmt;
			/* If the left-hand edge has been SACKed, pull it in. */
			if (sack_block_lookup(scb, torexmt + tp->t_maxseg, &sb))
				*plen = sb->sblk_start - torexmt;
				*plen = tp->t_maxseg;

	/* See if unsent data available within send window. */
	off = tp->snd_max - tp->snd_una;
	len = (long) ulmin(so->so_snd.ssb_cc, tp->snd_wnd) - off;
		*nextrexmt = tp->snd_max;	/* Send new data. */
		*plen = tp->t_maxseg;

	/* We're less certain this data has been lost. */
	if (lastblock != NULL && SEQ_LT(torexmt, lastblock->sblk_end))

	/* Rescue retransmission */
	if (tcp_do_rescuesack || tcp_do_rfc3517bis) {
		tcpstat.tcps_sackrescue_try++;
		if (tp->sack_flags & TSACK_F_SACKRESCUED) {
			if (!tcp_aggressive_rescuesack)
			 * Aggressive variant of the rescue retransmission.
			 * The idea of the rescue retransmission is to sustain
			 * the ACK clock and thus avoid a timeout retransmission.
			 * In some situations, the conservative approach
			 * suggested in the draft
			 * http://tools.ietf.org/html/
			 * draft-nishida-tcpm-rescue-retransmission-00
			 * cannot sustain the ACK clock, since it allows only
			 * one rescue retransmission before a cumulative ACK
			 * covers the segment transmitted by the rescue
			 * We try to locate the next unSACKed segment which
			 * follows the previously sent rescue segment.  If
			 * there is no such segment, we loop back to the first
			 * unacknowledged segment.

			 * Skip SACKed data, but here we follow
			 * the last transmitted rescue segment.
			torexmt = tp->rexmt_rescue;
			tcp_sack_skip_sacked(scb, &torexmt);
			if (torexmt == tp->snd_max) {
				/* Nothing left to retransmit; restart */
				torexmt = tp->snd_una;
	} else if (tcp_do_smartsack && lastblock == NULL) {
		tcpstat.tcps_sackrescue_try++;

 * Return the next sequence number higher than "*prexmt" that has
tcp_sack_skip_sacked(struct scoreboard *scb, tcp_seq *prexmt)
	struct sackblock *sb;

	/* skip SACKed data */
	if (sack_block_lookup(scb, *prexmt, &sb))
		*prexmt = sb->sblk_end;
tcp_sack_save_scoreboard(struct tcpcb *tp)
	struct scoreboard *scb = &tp->scb;
	scb->sackblocks_prev = scb->sackblocks;
	TAILQ_INIT(&scb->sackblocks);

tcp_sack_revert_scoreboard(struct scoreboard *scb, tcp_seq snd_una,
	struct sackblock *sb;

	scb->sackblocks = scb->sackblocks_prev;
	TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list)
	tcp_sack_ack_blocks(scb, snd_una);
	scb->lastfound = NULL;
#ifdef DEBUG_SACK_HISTORY
tcp_sack_dump_history(char *msg, struct tcpcb *tp)
	/* only need a couple of these to debug most problems */

	kprintf("%s:\tnsackhistory %d: ", msg, tp->nsackhistory);
	for (i = 0; i < tp->nsackhistory; ++i)
		kprintf("[%u, %u) ", tp->sackhistory[i].rblk_start,
		    tp->sackhistory[i].rblk_end);

tcp_sack_dump_history(char *msg, struct tcpcb *tp)
 * Remove old SACK blocks from the SACK history that have already been ACKed.
tcp_sack_ack_history(struct tcpcb *tp)
	int i, nblocks, openslot;

	tcp_sack_dump_history("before tcp_sack_ack_history", tp);
	nblocks = tp->nsackhistory;
	for (i = openslot = 0; i < nblocks; ++i) {
		if (SEQ_LEQ(tp->sackhistory[i].rblk_end, tp->rcv_nxt)) {
		if (SEQ_LT(tp->sackhistory[i].rblk_start, tp->rcv_nxt))
			tp->sackhistory[i].rblk_start = tp->rcv_nxt;
			tp->sackhistory[openslot++] = tp->sackhistory[i];
	tcp_sack_dump_history("after tcp_sack_ack_history", tp);
	KASSERT(openslot == tp->nsackhistory,
	    ("tcp_sack_ack_history miscounted: %d != %d",
	    openslot, tp->nsackhistory));
 * Add or merge newblock into reported history.
 * Also remove or update SACK blocks that will be acked.
tcp_sack_update_reported_history(struct tcpcb *tp, tcp_seq start, tcp_seq end)
	struct raw_sackblock copy[MAX_SACK_REPORT_BLOCKS];

	tcp_sack_dump_history("before tcp_sack_update_reported_history", tp);
	 * 1) newblock == oldblock
	 * 2) oldblock contains newblock
	 * 3) newblock contains oldblock
	 * 4) tail of oldblock overlaps or abuts start of newblock
	 * 5) tail of newblock overlaps or abuts head of oldblock
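	 *
	 * The merged result always ends up in slot 0 of the history, so the
	 * most recently reported data stays at the front.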
	for (i = cindex = 0; i < tp->nsackhistory; ++i) {
		struct raw_sackblock *oldblock = &tp->sackhistory[i];
		tcp_seq old_start = oldblock->rblk_start;
		tcp_seq old_end = oldblock->rblk_end;

		if (SEQ_LT(end, old_start) || SEQ_GT(start, old_end)) {
			/* Case 0:  no overlap.  Copy old block. */
			copy[cindex++] = *oldblock;

		if (SEQ_GEQ(start, old_start) && SEQ_LEQ(end, old_end)) {
			/* Cases 1 & 2.  Move block to front of history. */

			/* no need to check rest of blocks */
			for (j = i + 1; j < tp->nsackhistory; ++j)
				copy[cindex++] = tp->sackhistory[j];

		if (SEQ_GEQ(old_end, start) && SEQ_LT(old_start, start)) {
			/* Case 4:  extend start of new block. */
		} else if (SEQ_GEQ(end, old_start) && SEQ_GT(old_end, end)) {
			/* Case 5:  extend end of new block. */
			/* Case 3.  Delete old block by not copying it. */
			KASSERT(SEQ_LEQ(start, old_start) &&
			    SEQ_GEQ(end, old_end),
			    ("bad logic: old [%u, %u), new [%u, %u)",
			    old_start, old_end, start, end));

	/* insert new block */
	tp->sackhistory[0].rblk_start = start;
	tp->sackhistory[0].rblk_end = end;
	cindex = min(cindex, MAX_SACK_REPORT_BLOCKS - 1);
	for (i = 0; i < cindex; ++i)
		tp->sackhistory[i + 1] = copy[i];
	tp->nsackhistory = cindex + 1;
	tcp_sack_dump_history("after tcp_sack_update_reported_history", tp);
 * Fill in SACK report to return to data sender.
tcp_sack_fill_report(struct tcpcb *tp, u_char *opt, u_int *plen)
	u_int optlen = *plen;
	uint32_t *lp = (uint32_t *)(opt + optlen);
	tcp_seq hstart = tp->rcv_nxt, hend;

	KASSERT(TCP_MAXOLEN - optlen >=
	    TCPOLEN_SACK_ALIGNED + TCPOLEN_SACK_BLOCK,
	    ("no room for SACK header and one block: optlen %d", optlen));

	if (tp->sack_flags & TSACK_F_DUPSEG)
		tcpstat.tcps_snddsackopt++;
		tcpstat.tcps_sndsackopt++;

	optlen += TCPOLEN_SACK_ALIGNED;
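	/*
	 * The first SACK block reports the most recently received segment
	 * (tp->reportblk), optionally followed by the block that encloses
	 * it; the remaining option space is filled either from the
	 * reassembly queue (left-edge reports) or from the SACK history.
	 */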
	tcp_sack_ack_history(tp);
	if (tp->reportblk.rblk_start != tp->reportblk.rblk_end) {
		*lp++ = htonl(tp->reportblk.rblk_start);
		*lp++ = htonl(tp->reportblk.rblk_end);
		optlen += TCPOLEN_SACK_BLOCK;
		hstart = tp->reportblk.rblk_start;
		hend = tp->reportblk.rblk_end;
		if (tp->sack_flags & TSACK_F_ENCLOSESEG) {
			KASSERT(TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK,
865 ("no room for enclosing SACK block: oplen %d",
			*lp++ = htonl(tp->encloseblk.rblk_start);
			*lp++ = htonl(tp->encloseblk.rblk_end);
			optlen += TCPOLEN_SACK_BLOCK;
			hstart = tp->encloseblk.rblk_start;
			hend = tp->encloseblk.rblk_end;
		if (SEQ_GT(hstart, tp->rcv_nxt))
			tcp_sack_update_reported_history(tp, hstart, hend);

	if (tcp_do_smartsack && (tp->sack_flags & TSACK_F_SACKLEFT)) {
		/* Fill in from left!  Walk re-assembly queue. */
		q = TAILQ_FIRST(&tp->t_segq);
		    TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK) {
			*lp++ = htonl(q->tqe_th->th_seq);
			*lp++ = htonl(TCP_SACK_BLKEND(
			    q->tqe_th->th_seq + q->tqe_len,
			    q->tqe_th->th_flags));
			optlen += TCPOLEN_SACK_BLOCK;
			q = TAILQ_NEXT(q, tqe_q);

		/* Fill in SACK blocks from right side. */
		while (n < tp->nsackhistory &&
		    TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK) {
			if (tp->sackhistory[n].rblk_start != hstart) {
				*lp++ = htonl(tp->sackhistory[n].rblk_start);
				*lp++ = htonl(tp->sackhistory[n].rblk_end);
				optlen += TCPOLEN_SACK_BLOCK;

	tp->reportblk.rblk_start = tp->reportblk.rblk_end;
	    ~(TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG | TSACK_F_SACKLEFT);
	nblocks = (lp - olp - 1) / 2;
	*olp = htonl(TCPOPT_SACK_ALIGNED |
	    (TCPOLEN_SACK + nblocks * TCPOLEN_SACK_BLOCK));