/*
 * Copyright (c) 2003, 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/netinet/tcp_sack.c,v 1.8 2008/08/15 21:37:16 nth Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/thread.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>

/*
 * Implemented:
 *
 * RFC 2018
 * RFC 2883
 * RFC 3517
 */
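
/*
 * RFC 2018 defines the basic SACK option, RFC 2883 the D-SACK
 * extension for reporting received duplicate segments, and RFC 3517
 * the SACK-based loss recovery algorithm used on the sender side.
 */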

struct sackblock {
        tcp_seq                 sblk_start;
        tcp_seq                 sblk_end;
        TAILQ_ENTRY(sackblock)  sblk_list;
};

#define MAXSAVEDBLOCKS  8                       /* per connection limit */
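
/*
 * SACK blocks are half-open intervals [sblk_start, sblk_end) kept in
 * the scoreboard's tail queue in ascending sequence order.  At most
 * MAXSAVEDBLOCKS blocks are remembered per connection; a block that
 * would exceed the limit is counted in tcps_sacksboverflow and dropped
 * (see alloc_sackblock_limit() below).
 */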

static int insert_block(struct scoreboard *scb,
                        const struct raw_sackblock *raw_sb, boolean_t *update);

static MALLOC_DEFINE(M_SACKBLOCK, "sblk", "sackblock struct");

/*
 * Per-tcpcb initialization.
 */
void
tcp_sack_tcpcb_init(struct tcpcb *tp)
{
        struct scoreboard *scb = &tp->scb;

        scb->nblocks = 0;
        TAILQ_INIT(&scb->sackblocks);
        scb->lastfound = NULL;
}

/*
 * Find the SACK block containing or immediately preceding "seq".
 * The boolean result indicates whether the sequence is actually
 * contained in the SACK block.
 */
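/*
 * The search starts from the cached "lastfound" hint when one exists,
 * so repeated lookups for nearby sequence numbers stay cheap.  For
 * example, with blocks [10,20) and [30,40) a lookup for seq 25 returns
 * block [10,20) and FALSE, while a lookup for seq 35 returns [30,40)
 * and TRUE.
 */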
static boolean_t
sack_block_lookup(struct scoreboard *scb, tcp_seq seq, struct sackblock **sb)
{
        struct sackblock *hint = scb->lastfound;
        struct sackblock *cur, *last, *prev;

        if (TAILQ_EMPTY(&scb->sackblocks)) {
                *sb = NULL;
                return FALSE;
        }

        if (hint == NULL) {
                /* No hint.  Search from start to end. */
                cur = TAILQ_FIRST(&scb->sackblocks);
                last = NULL;
                prev = TAILQ_LAST(&scb->sackblocks, sackblock_list);
        } else  {
                if (SEQ_GEQ(seq, hint->sblk_start)) {
                        /* Search from hint to end of list. */
                        cur = hint;
                        last = NULL;
                        prev = TAILQ_LAST(&scb->sackblocks, sackblock_list);
                } else {
                        /* Search from front of list to hint. */
                        cur = TAILQ_FIRST(&scb->sackblocks);
                        last = hint;
                        prev = TAILQ_PREV(hint, sackblock_list, sblk_list);
                }
        }

        do {
                if (SEQ_GT(cur->sblk_end, seq)) {
                        if (SEQ_GEQ(seq, cur->sblk_start)) {
                                *sb = scb->lastfound = cur;
                                return TRUE;
                        } else {
                                *sb = scb->lastfound =
                                    TAILQ_PREV(cur, sackblock_list, sblk_list);
                                return FALSE;
                        }
                }
                cur = TAILQ_NEXT(cur, sblk_list);
        } while (cur != last);

        *sb = scb->lastfound = prev;
        return FALSE;
}

/*
 * Allocate a SACK block.
 */
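/*
 * The scoreboard keeps a single-entry free cache (scb->freecache) so
 * the common free-then-allocate pattern avoids a kmalloc()/kfree()
 * round trip; hits on the cache are counted in tcps_sacksbfast.
 */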
static __inline struct sackblock *
alloc_sackblock(struct scoreboard *scb, const struct raw_sackblock *raw_sb)
{
        struct sackblock *sb;

        if (scb->freecache != NULL) {
                sb = scb->freecache;
                scb->freecache = NULL;
                tcpstat.tcps_sacksbfast++;
        } else {
                sb = kmalloc(sizeof(struct sackblock), M_SACKBLOCK, M_NOWAIT);
                if (sb == NULL) {
                        tcpstat.tcps_sacksbfailed++;
                        return NULL;
                }
        }
        sb->sblk_start = raw_sb->rblk_start;
        sb->sblk_end = raw_sb->rblk_end;
        return sb;
}

static __inline struct sackblock *
alloc_sackblock_limit(struct scoreboard *scb,
    const struct raw_sackblock *raw_sb)
{
        if (scb->nblocks == MAXSAVEDBLOCKS) {
                /*
                 * Should try to kick out older blocks XXX JH
                 * May be able to coalesce with existing block.
                 * Or, go other way and free all blocks if we hit
                 * this limit.
                 */
                tcpstat.tcps_sacksboverflow++;
                return NULL;
        }
        return alloc_sackblock(scb, raw_sb);
}

/*
 * Free a SACK block.
 */
static __inline void
free_sackblock(struct scoreboard *scb, struct sackblock *s)
{
        if (scb->freecache == NULL) {
                /* YYY Maybe use the latest freed block? */
                scb->freecache = s;
                return;
        }
        kfree(s, M_SACKBLOCK);
}

/*
 * Free up SACK blocks for data that's been acked.
 */
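/*
 * If the cumulative ACK only reaches into the middle of a block we had
 * recorded as SACKed, the peer has apparently reneged on that SACK, so
 * the whole scoreboard is discarded and rebuilt from later SACK
 * options.
 */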
static void
tcp_sack_ack_blocks(struct tcpcb *tp, tcp_seq th_ack)
{
        struct scoreboard *scb = &tp->scb;
        struct sackblock *sb, *nb;

        sb = TAILQ_FIRST(&scb->sackblocks);
        while (sb && SEQ_LEQ(sb->sblk_end, th_ack)) {
                nb = TAILQ_NEXT(sb, sblk_list);
                if (scb->lastfound == sb)
                        scb->lastfound = NULL;
                TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
                free_sackblock(scb, sb);
                --scb->nblocks;
                KASSERT(scb->nblocks >= 0,
                    ("SACK block count underflow: %d < 0", scb->nblocks));
                sb = nb;
        }
        if (sb && SEQ_GEQ(th_ack, sb->sblk_start)) {
                /* Other side reneged? XXX */
                tcpstat.tcps_sackrenege++;
                tcp_sack_discard(tp);
        }
}

/*
 * Delete and free SACK blocks saved in scoreboard.
 */
static void
tcp_sack_cleanup(struct scoreboard *scb)
{
        struct sackblock *sb, *nb;

        TAILQ_FOREACH_MUTABLE(sb, &scb->sackblocks, sblk_list, nb) {
                free_sackblock(scb, sb);
                --scb->nblocks;
        }
        KASSERT(scb->nblocks == 0,
            ("SACK block count not zero: %d", scb->nblocks));
        TAILQ_INIT(&scb->sackblocks);
        scb->lastfound = NULL;
}

/*
 * Discard SACK scoreboard, HighRxt, RescueRxt and LostSeq.
 */
void
tcp_sack_discard(struct tcpcb *tp)
{
        tcp_sack_cleanup(&tp->scb);
        tp->rexmt_high = tp->snd_una;
        tp->sack_flags &= ~TSACK_F_SACKRESCUED;
        tp->scb.lostseq = tp->snd_una;
}

/*
 * Delete and free SACK blocks saved in scoreboard.
 * Delete the one slot block cache.
 */
void
tcp_sack_destroy(struct scoreboard *scb)
{
        tcp_sack_cleanup(scb);
        if (scb->freecache != NULL) {
                kfree(scb->freecache, M_SACKBLOCK);
                scb->freecache = NULL;
        }
}

/*
 * Cleanup the reported SACK block information
 */
void
tcp_sack_report_cleanup(struct tcpcb *tp)
{
        tp->sack_flags &=
            ~(TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG | TSACK_F_SACKLEFT);
        tp->reportblk.rblk_start = tp->reportblk.rblk_end;
}

/*
 * Returns      0 if not D-SACK block,
 *              1 if D-SACK,
 *              2 if duplicate of out-of-order D-SACK block.
 */
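/*
 * Per RFC 2883 a D-SACK is carried in the first SACK block: either the
 * block lies below the cumulative ACK (already-acked data arrived
 * again), or it is enclosed by the second block (a duplicate arrived
 * out of order).
 */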
int
tcp_sack_ndsack_blocks(const struct raw_sackblock *blocks, const int numblocks,
    tcp_seq snd_una)
{
        if (numblocks == 0)
                return 0;

        if (SEQ_LT(blocks[0].rblk_start, snd_una))
                return 1;

        /* block 0 inside block 1 */
        if (numblocks > 1 &&
            SEQ_GEQ(blocks[0].rblk_start, blocks[1].rblk_start) &&
            SEQ_LEQ(blocks[0].rblk_end, blocks[1].rblk_end))
                return 2;

        return 0;
}

/*
 * Update scoreboard on new incoming ACK.
 */
static void
tcp_sack_add_blocks(struct tcpcb *tp, struct tcpopt *to)
{
        const int numblocks = to->to_nsackblocks;
        struct raw_sackblock *blocks = to->to_sackblocks;
        struct scoreboard *scb = &tp->scb;
        int startblock, i;

        if (tcp_sack_ndsack_blocks(blocks, numblocks, tp->snd_una) > 0)
                startblock = 1;
        else
                startblock = 0;

        to->to_flags |= TOF_SACK_REDUNDANT;
        for (i = startblock; i < numblocks; i++) {
                struct raw_sackblock *newsackblock = &blocks[i];
                boolean_t update;
                int error;

                /* Guard against ACK reordering */
                if (SEQ_LEQ(newsackblock->rblk_start, tp->snd_una))
                        continue;

                /* Don't accept bad SACK blocks */
                if (SEQ_GT(newsackblock->rblk_end, tp->snd_max)) {
                        tcpstat.tcps_rcvbadsackopt++;
                        break;          /* skip all other blocks */
                }
                tcpstat.tcps_sacksbupdate++;

                error = insert_block(scb, newsackblock, &update);
                if (update)
                        to->to_flags &= ~TOF_SACK_REDUNDANT;
                if (error)
                        break;
        }
}

void
tcp_sack_update_scoreboard(struct tcpcb *tp, struct tcpopt *to)
{
        struct scoreboard *scb = &tp->scb;
        int rexmt_high_update = 0;

        tcp_sack_ack_blocks(tp, tp->snd_una);
        tcp_sack_add_blocks(tp, to);
        tcp_sack_update_lostseq(scb, tp->snd_una, tp->t_maxseg,
            tp->t_rxtthresh);
        if (SEQ_LT(tp->rexmt_high, tp->snd_una)) {
                tp->rexmt_high = tp->snd_una;
                rexmt_high_update = 1;
        }
        if (tp->sack_flags & TSACK_F_SACKRESCUED) {
                if (SEQ_LEQ(tp->rexmt_rescue, tp->snd_una)) {
                        tp->sack_flags &= ~TSACK_F_SACKRESCUED;
                } else if (tcp_aggressive_rescuesack && rexmt_high_update &&
                    SEQ_LT(tp->rexmt_rescue, tp->rexmt_high)) {
                        /* Drag RescueRxt along with HighRxt */
                        tp->rexmt_rescue = tp->rexmt_high;
                }
        }
}

/*
 * Insert SACK block into sender's scoreboard.
 */
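/*
 * A reported block either extends an existing block (when it overlaps
 * or abuts the block found by sack_block_lookup()) or is inserted as a
 * new block after it.  Any following blocks that the grown block now
 * covers are then freed, and a partially overlapped successor is
 * merged in.  For example, adding [30,50) to a scoreboard holding
 * [10,35) and [45,60) leaves the single block [10,60).
 */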
static int
insert_block(struct scoreboard *scb, const struct raw_sackblock *raw_sb,
    boolean_t *update)
{
        struct sackblock *sb, *workingblock;
        boolean_t overlap_front;

        *update = TRUE;
        if (TAILQ_EMPTY(&scb->sackblocks)) {
                struct sackblock *newblock;

                KASSERT(scb->nblocks == 0, ("empty scb w/ blocks"));

                newblock = alloc_sackblock(scb, raw_sb);
                if (newblock == NULL)
                        return ENOMEM;
                TAILQ_INSERT_HEAD(&scb->sackblocks, newblock, sblk_list);
                scb->nblocks = 1;
                return 0;
        }

        KASSERT(scb->nblocks > 0, ("insert_block() called w/ no blocks"));
        KASSERT(scb->nblocks <= MAXSAVEDBLOCKS,
            ("too many SACK blocks %d", scb->nblocks));

        overlap_front = sack_block_lookup(scb, raw_sb->rblk_start, &sb);

        if (sb == NULL) {
                workingblock = alloc_sackblock_limit(scb, raw_sb);
                if (workingblock == NULL)
                        return ENOMEM;
                TAILQ_INSERT_HEAD(&scb->sackblocks, workingblock, sblk_list);
                ++scb->nblocks;
        } else {
                if (overlap_front || sb->sblk_end == raw_sb->rblk_start) {
                        tcpstat.tcps_sacksbreused++;

                        /* Extend old block */
                        workingblock = sb;
                        if (SEQ_GT(raw_sb->rblk_end, sb->sblk_end)) {
                                sb->sblk_end = raw_sb->rblk_end;
                        } else {
                                /* Exact match, nothing to consolidate */
                                *update = FALSE;
                                return 0;
                        }
                } else {
                        workingblock = alloc_sackblock_limit(scb, raw_sb);
                        if (workingblock == NULL)
                                return ENOMEM;
                        TAILQ_INSERT_AFTER(&scb->sackblocks, sb, workingblock,
                            sblk_list);
                        ++scb->nblocks;
                }
        }

        /* Consolidate right-hand side. */
        sb = TAILQ_NEXT(workingblock, sblk_list);
        while (sb != NULL &&
            SEQ_GEQ(workingblock->sblk_end, sb->sblk_end)) {
                struct sackblock *nextblock;

                nextblock = TAILQ_NEXT(sb, sblk_list);
                if (scb->lastfound == sb)
                        scb->lastfound = NULL;
                /* Remove completely overlapped block */
                TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
                free_sackblock(scb, sb);
                --scb->nblocks;
                KASSERT(scb->nblocks > 0,
                    ("removed overlapped block: %d blocks left", scb->nblocks));
                sb = nextblock;
        }
        if (sb != NULL &&
            SEQ_GEQ(workingblock->sblk_end, sb->sblk_start)) {
                /* Extend new block to cover partially overlapped old block. */
                workingblock->sblk_end = sb->sblk_end;
                if (scb->lastfound == sb)
                        scb->lastfound = NULL;
                TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list);
                free_sackblock(scb, sb);
                --scb->nblocks;
                KASSERT(scb->nblocks > 0,
                    ("removed partial right: %d blocks left", scb->nblocks));
        }
        return 0;
}

#ifdef DEBUG_SACK_BLOCKS
static void
tcp_sack_dump_blocks(const struct scoreboard *scb)
{
        const struct sackblock *sb;

        kprintf("%d blocks:", scb->nblocks);
        TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list)
                kprintf(" [%u, %u)", sb->sblk_start, sb->sblk_end);
        kprintf("\n");
}
#else
static __inline void
tcp_sack_dump_blocks(const struct scoreboard *scb)
{
}
#endif

/*
 * Optimization to quickly determine which packets are lost.
 */
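/*
 * Walking from the highest SACK block downwards, lostseq is set to the
 * start of the block at which either rxtthresh discontiguous SACKed
 * blocks or rxtthresh segments worth of SACKed bytes have accumulated;
 * unSACKed data below lostseq is then treated as lost.  This is
 * roughly the IsLost() test of RFC 3517, with the byte threshold
 * reduced to (rxtthresh - 1) segments when rfc3517bis behaviour is
 * enabled.
 */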
void
tcp_sack_update_lostseq(struct scoreboard *scb, tcp_seq snd_una, u_int maxseg,
    int rxtthresh)
{
        struct sackblock *sb;
        int nsackblocks = 0;
        int bytes_sacked = 0;
        int rxtthresh_bytes;

        if (tcp_do_rfc3517bis)
                rxtthresh_bytes = (rxtthresh - 1) * maxseg;
        else
                rxtthresh_bytes = rxtthresh * maxseg;

        sb = TAILQ_LAST(&scb->sackblocks, sackblock_list);
        while (sb != NULL) {
                ++nsackblocks;
                bytes_sacked += sb->sblk_end - sb->sblk_start;
                if (nsackblocks == rxtthresh ||
                    bytes_sacked >= rxtthresh_bytes) {
                        scb->lostseq = sb->sblk_start;
                        return;
                }
                sb = TAILQ_PREV(sb, sackblock_list, sblk_list);
        }
        scb->lostseq = snd_una;
}

/*
 * Return whether the given sequence number is considered lost.
 */
boolean_t
tcp_sack_islost(const struct scoreboard *scb, tcp_seq seqnum)
{
        return SEQ_LT(seqnum, scb->lostseq);
}

/*
 * True if at least "amount" has been SACKed.  Used by Early Retransmit.
 */
boolean_t
tcp_sack_has_sacked(const struct scoreboard *scb, u_int amount)
{
        const struct sackblock *sb;
        int bytes_sacked = 0;

        TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list) {
                bytes_sacked += sb->sblk_end - sb->sblk_start;
                if (bytes_sacked >= amount)
                        return TRUE;
        }
        return FALSE;
}

/*
 * Number of bytes SACKed below seq.
 */
int
tcp_sack_bytes_below(const struct scoreboard *scb, tcp_seq seq)
{
        const struct sackblock *sb;
        int bytes_sacked = 0;

        sb = TAILQ_FIRST(&scb->sackblocks);
        while (sb && SEQ_GT(seq, sb->sblk_start)) {
                bytes_sacked += seq_min(seq, sb->sblk_end) - sb->sblk_start;
                sb = TAILQ_NEXT(sb, sblk_list);
        }
        return bytes_sacked;
}

/*
 * Return estimate of the number of bytes outstanding in the network.
 */
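/*
 * pipe = (snd_max - lostseq) + (rexmt_high - snd_una), with SACKed
 * bytes subtracted from both terms: bytes SACKed at or above lostseq
 * reduce the "lost" estimate, and bytes SACKed below rexmt_high reduce
 * the "retransmitted" estimate.  This mirrors the pipe calculation of
 * the RFC 3517 recovery algorithm.
 */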
uint32_t
tcp_sack_compute_pipe(const struct tcpcb *tp)
{
        const struct scoreboard *scb = &tp->scb;
        const struct sackblock *sb;
        int nlost, nretransmitted;
        tcp_seq end;

        nlost = tp->snd_max - scb->lostseq;
        nretransmitted = tp->rexmt_high - tp->snd_una;

        TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list) {
                if (SEQ_LT(sb->sblk_start, tp->rexmt_high)) {
                        end = seq_min(sb->sblk_end, tp->rexmt_high);
                        nretransmitted -= end - sb->sblk_start;
                }
                if (SEQ_GEQ(sb->sblk_start, scb->lostseq))
                        nlost -= sb->sblk_end - sb->sblk_start;
        }

        return (nlost + nretransmitted);
}

/*
 * Return the sequence number and length of the next segment to transmit
 * when in Fast Recovery.
 */
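/*
 * The selection order is: (1) the first unSACKed segment the
 * scoreboard considers lost, (2) previously unsent data if the send
 * window allows it, (3) unSACKed data not yet marked lost, and (4) a
 * "rescue" retransmission to keep the ACK clock alive when everything
 * else has been sent.  This parallels NextSeg() in RFC 3517.
 */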
boolean_t
tcp_sack_nextseg(struct tcpcb *tp, tcp_seq *nextrexmt, uint32_t *plen,
    boolean_t *rescue)
{
        struct scoreboard *scb = &tp->scb;
        struct socket *so = tp->t_inpcb->inp_socket;
        struct sackblock *sb;
        const struct sackblock *lastblock =
            TAILQ_LAST(&scb->sackblocks, sackblock_list);
        tcp_seq torexmt;
        long len, off, sendwin;

        /* skip SACKed data */
        tcp_sack_skip_sacked(scb, &tp->rexmt_high);

        /* Look for lost data. */
        torexmt = tp->rexmt_high;
        *rescue = FALSE;
        if (lastblock != NULL) {
                if (SEQ_LT(torexmt, lastblock->sblk_end) &&
                    tcp_sack_islost(scb, torexmt)) {
sendunsacked:
                        *nextrexmt = torexmt;
                        /* If the left-hand edge has been SACKed, pull it in. */
                        if (sack_block_lookup(scb, torexmt + tp->t_maxseg, &sb))
                                *plen = sb->sblk_start - torexmt;
                        else
                                *plen = tp->t_maxseg;
                        return TRUE;
                }
        }

        /* See if unsent data available within send window. */
        off = tp->snd_max - tp->snd_una;
        sendwin = min(tp->snd_wnd, tp->snd_bwnd);
        len = (long) ulmin(so->so_snd.ssb_cc, sendwin) - off;
        if (len > 0) {
                *nextrexmt = tp->snd_max;       /* Send new data. */
                *plen = tp->t_maxseg;
                return TRUE;
        }

        /* We're less certain this data has been lost. */
        if (lastblock != NULL && SEQ_LT(torexmt, lastblock->sblk_end))
                goto sendunsacked;

        /* Rescue retransmission */
        if (tcp_do_rescuesack || tcp_do_rfc3517bis) {
                tcpstat.tcps_sackrescue_try++;
                if (tp->sack_flags & TSACK_F_SACKRESCUED) {
                        if (!tcp_aggressive_rescuesack)
                                return FALSE;

                        /*
                         * Aggressive variant of the rescue retransmission.
                         *
                         * The idea of the rescue retransmission is to sustain
                         * the ACK clock and thus avoid a timeout
                         * retransmission.
                         *
                         * Under some circumstances the conservative approach
                         * suggested in the draft
                         * http://tools.ietf.org/html/
                         * draft-nishida-tcpm-rescue-retransmission-00
                         * cannot sustain the ACK clock, since it only allows
                         * one rescue retransmission before a cumulative ACK
                         * covers the segment transmitted by the rescue
                         * retransmission.
                         *
                         * We try to locate the next unSACKed segment which
                         * follows the previously sent rescue segment.  If
                         * there is no such segment, we loop back to the first
                         * unacknowledged segment.
                         */

                        /*
                         * Skip SACKed data, but here we follow
                         * the last transmitted rescue segment.
                         */
                        torexmt = tp->rexmt_rescue;
                        tcp_sack_skip_sacked(scb, &torexmt);
                }
                if (torexmt == tp->snd_max) {
                        /* Nothing left to retransmit; restart */
                        torexmt = tp->snd_una;
                }
                *rescue = TRUE;
                goto sendunsacked;
        } else if (tcp_do_smartsack && lastblock == NULL) {
                tcpstat.tcps_sackrescue_try++;
                *rescue = TRUE;
                goto sendunsacked;
        }

        return FALSE;
}

/*
 * Return the next sequence number higher than "*prexmt" that has
 * not been SACKed.
 */
void
tcp_sack_skip_sacked(struct scoreboard *scb, tcp_seq *prexmt)
{
        struct sackblock *sb;

        /* skip SACKed data */
        if (sack_block_lookup(scb, *prexmt, &sb))
                *prexmt = sb->sblk_end;
}

/*
 * The length of the first stretch of unSACKed data, starting at snd_una.
 */
uint32_t
tcp_sack_first_unsacked_len(const struct tcpcb *tp)
{
        const struct sackblock *sb;

        sb = TAILQ_FIRST(&tp->scb.sackblocks);
        if (sb == NULL)
                return tp->t_maxseg;

        KASSERT(SEQ_LT(tp->snd_una, sb->sblk_start),
            ("invalid sb start %u, snd_una %u",
             sb->sblk_start, tp->snd_una));
        return (sb->sblk_start - tp->snd_una);
}

#ifdef later
void
tcp_sack_save_scoreboard(struct scoreboard *scb)
{
        scb->sackblocks_prev = scb->sackblocks;
        TAILQ_INIT(&scb->sackblocks);
}

void
tcp_sack_revert_scoreboard(struct scoreboard *scb, tcp_seq snd_una,
                           u_int maxseg)
{
        struct sackblock *sb;

        scb->sackblocks = scb->sackblocks_prev;
        scb->nblocks = 0;
        TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list)
                ++scb->nblocks;
        tcp_sack_ack_blocks(scb, snd_una);
        scb->lastfound = NULL;
}
#endif

#ifdef DEBUG_SACK_HISTORY
static void
tcp_sack_dump_history(const char *msg, const struct tcpcb *tp)
{
        int i;
        static int ndumped;

        /* only need a couple of these to debug most problems */
        if (++ndumped > 900)
                return;

        kprintf("%s:\tnsackhistory %d: ", msg, tp->nsackhistory);
        for (i = 0; i < tp->nsackhistory; ++i)
                kprintf("[%u, %u) ", tp->sackhistory[i].rblk_start,
                    tp->sackhistory[i].rblk_end);
        kprintf("\n");
}
#else
static __inline void
tcp_sack_dump_history(const char *msg, const struct tcpcb *tp)
{
}
#endif

/*
 * Remove old SACK blocks from the SACK history that have already been ACKed.
 */
static void
tcp_sack_ack_history(struct tcpcb *tp)
{
        int i, nblocks, openslot;

        tcp_sack_dump_history("before tcp_sack_ack_history", tp);
        nblocks = tp->nsackhistory;
        for (i = openslot = 0; i < nblocks; ++i) {
                if (SEQ_LEQ(tp->sackhistory[i].rblk_end, tp->rcv_nxt)) {
                        --tp->nsackhistory;
                        continue;
                }
                if (SEQ_LT(tp->sackhistory[i].rblk_start, tp->rcv_nxt))
                        tp->sackhistory[i].rblk_start = tp->rcv_nxt;
                if (i == openslot)
                        ++openslot;
                else
                        tp->sackhistory[openslot++] = tp->sackhistory[i];
        }
        tcp_sack_dump_history("after tcp_sack_ack_history", tp);
        KASSERT(openslot == tp->nsackhistory,
            ("tcp_sack_ack_history miscounted: %d != %d",
            openslot, tp->nsackhistory));
}

/*
 * Add or merge newblock into reported history.
 * Also remove or update SACK blocks that will be acked.
 */
static void
tcp_sack_update_reported_history(struct tcpcb *tp, tcp_seq start, tcp_seq end)
{
        struct raw_sackblock copy[MAX_SACK_REPORT_BLOCKS];
        int i, cindex;

        tcp_sack_dump_history("before tcp_sack_update_reported_history", tp);
        /*
         * Six cases:
         *      0) no overlap
         *      1) newblock == oldblock
         *      2) oldblock contains newblock
         *      3) newblock contains oldblock
         *      4) tail of oldblock overlaps or abuts start of newblock
         *      5) tail of newblock overlaps or abuts head of oldblock
         */
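        /*
         * For example, merging a new report [150, 300) into a history
         * holding [100, 200) hits case 4: the new block is widened to
         * [100, 300) and the old entry is dropped, so the history again
         * holds a single block.
         */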
        for (i = cindex = 0; i < tp->nsackhistory; ++i) {
                struct raw_sackblock *oldblock = &tp->sackhistory[i];
                tcp_seq old_start = oldblock->rblk_start;
                tcp_seq old_end = oldblock->rblk_end;

                if (SEQ_LT(end, old_start) || SEQ_GT(start, old_end)) {
                        /* Case 0:  no overlap.  Copy old block. */
                        copy[cindex++] = *oldblock;
                        continue;
                }

                if (SEQ_GEQ(start, old_start) && SEQ_LEQ(end, old_end)) {
                        /* Cases 1 & 2.  Move block to front of history. */
                        int j;

                        start = old_start;
                        end = old_end;
                        /* no need to check rest of blocks */
                        for (j = i + 1; j < tp->nsackhistory; ++j)
                                copy[cindex++] = tp->sackhistory[j];
                        break;
                }

                if (SEQ_GEQ(old_end, start) && SEQ_LT(old_start, start)) {
                        /* Case 4:  extend start of new block. */
                        start = old_start;
                } else if (SEQ_GEQ(end, old_start) && SEQ_GT(old_end, end)) {
                        /* Case 5: extend end of new block */
                        end = old_end;
                } else {
                        /* Case 3.  Delete old block by not copying it. */
                        KASSERT(SEQ_LEQ(start, old_start) &&
                                SEQ_GEQ(end, old_end),
                            ("bad logic: old [%u, %u), new [%u, %u)",
                             old_start, old_end, start, end));
                }
        }

        /* insert new block */
        tp->sackhistory[0].rblk_start = start;
        tp->sackhistory[0].rblk_end = end;
        cindex = min(cindex, MAX_SACK_REPORT_BLOCKS - 1);
        for (i = 0; i < cindex; ++i)
                tp->sackhistory[i + 1] = copy[i];
        tp->nsackhistory = cindex + 1;
        tcp_sack_dump_history("after tcp_sack_update_reported_history", tp);
}

/*
 * Fill in SACK report to return to data sender.
 */
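/*
 * Blocks are emitted in order of importance: the current report block
 * first (the D-SACK block when a duplicate segment is being reported),
 * then the block enclosing it if any, and finally either the
 * reassembly queue walked from the left (smart SACK) or previously
 * reported blocks from the history, most recent first, as space in the
 * TCP option area permits.
 */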
void
tcp_sack_fill_report(struct tcpcb *tp, u_char *opt, u_int *plen)
{
        u_int optlen = *plen;
        uint32_t *lp = (uint32_t *)(opt + optlen);
        uint32_t *olp;
        tcp_seq hstart = tp->rcv_nxt, hend;
        int nblocks;

        KASSERT(TCP_MAXOLEN - optlen >=
            TCPOLEN_SACK_ALIGNED + TCPOLEN_SACK_BLOCK,
            ("no room for SACK header and one block: optlen %d", optlen));

        if (tp->sack_flags & TSACK_F_DUPSEG)
                tcpstat.tcps_snddsackopt++;
        else
                tcpstat.tcps_sndsackopt++;

        olp = lp++;
        optlen += TCPOLEN_SACK_ALIGNED;

        tcp_sack_ack_history(tp);
        if (tp->reportblk.rblk_start != tp->reportblk.rblk_end) {
                *lp++ = htonl(tp->reportblk.rblk_start);
                *lp++ = htonl(tp->reportblk.rblk_end);
                optlen += TCPOLEN_SACK_BLOCK;
                hstart = tp->reportblk.rblk_start;
                hend = tp->reportblk.rblk_end;
                if (tp->sack_flags & TSACK_F_ENCLOSESEG) {
                        KASSERT(TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK,
                            ("no room for enclosing SACK block: optlen %d",
                            optlen));
                        *lp++ = htonl(tp->encloseblk.rblk_start);
                        *lp++ = htonl(tp->encloseblk.rblk_end);
                        optlen += TCPOLEN_SACK_BLOCK;
                        hstart = tp->encloseblk.rblk_start;
                        hend = tp->encloseblk.rblk_end;
                }
                if (SEQ_GT(hstart, tp->rcv_nxt))
                        tcp_sack_update_reported_history(tp, hstart, hend);
        }
        if (tcp_do_smartsack && (tp->sack_flags & TSACK_F_SACKLEFT)) {
                /* Fill in from left!  Walk re-assembly queue. */
                struct tseg_qent *q;

                q = TAILQ_FIRST(&tp->t_segq);
                while (q != NULL &&
                    TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK) {
                        *lp++ = htonl(q->tqe_th->th_seq);
                        *lp++ = htonl(TCP_SACK_BLKEND(
                            q->tqe_th->th_seq + q->tqe_len,
                            q->tqe_th->th_flags));
                        optlen += TCPOLEN_SACK_BLOCK;
                        q = TAILQ_NEXT(q, tqe_q);
                }
        } else {
                int n = 0;

                /* Fill in SACK blocks from right side. */
                while (n < tp->nsackhistory &&
                    TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK) {
                        if (tp->sackhistory[n].rblk_start != hstart) {
                                *lp++ = htonl(tp->sackhistory[n].rblk_start);
                                *lp++ = htonl(tp->sackhistory[n].rblk_end);
                                optlen += TCPOLEN_SACK_BLOCK;
                        }
                        ++n;
                }
        }
        tp->reportblk.rblk_start = tp->reportblk.rblk_end;
        tp->sack_flags &=
            ~(TSACK_F_DUPSEG | TSACK_F_ENCLOSESEG | TSACK_F_SACKLEFT);
        nblocks = (lp - olp - 1) / 2;
        *olp = htonl(TCPOPT_SACK_ALIGNED |
                     (TCPOLEN_SACK + nblocks * TCPOLEN_SACK_BLOCK));
        *plen = optlen;
}