| 1 | /* |
| 2 | * Copyright (c) 2003, 2004 Jeffrey M. Hsu. All rights reserved. |
| 3 | * Copyright (c) 2003, 2004 The DragonFly Project. All rights reserved. |
| 4 | * |
| 5 | * This code is derived from software contributed to The DragonFly Project |
| 6 | * by Jeffrey M. Hsu. |
| 7 | * |
| 8 | * Redistribution and use in source and binary forms, with or without |
| 9 | * modification, are permitted provided that the following conditions |
| 10 | * are met: |
| 11 | * 1. Redistributions of source code must retain the above copyright |
| 12 | * notice, this list of conditions and the following disclaimer. |
| 13 | * 2. Redistributions in binary form must reproduce the above copyright |
| 14 | * notice, this list of conditions and the following disclaimer in the |
| 15 | * documentation and/or other materials provided with the distribution. |
| 16 | * 3. Neither the name of The DragonFly Project nor the names of its |
| 17 | * contributors may be used to endorse or promote products derived |
| 18 | * from this software without specific, prior written permission. |
| 19 | * |
| 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 21 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
| 23 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE |
| 24 | * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
| 25 | * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, |
| 26 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
| 27 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED |
| 28 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, |
| 29 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT |
| 30 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 31 | * SUCH DAMAGE. |
| 32 | * |
| 33 | * $DragonFly: src/sys/netinet/tcp_sack.c,v 1.8 2008/08/15 21:37:16 nth Exp $ |
| 34 | */ |
| 35 | |
| 36 | #include <sys/param.h> |
| 37 | #include <sys/systm.h> |
| 38 | #include <sys/kernel.h> |
| 39 | #include <sys/malloc.h> |
| 40 | #include <sys/queue.h> |
| 41 | #include <sys/thread.h> |
| 42 | #include <sys/types.h> |
| 43 | #include <sys/socket.h> |
| 44 | #include <sys/socketvar.h> |
| 45 | |
| 46 | #include <net/if.h> |
| 47 | |
| 48 | #include <netinet/in.h> |
| 49 | #include <netinet/in_systm.h> |
| 50 | #include <netinet/ip.h> |
| 51 | #include <netinet/in_var.h> |
| 52 | #include <netinet/in_pcb.h> |
| 53 | #include <netinet/ip_var.h> |
| 54 | #include <netinet/tcp.h> |
| 55 | #include <netinet/tcp_seq.h> |
| 56 | #include <netinet/tcp_var.h> |
| 57 | |
| 58 | /* |
| 59 | * Implemented: |
| 60 | * |
| 61 | * RFC 2018 |
| 62 | * RFC 2883 |
| 63 | * RFC 3517 |
| 64 | */ |
| 65 | |
/*
 * One contiguous range of SACKed sequence space.  Blocks are kept on
 * the scoreboard's tailq in ascending sequence order.
 */
struct sackblock {
	tcp_seq sblk_start;			/* first sequence number in block */
	tcp_seq sblk_end;			/* one past last sequence number */
	TAILQ_ENTRY(sackblock) sblk_list;	/* scoreboard list linkage */
};
| 71 | |
| 72 | #define MAXSAVEDBLOCKS 8 /* per connection limit */ |
| 73 | |
| 74 | static int insert_block(struct scoreboard *scb, |
| 75 | const struct raw_sackblock *raw_sb); |
| 76 | static void update_lostseq(struct scoreboard *scb, tcp_seq snd_una, |
| 77 | u_int maxseg); |
| 78 | |
| 79 | static MALLOC_DEFINE(M_SACKBLOCK, "sblk", "sackblock struct"); |
| 80 | |
| 81 | /* |
| 82 | * Per-tcpcb initialization. |
| 83 | */ |
| 84 | void |
| 85 | tcp_sack_tcpcb_init(struct tcpcb *tp) |
| 86 | { |
| 87 | struct scoreboard *scb = &tp->scb; |
| 88 | |
| 89 | scb->nblocks = 0; |
| 90 | TAILQ_INIT(&scb->sackblocks); |
| 91 | scb->lastfound = NULL; |
| 92 | } |
| 93 | |
| 94 | /* |
| 95 | * Find the SACK block containing or immediately preceding "seq". |
| 96 | * The boolean result indicates whether the sequence is actually |
| 97 | * contained in the SACK block. |
| 98 | */ |
| 99 | static boolean_t |
| 100 | sack_block_lookup(struct scoreboard *scb, tcp_seq seq, struct sackblock **sb) |
| 101 | { |
| 102 | struct sackblock *hint = scb->lastfound; |
| 103 | struct sackblock *cur, *last, *prev; |
| 104 | |
| 105 | if (TAILQ_EMPTY(&scb->sackblocks)) { |
| 106 | *sb = NULL; |
| 107 | return FALSE; |
| 108 | } |
| 109 | |
| 110 | if (hint == NULL) { |
| 111 | /* No hint. Search from start to end. */ |
| 112 | cur = TAILQ_FIRST(&scb->sackblocks); |
| 113 | last = NULL; |
| 114 | prev = TAILQ_LAST(&scb->sackblocks, sackblock_list); |
| 115 | } else { |
| 116 | if (SEQ_GEQ(seq, hint->sblk_start)) { |
| 117 | /* Search from hint to end of list. */ |
| 118 | cur = hint; |
| 119 | last = NULL; |
| 120 | prev = TAILQ_LAST(&scb->sackblocks, sackblock_list); |
| 121 | } else { |
| 122 | /* Search from front of list to hint. */ |
| 123 | cur = TAILQ_FIRST(&scb->sackblocks); |
| 124 | last = hint; |
| 125 | prev = TAILQ_PREV(hint, sackblock_list, sblk_list); |
| 126 | } |
| 127 | } |
| 128 | |
| 129 | do { |
| 130 | if (SEQ_GT(cur->sblk_end, seq)) { |
| 131 | if (SEQ_GEQ(seq, cur->sblk_start)) { |
| 132 | *sb = scb->lastfound = cur; |
| 133 | return TRUE; |
| 134 | } else { |
| 135 | *sb = scb->lastfound = |
| 136 | TAILQ_PREV(cur, sackblock_list, sblk_list); |
| 137 | return FALSE; |
| 138 | } |
| 139 | } |
| 140 | cur = TAILQ_NEXT(cur, sblk_list); |
| 141 | } while (cur != last); |
| 142 | |
| 143 | *sb = scb->lastfound = prev; |
| 144 | return FALSE; |
| 145 | } |
| 146 | |
| 147 | /* |
| 148 | * Allocate a SACK block. |
| 149 | */ |
| 150 | static __inline struct sackblock * |
| 151 | alloc_sackblock(struct scoreboard *scb, const struct raw_sackblock *raw_sb) |
| 152 | { |
| 153 | struct sackblock *sb; |
| 154 | |
| 155 | if (scb->freecache != NULL) { |
| 156 | sb = scb->freecache; |
| 157 | scb->freecache = NULL; |
| 158 | tcpstat.tcps_sacksbfast++; |
| 159 | } else { |
| 160 | sb = kmalloc(sizeof(struct sackblock), M_SACKBLOCK, M_NOWAIT); |
| 161 | if (sb == NULL) { |
| 162 | tcpstat.tcps_sacksbfailed++; |
| 163 | return NULL; |
| 164 | } |
| 165 | } |
| 166 | sb->sblk_start = raw_sb->rblk_start; |
| 167 | sb->sblk_end = raw_sb->rblk_end; |
| 168 | return sb; |
| 169 | } |
| 170 | |
| 171 | static __inline struct sackblock * |
| 172 | alloc_sackblock_limit(struct scoreboard *scb, |
| 173 | const struct raw_sackblock *raw_sb) |
| 174 | { |
| 175 | if (scb->nblocks == MAXSAVEDBLOCKS) { |
| 176 | /* |
| 177 | * Should try to kick out older blocks XXX JH |
| 178 | * May be able to coalesce with existing block. |
| 179 | * Or, go other way and free all blocks if we hit |
| 180 | * this limit. |
| 181 | */ |
| 182 | tcpstat.tcps_sacksboverflow++; |
| 183 | return NULL; |
| 184 | } |
| 185 | return alloc_sackblock(scb, raw_sb); |
| 186 | } |
| 187 | |
| 188 | /* |
| 189 | * Free a SACK block. |
| 190 | */ |
| 191 | static __inline void |
| 192 | free_sackblock(struct scoreboard *scb, struct sackblock *s) |
| 193 | { |
| 194 | if (scb->freecache == NULL) { |
| 195 | /* YYY Maybe use the latest freed block? */ |
| 196 | scb->freecache = s; |
| 197 | return; |
| 198 | } |
| 199 | kfree(s, M_SACKBLOCK); |
| 200 | } |
| 201 | |
| 202 | /* |
| 203 | * Free up SACK blocks for data that's been acked. |
| 204 | */ |
| 205 | static void |
| 206 | tcp_sack_ack_blocks(struct scoreboard *scb, tcp_seq th_ack) |
| 207 | { |
| 208 | struct sackblock *sb, *nb; |
| 209 | |
| 210 | sb = TAILQ_FIRST(&scb->sackblocks); |
| 211 | while (sb && SEQ_LEQ(sb->sblk_end, th_ack)) { |
| 212 | nb = TAILQ_NEXT(sb, sblk_list); |
| 213 | if (scb->lastfound == sb) |
| 214 | scb->lastfound = NULL; |
| 215 | TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list); |
| 216 | free_sackblock(scb, sb); |
| 217 | --scb->nblocks; |
| 218 | KASSERT(scb->nblocks >= 0, |
| 219 | ("SACK block count underflow: %d < 0", scb->nblocks)); |
| 220 | sb = nb; |
| 221 | } |
| 222 | if (sb && SEQ_GT(th_ack, sb->sblk_start)) |
| 223 | sb->sblk_start = th_ack; /* other side reneged? XXX */ |
| 224 | } |
| 225 | |
| 226 | /* |
| 227 | * Delete and free SACK blocks saved in scoreboard. |
| 228 | */ |
| 229 | void |
| 230 | tcp_sack_cleanup(struct scoreboard *scb) |
| 231 | { |
| 232 | struct sackblock *sb, *nb; |
| 233 | |
| 234 | TAILQ_FOREACH_MUTABLE(sb, &scb->sackblocks, sblk_list, nb) { |
| 235 | free_sackblock(scb, sb); |
| 236 | --scb->nblocks; |
| 237 | } |
| 238 | KASSERT(scb->nblocks == 0, |
| 239 | ("SACK block %d count not zero", scb->nblocks)); |
| 240 | TAILQ_INIT(&scb->sackblocks); |
| 241 | scb->lastfound = NULL; |
| 242 | } |
| 243 | |
| 244 | /* |
| 245 | * Delete and free SACK blocks saved in scoreboard. |
| 246 | * Delete the one slot block cache. |
| 247 | */ |
| 248 | void |
| 249 | tcp_sack_destroy(struct scoreboard *scb) |
| 250 | { |
| 251 | tcp_sack_cleanup(scb); |
| 252 | if (scb->freecache != NULL) { |
| 253 | kfree(scb->freecache, M_SACKBLOCK); |
| 254 | scb->freecache = NULL; |
| 255 | } |
| 256 | } |
| 257 | |
| 258 | /* |
| 259 | * Cleanup the reported SACK block information |
| 260 | */ |
| 261 | void |
| 262 | tcp_sack_report_cleanup(struct tcpcb *tp) |
| 263 | { |
| 264 | tp->t_flags &= ~(TF_DUPSEG | TF_ENCLOSESEG | TF_SACKLEFT); |
| 265 | tp->reportblk.rblk_start = tp->reportblk.rblk_end; |
| 266 | } |
| 267 | |
| 268 | /* |
| 269 | * Returns 0 if not D-SACK block, |
| 270 | * 1 if D-SACK, |
| 271 | * 2 if duplicate of out-of-order D-SACK block. |
| 272 | */ |
| 273 | int |
| 274 | tcp_sack_ndsack_blocks(struct raw_sackblock *blocks, const int numblocks, |
| 275 | tcp_seq snd_una) |
| 276 | { |
| 277 | if (numblocks == 0) |
| 278 | return 0; |
| 279 | |
| 280 | if (SEQ_LT(blocks[0].rblk_start, snd_una)) |
| 281 | return 1; |
| 282 | |
| 283 | /* block 0 inside block 1 */ |
| 284 | if (numblocks > 1 && |
| 285 | SEQ_GEQ(blocks[0].rblk_start, blocks[1].rblk_start) && |
| 286 | SEQ_LEQ(blocks[0].rblk_end, blocks[1].rblk_end)) |
| 287 | return 2; |
| 288 | |
| 289 | return 0; |
| 290 | } |
| 291 | |
| 292 | /* |
| 293 | * Update scoreboard on new incoming ACK. |
| 294 | */ |
| 295 | static void |
| 296 | tcp_sack_add_blocks(struct tcpcb *tp, struct tcpopt *to) |
| 297 | { |
| 298 | const int numblocks = to->to_nsackblocks; |
| 299 | struct raw_sackblock *blocks = to->to_sackblocks; |
| 300 | struct scoreboard *scb = &tp->scb; |
| 301 | int startblock; |
| 302 | int i; |
| 303 | |
| 304 | if (tcp_sack_ndsack_blocks(blocks, numblocks, tp->snd_una) > 0) |
| 305 | startblock = 1; |
| 306 | else |
| 307 | startblock = 0; |
| 308 | |
| 309 | for (i = startblock; i < numblocks; i++) { |
| 310 | struct raw_sackblock *newsackblock = &blocks[i]; |
| 311 | |
| 312 | /* don't accept bad SACK blocks */ |
| 313 | if (SEQ_GT(newsackblock->rblk_end, tp->snd_max)) { |
| 314 | tcpstat.tcps_rcvbadsackopt++; |
| 315 | break; /* skip all other blocks */ |
| 316 | } |
| 317 | tcpstat.tcps_sacksbupdate++; |
| 318 | |
| 319 | if (insert_block(scb, newsackblock)) |
| 320 | break; |
| 321 | } |
| 322 | } |
| 323 | |
/*
 * Process the SACK information carried by an incoming ACK: retire
 * fully acked blocks, fold in newly reported blocks, and refresh
 * the derived loss/retransmit state.
 */
void
tcp_sack_update_scoreboard(struct tcpcb *tp, struct tcpopt *to)
{
	struct scoreboard *scb = &tp->scb;

	tcp_sack_ack_blocks(scb, tp->snd_una);
	tcp_sack_add_blocks(tp, to);
	update_lostseq(scb, tp->snd_una, tp->t_maxseg);
	/* Cumulative ACK may have moved past the retransmit high-water mark. */
	if (SEQ_LT(tp->rexmt_high, tp->snd_una))
		tp->rexmt_high = tp->snd_una;
	/* The rescue retransmit is complete once it has been acked. */
	if ((tp->t_flags & TF_SACKRESCUED) &&
	    SEQ_LT(tp->rexmt_rescue, tp->snd_una))
		tp->t_flags &= ~TF_SACKRESCUED;
}
| 338 | |
| 339 | /* |
| 340 | * Insert SACK block into sender's scoreboard. |
| 341 | */ |
| 342 | static int |
| 343 | insert_block(struct scoreboard *scb, const struct raw_sackblock *raw_sb) |
| 344 | { |
| 345 | struct sackblock *sb, *workingblock; |
| 346 | boolean_t overlap_front; |
| 347 | |
| 348 | if (TAILQ_EMPTY(&scb->sackblocks)) { |
| 349 | struct sackblock *newblock; |
| 350 | |
| 351 | KASSERT(scb->nblocks == 0, ("emply scb w/ blocks")); |
| 352 | |
| 353 | newblock = alloc_sackblock(scb, raw_sb); |
| 354 | if (newblock == NULL) |
| 355 | return ENOMEM; |
| 356 | TAILQ_INSERT_HEAD(&scb->sackblocks, newblock, sblk_list); |
| 357 | scb->nblocks = 1; |
| 358 | return 0; |
| 359 | } |
| 360 | |
| 361 | KASSERT(scb->nblocks > 0, ("insert_block() called w/ no blocks")); |
| 362 | KASSERT(scb->nblocks <= MAXSAVEDBLOCKS, |
| 363 | ("too many SACK blocks %d", scb->nblocks)); |
| 364 | |
| 365 | overlap_front = sack_block_lookup(scb, raw_sb->rblk_start, &sb); |
| 366 | |
| 367 | if (sb == NULL) { |
| 368 | workingblock = alloc_sackblock_limit(scb, raw_sb); |
| 369 | if (workingblock == NULL) |
| 370 | return ENOMEM; |
| 371 | TAILQ_INSERT_HEAD(&scb->sackblocks, workingblock, sblk_list); |
| 372 | ++scb->nblocks; |
| 373 | } else { |
| 374 | if (overlap_front || sb->sblk_end == raw_sb->rblk_start) { |
| 375 | /* Extend old block */ |
| 376 | workingblock = sb; |
| 377 | if (SEQ_GT(raw_sb->rblk_end, sb->sblk_end)) |
| 378 | sb->sblk_end = raw_sb->rblk_end; |
| 379 | tcpstat.tcps_sacksbreused++; |
| 380 | } else { |
| 381 | workingblock = alloc_sackblock_limit(scb, raw_sb); |
| 382 | if (workingblock == NULL) |
| 383 | return ENOMEM; |
| 384 | TAILQ_INSERT_AFTER(&scb->sackblocks, sb, workingblock, |
| 385 | sblk_list); |
| 386 | ++scb->nblocks; |
| 387 | } |
| 388 | } |
| 389 | |
| 390 | /* Consolidate right-hand side. */ |
| 391 | sb = TAILQ_NEXT(workingblock, sblk_list); |
| 392 | while (sb != NULL && |
| 393 | SEQ_GEQ(workingblock->sblk_end, sb->sblk_end)) { |
| 394 | struct sackblock *nextblock; |
| 395 | |
| 396 | nextblock = TAILQ_NEXT(sb, sblk_list); |
| 397 | if (scb->lastfound == sb) |
| 398 | scb->lastfound = NULL; |
| 399 | /* Remove completely overlapped block */ |
| 400 | TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list); |
| 401 | free_sackblock(scb, sb); |
| 402 | --scb->nblocks; |
| 403 | KASSERT(scb->nblocks > 0, |
| 404 | ("removed overlapped block: %d blocks left", scb->nblocks)); |
| 405 | sb = nextblock; |
| 406 | } |
| 407 | if (sb != NULL && |
| 408 | SEQ_GEQ(workingblock->sblk_end, sb->sblk_start)) { |
| 409 | /* Extend new block to cover partially overlapped old block. */ |
| 410 | workingblock->sblk_end = sb->sblk_end; |
| 411 | if (scb->lastfound == sb) |
| 412 | scb->lastfound = NULL; |
| 413 | TAILQ_REMOVE(&scb->sackblocks, sb, sblk_list); |
| 414 | free_sackblock(scb, sb); |
| 415 | --scb->nblocks; |
| 416 | KASSERT(scb->nblocks > 0, |
| 417 | ("removed partial right: %d blocks left", scb->nblocks)); |
| 418 | } |
| 419 | return 0; |
| 420 | } |
| 421 | |
#ifdef DEBUG_SACK_BLOCKS
/*
 * Debug aid: print every block currently on the scoreboard.
 */
static void
tcp_sack_dump_blocks(struct scoreboard *scb)
{
	struct sackblock *sb;

	kprintf("%d blocks:", scb->nblocks);
	TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list)
		kprintf(" [%u, %u)", sb->sblk_start, sb->sblk_end);
	kprintf("\n");
}
#else
/* No-op stub when SACK block debugging is compiled out. */
static __inline void
tcp_sack_dump_blocks(struct scoreboard *scb)
{
}
#endif
| 439 | |
| 440 | /* |
| 441 | * Optimization to quickly determine which packets are lost. |
| 442 | */ |
| 443 | static void |
| 444 | update_lostseq(struct scoreboard *scb, tcp_seq snd_una, u_int maxseg) |
| 445 | { |
| 446 | struct sackblock *sb; |
| 447 | int nsackblocks = 0; |
| 448 | int bytes_sacked = 0; |
| 449 | |
| 450 | sb = TAILQ_LAST(&scb->sackblocks, sackblock_list); |
| 451 | while (sb != NULL) { |
| 452 | ++nsackblocks; |
| 453 | bytes_sacked += sb->sblk_end - sb->sblk_start; |
| 454 | if (nsackblocks == tcprexmtthresh || |
| 455 | bytes_sacked >= tcprexmtthresh * maxseg) { |
| 456 | scb->lostseq = sb->sblk_start; |
| 457 | return; |
| 458 | } |
| 459 | sb = TAILQ_PREV(sb, sackblock_list, sblk_list); |
| 460 | } |
| 461 | scb->lostseq = snd_una; |
| 462 | } |
| 463 | |
| 464 | /* |
| 465 | * Return whether the given sequence number is considered lost. |
| 466 | */ |
| 467 | static boolean_t |
| 468 | scb_islost(struct scoreboard *scb, tcp_seq seqnum) |
| 469 | { |
| 470 | return SEQ_LT(seqnum, scb->lostseq); |
| 471 | } |
| 472 | |
| 473 | /* |
| 474 | * True if at least "amount" has been SACKed. Used by Early Retransmit. |
| 475 | */ |
| 476 | boolean_t |
| 477 | tcp_sack_has_sacked(struct scoreboard *scb, u_int amount) |
| 478 | { |
| 479 | struct sackblock *sb; |
| 480 | int bytes_sacked = 0; |
| 481 | |
| 482 | TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list) { |
| 483 | bytes_sacked += sb->sblk_end - sb->sblk_start; |
| 484 | if (bytes_sacked >= amount) |
| 485 | return TRUE; |
| 486 | } |
| 487 | return FALSE; |
| 488 | } |
| 489 | |
| 490 | /* |
| 491 | * Number of bytes SACKed below seq. |
| 492 | */ |
| 493 | int |
| 494 | tcp_sack_bytes_below(struct scoreboard *scb, tcp_seq seq) |
| 495 | { |
| 496 | struct sackblock *sb; |
| 497 | int bytes_sacked = 0; |
| 498 | |
| 499 | sb = TAILQ_FIRST(&scb->sackblocks); |
| 500 | while (sb && SEQ_GT(seq, sb->sblk_start)) { |
| 501 | bytes_sacked += seq_min(seq, sb->sblk_end) - sb->sblk_start; |
| 502 | sb = TAILQ_NEXT(sb, sblk_list); |
| 503 | } |
| 504 | return bytes_sacked; |
| 505 | } |
| 506 | |
| 507 | /* |
| 508 | * Return estimate of the number of bytes outstanding in the network. |
| 509 | */ |
| 510 | uint32_t |
| 511 | tcp_sack_compute_pipe(struct tcpcb *tp) |
| 512 | { |
| 513 | struct scoreboard *scb = &tp->scb; |
| 514 | struct sackblock *sb; |
| 515 | int nlost, nretransmitted; |
| 516 | tcp_seq end; |
| 517 | |
| 518 | nlost = tp->snd_max - scb->lostseq; |
| 519 | nretransmitted = tp->rexmt_high - tp->snd_una; |
| 520 | |
| 521 | TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list) { |
| 522 | if (SEQ_LT(sb->sblk_start, tp->rexmt_high)) { |
| 523 | end = seq_min(sb->sblk_end, tp->rexmt_high); |
| 524 | nretransmitted -= end - sb->sblk_start; |
| 525 | } |
| 526 | if (SEQ_GEQ(sb->sblk_start, scb->lostseq)) |
| 527 | nlost -= sb->sblk_end - sb->sblk_start; |
| 528 | } |
| 529 | |
| 530 | return (nlost + nretransmitted); |
| 531 | } |
| 532 | |
| 533 | /* |
| 534 | * Return the sequence number and length of the next segment to transmit |
| 535 | * when in Fast Recovery. |
| 536 | */ |
| 537 | boolean_t |
| 538 | tcp_sack_nextseg(struct tcpcb *tp, tcp_seq *nextrexmt, uint32_t *plen, |
| 539 | boolean_t *rescue) |
| 540 | { |
| 541 | struct scoreboard *scb = &tp->scb; |
| 542 | struct socket *so = tp->t_inpcb->inp_socket; |
| 543 | struct sackblock *sb; |
| 544 | const struct sackblock *lastblock = |
| 545 | TAILQ_LAST(&scb->sackblocks, sackblock_list); |
| 546 | tcp_seq torexmt; |
| 547 | long len, off; |
| 548 | |
| 549 | /* skip SACKed data */ |
| 550 | tcp_sack_skip_sacked(scb, &tp->rexmt_high); |
| 551 | |
| 552 | /* Look for lost data. */ |
| 553 | torexmt = tp->rexmt_high; |
| 554 | *rescue = FALSE; |
| 555 | if (lastblock != NULL) { |
| 556 | if (SEQ_LT(torexmt, lastblock->sblk_end) && |
| 557 | scb_islost(scb, torexmt)) { |
| 558 | sendunsacked: |
| 559 | *nextrexmt = torexmt; |
| 560 | /* If the left-hand edge has been SACKed, pull it in. */ |
| 561 | if (sack_block_lookup(scb, torexmt + tp->t_maxseg, &sb)) |
| 562 | *plen = sb->sblk_start - torexmt; |
| 563 | else |
| 564 | *plen = tp->t_maxseg; |
| 565 | return TRUE; |
| 566 | } |
| 567 | } |
| 568 | |
| 569 | /* See if unsent data available within send window. */ |
| 570 | off = tp->snd_max - tp->snd_una; |
| 571 | len = (long) ulmin(so->so_snd.ssb_cc, tp->snd_wnd) - off; |
| 572 | if (len > 0) { |
| 573 | *nextrexmt = tp->snd_max; /* Send new data. */ |
| 574 | *plen = tp->t_maxseg; |
| 575 | return TRUE; |
| 576 | } |
| 577 | |
| 578 | /* We're less certain this data has been lost. */ |
| 579 | if (lastblock != NULL && SEQ_LT(torexmt, lastblock->sblk_end)) |
| 580 | goto sendunsacked; |
| 581 | |
| 582 | if (tcp_do_rescuesack) { |
| 583 | tcpstat.tcps_sackrescue_try++; |
| 584 | if (lastblock == NULL) |
| 585 | tcpstat.tcps_sackrescue_smart++; |
| 586 | |
| 587 | if (tp->t_flags & TF_SACKRESCUED) |
| 588 | return FALSE; |
| 589 | *rescue = TRUE; |
| 590 | tcpstat.tcps_sackrescue++; |
| 591 | goto sendunsacked; |
| 592 | } else if (tcp_do_smartsack && lastblock == NULL) { |
| 593 | goto sendunsacked; |
| 594 | } |
| 595 | |
| 596 | return FALSE; |
| 597 | } |
| 598 | |
| 599 | /* |
| 600 | * Return the next sequence number higher than "*prexmt" that has |
| 601 | * not been SACKed. |
| 602 | */ |
| 603 | void |
| 604 | tcp_sack_skip_sacked(struct scoreboard *scb, tcp_seq *prexmt) |
| 605 | { |
| 606 | struct sackblock *sb; |
| 607 | |
| 608 | /* skip SACKed data */ |
| 609 | if (sack_block_lookup(scb, *prexmt, &sb)) |
| 610 | *prexmt = sb->sblk_end; |
| 611 | } |
| 612 | |
| 613 | #ifdef later |
| 614 | void |
| 615 | tcp_sack_save_scoreboard(struct scoreboard *scb) |
| 616 | { |
| 617 | struct scoreboard *scb = &tp->scb; |
| 618 | |
| 619 | scb->sackblocks_prev = scb->sackblocks; |
| 620 | TAILQ_INIT(&scb->sackblocks); |
| 621 | } |
| 622 | |
/*
 * Restore the previously saved scoreboard list, recount its blocks,
 * and retire anything already covered by the cumulative ACK.
 *
 * NOTE(review): the "maxseg" parameter is currently unused here.
 */
void
tcp_sack_revert_scoreboard(struct scoreboard *scb, tcp_seq snd_una,
			   u_int maxseg)
{
	struct sackblock *sb;

	scb->sackblocks = scb->sackblocks_prev;
	scb->nblocks = 0;
	TAILQ_FOREACH(sb, &scb->sackblocks, sblk_list)
		++scb->nblocks;
	tcp_sack_ack_blocks(scb, snd_una);
	scb->lastfound = NULL;
}
| 636 | #endif |
| 637 | |
#ifdef DEBUG_SACK_HISTORY
/*
 * Debug aid: print the receiver-side SACK report history.
 */
static void
tcp_sack_dump_history(char *msg, struct tcpcb *tp)
{
	int i;
	static int ndumped;

	/* only need a couple of these to debug most problems */
	if (++ndumped > 900)
		return;

	kprintf("%s:\tnsackhistory %d: ", msg, tp->nsackhistory);
	for (i = 0; i < tp->nsackhistory; ++i)
		kprintf("[%u, %u) ", tp->sackhistory[i].rblk_start,
		    tp->sackhistory[i].rblk_end);
	kprintf("\n");
}
#else
/* No-op stub when SACK history debugging is compiled out. */
static __inline void
tcp_sack_dump_history(char *msg, struct tcpcb *tp)
{
}
#endif
| 661 | |
| 662 | /* |
| 663 | * Remove old SACK blocks from the SACK history that have already been ACKed. |
| 664 | */ |
| 665 | static void |
| 666 | tcp_sack_ack_history(struct tcpcb *tp) |
| 667 | { |
| 668 | int i, nblocks, openslot; |
| 669 | |
| 670 | tcp_sack_dump_history("before tcp_sack_ack_history", tp); |
| 671 | nblocks = tp->nsackhistory; |
| 672 | for (i = openslot = 0; i < nblocks; ++i) { |
| 673 | if (SEQ_LEQ(tp->sackhistory[i].rblk_end, tp->rcv_nxt)) { |
| 674 | --tp->nsackhistory; |
| 675 | continue; |
| 676 | } |
| 677 | if (SEQ_LT(tp->sackhistory[i].rblk_start, tp->rcv_nxt)) |
| 678 | tp->sackhistory[i].rblk_start = tp->rcv_nxt; |
| 679 | if (i == openslot) |
| 680 | ++openslot; |
| 681 | else |
| 682 | tp->sackhistory[openslot++] = tp->sackhistory[i]; |
| 683 | } |
| 684 | tcp_sack_dump_history("after tcp_sack_ack_history", tp); |
| 685 | KASSERT(openslot == tp->nsackhistory, |
| 686 | ("tcp_sack_ack_history miscounted: %d != %d", |
| 687 | openslot, tp->nsackhistory)); |
| 688 | } |
| 689 | |
| 690 | /* |
| 691 | * Add or merge newblock into reported history. |
| 692 | * Also remove or update SACK blocks that will be acked. |
| 693 | */ |
| 694 | static void |
| 695 | tcp_sack_update_reported_history(struct tcpcb *tp, tcp_seq start, tcp_seq end) |
| 696 | { |
| 697 | struct raw_sackblock copy[MAX_SACK_REPORT_BLOCKS]; |
| 698 | int i, cindex; |
| 699 | |
| 700 | tcp_sack_dump_history("before tcp_sack_update_reported_history", tp); |
| 701 | /* |
| 702 | * Six cases: |
| 703 | * 0) no overlap |
| 704 | * 1) newblock == oldblock |
| 705 | * 2) oldblock contains newblock |
| 706 | * 3) newblock contains oldblock |
| 707 | * 4) tail of oldblock overlaps or abuts start of newblock |
| 708 | * 5) tail of newblock overlaps or abuts head of oldblock |
| 709 | */ |
| 710 | for (i = cindex = 0; i < tp->nsackhistory; ++i) { |
| 711 | struct raw_sackblock *oldblock = &tp->sackhistory[i]; |
| 712 | tcp_seq old_start = oldblock->rblk_start; |
| 713 | tcp_seq old_end = oldblock->rblk_end; |
| 714 | |
| 715 | if (SEQ_LT(end, old_start) || SEQ_GT(start, old_end)) { |
| 716 | /* Case 0: no overlap. Copy old block. */ |
| 717 | copy[cindex++] = *oldblock; |
| 718 | continue; |
| 719 | } |
| 720 | |
| 721 | if (SEQ_GEQ(start, old_start) && SEQ_LEQ(end, old_end)) { |
| 722 | /* Cases 1 & 2. Move block to front of history. */ |
| 723 | int j; |
| 724 | |
| 725 | start = old_start; |
| 726 | end = old_end; |
| 727 | /* no need to check rest of blocks */ |
| 728 | for (j = i + 1; j < tp->nsackhistory; ++j) |
| 729 | copy[cindex++] = tp->sackhistory[j]; |
| 730 | break; |
| 731 | } |
| 732 | |
| 733 | if (SEQ_GEQ(old_end, start) && SEQ_LT(old_start, start)) { |
| 734 | /* Case 4: extend start of new block. */ |
| 735 | start = old_start; |
| 736 | } else if (SEQ_GEQ(end, old_start) && SEQ_GT(old_end, end)) { |
| 737 | /* Case 5: extend end of new block */ |
| 738 | end = old_end; |
| 739 | } else { |
| 740 | /* Case 3. Delete old block by not copying it. */ |
| 741 | KASSERT(SEQ_LEQ(start, old_start) && |
| 742 | SEQ_GEQ(end, old_end), |
| 743 | ("bad logic: old [%u, %u), new [%u, %u)", |
| 744 | old_start, old_end, start, end)); |
| 745 | } |
| 746 | } |
| 747 | |
| 748 | /* insert new block */ |
| 749 | tp->sackhistory[0].rblk_start = start; |
| 750 | tp->sackhistory[0].rblk_end = end; |
| 751 | cindex = min(cindex, MAX_SACK_REPORT_BLOCKS - 1); |
| 752 | for (i = 0; i < cindex; ++i) |
| 753 | tp->sackhistory[i + 1] = copy[i]; |
| 754 | tp->nsackhistory = cindex + 1; |
| 755 | tcp_sack_dump_history("after tcp_sack_update_reported_history", tp); |
| 756 | } |
| 757 | |
| 758 | /* |
| 759 | * Fill in SACK report to return to data sender. |
| 760 | */ |
| 761 | void |
| 762 | tcp_sack_fill_report(struct tcpcb *tp, u_char *opt, u_int *plen) |
| 763 | { |
| 764 | u_int optlen = *plen; |
| 765 | uint32_t *lp = (uint32_t *)(opt + optlen); |
| 766 | uint32_t *olp; |
| 767 | tcp_seq hstart = tp->rcv_nxt, hend; |
| 768 | int nblocks; |
| 769 | |
| 770 | KASSERT(TCP_MAXOLEN - optlen >= |
| 771 | TCPOLEN_SACK_ALIGNED + TCPOLEN_SACK_BLOCK, |
| 772 | ("no room for SACK header and one block: optlen %d", optlen)); |
| 773 | |
| 774 | if (tp->t_flags & TF_DUPSEG) |
| 775 | tcpstat.tcps_snddsackopt++; |
| 776 | else |
| 777 | tcpstat.tcps_sndsackopt++; |
| 778 | |
| 779 | olp = lp++; |
| 780 | optlen += TCPOLEN_SACK_ALIGNED; |
| 781 | |
| 782 | tcp_sack_ack_history(tp); |
| 783 | if (tp->reportblk.rblk_start != tp->reportblk.rblk_end) { |
| 784 | *lp++ = htonl(tp->reportblk.rblk_start); |
| 785 | *lp++ = htonl(tp->reportblk.rblk_end); |
| 786 | optlen += TCPOLEN_SACK_BLOCK; |
| 787 | hstart = tp->reportblk.rblk_start; |
| 788 | hend = tp->reportblk.rblk_end; |
| 789 | if (tp->t_flags & TF_ENCLOSESEG) { |
| 790 | KASSERT(TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK, |
| 791 | ("no room for enclosing SACK block: oplen %d", |
| 792 | optlen)); |
| 793 | *lp++ = htonl(tp->encloseblk.rblk_start); |
| 794 | *lp++ = htonl(tp->encloseblk.rblk_end); |
| 795 | optlen += TCPOLEN_SACK_BLOCK; |
| 796 | hstart = tp->encloseblk.rblk_start; |
| 797 | hend = tp->encloseblk.rblk_end; |
| 798 | } |
| 799 | if (SEQ_GT(hstart, tp->rcv_nxt)) |
| 800 | tcp_sack_update_reported_history(tp, hstart, hend); |
| 801 | } |
| 802 | if (tcp_do_smartsack && (tp->t_flags & TF_SACKLEFT)) { |
| 803 | /* Fill in from left! Walk re-assembly queue. */ |
| 804 | struct tseg_qent *q; |
| 805 | |
| 806 | q = LIST_FIRST(&tp->t_segq); |
| 807 | while (q != NULL && |
| 808 | TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK) { |
| 809 | *lp++ = htonl(q->tqe_th->th_seq); |
| 810 | *lp++ = htonl(TCP_SACK_BLKEND( |
| 811 | q->tqe_th->th_seq + q->tqe_len, |
| 812 | q->tqe_th->th_flags)); |
| 813 | optlen += TCPOLEN_SACK_BLOCK; |
| 814 | q = LIST_NEXT(q, tqe_q); |
| 815 | } |
| 816 | } else { |
| 817 | int n = 0; |
| 818 | |
| 819 | /* Fill in SACK blocks from right side. */ |
| 820 | while (n < tp->nsackhistory && |
| 821 | TCP_MAXOLEN - optlen >= TCPOLEN_SACK_BLOCK) { |
| 822 | if (tp->sackhistory[n].rblk_start != hstart) { |
| 823 | *lp++ = htonl(tp->sackhistory[n].rblk_start); |
| 824 | *lp++ = htonl(tp->sackhistory[n].rblk_end); |
| 825 | optlen += TCPOLEN_SACK_BLOCK; |
| 826 | } |
| 827 | ++n; |
| 828 | } |
| 829 | } |
| 830 | tp->reportblk.rblk_start = tp->reportblk.rblk_end; |
| 831 | tp->t_flags &= ~(TF_DUPSEG | TF_ENCLOSESEG | TF_SACKLEFT); |
| 832 | nblocks = (lp - olp - 1) / 2; |
| 833 | *olp = htonl(TCPOPT_SACK_ALIGNED | |
| 834 | (TCPOLEN_SACK + nblocks * TCPOLEN_SACK_BLOCK)); |
| 835 | *plen = optlen; |
| 836 | } |