1 /* $KAME: sctp_output.c,v 1.46 2005/03/06 16:04:17 itojun Exp $ */
2 /* $DragonFly: src/sys/netinet/sctp_output.c,v 1.2 2005/07/15 15:02:02 eirikn Exp $ */
5 * Copyright (C) 2002, 2003, 2004 Cisco Systems Inc,
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the project nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #if !(defined(__OpenBSD__) || defined (__APPLE__))
34 #include "opt_ipsec.h"
36 #if defined(__FreeBSD__) || defined(__DragonFly__)
37 #include "opt_compat.h"
38 #include "opt_inet6.h"
41 #if defined(__NetBSD__)
46 #elif !defined(__OpenBSD__)
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/malloc.h>
54 #include <sys/domain.h>
56 #include <sys/protosw.h>
57 #include <sys/socket.h>
58 #include <sys/socketvar.h>
60 #include <sys/kernel.h>
61 #include <sys/sysctl.h>
62 #include <sys/resourcevar.h>
65 #include <sys/domain.h>
68 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000)
69 #include <sys/limits.h>
71 #include <machine/limits.h>
73 #include <machine/cpu.h>
76 #include <net/if_types.h>
78 #if defined(__FreeBSD__) || defined(__DragonFly__)
79 #include <net/if_var.h>
82 #include <net/route.h>
84 #include <netinet/in.h>
85 #include <netinet/in_systm.h>
86 #include <netinet/ip.h>
87 #include <netinet/in_pcb.h>
88 #include <netinet/in_var.h>
89 #include <netinet/ip_var.h>
92 #include <netinet/ip6.h>
93 #include <netinet6/ip6_var.h>
94 #include <netinet6/scope6_var.h>
95 #include <netinet6/nd6.h>
97 #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__)
98 #include <netinet6/in6_pcb.h>
99 #elif defined(__OpenBSD__)
100 #include <netinet/in_pcb.h>
103 #include <netinet/icmp6.h>
107 #include <net/net_osdep.h>
109 #if defined(HAVE_NRL_INPCB) || defined(__FreeBSD__) || defined(__DragonFly__)
115 #include <netinet/sctp_pcb.h>
119 #include <netinet6/ipsec.h>
120 #include <netkey/key.h>
126 #include <netinet/sctp_var.h>
127 #include <netinet/sctp_header.h>
128 #include <netinet/sctputil.h>
129 #include <netinet/sctp_pcb.h>
130 #include <netinet/sctp_output.h>
131 #include <netinet/sctp_uio.h>
132 #include <netinet/sctputil.h>
133 #include <netinet/sctp_hashdriver.h>
134 #include <netinet/sctp_timer.h>
135 #include <netinet/sctp_asconf.h>
136 #include <netinet/sctp_indata.h>
139 extern uint32_t sctp_debug_on;
142 extern int sctp_peer_chunk_oh;
145 sctp_find_cmsg(int c_type, void *data, struct mbuf *control, int cpsize)
150 tlen = control->m_len;
153 * Independent of how many mbufs, find the c_type inside the control
154 * structure and copy out the data.
157 if ((tlen-at) < (int)CMSG_ALIGN(sizeof(cmh))) {
158 /* not enough room for one more we are done. */
161 m_copydata(control, at, sizeof(cmh), (caddr_t)&cmh);
162 if ((cmh.cmsg_len + at) > tlen) {
164 * this is real messed up since there is not enough
165 * data here to cover the cmsg header. We are done.
169 if ((cmh.cmsg_level == IPPROTO_SCTP) &&
170 (c_type == cmh.cmsg_type)) {
171 /* found the one we want, copy it out */
172 at += CMSG_ALIGN(sizeof(struct cmsghdr));
173 if ((int)(cmh.cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr))) < cpsize) {
175 * space of cmsg_len after header not
180 m_copydata(control, at, cpsize, data);
183 at += CMSG_ALIGN(cmh.cmsg_len);
184 if (cmh.cmsg_len == 0) {
194 sctp_add_addr_to_mbuf(struct mbuf *m, struct ifaddr *ifa)
196 struct sctp_paramhdr *parmh;
199 if (ifa->ifa_addr->sa_family == AF_INET) {
200 len = sizeof(struct sctp_ipv4addr_param);
201 } else if (ifa->ifa_addr->sa_family == AF_INET6) {
202 len = sizeof(struct sctp_ipv6addr_param);
208 if (M_TRAILINGSPACE(m) >= len) {
209 /* easy side we just drop it on the end */
210 parmh = (struct sctp_paramhdr *)(m->m_data + m->m_len);
213 /* Need more space */
215 while (mret->m_next != NULL) {
218 MGET(mret->m_next, M_DONTWAIT, MT_DATA);
219 if (mret->m_next == NULL) {
220 /* We are hosed, can't add more addresses */
224 parmh = mtod(mret, struct sctp_paramhdr *);
226 /* now add the parameter */
227 if (ifa->ifa_addr->sa_family == AF_INET) {
228 struct sctp_ipv4addr_param *ipv4p;
229 struct sockaddr_in *sin;
230 sin = (struct sockaddr_in *)ifa->ifa_addr;
231 ipv4p = (struct sctp_ipv4addr_param *)parmh;
232 parmh->param_type = htons(SCTP_IPV4_ADDRESS);
233 parmh->param_length = htons(len);
234 ipv4p->addr = sin->sin_addr.s_addr;
236 } else if (ifa->ifa_addr->sa_family == AF_INET6) {
237 struct sctp_ipv6addr_param *ipv6p;
238 struct sockaddr_in6 *sin6;
239 sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
240 ipv6p = (struct sctp_ipv6addr_param *)parmh;
241 parmh->param_type = htons(SCTP_IPV6_ADDRESS);
242 parmh->param_length = htons(len);
243 memcpy(ipv6p->addr, &sin6->sin6_addr,
244 sizeof(ipv6p->addr));
245 /* clear embedded scope in the address */
246 in6_clearscope((struct in6_addr *)ipv6p->addr);
257 sctp_add_cookie(struct sctp_inpcb *inp, struct mbuf *init, int init_offset,
258 struct mbuf *initack, int initack_offset, struct sctp_state_cookie *stc_in)
260 struct mbuf *copy_init, *copy_initack, *m_at, *sig, *mret;
261 struct sctp_state_cookie *stc;
262 struct sctp_paramhdr *ph;
269 MGET(mret, M_DONTWAIT, MT_DATA);
273 copy_init = sctp_m_copym(init, init_offset, M_COPYALL, M_DONTWAIT);
274 if (copy_init == NULL) {
278 copy_initack = sctp_m_copym(initack, initack_offset, M_COPYALL,
280 if (copy_initack == NULL) {
282 sctp_m_freem(copy_init);
285 /* easy side we just drop it on the end */
286 ph = mtod(mret, struct sctp_paramhdr *);
287 mret->m_len = sizeof(struct sctp_state_cookie) +
288 sizeof(struct sctp_paramhdr);
289 stc = (struct sctp_state_cookie *)((caddr_t)ph +
290 sizeof(struct sctp_paramhdr));
291 ph->param_type = htons(SCTP_STATE_COOKIE);
292 ph->param_length = 0; /* fill in at the end */
293 /* Fill in the stc cookie data */
296 /* tack the INIT and then the INIT-ACK onto the chain */
299 for (m_at = mret; m_at; m_at = m_at->m_next) {
300 cookie_sz += m_at->m_len;
301 if (m_at->m_next == NULL) {
302 m_at->m_next = copy_init;
307 for (m_at = copy_init; m_at; m_at = m_at->m_next) {
308 cookie_sz += m_at->m_len;
309 if (m_at->m_next == NULL) {
310 m_at->m_next = copy_initack;
315 for (m_at = copy_initack; m_at; m_at = m_at->m_next) {
316 cookie_sz += m_at->m_len;
317 if (m_at->m_next == NULL) {
321 MGET(sig, M_DONTWAIT, MT_DATA);
325 sctp_m_freem(copy_init);
326 sctp_m_freem(copy_initack);
332 signature = (uint8_t *)(mtod(sig, caddr_t) + sig_offset);
333 /* Time to sign the cookie */
334 sctp_hash_digest_m((char *)inp->sctp_ep.secret_key[
335 (int)(inp->sctp_ep.current_secret_number)],
336 SCTP_SECRET_SIZE, mret, sizeof(struct sctp_paramhdr),
337 (uint8_t *)signature);
338 sig->m_len += SCTP_SIGNATURE_SIZE;
339 cookie_sz += SCTP_SIGNATURE_SIZE;
341 ph->param_length = htons(cookie_sz);
346 static struct sockaddr_in *
347 sctp_is_v4_ifa_addr_prefered (struct ifaddr *ifa, uint8_t loopscope, uint8_t ipv4_scope, uint8_t *sin_loop, uint8_t *sin_local)
349 struct sockaddr_in *sin;
351 * Here we determine if its a prefered address. A
352 * prefered address means it is the same scope or
353 * higher scope then the destination.
354 * L = loopback, P = private, G = global
355 * -----------------------------------------
356 * src | dest | result
357 *-----------------------------------------
359 *-----------------------------------------
361 *-----------------------------------------
363 *-----------------------------------------
365 *-----------------------------------------
367 *-----------------------------------------
369 *-----------------------------------------
371 *-----------------------------------------
373 *-----------------------------------------
375 *-----------------------------------------
378 if (ifa->ifa_addr->sa_family != AF_INET) {
382 /* Ok the address may be ok */
383 sin = (struct sockaddr_in *)ifa->ifa_addr;
384 if (sin->sin_addr.s_addr == 0) {
387 *sin_local = *sin_loop = 0;
388 if ((ifa->ifa_ifp->if_type == IFT_LOOP) ||
389 (IN4_ISLOOPBACK_ADDRESS(&sin->sin_addr))) {
393 if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
396 if (!loopscope && *sin_loop) {
397 /* Its a loopback address and we don't have loop scope */
400 if (!ipv4_scope && *sin_local) {
401 /* Its a private address, and we don't have private address scope */
404 if (((ipv4_scope == 0) && (loopscope == 0)) && (*sin_local)) {
405 /* its a global src and a private dest */
408 /* its a prefered address */
412 static struct sockaddr_in *
413 sctp_is_v4_ifa_addr_acceptable (struct ifaddr *ifa, uint8_t loopscope, uint8_t ipv4_scope, uint8_t *sin_loop, uint8_t *sin_local)
415 struct sockaddr_in *sin;
417 * Here we determine if its a acceptable address. A
418 * acceptable address means it is the same scope or
419 * higher scope but we can allow for NAT which means
420 * its ok to have a global dest and a private src.
422 * L = loopback, P = private, G = global
423 * -----------------------------------------
424 * src | dest | result
425 *-----------------------------------------
427 *-----------------------------------------
429 *-----------------------------------------
431 *-----------------------------------------
433 *-----------------------------------------
435 *-----------------------------------------
436 * G | P | yes - probably this won't work.
437 *-----------------------------------------
439 *-----------------------------------------
441 *-----------------------------------------
443 *-----------------------------------------
446 if (ifa->ifa_addr->sa_family != AF_INET) {
450 /* Ok the address may be ok */
451 sin = (struct sockaddr_in *)ifa->ifa_addr;
452 if (sin->sin_addr.s_addr == 0) {
455 *sin_local = *sin_loop = 0;
456 if ((ifa->ifa_ifp->if_type == IFT_LOOP) ||
457 (IN4_ISLOOPBACK_ADDRESS(&sin->sin_addr))) {
461 if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
464 if (!loopscope && *sin_loop) {
465 /* Its a loopback address and we don't have loop scope */
468 /* its an acceptable address */
473 * This treats the address list on the ep as a restricted list
474 * (negative list). If a the passed address is listed, then
475 * the address is NOT allowed on the association.
478 sctp_is_addr_restricted(struct sctp_tcb *stcb, struct sockaddr *addr)
480 struct sctp_laddr *laddr;
485 /* There are no restrictions, no TCB :-) */
489 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
492 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
493 printf("There are %d addresses on the restricted list\n", cnt);
497 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
498 if (laddr->ifa == NULL) {
500 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
501 printf("Help I have fallen and I can't get up!\n");
507 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
509 printf("Restricted address[%d]:", cnt);
510 sctp_print_address(laddr->ifa->ifa_addr);
513 if (sctp_cmpaddr(addr, laddr->ifa->ifa_addr) == 1) {
514 /* Yes it is on the list */
522 sctp_is_addr_in_ep(struct sctp_inpcb *inp, struct ifaddr *ifa)
524 struct sctp_laddr *laddr;
528 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
529 if (laddr->ifa == NULL) {
531 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
532 printf("Help I have fallen and I can't get up!\n");
537 if (laddr->ifa->ifa_addr == NULL)
539 if (laddr->ifa == ifa)
542 if (laddr->ifa->ifa_addr->sa_family != ifa->ifa_addr->sa_family) {
543 /* skip non compatible address comparison */
546 if (sctp_cmpaddr(ifa->ifa_addr, laddr->ifa->ifa_addr) == 1) {
547 /* Yes it is restricted */
556 static struct in_addr
557 sctp_choose_v4_boundspecific_inp(struct sctp_inpcb *inp,
563 struct sctp_laddr *laddr;
564 struct sockaddr_in *sin;
567 uint8_t sin_loop, sin_local;
569 /* first question, is the ifn we will emit on
570 * in our list, if so, we want that one.
574 /* is a prefered one on the interface we route out? */
575 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
576 sin = sctp_is_v4_ifa_addr_prefered (ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
579 if (sctp_is_addr_in_ep(inp, ifa)) {
580 return (sin->sin_addr);
583 /* is an acceptable one on the interface we route out? */
584 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
585 sin = sctp_is_v4_ifa_addr_acceptable (ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
588 if (sctp_is_addr_in_ep(inp, ifa)) {
589 return (sin->sin_addr);
593 /* ok, what about a prefered address in the inp */
594 for (laddr = LIST_FIRST(&inp->sctp_addr_list);
595 laddr && (laddr != inp->next_addr_touse);
596 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
597 if (laddr->ifa == NULL) {
598 /* address has been removed */
601 sin = sctp_is_v4_ifa_addr_prefered (laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
604 return (sin->sin_addr);
607 /* ok, what about an acceptable address in the inp */
608 for (laddr = LIST_FIRST(&inp->sctp_addr_list);
609 laddr && (laddr != inp->next_addr_touse);
610 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
611 if (laddr->ifa == NULL) {
612 /* address has been removed */
615 sin = sctp_is_v4_ifa_addr_acceptable (laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
618 return (sin->sin_addr);
622 /* no address bound can be a source for the destination we are in trouble */
624 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
625 printf("Src address selection for EP, no acceptable src address found for address\n");
628 memset(&ans, 0, sizeof(ans));
634 static struct in_addr
635 sctp_choose_v4_boundspecific_stcb(struct sctp_inpcb *inp,
636 struct sctp_tcb *stcb,
637 struct sctp_nets *net,
641 int non_asoc_addr_ok)
644 * Here we have two cases, bound all asconf
645 * allowed. bound all asconf not allowed.
648 struct sctp_laddr *laddr, *starting_point;
652 uint8_t sin_loop, sin_local, start_at_beginning=0;
653 struct sockaddr_in *sin;
655 /* first question, is the ifn we will emit on
656 * in our list, if so, we want that one.
660 if (inp->sctp_flags & SCTP_PCB_FLAGS_DO_ASCONF) {
662 * Here we use the list of addresses on the endpoint. Then
663 * the addresses listed on the "restricted" list is just that,
664 * address that have not been added and can't be used (unless
665 * the non_asoc_addr_ok is set).
668 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
669 printf("Have a STCB - asconf allowed, not bound all have a netgative list\n");
672 /* first question, is the ifn we will emit on
673 * in our list, if so, we want that one.
676 /* first try for an prefered address on the ep */
677 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
678 if (sctp_is_addr_in_ep(inp, ifa)) {
679 sin = sctp_is_v4_ifa_addr_prefered (ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
682 if ((non_asoc_addr_ok == 0) &&
683 (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin))) {
684 /* on the no-no list */
687 return (sin->sin_addr);
690 /* next try for an acceptable address on the ep */
691 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
692 if (sctp_is_addr_in_ep(inp, ifa)) {
693 sin = sctp_is_v4_ifa_addr_acceptable (ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
696 if ((non_asoc_addr_ok == 0) &&
697 (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin))) {
698 /* on the no-no list */
701 return (sin->sin_addr);
706 /* if we can't find one like that then we must
707 * look at all addresses bound to pick one at
708 * first prefereable then secondly acceptable.
710 starting_point = stcb->asoc.last_used_address;
712 if (stcb->asoc.last_used_address == NULL) {
713 start_at_beginning=1;
714 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
716 /* search beginning with the last used address */
717 for (laddr = stcb->asoc.last_used_address; laddr;
718 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
719 if (laddr->ifa == NULL) {
720 /* address has been removed */
723 sin = sctp_is_v4_ifa_addr_prefered (laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
726 if ((non_asoc_addr_ok == 0) &&
727 (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin))) {
728 /* on the no-no list */
731 return (sin->sin_addr);
734 if (start_at_beginning == 0) {
735 stcb->asoc.last_used_address = NULL;
736 goto sctpv4_from_the_top;
738 /* now try for any higher scope than the destination */
739 stcb->asoc.last_used_address = starting_point;
740 start_at_beginning = 0;
741 sctpv4_from_the_top2:
742 if (stcb->asoc.last_used_address == NULL) {
743 start_at_beginning=1;
744 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
746 /* search beginning with the last used address */
747 for (laddr = stcb->asoc.last_used_address; laddr;
748 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
749 if (laddr->ifa == NULL) {
750 /* address has been removed */
753 sin = sctp_is_v4_ifa_addr_acceptable (laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
756 if ((non_asoc_addr_ok == 0) &&
757 (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin))) {
758 /* on the no-no list */
761 return (sin->sin_addr);
763 if (start_at_beginning == 0) {
764 stcb->asoc.last_used_address = NULL;
765 goto sctpv4_from_the_top2;
769 * Here we have an address list on the association, thats the
770 * only valid source addresses that we can use.
773 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
774 printf("Have a STCB - no asconf allowed, not bound all have a postive list\n");
777 /* First look at all addresses for one that is on
778 * the interface we route out
780 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
782 if (laddr->ifa == NULL) {
783 /* address has been removed */
786 sin = sctp_is_v4_ifa_addr_prefered (laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
789 /* first question, is laddr->ifa an address associated with the emit interface */
791 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
792 if (laddr->ifa == ifa) {
793 sin = (struct sockaddr_in *)laddr->ifa->ifa_addr;
794 return (sin->sin_addr);
796 if (sctp_cmpaddr(ifa->ifa_addr, laddr->ifa->ifa_addr) == 1) {
797 sin = (struct sockaddr_in *)laddr->ifa->ifa_addr;
798 return (sin->sin_addr);
803 /* what about an acceptable one on the interface? */
804 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
806 if (laddr->ifa == NULL) {
807 /* address has been removed */
810 sin = sctp_is_v4_ifa_addr_acceptable (laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
813 /* first question, is laddr->ifa an address associated with the emit interface */
815 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
816 if (laddr->ifa == ifa) {
817 sin = (struct sockaddr_in *)laddr->ifa->ifa_addr;
818 return (sin->sin_addr);
820 if (sctp_cmpaddr(ifa->ifa_addr, laddr->ifa->ifa_addr) == 1) {
821 sin = (struct sockaddr_in *)laddr->ifa->ifa_addr;
822 return (sin->sin_addr);
827 /* ok, next one that is preferable in general */
828 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
830 if (laddr->ifa == NULL) {
831 /* address has been removed */
834 sin = sctp_is_v4_ifa_addr_prefered (laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
837 return (sin->sin_addr);
840 /* last, what about one that is acceptable */
841 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
843 if (laddr->ifa == NULL) {
844 /* address has been removed */
847 sin = sctp_is_v4_ifa_addr_acceptable (laddr->ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
850 return (sin->sin_addr);
853 memset(&ans, 0, sizeof(ans));
857 static struct sockaddr_in *
858 sctp_select_v4_nth_prefered_addr_from_ifn_boundall (struct ifnet *ifn, struct sctp_tcb *stcb, int non_asoc_addr_ok,
859 uint8_t loopscope, uint8_t ipv4_scope, int cur_addr_num)
862 struct sockaddr_in *sin;
863 uint8_t sin_loop, sin_local;
864 int num_eligible_addr = 0;
865 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
866 sin = sctp_is_v4_ifa_addr_prefered (ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
870 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, (struct sockaddr *)sin)) {
871 /* It is restricted for some reason.. probably
877 if (cur_addr_num == num_eligible_addr) {
886 sctp_count_v4_num_prefered_boundall (struct ifnet *ifn, struct sctp_tcb *stcb, int non_asoc_addr_ok,
887 uint8_t loopscope, uint8_t ipv4_scope, uint8_t *sin_loop, uint8_t *sin_local)
890 struct sockaddr_in *sin;
891 int num_eligible_addr = 0;
893 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
894 sin = sctp_is_v4_ifa_addr_prefered (ifa, loopscope, ipv4_scope, sin_loop, sin_local);
898 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, (struct sockaddr *)sin)) {
899 /* It is restricted for some reason.. probably
907 return (num_eligible_addr);
911 static struct in_addr
912 sctp_choose_v4_boundall(struct sctp_inpcb *inp,
913 struct sctp_tcb *stcb,
914 struct sctp_nets *net,
918 int non_asoc_addr_ok)
920 int cur_addr_num=0, num_prefered=0;
921 uint8_t sin_loop, sin_local;
923 struct sockaddr_in *sin;
927 * For v4 we can use (in boundall) any address in the association. If
928 * non_asoc_addr_ok is set we can use any address (at least in theory).
929 * So we look for prefered addresses first. If we find one, we use it.
930 * Otherwise we next try to get an address on the interface, which we
931 * should be able to do (unless non_asoc_addr_ok is false and we are
932 * routed out that way). In these cases where we can't use the address
933 * of the interface we go through all the ifn's looking for an address
934 * we can use and fill that in. Punting means we send back address
935 * 0, which will probably cause problems actually since then IP will
936 * fill in the address of the route ifn, which means we probably already
937 * rejected it.. i.e. here comes an abort :-<.
941 cur_addr_num = net->indx_of_eligible_next_to_use;
944 goto bound_all_v4_plan_c;
946 num_prefered = sctp_count_v4_num_prefered_boundall (ifn, stcb, non_asoc_addr_ok, loopscope, ipv4_scope, &sin_loop, &sin_local);
948 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
949 printf("Found %d prefered source addresses\n", num_prefered);
952 if (num_prefered == 0) {
953 /* no eligible addresses, we must use some other
954 * interface address if we can find one.
956 goto bound_all_v4_plan_b;
958 /* Ok we have num_eligible_addr set with how many we can use,
959 * this may vary from call to call due to addresses being deprecated etc..
961 if (cur_addr_num >= num_prefered) {
964 /* select the nth address from the list (where cur_addr_num is the nth) and
965 * 0 is the first one, 1 is the second one etc...
968 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
969 printf("cur_addr_num:%d\n", cur_addr_num);
972 sin = sctp_select_v4_nth_prefered_addr_from_ifn_boundall (ifn, stcb, non_asoc_addr_ok, loopscope,
973 ipv4_scope, cur_addr_num);
975 /* if sin is NULL something changed??, plan_a now */
977 return (sin->sin_addr);
981 * plan_b: Look at the interface that we emit on
982 * and see if we can find an acceptable address.
985 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
986 sin = sctp_is_v4_ifa_addr_acceptable (ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
990 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, (struct sockaddr *)sin)) {
991 /* It is restricted for some reason.. probably
997 return (sin->sin_addr);
1000 * plan_c: Look at all interfaces and find a prefered
1001 * address. If we reache here we are in trouble I think.
1003 bound_all_v4_plan_c:
1004 for (ifn = TAILQ_FIRST(&ifnet);
1005 ifn && (ifn != inp->next_ifn_touse);
1006 ifn=TAILQ_NEXT(ifn, if_list)) {
1007 if (loopscope == 0 && ifn->if_type == IFT_LOOP) {
1008 /* wrong base scope */
1011 if (ifn == rt->rt_ifp)
1012 /* already looked at this guy */
1014 num_prefered = sctp_count_v4_num_prefered_boundall (ifn, stcb, non_asoc_addr_ok,
1015 loopscope, ipv4_scope, &sin_loop, &sin_local);
1017 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1018 printf("Found ifn:%x %d prefered source addresses\n", (u_int)ifn, num_prefered);
1021 if (num_prefered == 0) {
1023 * None on this interface.
1027 /* Ok we have num_eligible_addr set with how many we can use,
1028 * this may vary from call to call due to addresses being deprecated etc..
1030 if (cur_addr_num >= num_prefered) {
1033 sin = sctp_select_v4_nth_prefered_addr_from_ifn_boundall (ifn, stcb, non_asoc_addr_ok, loopscope,
1034 ipv4_scope, cur_addr_num);
1037 return (sin->sin_addr);
1042 * plan_d: We are in deep trouble. No prefered address on
1043 * any interface. And the emit interface does not
1044 * even have an acceptable address. Take anything
1045 * we can get! If this does not work we are
1046 * probably going to emit a packet that will
1047 * illicit an ABORT, falling through.
1050 for (ifn = TAILQ_FIRST(&ifnet);
1051 ifn && (ifn != inp->next_ifn_touse);
1052 ifn=TAILQ_NEXT(ifn, if_list)) {
1053 if (loopscope == 0 && ifn->if_type == IFT_LOOP) {
1054 /* wrong base scope */
1057 if (ifn == rt->rt_ifp)
1058 /* already looked at this guy */
1061 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
1062 sin = sctp_is_v4_ifa_addr_acceptable (ifa, loopscope, ipv4_scope, &sin_loop, &sin_local);
1066 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, (struct sockaddr *)sin)) {
1067 /* It is restricted for some reason.. probably
1073 return (sin->sin_addr);
1077 * Ok we can find NO address to source from that is
1078 * not on our negative list. It is either the special
1079 * ASCONF case where we are sourceing from a intf that
1080 * has been ifconfig'd to a different address (i.e.
1081 * it holds a ADD/DEL/SET-PRIM and the proper lookup
1082 * address. OR we are hosed, and this baby is going
1083 * to abort the association.
1085 if (non_asoc_addr_ok) {
1086 return (((struct sockaddr_in *)(rt->rt_ifa->ifa_addr))->sin_addr);
1088 memset(&ans, 0, sizeof(ans));
1095 /* tcb may be NULL */
1097 sctp_ipv4_source_address_selection(struct sctp_inpcb *inp,
1098 struct sctp_tcb *stcb, struct route *ro, struct sctp_nets *net,
1099 int non_asoc_addr_ok)
1102 struct sockaddr_in *to = (struct sockaddr_in *)&ro->ro_dst;
1103 uint8_t ipv4_scope, loopscope;
1106 * - Find the route if needed, cache if I can.
1107 * - Look at interface address in route, Is it
1108 * in the bound list. If so we have the best source.
1109 * - If not we must rotate amongst the addresses.
1113 * Do we need to pay attention to scope. We can have
1114 * a private address or a global address we are sourcing
1115 * or sending to. So if we draw it out
1116 * source * dest * result
1117 * ------------------------------------------
1118 * a Private * Global * NAT?
1119 * ------------------------------------------
1120 * b Private * Private * No problem
1121 * ------------------------------------------
1122 * c Global * Private * Huh, How will this work?
1123 * ------------------------------------------
1124 * d Global * Global * No Problem
1125 * ------------------------------------------
1127 * And then we add to that what happens if there are multiple
1128 * addresses assigned to an interface. Remember the ifa on a
1129 * ifn is a linked list of addresses. So one interface can
1130 * have more than one IPv4 address. What happens if we
1131 * have both a private and a global address? Do we then
1132 * use context of destination to sort out which one is
1133 * best? And what about NAT's sending P->G may get you
1134 * a NAT translation, or should you select the G thats
1135 * on the interface in preference.
1139 * - count the number of addresses on the interface.
1140 * - if its one, no problem except case <c>. For <a>
1141 * we will assume a NAT out there.
1142 * - if there are more than one, then we need to worry
1143 * about scope P or G. We should prefer G -> G and
1144 * P -> P if possible. Then as a secondary fall back
1145 * to mixed types G->P being a last ditch one.
1146 * - The above all works for bound all, but bound
1147 * specific we need to use the same concept but instead
1148 * only consider the bound addresses. If the bound set
1149 * is NOT assigned to the interface then we must use
1150 * rotation amongst them.
1152 * Notes: For v4, we can always punt and let ip_output
1153 * decide by sending back a source of 0.0.0.0
1156 if (ro->ro_rt == NULL) {
1158 * Need a route to cache.
1161 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
1162 rtalloc_ign(ro, 0UL);
1167 if (ro->ro_rt == NULL) {
1168 /* No route to host .. punt */
1169 memset(&ans, 0, sizeof(ans));
1172 /* Setup our scopes */
1174 ipv4_scope = stcb->asoc.ipv4_local_scope;
1175 loopscope = stcb->asoc.loopback_scope;
1177 /* Scope based on outbound address */
1178 if ((IN4_ISPRIVATE_ADDRESS(&to->sin_addr))) {
1181 } else if (IN4_ISLOOPBACK_ADDRESS(&to->sin_addr)) {
1190 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1191 printf("Scope setup loop:%d ipv4_scope:%d\n",
1192 loopscope, ipv4_scope);
1195 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1197 * When bound to all if the address list is set
1198 * it is a negative list. Addresses being added
1201 return (sctp_choose_v4_boundall(inp, stcb, net, ro->ro_rt,
1202 ipv4_scope, loopscope, non_asoc_addr_ok));
1205 * Three possiblities here:
1207 * a) stcb is NULL, which means we operate only from
1208 * the list of addresses (ifa's) bound to the assoc and
1209 * we care not about the list.
1210 * b) stcb is NOT-NULL, which means we have an assoc structure and
1211 * auto-asconf is on. This means that the list of addresses is
1212 * a NOT list. We use the list from the inp, but any listed address
1213 * in our list is NOT yet added. However if the non_asoc_addr_ok is
1214 * set we CAN use an address NOT available (i.e. being added). Its
1216 * c) stcb is NOT-NULL, which means we have an assoc structure and
1217 * auto-asconf is off. This means that the list of addresses is
1218 * the ONLY addresses I can use.. its positive.
1220 * Note we collapse b & c into the same function just like in
1221 * the v6 address selection.
1224 return (sctp_choose_v4_boundspecific_stcb(inp, stcb, net,
1225 ro->ro_rt, ipv4_scope, loopscope, non_asoc_addr_ok));
1227 return (sctp_choose_v4_boundspecific_inp(inp, ro->ro_rt,
1228 ipv4_scope, loopscope));
1230 /* this should not be reached */
1231 memset(&ans, 0, sizeof(ans));
1237 static struct sockaddr_in6 *
1238 sctp_is_v6_ifa_addr_acceptable (struct ifaddr *ifa, int loopscope, int loc_scope, int *sin_loop, int *sin_local)
1240 struct in6_ifaddr *ifa6;
1241 struct sockaddr_in6 *sin6;
1243 if (ifa->ifa_addr->sa_family != AF_INET6) {
1247 ifa6 = (struct in6_ifaddr *)ifa;
1248 /* ok to use deprecated addresses? */
1249 if (!ip6_use_deprecated) {
1250 if (IFA6_IS_DEPRECATED(ifa6)) {
1251 /* can't use this type */
1255 /* are we ok, with the current state of this address? */
1256 if (ifa6->ia6_flags &
1257 (IN6_IFF_DETACHED | IN6_IFF_NOTREADY | IN6_IFF_ANYCAST)) {
1258 /* Can't use these types */
1261 /* Ok the address may be ok */
1262 sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
1263 *sin_local = *sin_loop = 0;
1264 if ((ifa->ifa_ifp->if_type == IFT_LOOP) ||
1265 (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr))) {
1268 if (!loopscope && *sin_loop) {
1269 /* Its a loopback address and we don't have loop scope */
1272 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1273 /* we skip unspecifed addresses */
1277 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
1280 if (!loc_scope && *sin_local) {
1281 /* Its a link local address, and we don't have link local scope */
1288 static struct sockaddr_in6 *
1289 sctp_choose_v6_boundspecific_stcb(struct sctp_inpcb *inp,
1290 struct sctp_tcb *stcb,
1291 struct sctp_nets *net,
1295 int non_asoc_addr_ok)
1298 * Each endpoint has a list of local addresses associated
1299 * with it. The address list is either a "negative list" i.e.
1300 * those addresses that are NOT allowed to be used as a source OR
1301 * a "postive list" i.e. those addresses that CAN be used.
1303 * Its a negative list if asconf is allowed. What we do
1304 * in this case is use the ep address list BUT we have
1305 * to cross check it against the negative list.
1307 * In the case where NO asconf is allowed, we have just
1308 * a straight association level list that we must use to
1309 * find a source address.
1311 struct sctp_laddr *laddr, *starting_point;
1312 struct sockaddr_in6 *sin6;
1313 int sin_loop, sin_local;
1314 int start_at_beginning=0;
1319 if (inp->sctp_flags & SCTP_PCB_FLAGS_DO_ASCONF) {
1321 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1322 printf("Have a STCB - asconf allowed, not bound all have a netgative list\n");
1325 /* first question, is the ifn we will emit on
1326 * in our list, if so, we want that one.
1329 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
1330 if (sctp_is_addr_in_ep(inp, ifa)) {
1331 sin6 = sctp_is_v6_ifa_addr_acceptable (ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1334 if ((non_asoc_addr_ok == 0) &&
1335 (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin6))) {
1336 /* on the no-no list */
1343 starting_point = stcb->asoc.last_used_address;
1344 /* First try for matching scope */
1346 if (stcb->asoc.last_used_address == NULL) {
1347 start_at_beginning=1;
1348 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
1350 /* search beginning with the last used address */
1351 for (laddr = stcb->asoc.last_used_address; laddr;
1352 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
1353 if (laddr->ifa == NULL) {
1354 /* address has been removed */
1357 sin6 = sctp_is_v6_ifa_addr_acceptable (laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1360 if ((non_asoc_addr_ok == 0) && (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin6))) {
1361 /* on the no-no list */
1364 /* is it of matching scope ? */
1365 if ((loopscope == 0) &&
1369 /* all of global scope we are ok with it */
1372 if (loopscope && sin_loop)
1373 /* both on the loopback, thats ok */
1375 if (loc_scope && sin_local)
1376 /* both local scope */
1380 if (start_at_beginning == 0) {
1381 stcb->asoc.last_used_address = NULL;
1382 goto sctp_from_the_top;
1384 /* now try for any higher scope than the destination */
1385 stcb->asoc.last_used_address = starting_point;
1386 start_at_beginning = 0;
1388 if (stcb->asoc.last_used_address == NULL) {
1389 start_at_beginning=1;
1390 stcb->asoc.last_used_address = LIST_FIRST(&inp->sctp_addr_list);
1392 /* search beginning with the last used address */
1393 for (laddr = stcb->asoc.last_used_address; laddr;
1394 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
1395 if (laddr->ifa == NULL) {
1396 /* address has been removed */
1399 sin6 = sctp_is_v6_ifa_addr_acceptable (laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1402 if ((non_asoc_addr_ok == 0) && (sctp_is_addr_restricted(stcb, (struct sockaddr *)sin6))) {
1403 /* on the no-no list */
1408 if (start_at_beginning == 0) {
1409 stcb->asoc.last_used_address = NULL;
1410 goto sctp_from_the_top2;
1414 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1415 printf("Have a STCB - no asconf allowed, not bound all have a postive list\n");
1418 /* First try for interface output match */
1419 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
1421 if (laddr->ifa == NULL) {
1422 /* address has been removed */
1425 sin6 = sctp_is_v6_ifa_addr_acceptable (laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1428 /* first question, is laddr->ifa an address associated with the emit interface */
1430 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
1431 if (laddr->ifa == ifa) {
1432 sin6 = (struct sockaddr_in6 *)laddr->ifa->ifa_addr;
1435 if (sctp_cmpaddr(ifa->ifa_addr, laddr->ifa->ifa_addr) == 1) {
1436 sin6 = (struct sockaddr_in6 *)laddr->ifa->ifa_addr;
1442 /* Next try for matching scope */
1443 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
1445 if (laddr->ifa == NULL) {
1446 /* address has been removed */
1449 sin6 = sctp_is_v6_ifa_addr_acceptable (laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1453 if ((loopscope == 0) &&
1457 /* all of global scope we are ok with it */
1460 if (loopscope && sin_loop)
1461 /* both on the loopback, thats ok */
1463 if (loc_scope && sin_local)
1464 /* both local scope */
1467 /* ok, now try for a higher scope in the source address */
1468 /* First try for matching scope */
1469 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
1471 if (laddr->ifa == NULL) {
1472 /* address has been removed */
1475 sin6 = sctp_is_v6_ifa_addr_acceptable (laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1484 static struct sockaddr_in6 *
1485 sctp_choose_v6_boundspecific_inp(struct sctp_inpcb *inp,
1491 * Here we are bound specific and have only
1492 * an inp. We must find an address that is bound
1493 * that we can give out as a src address. We
1494 * prefer two addresses of same scope if we can
1495 * find them that way.
1497 struct sctp_laddr *laddr;
1498 struct sockaddr_in6 *sin6;
1501 int sin_loop, sin_local;
1503 /* first question, is the ifn we will emit on
1504 * in our list, if so, we want that one.
1509 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
1510 sin6 = sctp_is_v6_ifa_addr_acceptable (ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1513 if (sctp_is_addr_in_ep(inp, ifa)) {
1518 for (laddr = LIST_FIRST(&inp->sctp_addr_list);
1519 laddr && (laddr != inp->next_addr_touse);
1520 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
1521 if (laddr->ifa == NULL) {
1522 /* address has been removed */
1525 sin6 = sctp_is_v6_ifa_addr_acceptable (laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1529 if ((loopscope == 0) &&
1533 /* all of global scope we are ok with it */
1536 if (loopscope && sin_loop)
1537 /* both on the loopback, thats ok */
1539 if (loc_scope && sin_local)
1540 /* both local scope */
1544 /* if we reach here, we could not find two addresses
1545 * of the same scope to give out. Lets look for any higher level
1546 * scope for a source address.
1548 for (laddr = LIST_FIRST(&inp->sctp_addr_list);
1549 laddr && (laddr != inp->next_addr_touse);
1550 laddr = LIST_NEXT(laddr, sctp_nxt_addr)) {
1551 if (laddr->ifa == NULL) {
1552 /* address has been removed */
1555 sin6 = sctp_is_v6_ifa_addr_acceptable (laddr->ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1560 /* no address bound can be a source for the destination */
1562 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1563 printf("Src address selection for EP, no acceptable src address found for address\n");
1570 static struct sockaddr_in6 *
1571 sctp_select_v6_nth_addr_from_ifn_boundall (struct ifnet *ifn, struct sctp_tcb *stcb, int non_asoc_addr_ok, uint8_t loopscope,
1572 uint8_t loc_scope, int cur_addr_num, int match_scope)
1575 struct sockaddr_in6 *sin6;
1576 int sin_loop, sin_local;
1577 int num_eligible_addr = 0;
1579 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
1580 sin6 = sctp_is_v6_ifa_addr_acceptable (ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1584 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, (struct sockaddr *)sin6)) {
1585 /* It is restricted for some reason.. probably
1592 /* Here we are asked to match scope if possible */
1593 if (loopscope && sin_loop)
1594 /* src and destination are loopback scope */
1596 if (loc_scope && sin_local)
1597 /* src and destination are local scope */
1599 if ((loopscope == 0) &&
1603 /* src and destination are global scope */
1608 if (num_eligible_addr == cur_addr_num) {
1612 num_eligible_addr++;
1619 sctp_count_v6_num_eligible_boundall (struct ifnet *ifn, struct sctp_tcb *stcb,
1620 int non_asoc_addr_ok, uint8_t loopscope, uint8_t loc_scope)
1623 struct sockaddr_in6 *sin6;
1624 int num_eligible_addr = 0;
1625 int sin_loop, sin_local;
1627 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
1628 sin6 = sctp_is_v6_ifa_addr_acceptable (ifa, loopscope, loc_scope, &sin_loop, &sin_local);
1632 if ((non_asoc_addr_ok == 0) && sctp_is_addr_restricted(stcb, (struct sockaddr *)sin6)) {
1633 /* It is restricted for some reason.. probably
1639 num_eligible_addr++;
1641 return (num_eligible_addr);
1645 static struct sockaddr_in6 *
1646 sctp_choose_v6_boundall(struct sctp_inpcb *inp,
1647 struct sctp_tcb *stcb,
1648 struct sctp_nets *net,
1652 int non_asoc_addr_ok)
1654 /* Ok, we are bound all SO any address
1655 * is ok to use as long as it is NOT in the negative
1658 int num_eligible_addr;
1660 int started_at_beginning=0;
1661 int match_scope_prefered;
1662 /* first question is, how many eligible addresses are
1663 * there for the destination ifn that we are using that
1664 * are within the proper scope?
1667 struct sockaddr_in6 *sin6;
1671 cur_addr_num = net->indx_of_eligible_next_to_use;
1673 if (cur_addr_num == 0) {
1674 match_scope_prefered = 1;
1676 match_scope_prefered = 0;
1678 num_eligible_addr = sctp_count_v6_num_eligible_boundall (ifn, stcb, non_asoc_addr_ok, loopscope, loc_scope);
1680 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1681 printf("Found %d eligible source addresses\n", num_eligible_addr);
1684 if (num_eligible_addr == 0) {
1685 /* no eligible addresses, we must use some other
1686 * interface address if we can find one.
1688 goto bound_all_v6_plan_b;
1690 /* Ok we have num_eligible_addr set with how many we can use,
1691 * this may vary from call to call due to addresses being deprecated etc..
1693 if (cur_addr_num >= num_eligible_addr) {
1696 /* select the nth address from the list (where cur_addr_num is the nth) and
1697 * 0 is the first one, 1 is the second one etc...
1700 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1701 printf("cur_addr_num:%d match_scope_prefered:%d select it\n",
1702 cur_addr_num, match_scope_prefered);
1705 sin6 = sctp_select_v6_nth_addr_from_ifn_boundall (ifn, stcb, non_asoc_addr_ok, loopscope,
1706 loc_scope, cur_addr_num, match_scope_prefered);
1707 if (match_scope_prefered && (sin6 == NULL)) {
1708 /* retry without the preference for matching scope */
1710 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1711 printf("retry with no match_scope_prefered\n");
1714 sin6 = sctp_select_v6_nth_addr_from_ifn_boundall (ifn, stcb, non_asoc_addr_ok, loopscope,
1715 loc_scope, cur_addr_num, 0);
1719 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1720 printf("Selected address %d ifn:%x for the route\n", cur_addr_num, (u_int)ifn);
1724 /* store so we get the next one */
1725 if (cur_addr_num < 255)
1726 net->indx_of_eligible_next_to_use = cur_addr_num + 1;
1728 net->indx_of_eligible_next_to_use = 0;
1732 num_eligible_addr = 0;
1733 bound_all_v6_plan_b:
1734 /* ok, if we reach here we either fell through
1735 * due to something changing during an interupt (unlikely)
1736 * or we have NO eligible source addresses for the ifn
1737 * of the route (most likely). We must look at all the other
1738 * interfaces EXCEPT rt->rt_ifp and do the same game.
1741 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1742 printf("bound-all Plan B\n");
1745 if (inp->next_ifn_touse == NULL) {
1746 started_at_beginning=1;
1747 inp->next_ifn_touse = TAILQ_FIRST(&ifnet);
1749 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1750 printf("Start at first IFN:%x\n", (u_int)inp->next_ifn_touse);
1754 inp->next_ifn_touse = TAILQ_NEXT(inp->next_ifn_touse, if_list);
1756 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1757 printf("Resume at IFN:%x\n", (u_int)inp->next_ifn_touse);
1760 if (inp->next_ifn_touse == NULL) {
1762 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1763 printf("IFN Resets\n");
1766 started_at_beginning=1;
1767 inp->next_ifn_touse = TAILQ_FIRST(&ifnet);
1770 for (ifn = inp->next_ifn_touse; ifn;
1771 ifn = TAILQ_NEXT(ifn, if_list)) {
1772 if (loopscope == 0 && ifn->if_type == IFT_LOOP) {
1773 /* wrong base scope */
1776 if (loc_scope && (ifn->if_index != loc_scope)) {
1777 /* by definition the scope (from to->sin6_scopeid)
1778 * must match that of the interface. If not then
1779 * we could pick a wrong scope for the address.
1780 * Ususally we don't hit plan-b since the route
1781 * handles this. However we can hit plan-b when
1782 * we send to local-host so the route is the
1783 * loopback interface, but the destination is a
1788 if (ifn == rt->rt_ifp) {
1789 /* already looked at this guy */
1792 /* Address rotation will only work when we are not
1793 * rotating sourced interfaces and are using the interface
1794 * of the route. We would need to have a per interface index
1795 * in order to do proper rotation.
1797 num_eligible_addr = sctp_count_v6_num_eligible_boundall (ifn, stcb, non_asoc_addr_ok, loopscope, loc_scope);
1799 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1800 printf("IFN:%x has %d eligible\n", (u_int)ifn, num_eligible_addr);
1803 if (num_eligible_addr == 0) {
1804 /* none we can use */
1807 /* Ok we have num_eligible_addr set with how many we can use,
1808 * this may vary from call to call due to addresses being deprecated etc..
1810 inp->next_ifn_touse = ifn;
1812 /* select the first one we can find with perference for matching scope.
1814 sin6 = sctp_select_v6_nth_addr_from_ifn_boundall (ifn, stcb, non_asoc_addr_ok, loopscope, loc_scope, 0, 1);
1816 /* can't find one with matching scope how about a source with higher
1819 sin6 = sctp_select_v6_nth_addr_from_ifn_boundall (ifn, stcb, non_asoc_addr_ok, loopscope, loc_scope, 0, 0);
1821 /* Hmm, can't find one in the interface now */
1825 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1826 printf("Selected the %d'th address of ifn:%x\n",
1833 if (started_at_beginning == 0) {
1834 /* we have not been through all of them yet, force
1835 * us to go through them all.
1838 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1839 printf("Force a recycle\n");
1842 inp->next_ifn_touse = NULL;
1843 goto bound_all_v6_plan_b;
1849 /* stcb and net may be NULL */
1851 sctp_ipv6_source_address_selection(struct sctp_inpcb *inp,
1852 struct sctp_tcb *stcb, struct route *ro, struct sctp_nets *net,
1853 int non_asoc_addr_ok)
1855 struct in6_addr ans;
1856 struct sockaddr_in6 *rt_addr;
1857 uint8_t loc_scope, loopscope;
1858 struct sockaddr_in6 *to = (struct sockaddr_in6 *)&ro->ro_dst;
1861 * This routine is tricky standard v6 src address
1862 * selection cannot take into account what we have
1863 * bound etc, so we can't use it.
1865 * Instead here is what we must do:
1866 * 1) Make sure we have a route, if we
1867 * don't have a route we can never reach the peer.
1868 * 2) Once we have a route, determine the scope of the
1869 * route. Link local, loopback or global.
1870 * 3) Next we divide into three types. Either we
1871 * are bound all.. which means we want to use
1872 * one of the addresses of the interface we are
1874 * 4a) We have not stcb, which means we are using the
1875 * specific addresses bound on an inp, in this
1876 * case we are similar to the stcb case (4b below)
1877 * accept the list is always a positive list.<or>
1878 * 4b) We are bound specific with a stcb, which means we have a
1879 * list of bound addresses and we must see if the
1880 * ifn of the route is actually one of the bound addresses.
1881 * If not, then we must rotate addresses amongst properly
1882 * scoped bound addresses, if so we use the address
1884 * 5) Always, no matter which path we take through the above
1885 * we must be sure the source address we use is allowed to
1886 * be used. I.e. IN6_IFF_DETACHED, IN6_IFF_NOTREADY, and IN6_IFF_ANYCAST
1887 * addresses cannot be used.
1888 * 6) Addresses that are deprecated MAY be used
1889 * if (!ip6_use_deprecated) {
1890 * if (IFA6_IS_DEPRECATED(ifa6)) {
1896 /*** 1> determine route, if not already done */
1897 if (ro->ro_rt == NULL) {
1899 * Need a route to cache.
1901 #ifndef SCOPEDROUTING
1903 scope_save = to->sin6_scope_id;
1904 to->sin6_scope_id = 0;
1907 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
1908 rtalloc_ign(ro, 0UL);
1912 #ifndef SCOPEDROUTING
1913 to->sin6_scope_id = scope_save;
1916 if (ro->ro_rt == NULL) {
1918 * no route to host. this packet is going no-where.
1919 * We probably should make sure we arrange to send back
1923 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1924 printf("No route to host, this packet cannot be sent!\n");
1927 memset(&ans, 0, sizeof(ans));
1931 /*** 2a> determine scope for outbound address/route */
1932 loc_scope = loopscope = 0;
1934 * We base our scope on the outbound packet scope and route,
1935 * NOT the TCB (if there is one). This way in local scope we will only
1936 * use a local scope src address when we send to a local address.
1939 if (IN6_IS_ADDR_LOOPBACK(&to->sin6_addr)) {
1940 /* If the route goes to the loopback address OR
1941 * the address is a loopback address, we are loopback
1945 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1946 printf("Loopback scope is set\n");
1952 /* mark it as local */
1953 net->addr_is_local = 1;
1956 } else if (IN6_IS_ADDR_LINKLOCAL(&to->sin6_addr)) {
1958 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1959 printf("Link local scope is set, id:%d\n", to->sin6_scope_id);
1962 if (to->sin6_scope_id)
1963 loc_scope = to->sin6_scope_id;
1970 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1971 printf("Global scope is set\n");
1976 /* now, depending on which way we are bound we call the appropriate
1977 * routine to do steps 3-6
1980 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1981 printf("Destination address:");
1982 sctp_print_address((struct sockaddr *)to);
1986 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1988 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1989 printf("Calling bound-all src addr selection for v6\n");
1992 rt_addr = sctp_choose_v6_boundall(inp, stcb, net, ro->ro_rt, loc_scope, loopscope, non_asoc_addr_ok);
1995 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
1996 printf("Calling bound-specific src addr selection for v6\n");
2000 rt_addr = sctp_choose_v6_boundspecific_stcb(inp, stcb, net, ro->ro_rt, loc_scope, loopscope, non_asoc_addr_ok);
2002 /* we can't have a non-asoc address since we have no association */
2003 rt_addr = sctp_choose_v6_boundspecific_inp(inp, ro->ro_rt, loc_scope, loopscope);
2005 if (rt_addr == NULL) {
2006 /* no suitable address? */
2007 struct in6_addr in6;
2009 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2010 printf("V6 packet will reach dead-end no suitable src address\n");
2013 memset(&in6, 0, sizeof(in6));
2017 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2018 printf("Source address selected is:");
2019 sctp_print_address((struct sockaddr *)rt_addr);
2022 return (rt_addr->sin6_addr);
2026 sctp_get_ect(struct sctp_tcb *stcb,
2027 struct sctp_tmit_chunk *chk)
2029 uint8_t this_random;
2035 if (sctp_ecn_nonce == 0)
2036 /* no nonce, always return ECT0 */
2037 return (SCTP_ECT0_BIT);
2039 if (stcb->asoc.peer_supports_ecn_nonce == 0) {
2040 /* Peer does NOT support it, so we send a ECT0 only */
2041 return (SCTP_ECT0_BIT);
2045 return (SCTP_ECT0_BIT);
2047 if (((stcb->asoc.hb_random_idx == 3) &&
2048 (stcb->asoc.hb_ect_randombit > 7)) ||
2049 (stcb->asoc.hb_random_idx > 3)) {
2051 rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
2052 memcpy(stcb->asoc.hb_random_values, &rndval,
2053 sizeof(stcb->asoc.hb_random_values));
2054 this_random = stcb->asoc.hb_random_values[0];
2055 stcb->asoc.hb_random_idx = 0;
2056 stcb->asoc.hb_ect_randombit = 0;
2058 if (stcb->asoc.hb_ect_randombit > 7) {
2059 stcb->asoc.hb_ect_randombit = 0;
2060 stcb->asoc.hb_random_idx++;
2062 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
2064 if ((this_random >> stcb->asoc.hb_ect_randombit) & 0x01) {
2066 /* ECN Nonce stuff */
2067 chk->rec.data.ect_nonce = SCTP_ECT1_BIT;
2068 stcb->asoc.hb_ect_randombit++;
2069 return (SCTP_ECT1_BIT);
2071 stcb->asoc.hb_ect_randombit++;
2072 return (SCTP_ECT0_BIT);
2076 extern int sctp_no_csum_on_loopback;
2079 sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
2080 struct sctp_tcb *stcb, /* may be NULL */
2081 struct sctp_nets *net,
2082 struct sockaddr *to,
2084 int nofragment_flag,
2086 struct sctp_tmit_chunk *chk,
2088 /* nofragment_flag to tell if IP_DF should be set (IPv4 only) */
2091 * Given a mbuf chain (via m_next) that holds a packet header
2092 * WITH a SCTPHDR but no IP header, endpoint inp and sa structure.
2093 * - calculate SCTP checksum and fill in
2094 * - prepend a IP address header
2095 * - if boundall use INADDR_ANY
2096 * - if boundspecific do source address selection
2097 * - set fragmentation option for ipV4
2098 * - On return from IP output, check/adjust mtu size
2099 * - of output interface and smallest_mtu size as well.
2101 struct sctphdr *sctphdr;
2105 unsigned int have_mtu;
2108 if ((net) && (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)) {
2112 if ((m->m_flags & M_PKTHDR) == 0) {
2114 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2115 printf("Software error: sctp_lowlevel_chunk_output() called with non pkthdr!\n");
2121 /* Calculate the csum and fill in the length of the packet */
2122 sctphdr = mtod(m, struct sctphdr *);
2124 if (sctp_no_csum_on_loopback &&
2126 (stcb->asoc.loopback_scope)) {
2127 sctphdr->checksum = 0;
2128 m->m_pkthdr.len = sctp_calculate_len(m);
2130 sctphdr->checksum = 0;
2131 csum = sctp_calculate_sum(m, &m->m_pkthdr.len, 0);
2132 sctphdr->checksum = csum;
2134 if (to->sa_family == AF_INET) {
2136 struct route iproute;
2137 M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
2139 /* failed to prepend data, give up */
2142 ip = mtod(m, struct ip *);
2143 ip->ip_v = IPVERSION;
2144 ip->ip_hl = (sizeof(struct ip) >> 2);
2145 if (nofragment_flag) {
2146 #if defined(WITH_CONVERT_IP_OFF) || defined(__FreeBSD__) || defined(__DragonFly__)
2147 #if defined( __OpenBSD__) || defined(__NetBSD__)
2148 /* OpenBSD has WITH_CONVERT_IP_OFF defined?? */
2149 ip->ip_off = htons(IP_DF);
2154 ip->ip_off = htons(IP_DF);
2159 /* FreeBSD and Apple have RANDOM_IP_ID switch */
2160 #if defined(RANDOM_IP_ID) || defined(__NetBSD__) || defined(__OpenBSD__)
2161 ip->ip_id = htons(ip_randomid());
2163 ip->ip_id = htons(ip_id++);
2166 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2167 ip->ip_ttl = inp->ip_inp.inp.inp_ip_ttl;
2169 ip->ip_ttl = inp->inp_ip_ttl;
2171 #if defined(__OpenBSD__) || defined(__NetBSD__)
2172 ip->ip_len = htons(m->m_pkthdr.len);
2174 ip->ip_len = m->m_pkthdr.len;
2177 if ((stcb->asoc.ecn_allowed) && ecn_ok) {
2179 #if defined(__FreeBSD__) || defined (__APPLE__) || defined(__DragonFly__)
2180 ip->ip_tos = (u_char)((inp->ip_inp.inp.inp_ip_tos & 0x000000fc) |
2181 sctp_get_ect(stcb, chk));
2182 #elif defined(__NetBSD__)
2183 ip->ip_tos = (u_char)((inp->ip_inp.inp.inp_ip.ip_tos & 0x000000fc) |
2184 sctp_get_ect(stcb, chk));
2186 ip->ip_tos = (u_char)((inp->inp_ip_tos & 0x000000fc) |
2187 sctp_get_ect(stcb, chk));
2191 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2192 ip->ip_tos = inp->ip_inp.inp.inp_ip_tos;
2193 #elif defined(__NetBSD__)
2194 ip->ip_tos = inp->ip_inp.inp.inp_ip.ip_tos;
2196 ip->ip_tos = inp->inp_ip_tos;
2200 /* no association at all */
2201 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
2202 ip->ip_tos = inp->ip_inp.inp.inp_ip_tos;
2204 ip->ip_tos = inp->inp_ip_tos;
2207 ip->ip_p = IPPROTO_SCTP;
2211 memset(&iproute, 0, sizeof(iproute));
2212 memcpy(&ro->ro_dst, to, to->sa_len);
2214 ro = (struct route *)&net->ro;
2216 /* Now the address selection part */
2217 ip->ip_dst.s_addr = ((struct sockaddr_in *)to)->sin_addr.s_addr;
2219 /* call the routine to select the src address */
2221 if (net->src_addr_selected == 0) {
2222 /* Cache the source address */
2223 ((struct sockaddr_in *)&net->ro._s_addr)->sin_addr = sctp_ipv4_source_address_selection(inp,
2225 ro, net, out_of_asoc_ok);
2227 net->src_addr_selected = 1;
2229 ip->ip_src = ((struct sockaddr_in *)&net->ro._s_addr)->sin_addr;
2231 ip->ip_src = sctp_ipv4_source_address_selection(inp,
2232 stcb, ro, net, out_of_asoc_ok);
2235 * If source address selection fails and we find no route then
2236 * the ip_ouput should fail as well with a NO_ROUTE_TO_HOST
2237 * type error. We probably should catch that somewhere and
2238 * abort the association right away (assuming this is an INIT
2241 if ((ro->ro_rt == NULL)) {
2243 * src addr selection failed to find a route (or valid
2244 * source addr), so we can't get there from here!
2247 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2248 printf("low_level_output: dropped v4 packet- no valid source addr\n");
2249 printf("Destination was %x\n", (u_int)(ntohl(ip->ip_dst.s_addr)));
2251 #endif /* SCTP_DEBUG */
2253 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb)
2254 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
2256 SCTP_FAILED_THRESHOLD,
2258 net->dest_state &= ~SCTP_ADDR_REACHABLE;
2259 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
2261 if (net == stcb->asoc.primary_destination) {
2262 /* need a new primary */
2263 struct sctp_nets *alt;
2264 alt = sctp_find_alternate_net(stcb, net);
2266 if (sctp_set_primary_addr(stcb,
2267 (struct sockaddr *)NULL,
2269 net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
2270 net->src_addr_selected = 0;
2277 return (EHOSTUNREACH);
2279 have_mtu = ro->ro_rt->rt_ifp->if_mtu;
2282 o_flgs = (IP_RAWOUTPUT | (inp->sctp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST)));
2284 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
2285 printf("Calling ipv4 output routine from low level src addr:%x\n",
2286 (u_int)(ntohl(ip->ip_src.s_addr)));
2287 printf("Destination is %x\n", (u_int)(ntohl(ip->ip_dst.s_addr)));
2288 printf("RTP route is %p through\n", ro->ro_rt);
2291 if ((have_mtu) && (net) && (have_mtu > net->mtu)) {
2292 ro->ro_rt->rt_ifp->if_mtu = net->mtu;
2294 ret = ip_output(m, inp->ip_inp.inp.inp_options,
2295 ro, o_flgs, inp->ip_inp.inp.inp_moptions
2296 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
2297 || defined(__DragonFly__)
2298 , (struct inpcb *)NULL
2300 #if defined(__NetBSD__)
2301 ,(struct socket *)inp->sctp_socket
2305 if ((ro->ro_rt) && (have_mtu) && (net) && (have_mtu > net->mtu)) {
2306 ro->ro_rt->rt_ifp->if_mtu = have_mtu;
2308 sctp_pegs[SCTP_DATAGRAMS_SENT]++;
2310 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
2311 printf("Ip output returns %d\n", ret);
2315 /* free tempy routes */
2319 /* PMTU check versus smallest asoc MTU goes here */
2320 if (ro->ro_rt != NULL) {
2321 if (ro->ro_rt->rt_rmx.rmx_mtu &&
2322 (stcb->asoc.smallest_mtu > ro->ro_rt->rt_rmx.rmx_mtu)) {
2323 sctp_mtu_size_reset(inp, &stcb->asoc,
2324 ro->ro_rt->rt_rmx.rmx_mtu);
2327 /* route was freed */
2328 net->src_addr_selected = 0;
2334 else if (to->sa_family == AF_INET6) {
2335 struct ip6_hdr *ip6h;
2336 #ifdef NEW_STRUCT_ROUTE
2337 struct route ip6route;
2339 struct route_in6 ip6route;
2343 uint16_t flowBottom;
2344 u_char tosBottom, tosTop;
2345 struct sockaddr_in6 *sin6, tmp, *lsa6, lsa6_tmp;
2346 struct sockaddr_in6 lsa6_storage;
2349 u_short prev_port=0;
2351 M_PREPEND(m, sizeof(struct ip6_hdr), M_DONTWAIT);
2353 /* failed to prepend data, give up */
2356 ip6h = mtod(m, struct ip6_hdr *);
2359 * We assume here that inp_flow is in host byte order within
2362 flowBottom = ((struct in6pcb *)inp)->in6p_flowinfo & 0x0000ffff;
2363 flowTop = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x000f0000) >> 16);
2365 tosTop = (((((struct in6pcb *)inp)->in6p_flowinfo & 0xf0) >> 4) | IPV6_VERSION);
2367 /* protect *sin6 from overwrite */
2368 sin6 = (struct sockaddr_in6 *)to;
2372 /* KAME hack: embed scopeid */
2373 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
2374 if (in6_embedscope(&sin6->sin6_addr, sin6, NULL, NULL) != 0)
2376 if (in6_embedscope(&sin6->sin6_addr, sin6) != 0)
2380 memset(&ip6route, 0, sizeof(ip6route));
2381 ro = (struct route *)&ip6route;
2382 memcpy(&ro->ro_dst, sin6, sin6->sin6_len);
2384 ro = (struct route *)&net->ro;
2387 if ((stcb->asoc.ecn_allowed) && ecn_ok) {
2389 tosBottom = (((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) | sctp_get_ect(stcb, chk)) << 4);
2392 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
2395 /* we could get no asoc if it is a O-O-T-B packet */
2396 tosBottom = ((((struct in6pcb *)inp)->in6p_flowinfo & 0x0c) << 4);
2398 ip6h->ip6_flow = htonl(((tosTop << 24) | ((tosBottom|flowTop) << 16) | flowBottom));
2399 ip6h->ip6_nxt = IPPROTO_SCTP;
2400 ip6h->ip6_plen = m->m_pkthdr.len;
2401 ip6h->ip6_dst = sin6->sin6_addr;
2404 * Add SRC address selection here:
2405 * we can only reuse to a limited degree the kame src-addr-sel,
2406 * since we can try their selection but it may not be bound.
2408 bzero(&lsa6_tmp, sizeof(lsa6_tmp));
2409 lsa6_tmp.sin6_family = AF_INET6;
2410 lsa6_tmp.sin6_len = sizeof(lsa6_tmp);
2413 if (net->src_addr_selected == 0) {
2414 /* Cache the source address */
2415 ((struct sockaddr_in6 *)&net->ro._s_addr)->sin6_addr = sctp_ipv6_source_address_selection(inp,
2416 stcb, ro, net, out_of_asoc_ok);
2419 net->src_addr_selected = 1;
2421 lsa6->sin6_addr = ((struct sockaddr_in6 *)&net->ro._s_addr)->sin6_addr;
2423 lsa6->sin6_addr = sctp_ipv6_source_address_selection(
2424 inp, stcb, ro, net, out_of_asoc_ok);
2426 lsa6->sin6_port = inp->sctp_lport;
2428 if ((ro->ro_rt == NULL)) {
2430 * src addr selection failed to find a route (or valid
2431 * source addr), so we can't get there from here!
2434 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2435 printf("low_level_output: dropped v6 pkt- no valid source addr\n");
2440 if ((net->dest_state & SCTP_ADDR_REACHABLE) && stcb)
2441 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
2443 SCTP_FAILED_THRESHOLD,
2445 net->dest_state &= ~SCTP_ADDR_REACHABLE;
2446 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
2448 if (net == stcb->asoc.primary_destination) {
2449 /* need a new primary */
2450 struct sctp_nets *alt;
2451 alt = sctp_find_alternate_net(stcb, net);
2453 if (sctp_set_primary_addr(stcb,
2454 (struct sockaddr *)NULL,
2456 net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
2457 net->src_addr_selected = 0;
2463 return (EHOSTUNREACH);
2466 #ifndef SCOPEDROUTING
2468 * XXX: sa6 may not have a valid sin6_scope_id in
2469 * the non-SCOPEDROUTING case.
2471 bzero(&lsa6_storage, sizeof(lsa6_storage));
2472 lsa6_storage.sin6_family = AF_INET6;
2473 lsa6_storage.sin6_len = sizeof(lsa6_storage);
2474 if ((error = in6_recoverscope(&lsa6_storage, &lsa6->sin6_addr,
2480 lsa6_storage.sin6_addr = lsa6->sin6_addr;
2481 lsa6_storage.sin6_port = inp->sctp_lport;
2482 lsa6 = &lsa6_storage;
2483 #endif /* SCOPEDROUTING */
2484 ip6h->ip6_src = lsa6->sin6_addr;
2487 * We set the hop limit now since there is a good chance that
2488 * our ro pointer is now filled
2490 ip6h->ip6_hlim = in6_selecthlim((struct in6pcb *)&inp->ip_inp.inp,
2492 (ro->ro_rt ? (ro->ro_rt->rt_ifp) : (NULL)) :
2495 ifp = ro->ro_rt->rt_ifp;
2497 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
2498 /* Copy to be sure something bad is not happening */
2499 sin6->sin6_addr = ip6h->ip6_dst;
2500 lsa6->sin6_addr = ip6h->ip6_src;
2502 printf("Calling ipv6 output routine from low level\n");
2504 sctp_print_address((struct sockaddr *)lsa6);
2506 sctp_print_address((struct sockaddr *)sin6);
2508 #endif /* SCTP_DEBUG */
2510 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
2511 /* preserve the port and scope for link local send */
2512 prev_scope = sin6->sin6_scope_id;
2513 prev_port = sin6->sin6_port;
2515 ret = ip6_output(m, ((struct in6pcb *)inp)->in6p_outputopts,
2516 #ifdef NEW_STRUCT_ROUTE
2519 (struct route_in6 *)ro,
2522 ((struct in6pcb *)inp)->in6p_moptions,
2523 #if defined(__NetBSD__)
2524 (struct socket *)inp->sctp_socket,
2527 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
2532 /* for link local this must be done */
2533 sin6->sin6_scope_id = prev_scope;
2534 sin6->sin6_port = prev_port;
2537 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
2538 printf("return from send is %d\n", ret);
2540 #endif /* SCTP_DEBUG_OUTPUT */
2541 sctp_pegs[SCTP_DATAGRAMS_SENT]++;
2543 /* Now if we had a temp route free it */
2548 /* PMTU check versus smallest asoc MTU goes here */
2549 if (ro->ro_rt == NULL) {
2550 /* Route was freed */
2551 net->src_addr_selected = 0;
2553 if (ro->ro_rt != NULL) {
2554 if (ro->ro_rt->rt_rmx.rmx_mtu &&
2555 (stcb->asoc.smallest_mtu > ro->ro_rt->rt_rmx.rmx_mtu)) {
2556 sctp_mtu_size_reset(inp,
2558 ro->ro_rt->rt_rmx.rmx_mtu);
2561 #if (defined(SCTP_BASE_FREEBSD) && __FreeBSD_version < 500000) || defined(__APPLE__)
2562 #define ND_IFINFO(ifp) (&nd_ifinfo[ifp->if_index])
2563 #endif /* SCTP_BASE_FREEBSD */
2564 if (ND_IFINFO(ifp)->linkmtu &&
2565 (stcb->asoc.smallest_mtu > ND_IFINFO(ifp)->linkmtu)) {
2566 sctp_mtu_size_reset(inp,
2568 ND_IFINFO(ifp)->linkmtu);
2577 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
2578 printf("Unknown protocol (TSNH) type %d\n", ((struct sockaddr *)to)->sa_family);
2587 int sctp_is_address_in_scope(struct ifaddr *ifa,
2588 int ipv4_addr_legal,
2589 int ipv6_addr_legal,
2591 int ipv4_local_scope,
2595 if ((loopback_scope == 0) &&
2597 (ifa->ifa_ifp->if_type == IFT_LOOP)) {
2598 /* skip loopback if not in scope *
2602 if ((ifa->ifa_addr->sa_family == AF_INET) && ipv4_addr_legal) {
2603 struct sockaddr_in *sin;
2604 sin = (struct sockaddr_in *)ifa->ifa_addr;
2605 if (sin->sin_addr.s_addr == 0) {
2606 /* not in scope , unspecified */
2609 if ((ipv4_local_scope == 0) &&
2610 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
2611 /* private address not in scope */
2614 } else if ((ifa->ifa_addr->sa_family == AF_INET6) && ipv6_addr_legal) {
2615 struct sockaddr_in6 *sin6;
2616 struct in6_ifaddr *ifa6;
2618 ifa6 = (struct in6_ifaddr *)ifa;
2619 /* ok to use deprecated addresses? */
2620 if (!ip6_use_deprecated) {
2621 if (ifa6->ia6_flags &
2622 IN6_IFF_DEPRECATED) {
2626 if (ifa6->ia6_flags &
2629 IN6_IFF_NOTREADY)) {
2632 sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
2633 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
2634 /* skip unspecifed addresses */
2637 if (/*(local_scope == 0) && */
2638 (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))) {
2641 if ((site_scope == 0) &&
2642 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
2653 sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
2655 struct mbuf *m, *m_at, *m_last;
2656 struct sctp_nets *net;
2657 struct sctp_init_msg *initm;
2658 struct sctp_supported_addr_param *sup_addr;
2659 struct sctp_ecn_supported_param *ecn;
2660 struct sctp_prsctp_supported_param *prsctp;
2661 struct sctp_ecn_nonce_supported_param *ecn_nonce;
2662 struct sctp_supported_chunk_types_param *pr_supported;
2666 /* INIT's always go to the primary (and usually ONLY address) */
2668 net = stcb->asoc.primary_destination;
2670 net = TAILQ_FIRST(&stcb->asoc.nets);
2675 /* we confirm any address we send an INIT to */
2676 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2677 sctp_set_primary_addr(stcb, NULL, net);
2679 /* we confirm any address we send an INIT to */
2680 net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
2683 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
2684 printf("Sending INIT to ");
2685 sctp_print_address ((struct sockaddr *)&net->ro._l_addr);
2688 if (((struct sockaddr *)&(net->ro._l_addr))->sa_family == AF_INET6) {
2689 /* special hook, if we are sending to link local
2690 * it will not show up in our private address count.
2692 struct sockaddr_in6 *sin6l;
2693 sin6l = &net->ro._l_addr.sin6;
2694 if (IN6_IS_ADDR_LINKLOCAL(&sin6l->sin6_addr))
2697 if (callout_pending(&net->rxt_timer.timer)) {
2698 /* This case should not happen */
2701 /* start the INIT timer */
2702 if (sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net)) {
2703 /* we are hosed since I can't start the INIT timer? */
2706 MGETHDR(m, M_DONTWAIT, MT_HEADER);
2708 /* No memory, INIT timer will re-attempt. */
2711 /* make it into a M_EXT */
2712 MCLGET(m, M_DONTWAIT);
2713 if ((m->m_flags & M_EXT) != M_EXT) {
2714 /* Failed to get cluster buffer */
2718 m->m_data += SCTP_MIN_OVERHEAD;
2719 m->m_len = sizeof(struct sctp_init_msg);
2720 /* Now lets put the SCTP header in place */
2721 initm = mtod(m, struct sctp_init_msg *);
2722 initm->sh.src_port = inp->sctp_lport;
2723 initm->sh.dest_port = stcb->rport;
2724 initm->sh.v_tag = 0;
2725 initm->sh.checksum = 0; /* calculate later */
2726 /* now the chunk header */
2727 initm->msg.ch.chunk_type = SCTP_INITIATION;
2728 initm->msg.ch.chunk_flags = 0;
2729 /* fill in later from mbuf we build */
2730 initm->msg.ch.chunk_length = 0;
2731 /* place in my tag */
2732 initm->msg.init.initiate_tag = htonl(stcb->asoc.my_vtag);
2733 /* set up some of the credits. */
2734 initm->msg.init.a_rwnd = htonl(max(inp->sctp_socket->so_rcv.sb_hiwat,
2735 SCTP_MINIMAL_RWND));
2737 initm->msg.init.num_outbound_streams = htons(stcb->asoc.pre_open_streams);
2738 initm->msg.init.num_inbound_streams = htons(stcb->asoc.max_inbound_streams);
2739 initm->msg.init.initial_tsn = htonl(stcb->asoc.init_seq_number);
2740 /* now the address restriction */
2741 sup_addr = (struct sctp_supported_addr_param *)((caddr_t)initm +
2743 sup_addr->ph.param_type = htons(SCTP_SUPPORTED_ADDRTYPE);
2744 /* we support 2 types IPv6/IPv4 */
2745 sup_addr->ph.param_length = htons(sizeof(*sup_addr) +
2747 sup_addr->addr_type[0] = htons(SCTP_IPV4_ADDRESS);
2748 sup_addr->addr_type[1] = htons(SCTP_IPV6_ADDRESS);
2749 m->m_len += sizeof(*sup_addr) + sizeof(uint16_t);
2751 /* if (inp->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT) {*/
2752 if (inp->sctp_ep.adaption_layer_indicator) {
2753 struct sctp_adaption_layer_indication *ali;
2754 ali = (struct sctp_adaption_layer_indication *)(
2755 (caddr_t)sup_addr + sizeof(*sup_addr) + sizeof(uint16_t));
2756 ali->ph.param_type = htons(SCTP_ULP_ADAPTION);
2757 ali->ph.param_length = htons(sizeof(*ali));
2758 ali->indication = ntohl(inp->sctp_ep.adaption_layer_indicator);
2759 m->m_len += sizeof(*ali);
2760 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali +
2763 ecn = (struct sctp_ecn_supported_param *)((caddr_t)sup_addr +
2764 sizeof(*sup_addr) + sizeof(uint16_t));
2767 /* now any cookie time extensions */
2768 if (stcb->asoc.cookie_preserve_req) {
2769 struct sctp_cookie_perserve_param *cookie_preserve;
2770 cookie_preserve = (struct sctp_cookie_perserve_param *)(ecn);
2771 cookie_preserve->ph.param_type = htons(SCTP_COOKIE_PRESERVE);
2772 cookie_preserve->ph.param_length = htons(
2773 sizeof(*cookie_preserve));
2774 cookie_preserve->time = htonl(stcb->asoc.cookie_preserve_req);
2775 m->m_len += sizeof(*cookie_preserve);
2776 ecn = (struct sctp_ecn_supported_param *)(
2777 (caddr_t)cookie_preserve + sizeof(*cookie_preserve));
2778 stcb->asoc.cookie_preserve_req = 0;
2782 if (sctp_ecn == 1) {
2783 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
2784 ecn->ph.param_length = htons(sizeof(*ecn));
2785 m->m_len += sizeof(*ecn);
2786 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
2789 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
2791 /* And now tell the peer we do pr-sctp */
2792 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
2793 prsctp->ph.param_length = htons(sizeof(*prsctp));
2794 m->m_len += sizeof(*prsctp);
2797 /* And now tell the peer we do all the extensions */
2798 pr_supported = (struct sctp_supported_chunk_types_param *)((caddr_t)prsctp +
2801 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
2802 pr_supported->ph.param_length = htons(sizeof(*pr_supported) + SCTP_EXT_COUNT);
2803 pr_supported->chunk_types[0] = SCTP_ASCONF;
2804 pr_supported->chunk_types[1] = SCTP_ASCONF_ACK;
2805 pr_supported->chunk_types[2] = SCTP_FORWARD_CUM_TSN;
2806 pr_supported->chunk_types[3] = SCTP_PACKET_DROPPED;
2807 pr_supported->chunk_types[4] = SCTP_STREAM_RESET;
2808 pr_supported->chunk_types[5] = 0; /* pad */
2809 pr_supported->chunk_types[6] = 0; /* pad */
2810 pr_supported->chunk_types[7] = 0; /* pad */
2812 m->m_len += (sizeof(*pr_supported) + SCTP_EXT_COUNT + SCTP_PAD_EXT_COUNT);
2813 /* ECN nonce: And now tell the peer we support ECN nonce */
2815 if (sctp_ecn_nonce) {
2816 ecn_nonce = (struct sctp_ecn_nonce_supported_param *)((caddr_t)pr_supported +
2817 sizeof(*pr_supported) + SCTP_EXT_COUNT + SCTP_PAD_EXT_COUNT);
2818 ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
2819 ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
2820 m->m_len += sizeof(*ecn_nonce);
2824 /* now the addresses */
2825 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2831 TAILQ_FOREACH(ifn, &ifnet, if_list) {
2832 if ((stcb->asoc.loopback_scope == 0) &&
2833 (ifn->if_type == IFT_LOOP)) {
2835 * Skip loopback devices if loopback_scope
2840 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
2841 if (sctp_is_address_in_scope(ifa,
2842 stcb->asoc.ipv4_addr_legal,
2843 stcb->asoc.ipv6_addr_legal,
2844 stcb->asoc.loopback_scope,
2845 stcb->asoc.ipv4_local_scope,
2846 stcb->asoc.local_scope,
2847 stcb->asoc.site_scope) == 0) {
2854 TAILQ_FOREACH(ifn, &ifnet, if_list) {
2855 if ((stcb->asoc.loopback_scope == 0) &&
2856 (ifn->if_type == IFT_LOOP)) {
2858 * Skip loopback devices if loopback_scope
2863 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
2864 if (sctp_is_address_in_scope(ifa,
2865 stcb->asoc.ipv4_addr_legal,
2866 stcb->asoc.ipv6_addr_legal,
2867 stcb->asoc.loopback_scope,
2868 stcb->asoc.ipv4_local_scope,
2869 stcb->asoc.local_scope,
2870 stcb->asoc.site_scope) == 0) {
2873 m_at = sctp_add_addr_to_mbuf(m_at, ifa);
2878 struct sctp_laddr *laddr;
2881 /* First, how many ? */
2882 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2883 if (laddr->ifa == NULL) {
2886 if (laddr->ifa->ifa_addr == NULL)
2888 if (sctp_is_address_in_scope(laddr->ifa,
2889 stcb->asoc.ipv4_addr_legal,
2890 stcb->asoc.ipv6_addr_legal,
2891 stcb->asoc.loopback_scope,
2892 stcb->asoc.ipv4_local_scope,
2893 stcb->asoc.local_scope,
2894 stcb->asoc.site_scope) == 0) {
2899 /* To get through a NAT we only list addresses if
2900 * we have more than one. That way if you just
2901 * bind a single address we let the source of the init
2902 * dictate our address.
2905 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
2906 if (laddr->ifa == NULL) {
2909 if (laddr->ifa->ifa_addr == NULL) {
2913 if (sctp_is_address_in_scope(laddr->ifa,
2914 stcb->asoc.ipv4_addr_legal,
2915 stcb->asoc.ipv6_addr_legal,
2916 stcb->asoc.loopback_scope,
2917 stcb->asoc.ipv4_local_scope,
2918 stcb->asoc.local_scope,
2919 stcb->asoc.site_scope) == 0) {
2922 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
2926 /* calulate the size and update pkt header and chunk header */
2927 m->m_pkthdr.len = 0;
2928 for (m_at = m; m_at; m_at = m_at->m_next) {
2929 if (m_at->m_next == NULL)
2931 m->m_pkthdr.len += m_at->m_len;
2933 initm->msg.ch.chunk_length = htons((m->m_pkthdr.len -
2934 sizeof(struct sctphdr)));
2935 /* We pass 0 here to NOT set IP_DF if its IPv4, we
2936 * ignore the return here since the timer will drive
2940 /* I don't expect this to execute but we will be safe here */
2941 padval = m->m_pkthdr.len % 4;
2942 if ((padval) && (m_last)) {
2943 /* The compiler worries that m_last may not be
2944 * set even though I think it is impossible :->
2945 * however we add m_last here just in case.
2948 ret = sctp_add_pad_tombuf(m_last, (4-padval));
2950 /* Houston we have a problem, no space */
2954 m->m_pkthdr.len += padval;
2957 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
2958 printf("Calling lowlevel output stcb:%x net:%x\n",
2959 (u_int)stcb, (u_int)net);
2962 ret = sctp_lowlevel_chunk_output(inp, stcb, net,
2963 (struct sockaddr *)&net->ro._l_addr, m, 0, 0, NULL, 0);
2965 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
2966 printf("Low level output returns %d\n", ret);
2969 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, net);
2970 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
2974 sctp_arethere_unrecognized_parameters(struct mbuf *in_initpkt,
2975 int param_offset, int *abort_processing, struct sctp_chunkhdr *cp)
2977 /* Given a mbuf containing an INIT or INIT-ACK
2978 * with the param_offset being equal to the
2979 * beginning of the params i.e. (iphlen + sizeof(struct sctp_init_msg)
2980 * parse through the parameters to the end of the mbuf verifying
2981 * that all parameters are known.
2983 * For unknown parameters build and return a mbuf with
2984 * UNRECOGNIZED_PARAMETER errors. If the flags indicate
2985 * to stop processing this chunk stop, and set *abort_processing
2988 * By having param_offset be pre-set to where parameters begin
2989 * it is hoped that this routine may be reused in the future
2992 struct sctp_paramhdr *phdr, params;
2994 struct mbuf *mat, *op_err;
2996 int at, limit, pad_needed;
2997 uint16_t ptype, plen;
3000 *abort_processing = 0;
3003 limit = ntohs(cp->chunk_length) - sizeof(struct sctp_init_chunk);
3005 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
3006 printf("Limit is %d bytes\n", limit);
3012 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
3013 while ((phdr != NULL) && ((size_t)limit >= sizeof(struct sctp_paramhdr))) {
3014 ptype = ntohs(phdr->param_type);
3015 plen = ntohs(phdr->param_length);
3016 limit -= SCTP_SIZE32(plen);
3017 if (plen < sizeof(struct sctp_paramhdr)) {
3019 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
3020 printf("sctp_output.c:Impossible length in parameter < %d\n", plen);
3023 *abort_processing = 1;
3026 /* All parameters for all chunks that we
3027 * know/understand are listed here. We process
3028 * them other places and make appropriate
3029 * stop actions per the upper bits. However
3030 * this is the generic routine processor's can
3031 * call to get back an operr.. to either incorporate (init-ack)
3034 if ((ptype == SCTP_HEARTBEAT_INFO) ||
3035 (ptype == SCTP_IPV4_ADDRESS) ||
3036 (ptype == SCTP_IPV6_ADDRESS) ||
3037 (ptype == SCTP_STATE_COOKIE) ||
3038 (ptype == SCTP_UNRECOG_PARAM) ||
3039 (ptype == SCTP_COOKIE_PRESERVE) ||
3040 (ptype == SCTP_SUPPORTED_ADDRTYPE) ||
3041 (ptype == SCTP_PRSCTP_SUPPORTED) ||
3042 (ptype == SCTP_ADD_IP_ADDRESS) ||
3043 (ptype == SCTP_DEL_IP_ADDRESS) ||
3044 (ptype == SCTP_ECN_CAPABLE) ||
3045 (ptype == SCTP_ULP_ADAPTION) ||
3046 (ptype == SCTP_ERROR_CAUSE_IND) ||
3047 (ptype == SCTP_SET_PRIM_ADDR) ||
3048 (ptype == SCTP_SUCCESS_REPORT) ||
3049 (ptype == SCTP_ULP_ADAPTION) ||
3050 (ptype == SCTP_SUPPORTED_CHUNK_EXT) ||
3051 (ptype == SCTP_ECN_NONCE_SUPPORTED)
3054 at += SCTP_SIZE32(plen);
3055 } else if (ptype == SCTP_HOSTNAME_ADDRESS) {
3056 /* We can NOT handle HOST NAME addresses!! */
3058 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
3059 printf("Can't handle hostname addresses.. abort processing\n");
3062 *abort_processing = 1;
3063 if (op_err == NULL) {
3064 /* Ok need to try to get a mbuf */
3065 MGETHDR(op_err, M_DONTWAIT, MT_DATA);
3068 op_err->m_pkthdr.len = 0;
3069 /* pre-reserve space for ip and sctp header and chunk hdr*/
3070 op_err->m_data += sizeof(struct ip6_hdr);
3071 op_err->m_data += sizeof(struct sctphdr);
3072 op_err->m_data += sizeof(struct sctp_chunkhdr);
3076 /* If we have space */
3077 struct sctp_paramhdr s;
3080 pad_needed = 4 - (err_at % 4);
3081 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
3082 err_at += pad_needed;
3084 s.param_type = htons(SCTP_CAUSE_UNRESOLV_ADDR);
3085 s.param_length = htons(sizeof(s) + plen);
3086 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
3087 err_at += sizeof(s);
3088 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, plen);
3090 sctp_m_freem(op_err);
3091 /* we are out of memory but we
3092 * still need to have a look at what to
3093 * do (the system is in trouble though).
3097 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
3102 /* we do not recognize the parameter
3103 * figure out what we do.
3106 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
3107 printf("Got parameter type %x - unknown\n",
3111 if ((ptype & 0x4000) == 0x4000) {
3112 /* Report bit is set?? */
3114 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
3115 printf("Report bit is set\n");
3118 if (op_err == NULL) {
3119 /* Ok need to try to get an mbuf */
3120 MGETHDR(op_err, M_DONTWAIT, MT_DATA);
3123 op_err->m_pkthdr.len = 0;
3124 op_err->m_data += sizeof(struct ip6_hdr);
3125 op_err->m_data += sizeof(struct sctphdr);
3126 op_err->m_data += sizeof(struct sctp_chunkhdr);
3130 /* If we have space */
3131 struct sctp_paramhdr s;
3134 pad_needed = 4 - (err_at % 4);
3135 m_copyback(op_err, err_at, pad_needed, (caddr_t)&cpthis);
3136 err_at += pad_needed;
3138 s.param_type = htons(SCTP_UNRECOG_PARAM);
3139 s.param_length = htons(sizeof(s) + plen);
3140 m_copyback(op_err, err_at, sizeof(s), (caddr_t)&s);
3141 err_at += sizeof(s);
3142 if (plen > sizeof(tempbuf)) {
3143 plen = sizeof(tempbuf);
3145 phdr = sctp_get_next_param(mat, at, (struct sctp_paramhdr *)tempbuf, plen);
3147 sctp_m_freem(op_err);
3148 /* we are out of memory but we
3149 * still need to have a look at what to
3150 * do (the system is in trouble though).
3152 goto more_processing;
3154 m_copyback(op_err, err_at, plen, (caddr_t)phdr);
3159 if ((ptype & 0x8000) == 0x0000) {
3161 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
3162 printf("Abort bit is now setting1\n");
3167 /* skip this chunk and continue processing */
3168 at += SCTP_SIZE32(plen);
3172 phdr = sctp_get_next_param(mat, at, ¶ms, sizeof(params));
3178 sctp_are_there_new_addresses(struct sctp_association *asoc,
3179 struct mbuf *in_initpkt, int iphlen, int offset)
3182 * Given a INIT packet, look through the packet to verify that
3183 * there are NO new addresses. As we go through the parameters
3184 * add reports of any un-understood parameters that require an
3185 * error. Also we must return (1) to drop the packet if we see
3186 * a un-understood parameter that tells us to drop the chunk.
3188 struct sockaddr_in sin4, *sa4;
3189 struct sockaddr_in6 sin6, *sa6;
3190 struct sockaddr *sa_touse;
3191 struct sockaddr *sa;
3192 struct sctp_paramhdr *phdr, params;
3195 uint16_t ptype, plen;
3198 struct sctp_nets *net;
3200 memset(&sin4, 0, sizeof(sin4));
3201 memset(&sin6, 0, sizeof(sin6));
3202 sin4.sin_family = AF_INET;
3203 sin4.sin_len = sizeof(sin4);
3204 sin6.sin6_family = AF_INET6;
3205 sin6.sin6_len = sizeof(sin6);
3208 /* First what about the src address of the pkt ? */
3209 iph = mtod(in_initpkt, struct ip *);
3210 if (iph->ip_v == IPVERSION) {
3211 /* source addr is IPv4 */
3212 sin4.sin_addr = iph->ip_src;
3213 sa_touse = (struct sockaddr *)&sin4;
3214 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
3215 /* source addr is IPv6 */
3216 struct ip6_hdr *ip6h;
3217 ip6h = mtod(in_initpkt, struct ip6_hdr *);
3218 sin6.sin6_addr = ip6h->ip6_src;
3219 sa_touse = (struct sockaddr *)&sin6;
3225 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3226 sa = (struct sockaddr *)&net->ro._l_addr;
3227 if (sa->sa_family == sa_touse->sa_family) {
3228 if (sa->sa_family == AF_INET) {
3229 sa4 = (struct sockaddr_in *)sa;
3230 if (sa4->sin_addr.s_addr ==
3231 sin4.sin_addr.s_addr) {
3235 } else if (sa->sa_family == AF_INET6) {
3236 sa6 = (struct sockaddr_in6 *)sa;
3237 if (SCTP6_ARE_ADDR_EQUAL(&sa6->sin6_addr,
3246 /* New address added! no need to look futher. */
3249 /* Ok so far lets munge through the rest of the packet */
3253 offset += sizeof(struct sctp_init_chunk);
3254 phdr = sctp_get_next_param(mat, offset, ¶ms, sizeof(params));
3256 ptype = ntohs(phdr->param_type);
3257 plen = ntohs(phdr->param_length);
3258 if (ptype == SCTP_IPV4_ADDRESS) {
3259 struct sctp_ipv4addr_param *p4, p4_buf;
3261 phdr = sctp_get_next_param(mat, offset,
3262 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
3263 if (plen != sizeof(struct sctp_ipv4addr_param) ||
3267 p4 = (struct sctp_ipv4addr_param *)phdr;
3268 sin4.sin_addr.s_addr = p4->addr;
3269 sa_touse = (struct sockaddr *)&sin4;
3270 } else if (ptype == SCTP_IPV6_ADDRESS) {
3271 struct sctp_ipv6addr_param *p6, p6_buf;
3273 phdr = sctp_get_next_param(mat, offset,
3274 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
3275 if (plen != sizeof(struct sctp_ipv6addr_param) ||
3279 p6 = (struct sctp_ipv6addr_param *)phdr;
3280 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
3282 sa_touse = (struct sockaddr *)&sin4;
3286 /* ok, sa_touse points to one to check */
3288 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3289 sa = (struct sockaddr *)&net->ro._l_addr;
3290 if (sa->sa_family != sa_touse->sa_family) {
3293 if (sa->sa_family == AF_INET) {
3294 sa4 = (struct sockaddr_in *)sa;
3295 if (sa4->sin_addr.s_addr ==
3296 sin4.sin_addr.s_addr) {
3300 } else if (sa->sa_family == AF_INET6) {
3301 sa6 = (struct sockaddr_in6 *)sa;
3302 if (SCTP6_ARE_ADDR_EQUAL(
3303 &sa6->sin6_addr, &sin6.sin6_addr)) {
3310 /* New addr added! no need to look further */
3314 offset += SCTP_SIZE32(plen);
3315 phdr = sctp_get_next_param(mat, offset, ¶ms, sizeof(params));
3321 * Given a MBUF chain that was sent into us containing an
3322 * INIT. Build a INIT-ACK with COOKIE and send back.
3323 * We assume that the in_initpkt has done a pullup to
3324 * include IPv6/4header, SCTP header and initial part of
3325 * INIT message (i.e. the struct sctp_init_msg).
3328 sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3329 struct mbuf *init_pkt, int iphlen, int offset, struct sctphdr *sh,
3330 struct sctp_init_chunk *init_chk)
3332 struct sctp_association *asoc;
3333 struct mbuf *m, *m_at, *m_tmp, *m_cookie, *op_err, *m_last;
3334 struct sctp_init_msg *initackm_out;
3335 struct sctp_ecn_supported_param *ecn;
3336 struct sctp_prsctp_supported_param *prsctp;
3337 struct sctp_ecn_nonce_supported_param *ecn_nonce;
3338 struct sctp_supported_chunk_types_param *pr_supported;
3339 struct sockaddr_storage store;
3340 struct sockaddr_in *sin;
3341 struct sockaddr_in6 *sin6;
3344 struct ip6_hdr *ip6;
3345 struct sockaddr *to;
3346 struct sctp_state_cookie stc;
3347 struct sctp_nets *net=NULL;
3349 uint16_t his_limit, i_want;
3350 int abort_flag, padval, sz_of;
3358 if ((asoc != NULL) &&
3359 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
3360 (sctp_are_there_new_addresses(asoc, init_pkt, iphlen, offset))) {
3361 /* new addresses, out of here in non-cookie-wait states */
3363 * Send a ABORT, we don't add the new address error clause though
3364 * we even set the T bit and copy in the 0 tag.. this looks no
3365 * different than if no listner was present.
3367 sctp_send_abort(init_pkt, iphlen, sh, 0, NULL);
3371 op_err = sctp_arethere_unrecognized_parameters(init_pkt,
3372 (offset+sizeof(struct sctp_init_chunk)),
3373 &abort_flag, (struct sctp_chunkhdr *)init_chk);
3375 sctp_send_abort(init_pkt, iphlen, sh, init_chk->init.initiate_tag, op_err);
3378 MGETHDR(m, M_DONTWAIT, MT_HEADER);
3380 /* No memory, INIT timer will re-attempt. */
3382 sctp_m_freem(op_err);
3385 MCLGET(m, M_DONTWAIT);
3386 if ((m->m_flags & M_EXT) != M_EXT) {
3387 /* Failed to get cluster buffer */
3389 sctp_m_freem(op_err);
3393 m->m_data += SCTP_MIN_OVERHEAD;
3394 m->m_pkthdr.rcvif = 0;
3395 m->m_len = sizeof(struct sctp_init_msg);
3397 /* the time I built cookie */
3398 SCTP_GETTIME_TIMEVAL(&stc.time_entered);
3400 /* populate any tie tags */
3402 /* unlock before tag selections */
3403 SCTP_TCB_UNLOCK(stcb);
3404 if (asoc->my_vtag_nonce == 0)
3405 asoc->my_vtag_nonce = sctp_select_a_tag(inp);
3406 stc.tie_tag_my_vtag = asoc->my_vtag_nonce;
3408 if (asoc->peer_vtag_nonce == 0)
3409 asoc->peer_vtag_nonce = sctp_select_a_tag(inp);
3410 stc.tie_tag_peer_vtag = asoc->peer_vtag_nonce;
3412 stc.cookie_life = asoc->cookie_life;
3413 net = asoc->primary_destination;
3414 /* now we must relock */
3415 SCTP_INP_RLOCK(inp);
3416 /* we may be in trouble here if the inp got freed
3417 * most likely this set of tests will protect
3418 * us but there is a chance not.
3420 if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE|SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
3422 sctp_m_freem(op_err);
3424 sctp_send_abort(init_pkt, iphlen, sh, 0, NULL);
3427 SCTP_TCB_LOCK(stcb);
3428 SCTP_INP_RUNLOCK(stcb->sctp_ep);
3430 stc.tie_tag_my_vtag = 0;
3431 stc.tie_tag_peer_vtag = 0;
3432 /* life I will award this cookie */
3433 stc.cookie_life = inp->sctp_ep.def_cookie_life;
3436 /* copy in the ports for later check */
3437 stc.myport = sh->dest_port;
3438 stc.peerport = sh->src_port;
3441 * If we wanted to honor cookie life extentions, we would add
3442 * to stc.cookie_life. For now we should NOT honor any extension
3444 stc.site_scope = stc.local_scope = stc.loopback_scope = 0;
3445 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3446 struct inpcb *in_inp;
3447 /* Its a V6 socket */
3448 in_inp = (struct inpcb *)inp;
3449 stc.ipv6_addr_legal = 1;
3450 /* Now look at the binding flag to see if V4 will be legal */
3452 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
3453 (in_inp->inp_flags & IN6P_IPV6_V6ONLY)
3454 #elif defined(__OpenBSD__)
3455 (0) /* For openbsd we do dual bind only */
3457 (((struct in6pcb *)in_inp)->in6p_flags & IN6P_IPV6_V6ONLY)
3460 stc.ipv4_addr_legal = 1;
3462 /* V4 addresses are NOT legal on the association */
3463 stc.ipv4_addr_legal = 0;
3466 /* Its a V4 socket, no - V6 */
3467 stc.ipv4_addr_legal = 1;
3468 stc.ipv6_addr_legal = 0;
3471 #ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
3476 /* now for scope setup */
3477 memset((caddr_t)&store, 0, sizeof(store));
3478 sin = (struct sockaddr_in *)&store;
3479 sin6 = (struct sockaddr_in6 *)&store;
3481 to = (struct sockaddr *)&store;
3482 iph = mtod(init_pkt, struct ip *);
3483 if (iph->ip_v == IPVERSION) {
3484 struct in_addr addr;
3485 struct route iproute;
3487 sin->sin_family = AF_INET;
3488 sin->sin_len = sizeof(struct sockaddr_in);
3489 sin->sin_port = sh->src_port;
3490 sin->sin_addr = iph->ip_src;
3491 /* lookup address */
3492 stc.address[0] = sin->sin_addr.s_addr;
3496 stc.addr_type = SCTP_IPV4_ADDRESS;
3497 /* local from address */
3498 memset(&iproute, 0, sizeof(iproute));
3500 memcpy(&ro->ro_dst, sin, sizeof(*sin));
3501 addr = sctp_ipv4_source_address_selection(inp, NULL,
3506 stc.laddress[0] = addr.s_addr;
3507 stc.laddress[1] = 0;
3508 stc.laddress[2] = 0;
3509 stc.laddress[3] = 0;
3510 stc.laddr_type = SCTP_IPV4_ADDRESS;
3511 /* scope_id is only for v6 */
3513 #ifndef SCTP_DONT_DO_PRIVADDR_SCOPE
3514 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
3519 #endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
3520 /* Must use the address in this case */
3521 if (sctp_is_address_on_local_host((struct sockaddr *)sin)) {
3522 stc.loopback_scope = 1;
3525 stc.local_scope = 1;
3527 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
3528 struct in6_addr addr;
3529 #ifdef NEW_STRUCT_ROUTE
3530 struct route iproute6;
3532 struct route_in6 iproute6;
3534 ip6 = mtod(init_pkt, struct ip6_hdr *);
3535 sin6->sin6_family = AF_INET6;
3536 sin6->sin6_len = sizeof(struct sockaddr_in6);
3537 sin6->sin6_port = sh->src_port;
3538 sin6->sin6_addr = ip6->ip6_src;
3539 /* lookup address */
3540 memcpy(&stc.address, &sin6->sin6_addr,
3541 sizeof(struct in6_addr));
3542 sin6->sin6_scope_id = 0;
3543 stc.addr_type = SCTP_IPV6_ADDRESS;
3545 if (sctp_is_address_on_local_host((struct sockaddr *)sin6)) {
3546 stc.loopback_scope = 1;
3547 stc.local_scope = 1;
3550 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
3552 * If the new destination is a LINK_LOCAL
3553 * we must have common both site and local
3554 * scope. Don't set local scope though since
3555 * we must depend on the source to be added
3556 * implicitly. We cannot assure just because
3557 * we share one link that all links are common.
3559 stc.local_scope = 0;
3562 /* we start counting for the private
3563 * address stuff at 1. since the link
3564 * local we source from won't show
3565 * up in our scoped cou8nt.
3568 /* pull out the scope_id from incoming pkt */
3569 (void)in6_recoverscope(sin6, &ip6->ip6_src,
3570 init_pkt->m_pkthdr.rcvif);
3571 #if defined(SCTP_BASE_FREEBSD) || defined(__APPLE__) || defined(__DragonFly__)
3572 in6_embedscope(&sin6->sin6_addr, sin6, NULL,
3575 in6_embedscope(&sin6->sin6_addr, sin6);
3577 stc.scope_id = sin6->sin6_scope_id;
3578 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
3580 * If the new destination is SITE_LOCAL
3581 * then we must have site scope in common.
3585 /* local from address */
3586 memset(&iproute6, 0, sizeof(iproute6));
3587 ro = (struct route *)&iproute6;
3588 memcpy(&ro->ro_dst, sin6, sizeof(*sin6));
3589 addr = sctp_ipv6_source_address_selection(inp, NULL,
3594 memcpy(&stc.laddress, &addr, sizeof(struct in6_addr));
3595 stc.laddr_type = SCTP_IPV6_ADDRESS;
3598 /* set the scope per the existing tcb */
3599 struct sctp_nets *lnet;
3601 stc.loopback_scope = asoc->loopback_scope;
3602 stc.ipv4_scope = asoc->ipv4_local_scope;
3603 stc.site_scope = asoc->site_scope;
3604 stc.local_scope = asoc->local_scope;
3605 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
3606 if (lnet->ro._l_addr.sin6.sin6_family == AF_INET6) {
3607 if (IN6_IS_ADDR_LINKLOCAL(&lnet->ro._l_addr.sin6.sin6_addr)) {
3608 /* if we have a LL address, start counting
3616 /* use the net pointer */
3617 to = (struct sockaddr *)&net->ro._l_addr;
3618 if (to->sa_family == AF_INET) {
3619 sin = (struct sockaddr_in *)to;
3620 stc.address[0] = sin->sin_addr.s_addr;
3624 stc.addr_type = SCTP_IPV4_ADDRESS;
3625 if (net->src_addr_selected == 0) {
3626 /* strange case here, the INIT
3627 * should have did the selection.
3629 net->ro._s_addr.sin.sin_addr =
3630 sctp_ipv4_source_address_selection(inp,
3631 stcb, (struct route *)&net->ro, net, 0);
3632 net->src_addr_selected = 1;
3636 stc.laddress[0] = net->ro._s_addr.sin.sin_addr.s_addr;
3637 stc.laddress[1] = 0;
3638 stc.laddress[2] = 0;
3639 stc.laddress[3] = 0;
3640 stc.laddr_type = SCTP_IPV4_ADDRESS;
3641 } else if (to->sa_family == AF_INET6) {
3642 sin6 = (struct sockaddr_in6 *)to;
3643 memcpy(&stc.address, &sin6->sin6_addr,
3644 sizeof(struct in6_addr));
3645 stc.addr_type = SCTP_IPV6_ADDRESS;
3646 if (net->src_addr_selected == 0) {
3647 /* strange case here, the INIT
3648 * should have did the selection.
3650 net->ro._s_addr.sin6.sin6_addr =
3651 sctp_ipv6_source_address_selection(inp,
3652 stcb, (struct route *)&net->ro, net, 0);
3653 net->src_addr_selected = 1;
3655 memcpy(&stc.laddress, &net->ro._l_addr.sin6.sin6_addr,
3656 sizeof(struct in6_addr));
3657 stc.laddr_type = SCTP_IPV6_ADDRESS;
3660 /* Now lets put the SCTP header in place */
3661 initackm_out = mtod(m, struct sctp_init_msg *);
3662 initackm_out->sh.src_port = inp->sctp_lport;
3663 initackm_out->sh.dest_port = sh->src_port;
3664 initackm_out->sh.v_tag = init_chk->init.initiate_tag;
3665 /* Save it off for quick ref */
3666 stc.peers_vtag = init_chk->init.initiate_tag;
3667 initackm_out->sh.checksum = 0; /* calculate later */
3669 strncpy(stc.identification, SCTP_VERSION_STRING,
3670 min(strlen(SCTP_VERSION_STRING), sizeof(stc.identification)));
3671 /* now the chunk header */
3672 initackm_out->msg.ch.chunk_type = SCTP_INITIATION_ACK;
3673 initackm_out->msg.ch.chunk_flags = 0;
3674 /* fill in later from mbuf we build */
3675 initackm_out->msg.ch.chunk_length = 0;
3676 /* place in my tag */
3677 if ((asoc != NULL) &&
3678 ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
3679 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED))) {
3680 /* re-use the v-tags and init-seq here */
3681 initackm_out->msg.init.initiate_tag = htonl(asoc->my_vtag);
3682 initackm_out->msg.init.initial_tsn = htonl(asoc->init_seq_number);
3684 initackm_out->msg.init.initiate_tag = htonl(sctp_select_a_tag(inp));
3685 /* get a TSN to use too */
3686 initackm_out->msg.init.initial_tsn = htonl(sctp_select_initial_TSN(&inp->sctp_ep));
3688 /* save away my tag to */
3689 stc.my_vtag = initackm_out->msg.init.initiate_tag;
3691 /* set up some of the credits. */
3692 initackm_out->msg.init.a_rwnd = htonl(max(inp->sctp_socket->so_rcv.sb_hiwat, SCTP_MINIMAL_RWND));
3693 /* set what I want */
3694 his_limit = ntohs(init_chk->init.num_inbound_streams);
3695 /* choose what I want */
3697 if (asoc->streamoutcnt > inp->sctp_ep.pre_open_stream_count) {
3698 i_want = asoc->streamoutcnt;
3700 i_want = inp->sctp_ep.pre_open_stream_count;
3703 i_want = inp->sctp_ep.pre_open_stream_count;
3705 if (his_limit < i_want) {
3706 /* I Want more :< */
3707 initackm_out->msg.init.num_outbound_streams = init_chk->init.num_inbound_streams;
3709 /* I can have what I want :> */
3710 initackm_out->msg.init.num_outbound_streams = htons(i_want);
3712 /* tell him his limt. */
3713 initackm_out->msg.init.num_inbound_streams =
3714 htons(inp->sctp_ep.max_open_streams_intome);
3715 /* setup the ECN pointer */
3717 /* if (inp->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT) {*/
3718 if (inp->sctp_ep.adaption_layer_indicator) {
3719 struct sctp_adaption_layer_indication *ali;
3720 ali = (struct sctp_adaption_layer_indication *)(
3721 (caddr_t)initackm_out + sizeof(*initackm_out));
3722 ali->ph.param_type = htons(SCTP_ULP_ADAPTION);
3723 ali->ph.param_length = htons(sizeof(*ali));
3724 ali->indication = ntohl(inp->sctp_ep.adaption_layer_indicator);
3725 m->m_len += sizeof(*ali);
3726 ecn = (struct sctp_ecn_supported_param *)((caddr_t)ali +
3729 ecn = (struct sctp_ecn_supported_param*)(
3730 (caddr_t)initackm_out + sizeof(*initackm_out));
3734 if (sctp_ecn == 1) {
3735 ecn->ph.param_type = htons(SCTP_ECN_CAPABLE);
3736 ecn->ph.param_length = htons(sizeof(*ecn));
3737 m->m_len += sizeof(*ecn);
3739 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn +
3742 prsctp = (struct sctp_prsctp_supported_param *)((caddr_t)ecn);
3744 /* And now tell the peer we do pr-sctp */
3745 prsctp->ph.param_type = htons(SCTP_PRSCTP_SUPPORTED);
3746 prsctp->ph.param_length = htons(sizeof(*prsctp));
3747 m->m_len += sizeof(*prsctp);
3750 /* And now tell the peer we do all the extensions */
3751 pr_supported = (struct sctp_supported_chunk_types_param *)((caddr_t)prsctp +
3754 pr_supported->ph.param_type = htons(SCTP_SUPPORTED_CHUNK_EXT);
3755 pr_supported->ph.param_length = htons(sizeof(*pr_supported) + SCTP_EXT_COUNT);
3756 pr_supported->chunk_types[0] = SCTP_ASCONF;
3757 pr_supported->chunk_types[1] = SCTP_ASCONF_ACK;
3758 pr_supported->chunk_types[2] = SCTP_FORWARD_CUM_TSN;
3759 pr_supported->chunk_types[3] = SCTP_PACKET_DROPPED;
3760 pr_supported->chunk_types[4] = SCTP_STREAM_RESET;
3761 pr_supported->chunk_types[5] = 0; /* pad */
3762 pr_supported->chunk_types[6] = 0; /* pad */
3763 pr_supported->chunk_types[7] = 0; /* pad */
3765 m->m_len += (sizeof(*pr_supported) + SCTP_EXT_COUNT + SCTP_PAD_EXT_COUNT);
3766 if (sctp_ecn_nonce) {
3767 /* ECN nonce: And now tell the peer we support ECN nonce */
3768 ecn_nonce = (struct sctp_ecn_nonce_supported_param *)((caddr_t)pr_supported +
3769 sizeof(*pr_supported) + SCTP_EXT_COUNT + SCTP_PAD_EXT_COUNT);
3770 ecn_nonce->ph.param_type = htons(SCTP_ECN_NONCE_SUPPORTED);
3771 ecn_nonce->ph.param_length = htons(sizeof(*ecn_nonce));
3772 m->m_len += sizeof(*ecn_nonce);
3776 /* now the addresses */
3777 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3780 int cnt = cnt_inits_to;
3782 TAILQ_FOREACH(ifn, &ifnet, if_list) {
3783 if ((stc.loopback_scope == 0) &&
3784 (ifn->if_type == IFT_LOOP)) {
3786 * Skip loopback devices if loopback_scope
3791 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
3792 if (sctp_is_address_in_scope(ifa,
3793 stc.ipv4_addr_legal, stc.ipv6_addr_legal,
3794 stc.loopback_scope, stc.ipv4_scope,
3795 stc.local_scope, stc.site_scope) == 0) {
3802 TAILQ_FOREACH(ifn, &ifnet, if_list) {
3803 if ((stc.loopback_scope == 0) &&
3804 (ifn->if_type == IFT_LOOP)) {
3806 * Skip loopback devices if
3807 * loopback_scope not set
3811 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
3812 if (sctp_is_address_in_scope(ifa,
3813 stc.ipv4_addr_legal,
3814 stc.ipv6_addr_legal,
3815 stc.loopback_scope, stc.ipv4_scope,
3816 stc.local_scope, stc.site_scope) == 0) {
3819 m_at = sctp_add_addr_to_mbuf(m_at, ifa);
3824 struct sctp_laddr *laddr;
3827 /* First, how many ? */
3828 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3829 if (laddr->ifa == NULL) {
3832 if (laddr->ifa->ifa_addr == NULL)
3834 if (sctp_is_address_in_scope(laddr->ifa,
3835 stc.ipv4_addr_legal, stc.ipv6_addr_legal,
3836 stc.loopback_scope, stc.ipv4_scope,
3837 stc.local_scope, stc.site_scope) == 0) {
3842 /* If we bind a single address only we won't list
3843 * any. This way you can get through a NAT
3846 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3847 if (laddr->ifa == NULL) {
3849 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
3850 printf("Help I have fallen and I can't get up!\n");
3855 if (laddr->ifa->ifa_addr == NULL)
3857 if (sctp_is_address_in_scope(laddr->ifa,
3858 stc.ipv4_addr_legal, stc.ipv6_addr_legal,
3859 stc.loopback_scope, stc.ipv4_scope,
3860 stc.local_scope, stc.site_scope) == 0) {
3863 m_at = sctp_add_addr_to_mbuf(m_at, laddr->ifa);
3868 /* tack on the operational error if present */
3870 if (op_err->m_pkthdr.len % 4) {
3871 /* must add a pad to the param */
3874 padlen = 4 - (op_err->m_pkthdr.len % 4);
3875 m_copyback(op_err, op_err->m_pkthdr.len, padlen, (caddr_t)&cpthis);
3877 while (m_at->m_next != NULL) {
3878 m_at = m_at->m_next;
3880 m_at->m_next = op_err;
3881 while (m_at->m_next != NULL) {
3882 m_at = m_at->m_next;
3885 /* Get total size of init packet */
3886 sz_of = SCTP_SIZE32(ntohs(init_chk->ch.chunk_length));
3887 /* pre-calulate the size and update pkt header and chunk header */
3888 m->m_pkthdr.len = 0;
3889 for (m_tmp = m; m_tmp; m_tmp = m_tmp->m_next) {
3890 m->m_pkthdr.len += m_tmp->m_len;
3891 if (m_tmp->m_next == NULL) {
3892 /* m_tmp should now point to last one */
3897 * Figure now the size of the cookie. We know the size of the
3898 * INIT-ACK. The Cookie is going to be the size of INIT, INIT-ACK,
3899 * COOKIE-STRUCTURE and SIGNATURE.
3903 * take our earlier INIT calc and add in the sz we just calculated
3904 * minus the size of the sctphdr (its not included in chunk size
3907 /* add once for the INIT-ACK */
3908 sz_of += (m->m_pkthdr.len - sizeof(struct sctphdr));
3910 /* add a second time for the INIT-ACK in the cookie */
3911 sz_of += (m->m_pkthdr.len - sizeof(struct sctphdr));
3913 /* Now add the cookie header and cookie message struct */
3914 sz_of += sizeof(struct sctp_state_cookie_param);
3915 /* ...and add the size of our signature */
3916 sz_of += SCTP_SIGNATURE_SIZE;
3917 initackm_out->msg.ch.chunk_length = htons(sz_of);
3919 /* Now we must build a cookie */
3920 m_cookie = sctp_add_cookie(inp, init_pkt, offset, m,
3921 sizeof(struct sctphdr), &stc);
3922 if (m_cookie == NULL) {
3923 /* memory problem */
3927 /* Now append the cookie to the end and update the space/size */
3928 m_tmp->m_next = m_cookie;
3931 * We pass 0 here to NOT set IP_DF if its IPv4, we ignore the
3932 * return here since the timer will drive a retranmission.
3934 padval = m->m_pkthdr.len % 4;
3935 if ((padval) && (m_last)) {
3936 /* see my previous comments on m_last */
3938 ret = sctp_add_pad_tombuf(m_last, (4-padval));
3940 /* Houston we have a problem, no space */
3944 m->m_pkthdr.len += padval;
3946 sctp_lowlevel_chunk_output(inp, NULL, NULL, to, m, 0, 0, NULL, 0);
3951 sctp_insert_on_wheel(struct sctp_association *asoc,
3952 struct sctp_stream_out *strq)
3954 struct sctp_stream_out *stre, *strn;
3955 stre = TAILQ_FIRST(&asoc->out_wheel);
3957 /* only one on wheel */
3958 TAILQ_INSERT_HEAD(&asoc->out_wheel, strq, next_spoke);
3961 for (; stre; stre = strn) {
3962 strn = TAILQ_NEXT(stre, next_spoke);
3963 if (stre->stream_no > strq->stream_no) {
3964 TAILQ_INSERT_BEFORE(stre, strq, next_spoke);
3966 } else if (stre->stream_no == strq->stream_no) {
3967 /* huh, should not happen */
3969 } else if (strn == NULL) {
3970 /* next one is null */
3971 TAILQ_INSERT_AFTER(&asoc->out_wheel, stre, strq,
3978 sctp_remove_from_wheel(struct sctp_association *asoc,
3979 struct sctp_stream_out *strq)
3981 /* take off and then setup so we know it is not on the wheel */
3982 TAILQ_REMOVE(&asoc->out_wheel, strq, next_spoke);
3983 strq->next_spoke.tqe_next = NULL;
3984 strq->next_spoke.tqe_prev = NULL;
3989 sctp_prune_prsctp(struct sctp_tcb *stcb,
3990 struct sctp_association *asoc,
3991 struct sctp_sndrcvinfo *srcv,
3996 struct sctp_tmit_chunk *chk, *nchk;
3997 if ((asoc->peer_supports_prsctp) && (asoc->sent_queue_cnt_removeable > 0)) {
3998 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
4000 * Look for chunks marked with the PR_SCTP
4001 * flag AND the buffer space flag. If the one
4002 * being sent is equal or greater priority then
4003 * purge the old one and free some space.
4005 if ((chk->flags & (SCTP_PR_SCTP_ENABLED |
4006 SCTP_PR_SCTP_BUFFER)) ==
4007 (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) {
4009 * This one is PR-SCTP AND buffer space
4012 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
4013 /* Lower numbers equates to
4014 * higher priority so if the
4015 * one we are looking at has a
4016 * larger or equal priority we
4017 * want to drop the data and
4018 * NOT retransmit it.
4027 if (chk->sent > SCTP_DATAGRAM_UNSENT)
4028 cause = SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_SENT;
4030 cause = SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_UNSENT;
4031 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
4034 freed_spc += ret_spc;
4035 if (freed_spc >= dataout) {
4038 } /* if chunk was present */
4039 } /* if of sufficent priority */
4040 } /* if chunk has enabled */
4041 } /* tailqforeach */
4043 chk = TAILQ_FIRST(&asoc->send_queue);
4045 nchk = TAILQ_NEXT(chk, sctp_next);
4046 /* Here we must move to the sent queue and mark */
4047 if ((chk->flags & (SCTP_PR_SCTP_ENABLED |
4048 SCTP_PR_SCTP_BUFFER)) ==
4049 (SCTP_PR_SCTP_ENABLED|SCTP_PR_SCTP_BUFFER)) {
4050 if (chk->rec.data.timetodrop.tv_sec >= (long)srcv->sinfo_timetolive) {
4057 ret_spc = sctp_release_pr_sctp_chunk(stcb, chk,
4058 SCTP_RESPONSE_TO_USER_REQ|SCTP_NOTIFY_DATAGRAM_UNSENT,
4061 freed_spc += ret_spc;
4062 if (freed_spc >= dataout) {
4065 } /* end if chk->data */
4066 } /* end if right class */
4067 } /* end if chk pr-sctp */
4069 } /* end while (chk) */
4070 } /* if enabled in asoc */
4074 sctp_prepare_chunk(struct sctp_tmit_chunk *template,
4075 struct sctp_tcb *stcb,
4076 struct sctp_sndrcvinfo *srcv,
4077 struct sctp_stream_out *strq,
4078 struct sctp_nets *net)
4080 bzero(template, sizeof(struct sctp_tmit_chunk));
4081 template->sent = SCTP_DATAGRAM_UNSENT;
4082 if ((stcb->asoc.peer_supports_prsctp) &&
4083 (srcv->sinfo_flags & (MSG_PR_SCTP_TTL|MSG_PR_SCTP_BUF)) &&
4084 (srcv->sinfo_timetolive > 0)
4087 * Peer supports PR-SCTP
4088 * The flags is set against this send for PR-SCTP
4089 * And timetolive is a postive value, zero is reserved
4090 * to mean a reliable send for both buffer/time
4093 if (srcv->sinfo_flags & MSG_PR_SCTP_BUF) {
4095 * Time to live is a priority stored in tv_sec
4096 * when doing the buffer drop thing.
4098 template->rec.data.timetodrop.tv_sec = srcv->sinfo_timetolive;
4102 SCTP_GETTIME_TIMEVAL(&template->rec.data.timetodrop);
4103 tv.tv_sec = srcv->sinfo_timetolive / 1000;
4104 tv.tv_usec = (srcv->sinfo_timetolive * 1000) % 1000000;
4106 timeradd(&template->rec.data.timetodrop, &tv,
4107 &template->rec.data.timetodrop);
4109 timevaladd(&template->rec.data.timetodrop, &tv);
4113 if ((srcv->sinfo_flags & MSG_UNORDERED) == 0) {
4114 template->rec.data.stream_seq = strq->next_sequence_sent;
4116 template->rec.data.stream_seq = 0;
4118 template->rec.data.TSN_seq = 0; /* not yet assigned */
4120 template->rec.data.stream_number = srcv->sinfo_stream;
4121 template->rec.data.payloadtype = srcv->sinfo_ppid;
4122 template->rec.data.context = srcv->sinfo_context;
4123 template->rec.data.doing_fast_retransmit = 0;
4124 template->rec.data.ect_nonce = 0; /* ECN Nonce */
4126 if (srcv->sinfo_flags & MSG_ADDR_OVER) {
4127 template->whoTo = net;
4129 if (stcb->asoc.primary_destination)
4130 template->whoTo = stcb->asoc.primary_destination;
4133 template->whoTo = net;
4136 /* the actual chunk flags */
4137 if (srcv->sinfo_flags & MSG_UNORDERED) {
4138 template->rec.data.rcv_flags = SCTP_DATA_UNORDERED;
4140 template->rec.data.rcv_flags = 0;
4142 /* no flags yet, FRAGMENT_OK goes here */
4143 template->flags = 0;
4145 if (stcb->asoc.peer_supports_prsctp) {
4146 if (srcv->sinfo_timetolive > 0) {
4148 * We only set the flag if timetolive (or
4149 * priority) was set to a positive number.
4150 * Zero is reserved specifically to be
4151 * EXCLUDED and sent reliable.
4153 if (srcv->sinfo_flags & MSG_PR_SCTP_TTL) {
4154 template->flags |= SCTP_PR_SCTP_ENABLED;
4156 if (srcv->sinfo_flags & MSG_PR_SCTP_BUF) {
4157 template->flags |= SCTP_PR_SCTP_BUFFER;
4161 template->asoc = &stcb->asoc;
4166 sctp_get_frag_point(struct sctp_tcb *stcb,
4167 struct sctp_association *asoc)
4171 /* For endpoints that have both 6 and 4 addresses
4172 * we must reserver room for the 6 ip header, for
4173 * those that are only dealing with V4 we use
4174 * a larger frag point.
4176 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
4177 ovh = SCTP_MED_OVERHEAD;
4179 ovh = SCTP_MED_V4_OVERHEAD;
4182 if (stcb->sctp_ep->sctp_frag_point > asoc->smallest_mtu)
4183 siz = asoc->smallest_mtu - ovh;
4185 siz = (stcb->sctp_ep->sctp_frag_point - ovh);
4187 if (siz > (MCLBYTES-sizeof(struct sctp_data_chunk))) { */
4188 /* A data chunk MUST fit in a cluster */
4189 /* siz = (MCLBYTES - sizeof(struct sctp_data_chunk));*/
4193 /* make it an even word boundary please */
4198 extern unsigned int sctp_max_chunks_on_queue;
4200 #define SBLOCKWAIT(f) (((f)&MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
4203 sctp_msg_append(struct sctp_tcb *stcb,
4204 struct sctp_nets *net,
4206 struct sctp_sndrcvinfo *srcv,
4210 struct sctp_association *asoc;
4211 struct sctp_stream_out *strq;
4212 struct sctp_tmit_chunk *chk;
4213 struct sctpchunk_listhead tmp;
4214 struct sctp_tmit_chunk template;
4215 struct mbuf *n, *mnext;
4217 unsigned int dataout, siz;
4222 if ((stcb == NULL) || (net == NULL) || (m == NULL) || (srcv == NULL)) {
4223 /* Software fault, you blew it on the call */
4225 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
4226 printf("software error in sctp_msg_append:1\n");
4227 printf("stcb:%p net:%p m:%p srcv:%p\n",
4228 stcb, net, m, srcv);
4235 so = stcb->sctp_socket;
4237 if (srcv->sinfo_flags & MSG_ABORT) {
4238 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
4239 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_ECHOED)) {
4240 /* It has to be up before we abort */
4241 /* how big is the user initiated abort? */
4242 if ((m->m_flags & M_PKTHDR) && (m->m_pkthdr.len)) {
4243 dataout = m->m_pkthdr.len;
4247 for (n = m; n; n = n->m_next) {
4248 dataout += n->m_len;
4251 M_PREPEND(m, sizeof(struct sctp_paramhdr), M_DONTWAIT);
4253 struct sctp_paramhdr *ph;
4254 m->m_len = sizeof(struct sctp_paramhdr) + dataout;
4255 ph = mtod(m, struct sctp_paramhdr *);
4256 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4257 ph->param_length = htons(m->m_len);
4259 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_RESPONSE_TO_USER_REQ, m);
4262 /* Only free if we don't send an abort */
4267 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
4268 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
4269 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
4270 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
4271 /* got data while shutting down */
4276 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
4277 /* Invalid stream number */
4281 if (asoc->strmout == NULL) {
4282 /* huh? software error */
4284 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
4285 printf("software error in sctp_msg_append:2\n");
4291 strq = &asoc->strmout[srcv->sinfo_stream];
4292 /* how big is it ? */
4293 if ((m->m_flags & M_PKTHDR) && (m->m_pkthdr.len)) {
4294 dataout = m->m_pkthdr.len;
4298 for (n = m; n; n = n->m_next) {
4299 dataout += n->m_len;
4303 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
4304 printf("Attempt to send out %d bytes\n",
4309 /* lock the socket buf */
4310 SOCKBUF_LOCK(&so->so_snd);
4311 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
4315 if (dataout > so->so_snd.sb_hiwat) {
4316 /* It will NEVER fit */
4320 if ((srcv->sinfo_flags & MSG_EOF) &&
4321 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
4326 if ((so->so_snd.sb_hiwat <
4327 (dataout + asoc->total_output_queue_size)) ||
4328 (asoc->chunks_on_out_queue > sctp_max_chunks_on_queue) ||
4329 (asoc->total_output_mbuf_queue_size >
4330 so->so_snd.sb_mbmax)
4332 /* XXX Buffer space hunt for data to skip */
4333 if (asoc->peer_supports_prsctp) {
4334 sctp_prune_prsctp(stcb, asoc, srcv, dataout);
4336 while ((so->so_snd.sb_hiwat <
4337 (dataout + asoc->total_output_queue_size)) ||
4338 (asoc->chunks_on_out_queue > sctp_max_chunks_on_queue) ||
4339 (asoc->total_output_mbuf_queue_size >
4340 so->so_snd.sb_mbmax)) {
4341 struct sctp_inpcb *inp;
4342 /* Now did we free up enough room? */
4343 if ((so->so_state & SS_NBIO)
4344 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
4345 || (flags & MSG_NBIO)
4348 /* Non-blocking io in place */
4349 error = EWOULDBLOCK;
4353 * We store off a pointer to the endpoint.
4354 * Since on return from this we must check to
4355 * see if an so_error is set. If so we may have
4356 * been reset and our stcb destroyed. Returning
4357 * an error will cause the correct error return
4358 * through and fix this all.
4360 inp = stcb->sctp_ep;
4362 * Not sure how else to do this since
4363 * the level we suspended at is not
4364 * known deep down where we are. I will
4365 * drop to spl0() so that others can
4369 inp->sctp_tcb_at_block = (void *)stcb;
4370 inp->error_on_block = 0;
4371 sbunlock(&so->so_snd);
4372 error = sbwait(&so->so_snd);
4374 * XXX: This is ugly but I have
4375 * recreated most of what goes on to
4376 * block in the sb. UGHH
4377 * May want to add the bit about being
4378 * no longer connected.. but this then
4379 * further dooms the UDP model NOT to
4382 inp->sctp_tcb_at_block = 0;
4383 if (inp->error_on_block)
4384 error = inp->error_on_block;
4386 error = so->so_error;
4390 error = sblock(&so->so_snd, M_WAITOK);
4393 /* Otherwise we cycle back and recheck
4396 #if defined(__FreeBSD__) && __FreeBSD_version >= 502115
4397 if (so->so_rcv.sb_state & SBS_CANTSENDMORE) {
4399 if (so->so_state & SS_CANTSENDMORE) {
4405 error = so->so_error;
4410 /* If we have a packet header fix it if it was broke */
4411 if (m->m_flags & M_PKTHDR) {
4412 m->m_pkthdr.len = dataout;
4414 /* use the smallest one, user set value or
4415 * smallest mtu of the asoc
4417 siz = sctp_get_frag_point(stcb, asoc);
4418 SOCKBUF_UNLOCK(&so->so_snd);
4419 if ((dataout) && (dataout <= siz)) {
4421 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
4424 SOCKBUF_LOCK(&so->so_snd);
4427 sctp_prepare_chunk(chk, stcb, srcv, strq, net);
4428 chk->whoTo->ref_count++;
4429 chk->rec.data.rcv_flags |= SCTP_DATA_NOT_FRAG;
4431 /* no flags yet, FRAGMENT_OK goes here */
4432 sctppcbinfo.ipi_count_chunk++;
4433 sctppcbinfo.ipi_gencnt_chunk++;
4434 asoc->chunks_on_out_queue++;
4437 /* Total in the MSIZE */
4438 for (mm = chk->data; mm; mm = mm->m_next) {
4440 if (mm->m_flags & M_EXT) {
4441 mbcnt += chk->data->m_ext.ext_size;
4444 /* fix up the send_size if it is not present */
4445 chk->send_size = dataout;
4446 chk->book_size = chk->send_size;
4448 /* ok, we are commited */
4449 if ((srcv->sinfo_flags & MSG_UNORDERED) == 0) {
4450 /* bump the ssn if we are unordered. */
4451 strq->next_sequence_sent++;
4453 chk->data->m_nextpkt = 0;
4454 asoc->stream_queue_cnt++;
4455 TAILQ_INSERT_TAIL(&strq->outqueue, chk, sctp_next);
4456 /* now check if this stream is on the wheel */
4457 if ((strq->next_spoke.tqe_next == NULL) &&
4458 (strq->next_spoke.tqe_prev == NULL)) {
4459 /* Insert it on the wheel since it is not
4462 sctp_insert_on_wheel(asoc, strq);
4464 } else if ((dataout) && (dataout > siz)) {
4466 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NO_FRAGMENT) &&
4469 SOCKBUF_LOCK(&so->so_snd);
4472 /* setup the template */
4473 sctp_prepare_chunk(&template, stcb, srcv, strq, net);
4476 while (dataout > siz) {
4478 * We can wait since this is called from the user
4481 n->m_nextpkt = m_split(n, siz, M_WAIT);
4482 if (n->m_nextpkt == NULL) {
4484 SOCKBUF_LOCK(&so->so_snd);
4491 * ok, now we have a chain on m where m->m_nextpkt points to
4492 * the next chunk and m/m->m_next chain is the piece to send.
4493 * We must go through the chains and thread them on to
4494 * sctp_tmit_chunk chains and place them all on the stream
4495 * queue, breaking the m->m_nextpkt pointers as we go.
4501 * first go through and allocate a sctp_tmit chunk
4502 * for each chunk piece
4504 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
4507 * ok we must spin through and dump anything
4508 * we have allocated and then jump to the
4511 chk = TAILQ_FIRST(&tmp);
4513 TAILQ_REMOVE(&tmp, chk, sctp_next);
4514 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
4515 sctppcbinfo.ipi_count_chunk--;
4516 asoc->chunks_on_out_queue--;
4517 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
4518 panic("Chunk count is negative");
4520 sctppcbinfo.ipi_gencnt_chunk++;
4521 chk = TAILQ_FIRST(&tmp);
4524 SOCKBUF_LOCK(&so->so_snd);
4527 sctppcbinfo.ipi_count_chunk++;
4528 asoc->chunks_on_out_queue++;
4530 sctppcbinfo.ipi_gencnt_chunk++;
4532 chk->whoTo->ref_count++;
4534 /* Total in the MSIZE */
4536 for (mm = chk->data; mm; mm = mm->m_next) {
4538 if (mm->m_flags & M_EXT) {
4539 mbcnt_e += chk->data->m_ext.ext_size;
4542 /* now fix the chk->send_size */
4543 if (chk->data->m_flags & M_PKTHDR) {
4544 chk->send_size = chk->data->m_pkthdr.len;
4548 for (nn = chk->data; nn; nn = nn->m_next) {
4549 chk->send_size += nn->m_len;
4552 chk->book_size = chk->send_size;
4553 chk->mbcnt = mbcnt_e;
4555 if (chk->flags & SCTP_PR_SCTP_BUFFER) {
4556 asoc->sent_queue_cnt_removeable++;
4559 TAILQ_INSERT_TAIL(&tmp, chk, sctp_next);
4562 /* now that we have enough space for all de-couple the
4563 * chain of mbufs by going through our temp array
4564 * and breaking the pointers.
4566 /* ok, we are commited */
4567 if ((srcv->sinfo_flags & MSG_UNORDERED) == 0) {
4568 /* bump the ssn if we are unordered. */
4569 strq->next_sequence_sent++;
4571 /* Mark the first/last flags. This will
4572 * result int a 3 for a single item on the list
4574 chk = TAILQ_FIRST(&tmp);
4575 chk->rec.data.rcv_flags |= SCTP_DATA_FIRST_FRAG;
4576 chk = TAILQ_LAST(&tmp, sctpchunk_listhead);
4577 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4578 /* now break any chains on the queue and
4579 * move it to the streams actual queue.
4581 chk = TAILQ_FIRST(&tmp);
4583 chk->data->m_nextpkt = 0;
4584 TAILQ_REMOVE(&tmp, chk, sctp_next);
4585 asoc->stream_queue_cnt++;
4586 TAILQ_INSERT_TAIL(&strq->outqueue, chk, sctp_next);
4587 chk = TAILQ_FIRST(&tmp);
4589 /* now check if this stream is on the wheel */
4590 if ((strq->next_spoke.tqe_next == NULL) &&
4591 (strq->next_spoke.tqe_prev == NULL)) {
4592 /* Insert it on the wheel since it is not
4595 sctp_insert_on_wheel(asoc, strq);
4598 SOCKBUF_LOCK(&so->so_snd);
4599 /* has a SHUTDOWN been (also) requested by the user on this asoc? */
4602 if ((srcv->sinfo_flags & MSG_EOF) &&
4603 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
4605 int some_on_streamwheel = 0;
4607 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
4608 /* Check to see if some data queued */
4609 struct sctp_stream_out *outs;
4610 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
4611 if (!TAILQ_EMPTY(&outs->outqueue)) {
4612 some_on_streamwheel = 1;
4618 if (TAILQ_EMPTY(&asoc->send_queue) &&
4619 TAILQ_EMPTY(&asoc->sent_queue) &&
4620 (some_on_streamwheel == 0)) {
4621 /* there is nothing queued to send, so I'm done... */
4622 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
4623 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
4624 /* only send SHUTDOWN the first time through */
4626 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
4627 printf("%s:%d sends a shutdown\n",
4633 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
4634 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
4635 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
4636 asoc->primary_destination);
4637 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
4638 asoc->primary_destination);
4642 * we still got (or just got) data to send, so set
4646 * XXX sockets draft says that MSG_EOF should be sent
4647 * with no data. currently, we will allow user data
4648 * to be sent first and move to SHUTDOWN-PENDING
4650 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
4653 #ifdef SCTP_MBCNT_LOGGING
4654 sctp_log_mbcnt(SCTP_LOG_MBCNT_INCREASE,
4655 asoc->total_output_queue_size,
4657 asoc->total_output_mbuf_queue_size,
4660 asoc->total_output_queue_size += dataout;
4661 asoc->total_output_mbuf_queue_size += mbcnt;
4662 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
4663 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4664 so->so_snd.sb_cc += dataout;
4665 so->so_snd.sb_mbcnt += mbcnt;
4669 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
4670 printf("++total out:%d total_mbuf_out:%d\n",
4671 (int)asoc->total_output_queue_size,
4672 (int)asoc->total_output_mbuf_queue_size);
4677 sbunlock(&so->so_snd);
4679 SOCKBUF_UNLOCK(&so->so_snd);
4681 if (m && m->m_nextpkt) {
4684 mnext = n->m_nextpkt;
4685 n->m_nextpkt = NULL;
4695 static struct mbuf *
4696 sctp_copy_mbufchain(struct mbuf *clonechain,
4697 struct mbuf *outchain)
4699 struct mbuf *appendchain;
4700 #if defined(__FreeBSD__) || defined(__NetBSD__)
4701 /* Supposedly m_copypacket is an optimization, use it if we can */
4702 if (clonechain->m_flags & M_PKTHDR) {
4703 appendchain = m_copypacket(clonechain, M_DONTWAIT);
4704 sctp_pegs[SCTP_CACHED_SRC]++;
4706 appendchain = m_copy(clonechain, 0, M_COPYALL);
4707 #elif defined(__APPLE__)
4708 appendchain = sctp_m_copym(clonechain, 0, M_COPYALL, M_DONTWAIT);
4710 appendchain = m_copy(clonechain, 0, M_COPYALL);
4713 if (appendchain == NULL) {
4716 sctp_m_freem(outchain);
4720 /* tack on to the end */
4724 if (m->m_next == NULL) {
4725 m->m_next = appendchain;
4730 if (outchain->m_flags & M_PKTHDR) {
4736 append_tot += t->m_len;
4739 outchain->m_pkthdr.len += append_tot;
4743 return (appendchain);
4748 sctp_sendall_iterator(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr, u_int32_t val)
4750 struct sctp_copy_all *ca;
4752 int turned_on_nonblock=0, ret;
4754 ca = (struct sctp_copy_all *)ptr;
4755 if (ca->m == NULL) {
4758 if (ca->inp != inp) {
4762 m = sctp_copy_mbufchain(ca->m, NULL);
4764 /* can't copy so we are done */
4768 if ((stcb->sctp_socket->so_state & SS_NBIO) == 0) {
4769 /* we have to do this non-blocking */
4770 turned_on_nonblock = 1;
4771 stcb->sctp_socket->so_state |= SS_NBIO;
4773 ret = sctp_msg_append(stcb, stcb->asoc.primary_destination, m, &ca->sndrcv, 0);
4774 if (turned_on_nonblock) {
4775 /* we turned on non-blocking so turn it off */
4776 stcb->sctp_socket->so_state &= ~SS_NBIO;
4786 sctp_sendall_completes(void *ptr, u_int32_t val)
4788 struct sctp_copy_all *ca;
4789 ca = (struct sctp_copy_all *)ptr;
4790 /* Do a notify here?
4791 * Kacheong suggests that the notify
4792 * be done at the send time.. so you would
4793 * push up a notification if any send failed.
4794 * Don't know if this is feasable since the
4795 * only failures we have is "memory" related and
4796 * if you cannot get an mbuf to send the data
4797 * you surely can't get an mbuf to send up
4798 * to notify the user you can't send the data :->
4801 /* now free everything */
4807 #define MC_ALIGN(m, len) do { \
4808 (m)->m_data += (MCLBYTES - (len)) & ~(sizeof(long) - 1); \
4813 static struct mbuf *
4814 sctp_copy_out_all(struct uio *uio, int len)
4816 struct mbuf *ret, *at;
4817 int left, willcpy, cancpy, error;
4819 MGETHDR(ret, M_WAIT, MT_HEADER);
4826 ret->m_pkthdr.len = len;
4827 MCLGET(ret, M_WAIT);
4831 if ((ret->m_flags & M_EXT) == 0) {
4835 cancpy = M_TRAILINGSPACE(ret);
4836 willcpy = min(cancpy, left);
4839 /* Align data to the end */
4840 MC_ALIGN(at, willcpy);
4841 error = uiomove(mtod(at, caddr_t), willcpy, uio);
4847 at->m_len = willcpy;
4848 at->m_nextpkt = at->m_next = 0;
4851 MGET(at->m_next, M_WAIT, MT_DATA);
4852 if (at->m_next == NULL) {
4861 if ((at->m_flags & M_EXT) == 0) {
4864 cancpy = M_TRAILINGSPACE(at);
4865 willcpy = min(cancpy, left);
4872 sctp_sendall (struct sctp_inpcb *inp, struct uio *uio, struct mbuf *m, struct sctp_sndrcvinfo *srcv)
4875 struct sctp_copy_all *ca;
4876 MALLOC(ca, struct sctp_copy_all *,
4877 sizeof(struct sctp_copy_all), M_PCB, M_WAIT);
4882 memset (ca, 0, sizeof(struct sctp_copy_all));
4886 /* take off the sendall flag, it would
4887 * be bad if we failed to do this :-0
4889 ca->sndrcv.sinfo_flags &= ~MSG_SENDALL;
4891 /* get length and mbuf chain */
4893 ca->sndlen = uio->uio_resid;
4894 ca->m = sctp_copy_out_all(uio, ca->sndlen);
4895 if (ca->m == NULL) {
4900 if ((m->m_flags & M_PKTHDR) == 0) {
4905 ca->sndlen += m->m_len;
4909 ca->sndlen = m->m_pkthdr.len;
4914 ret = sctp_initiate_iterator(sctp_sendall_iterator, SCTP_PCB_ANY_FLAGS, SCTP_ASOC_ANY_STATE,
4915 (void *)ca, 0, sctp_sendall_completes, inp);
4918 printf("Failed to initate iterator to takeover associations\n");
4929 sctp_toss_old_cookies(struct sctp_association *asoc)
4931 struct sctp_tmit_chunk *chk, *nchk;
4932 chk = TAILQ_FIRST(&asoc->control_send_queue);
4934 nchk = TAILQ_NEXT(chk, sctp_next);
4935 if (chk->rec.chunk_id == SCTP_COOKIE_ECHO) {
4936 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
4938 sctp_m_freem(chk->data);
4941 asoc->ctrl_queue_cnt--;
4943 sctp_free_remote_addr(chk->whoTo);
4944 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
4945 sctppcbinfo.ipi_count_chunk--;
4946 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
4947 panic("Chunk count is negative");
4949 sctppcbinfo.ipi_gencnt_chunk++;
4956 sctp_toss_old_asconf(struct sctp_tcb *stcb)
4958 struct sctp_association *asoc;
4959 struct sctp_tmit_chunk *chk, *chk_tmp;
4962 for (chk = TAILQ_FIRST(&asoc->control_send_queue); chk != NULL;
4965 chk_tmp = TAILQ_NEXT(chk, sctp_next);
4966 /* find SCTP_ASCONF chunk in queue (only one ever in queue) */
4967 if (chk->rec.chunk_id == SCTP_ASCONF) {
4968 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
4970 sctp_m_freem(chk->data);
4973 asoc->ctrl_queue_cnt--;
4975 sctp_free_remote_addr(chk->whoTo);
4976 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
4977 sctppcbinfo.ipi_count_chunk--;
4978 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
4979 panic("Chunk count is negative");
4981 sctppcbinfo.ipi_gencnt_chunk++;
4988 sctp_clean_up_datalist(struct sctp_tcb *stcb,
4989 struct sctp_association *asoc,
4990 struct sctp_tmit_chunk **data_list,
4992 struct sctp_nets *net)
4995 for (i = 0; i < bundle_at; i++) {
4996 /* off of the send queue */
4998 /* Any chunk NOT 0 you zap the time
4999 * chunk 0 gets zapped or set based on
5000 * if a RTO measurment is needed.
5002 data_list[i]->do_rtt = 0;
5005 data_list[i]->sent_rcv_time = net->last_sent_time;
5006 TAILQ_REMOVE(&asoc->send_queue,
5009 /* on to the sent queue */
5010 TAILQ_INSERT_TAIL(&asoc->sent_queue,
5013 /* This does not lower until the cum-ack passes it */
5014 asoc->sent_queue_cnt++;
5015 asoc->send_queue_cnt--;
5016 if ((asoc->peers_rwnd <= 0) &&
5017 (asoc->total_flight == 0) &&
5019 /* Mark the chunk as being a window probe */
5021 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
5022 printf("WINDOW PROBE SET\n");
5025 sctp_pegs[SCTP_WINDOW_PROBES]++;
5026 data_list[i]->rec.data.state_flags |= SCTP_WINDOW_PROBE;
5028 data_list[i]->rec.data.state_flags &= ~SCTP_WINDOW_PROBE;
5030 #ifdef SCTP_AUDITING_ENABLED
5031 sctp_audit_log(0xC2, 3);
5033 data_list[i]->sent = SCTP_DATAGRAM_SENT;
5034 data_list[i]->snd_count = 1;
5035 net->flight_size += data_list[i]->book_size;
5036 asoc->total_flight += data_list[i]->book_size;
5037 asoc->total_flight_count++;
5038 #ifdef SCTP_LOG_RWND
5039 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
5040 asoc->peers_rwnd , data_list[i]->send_size, sctp_peer_chunk_oh);
5042 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
5043 (u_int32_t)(data_list[i]->send_size + sctp_peer_chunk_oh));
5044 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5045 /* SWS sender side engages */
5046 asoc->peers_rwnd = 0;
5052 sctp_clean_up_ctl(struct sctp_association *asoc)
5054 struct sctp_tmit_chunk *chk, *nchk;
5055 for (chk = TAILQ_FIRST(&asoc->control_send_queue);
5057 nchk = TAILQ_NEXT(chk, sctp_next);
5058 if ((chk->rec.chunk_id == SCTP_SELECTIVE_ACK) ||
5059 (chk->rec.chunk_id == SCTP_HEARTBEAT_REQUEST) ||
5060 (chk->rec.chunk_id == SCTP_HEARTBEAT_ACK) ||
5061 (chk->rec.chunk_id == SCTP_SHUTDOWN) ||
5062 (chk->rec.chunk_id == SCTP_SHUTDOWN_ACK) ||
5063 (chk->rec.chunk_id == SCTP_OPERATION_ERROR) ||
5064 (chk->rec.chunk_id == SCTP_PACKET_DROPPED) ||
5065 (chk->rec.chunk_id == SCTP_COOKIE_ACK) ||
5066 (chk->rec.chunk_id == SCTP_ECN_CWR) ||
5067 (chk->rec.chunk_id == SCTP_ASCONF_ACK)) {
5068 /* Stray chunks must be cleaned up */
5070 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
5072 sctp_m_freem(chk->data);
5075 asoc->ctrl_queue_cnt--;
5076 sctp_free_remote_addr(chk->whoTo);
5077 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
5078 sctppcbinfo.ipi_count_chunk--;
5079 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
5080 panic("Chunk count is negative");
5082 sctppcbinfo.ipi_gencnt_chunk++;
5083 } else if (chk->rec.chunk_id == SCTP_STREAM_RESET) {
5084 struct sctp_stream_reset_req *strreq;
5085 /* special handling, we must look into the param */
5086 strreq = mtod(chk->data, struct sctp_stream_reset_req *);
5087 if (strreq->sr_req.ph.param_type == ntohs(SCTP_STR_RESET_RESPONSE)) {
5088 goto clean_up_anyway;
5095 sctp_move_to_outqueue(struct sctp_tcb *stcb,
5096 struct sctp_stream_out *strq)
5098 /* Move from the stream to the send_queue keeping track of the total */
5099 struct sctp_association *asoc;
5103 struct sctp_tmit_chunk *chk, *nchk;
5104 struct sctp_data_chunk *dchkh;
5105 struct sctpchunk_listhead tmp;
5110 chk = TAILQ_FIRST(&strq->outqueue);
5112 nchk = TAILQ_NEXT(chk, sctp_next);
5113 /* now put in the chunk header */
5115 M_PREPEND(chk->data, sizeof(struct sctp_data_chunk), M_DONTWAIT);
5116 if (chk->data == NULL) {
5121 if (orig != chk->data) {
5122 /* A new mbuf was added, account for it */
5123 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5124 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5125 stcb->sctp_socket->so_snd.sb_mbcnt += MSIZE;
5127 #ifdef SCTP_MBCNT_LOGGING
5128 sctp_log_mbcnt(SCTP_LOG_MBCNT_INCREASE,
5129 asoc->total_output_queue_size,
5131 asoc->total_output_mbuf_queue_size,
5134 stcb->asoc.total_output_mbuf_queue_size += MSIZE;
5135 chk->mbcnt += MSIZE;
5137 chk->send_size += sizeof(struct sctp_data_chunk);
5138 /* This should NOT have to do anything, but
5139 * I would rather be cautious
5141 if (!failed && ((size_t)chk->data->m_len < sizeof(struct sctp_data_chunk))) {
5142 m_pullup(chk->data, sizeof(struct sctp_data_chunk));
5143 if (chk->data == NULL) {
5148 dchkh = mtod(chk->data, struct sctp_data_chunk *);
5149 dchkh->ch.chunk_length = htons(chk->send_size);
5150 /* Chunks must be padded to even word boundary */
5151 padval = chk->send_size % 4;
5153 /* For fragmented messages this should not
5154 * run except possibly on the last chunk
5156 if (sctp_pad_lastmbuf(chk->data, (4 - padval))) {
5157 /* we are in big big trouble no mbufs :< */
5161 chk->send_size += (4 - padval);
5163 /* pull from stream queue */
5164 TAILQ_REMOVE(&strq->outqueue, chk, sctp_next);
5165 asoc->stream_queue_cnt--;
5166 TAILQ_INSERT_TAIL(&tmp, chk, sctp_next);
5167 /* add it in to the size of moved chunks */
5168 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
5169 /* we pull only one message */
5175 /* Gak, we just lost the user message */
5176 chk = TAILQ_FIRST(&tmp);
5178 nchk = TAILQ_NEXT(chk, sctp_next);
5179 TAILQ_REMOVE(&tmp, chk, sctp_next);
5181 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
5182 (SCTP_NOTIFY_DATAGRAM_UNSENT|SCTP_INTERNAL_ERROR),
5186 sctp_m_freem(chk->data);
5190 sctp_free_remote_addr(chk->whoTo);
5193 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
5194 sctppcbinfo.ipi_count_chunk--;
5195 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
5196 panic("Chunk count is negative");
5198 sctppcbinfo.ipi_gencnt_chunk++;
5203 /* now pull them off of temp wheel */
5204 chk = TAILQ_FIRST(&tmp);
5206 nchk = TAILQ_NEXT(chk, sctp_next);
5207 /* insert on send_queue */
5208 TAILQ_REMOVE(&tmp, chk, sctp_next);
5209 TAILQ_INSERT_TAIL(&asoc->send_queue, chk, sctp_next);
5210 asoc->send_queue_cnt++;
5212 chk->rec.data.TSN_seq = asoc->sending_seq++;
5214 dchkh = mtod(chk->data, struct sctp_data_chunk *);
5215 /* Put the rest of the things in place now. Size
5216 * was done earlier in previous loop prior to
5219 dchkh->ch.chunk_type = SCTP_DATA;
5220 dchkh->ch.chunk_flags = chk->rec.data.rcv_flags;
5221 dchkh->dp.tsn = htonl(chk->rec.data.TSN_seq);
5222 dchkh->dp.stream_id = htons(strq->stream_no);
5223 dchkh->dp.stream_sequence = htons(chk->rec.data.stream_seq);
5224 dchkh->dp.protocol_id = chk->rec.data.payloadtype;
5225 /* total count moved */
5226 tot_moved += chk->send_size;
5233 sctp_fill_outqueue(struct sctp_tcb *stcb,
5234 struct sctp_nets *net)
5236 struct sctp_association *asoc;
5237 struct sctp_tmit_chunk *chk;
5238 struct sctp_stream_out *strq, *strqn;
5239 int mtu_fromwheel, goal_mtu;
5240 unsigned int moved, seenend, cnt_mvd=0;
5243 /* Attempt to move at least 1 MTU's worth
5244 * onto the wheel for each destination address
5246 goal_mtu = net->cwnd - net->flight_size;
5247 if ((unsigned int)goal_mtu < net->mtu) {
5248 goal_mtu = net->mtu;
5250 if (sctp_pegs[SCTP_MOVED_MTU] < (unsigned int)goal_mtu) {
5251 sctp_pegs[SCTP_MOVED_MTU] = goal_mtu;
5253 seenend = moved = mtu_fromwheel = 0;
5254 if (asoc->last_out_stream == NULL) {
5255 strq = asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel);
5256 if (asoc->last_out_stream == NULL) {
5257 /* huh nothing on the wheel, TSNH */
5262 strq = TAILQ_NEXT(asoc->last_out_stream, next_spoke);
5265 asoc->last_out_stream = TAILQ_FIRST(&asoc->out_wheel);
5267 while (mtu_fromwheel < goal_mtu) {
5271 strq = TAILQ_FIRST(&asoc->out_wheel);
5272 } else if ((moved == 0) && (seenend)) {
5273 /* none left on the wheel */
5274 sctp_pegs[SCTP_MOVED_NLEF]++;
5278 * clear the flags and rotate back through
5283 strq = TAILQ_FIRST(&asoc->out_wheel);
5289 strqn = TAILQ_NEXT(strq, next_spoke);
5290 if ((chk = TAILQ_FIRST(&strq->outqueue)) == NULL) {
5291 /* none left on this queue, prune a spoke? */
5292 sctp_remove_from_wheel(asoc, strq);
5293 if (strq == asoc->last_out_stream) {
5294 /* the last one we used went off the wheel */
5295 asoc->last_out_stream = NULL;
5300 if (chk->whoTo != net) {
5301 /* Skip this stream, first one on stream
5302 * does not head to our current destination.
5307 mtu_fromwheel += sctp_move_to_outqueue(stcb, strq);
5310 asoc->last_out_stream = strq;
5313 sctp_pegs[SCTP_MOVED_MAX]++;
5315 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5316 printf("Ok we moved %d chunks to send queue\n",
5320 if (sctp_pegs[SCTP_MOVED_QMAX] < cnt_mvd) {
5321 sctp_pegs[SCTP_MOVED_QMAX] = cnt_mvd;
5326 sctp_fix_ecn_echo(struct sctp_association *asoc)
5328 struct sctp_tmit_chunk *chk;
5329 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
5330 if (chk->rec.chunk_id == SCTP_ECN_ECHO) {
5331 chk->sent = SCTP_DATAGRAM_UNSENT;
5337 sctp_move_to_an_alt(struct sctp_tcb *stcb,
5338 struct sctp_association *asoc,
5339 struct sctp_nets *net)
5341 struct sctp_tmit_chunk *chk;
5342 struct sctp_nets *a_net;
5343 a_net = sctp_find_alternate_net(stcb, net);
5344 if ((a_net != net) &&
5345 ((a_net->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE)) {
5347 * We only proceed if a valid alternate is found that is
5348 * not this one and is reachable. Here we must move all
5349 * chunks queued in the send queue off of the destination
5350 * address to our alternate.
5352 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
5353 if (chk->whoTo == net) {
5354 /* Move the chunk to our alternate */
5355 sctp_free_remote_addr(chk->whoTo);
5363 static int sctp_from_user_send=0;
5366 sctp_med_chunk_output(struct sctp_inpcb *inp,
5367 struct sctp_tcb *stcb,
5368 struct sctp_association *asoc,
5371 int control_only, int *cwnd_full, int from_where,
5372 struct timeval *now, int *now_filled)
5375 * Ok this is the generic chunk service queue.
5376 * we must do the following:
5377 * - Service the stream queue that is next, moving any message
5378 * (note I must get a complete message i.e. FIRST/MIDDLE and
5379 * LAST to the out queue in one pass) and assigning TSN's
5380 * - Check to see if the cwnd/rwnd allows any output, if so we
5381 * go ahead and fomulate and send the low level chunks. Making
5382 * sure to combine any control in the control chunk queue also.
5384 struct sctp_nets *net;
5385 struct mbuf *outchain;
5386 struct sctp_tmit_chunk *chk, *nchk;
5387 struct sctphdr *shdr;
5388 /* temp arrays for unlinking */
5389 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
5390 int no_fragmentflg, error;
5391 int one_chunk, hbflag;
5392 int asconf, cookie, no_out_cnt;
5393 int bundle_at, ctl_cnt, no_data_chunks, cwnd_full_ind;
5394 unsigned int mtu, r_mtu, omtu;
5397 ctl_cnt = no_out_cnt = asconf = cookie = 0;
5399 * First lets prime the pump. For each destination, if there
5400 * is room in the flight size, attempt to pull an MTU's worth
5401 * out of the stream queues into the general send_queue
5403 #ifdef SCTP_AUDITING_ENABLED
5404 sctp_audit_log(0xC2, 2);
5407 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5408 printf("***********************\n");
5417 /* Nothing to possible to send? */
5418 if (TAILQ_EMPTY(&asoc->control_send_queue) &&
5419 TAILQ_EMPTY(&asoc->send_queue) &&
5420 TAILQ_EMPTY(&asoc->out_wheel)) {
5422 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5423 printf("All wheels empty\n");
5428 if (asoc->peers_rwnd <= 0) {
5429 /* No room in peers rwnd */
5432 if (asoc->total_flight > 0) {
5433 /* we are allowed one chunk in flight */
5435 sctp_pegs[SCTP_RWND_BLOCKED]++;
5439 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5440 printf("Ok we have done the fillup no_data_chunk=%d tf=%d prw:%d\n",
5441 (int)no_data_chunks,
5442 (int)asoc->total_flight, (int)asoc->peers_rwnd);
5445 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5447 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5448 printf("net:%p fs:%d cwnd:%d\n",
5449 net, net->flight_size, net->cwnd);
5452 if (net->flight_size >= net->cwnd) {
5453 /* skip this network, no room */
5456 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5457 printf("Ok skip fillup->fs:%d > cwnd:%d\n",
5462 sctp_pegs[SCTP_CWND_NOFILL]++;
5466 * spin through the stream queues moving one message and
5467 * assign TSN's as appropriate.
5469 sctp_fill_outqueue(stcb, net);
5471 *cwnd_full = cwnd_full_ind;
5472 /* now service each destination and send out what we can for it */
5474 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5476 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
5479 printf("We have %d chunks on the send_queue\n", chk_cnt);
5481 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
5484 printf("We have %d chunks on the sent_queue\n", chk_cnt);
5485 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
5488 printf("We have %d chunks on the control_queue\n", chk_cnt);
5491 /* If we have data to send, and DSACK is running, stop it
5492 * and build a SACK to dump on to bundle with output. This
5493 * actually MAY make it so the bundling does not occur if
5494 * the SACK is big but I think this is ok because basic SACK
5495 * space is pre-reserved in our fragmentation size choice.
5497 if ((TAILQ_FIRST(&asoc->send_queue) != NULL) &&
5498 (no_data_chunks == 0)) {
5499 /* We will be sending something */
5500 if (callout_pending(&stcb->asoc.dack_timer.timer)) {
5501 /* Yep a callout is pending */
5502 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
5505 sctp_send_sack(stcb);
5508 /* Nothing to send? */
5509 if ((TAILQ_FIRST(&asoc->control_send_queue) == NULL) &&
5510 (TAILQ_FIRST(&asoc->send_queue) == NULL)) {
5513 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5514 /* how much can we send? */
5515 if (net->ref_count < 2) {
5516 /* Ref-count of 1 so we cannot have data or control
5517 * queued to this address. Skip it.
5521 ctl_cnt = bundle_at = 0;
5526 if ((net->ro.ro_rt) && (net->ro.ro_rt->rt_ifp)) {
5527 /* if we have a route and an ifp
5528 * check to see if we have room to
5532 ifp = net->ro.ro_rt->rt_ifp;
5533 if ((ifp->if_snd.ifq_len + 2) >= ifp->if_snd.ifq_maxlen) {
5534 sctp_pegs[SCTP_IFP_QUEUE_FULL]++;
5535 #ifdef SCTP_LOG_MAXBURST
5536 sctp_log_maxburst(net, ifp->if_snd.ifq_len, ifp->if_snd.ifq_maxlen, SCTP_MAX_IFP_APPLIED);
5541 if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
5542 mtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
5544 mtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
5546 if (mtu > asoc->peers_rwnd) {
5547 if (asoc->total_flight > 0) {
5548 /* We have a packet in flight somewhere */
5549 r_mtu = asoc->peers_rwnd;
5551 /* We are always allowed to send one MTU out */
5559 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5560 printf("Ok r_mtu is %d mtu is %d for this net:%p one_chunk:%d\n",
5561 r_mtu, mtu, net, one_chunk);
5564 /************************/
5565 /* Control transmission */
5566 /************************/
5567 /* Now first lets go through the control queue */
5568 for (chk = TAILQ_FIRST(&asoc->control_send_queue);
5570 nchk = TAILQ_NEXT(chk, sctp_next);
5571 if (chk->whoTo != net) {
5573 * No, not sent to the network we are
5578 if (chk->data == NULL) {
5581 if ((chk->data->m_flags & M_PKTHDR) == 0) {
5583 * NOTE: the chk queue MUST have the PKTHDR
5584 * flag set on it with a total in the
5585 * m_pkthdr.len field!! else the chunk will
5590 if (chk->sent != SCTP_DATAGRAM_UNSENT) {
5592 * It must be unsent. Cookies and ASCONF's
5593 * hang around but there timers will force
5594 * when marked for resend.
5598 /* Here we do NOT factor the r_mtu */
5599 if ((chk->data->m_pkthdr.len < (int)mtu) ||
5600 (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
5602 * We probably should glom the mbuf chain from
5603 * the chk->data for control but the problem
5604 * is it becomes yet one more level of
5605 * tracking to do if for some reason output
5606 * fails. Then I have got to reconstruct the
5607 * merged control chain.. el yucko.. for now
5608 * we take the easy way and do the copy
5610 outchain = sctp_copy_mbufchain(chk->data,
5612 if (outchain == NULL) {
5615 /* update our MTU size */
5616 mtu -= chk->data->m_pkthdr.len;
5620 /* Do clear IP_DF ? */
5621 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
5624 /* Mark things to be removed, if needed */
5625 if ((chk->rec.chunk_id == SCTP_SELECTIVE_ACK) ||
5626 (chk->rec.chunk_id == SCTP_HEARTBEAT_REQUEST) ||
5627 (chk->rec.chunk_id == SCTP_HEARTBEAT_ACK) ||
5628 (chk->rec.chunk_id == SCTP_SHUTDOWN) ||
5629 (chk->rec.chunk_id == SCTP_SHUTDOWN_ACK) ||
5630 (chk->rec.chunk_id == SCTP_OPERATION_ERROR) ||
5631 (chk->rec.chunk_id == SCTP_COOKIE_ACK) ||
5632 (chk->rec.chunk_id == SCTP_ECN_CWR) ||
5633 (chk->rec.chunk_id == SCTP_PACKET_DROPPED) ||
5634 (chk->rec.chunk_id == SCTP_ASCONF_ACK)) {
5636 if (chk->rec.chunk_id == SCTP_HEARTBEAT_REQUEST)
5638 /* remove these chunks at the end */
5639 if (chk->rec.chunk_id == SCTP_SELECTIVE_ACK) {
5640 /* turn off the timer */
5641 if (callout_pending(&stcb->asoc.dack_timer.timer)) {
5642 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
5649 * Other chunks, since they have
5650 * timers running (i.e. COOKIE or
5651 * ASCONF) we just "trust" that it
5652 * gets sent or retransmitted.
5655 if (chk->rec.chunk_id == SCTP_COOKIE_ECHO) {
5658 } else if (chk->rec.chunk_id == SCTP_ASCONF) {
5660 * set hb flag since we can use
5666 chk->sent = SCTP_DATAGRAM_SENT;
5671 * Ok we are out of room but we can
5672 * output without effecting the flight
5673 * size since this little guy is a
5674 * control only packet.
5677 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
5681 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
5684 if (outchain->m_len == 0) {
5686 * Special case for when you
5687 * get a 0 len mbuf at the
5688 * head due to the lack of a
5689 * MHDR at the beginning.
5691 outchain->m_len = sizeof(struct sctphdr);
5693 M_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT);
5694 if (outchain == NULL) {
5697 goto error_out_again;
5700 shdr = mtod(outchain, struct sctphdr *);
5701 shdr->src_port = inp->sctp_lport;
5702 shdr->dest_port = stcb->rport;
5703 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
5706 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
5707 (struct sockaddr *)&net->ro._l_addr,
5709 no_fragmentflg, 0, NULL, asconf))) {
5710 if (error == ENOBUFS) {
5711 asoc->ifp_had_enobuf = 1;
5713 sctp_pegs[SCTP_DATA_OUT_ERR]++;
5714 if (from_where == 0) {
5715 sctp_pegs[SCTP_ERROUT_FRM_USR]++;
5719 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
5720 printf("Gak got ctrl error %d\n", error);
5723 /* error, could not output */
5726 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5727 printf("Update HB anyway\n");
5730 if (*now_filled == 0) {
5731 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
5733 *now = net->last_sent_time;
5735 net->last_sent_time = *now;
5739 if (error == EHOSTUNREACH) {
5742 * unreachable during
5746 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5747 printf("Moving data to an alterante\n");
5750 sctp_move_to_an_alt(stcb, asoc, net);
5752 sctp_clean_up_ctl (asoc);
5755 asoc->ifp_had_enobuf = 0;
5756 /* Only HB or ASCONF advances time */
5758 if (*now_filled == 0) {
5759 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
5761 *now = net->last_sent_time;
5763 net->last_sent_time = *now;
5768 * increase the number we sent, if a
5769 * cookie is sent we don't tell them
5773 *num_out += ctl_cnt;
5774 /* recalc a clean slate and setup */
5775 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
5776 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
5778 mtu = (net->mtu - SCTP_MIN_V4_OVERHEAD);
5784 /*********************/
5785 /* Data transmission */
5786 /*********************/
5787 /* now lets add any data within the MTU constraints */
5788 if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
5789 omtu = net->mtu - (sizeof(struct ip) + sizeof(struct sctphdr));
5791 omtu = net->mtu - (sizeof(struct ip6_hdr) + sizeof(struct sctphdr));
5795 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5796 printf("Now to data transmission\n");
5800 if (((asoc->state & SCTP_STATE_OPEN) == SCTP_STATE_OPEN) ||
5802 for (chk = TAILQ_FIRST(&asoc->send_queue); chk; chk = nchk) {
5803 if (no_data_chunks) {
5804 /* let only control go out */
5806 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5807 printf("Either nothing to send or we are full\n");
5812 if (net->flight_size >= net->cwnd) {
5813 /* skip this net, no room for data */
5815 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5816 printf("fs:%d > cwnd:%d\n",
5817 net->flight_size, net->cwnd);
5820 sctp_pegs[SCTP_CWND_BLOCKED]++;
5824 nchk = TAILQ_NEXT(chk, sctp_next);
5825 if (chk->whoTo != net) {
5826 /* No, not sent to this net */
5828 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5829 printf("chk->whoTo:%p not %p\n",
5837 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5838 printf("Can we pick up a chunk?\n");
5841 if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
5842 /* strange, we have a chunk that is to bit
5843 * for its destination and yet no fragment ok flag.
5844 * Something went wrong when the PMTU changed...we did
5845 * not mark this chunk for some reason?? I will
5846 * fix it here by letting IP fragment it for now and
5847 * printing a warning. This really should not happen ...
5849 /*#ifdef SCTP_DEBUG*/
5850 printf("Warning chunk of %d bytes > mtu:%d and yet PMTU disc missed\n",
5851 chk->send_size, mtu);
5853 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
5856 if (((chk->send_size <= mtu) && (chk->send_size <= r_mtu)) ||
5857 ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) && (chk->send_size <= asoc->peers_rwnd))) {
5858 /* ok we will add this one */
5860 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5861 printf("Picking up the chunk\n");
5864 outchain = sctp_copy_mbufchain(chk->data, outchain);
5865 if (outchain == NULL) {
5867 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5868 printf("Gakk no memory\n");
5871 if (!callout_pending(&net->rxt_timer.timer)) {
5872 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
5876 /* upate our MTU size */
5877 /* Do clear IP_DF ? */
5878 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
5881 mtu -= chk->send_size;
5882 r_mtu -= chk->send_size;
5883 data_list[bundle_at++] = chk;
5884 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
5892 if ((r_mtu <= 0) || one_chunk) {
5898 * Must be sent in order of the TSN's
5902 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5903 printf("ok no more chk:%d > mtu:%d || < r_mtu:%d\n",
5904 chk->send_size, mtu, r_mtu);
5911 } /* if asoc.state OPEN */
5912 /* Is there something to send for this destination? */
5914 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5915 printf("ok now is chain assembled? %p\n",
5921 /* We may need to start a control timer or two */
5923 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, net);
5927 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, net);
5930 /* must start a send timer if data is being sent */
5931 if (bundle_at && (!callout_pending(&net->rxt_timer.timer))) {
5932 /* no timer running on this destination
5936 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5937 printf("ok lets start a send timer .. we will transmit %p\n",
5941 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
5943 /* Now send it, if there is anything to send :> */
5944 if ((outchain->m_flags & M_PKTHDR) == 0) {
5947 MGETHDR(t, M_DONTWAIT, MT_HEADER);
5949 sctp_m_freem(outchain);
5952 t->m_next = outchain;
5953 t->m_pkthdr.len = 0;
5954 t->m_pkthdr.rcvif = 0;
5959 outchain->m_pkthdr.len += t->m_len;
5963 if (outchain->m_len == 0) {
5964 /* Special case for when you get a 0 len
5965 * mbuf at the head due to the lack
5966 * of a MHDR at the beginning.
5968 MH_ALIGN(outchain, sizeof(struct sctphdr));
5969 outchain->m_len = sizeof(struct sctphdr);
5971 M_PREPEND(outchain, sizeof(struct sctphdr), M_DONTWAIT);
5972 if (outchain == NULL) {
5978 shdr = mtod(outchain, struct sctphdr *);
5979 shdr->src_port = inp->sctp_lport;
5980 shdr->dest_port = stcb->rport;
5981 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
5983 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
5984 (struct sockaddr *)&net->ro._l_addr,
5986 no_fragmentflg, bundle_at, data_list[0], asconf))) {
5987 /* error, we could not output */
5988 if (error == ENOBUFS) {
5989 asoc->ifp_had_enobuf = 1;
5991 sctp_pegs[SCTP_DATA_OUT_ERR]++;
5992 if (from_where == 0) {
5993 sctp_pegs[SCTP_ERROUT_FRM_USR]++;
5998 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
5999 printf("Gak send error %d\n", error);
6004 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
6005 printf("Update HB time anyway\n");
6008 if (*now_filled == 0) {
6009 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
6011 *now = net->last_sent_time;
6013 net->last_sent_time = *now;
6017 if (error == EHOSTUNREACH) {
6019 * Destination went unreachable during
6023 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
6024 printf("Calling the movement routine\n");
6027 sctp_move_to_an_alt(stcb, asoc, net);
6029 sctp_clean_up_ctl (asoc);
6032 asoc->ifp_had_enobuf = 0;
6034 if (bundle_at || hbflag) {
6035 /* For data/asconf and hb set time */
6036 if (*now_filled == 0) {
6037 SCTP_GETTIME_TIMEVAL(&net->last_sent_time);
6039 *now = net->last_sent_time;
6041 net->last_sent_time = *now;
6046 *num_out += (ctl_cnt + bundle_at);
6049 if (!net->rto_pending) {
6050 /* setup for a RTO measurement */
6051 net->rto_pending = 1;
6052 data_list[0]->do_rtt = 1;
6054 data_list[0]->do_rtt = 0;
6056 sctp_pegs[SCTP_PEG_TSNS_SENT] += bundle_at;
6057 sctp_clean_up_datalist(stcb, asoc, data_list, bundle_at, net);
6064 /* At the end there should be no NON timed
6065 * chunks hanging on this queue.
6067 if ((*num_out == 0) && (*reason_code == 0)) {
6070 sctp_clean_up_ctl (asoc);
6075 sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
6077 /* Prepend a OPERATIONAL_ERROR chunk header
6078 * and put on the end of the control chunk queue.
6080 /* Sender had better have gotten a MGETHDR or else
6081 * the control chunk will be forever skipped
6083 struct sctp_chunkhdr *hdr;
6084 struct sctp_tmit_chunk *chk;
6087 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
6090 sctp_m_freem(op_err);
6093 sctppcbinfo.ipi_count_chunk++;
6094 sctppcbinfo.ipi_gencnt_chunk++;
6095 M_PREPEND(op_err, sizeof(struct sctp_chunkhdr), M_DONTWAIT);
6096 if (op_err == NULL) {
6097 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
6098 sctppcbinfo.ipi_count_chunk--;
6099 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
6100 panic("Chunk count is negative");
6102 sctppcbinfo.ipi_gencnt_chunk++;
6107 while (mat != NULL) {
6108 chk->send_size += mat->m_len;
6111 chk->rec.chunk_id = SCTP_OPERATION_ERROR;
6112 chk->sent = SCTP_DATAGRAM_UNSENT;
6115 chk->asoc = &stcb->asoc;
6117 chk->whoTo = chk->asoc->primary_destination;
6118 chk->whoTo->ref_count++;
6119 hdr = mtod(op_err, struct sctp_chunkhdr *);
6120 hdr->chunk_type = SCTP_OPERATION_ERROR;
6121 hdr->chunk_flags = 0;
6122 hdr->chunk_length = htons(chk->send_size);
6123 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue,
6126 chk->asoc->ctrl_queue_cnt++;
6130 sctp_send_cookie_echo(struct mbuf *m,
6132 struct sctp_tcb *stcb,
6133 struct sctp_nets *net)
6136 * pull out the cookie and put it at the front of the control
6140 struct mbuf *cookie, *mat;
6141 struct sctp_paramhdr parm, *phdr;
6142 struct sctp_chunkhdr *hdr;
6143 struct sctp_tmit_chunk *chk;
6144 uint16_t ptype, plen;
6145 /* First find the cookie in the param area */
6147 at = offset + sizeof(struct sctp_init_chunk);
6150 phdr = sctp_get_next_param(m, at, &parm, sizeof(parm));
6154 ptype = ntohs(phdr->param_type);
6155 plen = ntohs(phdr->param_length);
6156 if (ptype == SCTP_STATE_COOKIE) {
6158 /* found the cookie */
6159 if ((pad = (plen % 4))) {
6162 cookie = sctp_m_copym(m, at, plen, M_DONTWAIT);
6163 if (cookie == NULL) {
6169 at += SCTP_SIZE32(plen);
6171 if (cookie == NULL) {
6172 /* Did not find the cookie */
6175 /* ok, we got the cookie lets change it into a cookie echo chunk */
6177 /* first the change from param to cookie */
6178 hdr = mtod(cookie, struct sctp_chunkhdr *);
6179 hdr->chunk_type = SCTP_COOKIE_ECHO;
6180 hdr->chunk_flags = 0;
6181 /* now we MUST have a PKTHDR on it */
6182 if ((cookie->m_flags & M_PKTHDR) != M_PKTHDR) {
6183 /* we hope this happens rarely */
6184 MGETHDR(mat, M_DONTWAIT, MT_HEADER);
6186 sctp_m_freem(cookie);
6190 mat->m_pkthdr.rcvif = 0;
6191 mat->m_next = cookie;
6194 cookie->m_pkthdr.len = plen;
6195 /* get the chunk stuff now and place it in the FRONT of the queue */
6196 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
6199 sctp_m_freem(cookie);
6202 sctppcbinfo.ipi_count_chunk++;
6203 sctppcbinfo.ipi_gencnt_chunk++;
6204 chk->send_size = cookie->m_pkthdr.len;
6205 chk->rec.chunk_id = SCTP_COOKIE_ECHO;
6206 chk->sent = SCTP_DATAGRAM_UNSENT;
6209 chk->asoc = &stcb->asoc;
6211 chk->whoTo = chk->asoc->primary_destination;
6212 chk->whoTo->ref_count++;
6213 TAILQ_INSERT_HEAD(&chk->asoc->control_send_queue, chk, sctp_next);
6214 chk->asoc->ctrl_queue_cnt++;
6219 sctp_send_heartbeat_ack(struct sctp_tcb *stcb,
6223 struct sctp_nets *net)
6225 /* take a HB request and make it into a
6226 * HB ack and send it.
6228 struct mbuf *outchain;
6229 struct sctp_chunkhdr *chdr;
6230 struct sctp_tmit_chunk *chk;
6234 /* must have a net pointer */
6237 outchain = sctp_m_copym(m, offset, chk_length, M_DONTWAIT);
6238 if (outchain == NULL) {
6239 /* gak out of memory */
6242 chdr = mtod(outchain, struct sctp_chunkhdr *);
6243 chdr->chunk_type = SCTP_HEARTBEAT_ACK;
6244 chdr->chunk_flags = 0;
6245 if ((outchain->m_flags & M_PKTHDR) != M_PKTHDR) {
6246 /* should not happen but we are cautious. */
6248 MGETHDR(tmp, M_DONTWAIT, MT_HEADER);
6253 tmp->m_pkthdr.rcvif = 0;
6254 tmp->m_next = outchain;
6257 outchain->m_pkthdr.len = chk_length;
6258 if (chk_length % 4) {
6262 padlen = 4 - (outchain->m_pkthdr.len % 4);
6263 m_copyback(outchain, outchain->m_pkthdr.len, padlen, (caddr_t)&cpthis);
6265 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
6268 sctp_m_freem(outchain);
6271 sctppcbinfo.ipi_count_chunk++;
6272 sctppcbinfo.ipi_gencnt_chunk++;
6274 chk->send_size = chk_length;
6275 chk->rec.chunk_id = SCTP_HEARTBEAT_ACK;
6276 chk->sent = SCTP_DATAGRAM_UNSENT;
6279 chk->asoc = &stcb->asoc;
6280 chk->data = outchain;
6282 chk->whoTo->ref_count++;
6283 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
6284 chk->asoc->ctrl_queue_cnt++;
6288 sctp_send_cookie_ack(struct sctp_tcb *stcb) {
6289 /* formulate and queue a cookie-ack back to sender */
6290 struct mbuf *cookie_ack;
6291 struct sctp_chunkhdr *hdr;
6292 struct sctp_tmit_chunk *chk;
6295 MGETHDR(cookie_ack, M_DONTWAIT, MT_HEADER);
6296 if (cookie_ack == NULL) {
6300 cookie_ack->m_data += SCTP_MIN_OVERHEAD;
6301 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
6304 sctp_m_freem(cookie_ack);
6307 sctppcbinfo.ipi_count_chunk++;
6308 sctppcbinfo.ipi_gencnt_chunk++;
6310 chk->send_size = sizeof(struct sctp_chunkhdr);
6311 chk->rec.chunk_id = SCTP_COOKIE_ACK;
6312 chk->sent = SCTP_DATAGRAM_UNSENT;
6315 chk->asoc = &stcb->asoc;
6316 chk->data = cookie_ack;
6317 if (chk->asoc->last_control_chunk_from != NULL) {
6318 chk->whoTo = chk->asoc->last_control_chunk_from;
6320 chk->whoTo = chk->asoc->primary_destination;
6322 chk->whoTo->ref_count++;
6323 hdr = mtod(cookie_ack, struct sctp_chunkhdr *);
6324 hdr->chunk_type = SCTP_COOKIE_ACK;
6325 hdr->chunk_flags = 0;
6326 hdr->chunk_length = htons(chk->send_size);
6327 cookie_ack->m_pkthdr.len = cookie_ack->m_len = chk->send_size;
6328 cookie_ack->m_pkthdr.rcvif = 0;
6329 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
6330 chk->asoc->ctrl_queue_cnt++;
6336 sctp_send_shutdown_ack(struct sctp_tcb *stcb, struct sctp_nets *net)
6338 /* formulate and queue a SHUTDOWN-ACK back to the sender */
6339 struct mbuf *m_shutdown_ack;
6340 struct sctp_shutdown_ack_chunk *ack_cp;
6341 struct sctp_tmit_chunk *chk;
6343 m_shutdown_ack = NULL;
6344 MGETHDR(m_shutdown_ack, M_DONTWAIT, MT_HEADER);
6345 if (m_shutdown_ack == NULL) {
6349 m_shutdown_ack->m_data += SCTP_MIN_OVERHEAD;
6350 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
6353 sctp_m_freem(m_shutdown_ack);
6356 sctppcbinfo.ipi_count_chunk++;
6357 sctppcbinfo.ipi_gencnt_chunk++;
6359 chk->send_size = sizeof(struct sctp_chunkhdr);
6360 chk->rec.chunk_id = SCTP_SHUTDOWN_ACK;
6361 chk->sent = SCTP_DATAGRAM_UNSENT;
6364 chk->asoc = &stcb->asoc;
6365 chk->data = m_shutdown_ack;
6369 ack_cp = mtod(m_shutdown_ack, struct sctp_shutdown_ack_chunk *);
6370 ack_cp->ch.chunk_type = SCTP_SHUTDOWN_ACK;
6371 ack_cp->ch.chunk_flags = 0;
6372 ack_cp->ch.chunk_length = htons(chk->send_size);
6373 m_shutdown_ack->m_pkthdr.len = m_shutdown_ack->m_len = chk->send_size;
6374 m_shutdown_ack->m_pkthdr.rcvif = 0;
6375 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
6376 chk->asoc->ctrl_queue_cnt++;
6381 sctp_send_shutdown(struct sctp_tcb *stcb, struct sctp_nets *net)
6383 /* formulate and queue a SHUTDOWN to the sender */
6384 struct mbuf *m_shutdown;
6385 struct sctp_shutdown_chunk *shutdown_cp;
6386 struct sctp_tmit_chunk *chk;
6389 MGETHDR(m_shutdown, M_DONTWAIT, MT_HEADER);
6390 if (m_shutdown == NULL) {
6394 m_shutdown->m_data += SCTP_MIN_OVERHEAD;
6395 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
6398 sctp_m_freem(m_shutdown);
6401 sctppcbinfo.ipi_count_chunk++;
6402 sctppcbinfo.ipi_gencnt_chunk++;
6404 chk->send_size = sizeof(struct sctp_shutdown_chunk);
6405 chk->rec.chunk_id = SCTP_SHUTDOWN;
6406 chk->sent = SCTP_DATAGRAM_UNSENT;
6409 chk->asoc = &stcb->asoc;
6410 chk->data = m_shutdown;
6414 shutdown_cp = mtod(m_shutdown, struct sctp_shutdown_chunk *);
6415 shutdown_cp->ch.chunk_type = SCTP_SHUTDOWN;
6416 shutdown_cp->ch.chunk_flags = 0;
6417 shutdown_cp->ch.chunk_length = htons(chk->send_size);
6418 shutdown_cp->cumulative_tsn_ack = htonl(stcb->asoc.cumulative_tsn);
6419 m_shutdown->m_pkthdr.len = m_shutdown->m_len = chk->send_size;
6420 m_shutdown->m_pkthdr.rcvif = 0;
6421 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
6422 chk->asoc->ctrl_queue_cnt++;
6424 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
6425 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
6426 stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
6427 soisdisconnecting(stcb->sctp_ep->sctp_socket);
6433 sctp_send_asconf(struct sctp_tcb *stcb, struct sctp_nets *net)
6436 * formulate and queue an ASCONF to the peer
6437 * ASCONF parameters should be queued on the assoc queue
6439 struct sctp_tmit_chunk *chk;
6440 struct mbuf *m_asconf;
6441 struct sctp_asconf_chunk *acp;
6444 /* compose an ASCONF chunk, maximum length is PMTU */
6445 m_asconf = sctp_compose_asconf(stcb);
6446 if (m_asconf == NULL) {
6449 acp = mtod(m_asconf, struct sctp_asconf_chunk *);
6450 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
6453 sctp_m_freem(m_asconf);
6456 sctppcbinfo.ipi_count_chunk++;
6457 sctppcbinfo.ipi_gencnt_chunk++;
6459 chk->data = m_asconf;
6460 chk->send_size = m_asconf->m_pkthdr.len;
6461 chk->rec.chunk_id = SCTP_ASCONF;
6462 chk->sent = SCTP_DATAGRAM_UNSENT;
6465 chk->asoc = &stcb->asoc;
6466 chk->whoTo = chk->asoc->primary_destination;
6467 chk->whoTo->ref_count++;
6468 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
6469 chk->asoc->ctrl_queue_cnt++;
6474 sctp_send_asconf_ack(struct sctp_tcb *stcb, uint32_t retrans)
6477 * formulate and queue a asconf-ack back to sender
6478 * the asconf-ack must be stored in the tcb
6480 struct sctp_tmit_chunk *chk;
6483 /* is there a asconf-ack mbuf chain to send? */
6484 if (stcb->asoc.last_asconf_ack_sent == NULL) {
6488 /* copy the asconf_ack */
6489 #if defined(__FreeBSD__) || defined(__NetBSD__)
6490 /* Supposedly the m_copypacket is a optimzation,
6493 if (stcb->asoc.last_asconf_ack_sent->m_flags & M_PKTHDR) {
6494 m_ack = m_copypacket(stcb->asoc.last_asconf_ack_sent, M_DONTWAIT);
6495 sctp_pegs[SCTP_CACHED_SRC]++;
6497 m_ack = m_copy(stcb->asoc.last_asconf_ack_sent, 0, M_COPYALL);
6499 m_ack = m_copy(stcb->asoc.last_asconf_ack_sent, 0, M_COPYALL);
6501 if (m_ack == NULL) {
6502 /* couldn't copy it */
6506 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
6510 sctp_m_freem(m_ack);
6513 sctppcbinfo.ipi_count_chunk++;
6514 sctppcbinfo.ipi_gencnt_chunk++;
6516 /* figure out where it goes to */
6518 /* we're doing a retransmission */
6519 if (stcb->asoc.used_alt_asconfack > 2) {
6520 /* tried alternate nets already, go back */
6523 /* need to try and alternate net */
6524 chk->whoTo = sctp_find_alternate_net(stcb, stcb->asoc.last_control_chunk_from);
6525 stcb->asoc.used_alt_asconfack++;
6527 if (chk->whoTo == NULL) {
6529 if (stcb->asoc.last_control_chunk_from == NULL)
6530 chk->whoTo = stcb->asoc.primary_destination;
6532 chk->whoTo = stcb->asoc.last_control_chunk_from;
6533 stcb->asoc.used_alt_asconfack = 0;
6537 if (stcb->asoc.last_control_chunk_from == NULL)
6538 chk->whoTo = stcb->asoc.primary_destination;
6540 chk->whoTo = stcb->asoc.last_control_chunk_from;
6541 stcb->asoc.used_alt_asconfack = 0;
6544 chk->send_size = m_ack->m_pkthdr.len;
6545 chk->rec.chunk_id = SCTP_ASCONF_ACK;
6546 chk->sent = SCTP_DATAGRAM_UNSENT;
6549 chk->asoc = &stcb->asoc;
6550 chk->whoTo->ref_count++;
6551 TAILQ_INSERT_TAIL(&chk->asoc->control_send_queue, chk, sctp_next);
6552 chk->asoc->ctrl_queue_cnt++;
6558 sctp_chunk_retransmission(struct sctp_inpcb *inp,
6559 struct sctp_tcb *stcb,
6560 struct sctp_association *asoc,
6561 int *cnt_out, struct timeval *now, int *now_filled)
6564 * send out one MTU of retransmission.
6565 * If fast_retransmit is happening we ignore the cwnd.
6566 * Otherwise we obey the cwnd and rwnd.
6567 * For a Cookie or Asconf in the control chunk queue we retransmit
6568 * them by themselves.
6570 * For data chunks we will pick out the lowest TSN's in the
6571 * sent_queue marked for resend and bundle them all together
6572 * (up to a MTU of destination). The address to send to should
6573 * have been selected/changed where the retransmission was
6574 * marked (i.e. in FR or t3-timeout routines).
6576 struct sctp_tmit_chunk *data_list[SCTP_MAX_DATA_BUNDLING];
6577 struct sctp_tmit_chunk *chk, *fwd;
6579 struct sctphdr *shdr;
6581 struct sctp_nets *net;
6582 int no_fragmentflg, bundle_at, cnt_thru;
6584 int error, i, one_chunk, fwd_tsn, ctl_cnt, tmr_started;
6586 tmr_started = ctl_cnt = bundle_at = error = 0;
6593 #ifdef SCTP_AUDITING_ENABLED
6594 sctp_audit_log(0xC3, 1);
6596 if (TAILQ_EMPTY(&asoc->sent_queue)) {
6598 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
6599 printf("SCTP hits empty queue with cnt set to %d?\n",
6600 asoc->sent_queue_retran_cnt);
6603 asoc->sent_queue_cnt = 0;
6604 asoc->sent_queue_cnt_removeable = 0;
6606 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
6607 if (chk->sent != SCTP_DATAGRAM_RESEND) {
6608 /* we only worry about things marked for resend */
6611 if ((chk->rec.chunk_id == SCTP_COOKIE_ECHO) ||
6612 (chk->rec.chunk_id == SCTP_ASCONF) ||
6613 (chk->rec.chunk_id == SCTP_STREAM_RESET) ||
6614 (chk->rec.chunk_id == SCTP_FORWARD_CUM_TSN)) {
6615 if (chk->rec.chunk_id == SCTP_STREAM_RESET) {
6616 /* For stream reset we only retran the request
6619 struct sctp_stream_reset_req *strreq;
6620 strreq = mtod(chk->data, struct sctp_stream_reset_req *);
6621 if (strreq->sr_req.ph.param_type != ntohs(SCTP_STR_RESET_REQUEST)) {
6626 if (chk->rec.chunk_id == SCTP_ASCONF) {
6630 if (chk->rec.chunk_id == SCTP_FORWARD_CUM_TSN) {
6634 m = sctp_copy_mbufchain(chk->data, m);
6640 /* do we have control chunks to retransmit? */
6642 /* Start a timer no matter if we suceed or fail */
6643 if (chk->rec.chunk_id == SCTP_COOKIE_ECHO) {
6644 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, inp, stcb, chk->whoTo);
6645 } else if (chk->rec.chunk_id == SCTP_ASCONF)
6646 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, inp, stcb, chk->whoTo);
6648 if (m->m_len == 0) {
6649 /* Special case for when you get a 0 len
6650 * mbuf at the head due to the lack
6651 * of a MHDR at the beginning.
6653 m->m_len = sizeof(struct sctphdr);
6655 M_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT);
6660 shdr = mtod(m, struct sctphdr *);
6661 shdr->src_port = inp->sctp_lport;
6662 shdr->dest_port = stcb->rport;
6663 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
6665 chk->snd_count++; /* update our count */
6667 if ((error = sctp_lowlevel_chunk_output(inp, stcb, chk->whoTo,
6668 (struct sockaddr *)&chk->whoTo->ro._l_addr, m,
6669 no_fragmentflg, 0, NULL, asconf))) {
6670 sctp_pegs[SCTP_DATA_OUT_ERR]++;
6674 *We don't want to mark the net->sent time here since this
6675 * we use this for HB and retrans cannot measure RTT
6677 /* SCTP_GETTIME_TIMEVAL(&chk->whoTo->last_sent_time);*/
6679 chk->sent = SCTP_DATAGRAM_SENT;
6680 asoc->sent_queue_retran_cnt--;
6681 if (asoc->sent_queue_retran_cnt < 0) {
6682 asoc->sent_queue_retran_cnt = 0;
6687 /* Clean up the fwd-tsn list */
6688 sctp_clean_up_ctl (asoc);
6692 /* Ok, it is just data retransmission we need to do or
6693 * that and a fwd-tsn with it all.
6695 if (TAILQ_EMPTY(&asoc->sent_queue)) {
6699 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
6700 printf("Normal chunk retransmission cnt:%d\n",
6701 asoc->sent_queue_retran_cnt);
6704 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
6705 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
6706 /* not yet open, resend the cookie and that is it */
6711 #ifdef SCTP_AUDITING_ENABLED
6712 sctp_auditing(20, inp, stcb, NULL);
6714 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
6715 if (chk->sent != SCTP_DATAGRAM_RESEND) {
6716 /* No, not sent to this net or not ready for rtx */
6720 /* pick up the net */
6722 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6723 mtu = (net->mtu - SCTP_MIN_OVERHEAD);
6725 mtu = net->mtu- SCTP_MIN_V4_OVERHEAD;
6728 if ((asoc->peers_rwnd < mtu) && (asoc->total_flight > 0)) {
6729 /* No room in peers rwnd */
6731 tsn = asoc->last_acked_seq + 1;
6732 if (tsn == chk->rec.data.TSN_seq) {
6733 /* we make a special exception for this case.
6734 * The peer has no rwnd but is missing the
6735 * lowest chunk.. which is probably what is
6736 * holding up the rwnd.
6738 goto one_chunk_around;
6741 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
6742 printf("blocked-peers_rwnd:%d tf:%d\n",
6743 (int)asoc->peers_rwnd,
6744 (int)asoc->total_flight);
6747 sctp_pegs[SCTP_RWND_BLOCKED]++;
6751 if (asoc->peers_rwnd < mtu) {
6754 #ifdef SCTP_AUDITING_ENABLED
6755 sctp_audit_log(0xC3, 2);
6759 net->fast_retran_ip = 0;
6760 if (chk->rec.data.doing_fast_retransmit == 0) {
6761 /* if no FR in progress skip destination that
6762 * have flight_size > cwnd.
6764 if (net->flight_size >= net->cwnd) {
6765 sctp_pegs[SCTP_CWND_BLOCKED]++;
6769 /* Mark the destination net to have FR recovery
6772 net->fast_retran_ip = 1;
6775 if ((chk->send_size <= mtu) || (chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
6776 /* ok we will add this one */
6777 m = sctp_copy_mbufchain(chk->data, m);
6781 /* upate our MTU size */
6782 /* Do clear IP_DF ? */
6783 if (chk->flags & CHUNK_FLAGS_FRAGMENT_OK) {
6786 mtu -= chk->send_size;
6787 data_list[bundle_at++] = chk;
6788 if (one_chunk && (asoc->total_flight <= 0)) {
6789 sctp_pegs[SCTP_WINDOW_PROBES]++;
6790 chk->rec.data.state_flags |= SCTP_WINDOW_PROBE;
6793 if (one_chunk == 0) {
6794 /* now are there anymore forward from chk to pick up?*/
6795 fwd = TAILQ_NEXT(chk, sctp_next);
6797 if (fwd->sent != SCTP_DATAGRAM_RESEND) {
6798 /* Nope, not for retran */
6799 fwd = TAILQ_NEXT(fwd, sctp_next);
6802 if (fwd->whoTo != net) {
6803 /* Nope, not the net in question */
6804 fwd = TAILQ_NEXT(fwd, sctp_next);
6807 if (fwd->send_size <= mtu) {
6808 m = sctp_copy_mbufchain(fwd->data, m);
6812 /* upate our MTU size */
6813 /* Do clear IP_DF ? */
6814 if (fwd->flags & CHUNK_FLAGS_FRAGMENT_OK) {
6817 mtu -= fwd->send_size;
6818 data_list[bundle_at++] = fwd;
6819 if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
6822 fwd = TAILQ_NEXT(fwd, sctp_next);
6824 /* can't fit so we are done */
6829 /* Is there something to send for this destination? */
6831 /* No matter if we fail/or suceed we should
6832 * start a timer. A failure is like a lost
6835 if (!callout_pending(&net->rxt_timer.timer)) {
6836 /* no timer running on this destination
6839 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
6842 if (m->m_len == 0) {
6843 /* Special case for when you get a 0 len
6844 * mbuf at the head due to the lack
6845 * of a MHDR at the beginning.
6847 m->m_len = sizeof(struct sctphdr);
6849 M_PREPEND(m, sizeof(struct sctphdr), M_DONTWAIT);
6854 shdr = mtod(m, struct sctphdr *);
6855 shdr->src_port = inp->sctp_lport;
6856 shdr->dest_port = stcb->rport;
6857 shdr->v_tag = htonl(stcb->asoc.peer_vtag);
6860 /* Now lets send it, if there is anything to send :> */
6861 if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
6862 (struct sockaddr *)&net->ro._l_addr,
6864 no_fragmentflg, 0, NULL, asconf))) {
6865 /* error, we could not output */
6866 sctp_pegs[SCTP_DATA_OUT_ERR]++;
6871 * We don't want to mark the net->sent time here since
6872 * this we use this for HB and retrans cannot measure
6875 /* SCTP_GETTIME_TIMEVAL(&net->last_sent_time);*/
6877 /* For auto-close */
6879 if (*now_filled == 0) {
6880 SCTP_GETTIME_TIMEVAL(&asoc->time_last_sent);
6881 *now = asoc->time_last_sent;
6884 asoc->time_last_sent = *now;
6886 *cnt_out += bundle_at;
6887 #ifdef SCTP_AUDITING_ENABLED
6888 sctp_audit_log(0xC4, bundle_at);
6890 for (i = 0; i < bundle_at; i++) {
6891 sctp_pegs[SCTP_RETRANTSN_SENT]++;
6892 data_list[i]->sent = SCTP_DATAGRAM_SENT;
6893 data_list[i]->snd_count++;
6894 asoc->sent_queue_retran_cnt--;
6895 /* record the time */
6896 data_list[i]->sent_rcv_time = asoc->time_last_sent;
6897 if (asoc->sent_queue_retran_cnt < 0) {
6898 asoc->sent_queue_retran_cnt = 0;
6900 net->flight_size += data_list[i]->book_size;
6901 asoc->total_flight += data_list[i]->book_size;
6902 asoc->total_flight_count++;
6904 #ifdef SCTP_LOG_RWND
6905 sctp_log_rwnd(SCTP_DECREASE_PEER_RWND,
6906 asoc->peers_rwnd , data_list[i]->send_size, sctp_peer_chunk_oh);
6908 asoc->peers_rwnd = sctp_sbspace_sub(asoc->peers_rwnd,
6909 (u_int32_t)(data_list[i]->send_size + sctp_peer_chunk_oh));
6910 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
6911 /* SWS sender side engages */
6912 asoc->peers_rwnd = 0;
6916 (data_list[i]->rec.data.doing_fast_retransmit)) {
6917 sctp_pegs[SCTP_FAST_RETRAN]++;
6918 if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
6919 (tmr_started == 0)) {
6921 * ok we just fast-retrans'd
6922 * the lowest TSN, i.e the
6923 * first on the list. In this
6924 * case we want to give some
6925 * more time to get a SACK
6926 * back without a t3-expiring.
6928 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
6929 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
6933 #ifdef SCTP_AUDITING_ENABLED
6934 sctp_auditing(21, inp, stcb, NULL);
6940 if (asoc->sent_queue_retran_cnt <= 0) {
6941 /* all done we have no more to retran */
6942 asoc->sent_queue_retran_cnt = 0;
6946 /* No more room in rwnd */
6949 /* stop the for loop here. we sent out a packet */
6957 sctp_timer_validation(struct sctp_inpcb *inp,
6958 struct sctp_tcb *stcb,
6959 struct sctp_association *asoc,
6962 struct sctp_nets *net;
6963 /* Validate that a timer is running somewhere */
6964 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
6965 if (callout_pending(&net->rxt_timer.timer)) {
6966 /* Here is a timer */
6970 /* Gak, we did not have a timer somewhere */
6972 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
6973 printf("Deadlock avoided starting timer on a dest at retran\n");
6976 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, asoc->primary_destination);
6981 sctp_chunk_output(struct sctp_inpcb *inp,
6982 struct sctp_tcb *stcb,
6985 /* Ok this is the generic chunk service queue.
6986 * we must do the following:
6987 * - See if there are retransmits pending, if so we
6988 * must do these first and return.
6989 * - Service the stream queue that is next,
6990 * moving any message (note I must get a complete
6991 * message i.e. FIRST/MIDDLE and LAST to the out
6992 * queue in one pass) and assigning TSN's
6993 * - Check to see if the cwnd/rwnd allows any output, if
6994 * so we go ahead and fomulate and send the low level
6995 * chunks. Making sure to combine any control in the
6996 * control chunk queue also.
6998 struct sctp_association *asoc;
6999 struct sctp_nets *net;
7000 int error, num_out, tot_out, ret, reason_code, burst_cnt, burst_limit;
7008 sctp_pegs[SCTP_CALLS_TO_CO]++;
7010 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
7011 printf("in co - retran count:%d\n", asoc->sent_queue_retran_cnt);
7014 while (asoc->sent_queue_retran_cnt) {
7015 /* Ok, it is retransmission time only, we send out only ONE
7016 * packet with a single call off to the retran code.
7018 ret = sctp_chunk_retransmission(inp, stcb, asoc, &num_out, &now, &now_filled);
7020 /* Can't send anymore */
7022 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7023 printf("retransmission ret:%d -- full\n", ret);
7027 * now lets push out control by calling med-level
7028 * output once. this assures that we WILL send HB's
7031 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
7032 &cwnd_full, from_where,
7035 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7036 printf("Control send outputs:%d@full\n", num_out);
7039 #ifdef SCTP_AUDITING_ENABLED
7040 sctp_auditing(8, inp, stcb, NULL);
7042 return (sctp_timer_validation(inp, stcb, asoc, ret));
7046 * The count was off.. retran is not happening so do
7047 * the normal retransmission.
7050 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7051 printf("Done with retrans, none left fill up window\n");
7054 #ifdef SCTP_AUDITING_ENABLED
7055 sctp_auditing(9, inp, stcb, NULL);
7059 if (from_where == 1) {
7060 /* Only one transmission allowed out of a timeout */
7062 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7063 printf("Only one packet allowed out\n");
7066 #ifdef SCTP_AUDITING_ENABLED
7067 sctp_auditing(10, inp, stcb, NULL);
7069 /* Push out any control */
7070 (void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1, &cwnd_full, from_where,
7074 if ((num_out == 0) && (ret == 0)) {
7075 /* No more retrans to send */
7079 #ifdef SCTP_AUDITING_ENABLED
7080 sctp_auditing(12, inp, stcb, NULL);
7082 /* Check for bad destinations, if they exist move chunks around. */
7083 burst_limit = asoc->max_burst;
7084 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
7085 if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
7086 SCTP_ADDR_NOT_REACHABLE) {
7088 * if possible move things off of this address
7089 * we still may send below due to the dormant state
7090 * but we try to find an alternate address to send
7091 * to and if we have one we move all queued data on
7092 * the out wheel to this alternate address.
7094 sctp_move_to_an_alt(stcb, asoc, net);
7097 if ((asoc->sat_network) || (net->addr_is_local)) {
7098 burst_limit = asoc->max_burst * SCTP_SAT_NETWORK_BURST_INCR;
7102 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
7103 printf("examined net:%p burst limit:%d\n", net, asoc->max_burst);
7107 #ifdef SCTP_USE_ALLMAN_BURST
7108 if ((net->flight_size+(burst_limit*net->mtu)) < net->cwnd) {
7109 if (net->ssthresh < net->cwnd)
7110 net->ssthresh = net->cwnd;
7111 net->cwnd = (net->flight_size+(burst_limit*net->mtu));
7112 #ifdef SCTP_LOG_MAXBURST
7113 sctp_log_maxburst(net, 0, burst_limit, SCTP_MAX_BURST_APPLIED);
7115 sctp_pegs[SCTP_MAX_BURST_APL]++;
7117 net->fast_retran_ip = 0;
7122 /* Fill up what we can to the destination */
7127 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
7128 printf("Burst count:%d - call m-c-o\n", burst_cnt);
7131 error = sctp_med_chunk_output(inp, stcb, asoc, &num_out,
7132 &reason_code, 0, &cwnd_full, from_where,
7136 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7137 printf("Error %d was returned from med-c-op\n", error);
7140 #ifdef SCTP_LOG_MAXBURST
7141 sctp_log_maxburst(asoc->primary_destination, error , burst_cnt, SCTP_MAX_BURST_ERROR_STOP);
7146 if (sctp_debug_on & SCTP_DEBUG_OUTPUT3) {
7147 printf("m-c-o put out %d\n", num_out);
7153 #ifndef SCTP_USE_ALLMAN_BURST
7154 && (burst_cnt < burst_limit)
7157 #ifndef SCTP_USE_ALLMAN_BURST
7158 if (burst_cnt >= burst_limit) {
7159 sctp_pegs[SCTP_MAX_BURST_APL]++;
7160 asoc->burst_limit_applied = 1;
7161 #ifdef SCTP_LOG_MAXBURST
7162 sctp_log_maxburst(asoc->primary_destination, 0 , burst_cnt, SCTP_MAX_BURST_APPLIED);
7165 asoc->burst_limit_applied = 0;
7170 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7171 printf("Ok, we have put out %d chunks\n", tot_out);
7175 sctp_pegs[SCTP_CO_NODATASNT]++;
7176 if (asoc->stream_queue_cnt > 0) {
7177 sctp_pegs[SCTP_SOS_NOSNT]++;
7179 sctp_pegs[SCTP_NOS_NOSNT]++;
7181 if (asoc->send_queue_cnt > 0) {
7182 sctp_pegs[SCTP_SOSE_NOSNT]++;
7184 sctp_pegs[SCTP_NOSE_NOSNT]++;
7187 /* Now we need to clean up the control chunk chain if
7188 * a ECNE is on it. It must be marked as UNSENT again
7189 * so next call will continue to send it until
7190 * such time that we get a CWR, to remove it.
7192 sctp_fix_ecn_echo(asoc);
7198 sctp_output(inp, m, addr, control, p, flags)
7199 struct sctp_inpcb *inp;
7201 struct sockaddr *addr;
7202 struct mbuf *control;
7203 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
7210 struct inpcb *ip_inp;
7211 struct sctp_inpcb *t_inp;
7212 struct sctp_tcb *stcb;
7213 struct sctp_nets *net;
7214 struct sctp_association *asoc;
7215 int create_lock_applied = 0;
7216 int queue_only, error = 0;
7218 struct sctp_sndrcvinfo srcv;
7220 int use_rcvinfo = 0;
7222 /* struct route ro;*/
7224 #if defined(__NetBSD__) || defined(__OpenBSD__)
7230 ip_inp = (struct inpcb *)inp;
7236 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7237 printf("USR Send BEGINS\n");
7241 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
7242 (inp->sctp_flags & SCTP_PCB_FLAGS_ACCEPTING)) {
7243 /* The listner can NOT send */
7245 sctppcbinfo.mbuf_track--;
7246 sctp_m_freem(control);
7253 /* Can't allow a V6 address on a non-v6 socket */
7255 SCTP_ASOC_CREATE_LOCK(inp);
7256 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
7257 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
7258 /* Should I really unlock ? */
7259 SCTP_ASOC_CREATE_UNLOCK(inp);
7261 sctppcbinfo.mbuf_track--;
7262 sctp_m_freem(control);
7269 create_lock_applied = 1;
7270 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
7271 (addr->sa_family == AF_INET6)) {
7272 SCTP_ASOC_CREATE_UNLOCK(inp);
7274 sctppcbinfo.mbuf_track--;
7275 sctp_m_freem(control);
7284 sctppcbinfo.mbuf_track++;
7285 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control,
7287 if (srcv.sinfo_flags & MSG_SENDALL) {
7289 sctppcbinfo.mbuf_track--;
7290 sctp_m_freem(control);
7292 if (create_lock_applied) {
7293 SCTP_ASOC_CREATE_UNLOCK(inp);
7294 create_lock_applied = 0;
7296 return (sctp_sendall(inp, NULL, m, &srcv));
7298 if (srcv.sinfo_assoc_id) {
7299 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
7300 SCTP_INP_RLOCK(inp);
7301 stcb = LIST_FIRST(&inp->sctp_asoc_list);
7303 SCTP_TCB_LOCK(stcb);
7304 SCTP_INP_RUNLOCK(inp);
7307 if (create_lock_applied) {
7308 SCTP_ASOC_CREATE_UNLOCK(inp);
7309 create_lock_applied = 0;
7311 sctppcbinfo.mbuf_track--;
7312 sctp_m_freem(control);
7317 net = stcb->asoc.primary_destination;
7319 stcb = sctp_findassociation_ep_asocid(inp, srcv.sinfo_assoc_id);
7322 * Question: Should I error here if the
7324 * assoc_id is no longer valid?
7325 * i.e. I can't find it?
7329 /* Must locate the net structure */
7331 net = sctp_findnet(stcb, addr);
7334 net = stcb->asoc.primary_destination;
7340 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
7341 SCTP_INP_RLOCK(inp);
7342 stcb = LIST_FIRST(&inp->sctp_asoc_list);
7344 SCTP_TCB_LOCK(stcb);
7345 SCTP_INP_RUNLOCK(inp);
7348 if (create_lock_applied) {
7349 SCTP_ASOC_CREATE_UNLOCK(inp);
7350 create_lock_applied = 0;
7353 sctppcbinfo.mbuf_track--;
7354 sctp_m_freem(control);
7361 net = stcb->asoc.primary_destination;
7363 net = sctp_findnet(stcb, addr);
7365 net = stcb->asoc.primary_destination;
7370 SCTP_INP_WLOCK(inp);
7371 SCTP_INP_INCR_REF(inp);
7372 SCTP_INP_WUNLOCK(inp);
7373 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
7375 SCTP_INP_WLOCK(inp);
7376 SCTP_INP_DECR_REF(inp);
7377 SCTP_INP_WUNLOCK(inp);
7382 if ((stcb == NULL) &&
7383 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) {
7385 sctppcbinfo.mbuf_track--;
7386 sctp_m_freem(control);
7389 if (create_lock_applied) {
7390 SCTP_ASOC_CREATE_UNLOCK(inp);
7391 create_lock_applied = 0;
7396 } else if ((stcb == NULL) &&
7399 sctppcbinfo.mbuf_track--;
7400 sctp_m_freem(control);
7403 if (create_lock_applied) {
7404 SCTP_ASOC_CREATE_UNLOCK(inp);
7405 create_lock_applied = 0;
7410 } else if (stcb == NULL) {
7411 /* UDP mode, we must go ahead and start the INIT process */
7412 if ((use_rcvinfo) && (srcv.sinfo_flags & MSG_ABORT)) {
7413 /* Strange user to do this */
7415 sctppcbinfo.mbuf_track--;
7416 sctp_m_freem(control);
7419 if (create_lock_applied) {
7420 SCTP_ASOC_CREATE_UNLOCK(inp);
7421 create_lock_applied = 0;
7427 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0);
7430 sctppcbinfo.mbuf_track--;
7431 sctp_m_freem(control);
7434 if (create_lock_applied) {
7435 SCTP_ASOC_CREATE_UNLOCK(inp);
7436 create_lock_applied = 0;
7442 if (create_lock_applied) {
7443 SCTP_ASOC_CREATE_UNLOCK(inp);
7444 create_lock_applied = 0;
7446 printf("Huh-1, create lock should have been applied!\n");
7450 asoc->state = SCTP_STATE_COOKIE_WAIT;
7451 SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
7453 /* see if a init structure exists in cmsg headers */
7454 struct sctp_initmsg initm;
7456 if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control,
7458 /* we have an INIT override of the default */
7459 if (initm.sinit_max_attempts)
7460 asoc->max_init_times = initm.sinit_max_attempts;
7461 if (initm.sinit_num_ostreams)
7462 asoc->pre_open_streams = initm.sinit_num_ostreams;
7463 if (initm.sinit_max_instreams)
7464 asoc->max_inbound_streams = initm.sinit_max_instreams;
7465 if (initm.sinit_max_init_timeo)
7466 asoc->initial_init_rto_max = initm.sinit_max_init_timeo;
7468 if (asoc->streamoutcnt < asoc->pre_open_streams) {
7469 /* Default is NOT correct */
7471 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7472 printf("Ok, defout:%d pre_open:%d\n",
7473 asoc->streamoutcnt, asoc->pre_open_streams);
7476 FREE(asoc->strmout, M_PCB);
7477 asoc->strmout = NULL;
7478 asoc->streamoutcnt = asoc->pre_open_streams;
7479 MALLOC(asoc->strmout, struct sctp_stream_out *,
7480 asoc->streamoutcnt *
7481 sizeof(struct sctp_stream_out), M_PCB,
7483 for (i = 0; i < asoc->streamoutcnt; i++) {
7485 * inbound side must be set to 0xffff,
7486 * also NOTE when we get the INIT-ACK
7487 * back (for INIT sender) we MUST
7488 * reduce the count (streamoutcnt) but
7489 * first check if we sent to any of the
7490 * upper streams that were dropped (if
7491 * some were). Those that were dropped
7492 * must be notified to the upper layer
7493 * as failed to send.
7495 asoc->strmout[i].next_sequence_sent = 0x0;
7496 TAILQ_INIT(&asoc->strmout[i].outqueue);
7497 asoc->strmout[i].stream_no = i;
7498 asoc->strmout[i].next_spoke.tqe_next = 0;
7499 asoc->strmout[i].next_spoke.tqe_prev = 0;
7503 sctp_send_initiate(inp, stcb);
7505 * we may want to dig in after this call and adjust the MTU
7506 * value. It defaulted to 1500 (constant) but the ro structure
7507 * may now have an update and thus we may need to change it
7508 * BEFORE we append the message.
7510 net = stcb->asoc.primary_destination;
7512 if (create_lock_applied) {
7513 SCTP_ASOC_CREATE_UNLOCK(inp);
7514 create_lock_applied = 0;
7517 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
7518 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
7521 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
7522 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
7523 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
7524 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
7526 sctppcbinfo.mbuf_track--;
7527 sctp_m_freem(control);
7530 if ((use_rcvinfo) &&
7531 (srcv.sinfo_flags & MSG_ABORT)) {
7532 sctp_msg_append(stcb, net, m, &srcv, flags);
7540 SCTP_TCB_UNLOCK(stcb);
7544 if (create_lock_applied) {
7545 /* we should never hit here with the create lock applied
7548 SCTP_ASOC_CREATE_UNLOCK(inp);
7549 create_lock_applied = 0;
7553 if (use_rcvinfo == 0) {
7554 srcv = stcb->asoc.def_send;
7558 if (sctp_debug_on & SCTP_DEBUG_OUTPUT5) {
7559 printf("stream:%d\n", srcv.sinfo_stream);
7560 printf("flags:%x\n", (u_int)srcv.sinfo_flags);
7561 printf("ppid:%d\n", srcv.sinfo_ppid);
7562 printf("context:%d\n", srcv.sinfo_context);
7567 sctppcbinfo.mbuf_track--;
7568 sctp_m_freem(control);
7571 if (net && ((srcv.sinfo_flags & MSG_ADDR_OVER))) {
7572 /* we take the override or the unconfirmed */
7575 net = stcb->asoc.primary_destination;
7577 if ((error = sctp_msg_append(stcb, net, m, &srcv, flags))) {
7578 SCTP_TCB_UNLOCK(stcb);
7582 if (net->flight_size > net->cwnd) {
7583 sctp_pegs[SCTP_SENDTO_FULL_CWND]++;
7585 } else if (asoc->ifp_had_enobuf) {
7586 sctp_pegs[SCTP_QUEONLY_BURSTLMT]++;
7589 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
7590 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk)) +
7593 if (((inp->sctp_flags & SCTP_PCB_FLAGS_NODELAY) == 0) &&
7594 (stcb->asoc.total_flight > 0) &&
7595 (un_sent < (int)stcb->asoc.smallest_mtu)
7598 /* Ok, Nagle is set on and we have
7599 * data outstanding. Don't send anything
7600 * and let the SACK drive out the data.
7602 sctp_pegs[SCTP_NAGLE_NOQ]++;
7605 sctp_pegs[SCTP_NAGLE_OFF]++;
7608 if ((queue_only == 0) && stcb->asoc.peers_rwnd) {
7609 /* we can attempt to send too.*/
7611 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7612 printf("USR Send calls sctp_chunk_output\n");
7615 #ifdef SCTP_AUDITING_ENABLED
7616 sctp_audit_log(0xC0, 1);
7617 sctp_auditing(6, inp, stcb, net);
7619 sctp_pegs[SCTP_OUTPUT_FRM_SND]++;
7620 sctp_chunk_output(inp, stcb, 0);
7621 #ifdef SCTP_AUDITING_ENABLED
7622 sctp_audit_log(0xC0, 2);
7623 sctp_auditing(7, inp, stcb, net);
7628 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
7629 printf("USR Send complete qo:%d prw:%d\n", queue_only, stcb->asoc.peers_rwnd);
7632 SCTP_TCB_UNLOCK(stcb);
7638 send_forward_tsn(struct sctp_tcb *stcb,
7639 struct sctp_association *asoc)
7641 struct sctp_tmit_chunk *chk;
7642 struct sctp_forward_tsn_chunk *fwdtsn;
7644 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7645 if (chk->rec.chunk_id == SCTP_FORWARD_CUM_TSN) {
7646 /* mark it to unsent */
7647 chk->sent = SCTP_DATAGRAM_UNSENT;
7649 /* Do we correct its output location? */
7650 if (chk->whoTo != asoc->primary_destination) {
7651 sctp_free_remote_addr(chk->whoTo);
7652 chk->whoTo = asoc->primary_destination;
7653 chk->whoTo->ref_count++;
7655 goto sctp_fill_in_rest;
7658 /* Ok if we reach here we must build one */
7659 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
7663 sctppcbinfo.ipi_count_chunk++;
7664 sctppcbinfo.ipi_gencnt_chunk++;
7665 chk->rec.chunk_id = SCTP_FORWARD_CUM_TSN;
7667 MGETHDR(chk->data, M_DONTWAIT, MT_DATA);
7668 if (chk->data == NULL) {
7669 chk->whoTo->ref_count--;
7670 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
7671 sctppcbinfo.ipi_count_chunk--;
7672 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
7673 panic("Chunk count is negative");
7675 sctppcbinfo.ipi_gencnt_chunk++;
7678 chk->data->m_data += SCTP_MIN_OVERHEAD;
7679 chk->sent = SCTP_DATAGRAM_UNSENT;
7681 chk->whoTo = asoc->primary_destination;
7682 chk->whoTo->ref_count++;
7683 TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
7684 asoc->ctrl_queue_cnt++;
7686 /* Here we go through and fill out the part that
7687 * deals with stream/seq of the ones we skip.
7689 chk->data->m_pkthdr.len = chk->data->m_len = 0;
7691 struct sctp_tmit_chunk *at, *tp1, *last;
7692 struct sctp_strseq *strseq;
7693 unsigned int cnt_of_space, i, ovh;
7694 unsigned int space_needed;
7695 unsigned int cnt_of_skipped = 0;
7696 TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
7697 if (at->sent != SCTP_FORWARD_TSN_SKIP) {
7698 /* no more to look at */
7701 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
7702 /* We don't report these */
7707 space_needed = (sizeof(struct sctp_forward_tsn_chunk) +
7708 (cnt_of_skipped * sizeof(struct sctp_strseq)));
7709 if ((M_TRAILINGSPACE(chk->data) < (int)space_needed) &&
7710 ((chk->data->m_flags & M_EXT) == 0)) {
7711 /* Need a M_EXT, get one and move
7712 * fwdtsn to data area.
7714 MCLGET(chk->data, M_DONTWAIT);
7716 cnt_of_space = M_TRAILINGSPACE(chk->data);
7718 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
7719 ovh = SCTP_MIN_OVERHEAD;
7721 ovh = SCTP_MIN_V4_OVERHEAD;
7723 if (cnt_of_space > (asoc->smallest_mtu-ovh)) {
7724 /* trim to a mtu size */
7725 cnt_of_space = asoc->smallest_mtu - ovh;
7727 if (cnt_of_space < space_needed) {
7728 /* ok we must trim down the chunk by lowering
7729 * the advance peer ack point.
7731 cnt_of_skipped = (cnt_of_space-
7732 ((sizeof(struct sctp_forward_tsn_chunk))/
7733 sizeof(struct sctp_strseq)));
7734 /* Go through and find the TSN that
7735 * will be the one we report.
7737 at = TAILQ_FIRST(&asoc->sent_queue);
7738 for (i = 0; i < cnt_of_skipped; i++) {
7739 tp1 = TAILQ_NEXT(at, sctp_next);
7743 /* last now points to last one I can report, update peer ack point */
7744 asoc->advanced_peer_ack_point = last->rec.data.TSN_seq;
7745 space_needed -= (cnt_of_skipped * sizeof(struct sctp_strseq));
7747 chk->send_size = space_needed;
7748 /* Setup the chunk */
7749 fwdtsn = mtod(chk->data, struct sctp_forward_tsn_chunk *);
7750 fwdtsn->ch.chunk_length = htons(chk->send_size);
7751 fwdtsn->ch.chunk_flags = 0;
7752 fwdtsn->ch.chunk_type = SCTP_FORWARD_CUM_TSN;
7753 fwdtsn->new_cumulative_tsn = htonl(asoc->advanced_peer_ack_point);
7754 chk->send_size = (sizeof(struct sctp_forward_tsn_chunk) +
7755 (cnt_of_skipped * sizeof(struct sctp_strseq)));
7756 chk->data->m_pkthdr.len = chk->data->m_len = chk->send_size;
7758 /* Move pointer to after the fwdtsn and transfer to
7759 * the strseq pointer.
7761 strseq = (struct sctp_strseq *)fwdtsn;
7763 * Now populate the strseq list. This is done blindly
7764 * without pulling out duplicate stream info. This is
7765 * inefficent but won't harm the process since the peer
7766 * will look at these in sequence and will thus release
7767 * anything. It could mean we exceed the PMTU and chop
7768 * off some that we could have included.. but this is
7769 * unlikely (aka 1432/4 would mean 300+ stream seq's would
7770 * have to be reported in one FWD-TSN. With a bit of work
7771 * we can later FIX this to optimize and pull out duplcates..
7772 * but it does add more overhead. So for now... not!
7774 at = TAILQ_FIRST(&asoc->sent_queue);
7775 for (i = 0; i < cnt_of_skipped; i++) {
7776 tp1 = TAILQ_NEXT(at, sctp_next);
7777 if (at->rec.data.rcv_flags & SCTP_DATA_UNORDERED) {
7778 /* We don't report these */
7783 strseq->stream = ntohs(at->rec.data.stream_number);
7784 strseq->sequence = ntohs(at->rec.data.stream_seq);
7794 sctp_send_sack(struct sctp_tcb *stcb)
7797 * Queue up a SACK in the control queue. We must first check to
7798 * see if a SACK is somehow on the control queue. If so, we will
7799 * take and and remove the old one.
7801 struct sctp_association *asoc;
7802 struct sctp_tmit_chunk *chk, *a_chk;
7803 struct sctp_sack_chunk *sack;
7804 struct sctp_gap_ack_block *gap_descriptor;
7807 unsigned int i, maxi, seeing_ones, m_size;
7808 unsigned int num_gap_blocks, space;
7814 if (asoc->last_data_chunk_from == NULL) {
7815 /* Hmm we never received anything */
7818 sctp_set_rwnd(stcb, asoc);
7819 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
7820 if (chk->rec.chunk_id == SCTP_SELECTIVE_ACK) {
7821 /* Hmm, found a sack already on queue, remove it */
7822 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
7823 asoc->ctrl_queue_cnt++;
7826 sctp_m_freem(a_chk->data);
7828 sctp_free_remote_addr(a_chk->whoTo);
7829 a_chk->whoTo = NULL;
7833 if (a_chk == NULL) {
7834 a_chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
7835 if (a_chk == NULL) {
7836 /* No memory so we drop the idea, and set a timer */
7837 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
7838 stcb->sctp_ep, stcb, NULL);
7839 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
7840 stcb->sctp_ep, stcb, NULL);
7843 sctppcbinfo.ipi_count_chunk++;
7844 sctppcbinfo.ipi_gencnt_chunk++;
7845 a_chk->rec.chunk_id = SCTP_SELECTIVE_ACK;
7848 a_chk->snd_count = 0;
7849 a_chk->send_size = 0; /* fill in later */
7850 a_chk->sent = SCTP_DATAGRAM_UNSENT;
7851 m_size = (asoc->mapping_array_size << 3);
7853 if ((asoc->numduptsns) ||
7854 (asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)
7856 /* Ok, we have some duplicates or the destination for the
7857 * sack is unreachable, lets see if we can select an alternate
7858 * than asoc->last_data_chunk_from
7860 if ((!(asoc->last_data_chunk_from->dest_state &
7861 SCTP_ADDR_NOT_REACHABLE)) &&
7862 (asoc->used_alt_onsack > 2)) {
7863 /* We used an alt last time, don't this time */
7864 a_chk->whoTo = NULL;
7866 asoc->used_alt_onsack++;
7867 a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from);
7869 if (a_chk->whoTo == NULL) {
7870 /* Nope, no alternate */
7871 a_chk->whoTo = asoc->last_data_chunk_from;
7872 asoc->used_alt_onsack = 0;
7875 /* No duplicates so we use the last
7876 * place we received data from.
7879 if (asoc->last_data_chunk_from == NULL) {
7880 printf("Huh, last_data_chunk_from is null when we want to sack??\n");
7883 asoc->used_alt_onsack = 0;
7884 a_chk->whoTo = asoc->last_data_chunk_from;
7887 a_chk->whoTo->ref_count++;
7889 /* Ok now lets formulate a MBUF with our sack */
7890 MGETHDR(a_chk->data, M_DONTWAIT, MT_DATA);
7891 if ((a_chk->data == NULL) ||
7892 (a_chk->whoTo == NULL)) {
7893 /* rats, no mbuf memory */
7895 /* was a problem with the destination */
7896 sctp_m_freem(a_chk->data);
7899 a_chk->whoTo->ref_count--;
7900 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, a_chk);
7901 sctppcbinfo.ipi_count_chunk--;
7902 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
7903 panic("Chunk count is negative");
7905 sctppcbinfo.ipi_gencnt_chunk++;
7906 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
7907 stcb->sctp_ep, stcb, NULL);
7908 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
7909 stcb->sctp_ep, stcb, NULL);
7912 /* First count the number of gap ack blocks we need */
7913 if (asoc->highest_tsn_inside_map == asoc->cumulative_tsn) {
7914 /* We know if there are none above the cum-ack we
7915 * have everything with NO gaps
7919 /* Ok we must count how many gaps we
7923 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
7924 maxi = (asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn);
7926 maxi = (asoc->highest_tsn_inside_map + (MAX_TSN - asoc->mapping_array_base_tsn) + 1);
7928 if (maxi > m_size) {
7929 /* impossible but who knows, someone is playing with us :> */
7931 printf("GAK maxi:%d > m_size:%d came out higher than allowed htsn:%u base:%u cumack:%u\n",
7934 asoc->highest_tsn_inside_map,
7935 asoc->mapping_array_base_tsn,
7936 asoc->cumulative_tsn
7942 if (asoc->cumulative_tsn >= asoc->mapping_array_base_tsn) {
7943 start = (asoc->cumulative_tsn - asoc->mapping_array_base_tsn);
7945 /* Set it so we start at 0 */
7948 /* Ok move start up one to look at the NEXT past the cum-ack */
7950 for (i = start; i <= maxi; i++) {
7952 /* while seeing ones I must
7953 * transition back to 0 before
7954 * finding the next gap and
7955 * counting the segment.
7957 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) == 0) {
7961 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, i)) {
7968 if (num_gap_blocks == 0) {
7970 * Traveled all of the bits and NO one,
7973 if (compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_map, MAX_TSN)) {
7974 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
7975 #ifdef SCTP_MAP_LOGGING
7976 sctp_log_map(0, 4, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
7982 /* Now calculate the space needed */
7983 space = (sizeof(struct sctp_sack_chunk) +
7984 (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
7985 (asoc->numduptsns * sizeof(int32_t))
7987 if (space > (asoc->smallest_mtu-SCTP_MAX_OVERHEAD)) {
7988 /* Reduce the size of the sack to fit */
7990 calc = (asoc->smallest_mtu - SCTP_MAX_OVERHEAD);
7991 calc -= sizeof(struct sctp_gap_ack_block);
7992 fit = calc/sizeof(struct sctp_gap_ack_block);
7993 if (fit > (int)num_gap_blocks) {
7994 /* discard some dups */
7995 asoc->numduptsns = (fit - num_gap_blocks);
7997 /* discard all dups and some gaps */
7998 num_gap_blocks = fit;
7999 asoc->numduptsns = 0;
8002 space = (sizeof(struct sctp_sack_chunk) +
8003 (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
8004 (asoc->numduptsns * sizeof(int32_t))
8009 if ((space+SCTP_MIN_OVERHEAD) > MHLEN) {
8010 /* We need a cluster */
8011 MCLGET(a_chk->data, M_DONTWAIT);
8012 if ((a_chk->data->m_flags & M_EXT) != M_EXT) {
8013 /* can't get a cluster
8014 * give up and try later.
8017 sctp_m_freem(a_chk->data);
8019 a_chk->whoTo->ref_count--;
8020 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, a_chk);
8021 sctppcbinfo.ipi_count_chunk--;
8022 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
8023 panic("Chunk count is negative");
8025 sctppcbinfo.ipi_gencnt_chunk++;
8026 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
8027 stcb->sctp_ep, stcb, NULL);
8028 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
8029 stcb->sctp_ep, stcb, NULL);
8034 /* ok, lets go through and fill it in */
8035 a_chk->data->m_data += SCTP_MIN_OVERHEAD;
8036 sack = mtod(a_chk->data, struct sctp_sack_chunk *);
8037 sack->ch.chunk_type = SCTP_SELECTIVE_ACK;
8038 sack->ch.chunk_flags = asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM;
8039 sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
8040 sack->sack.a_rwnd = htonl(asoc->my_rwnd);
8041 asoc->my_last_reported_rwnd = asoc->my_rwnd;
8042 sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
8043 sack->sack.num_dup_tsns = htons(asoc->numduptsns);
8045 a_chk->send_size = (sizeof(struct sctp_sack_chunk) +
8046 (num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
8047 (asoc->numduptsns * sizeof(int32_t)));
8048 a_chk->data->m_pkthdr.len = a_chk->data->m_len = a_chk->send_size;
8049 sack->ch.chunk_length = htons(a_chk->send_size);
8051 gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
8053 for (i = start; i <= maxi; i++) {
8054 if (num_gap_blocks == 0) {
8058 /* while seeing Ones I must
8059 * transition back to 0 before
8060 * finding the next gap
8062 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) == 0) {
8063 gap_descriptor->end = htons(((uint16_t)(i-start)));
8069 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, i)) {
8070 gap_descriptor->start = htons(((uint16_t)(i+1-start)));
8071 /* advance struct to next pointer */
8076 if (num_gap_blocks) {
8077 /* special case where the array is all 1's
8078 * to the end of the array.
8080 gap_descriptor->end = htons(((uint16_t)((i-start))));
8083 /* now we must add any dups we are going to report. */
8084 if (asoc->numduptsns) {
8085 dup = (uint32_t *)gap_descriptor;
8086 for (i = 0; i < asoc->numduptsns; i++) {
8087 *dup = htonl(asoc->dup_tsns[i]);
8090 asoc->numduptsns = 0;
8092 /* now that the chunk is prepared queue it to the control
8095 TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
8096 asoc->ctrl_queue_cnt++;
8097 sctp_pegs[SCTP_PEG_SACKS_SENT]++;
8102 sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr)
8104 struct mbuf *m_abort;
8105 struct sctp_abort_msg *abort_m;
8108 MGETHDR(m_abort, M_DONTWAIT, MT_HEADER);
8109 if (m_abort == NULL) {
8113 m_abort->m_data += SCTP_MIN_OVERHEAD;
8114 abort_m = mtod(m_abort, struct sctp_abort_msg *);
8115 m_abort->m_len = sizeof(struct sctp_abort_msg);
8116 m_abort->m_next = operr;
8126 abort_m->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;
8127 abort_m->msg.ch.chunk_flags = 0;
8128 abort_m->msg.ch.chunk_length = htons(sizeof(struct sctp_abort_chunk) +
8130 abort_m->sh.src_port = stcb->sctp_ep->sctp_lport;
8131 abort_m->sh.dest_port = stcb->rport;
8132 abort_m->sh.v_tag = htonl(stcb->asoc.peer_vtag);
8133 abort_m->sh.checksum = 0;
8134 m_abort->m_pkthdr.len = m_abort->m_len + sz;
8135 m_abort->m_pkthdr.rcvif = 0;
8136 sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb,
8137 stcb->asoc.primary_destination,
8138 (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr,
8139 m_abort, 1, 0, NULL, 0);
8143 sctp_send_shutdown_complete(struct sctp_tcb *stcb,
8144 struct sctp_nets *net)
8147 /* formulate and SEND a SHUTDOWN-COMPLETE */
8148 struct mbuf *m_shutdown_comp;
8149 struct sctp_shutdown_complete_msg *comp_cp;
8151 m_shutdown_comp = NULL;
8152 MGETHDR(m_shutdown_comp, M_DONTWAIT, MT_HEADER);
8153 if (m_shutdown_comp == NULL) {
8157 m_shutdown_comp->m_data += sizeof(struct ip6_hdr);
8158 comp_cp = mtod(m_shutdown_comp, struct sctp_shutdown_complete_msg *);
8159 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
8160 comp_cp->shut_cmp.ch.chunk_flags = 0;
8161 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
8162 comp_cp->sh.src_port = stcb->sctp_ep->sctp_lport;
8163 comp_cp->sh.dest_port = stcb->rport;
8164 comp_cp->sh.v_tag = htonl(stcb->asoc.peer_vtag);
8165 comp_cp->sh.checksum = 0;
8167 m_shutdown_comp->m_pkthdr.len = m_shutdown_comp->m_len = sizeof(struct sctp_shutdown_complete_msg);
8168 m_shutdown_comp->m_pkthdr.rcvif = 0;
8169 sctp_lowlevel_chunk_output(stcb->sctp_ep, stcb, net,
8170 (struct sockaddr *)&net->ro._l_addr, m_shutdown_comp,
8172 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
8173 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
8174 stcb->sctp_ep->sctp_flags &= ~SCTP_PCB_FLAGS_CONNECTED;
8175 stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
8176 soisdisconnected(stcb->sctp_ep->sctp_socket);
8182 sctp_send_shutdown_complete2(struct mbuf *m, int iphlen, struct sctphdr *sh)
8184 /* formulate and SEND a SHUTDOWN-COMPLETE */
8186 struct ip *iph, *iph_out;
8187 struct ip6_hdr *ip6, *ip6_out;
8189 struct sctp_shutdown_complete_msg *comp_cp;
8191 MGETHDR(mout, M_DONTWAIT, MT_HEADER);
8196 iph = mtod(m, struct ip *);
8200 if (iph->ip_v == IPVERSION) {
8201 mout->m_len = sizeof(struct ip) +
8202 sizeof(struct sctp_shutdown_complete_msg);
8203 mout->m_next = NULL;
8204 iph_out = mtod(mout, struct ip *);
8206 /* Fill in the IP header for the ABORT */
8207 iph_out->ip_v = IPVERSION;
8208 iph_out->ip_hl = (sizeof(struct ip)/4);
8209 iph_out->ip_tos = (u_char)0;
8211 iph_out->ip_off = 0;
8212 iph_out->ip_ttl = MAXTTL;
8213 iph_out->ip_p = IPPROTO_SCTP;
8214 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
8215 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
8217 /* let IP layer calculate this */
8218 iph_out->ip_sum = 0;
8219 offset_out += sizeof(*iph_out);
8220 comp_cp = (struct sctp_shutdown_complete_msg *)(
8221 (caddr_t)iph_out + offset_out);
8222 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
8223 ip6 = (struct ip6_hdr *)iph;
8224 mout->m_len = sizeof(struct ip6_hdr) +
8225 sizeof(struct sctp_shutdown_complete_msg);
8226 mout->m_next = NULL;
8227 ip6_out = mtod(mout, struct ip6_hdr *);
8229 /* Fill in the IPv6 header for the ABORT */
8230 ip6_out->ip6_flow = ip6->ip6_flow;
8231 ip6_out->ip6_hlim = ip6_defhlim;
8232 ip6_out->ip6_nxt = IPPROTO_SCTP;
8233 ip6_out->ip6_src = ip6->ip6_dst;
8234 ip6_out->ip6_dst = ip6->ip6_src;
8235 ip6_out->ip6_plen = mout->m_len;
8236 offset_out += sizeof(*ip6_out);
8237 comp_cp = (struct sctp_shutdown_complete_msg *)(
8238 (caddr_t)ip6_out + offset_out);
8240 /* Currently not supported. */
8244 /* Now copy in and fill in the ABORT tags etc. */
8245 comp_cp->sh.src_port = sh->dest_port;
8246 comp_cp->sh.dest_port = sh->src_port;
8247 comp_cp->sh.checksum = 0;
8248 comp_cp->sh.v_tag = sh->v_tag;
8249 comp_cp->shut_cmp.ch.chunk_flags = SCTP_HAD_NO_TCB;
8250 comp_cp->shut_cmp.ch.chunk_type = SCTP_SHUTDOWN_COMPLETE;
8251 comp_cp->shut_cmp.ch.chunk_length = htons(sizeof(struct sctp_shutdown_complete_chunk));
8253 mout->m_pkthdr.len = mout->m_len;
8255 if ((sctp_no_csum_on_loopback) &&
8256 (m->m_pkthdr.rcvif) &&
8257 (m->m_pkthdr.rcvif->if_type == IFT_LOOP)) {
8258 comp_cp->sh.checksum = 0;
8260 comp_cp->sh.checksum = sctp_calculate_sum(mout, NULL, offset_out);
8263 /* zap the rcvif, it should be null */
8264 mout->m_pkthdr.rcvif = 0;
8265 /* zap the stack pointer to the route */
8266 if (iph_out != NULL) {
8269 bzero(&ro, sizeof ro);
8271 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
8272 printf("sctp_shutdown_complete2 calling ip_output:\n");
8273 sctp_print_address_pkt(iph_out, &comp_cp->sh);
8276 /* set IPv4 length */
8277 #if defined(__FreeBSD__)
8278 iph_out->ip_len = mout->m_pkthdr.len;
8280 iph_out->ip_len = htons(mout->m_pkthdr.len);
8283 ip_output(mout, 0, &ro, IP_RAWOUTPUT, NULL
8284 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
8285 || defined(__NetBSD__) || defined(__DragonFly__)
8289 /* Free the route if we got one back */
8292 } else if (ip6_out != NULL) {
8293 #ifdef NEW_STRUCT_ROUTE
8296 struct route_in6 ro;
8299 bzero(&ro, sizeof(ro));
8301 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
8302 printf("sctp_shutdown_complete2 calling ip6_output:\n");
8303 sctp_print_address_pkt((struct ip *)ip6_out,
8307 ip6_output(mout, NULL, &ro, 0, NULL, NULL
8308 #if defined(__NetBSD__)
8311 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
8315 /* Free the route if we got one back */
8319 sctp_pegs[SCTP_DATAGRAMS_SENT]++;
8323 static struct sctp_nets *
8324 sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now)
8326 struct sctp_nets *net, *hnet;
8327 int ms_goneby, highest_ms, state_overide=0;
8329 SCTP_GETTIME_TIMEVAL(now);
8332 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
8334 ((net->dest_state & SCTP_ADDR_NOHB) && ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) ||
8335 (net->dest_state & SCTP_ADDR_OUT_OF_SCOPE)
8337 /* Skip this guy from consideration if HB is off AND its confirmed*/
8339 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
8340 printf("Skipping net:%p state:%d nohb/out-of-scope\n",
8341 net, net->dest_state);
8346 if (sctp_destination_is_reachable(stcb, (struct sockaddr *)&net->ro._l_addr) == 0) {
8347 /* skip this dest net from consideration */
8349 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
8350 printf("Skipping net:%p reachable NOT\n",
8356 if (net->last_sent_time.tv_sec) {
8357 /* Sent to so we subtract */
8358 ms_goneby = (now->tv_sec - net->last_sent_time.tv_sec) * 1000;
8360 /* Never been sent to */
8361 ms_goneby = 0x7fffffff;
8363 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
8364 printf("net:%p ms_goneby:%d\n",
8368 /* When the address state is unconfirmed but still considered reachable, we
8369 * HB at a higher rate. Once it goes confirmed OR reaches the "unreachable"
8370 * state, thenw we cut it back to HB at a more normal pace.
8372 if ((net->dest_state & (SCTP_ADDR_UNCONFIRMED|SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED) {
8378 if ((((unsigned int)ms_goneby >= net->RTO) || (state_overide)) &&
8379 (ms_goneby > highest_ms)) {
8380 highest_ms = ms_goneby;
8383 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
8384 printf("net:%p is the new high\n",
8391 ((hnet->dest_state & (SCTP_ADDR_UNCONFIRMED|SCTP_ADDR_NOT_REACHABLE)) == SCTP_ADDR_UNCONFIRMED)) {
8397 if (highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_overide)) {
8398 /* Found the one with longest delay bounds
8399 * OR it is unconfirmed and still not marked
8403 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
8404 printf("net:%p is the hb winner -",
8407 sctp_print_address((struct sockaddr *)&hnet->ro._l_addr);
8412 /* update the timer now */
8413 hnet->last_sent_time = *now;
8421 sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
8423 struct sctp_tmit_chunk *chk;
8424 struct sctp_nets *net;
8425 struct sctp_heartbeat_chunk *hb;
8427 struct sockaddr_in *sin;
8428 struct sockaddr_in6 *sin6;
8430 if (user_req == 0) {
8431 net = sctp_select_hb_destination(stcb, &now);
8433 /* All our busy none to send to, just
8434 * start the timer again.
8436 if (stcb->asoc.state == 0) {
8439 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
8445 #ifndef SCTP_USE_ALLMAN_BURST
8447 /* found one idle.. decay cwnd on this one
8448 * by 1/2 if none outstanding.
8451 if (net->flight_size == 0) {
8453 if (net->addr_is_local) {
8454 if (net->cwnd < (net->mtu *4)) {
8455 net->cwnd = net->mtu * 4;
8458 if (net->cwnd < (net->mtu * 2)) {
8459 net->cwnd = net->mtu * 2;
8472 SCTP_GETTIME_TIMEVAL(&now);
8474 sin = (struct sockaddr_in *)&net->ro._l_addr;
8475 if (sin->sin_family != AF_INET) {
8476 if (sin->sin_family != AF_INET6) {
8481 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
8484 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
8485 printf("Gak, can't get a chunk for hb\n");
8490 sctppcbinfo.ipi_gencnt_chunk++;
8491 sctppcbinfo.ipi_count_chunk++;
8492 chk->rec.chunk_id = SCTP_HEARTBEAT_REQUEST;
8493 chk->asoc = &stcb->asoc;
8494 chk->send_size = sizeof(struct sctp_heartbeat_chunk);
8495 MGETHDR(chk->data, M_DONTWAIT, MT_DATA);
8496 if (chk->data == NULL) {
8497 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
8498 sctppcbinfo.ipi_count_chunk--;
8499 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
8500 panic("Chunk count is negative");
8502 sctppcbinfo.ipi_gencnt_chunk++;
8505 chk->data->m_data += SCTP_MIN_OVERHEAD;
8506 chk->data->m_pkthdr.len = chk->data->m_len = chk->send_size;
8507 chk->sent = SCTP_DATAGRAM_UNSENT;
8510 chk->whoTo->ref_count++;
8511 /* Now we have a mbuf that we can fill in with the details */
8512 hb = mtod(chk->data, struct sctp_heartbeat_chunk *);
8514 /* fill out chunk header */
8515 hb->ch.chunk_type = SCTP_HEARTBEAT_REQUEST;
8516 hb->ch.chunk_flags = 0;
8517 hb->ch.chunk_length = htons(chk->send_size);
8518 /* Fill out hb parameter */
8519 hb->heartbeat.hb_info.ph.param_type = htons(SCTP_HEARTBEAT_INFO);
8520 hb->heartbeat.hb_info.ph.param_length = htons(sizeof(struct sctp_heartbeat_info_param));
8521 hb->heartbeat.hb_info.time_value_1 = now.tv_sec;
8522 hb->heartbeat.hb_info.time_value_2 = now.tv_usec;
8523 /* Did our user request this one, put it in */
8524 hb->heartbeat.hb_info.user_req = user_req;
8525 hb->heartbeat.hb_info.addr_family = sin->sin_family;
8526 hb->heartbeat.hb_info.addr_len = sin->sin_len;
8527 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
8528 /* we only take from the entropy pool if the address is
8531 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
8532 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
8534 net->heartbeat_random1 = hb->heartbeat.hb_info.random_value1 = 0;
8535 net->heartbeat_random2 = hb->heartbeat.hb_info.random_value2 = 0;
8537 if (sin->sin_family == AF_INET) {
8538 memcpy(hb->heartbeat.hb_info.address, &sin->sin_addr, sizeof(sin->sin_addr));
8539 } else if (sin->sin_family == AF_INET6) {
8540 /* We leave the scope the way it is in our lookup table. */
8541 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
8542 memcpy(hb->heartbeat.hb_info.address, &sin6->sin6_addr, sizeof(sin6->sin6_addr));
8544 /* huh compiler bug */
8546 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
8547 printf("Compiler bug bleeds a mbuf and a chunk\n");
8552 /* ok we have a destination that needs a beat */
8553 /* lets do the theshold management Qiaobing style */
8554 if (user_req == 0) {
8555 if (sctp_threshold_management(stcb->sctp_ep, stcb, net,
8556 stcb->asoc.max_send_times)) {
8557 /* we have lost the association, in a way this
8558 * is quite bad since we really are one less time
8559 * since we really did not send yet. This is the
8560 * down side to the Q's style as defined in the RFC
8561 * and not my alternate style defined in the RFC.
8563 if (chk->data != NULL) {
8564 sctp_m_freem(chk->data);
8567 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
8568 sctppcbinfo.ipi_count_chunk--;
8569 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
8570 panic("Chunk count is negative");
8572 sctppcbinfo.ipi_gencnt_chunk++;
8576 net->hb_responded = 0;
8578 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
8579 printf("Inserting chunk for HB\n");
8582 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
8583 stcb->asoc.ctrl_queue_cnt++;
8584 sctp_pegs[SCTP_HB_SENT]++;
8586 * Call directly med level routine to put out the chunk. It will
8587 * always tumble out control chunks aka HB but it may even tumble
8590 if (user_req == 0) {
8591 /* Ok now lets start the HB timer if it is NOT a user req */
8592 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
8599 sctp_send_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
8602 struct sctp_association *asoc;
8603 struct sctp_ecne_chunk *ecne;
8604 struct sctp_tmit_chunk *chk;
8606 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8607 if (chk->rec.chunk_id == SCTP_ECN_ECHO) {
8608 /* found a previous ECN_ECHO update it if needed */
8609 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
8610 ecne->tsn = htonl(high_tsn);
8614 /* nope could not find one to update so we must build one */
8615 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
8619 sctp_pegs[SCTP_ECNE_SENT]++;
8620 sctppcbinfo.ipi_count_chunk++;
8621 sctppcbinfo.ipi_gencnt_chunk++;
8622 chk->rec.chunk_id = SCTP_ECN_ECHO;
8623 chk->asoc = &stcb->asoc;
8624 chk->send_size = sizeof(struct sctp_ecne_chunk);
8625 MGETHDR(chk->data, M_DONTWAIT, MT_DATA);
8626 if (chk->data == NULL) {
8627 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
8628 sctppcbinfo.ipi_count_chunk--;
8629 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
8630 panic("Chunk count is negative");
8632 sctppcbinfo.ipi_gencnt_chunk++;
8635 chk->data->m_data += SCTP_MIN_OVERHEAD;
8636 chk->data->m_pkthdr.len = chk->data->m_len = chk->send_size;
8637 chk->sent = SCTP_DATAGRAM_UNSENT;
8640 chk->whoTo->ref_count++;
8641 ecne = mtod(chk->data, struct sctp_ecne_chunk *);
8642 ecne->ch.chunk_type = SCTP_ECN_ECHO;
8643 ecne->ch.chunk_flags = 0;
8644 ecne->ch.chunk_length = htons(sizeof(struct sctp_ecne_chunk));
8645 ecne->tsn = htonl(high_tsn);
8646 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
8647 asoc->ctrl_queue_cnt++;
8651 sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
8652 struct mbuf *m, int iphlen, int bad_crc)
8654 struct sctp_association *asoc;
8655 struct sctp_pktdrop_chunk *drp;
8656 struct sctp_tmit_chunk *chk;
8659 unsigned int small_one;
8664 if (asoc->peer_supports_pktdrop == 0) {
8665 /* peer must declare support before I
8670 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
8674 sctppcbinfo.ipi_count_chunk++;
8675 sctppcbinfo.ipi_gencnt_chunk++;
8677 iph = mtod(m, struct ip *);
8681 if (iph->ip_v == IPVERSION) {
8683 #if defined(__FreeBSD__)
8684 len = chk->send_size = iph->ip_len;
8686 len = chk->send_size = (iph->ip_len - iphlen);
8689 struct ip6_hdr *ip6h;
8691 ip6h = mtod(m, struct ip6_hdr *);
8692 len = chk->send_size = htons(ip6h->ip6_plen);
8694 if ((len+iphlen) > m->m_pkthdr.len) {
8696 chk->send_size = len = m->m_pkthdr.len - iphlen;
8698 chk->asoc = &stcb->asoc;
8699 MGETHDR(chk->data, M_DONTWAIT, MT_DATA);
8700 if (chk->data == NULL) {
8702 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
8703 sctppcbinfo.ipi_count_chunk--;
8704 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
8705 panic("Chunk count is negative");
8707 sctppcbinfo.ipi_gencnt_chunk++;
8710 if ((chk->send_size+sizeof(struct sctp_pktdrop_chunk)+SCTP_MIN_OVERHEAD) > MHLEN) {
8711 MCLGET(chk->data, M_DONTWAIT);
8712 if ((chk->data->m_flags & M_EXT) == 0) {
8714 sctp_m_freem(chk->data);
8719 chk->data->m_data += SCTP_MIN_OVERHEAD;
8720 drp = mtod(chk->data, struct sctp_pktdrop_chunk *);
8722 sctp_m_freem(chk->data);
8726 small_one = asoc->smallest_mtu;
8727 if (small_one > MCLBYTES) {
8728 /* Only one cluster worth of data MAX */
8729 small_one = MCLBYTES;
8731 chk->book_size = (chk->send_size + sizeof(struct sctp_pktdrop_chunk) +
8732 sizeof(struct sctphdr) + SCTP_MED_OVERHEAD);
8733 if (chk->book_size > small_one) {
8734 drp->ch.chunk_flags = SCTP_PACKET_TRUNCATED;
8735 drp->trunc_len = htons(chk->send_size);
8736 chk->send_size = small_one - (SCTP_MED_OVERHEAD +
8737 sizeof(struct sctp_pktdrop_chunk) +
8738 sizeof(struct sctphdr));
8739 len = chk->send_size;
8741 /* no truncation needed */
8742 drp->ch.chunk_flags = 0;
8743 drp->trunc_len = htons(0);
8746 drp->ch.chunk_flags |= SCTP_BADCRC;
8748 chk->send_size += sizeof(struct sctp_pktdrop_chunk);
8749 chk->data->m_pkthdr.len = chk->data->m_len = chk->send_size;
8750 chk->sent = SCTP_DATAGRAM_UNSENT;
8753 /* we should hit here */
8756 chk->whoTo = asoc->primary_destination;
8758 chk->whoTo->ref_count++;
8759 chk->rec.chunk_id = SCTP_PACKET_DROPPED;
8760 drp->ch.chunk_type = SCTP_PACKET_DROPPED;
8761 drp->ch.chunk_length = htons(chk->send_size);
8762 spc = stcb->sctp_socket->so_rcv.sb_hiwat;
8766 drp->bottle_bw = htonl(spc);
8767 drp->current_onq = htonl(asoc->size_on_delivery_queue +
8768 asoc->size_on_reasm_queue +
8769 asoc->size_on_all_streams +
8770 asoc->my_rwnd_control_len +
8771 stcb->sctp_socket->so_rcv.sb_cc);
8774 m_copydata(m, iphlen, len, datap);
8775 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
8776 asoc->ctrl_queue_cnt++;
8780 sctp_send_cwr(struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t high_tsn)
8782 struct sctp_association *asoc;
8783 struct sctp_cwr_chunk *cwr;
8784 struct sctp_tmit_chunk *chk;
8787 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
8788 if (chk->rec.chunk_id == SCTP_ECN_CWR) {
8789 /* found a previous ECN_CWR update it if needed */
8790 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
8791 if (compare_with_wrap(high_tsn, ntohl(cwr->tsn),
8793 cwr->tsn = htonl(high_tsn);
8798 /* nope could not find one to update so we must build one */
8799 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
8803 sctppcbinfo.ipi_count_chunk++;
8804 sctppcbinfo.ipi_gencnt_chunk++;
8805 chk->rec.chunk_id = SCTP_ECN_CWR;
8806 chk->asoc = &stcb->asoc;
8807 chk->send_size = sizeof(struct sctp_cwr_chunk);
8808 MGETHDR(chk->data, M_DONTWAIT, MT_DATA);
8809 if (chk->data == NULL) {
8810 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
8811 sctppcbinfo.ipi_count_chunk--;
8812 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
8813 panic("Chunk count is negative");
8815 sctppcbinfo.ipi_gencnt_chunk++;
8818 chk->data->m_data += SCTP_MIN_OVERHEAD;
8819 chk->data->m_pkthdr.len = chk->data->m_len = chk->send_size;
8820 chk->sent = SCTP_DATAGRAM_UNSENT;
8823 chk->whoTo->ref_count++;
8824 cwr = mtod(chk->data, struct sctp_cwr_chunk *);
8825 cwr->ch.chunk_type = SCTP_ECN_CWR;
8826 cwr->ch.chunk_flags = 0;
8827 cwr->ch.chunk_length = htons(sizeof(struct sctp_cwr_chunk));
8828 cwr->tsn = htonl(high_tsn);
8829 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
8830 asoc->ctrl_queue_cnt++;
8833 sctp_reset_the_streams(struct sctp_tcb *stcb,
8834 struct sctp_stream_reset_request *req, int number_entries, uint16_t *list)
8838 if (req->reset_flags & SCTP_RESET_ALL) {
8839 for (i=0; i<stcb->asoc.streamoutcnt; i++) {
8840 stcb->asoc.strmout[i].next_sequence_sent = 0;
8842 } else if (number_entries) {
8843 for (i=0; i<number_entries; i++) {
8844 if (list[i] >= stcb->asoc.streamoutcnt) {
8845 /* no such stream */
8848 stcb->asoc.strmout[(list[i])].next_sequence_sent = 0;
8851 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list);
8855 sctp_send_str_reset_ack(struct sctp_tcb *stcb,
8856 struct sctp_stream_reset_request *req)
8858 struct sctp_association *asoc;
8859 struct sctp_stream_reset_resp *strack;
8860 struct sctp_tmit_chunk *chk;
8862 int number_entries, i;
8863 uint8_t two_way=0, not_peer=0;
8864 uint16_t *list=NULL;
8867 if (req->reset_flags & SCTP_RESET_ALL)
8870 number_entries = (ntohs(req->ph.param_length) - sizeof(struct sctp_stream_reset_request)) / sizeof(uint16_t);
8872 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
8876 sctppcbinfo.ipi_count_chunk++;
8877 sctppcbinfo.ipi_gencnt_chunk++;
8878 chk->rec.chunk_id = SCTP_STREAM_RESET;
8879 chk->asoc = &stcb->asoc;
8880 chk->send_size = sizeof(struct sctp_stream_reset_resp) + (number_entries * sizeof(uint16_t));
8881 MGETHDR(chk->data, M_DONTWAIT, MT_DATA);
8882 if (chk->data == NULL) {
8884 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
8885 sctppcbinfo.ipi_count_chunk--;
8886 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
8887 panic("Chunk count is negative");
8889 sctppcbinfo.ipi_gencnt_chunk++;
8892 chk->data->m_data += SCTP_MIN_OVERHEAD;
8893 chk->data->m_pkthdr.len = chk->data->m_len = SCTP_SIZE32(chk->send_size);
8894 if (M_TRAILINGSPACE(chk->data) < (int)SCTP_SIZE32(chk->send_size)) {
8895 MCLGET(chk->data, M_DONTWAIT);
8896 if ((chk->data->m_flags & M_EXT) == 0) {
8898 sctp_m_freem(chk->data);
8900 goto strresp_jump_out;
8902 chk->data->m_data += SCTP_MIN_OVERHEAD;
8904 if (M_TRAILINGSPACE(chk->data) < (int)SCTP_SIZE32(chk->send_size)) {
8905 /* can't do it, no room */
8907 sctp_m_freem(chk->data);
8909 goto strresp_jump_out;
8912 chk->sent = SCTP_DATAGRAM_UNSENT;
8914 chk->whoTo = asoc->primary_destination;
8915 chk->whoTo->ref_count++;
8916 strack = mtod(chk->data, struct sctp_stream_reset_resp *);
8918 strack->ch.chunk_type = SCTP_STREAM_RESET;
8919 strack->ch.chunk_flags = 0;
8920 strack->ch.chunk_length = htons(chk->send_size);
8922 memset(strack->sr_resp.reset_pad, 0, sizeof(strack->sr_resp.reset_pad));
8924 strack->sr_resp.ph.param_type = ntohs(SCTP_STR_RESET_RESPONSE);
8925 strack->sr_resp.ph.param_length = htons((chk->send_size - sizeof(struct sctp_chunkhdr)));
8929 if (chk->send_size % 4) {
8930 /* need a padding for the end */
8933 end = (uint8_t *)((caddr_t)strack + chk->send_size);
8934 pad = chk->send_size % 4;
8935 for (i = 0; i < pad; i++) {
8938 chk->send_size += pad;
8941 /* actual response */
8942 if (req->reset_flags & SCTP_RESET_YOUR) {
8943 strack->sr_resp.reset_flags = SCTP_RESET_PERFORMED;
8945 strack->sr_resp.reset_flags = 0;
8948 /* copied from reset request */
8949 strack->sr_resp.reset_req_seq_resp = req->reset_req_seq;
8950 seq = ntohl(req->reset_req_seq);
8952 list = req->list_of_streams;
8953 /* copy the un-converted network byte order streams */
8954 for (i=0; i<number_entries; i++) {
8955 strack->sr_resp.list_of_streams[i] = list[i];
8957 if (asoc->str_reset_seq_in == seq) {
8958 /* is it the next expected? */
8959 asoc->str_reset_seq_in++;
8960 strack->sr_resp.reset_at_tsn = htonl(asoc->sending_seq);
8961 asoc->str_reset_sending_seq = asoc->sending_seq;
8962 if (number_entries) {
8965 /* convert them to host byte order */
8966 for (i=0 ; i<number_entries; i++) {
8967 temp = ntohs(list[i]);
8971 if (req->reset_flags & SCTP_RESET_YOUR) {
8972 /* reset my outbound streams */
8973 sctp_reset_the_streams(stcb, req , number_entries, list);
8975 if (req->reset_flags & SCTP_RECIPRICAL) {
8976 /* reset peer too */
8977 sctp_send_str_reset_req(stcb, number_entries, list, two_way, not_peer);
8981 /* no its a retran so I must just ack and do nothing */
8982 strack->sr_resp.reset_at_tsn = htonl(asoc->str_reset_sending_seq);
8984 strack->sr_resp.cumulative_tsn = htonl(asoc->cumulative_tsn);
8985 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
8988 asoc->ctrl_queue_cnt++;
8993 sctp_send_str_reset_req(struct sctp_tcb *stcb,
8994 int number_entrys, uint16_t *list, uint8_t two_way, uint8_t not_peer)
8996 /* Send a stream reset request. The number_entrys may be 0 and list NULL
8997 * if the request is to reset all streams. If two_way is true then we
8998 * not only request a RESET of the received streams but we also
8999 * request the peer to send a reset req to us too.
9000 * Flag combinations in table:
9002 * two_way | not_peer | = | Flags
9003 * ------------------------------
9004 * 0 | 0 | = | SCTP_RESET_YOUR (just the peer)
9005 * 1 | 0 | = | SCTP_RESET_YOUR | SCTP_RECIPRICAL (both sides)
9006 * 0 | 1 | = | Not a Valid Request (not anyone)
9007 * 1 | 1 | = | SCTP_RESET_RECIPRICAL (Just local host)
9009 struct sctp_association *asoc;
9010 struct sctp_stream_reset_req *strreq;
9011 struct sctp_tmit_chunk *chk;
9015 if (asoc->stream_reset_outstanding) {
9016 /* Already one pending, must get ACK back
9017 * to clear the flag.
9022 if ((two_way == 0) && (not_peer == 1)) {
9023 /* not a valid request */
9027 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
9031 sctppcbinfo.ipi_count_chunk++;
9032 sctppcbinfo.ipi_gencnt_chunk++;
9033 chk->rec.chunk_id = SCTP_STREAM_RESET;
9034 chk->asoc = &stcb->asoc;
9035 chk->send_size = sizeof(struct sctp_stream_reset_req) + (number_entrys * sizeof(uint16_t));
9036 MGETHDR(chk->data, M_DONTWAIT, MT_DATA);
9037 if (chk->data == NULL) {
9039 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
9040 sctppcbinfo.ipi_count_chunk--;
9041 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
9042 panic("Chunk count is negative");
9044 sctppcbinfo.ipi_gencnt_chunk++;
9047 chk->data->m_data += SCTP_MIN_OVERHEAD;
9048 chk->data->m_pkthdr.len = chk->data->m_len = SCTP_SIZE32(chk->send_size);
9049 if (M_TRAILINGSPACE(chk->data) < (int)SCTP_SIZE32(chk->send_size)) {
9050 MCLGET(chk->data, M_DONTWAIT);
9051 if ((chk->data->m_flags & M_EXT) == 0) {
9053 sctp_m_freem(chk->data);
9055 goto strreq_jump_out;
9057 chk->data->m_data += SCTP_MIN_OVERHEAD;
9059 if (M_TRAILINGSPACE(chk->data) < (int)SCTP_SIZE32(chk->send_size)) {
9060 /* can't do it, no room */
9062 sctp_m_freem(chk->data);
9064 goto strreq_jump_out;
9066 chk->sent = SCTP_DATAGRAM_UNSENT;
9068 chk->whoTo = asoc->primary_destination;
9069 chk->whoTo->ref_count++;
9071 strreq = mtod(chk->data, struct sctp_stream_reset_req *);
9072 strreq->ch.chunk_type = SCTP_STREAM_RESET;
9073 strreq->ch.chunk_flags = 0;
9074 strreq->ch.chunk_length = htons(chk->send_size);
9076 strreq->sr_req.ph.param_type = ntohs(SCTP_STR_RESET_REQUEST);
9077 strreq->sr_req.ph.param_length = htons((chk->send_size - sizeof(struct sctp_chunkhdr)));
9079 if (chk->send_size % 4) {
9080 /* need a padding for the end */
9083 end = (uint8_t *)((caddr_t)strreq + chk->send_size);
9084 pad = chk->send_size % 4;
9085 for (i=0; i<pad; i++) {
9088 chk->send_size += pad;
9091 strreq->sr_req.reset_flags = 0;
9092 if (number_entrys == 0) {
9093 strreq->sr_req.reset_flags |= SCTP_RESET_ALL;
9096 strreq->sr_req.reset_flags |= SCTP_RESET_YOUR;
9098 if (not_peer == 0) {
9099 strreq->sr_req.reset_flags |= SCTP_RECIPRICAL | SCTP_RESET_YOUR;
9101 strreq->sr_req.reset_flags |= SCTP_RECIPRICAL;
9104 memset(strreq->sr_req.reset_pad, 0, sizeof(strreq->sr_req.reset_pad));
9105 strreq->sr_req.reset_req_seq = htonl(asoc->str_reset_seq_out);
9106 if (number_entrys) {
9107 /* populate the specific entry's */
9109 for (i=0; i < number_entrys; i++) {
9110 strreq->sr_req.list_of_streams[i] = htons(list[i]);
9113 TAILQ_INSERT_TAIL(&asoc->control_send_queue,
9116 asoc->ctrl_queue_cnt++;
9117 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
9118 asoc->stream_reset_outstanding = 1;
9122 sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
9123 struct mbuf *err_cause)
9126 * Formulate the abort message, and send it back down.
9129 struct sctp_abort_msg *abm;
9130 struct ip *iph, *iph_out;
9131 struct ip6_hdr *ip6, *ip6_out;
9134 /* don't respond to ABORT with ABORT */
9135 if (sctp_is_there_an_abort_here(m, iphlen, &vtag)) {
9137 sctp_m_freem(err_cause);
9140 MGETHDR(mout, M_DONTWAIT, MT_HEADER);
9143 sctp_m_freem(err_cause);
9146 iph = mtod(m, struct ip *);
9149 if (iph->ip_v == IPVERSION) {
9150 iph_out = mtod(mout, struct ip *);
9151 mout->m_len = sizeof(*iph_out) + sizeof(*abm);
9152 mout->m_next = err_cause;
9154 /* Fill in the IP header for the ABORT */
9155 iph_out->ip_v = IPVERSION;
9156 iph_out->ip_hl = (sizeof(struct ip) / 4);
9157 iph_out->ip_tos = (u_char)0;
9159 iph_out->ip_off = 0;
9160 iph_out->ip_ttl = MAXTTL;
9161 iph_out->ip_p = IPPROTO_SCTP;
9162 iph_out->ip_src.s_addr = iph->ip_dst.s_addr;
9163 iph_out->ip_dst.s_addr = iph->ip_src.s_addr;
9164 /* let IP layer calculate this */
9165 iph_out->ip_sum = 0;
9167 iphlen_out = sizeof(*iph_out);
9168 abm = (struct sctp_abort_msg *)((caddr_t)iph_out + iphlen_out);
9169 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
9170 ip6 = (struct ip6_hdr *)iph;
9171 ip6_out = mtod(mout, struct ip6_hdr *);
9172 mout->m_len = sizeof(*ip6_out) + sizeof(*abm);
9173 mout->m_next = err_cause;
9175 /* Fill in the IP6 header for the ABORT */
9176 ip6_out->ip6_flow = ip6->ip6_flow;
9177 ip6_out->ip6_hlim = ip6_defhlim;
9178 ip6_out->ip6_nxt = IPPROTO_SCTP;
9179 ip6_out->ip6_src = ip6->ip6_dst;
9180 ip6_out->ip6_dst = ip6->ip6_src;
9182 iphlen_out = sizeof(*ip6_out);
9183 abm = (struct sctp_abort_msg *)((caddr_t)ip6_out + iphlen_out);
9185 /* Currently not supported */
9189 abm->sh.src_port = sh->dest_port;
9190 abm->sh.dest_port = sh->src_port;
9191 abm->sh.checksum = 0;
9193 abm->sh.v_tag = sh->v_tag;
9194 abm->msg.ch.chunk_flags = SCTP_HAD_NO_TCB;
9196 abm->sh.v_tag = htonl(vtag);
9197 abm->msg.ch.chunk_flags = 0;
9199 abm->msg.ch.chunk_type = SCTP_ABORT_ASSOCIATION;
9202 struct mbuf *m_tmp = err_cause;
9204 /* get length of the err_cause chain */
9205 while (m_tmp != NULL) {
9206 err_len += m_tmp->m_len;
9207 m_tmp = m_tmp->m_next;
9209 mout->m_pkthdr.len = mout->m_len + err_len;
9211 /* need pad at end of chunk */
9214 padlen = 4 - (mout->m_pkthdr.len % 4);
9215 m_copyback(mout, mout->m_pkthdr.len, padlen, (caddr_t)&cpthis);
9217 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch) + err_len);
9219 mout->m_pkthdr.len = mout->m_len;
9220 abm->msg.ch.chunk_length = htons(sizeof(abm->msg.ch));
9224 if ((sctp_no_csum_on_loopback) &&
9225 (m->m_pkthdr.rcvif) &&
9226 (m->m_pkthdr.rcvif->if_type == IFT_LOOP)) {
9227 abm->sh.checksum = 0;
9229 abm->sh.checksum = sctp_calculate_sum(mout, NULL, iphlen_out);
9232 /* zap the rcvif, it should be null */
9233 mout->m_pkthdr.rcvif = 0;
9234 if (iph_out != NULL) {
9237 /* zap the stack pointer to the route */
9238 bzero(&ro, sizeof ro);
9240 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
9241 printf("sctp_send_abort calling ip_output:\n");
9242 sctp_print_address_pkt(iph_out, &abm->sh);
9245 /* set IPv4 length */
9246 #if defined(__FreeBSD__)
9247 iph_out->ip_len = mout->m_pkthdr.len;
9249 iph_out->ip_len = htons(mout->m_pkthdr.len);
9252 (void)ip_output(mout, 0, &ro, IP_RAWOUTPUT, NULL
9253 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
9254 || defined(__NetBSD__) || defined(__DragonFly__)
9258 /* Free the route if we got one back */
9261 } else if (ip6_out != NULL) {
9262 #ifdef NEW_STRUCT_ROUTE
9265 struct route_in6 ro;
9268 /* zap the stack pointer to the route */
9269 bzero(&ro, sizeof(ro));
9271 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
9272 printf("sctp_send_abort calling ip6_output:\n");
9273 sctp_print_address_pkt((struct ip *)ip6_out, &abm->sh);
9276 ip6_output(mout, NULL, &ro, 0, NULL, NULL
9277 #if defined(__NetBSD__)
9280 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
9284 /* Free the route if we got one back */
9288 sctp_pegs[SCTP_DATAGRAMS_SENT]++;
9292 sctp_send_operr_to(struct mbuf *m, int iphlen,
9296 struct sctphdr *ihdr;
9298 struct sctphdr *ohdr;
9299 struct sctp_chunkhdr *ophdr;
9303 struct sockaddr_in6 lsa6, fsa6;
9306 iph = mtod(m, struct ip *);
9307 ihdr = (struct sctphdr *)((caddr_t)iph + iphlen);
9308 if (!(scm->m_flags & M_PKTHDR)) {
9309 /* must be a pkthdr */
9310 printf("Huh, not a packet header in send_operr\n");
9314 M_PREPEND(scm, (sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr)), M_DONTWAIT);
9316 /* can't send because we can't add a mbuf */
9319 ohdr = mtod(scm, struct sctphdr *);
9320 ohdr->src_port = ihdr->dest_port;
9321 ohdr->dest_port = ihdr->src_port;
9324 ophdr = (struct sctp_chunkhdr *)(ohdr + 1);
9325 ophdr->chunk_type = SCTP_OPERATION_ERROR;
9326 ophdr->chunk_flags = 0;
9327 ophdr->chunk_length = htons(scm->m_pkthdr.len - sizeof(struct sctphdr));
9328 if (scm->m_pkthdr.len % 4) {
9332 padlen = 4 - (scm->m_pkthdr.len % 4);
9333 m_copyback(scm, scm->m_pkthdr.len, padlen, (caddr_t)&cpthis);
9335 if ((sctp_no_csum_on_loopback) &&
9336 (m->m_pkthdr.rcvif) &&
9337 (m->m_pkthdr.rcvif->if_type == IFT_LOOP)) {
9340 val = sctp_calculate_sum(scm, NULL, 0);
9342 ohdr->checksum = val;
9343 if (iph->ip_v == IPVERSION) {
9347 M_PREPEND(scm, sizeof(struct ip), M_DONTWAIT);
9350 bzero(&ro, sizeof ro);
9351 out = mtod(scm, struct ip *);
9352 out->ip_v = iph->ip_v;
9353 out->ip_hl = (sizeof(struct ip)/4);
9354 out->ip_tos = iph->ip_tos;
9355 out->ip_id = iph->ip_id;
9357 out->ip_ttl = MAXTTL;
9358 out->ip_p = IPPROTO_SCTP;
9360 out->ip_src = iph->ip_dst;
9361 out->ip_dst = iph->ip_src;
9362 #if defined(__FreeBSD__)
9363 out->ip_len = scm->m_pkthdr.len;
9365 out->ip_len = htons(scm->m_pkthdr.len);
9367 retcode = ip_output(scm, 0, &ro, IP_RAWOUTPUT, NULL
9368 #if defined(__OpenBSD__) || (defined(__FreeBSD__) && __FreeBSD_version >= 480000) \
9369 || defined(__NetBSD__) || defined(__DragonFly__)
9373 sctp_pegs[SCTP_DATAGRAMS_SENT]++;
9374 /* Free the route if we got one back */
9379 #ifdef NEW_STRUCT_ROUTE
9382 struct route_in6 ro;
9384 struct ip6_hdr *out6, *in6;
9386 M_PREPEND(scm, sizeof(struct ip6_hdr), M_DONTWAIT);
9389 bzero(&ro, sizeof ro);
9390 in6 = mtod(m, struct ip6_hdr *);
9391 out6 = mtod(scm, struct ip6_hdr *);
9392 out6->ip6_flow = in6->ip6_flow;
9393 out6->ip6_hlim = ip6_defhlim;
9394 out6->ip6_nxt = IPPROTO_SCTP;
9395 out6->ip6_src = in6->ip6_dst;
9396 out6->ip6_dst = in6->ip6_src;
9399 bzero(&lsa6, sizeof(lsa6));
9400 lsa6.sin6_len = sizeof(lsa6);
9401 lsa6.sin6_family = AF_INET6;
9402 lsa6.sin6_addr = out6->ip6_src;
9403 bzero(&fsa6, sizeof(fsa6));
9404 fsa6.sin6_len = sizeof(fsa6);
9405 fsa6.sin6_family = AF_INET6;
9406 fsa6.sin6_addr = out6->ip6_dst;
9407 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
9408 printf("sctp_operr_to calling ipv6 output:\n");
9410 sctp_print_address((struct sockaddr *)&lsa6);
9412 sctp_print_address((struct sockaddr *)&fsa6);
9414 #endif /* SCTP_DEBUG */
9415 ip6_output(scm, NULL, &ro, 0, NULL, NULL
9416 #if defined(__NetBSD__)
9419 #if (defined(__FreeBSD__) && __FreeBSD_version >= 480000) || defined(__DragonFly__)
9423 sctp_pegs[SCTP_DATAGRAMS_SENT]++;
9424 /* Free the route if we got one back */
9431 sctp_copy_one(struct mbuf *m, struct uio *uio, int cpsz, int resv_upfront, int *mbcnt)
9433 int left, cancpy, willcpy, error;
9442 if ((left+resv_upfront) > (int)MHLEN) {
9448 if ((m->m_flags & M_EXT) == 0) {
9452 *mbcnt += m->m_ext.ext_size;
9455 cancpy = M_TRAILINGSPACE(m);
9456 willcpy = min(cancpy, left);
9457 if ((willcpy + resv_upfront) > cancpy) {
9458 willcpy -= resv_upfront;
9461 /* Align data to the end */
9462 if ((m->m_flags & M_EXT) == 0) {
9463 if (m->m_flags & M_PKTHDR) {
9464 MH_ALIGN(m, willcpy);
9466 M_ALIGN(m, willcpy);
9469 MC_ALIGN(m, willcpy);
9471 error = uiomove(mtod(m, caddr_t), willcpy, uio);
9479 MGET(m->m_next, M_WAIT, MT_DATA);
9480 if (m->m_next == NULL) {
9487 if (left > (int)MHLEN) {
9493 if ((m->m_flags & M_EXT) == 0) {
9497 *mbcnt += m->m_ext.ext_size;
9499 cancpy = M_TRAILINGSPACE(m);
9500 willcpy = min(cancpy, left);
9507 sctp_copy_it_in(struct sctp_inpcb *inp,
9508 struct sctp_tcb *stcb,
9509 struct sctp_association *asoc,
9510 struct sctp_nets *net,
9511 struct sctp_sndrcvinfo *srcv,
9515 /* This routine must be very careful in
9516 * its work. Protocol processing is
9517 * up and running so care must be taken to
9518 * spl...() when you need to do something
9519 * that may effect the stcb/asoc. The sb is
9520 * locked however. When data is copied the
9521 * protocol processing should be enabled since
9522 * this is a slower operation...
9527 int frag_size, mbcnt = 0, mbcnt_e = 0;
9528 unsigned int sndlen;
9529 unsigned int tot_demand;
9530 int tot_out, dataout;
9531 struct sctp_tmit_chunk *chk;
9533 struct sctp_stream_out *strq;
9537 #if defined(__NetBSD__) || defined(__OpenBSD__)
9542 so = stcb->sctp_socket;
9546 sndlen = uio->uio_resid;
9547 /* lock the socket buf */
9548 SOCKBUF_LOCK(&so->so_snd);
9549 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
9553 /* will it ever fit ? */
9554 if (sndlen > so->so_snd.sb_hiwat) {
9555 /* It will NEVER fit */
9560 /* Do I need to block? */
9561 if ((so->so_snd.sb_hiwat <
9562 (sndlen + asoc->total_output_queue_size)) ||
9563 (asoc->chunks_on_out_queue > sctp_max_chunks_on_queue) ||
9564 (asoc->total_output_mbuf_queue_size >
9565 so->so_snd.sb_mbmax)
9567 /* prune any prsctp bufs out */
9568 if (asoc->peer_supports_prsctp) {
9569 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
9572 * We store off a pointer to the endpoint.
9573 * Since on return from this we must check to
9574 * see if an so_error is set. If so we may have
9575 * been reset and our stcb destroyed. Returning
9576 * an error will flow back to the user...
9578 while ((so->so_snd.sb_hiwat <
9579 (sndlen + asoc->total_output_queue_size)) ||
9580 (asoc->chunks_on_out_queue >
9581 sctp_max_chunks_on_queue) ||
9582 (asoc->total_output_mbuf_queue_size >
9583 so->so_snd.sb_mbmax)
9585 if ((so->so_state & SS_NBIO)
9586 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
9587 || (flags & MSG_NBIO)
9590 /* Non-blocking io in place */
9591 error = EWOULDBLOCK;
9594 inp->sctp_tcb_at_block = (void *)stcb;
9595 inp->error_on_block = 0;
9596 #ifdef SCTP_BLK_LOGGING
9597 sctp_log_block(SCTP_BLOCK_LOG_INTO_BLK,
9600 sbunlock(&so->so_snd);
9601 SCTP_TCB_UNLOCK(stcb);
9602 error = sbwait(&so->so_snd);
9603 SCTP_INP_RLOCK(inp);
9604 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
9605 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
9606 /* Should I really unlock ? */
9607 SCTP_INP_RUNLOCK(inp);
9611 SCTP_TCB_LOCK(stcb);
9612 SCTP_INP_RUNLOCK(inp);
9614 inp->sctp_tcb_at_block = 0;
9615 #ifdef SCTP_BLK_LOGGING
9616 sctp_log_block(SCTP_BLOCK_LOG_OUTOF_BLK,
9619 if (inp->error_on_block) {
9621 * if our asoc was killed, the free code
9622 * (in sctp_pcb.c) will save a error in
9625 error = inp->error_on_block;
9633 /* did we encounter a socket error? */
9635 error = so->so_error;
9639 error = sblock(&so->so_snd, M_WAITOK);
9641 /* Can't aquire the lock */
9645 #if defined(__FreeBSD__) && __FreeBSD_version >= 502115
9646 if (so->so_rcv.sb_state & SBS_CANTSENDMORE) {
9648 if (so->so_state & SS_CANTSENDMORE) {
9650 /* The socket is now set not to sendmore.. its gone */
9656 error = so->so_error;
9660 if (asoc->peer_supports_prsctp) {
9661 sctp_prune_prsctp(stcb, asoc, srcv, sndlen);
9665 dataout = tot_out = uio->uio_resid;
9666 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
9667 resv_in_first = SCTP_MED_OVERHEAD;
9669 resv_in_first = SCTP_MED_V4_OVERHEAD;
9672 /* Are we aborting? */
9673 if (srcv->sinfo_flags & MSG_ABORT) {
9674 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) &&
9675 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_ECHOED)) {
9676 /* It has to be up before we abort */
9677 /* how big is the user initiated abort? */
9679 /* I wonder about doing a MGET without a splnet set.
9680 * it is done that way in the sosend code so I guess
9683 MGETHDR(mm, M_WAIT, MT_DATA);
9685 struct sctp_paramhdr *ph;
9687 tot_demand = (tot_out + sizeof(struct sctp_paramhdr));
9688 if (tot_demand > MHLEN) {
9689 if (tot_demand > MCLBYTES) {
9690 /* truncate user data */
9691 tot_demand = MCLBYTES;
9692 tot_out = tot_demand - sizeof(struct sctp_paramhdr);
9695 if ((mm->m_flags & M_EXT) == 0) {
9696 /* truncate further */
9698 tot_out = tot_demand - sizeof(struct sctp_paramhdr);
9701 /* now move forward the data pointer */
9702 ph = mtod(mm, struct sctp_paramhdr *);
9703 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
9704 ph->param_length = htons((sizeof(struct sctp_paramhdr) + tot_out));
9706 mm->m_pkthdr.len = tot_out + sizeof(struct sctp_paramhdr);
9707 mm->m_len = mm->m_pkthdr.len;
9708 error = uiomove((caddr_t)ph, (int)tot_out, uio);
9711 * Here if we can't get his data we
9712 * still abort we just don't get to
9713 * send the users note :-0
9719 sbunlock(&so->so_snd);
9720 SOCKBUF_UNLOCK(&so->so_snd);
9721 sctp_abort_an_association(stcb->sctp_ep, stcb,
9722 SCTP_RESPONSE_TO_USER_REQ,
9732 /* Now can we send this? */
9733 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
9734 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
9735 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
9736 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
9737 /* got data while shutting down */
9742 /* Is the stream no. valid? */
9743 if (srcv->sinfo_stream >= asoc->streamoutcnt) {
9744 /* Invalid stream number */
9749 if (asoc->strmout == NULL) {
9750 /* huh? software error */
9752 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
9753 printf("software error in sctp_copy_it_in\n");
9760 if ((srcv->sinfo_flags & MSG_EOF) &&
9761 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) &&
9772 /* save off the tag */
9773 my_vtag = asoc->my_vtag;
9774 strq = &asoc->strmout[srcv->sinfo_stream];
9775 /* First lets figure out the "chunking" point */
9776 frag_size = sctp_get_frag_point(stcb, asoc);
9778 /* two choices here, it all fits in one chunk or
9779 * we need multiple chunks.
9782 SOCKBUF_UNLOCK(&so->so_snd);
9783 if (tot_out <= frag_size) {
9784 /* no need to setup a template */
9785 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
9788 SOCKBUF_LOCK(&so->so_snd);
9791 sctppcbinfo.ipi_count_chunk++;
9792 sctppcbinfo.ipi_gencnt_chunk++;
9793 asoc->chunks_on_out_queue++;
9794 MGETHDR(mm, M_WAIT, MT_DATA);
9799 error = sctp_copy_one(mm, uio, tot_out, resv_in_first, &mbcnt_e);
9802 sctp_prepare_chunk(chk, stcb, srcv, strq, net);
9803 chk->mbcnt = mbcnt_e;
9806 mm->m_pkthdr.len = tot_out;
9810 /* the actual chunk flags */
9811 chk->rec.data.rcv_flags |= SCTP_DATA_NOT_FRAG;
9812 chk->whoTo->ref_count++;
9814 /* fix up the send_size if it is not present */
9815 chk->send_size = tot_out;
9816 chk->book_size = chk->send_size;
9817 /* ok, we are commited */
9818 if ((srcv->sinfo_flags & MSG_UNORDERED) == 0) {
9819 /* bump the ssn if we are unordered. */
9820 strq->next_sequence_sent++;
9822 if (chk->flags & SCTP_PR_SCTP_BUFFER) {
9823 asoc->sent_queue_cnt_removeable++;
9825 #if defined(__NetBSD__) || defined(__OpenBSD__)
9830 if ((asoc->state == 0) ||
9831 (my_vtag != asoc->my_vtag) ||
9832 (so != inp->sctp_socket) ||
9833 (inp->sctp_socket == 0)) {
9834 /* connection was aborted */
9839 asoc->stream_queue_cnt++;
9840 TAILQ_INSERT_TAIL(&strq->outqueue, chk, sctp_next);
9841 /* now check if this stream is on the wheel */
9842 if ((strq->next_spoke.tqe_next == NULL) &&
9843 (strq->next_spoke.tqe_prev == NULL)) {
9844 /* Insert it on the wheel since it is not
9847 sctp_insert_on_wheel(asoc, strq);
9852 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
9853 sctppcbinfo.ipi_count_chunk--;
9854 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
9855 panic("Chunk count is negative");
9857 SOCKBUF_LOCK(&so->so_snd);
9861 /* we need to setup a template */
9862 struct sctp_tmit_chunk template;
9863 struct sctpchunk_listhead tmp;
9865 /* setup the template */
9866 sctp_prepare_chunk(&template, stcb, srcv, strq, net);
9868 /* Prepare the temp list */
9871 /* Template is complete, now time for the work */
9872 while (tot_out > 0) {
9874 chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
9877 * ok we must spin through and dump anything
9878 * we have allocated and then jump to the
9883 sctppcbinfo.ipi_count_chunk++;
9884 asoc->chunks_on_out_queue++;
9886 sctppcbinfo.ipi_gencnt_chunk++;
9888 chk->whoTo->ref_count++;
9889 MGETHDR(chk->data, M_WAIT, MT_DATA);
9890 if (chk->data == NULL) {
9894 tot_demand = min(tot_out, frag_size);
9895 error = sctp_copy_one(chk->data, uio, tot_demand , resv_in_first, &mbcnt_e);
9898 /* now fix the chk->send_size */
9899 chk->mbcnt = mbcnt_e;
9902 chk->send_size = tot_demand;
9903 chk->data->m_pkthdr.len = tot_demand;
9904 chk->book_size = chk->send_size;
9905 if (chk->flags & SCTP_PR_SCTP_BUFFER) {
9906 asoc->sent_queue_cnt_removeable++;
9908 TAILQ_INSERT_TAIL(&tmp, chk, sctp_next);
9909 tot_out -= tot_demand;
9911 /* Now the tmp list holds all chunks and data */
9912 if ((srcv->sinfo_flags & MSG_UNORDERED) == 0) {
9913 /* bump the ssn if we are unordered. */
9914 strq->next_sequence_sent++;
9916 /* Mark the first/last flags. This will
9917 * result int a 3 for a single item on the list
9919 chk = TAILQ_FIRST(&tmp);
9920 chk->rec.data.rcv_flags |= SCTP_DATA_FIRST_FRAG;
9921 chk = TAILQ_LAST(&tmp, sctpchunk_listhead);
9922 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
9924 /* now move it to the streams actual queue */
9925 /* first stop protocol processing */
9926 #if defined(__NetBSD__) || defined(__OpenBSD__)
9931 if ((asoc->state == 0) ||
9932 (my_vtag != asoc->my_vtag) ||
9933 (so != inp->sctp_socket) ||
9934 (inp->sctp_socket == 0)) {
9935 /* connection was aborted */
9940 chk = TAILQ_FIRST(&tmp);
9942 chk->data->m_nextpkt = 0;
9943 TAILQ_REMOVE(&tmp, chk, sctp_next);
9944 asoc->stream_queue_cnt++;
9945 TAILQ_INSERT_TAIL(&strq->outqueue, chk, sctp_next);
9946 chk = TAILQ_FIRST(&tmp);
9948 /* now check if this stream is on the wheel */
9949 if ((strq->next_spoke.tqe_next == NULL) &&
9950 (strq->next_spoke.tqe_prev == NULL)) {
9951 /* Insert it on the wheel since it is not
9954 sctp_insert_on_wheel(asoc, strq);
9956 /* Ok now we can allow pping */
9960 SOCKBUF_LOCK(&so->so_snd);
9961 chk = TAILQ_FIRST(&tmp);
9964 sctp_m_freem(chk->data);
9967 TAILQ_REMOVE(&tmp, chk, sctp_next);
9968 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
9969 sctppcbinfo.ipi_count_chunk--;
9970 asoc->chunks_on_out_queue--;
9971 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
9972 panic("Chunk count is negative");
9974 sctppcbinfo.ipi_gencnt_chunk++;
9975 chk = TAILQ_FIRST(&tmp);
9981 #ifdef SCTP_MBCNT_LOGGING
9982 sctp_log_mbcnt(SCTP_LOG_MBCNT_INCREASE,
9983 asoc->total_output_queue_size,
9985 asoc->total_output_mbuf_queue_size,
9988 #if defined(__NetBSD__) || defined(__OpenBSD__)
9993 SOCKBUF_LOCK(&so->so_snd);
9994 asoc->total_output_queue_size += dataout;
9995 asoc->total_output_mbuf_queue_size += mbcnt;
9996 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
9997 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
9998 so->so_snd.sb_cc += dataout;
9999 so->so_snd.sb_mbcnt += mbcnt;
10001 if ((srcv->sinfo_flags & MSG_EOF) &&
10002 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)
10004 int some_on_streamwheel = 0;
10006 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
10007 /* Check to see if some data queued */
10008 struct sctp_stream_out *outs;
10009 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
10010 if (!TAILQ_EMPTY(&outs->outqueue)) {
10011 some_on_streamwheel = 1;
10016 if (TAILQ_EMPTY(&asoc->send_queue) &&
10017 TAILQ_EMPTY(&asoc->sent_queue) &&
10018 (some_on_streamwheel == 0)) {
10019 /* there is nothing queued to send, so I'm done... */
10020 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
10021 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
10022 /* only send SHUTDOWN the first time through */
10024 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
10025 printf("%s:%d sends a shutdown\n",
10031 sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
10032 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
10033 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb,
10034 asoc->primary_destination);
10035 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
10036 asoc->primary_destination);
10040 * we still got (or just got) data to send, so set
10044 * XXX sockets draft says that MSG_EOF should be sent
10045 * with no data. currently, we will allow user data
10046 * to be sent first and move to SHUTDOWN-PENDING
10048 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
10053 if (sctp_debug_on & SCTP_DEBUG_OUTPUT2) {
10054 printf("++total out:%d total_mbuf_out:%d\n",
10055 (int)asoc->total_output_queue_size,
10056 (int)asoc->total_output_mbuf_queue_size);
10061 sbunlock(&so->so_snd);
10063 SOCKBUF_UNLOCK(&so->so_snd);
10072 sctp_sosend(struct socket *so,
10074 struct mbuf *addr_mbuf,
10076 struct sockaddr *addr,
10080 struct mbuf *control,
10081 #if defined(__NetBSD__) || defined(__APPLE__)
10085 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
10093 unsigned int sndlen;
10094 int error, use_rcvinfo;
10095 int s, queue_only = 0, queue_only_for_init=0;
10098 struct sctp_inpcb *inp;
10099 struct sctp_tcb *stcb=NULL;
10100 struct sctp_sndrcvinfo srcv;
10101 struct timeval now;
10102 struct sctp_nets *net;
10103 struct sctp_association *asoc;
10104 struct sctp_inpcb *t_inp;
10105 int create_lock_applied = 0;
10106 #if defined(__APPLE__)
10107 struct proc *p = current_proc();
10108 #elif defined(__NetBSD__)
10109 struct proc *p = curproc; /* XXX */
10110 struct sockaddr *addr = NULL;
10112 addr = mtod(addr_mbuf, struct sockaddr *);
10115 error = use_rcvinfo = 0;
10119 t_inp = inp = (struct sctp_inpcb *)so->so_pcb;
10121 sndlen = uio->uio_resid;
10123 sndlen = top->m_pkthdr.len;
10126 #if defined(__NetBSD__) || defined(__OpenBSD__)
10132 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
10133 (inp->sctp_flags & SCTP_PCB_FLAGS_ACCEPTING)) {
10134 /* The listner can NOT send */
10140 SCTP_ASOC_CREATE_LOCK(inp);
10141 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
10142 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
10143 /* Should I really unlock ? */
10149 create_lock_applied = 1;
10150 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
10151 (addr->sa_family == AF_INET6)) {
10157 /* now we must find the assoc */
10158 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
10159 SCTP_INP_RLOCK(inp);
10160 stcb = LIST_FIRST(&inp->sctp_asoc_list);
10161 if (stcb == NULL) {
10162 SCTP_INP_RUNLOCK(inp);
10167 SCTP_TCB_LOCK(stcb);
10168 SCTP_INP_RUNLOCK(inp);
10169 net = stcb->asoc.primary_destination;
10173 /* process cmsg snd/rcv info (maybe a assoc-id) */
10174 if (sctp_find_cmsg(SCTP_SNDRCV, (void *)&srcv, control,
10177 if (srcv.sinfo_flags & MSG_SENDALL) {
10178 /* its a sendall */
10179 sctppcbinfo.mbuf_track--;
10180 sctp_m_freem(control);
10182 if (create_lock_applied) {
10183 SCTP_ASOC_CREATE_UNLOCK(inp);
10184 create_lock_applied = 0;
10186 return (sctp_sendall(inp, uio, top, &srcv));
10191 if (stcb == NULL) {
10192 /* Need to do a lookup */
10193 if (use_rcvinfo && srcv.sinfo_assoc_id) {
10194 stcb = sctp_findassociation_ep_asocid(inp, srcv.sinfo_assoc_id);
10196 * Question: Should I error here if the assoc_id is
10197 * no longer valid? i.e. I can't find it?
10201 /* Must locate the net structure */
10202 net = sctp_findnet(stcb, addr);
10205 if (stcb == NULL) {
10206 if (addr != NULL) {
10207 /* Since we did not use findep we must
10208 * increment it, and if we don't find a
10209 * tcb decrement it.
10211 SCTP_INP_WLOCK(inp);
10212 SCTP_INP_INCR_REF(inp);
10213 SCTP_INP_WUNLOCK(inp);
10214 stcb = sctp_findassociation_ep_addr(&t_inp, addr, &net, NULL, NULL);
10215 if (stcb == NULL) {
10216 SCTP_INP_WLOCK(inp);
10217 SCTP_INP_DECR_REF(inp);
10218 SCTP_INP_WUNLOCK(inp);
10223 if ((stcb == NULL) &&
10224 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)) {
10228 } else if ((stcb == NULL) && (addr == NULL)) {
10232 } else if (stcb == NULL) {
10233 /* UDP style, we must go ahead and start the INIT process */
10234 if ((use_rcvinfo) &&
10235 (srcv.sinfo_flags & MSG_ABORT)) {
10236 /* User asks to abort a non-existant asoc */
10241 /* get an asoc/stcb struct */
10242 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0);
10243 if (stcb == NULL) {
10244 /* Error is setup for us in the call */
10248 if (create_lock_applied) {
10249 SCTP_ASOC_CREATE_UNLOCK(inp);
10250 create_lock_applied = 0;
10252 printf("Huh-3? create lock should have been on??\n");
10254 /* Turn on queue only flag to prevent data from being sent */
10256 asoc = &stcb->asoc;
10257 asoc->state = SCTP_STATE_COOKIE_WAIT;
10258 SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
10260 /* see if a init structure exists in cmsg headers */
10261 struct sctp_initmsg initm;
10263 if (sctp_find_cmsg(SCTP_INIT, (void *)&initm, control, sizeof(initm))) {
10264 /* we have an INIT override of the default */
10265 if (initm.sinit_max_attempts)
10266 asoc->max_init_times = initm.sinit_max_attempts;
10267 if (initm.sinit_num_ostreams)
10268 asoc->pre_open_streams = initm.sinit_num_ostreams;
10269 if (initm.sinit_max_instreams)
10270 asoc->max_inbound_streams = initm.sinit_max_instreams;
10271 if (initm.sinit_max_init_timeo)
10272 asoc->initial_init_rto_max = initm.sinit_max_init_timeo;
10273 if (asoc->streamoutcnt < asoc->pre_open_streams) {
10274 /* Default is NOT correct */
10276 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
10277 printf("Ok, defout:%d pre_open:%d\n",
10278 asoc->streamoutcnt, asoc->pre_open_streams);
10281 FREE(asoc->strmout, M_PCB);
10282 asoc->strmout = NULL;
10283 asoc->streamoutcnt = asoc->pre_open_streams;
10285 /* What happesn if this fails? .. we panic ...*/
10286 MALLOC(asoc->strmout,
10287 struct sctp_stream_out *,
10288 asoc->streamoutcnt *
10289 sizeof(struct sctp_stream_out),
10291 for (i = 0; i < asoc->streamoutcnt; i++) {
10293 * inbound side must be set to 0xffff,
10294 * also NOTE when we get the INIT-ACK
10295 * back (for INIT sender) we MUST
10296 * reduce the count (streamoutcnt) but
10297 * first check if we sent to any of the
10298 * upper streams that were dropped (if
10299 * some were). Those that were dropped
10300 * must be notified to the upper layer
10301 * as failed to send.
10303 asoc->strmout[i].next_sequence_sent = 0x0;
10304 TAILQ_INIT(&asoc->strmout[i].outqueue);
10305 asoc->strmout[i].stream_no = i;
10306 asoc->strmout[i].next_spoke.tqe_next = 0;
10307 asoc->strmout[i].next_spoke.tqe_prev = 0;
10313 /* out with the INIT */
10314 queue_only_for_init = 1;
10315 sctp_send_initiate(inp, stcb);
10317 * we may want to dig in after this call and adjust the MTU
10318 * value. It defaulted to 1500 (constant) but the ro structure
10319 * may now have an update and thus we may need to change it
10320 * BEFORE we append the message.
10322 net = stcb->asoc.primary_destination;
10323 asoc = &stcb->asoc;
10325 asoc = &stcb->asoc;
10327 if (create_lock_applied) {
10328 SCTP_ASOC_CREATE_UNLOCK(inp);
10329 create_lock_applied = 0;
10331 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
10332 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
10335 if (use_rcvinfo == 0) {
10336 /* Grab the default stuff from the asoc */
10337 srcv = stcb->asoc.def_send;
10339 /* we are now done with all control */
10341 sctp_m_freem(control);
10345 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) ||
10346 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) ||
10347 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) ||
10348 (asoc->state & SCTP_STATE_SHUTDOWN_PENDING)) {
10349 if ((use_rcvinfo) &&
10350 (srcv.sinfo_flags & MSG_ABORT)) {
10353 error = ECONNRESET;
10358 /* Ok, we will attempt a msgsnd :> */
10360 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
10361 p->td_proc->p_stats->p_ru.ru_msgsnd++;
10363 p->p_stats->p_ru.ru_msgsnd++;
10367 if (net && ((srcv.sinfo_flags & MSG_ADDR_OVER))) {
10368 /* we take the override or the unconfirmed */
10371 net = stcb->asoc.primary_destination;
10376 /* Must copy it all in from user land. The
10377 * socket buf is locked but we don't suspend
10378 * protocol processing until we are ready to
10382 error = sctp_copy_it_in(inp, stcb, asoc, net, &srcv, uio, flags);
10386 /* Here we must either pull in the user data to chunk
10387 * buffers, or use top to do a msg_append.
10389 error = sctp_msg_append(stcb, net, top, &srcv, flags);
10393 /* zap the top since it is now being used */
10397 if (net->flight_size > net->cwnd) {
10398 sctp_pegs[SCTP_SENDTO_FULL_CWND]++;
10401 } else if (asoc->ifp_had_enobuf) {
10402 sctp_pegs[SCTP_QUEONLY_BURSTLMT]++;
10405 un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
10406 ((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count) * sizeof(struct sctp_data_chunk)) +
10407 SCTP_MED_OVERHEAD);
10409 if (((inp->sctp_flags & SCTP_PCB_FLAGS_NODELAY) == 0) &&
10410 (stcb->asoc.total_flight > 0) &&
10411 (un_sent < (int)stcb->asoc.smallest_mtu)) {
10413 /* Ok, Nagle is set on and we have data outstanding. Don't
10414 * send anything and let SACKs drive out the data unless we
10415 * have a "full" segment to send.
10417 sctp_pegs[SCTP_NAGLE_NOQ]++;
10420 sctp_pegs[SCTP_NAGLE_OFF]++;
10423 if (queue_only_for_init) {
10424 /* It is possible to have a turn around of the
10425 * INIT/INIT-ACK/COOKIE before I have a chance to
10426 * copy in the data. In such a case I DO want to
10427 * send it out by reversing the queue only flag.
10429 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) ||
10430 (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_ECHOED)) {
10431 /* yep, reverse it */
10436 if ((queue_only == 0) && (stcb->asoc.peers_rwnd && un_sent)) {
10437 /* we can attempt to send too.*/
10439 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
10440 printf("USR Send calls sctp_chunk_output\n");
10443 #if defined(__NetBSD__) || defined(__OpenBSD__)
10448 sctp_pegs[SCTP_OUTPUT_FRM_SND]++;
10449 sctp_chunk_output(inp, stcb, 0);
10451 } else if ((queue_only == 0) &&
10452 (stcb->asoc.peers_rwnd == 0) &&
10453 (stcb->asoc.total_flight == 0)) {
10454 /* We get to have a probe outstanding */
10455 #if defined(__NetBSD__) || defined(__OpenBSD__)
10460 sctp_from_user_send = 1;
10461 sctp_chunk_output(inp, stcb, 0);
10462 sctp_from_user_send = 0;
10465 } else if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
10466 int num_out, reason, cwnd_full;
10467 /* Here we do control only */
10468 #if defined(__NetBSD__) || defined(__OpenBSD__)
10473 sctp_med_chunk_output(inp, stcb, &stcb->asoc, &num_out,
10474 &reason, 1, &cwnd_full, 1, &now, &now_filled);
10478 if (sctp_debug_on & SCTP_DEBUG_OUTPUT1) {
10479 printf("USR Send complete qo:%d prw:%d unsent:%d tf:%d cooq:%d toqs:%d \n",
10480 queue_only, stcb->asoc.peers_rwnd, un_sent,
10481 stcb->asoc.total_flight, stcb->asoc.chunks_on_out_queue,
10482 stcb->asoc.total_output_queue_size);
10486 if (create_lock_applied) {
10487 SCTP_ASOC_CREATE_UNLOCK(inp);
10488 create_lock_applied = 0;
10491 SCTP_TCB_UNLOCK(stcb);
10495 sctp_m_freem(control);