1 /* $KAME: sctp_usrreq.c,v 1.47 2005/03/06 16:04:18 itojun Exp $ */
4 * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Cisco Systems, Inc.
18 * 4. Neither the name of the project nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 #if !(defined(__OpenBSD__) || defined(__APPLE__))
35 #include "opt_ipsec.h"
37 #if defined(__FreeBSD__) || defined(__DragonFly__)
38 #include "opt_inet6.h"
41 #if defined(__NetBSD__)
47 #elif !defined(__OpenBSD__)
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/kernel.h>
54 #include <sys/malloc.h>
56 #include <sys/domain.h>
59 #include <sys/protosw.h>
60 #include <sys/socket.h>
61 #include <sys/socketvar.h>
62 #include <sys/socketvar2.h>
63 #include <sys/sysctl.h>
64 #include <sys/syslog.h>
66 #include <sys/thread2.h>
67 #include <sys/msgport2.h>
70 #include <net/if_types.h>
71 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
72 #include <net/if_var.h>
74 #include <net/route.h>
75 #include <netinet/in.h>
76 #include <netinet/in_systm.h>
77 #include <netinet/ip.h>
78 #include <netinet/ip6.h>
79 #include <netinet/in_pcb.h>
80 #include <netinet/in_var.h>
81 #include <netinet/ip_var.h>
82 #include <netinet6/ip6_var.h>
83 #include <netinet6/in6_var.h>
85 #include <netinet/ip_icmp.h>
86 #include <netinet/icmp_var.h>
87 #include <netinet/sctp_pcb.h>
88 #include <netinet/sctp_header.h>
89 #include <netinet/sctp_var.h>
90 #include <netinet/sctp_output.h>
91 #include <netinet/sctp_uio.h>
92 #include <netinet/sctp_asconf.h>
93 #include <netinet/sctputil.h>
94 #include <netinet/sctp_indata.h>
95 #include <netinet/sctp_asconf.h>
98 #include <netinet6/ipsec.h>
99 #include <netproto/key/key.h>
105 #include <net/net_osdep.h>
107 #if defined(HAVE_NRL_INPCB) || defined(__FreeBSD__) || defined(__DragonFly__)
112 #define sotoin6pcb sotoinpcb
117 extern u_int32_t sctp_debug_on;
118 #endif /* SCTP_DEBUG */
121 * sysctl tunable variables
/*
 * Global sysctl-tunable defaults for the SCTP stack.  Each variable is
 * exported to userland through the SYSCTL_* declarations later in this
 * file and is read as the per-endpoint default when associations are set
 * up.  NOTE(review): this extracted listing has lines elided (e.g. the
 * conditional arms around the recvspace sizeof() terms); code is left
 * exactly as found.
 */
123 int sctp_auto_asconf = SCTP_DEFAULT_AUTO_ASCONF;
124 int sctp_max_burst_default = SCTP_DEF_MAX_BURST;
/* rwnd debit per chunk: charged as one mbuf of overhead per chunk sent */
125 int sctp_peer_chunk_oh = sizeof(struct mbuf);
126 int sctp_strict_init = 1;
/* skip checksum on loopback — the data never leaves the host */
127 int sctp_no_csum_on_loopback = 1;
128 unsigned int sctp_max_chunks_on_queue = SCTP_ASOC_MAX_CHUNKS_ON_QUEUE;
129 int sctp_sendspace = (128 * 1024);
/* recvspace is scaled up to account for per-message address overhead;
 * presumably the v6/v4 sockaddr terms are selected by elided #ifdefs */
130 int sctp_recvspace = 128 * (1024 +
132 sizeof(struct sockaddr_in6)
134 sizeof(struct sockaddr_in)
137 int sctp_strict_sacks = 0;
139 int sctp_ecn_nonce = 0;
/* timer and retransmission defaults (msec/sec units per the sysctl
 * description strings declared later in this file) */
141 unsigned int sctp_delayed_sack_time_default = SCTP_RECV_MSEC;
142 unsigned int sctp_heartbeat_interval_default = SCTP_HB_DEFAULT_MSEC;
143 unsigned int sctp_pmtu_raise_time_default = SCTP_DEF_PMTU_RAISE_SEC;
144 unsigned int sctp_shutdown_guard_time_default = SCTP_DEF_MAX_SHUTDOWN_SEC;
145 unsigned int sctp_secret_lifetime_default = SCTP_DEFAULT_SECRET_LIFE_SEC;
146 unsigned int sctp_rto_max_default = SCTP_RTO_UPPER_BOUND;
147 unsigned int sctp_rto_min_default = SCTP_RTO_LOWER_BOUND;
148 unsigned int sctp_rto_initial_default = SCTP_RTO_INITIAL;
149 unsigned int sctp_init_rto_max_default = SCTP_RTO_UPPER_BOUND;
150 unsigned int sctp_valid_cookie_life_default = SCTP_DEFAULT_COOKIE_LIFE;
151 unsigned int sctp_init_rtx_max_default = SCTP_DEF_MAX_INIT;
152 unsigned int sctp_assoc_rtx_max_default = SCTP_DEF_MAX_SEND;
153 unsigned int sctp_path_rtx_max_default = SCTP_DEF_MAX_SEND/2;
154 unsigned int sctp_nr_outgoing_streams_default = SCTP_OSTREAM_INITIAL;
160 #define nmbclusters nmbclust
162 /* Init the SCTP pcb in sctp_pcb.c */
168 if (nmbclusters > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
169 sctp_max_chunks_on_queue = nmbclusters;
171 /* if (nmbclust > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
172 sctp_max_chunks_on_queue = nmbclust; FIX ME */
173 sctp_max_chunks_on_queue = nmbclust * 2;
176 * Allow a user to take no more than 1/2 the number of clusters
177 * or the SB_MAX whichever is smaller for the send window.
179 sb_max_adj = (u_long)((u_quad_t)(SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
180 sctp_sendspace = min((min(SB_MAX, sb_max_adj)),
182 ((nmbclusters/2) * SCTP_DEFAULT_MAXSEGMENT));
184 ((nmbclust/2) * SCTP_DEFAULT_MAXSEGMENT));
187 * Now for the recv window, should we take the same amount?
188 * or should I do 1/2 the SB_MAX instead in the SB_MAX min above.
189 * For now I will just copy.
191 sctp_recvspace = sctp_sendspace;
/*
 * ip_2_ip6_hdr() - synthesize an IPv6 header from an IPv4 header.
 *
 * Zeroes the target ip6_hdr, copies payload length, next-protocol and
 * TTL/hop-limit across, and embeds the IPv4 source/destination addresses
 * into the low 32-bit words of the v6 addresses (the s6_addr32[2] word is
 * also set — presumably to the 0xffff mapped-address marker on the elided
 * line; verify against the full source).
 * NOTE(review): lines are elided in this listing (return type, braces);
 * code left exactly as extracted.
 */
199 ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip)
201 bzero(ip6, sizeof(*ip6));
203 ip6->ip6_vfc = IPV6_VERSION;
204 ip6->ip6_plen = ip->ip_len;
205 ip6->ip6_nxt = ip->ip_p;
206 ip6->ip6_hlim = ip->ip_ttl;
207 ip6->ip6_src.s6_addr32[2] = ip6->ip6_dst.s6_addr32[2] =
209 ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr;
210 ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr;
/*
 * sctp_split_chunks() - split a queued transmit chunk into two halves so
 * each fits under a newly lowered path MTU.
 *
 * Allocates a second sctp_tmit_chunk from the chunk zone, splits the mbuf
 * chain at the midpoint with m_split(), halves the send/book sizes of
 * both chunks, fixes up the FIRST/LAST fragment flags so the pair still
 * forms a valid fragment sequence, bumps the destination net's refcount
 * for the new chunk, and inserts the new chunk immediately after the
 * original on the stream output queue.
 *
 * On allocation failure of either the chunk or the mbuf split, the
 * original chunk is instead marked CHUNK_FLAGS_FRAGMENT_OK and left
 * whole (IP-level fragmentation will be permitted).
 * NOTE(review): lines are elided in this listing (returns, the new-chunk
 * field copy, closing braces); code left exactly as extracted.
 */
215 sctp_split_chunks(struct sctp_association *asoc,
216 struct sctp_stream_out *strm,
217 struct sctp_tmit_chunk *chk)
219 struct sctp_tmit_chunk *new_chk;
221 /* First we need a chunk */
222 new_chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
223 if (new_chk == NULL) {
/* no chunk available: fall back to allowing IP fragmentation */
224 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
227 sctppcbinfo.ipi_count_chunk++;
228 sctppcbinfo.ipi_gencnt_chunk++;
/* split the data mbuf chain at half the chunk's payload size */
232 new_chk->data = m_split(chk->data, (chk->send_size>>1), MB_DONTWAIT);
233 if (new_chk->data == NULL) {
/* split failed: undo the zone allocation and allow IP fragmentation */
235 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
236 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, new_chk);
237 sctppcbinfo.ipi_count_chunk--;
238 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
239 panic("Chunk count is negative");
241 sctppcbinfo.ipi_gencnt_chunk++;
245 /* Data is now split adjust sizes */
246 chk->send_size >>= 1;
247 new_chk->send_size >>= 1;
249 chk->book_size >>= 1;
250 new_chk->book_size >>= 1;
252 /* now adjust the marks */
/* original keeps (or gains) FIRST, loses LAST; new half gets LAST only */
253 chk->rec.data.rcv_flags |= SCTP_DATA_FIRST_FRAG;
254 chk->rec.data.rcv_flags &= ~SCTP_DATA_LAST_FRAG;
256 new_chk->rec.data.rcv_flags &= ~SCTP_DATA_FIRST_FRAG;
257 new_chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
259 /* Increase ref count if dest is set */
261 new_chk->whoTo->ref_count++;
263 /* now drop it on the end of the list*/
264 asoc->stream_queue_cnt++;
265 TAILQ_INSERT_AFTER(&strm->outqueue, chk, new_chk, sctp_next);
/*
 * sctp_notify_mbuf() - handle an ICMP "fragmentation needed" report for
 * an association (path MTU discovery feedback).
 *
 * Validates its arguments and checks the SCTP header's verification tag
 * against the association's peer_vtag (so spoofed ICMP cannot shrink the
 * MTU).  Extracts the router-advertised next-hop MTU from the ICMP
 * message (falling back to find_next_best_mtu() for old routers that
 * report zero), stops the PMTU-raise timer, lowers the destination's and
 * association's smallest MTU, marks now-oversized chunks on the send and
 * sent queues CHUNK_FLAGS_FRAGMENT_OK (sent-queue chunks are also marked
 * for immediate resend and removed from the flight-size accounting), and
 * splits oversized chunks still sitting on the stream wheel via
 * sctp_split_chunks().  Restarts the PMTU-raise timer and unlocks the TCB
 * before returning.
 * NOTE(review): this listing has elided lines (returns, several closing
 * braces, the totsz computation); code left exactly as extracted.
 */
269 sctp_notify_mbuf(struct sctp_inpcb *inp,
270 struct sctp_tcb *stcb,
271 struct sctp_nets *net,
/* bail on any NULL argument — caller may pass partial lookups */
281 if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
282 (ip == NULL) || (sh == NULL)) {
284 SCTP_TCB_UNLOCK(stcb);
287 /* First job is to verify the vtag matches what I would send */
288 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
289 SCTP_TCB_UNLOCK(stcb);
/* recover the icmp header that precedes the quoted IP header */
292 icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
294 if (icmph->icmp_type != ICMP_UNREACH) {
295 /* We only care about unreachable */
296 SCTP_TCB_UNLOCK(stcb);
299 if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
300 /* not a unreachable message due to frag. */
301 SCTP_TCB_UNLOCK(stcb);
/* next-hop MTU travels in the icmp_seq/nextmtu field */
305 nxtsz = ntohs(icmph->icmp_seq);
308 * old type router that does not tell us what the next size
309 * mtu is. Rats we will have to guess (in a educated fashion
312 nxtsz = find_next_best_mtu(totsz);
315 /* Stop any PMTU timer */
316 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, NULL);
318 /* Adjust destination size limit */
319 if (net->mtu > nxtsz) {
322 /* now what about the ep? */
323 if (stcb->asoc.smallest_mtu > nxtsz) {
324 struct sctp_tmit_chunk *chk, *nchk;
325 struct sctp_stream_out *strm;
326 /* Adjust that too */
327 stcb->asoc.smallest_mtu = nxtsz;
328 /* now off to subtract IP_DF flag if needed */
330 TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
331 if ((chk->send_size+IP_HDR_SIZE) > nxtsz) {
332 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
335 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
336 if ((chk->send_size+IP_HDR_SIZE) > nxtsz) {
338 * For this guy we also mark for immediate
339 * resend since we sent to big of chunk
341 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
342 if (chk->sent != SCTP_DATAGRAM_RESEND) {
343 stcb->asoc.sent_queue_retran_cnt++;
345 chk->sent = SCTP_DATAGRAM_RESEND;
346 chk->rec.data.doing_fast_retransmit = 0;
348 /* Clear any time so NO RTT is being done */
/* pull the chunk back out of the in-flight accounting, clamping at 0 */
350 stcb->asoc.total_flight -= chk->book_size;
351 if (stcb->asoc.total_flight < 0) {
352 stcb->asoc.total_flight = 0;
354 stcb->asoc.total_flight_count--;
355 if (stcb->asoc.total_flight_count < 0) {
356 stcb->asoc.total_flight_count = 0;
358 net->flight_size -= chk->book_size;
359 if (net->flight_size < 0) {
360 net->flight_size = 0;
/* unsent data on the stream wheel: split anything too large to fit */
364 TAILQ_FOREACH(strm, &stcb->asoc.out_wheel, next_spoke) {
365 chk = TAILQ_FIRST(&strm->outqueue);
367 nchk = TAILQ_NEXT(chk, sctp_next);
368 if ((chk->send_size+SCTP_MED_OVERHEAD) > nxtsz) {
369 sctp_split_chunks(&stcb->asoc, strm, chk);
375 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, NULL);
376 SCTP_TCB_UNLOCK(stcb);
/*
 * sctp_notify() - dispatch a non-PMTU ICMP error to an association.
 *
 * After validating arguments and the verification tag, reachability-class
 * errors (EHOSTUNREACH / EHOSTDOWN) mark the destination address
 * unreachable and notify the ULP of the interface-down event, while
 * ECONNREFUSED / ENOPROTOOPT are treated as an out-of-the-blue abort:
 * the association is notified of the fault and freed.  Any other error is
 * simply stored in so_error and the socket writers are woken.
 * NOTE(review): this listing has elided lines (parameter list including
 * 'error', 'sh' and 'to'; returns; closing braces); code left exactly as
 * extracted.
 */
381 sctp_notify(struct sctp_inpcb *inp,
385 struct sctp_tcb *stcb,
386 struct sctp_nets *net)
389 if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
390 (sh == NULL) || (to == NULL)) {
392 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
393 kprintf("sctp-notify, bad call\n");
395 #endif /* SCTP_DEBUG */
398 /* First job is to verify the vtag matches what I would send */
/* drop spoofed ICMP whose quoted header does not carry our peer vtag */
399 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
403 /* FIX ME FIX ME PROTOPT i.e. no SCTP should ALWAYS be an ABORT */
405 if ((error == EHOSTUNREACH) || /* Host is not reachable */
406 (error == EHOSTDOWN) || /* Host is down */
407 (error == ECONNREFUSED) || /* Host refused the connection, (not an abort?) */
408 (error == ENOPROTOOPT) /* SCTP is not present on host */
411 * Hmm reachablity problems we must examine closely.
412 * If its not reachable, we may have lost a network.
413 * Or if there is NO protocol at the other end named SCTP.
414 * well we consider it a OOTB abort.
416 if ((error == EHOSTUNREACH) || (error == EHOSTDOWN)) {
417 if (net->dest_state & SCTP_ADDR_REACHABLE) {
418 /* Ok that destination is NOT reachable */
419 net->dest_state &= ~SCTP_ADDR_REACHABLE;
420 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
/* force the failure-threshold accounting over the limit */
421 net->error_count = net->failure_threshold + 1;
422 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
423 stcb, SCTP_FAILED_THRESHOLD,
427 SCTP_TCB_UNLOCK(stcb);
430 * Here the peer is either playing tricks on us,
431 * including an address that belongs to someone who
432 * does not support SCTP OR was a userland
433 * implementation that shutdown and now is dead. In
434 * either case treat it like a OOTB abort with no TCB
436 sctp_abort_notification(stcb, SCTP_PEER_FAULTY);
437 sctp_free_assoc(inp, stcb);
438 /* no need to unlock here, since the TCB is gone */
441 /* Send all others to the app */
442 if (inp->sctp_socket) {
443 SOCK_LOCK(inp->sctp_socket);
444 inp->sctp_socket->so_error = error;
445 sctp_sowwakeup(inp, inp->sctp_socket);
446 SOCK_UNLOCK(inp->sctp_socket);
449 SCTP_TCB_UNLOCK(stcb);
/*
 * sctp_ctlinput() - protocol control-input handler (DragonFly netmsg
 * entry point), invoked for ICMP errors that quote an SCTP packet.
 *
 * Ignores non-AF_INET / wildcard addresses, redirects, and commands with
 * no errno mapping.  Otherwise it rebuilds the failed packet's address
 * pair from the quoted IP+SCTP headers (note 'to'/'from' are deliberately
 * swapped for the lookup since the quoted packet was outbound), looks up
 * the association, and dispatches: PRC_MSGSIZE goes to sctp_notify_mbuf()
 * for PMTU handling, everything else to sctp_notify() with the mapped
 * errno.  Drops the inp ref taken by the lookup when no TCB was found,
 * and always replies to the lwkt message.
 * NOTE(review): this listing has elided lines (declarations of sh/cm,
 * gotos, closing braces, the PRC_HOSTDEAD ip reset); code left exactly
 * as extracted.
 */
454 sctp_ctlinput(netmsg_t msg)
456 int cmd = msg->ctlinput.nm_cmd;
457 struct sockaddr *sa = msg->ctlinput.nm_arg;
458 struct ip *ip = msg->ctlinput.nm_extra;
461 if (sa->sa_family != AF_INET ||
462 ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
466 if (PRC_IS_REDIRECT(cmd)) {
468 } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
472 struct sctp_inpcb *inp;
473 struct sctp_tcb *stcb;
474 struct sctp_nets *net;
475 struct sockaddr_in to, from;
/* the SCTP common header sits right after the quoted IP header */
477 sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
478 bzero(&to, sizeof(to));
479 bzero(&from, sizeof(from));
480 from.sin_family = to.sin_family = AF_INET;
481 from.sin_len = to.sin_len = sizeof(to);
482 from.sin_port = sh->src_port;
483 from.sin_addr = ip->ip_src;
484 to.sin_port = sh->dest_port;
485 to.sin_addr = ip->ip_dst;
488 * 'to' holds the dest of the packet that failed to be sent.
489 * 'from' holds our local endpoint address.
490 * Thus we reverse the to and the from in the lookup.
492 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
493 (struct sockaddr *)&to,
495 if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
496 if (cmd != PRC_MSGSIZE) {
498 if (cmd == PRC_HOSTDEAD) {
501 cm = inetctlerrmap[cmd];
503 sctp_notify(inp, cm, sh,
504 (struct sockaddr *)&to, stcb,
507 /* handle possible ICMP size messages */
508 sctp_notify_mbuf(inp, stcb, net, ip, sh);
511 #if (defined(__FreeBSD__) && __FreeBSD_version < 500000) || defined(__DragonFly__)
512 /* XXX must be fixed for 5.x and higher, leave for 4.x */
513 if (PRC_IS_REDIRECT(cmd) && inp) {
514 in_rtchange((struct inpcb *)inp,
518 if ((stcb == NULL) && (inp != NULL)) {
519 /* reduce ref-count */
521 SCTP_INP_DECR_REF(inp);
522 SCTP_INP_WUNLOCK(inp);
528 lwkt_replymsg(&msg->lmsg, 0);
531 #if defined(__FreeBSD__) || defined(__DragonFly__)
/*
 * sctp_getcred() - sysctl handler: return the ucred of the socket owning
 * a given SCTP association (net.inet.sctp.getcred).
 *
 * Requires root privilege (priv_check/suser depending on platform), reads
 * a two-element sockaddr_in pair from userland, looks up the matching
 * association, and copies the owning socket's so_cred out.  When only the
 * inp was found (no TCB) the lookup reference is dropped before erroring.
 * NOTE(review): this listing has elided lines (local 'error', returns,
 * ENOENT path, closing braces); code left exactly as extracted.
 */
533 sctp_getcred(SYSCTL_HANDLER_ARGS)
535 struct sockaddr_in addrs[2];
536 struct sctp_inpcb *inp;
537 struct sctp_nets *net;
538 struct sctp_tcb *stcb;
541 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
542 error = priv_check(req->td, PRIV_ROOT);
544 error = suser(req->p);
548 error = SYSCTL_IN(req, addrs, sizeof(addrs));
552 stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
555 if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
556 if ((inp != NULL) && (stcb == NULL)) {
557 /* reduce ref-count */
559 SCTP_INP_DECR_REF(inp);
560 SCTP_INP_WUNLOCK(inp);
565 error = SYSCTL_OUT(req, inp->sctp_socket->so_cred, sizeof(struct ucred));
566 SCTP_TCB_UNLOCK(stcb);
/* export the handler as net.inet.sctp.getcred */
571 SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE|CTLFLAG_RW,
572 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");
573 #endif /* #if defined(__FreeBSD__) || defined(__DragonFly__) */
/*
 * Sysctl tree for SCTP: creates the net.inet.sctp node and exposes each
 * of the tunable globals defined near the top of this file as a RW knob.
 * Each SYSCTL_* line is self-describing via its description string.
 */
578 #if defined(__FreeBSD__) || defined (__APPLE__) || defined(__DragonFly__)
580 SYSCTL_DECL(_net_inet);
582 SYSCTL_NODE(_net_inet, OID_AUTO, sctp, CTLFLAG_RD, 0,
585 SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxdgram, CTLFLAG_RW,
586 &sctp_sendspace, 0, "Maximum outgoing SCTP buffer size");
588 SYSCTL_INT(_net_inet_sctp, OID_AUTO, recvspace, CTLFLAG_RW,
589 &sctp_recvspace, 0, "Maximum incoming SCTP buffer size");
591 SYSCTL_INT(_net_inet_sctp, OID_AUTO, auto_asconf, CTLFLAG_RW,
592 &sctp_auto_asconf, 0, "Enable SCTP Auto-ASCONF");
594 SYSCTL_INT(_net_inet_sctp, OID_AUTO, ecn_enable, CTLFLAG_RW,
595 &sctp_ecn, 0, "Enable SCTP ECN");
597 SYSCTL_INT(_net_inet_sctp, OID_AUTO, ecn_nonce, CTLFLAG_RW,
598 &sctp_ecn_nonce, 0, "Enable SCTP ECN Nonce");
600 SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_sacks, CTLFLAG_RW,
601 &sctp_strict_sacks, 0, "Enable SCTP Strict SACK checking");
603 SYSCTL_INT(_net_inet_sctp, OID_AUTO, loopback_nocsum, CTLFLAG_RW,
604 &sctp_no_csum_on_loopback, 0,
605 "Enable NO Csum on packets sent on loopback");
607 SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_init, CTLFLAG_RW,
608 &sctp_strict_init, 0,
609 "Enable strict INIT/INIT-ACK singleton enforcement");
611 SYSCTL_INT(_net_inet_sctp, OID_AUTO, peer_chkoh, CTLFLAG_RW,
612 &sctp_peer_chunk_oh, 0,
613 "Amount to debit peers rwnd per chunk sent");
615 SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxburst, CTLFLAG_RW,
616 &sctp_max_burst_default, 0,
617 "Default max burst for sctp endpoints");
619 SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxchunks, CTLFLAG_RW,
620 &sctp_max_chunks_on_queue, 0,
621 "Default max chunks on queue per asoc");
623 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, delayed_sack_time, CTLFLAG_RW,
624 &sctp_delayed_sack_time_default, 0,
625 "Default delayed SACK timer in msec");
627 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, heartbeat_interval, CTLFLAG_RW,
628 &sctp_heartbeat_interval_default, 0,
629 "Default heartbeat interval in msec");
631 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, pmtu_raise_time, CTLFLAG_RW,
632 &sctp_pmtu_raise_time_default, 0,
633 "Default PMTU raise timer in sec");
635 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, shutdown_guard_time, CTLFLAG_RW,
636 &sctp_shutdown_guard_time_default, 0,
637 "Default shutdown guard timer in sec");
639 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, secret_lifetime, CTLFLAG_RW,
640 &sctp_secret_lifetime_default, 0,
641 "Default secret lifetime in sec");
643 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_max, CTLFLAG_RW,
644 &sctp_rto_max_default, 0,
645 "Default maximum retransmission timeout in msec");
647 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_min, CTLFLAG_RW,
648 &sctp_rto_min_default, 0,
649 "Default minimum retransmission timeout in msec");
651 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_initial, CTLFLAG_RW,
652 &sctp_rto_initial_default, 0,
653 "Default initial retransmission timeout in msec");
655 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, init_rto_max, CTLFLAG_RW,
656 &sctp_init_rto_max_default, 0,
657 "Default maximum retransmission timeout during association setup in msec");
659 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, valid_cookie_life, CTLFLAG_RW,
660 &sctp_valid_cookie_life_default, 0,
661 "Default cookie lifetime in sec");
663 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, init_rtx_max, CTLFLAG_RW,
664 &sctp_init_rtx_max_default, 0,
665 "Default maximum number of retransmission for INIT chunks");
667 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, assoc_rtx_max, CTLFLAG_RW,
668 &sctp_assoc_rtx_max_default, 0,
669 "Default maximum number of retransmissions per association");
671 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, path_rtx_max, CTLFLAG_RW,
672 &sctp_path_rtx_max_default, 0,
673 "Default maximum of retransmissions per path");
675 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, nr_outgoing_streams, CTLFLAG_RW,
676 &sctp_nr_outgoing_streams_default, 0,
677 "Default number of outgoing streams");
/* debug knob is only compiled in under SCTP_DEBUG (guard line elided) */
680 SYSCTL_INT(_net_inet_sctp, OID_AUTO, debug, CTLFLAG_RW,
681 &sctp_debug_on, 0, "Configure debug output");
682 #endif /* SCTP_DEBUG */
686 * NOTE: (so) is referenced from soabort*() and netmsg_pru_abort()
687 * will sofree() it when we return.
/*
 * sctp_abort() - pru_abort handler (netmsg entry point).
 *
 * Frees the endpoint's PCB with the "immediate/abort" flag (1).  The
 * socket itself is not freed here: as the comment above notes, soabort*()
 * / netmsg_pru_abort() will sofree() it after we reply.
 * NOTE(review): lines elided in this listing (error init, NULL-pcb
 * check); code left exactly as extracted.
 */
690 sctp_abort(netmsg_t msg)
692 struct socket *so = msg->abort.base.nm_so;
693 struct sctp_inpcb *inp;
696 inp = (struct sctp_inpcb *)so->so_pcb;
698 sctp_inpcb_free(inp, 1);
703 lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_attach() - pru_attach handler: create an SCTP endpoint for a new
 * socket.
 *
 * Reserves socket buffer space from the sctp_sendspace/sctp_recvspace
 * tunables, marks both buffers SSB_PREALLOC, allocates the SCTP inpcb,
 * clears the v6-bound flag (this is the v4 attach path), sets the default
 * IP TTL, and initializes an IPsec policy when compiled in (freeing the
 * PCB again on failure).  Replies to the netmsg with the result.
 * NOTE(review): lines elided in this listing (error checks/returns
 * between steps, #ifdef IPSEC guards); code left exactly as extracted.
 */
707 sctp_attach(netmsg_t msg)
709 struct socket *so = msg->attach.base.nm_so;
710 struct sctp_inpcb *inp;
711 struct inpcb *ip_inp;
714 inp = (struct sctp_inpcb *)so->so_pcb;
719 error = soreserve(so, sctp_sendspace, sctp_recvspace, NULL);
720 atomic_set_int(&so->so_rcv.ssb_flags, SSB_PREALLOC);
721 atomic_set_int(&so->so_snd.ssb_flags, SSB_PREALLOC);
725 error = sctp_inpcb_alloc(so);
728 inp = (struct sctp_inpcb *)so->so_pcb;
731 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */
732 ip_inp = &inp->ip_inp.inp;
733 ip_inp->inp_ip_ttl = ip_defttl;
736 #if !(defined(__OpenBSD__) || defined(__APPLE__))
737 error = ipsec_init_policy(so, &ip_inp->inp_sp);
739 sctp_inpcb_free(inp, 1);
744 SCTP_INP_WUNLOCK(inp);
745 #if defined(__NetBSD__)
746 so->so_send = sctp_sosend;
750 lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_bind() - pru_bind handler: bind the endpoint to a local IPv4
 * address/port.
 *
 * Rejects non-AF_INET addresses (this is the v4 entry point), then
 * delegates to sctp_inpcb_bind() and replies with its result.
 * NOTE(review): lines elided in this listing (EINVAL assignment/goto for
 * the address-family check, EINVAL on missing pcb); code left exactly as
 * extracted.
 */
754 sctp_bind(netmsg_t msg)
756 struct socket *so = msg->bind.base.nm_so;
757 struct sockaddr *addr = msg->bind.nm_nam;
758 thread_t td = msg->bind.nm_td;
759 struct sctp_inpcb *inp;
763 if (addr && addr->sa_family != AF_INET) {
764 /* must be a v4 address! */
770 inp = (struct sctp_inpcb *)so->so_pcb;
772 error = sctp_inpcb_bind(so, addr, td);
777 lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_detach() - pru_detach handler: tear down the endpoint when the
 * socket is closed.
 *
 * If SO_LINGER is set with a zero linger time, or unread data remains in
 * the receive buffer, the PCB is freed immediately/abortively (flag 1);
 * otherwise a graceful free (flag 0) is requested so associations can
 * shut down cleanly.
 * NOTE(review): lines elided in this listing (NULL-pcb check, error
 * init); code left exactly as extracted.
 */
782 sctp_detach(netmsg_t msg)
784 struct socket *so = msg->detach.base.nm_so;
785 struct sctp_inpcb *inp;
788 inp = (struct sctp_inpcb *)so->so_pcb;
793 if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
794 (so->so_rcv.ssb_cc > 0)) {
795 sctp_inpcb_free(inp, 1);
797 sctp_inpcb_free(inp, 0);
801 lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_send() - pru_send handler: queue user data for transmission.
 *
 * Validates the destination: connected/TCP-style sockets may omit an
 * address, otherwise EDESTADDRREQ; only AF_INET destinations are accepted
 * on this (v4) path.  Stashes any control mbufs on the inp (freeing a
 * stale one first), appends the data mbuf chain to inp->pkt (adjusting
 * the pkthdr length when one exists), and — depending on the platform's
 * "more to come" indication (PRUS_MORETOCOME on FreeBSD-family,
 * SS_MORETOCOME on NetBSD, always-send on OpenBSD) — pushes the
 * accumulated packet out via sctp_output().  Frees a PRUS_NAMALLOC'd
 * address and replies to the netmsg.
 * NOTE(review): many lines elided in this listing (error paths, the
 * control-mbuf branch header, pkt chain bookkeeping, closing braces);
 * code left exactly as extracted.
 */
805 sctp_send(netmsg_t msg)
807 struct socket *so = msg->send.base.nm_so;
808 int flags = msg->send.nm_flags;
809 struct mbuf *m = msg->send.nm_m;
810 struct mbuf *control = msg->send.nm_control;
811 struct sockaddr *addr = msg->send.nm_addr;
812 struct thread *td = msg->send.nm_td;
814 struct sctp_inpcb *inp;
815 inp = (struct sctp_inpcb *)so->so_pcb;
/* no pcb: drop any control data and bail */
818 sctp_m_freem(control);
825 /* Got to have an to address if we are NOT a connected socket */
826 if ((addr == NULL) &&
827 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
828 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
831 } else if (addr == NULL) {
832 error = EDESTADDRREQ;
835 sctp_m_freem(control);
841 if (addr->sa_family != AF_INET) {
842 /* must be a v4 address! */
845 sctp_m_freem(control);
848 error = EDESTADDRREQ; /* XXX huh? */
854 /* now what about control */
/* a previously stashed control mbuf should not exist — log and drop it */
857 kprintf("huh? control set?\n");
858 sctp_m_freem(inp->control);
861 inp->control = control;
863 /* add it in possibly */
864 if ((inp->pkt) && (inp->pkt->m_flags & M_PKTHDR)) {
/* account the new chain's length into the existing pkthdr */
870 for (x=m;x;x = x->m_next) {
873 inp->pkt->m_pkthdr.len += c_len;
877 inp->pkt_last->m_next = m;
880 inp->pkt_last = inp->pkt = m;
883 #if defined (__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
884 /* FreeBSD uses a flag passed */
885 ((flags & PRUS_MORETOCOME) == 0)
886 #elif defined( __NetBSD__)
887 /* NetBSD uses the so_state field */
888 ((so->so_state & SS_MORETOCOME) == 0)
890 1 /* Open BSD does not have any "more to come" indication */
894 * note with the current version this code will only be used
895 * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for
896 * re-defining sosend to use the sctp_sosend. One can
897 * optionally switch back to this code (by changing back the
898 * definitions) but this is not advisable.
900 error = sctp_output(inp, inp->pkt, addr,
901 inp->control, td, flags);
908 if (msg->send.nm_flags & PRUS_NAMALLOC) {
909 kfree(msg->send.nm_addr, M_LWKTMSG);
910 msg->send.nm_addr = NULL;
912 lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_disconnect() - pru_disconnect handler (TCP-model sockets only).
 *
 * UDP-model endpoints reject the call.  For a TCP-model endpoint with an
 * association: if SO_LINGER-with-zero-timeout is set or unread data
 * remains, the association is aborted — a user-initiated-abort error
 * cause is built in an mbuf and sent via sctp_send_abort_tcb(), then the
 * association is freed.  Otherwise a graceful shutdown is attempted: if
 * nothing is queued on the send/sent queues or the stream wheel, a
 * SHUTDOWN chunk is sent (first time through only) and the shutdown and
 * shutdown-guard timers are started; if data is still pending, the
 * association is merely flagged SHUTDOWN_PENDING so shutdown proceeds
 * once the queues drain.
 * NOTE(review): many lines elided in this listing (error constants,
 * lock/unlock pairing, MGET failure branch, closing braces); code left
 * exactly as extracted.
 */
916 sctp_disconnect(netmsg_t msg)
918 struct socket *so = msg->disconnect.base.nm_so;
919 struct sctp_inpcb *inp;
922 inp = (struct sctp_inpcb *)so->so_pcb;
928 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
929 if (LIST_EMPTY(&inp->sctp_asoc_list)) {
/* no association to disconnect */
931 SCTP_INP_RUNLOCK(inp);
935 int some_on_streamwheel = 0;
936 struct sctp_association *asoc;
937 struct sctp_tcb *stcb;
939 stcb = LIST_FIRST(&inp->sctp_asoc_list);
941 SCTP_INP_RUNLOCK(inp);
/* abortive close: linger-0 or data still unread by the user */
947 if (((so->so_options & SO_LINGER) &&
948 (so->so_linger == 0)) ||
949 (so->so_rcv.ssb_cc > 0)) {
950 if (SCTP_GET_STATE(asoc) !=
951 SCTP_STATE_COOKIE_WAIT) {
952 /* Left with Data unread */
955 MGET(err, MB_DONTWAIT, MT_DATA);
957 /* Fill in the user initiated abort */
958 struct sctp_paramhdr *ph;
959 ph = mtod(err, struct sctp_paramhdr *);
960 err->m_len = sizeof(struct sctp_paramhdr);
961 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
962 ph->param_length = htons(err->m_len);
964 sctp_send_abort_tcb(stcb, err);
966 SCTP_INP_RUNLOCK(inp);
967 sctp_free_assoc(inp, stcb);
968 /* No unlock tcb assoc is gone */
/* graceful close: find out whether any stream still holds data */
972 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
973 /* Check to see if some data queued */
974 struct sctp_stream_out *outs;
975 TAILQ_FOREACH(outs, &asoc->out_wheel,
977 if (!TAILQ_EMPTY(&outs->outqueue)) {
978 some_on_streamwheel = 1;
984 if (TAILQ_EMPTY(&asoc->send_queue) &&
985 TAILQ_EMPTY(&asoc->sent_queue) &&
986 (some_on_streamwheel == 0)) {
987 /* there is nothing queued to send, so done */
988 if ((SCTP_GET_STATE(asoc) !=
989 SCTP_STATE_SHUTDOWN_SENT) &&
990 (SCTP_GET_STATE(asoc) !=
991 SCTP_STATE_SHUTDOWN_ACK_SENT)) {
992 /* only send SHUTDOWN 1st time thru */
994 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
995 kprintf("%s:%d sends a shutdown\n",
1001 sctp_send_shutdown(stcb,
1002 stcb->asoc.primary_destination);
1003 sctp_chunk_output(stcb->sctp_ep, stcb, 1);
1004 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
1005 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1006 stcb->sctp_ep, stcb,
1007 asoc->primary_destination);
1008 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1009 stcb->sctp_ep, stcb,
1010 asoc->primary_destination);
1014 * we still got (or just got) data to send,
1015 * so set SHUTDOWN_PENDING
1018 * XXX sockets draft says that MSG_EOF should
1019 * be sent with no data.
1020 * currently, we will allow user data to be
1021 * sent first and move to SHUTDOWN-PENDING
1023 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
1025 SCTP_TCB_UNLOCK(stcb);
1026 SCTP_INP_RUNLOCK(inp);
1030 /* UDP model does not support this */
1031 SCTP_INP_RUNLOCK(inp);
1035 lwkt_replymsg(&msg->lmsg, error);
1038 /* also called from ipv6 sctp code */
/*
 * sctp_shutdown() - pru_shutdown handler (also reached from the IPv6
 * SCTP code, per the comment above).
 *
 * For UDP-model endpoints shutdown of the read side is meaningless, so
 * the receive-shutdown state soshutdown() set is undone and the call
 * succeeds as a no-op.  For TCP-model endpoints this mirrors the
 * graceful branch of sctp_disconnect(): if nothing remains on the
 * send/sent queues or stream wheel, send SHUTDOWN (first time only),
 * enter SHUTDOWN_SENT and start the shutdown + shutdown-guard timers;
 * otherwise flag the association SHUTDOWN_PENDING.
 * NOTE(review): lines elided in this listing (error init, stcb == NULL
 * early-return body, closing braces); code left exactly as extracted.
 */
1040 sctp_shutdown(netmsg_t msg)
1042 struct socket *so = msg->shutdown.base.nm_so;
1043 struct sctp_inpcb *inp;
1046 inp = (struct sctp_inpcb *)so->so_pcb;
1051 SCTP_INP_RLOCK(inp);
1052 /* For UDP model this is a invalid call */
1053 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
1054 /* Restore the flags that the soshutdown took away. */
1055 #if defined(__FreeBSD__) && __FreeBSD_version >= 502115
1056 so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
1058 soclrstate(so, SS_CANTRCVMORE);
1060 /* This proc will wakeup for read and do nothing (I hope) */
1061 SCTP_INP_RUNLOCK(inp);
1066 * Ok if we reach here its the TCP model and it is either a SHUT_WR
1067 * or SHUT_RDWR. This means we put the shutdown flag against it.
1070 int some_on_streamwheel = 0;
1071 struct sctp_tcb *stcb;
1072 struct sctp_association *asoc;
1075 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1078 * Ok we hit the case that the shutdown call was made
1079 * after an abort or something. Nothing to do now.
1084 SCTP_TCB_LOCK(stcb);
/* scan the stream wheel for any stream that still holds queued data */
1087 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
1088 /* Check to see if some data queued */
1089 struct sctp_stream_out *outs;
1090 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
1091 if (!TAILQ_EMPTY(&outs->outqueue)) {
1092 some_on_streamwheel = 1;
1097 if (TAILQ_EMPTY(&asoc->send_queue) &&
1098 TAILQ_EMPTY(&asoc->sent_queue) &&
1099 (some_on_streamwheel == 0)) {
1100 /* there is nothing queued to send, so I'm done... */
1101 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1102 /* only send SHUTDOWN the first time through */
1104 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
1105 kprintf("%s:%d sends a shutdown\n",
1111 sctp_send_shutdown(stcb,
1112 stcb->asoc.primary_destination);
1113 sctp_chunk_output(stcb->sctp_ep, stcb, 1);
1114 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
1115 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1116 stcb->sctp_ep, stcb,
1117 asoc->primary_destination);
1118 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1119 stcb->sctp_ep, stcb,
1120 asoc->primary_destination);
1124 * we still got (or just got) data to send, so
1125 * set SHUTDOWN_PENDING
1127 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
1129 SCTP_TCB_UNLOCK(stcb);
1131 SCTP_INP_RUNLOCK(inp);
1134 lwkt_replymsg(&msg->lmsg, error);
1138 * copies a "user" presentable address and removes embedded scope, etc.
1139 * returns 0 on success, 1 on error
/*
 * sctp_fill_user_address() - copy a local address into a user-facing
 * sockaddr_storage, first recovering any embedded IPv6 scope via
 * sctp_recover_scope() so userland never sees kernel-internal scope
 * encodings.  Per the comment above, returns 0 on success and 1 on
 * error (the error path lines are elided from this listing).
 */
1142 sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
1144 struct sockaddr_in6 lsa6;
1145 sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
1147 memcpy(ss, sa, sa->sa_len);
/*
 * sctp_fill_up_addresses() - copy the endpoint's (or association's) local
 * address list into a caller-supplied sockaddr_storage buffer, up to
 * 'limit' bytes (parameter line elided in this listing).
 *
 * Scoping: with a TCB, the association's loopback/ipv4-local/local/site
 * scopes apply; without one, all scopes are enabled.  Which families are
 * legal follows the endpoint's BOUND_V6 flag.
 *
 * Three list sources, mirroring the PCB binding model:
 *  - BOUNDALL endpoints walk every interface address (per-CPU address
 *    list on DragonFly), skipping addresses excluded by scope, family,
 *    unspecified addresses, bad link-locals, and — when a TCB is given —
 *    addresses on the TCB's *negative* (restricted) list;
 *  - subset-bound endpoints with a TCB and ASCONF enabled also treat the
 *    TCB list as negative and walk the endpoint list, skipping restricted
 *    entries;
 *  - otherwise the TCB's own positive address list (or the endpoint list
 *    when no TCB) is copied directly.
 * Every copied sockaddr gets the endpoint's local port patched in, and
 * 'actual' accumulates bytes written until 'limit' is reached.
 * NOTE(review): many lines elided (return statements, 'continue's, the
 * function's trailing return of 'actual' — presumably the byte count;
 * verify against the full source); code left exactly as extracted.
 */
1152 sctp_fill_up_addresses(struct sctp_inpcb *inp,
1153 struct sctp_tcb *stcb,
1155 struct sockaddr_storage *sas)
1158 int loopback_scope, ipv4_local_scope, local_scope, site_scope, actual;
1159 int ipv4_addr_legal, ipv6_addr_legal;
1165 /* Turn on all the appropriate scope */
1166 loopback_scope = stcb->asoc.loopback_scope;
1167 ipv4_local_scope = stcb->asoc.ipv4_local_scope;
1168 local_scope = stcb->asoc.local_scope;
1169 site_scope = stcb->asoc.site_scope;
1171 /* Turn on ALL scope, since we look at the EP */
1172 loopback_scope = ipv4_local_scope = local_scope =
1175 ipv4_addr_legal = ipv6_addr_legal = 0;
1176 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1177 ipv6_addr_legal = 1;
1179 ipv4_addr_legal = 1;
1182 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1183 TAILQ_FOREACH(ifn, &ifnet, if_list) {
1184 struct ifaddr_container *ifac;
1186 if ((loopback_scope == 0) &&
1187 (ifn->if_type == IFT_LOOP)) {
1188 /* Skip loopback if loopback_scope not set */
1191 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid],
1193 struct ifaddr *ifa = ifac->ifa;
1197 * For the BOUND-ALL case, the list
1198 * associated with a TCB is Always
1199 * considered a reverse list.. i.e.
1200 * it lists addresses that are NOT
1201 * part of the association. If this
1202 * is one of those we must skip it.
1204 if (sctp_is_addr_restricted(stcb,
1209 if ((ifa->ifa_addr->sa_family == AF_INET) &&
1210 (ipv4_addr_legal)) {
1211 struct sockaddr_in *sin;
1212 sin = (struct sockaddr_in *)ifa->ifa_addr;
1213 if (sin->sin_addr.s_addr == 0) {
1214 /* we skip unspecifed addresses */
1217 if ((ipv4_local_scope == 0) &&
1218 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
/* copy out the v4 address with the local port patched in */
1221 memcpy(sas, sin, sizeof(*sin));
1222 ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
1223 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
1224 actual += sizeof(*sin);
1225 if (actual >= limit) {
1228 } else if ((ifa->ifa_addr->sa_family == AF_INET6) &&
1229 (ipv6_addr_legal)) {
1230 struct sockaddr_in6 *sin6, lsa6;
1231 sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
1232 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1233 /* we skip unspecifed addresses */
1236 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
1237 if (local_scope == 0)
1239 if (sin6->sin6_scope_id == 0) {
1241 if (in6_recoverscope(&lsa6,
1244 /* bad link local address */
1249 if ((site_scope == 0) &&
1250 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1253 memcpy(sas, sin6, sizeof(*sin6));
1254 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1255 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
1256 actual += sizeof(*sin6);
1257 if (actual >= limit) {
1264 struct sctp_laddr *laddr;
1266 * If we have a TCB and we do NOT support ASCONF (it's
1267 * turned off or otherwise) then the list is always the
1268 * true list of addresses (the else case below). Otherwise
1269 * the list on the association is a list of addresses that
1270 * are NOT part of the association.
1272 if (inp->sctp_flags & SCTP_PCB_FLAGS_DO_ASCONF) {
1273 /* The list is a NEGATIVE list */
1274 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1276 if (sctp_is_addr_restricted(stcb, laddr->ifa->ifa_addr)) {
1280 if (sctp_fill_user_address(sas, laddr->ifa->ifa_addr))
/* port patch works for v4 too: sin_port/sin6_port share the offset */
1283 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1284 sas = (struct sockaddr_storage *)((caddr_t)sas +
1285 laddr->ifa->ifa_addr->sa_len);
1286 actual += laddr->ifa->ifa_addr->sa_len;
1287 if (actual >= limit) {
1292 /* The list is a positive list if present */
1294 /* Must use the specific association list */
1295 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
1297 if (sctp_fill_user_address(sas,
1298 laddr->ifa->ifa_addr))
1300 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1301 sas = (struct sockaddr_storage *)((caddr_t)sas +
1302 laddr->ifa->ifa_addr->sa_len);
1303 actual += laddr->ifa->ifa_addr->sa_len;
1304 if (actual >= limit) {
1309 /* No endpoint so use the endpoints individual list */
1310 LIST_FOREACH(laddr, &inp->sctp_addr_list,
1312 if (sctp_fill_user_address(sas,
1313 laddr->ifa->ifa_addr))
1315 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1316 sas = (struct sockaddr_storage *)((caddr_t)sas +
1317 laddr->ifa->ifa_addr->sa_len);
1318 actual += laddr->ifa->ifa_addr->sa_len;
1319 if (actual >= limit) {
/*
 * Return an upper bound, in bytes, on the buffer space needed to hold
 * every local address this endpoint could report: each v4 address is
 * counted as sizeof(struct sockaddr_in) (or sockaddr_in6 when the
 * socket wants v4-mapped addresses), each v6 as sockaddr_in6.
 * NOTE(review): this excerpt elides lines (original line numbers kept
 * as prefixes); else-branches and closing braces between samples are
 * not shown.
 */
1330 sctp_count_max_addresses(struct sctp_inpcb *inp)
1334 * In both sub-set bound an bound_all cases we return the MAXIMUM
1335 * number of addresses that you COULD get. In reality the sub-set
1336 * bound may have an exclusion list for a given TCB OR in the
1337 * bound-all case a TCB may NOT include the loopback or other
1338 * addresses as well.
/* Bound-all endpoint: walk every address on every system interface. */
1340 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1343 TAILQ_FOREACH(ifn, &ifnet, if_list) {
1344 struct ifaddr_container *ifac;
1346 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
1347 struct ifaddr *ifa = ifac->ifa;
1349 /* Count them if they are the right type */
1350 if (ifa->ifa_addr->sa_family == AF_INET) {
/* A v4 address delivered as v4-mapped occupies a sockaddr_in6. */
1351 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
1352 cnt += sizeof(struct sockaddr_in6);
1354 cnt += sizeof(struct sockaddr_in);
1356 } else if (ifa->ifa_addr->sa_family == AF_INET6)
1357 cnt += sizeof(struct sockaddr_in6);
/* Subset-bound endpoint: only the endpoint's own bound-address list. */
1361 struct sctp_laddr *laddr;
1362 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1363 if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
1364 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
1365 cnt += sizeof(struct sockaddr_in6);
1367 cnt += sizeof(struct sockaddr_in);
1369 } else if (laddr->ifa->ifa_addr->sa_family == AF_INET6)
1370 cnt += sizeof(struct sockaddr_in6);
/*
 * sctp_do_connect_x() -- implement the SCTP_CONNECT_X family of socket
 * options: validate the user-supplied packed address list (an int count
 * followed by sockaddrs in mbuf `m`), create a new association covering
 * all of the peer's addresses, and either start INIT immediately or, for
 * the delayed variant, arm the INIT timer.  Returns 0 or an errno.
 *
 * FIX: the SOCKET_GONE guard below tested SCTP_PCB_FLAGS_SOCKET_GONE
 * twice (copy-paste bug, second operand redundant); the second test is
 * now SCTP_PCB_FLAGS_SOCKET_ALLGONE, the companion teardown flag.
 * NOTE(review): this excerpt elides lines (original line numbers kept
 * as prefixes).
 */
1377 sctp_do_connect_x(struct socket *so,
1378 struct sctp_inpcb *inp,
1380 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
1389 struct sctp_tcb *stcb = NULL;
1390 struct sockaddr *sa;
1391 int num_v6=0, num_v4=0, *totaddrp, totaddr, i, incr, at;
1393 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1394 kprintf("Connectx called\n");
1396 #endif /* SCTP_DEBUG */
1398 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1399 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
1400 /* We are already connected AND the TCP model */
1401 return (EADDRINUSE);
1403 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
1404 SCTP_INP_RLOCK(inp);
1405 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1406 SCTP_INP_RUNLOCK(inp);
1412 SCTP_ASOC_CREATE_LOCK(inp);
/* Bail out if the socket is being torn down (either teardown flag). */
1413 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1414 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
1415 SCTP_ASOC_CREATE_UNLOCK(inp);
/* The mbuf payload is: int totaddr, then totaddr packed sockaddrs. */
1419 totaddrp = mtod(m, int *);
1420 totaddr = *totaddrp;
1421 sa = (struct sockaddr *)(totaddrp + 1);
1423 /* account and validate addresses */
1424 SCTP_INP_WLOCK(inp);
1425 SCTP_INP_INCR_REF(inp);
1426 SCTP_INP_WUNLOCK(inp);
1427 for (i = 0; i < totaddr; i++) {
1428 if (sa->sa_family == AF_INET) {
1430 incr = sizeof(struct sockaddr_in);
1431 } else if (sa->sa_family == AF_INET6) {
1432 struct sockaddr_in6 *sin6;
1433 sin6 = (struct sockaddr_in6 *)sa;
1434 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
1435 /* Must be non-mapped for connectx */
1436 SCTP_ASOC_CREATE_UNLOCK(inp);
1440 incr = sizeof(struct sockaddr_in6);
/* Refuse if any listed address already belongs to an association. */
1445 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
1447 /* Already have or am bring up an association */
1448 SCTP_ASOC_CREATE_UNLOCK(inp);
1449 SCTP_TCB_UNLOCK(stcb);
/* Guard against the packed list overrunning the mbuf data. */
1452 if ((at + incr) > m->m_len) {
1456 sa = (struct sockaddr *)((caddr_t)sa + incr);
1458 sa = (struct sockaddr *)(totaddrp + 1);
1459 SCTP_INP_WLOCK(inp);
1460 SCTP_INP_DECR_REF(inp);
1461 SCTP_INP_WUNLOCK(inp);
/* v6 addresses on a v4-only socket are an error... */
1463 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
1465 SCTP_INP_WUNLOCK(inp);
1466 SCTP_ASOC_CREATE_UNLOCK(inp);
1469 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1472 * Ignore connections destined to a v4 addr or
1475 SCTP_INP_WUNLOCK(inp);
1476 SCTP_ASOC_CREATE_UNLOCK(inp);
1480 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
1481 SCTP_PCB_FLAGS_UNBOUND) {
1482 /* Bind a ephemeral port */
1483 SCTP_INP_WUNLOCK(inp);
1484 error = sctp_inpcb_bind(so, NULL, p);
1486 SCTP_ASOC_CREATE_UNLOCK(inp);
1490 SCTP_INP_WUNLOCK(inp);
1492 /* We are GOOD to go */
1493 stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0);
1495 /* Gak! no memory */
1496 SCTP_ASOC_CREATE_UNLOCK(inp);
1499 /* move to second address */
1500 if (sa->sa_family == AF_INET)
1501 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
1503 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
/* Add the remaining peer addresses to the newly allocated assoc. */
1505 for (i = 1; i < totaddr; i++) {
1506 if (sa->sa_family == AF_INET) {
1507 incr = sizeof(struct sockaddr_in);
1508 if (sctp_add_remote_addr(stcb, sa, 0, 8)) {
1509 /* assoc gone no un-lock */
1510 sctp_free_assoc(inp, stcb);
1511 SCTP_ASOC_CREATE_UNLOCK(inp);
1515 } else if (sa->sa_family == AF_INET6) {
1516 incr = sizeof(struct sockaddr_in6);
1517 if (sctp_add_remote_addr(stcb, sa, 0, 8)) {
1518 /* assoc gone no un-lock */
1519 sctp_free_assoc(inp, stcb);
1520 SCTP_ASOC_CREATE_UNLOCK(inp);
1524 sa = (struct sockaddr *)((caddr_t)sa + incr);
1526 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
1528 /* doing delayed connection */
1529 stcb->asoc.delayed_connection = 1;
1530 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
1532 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1533 sctp_send_initiate(inp, stcb);
1535 SCTP_TCB_UNLOCK(stcb);
1536 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
1537 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1538 /* Set the connected flag so we can queue data */
1541 SCTP_ASOC_CREATE_UNLOCK(inp);
/*
 * sctp_optsget() -- getsockopt()-style handler for SCTP-level socket
 * options.  Dispatches on `opt`, copies the requested value(s) into the
 * mbuf `m` and sets m->m_len to the size of the result; `error` receives
 * 0 or an errno.  Option structures that carry an assoc id are used both
 * as input (which association) and output (the answer).
 * NOTE(review): this excerpt elides lines (original line numbers kept as
 * prefixes); break/default/brace lines between samples are not shown.
 */
1547 sctp_optsget(struct socket *so,
1550 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
1557 struct sctp_inpcb *inp;
1559 int error, optval=0;
1560 struct sctp_tcb *stcb = NULL;
1562 inp = (struct sctp_inpcb *)so->so_pcb;
1569 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1570 kprintf("optsget:MP is NULL EINVAL\n");
1572 #endif /* SCTP_DEBUG */
1577 /* Got to have a mbuf */
1579 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1580 kprintf("Huh no mbuf\n");
1582 #endif /* SCTP_DEBUG */
1586 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
1587 kprintf("optsget opt:%lxx sz:%u\n", (unsigned long)opt,
1590 #endif /* SCTP_DEBUG */
/* Simple flag options: read under the INP read lock, then normalize to
 * 0/1 -- except SCTP_AUTOCLOSE, whose value is the auto-close time. */
1594 case SCTP_AUTOCLOSE:
1595 case SCTP_AUTO_ASCONF:
1596 case SCTP_DISABLE_FRAGMENTS:
1597 case SCTP_I_WANT_MAPPED_V4_ADDR:
1599 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
1600 kprintf("other stuff\n");
1602 #endif /* SCTP_DEBUG */
1603 SCTP_INP_RLOCK(inp);
1605 case SCTP_DISABLE_FRAGMENTS:
1606 optval = inp->sctp_flags & SCTP_PCB_FLAGS_NO_FRAGMENT;
1608 case SCTP_I_WANT_MAPPED_V4_ADDR:
1609 optval = inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
1611 case SCTP_AUTO_ASCONF:
1612 optval = inp->sctp_flags & SCTP_PCB_FLAGS_AUTO_ASCONF;
1615 optval = inp->sctp_flags & SCTP_PCB_FLAGS_NODELAY;
1617 case SCTP_AUTOCLOSE:
1618 if ((inp->sctp_flags & SCTP_PCB_FLAGS_AUTOCLOSE) ==
1619 SCTP_PCB_FLAGS_AUTOCLOSE)
1620 optval = inp->sctp_ep.auto_close_time;
1626 error = ENOPROTOOPT;
1627 } /* end switch (sopt->sopt_name) */
1628 if (opt != SCTP_AUTOCLOSE) {
1629 /* make it an "on/off" value */
1630 optval = (optval != 0);
1632 if ((size_t)m->m_len < sizeof(int)) {
1635 SCTP_INP_RUNLOCK(inp);
1637 /* return the option value */
1638 *mtod(m, int *) = optval;
1639 m->m_len = sizeof(optval);
/* Fill the caller's array with up to MAX_ASOC_IDS_RET association ids,
 * resuming from asls_assoc_start on successive calls. */
1642 case SCTP_GET_ASOC_ID_LIST:
1644 struct sctp_assoc_ids *ids;
1648 if ((size_t)m->m_len < sizeof(struct sctp_assoc_ids)) {
1652 ids = mtod(m, struct sctp_assoc_ids *);
1654 SCTP_INP_RLOCK(inp);
1655 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1658 ids->asls_numb_present = 0;
1659 ids->asls_more_to_get = 0;
1660 SCTP_INP_RUNLOCK(inp);
1663 orig = ids->asls_assoc_start;
1664 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1666 stcb = LIST_NEXT(stcb , sctp_tcblist);
1674 ids->asls_numb_present = 0;
1675 ids->asls_more_to_get = 1;
1676 while(at < MAX_ASOC_IDS_RET) {
1677 ids->asls_assoc_id[at] = sctp_get_associd(stcb);
1679 ids->asls_numb_present++;
1680 stcb = LIST_NEXT(stcb , sctp_tcblist);
1682 ids->asls_more_to_get = 0;
1686 SCTP_INP_RUNLOCK(inp);
/* Report the local/peer verification tags of one association. */
1689 case SCTP_GET_NONCE_VALUES:
1691 struct sctp_get_nonce_values *gnv;
1692 if ((size_t)m->m_len < sizeof(struct sctp_get_nonce_values)) {
1696 gnv = mtod(m, struct sctp_get_nonce_values *);
1697 stcb = sctp_findassociation_ep_asocid(inp, gnv->gn_assoc_id);
1701 gnv->gn_peers_tag = stcb->asoc.peer_vtag;
1702 gnv->gn_local_tag = stcb->asoc.my_vtag;
1703 SCTP_TCB_UNLOCK(stcb);
1708 case SCTP_PEER_PUBLIC_KEY:
1709 case SCTP_MY_PUBLIC_KEY:
1710 case SCTP_SET_AUTH_CHUNKS:
1711 case SCTP_SET_AUTH_SECRET:
1712 /* not supported yet and until we refine the draft */
1716 case SCTP_DELAYED_ACK_TIME:
1719 if ((size_t)m->m_len < sizeof(int32_t)) {
1723 tm = mtod(m, int32_t *);
/* Stored in ticks; converted to milliseconds for the caller. */
1725 *tm = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1729 case SCTP_GET_SNDBUF_USE:
1730 if ((size_t)m->m_len < sizeof(struct sctp_sockstat)) {
1733 struct sctp_sockstat *ss;
1734 struct sctp_tcb *stcb;
1735 struct sctp_association *asoc;
1736 ss = mtod(m, struct sctp_sockstat *);
1737 stcb = sctp_findassociation_ep_asocid(inp, ss->ss_assoc_id);
1742 ss->ss_total_sndbuf = (u_int32_t)asoc->total_output_queue_size;
1743 ss->ss_total_mbuf_sndbuf = (u_int32_t)asoc->total_output_mbuf_queue_size;
1744 ss->ss_total_recv_buf = (u_int32_t)(asoc->size_on_delivery_queue +
1745 asoc->size_on_reasm_queue +
1746 asoc->size_on_all_streams);
1747 SCTP_TCB_UNLOCK(stcb);
1749 m->m_len = sizeof(struct sctp_sockstat);
1756 burst = mtod(m, u_int8_t *);
1757 SCTP_INP_RLOCK(inp);
1758 *burst = inp->sctp_ep.max_burst;
1759 SCTP_INP_RUNLOCK(inp);
1760 m->m_len = sizeof(u_int8_t);
/* Segment size: input assoc id and output u_int32_t share the same
 * mbuf storage (both pointers alias mtod(m, ...)). */
1766 sctp_assoc_t *assoc_id;
1769 if ((size_t)m->m_len < sizeof(u_int32_t)) {
1773 if ((size_t)m->m_len < sizeof(sctp_assoc_t)) {
1777 assoc_id = mtod(m, sctp_assoc_t *);
1778 segsize = mtod(m, u_int32_t *);
1779 m->m_len = sizeof(u_int32_t);
1781 if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1782 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) ||
1783 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
1784 struct sctp_tcb *stcb;
1785 SCTP_INP_RLOCK(inp);
1786 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1788 SCTP_TCB_LOCK(stcb);
1789 SCTP_INP_RUNLOCK(inp);
1790 *segsize = sctp_get_frag_point(stcb, &stcb->asoc);
1791 SCTP_TCB_UNLOCK(stcb);
1793 SCTP_INP_RUNLOCK(inp);
1797 stcb = sctp_findassociation_ep_asocid(inp, *assoc_id);
1799 *segsize = sctp_get_frag_point(stcb, &stcb->asoc);
1800 SCTP_TCB_UNLOCK(stcb);
1804 /* default is to get the max, if I
1805 * can't calculate from an existing association.
1807 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1808 ovh = SCTP_MED_OVERHEAD;
1810 ovh = SCTP_MED_V4_OVERHEAD;
1812 *segsize = inp->sctp_frag_point - ovh;
1817 case SCTP_SET_DEBUG_LEVEL:
1821 if ((size_t)m->m_len < sizeof(u_int32_t)) {
1825 level = mtod(m, u_int32_t *);
1827 *level = sctp_debug_on;
1828 m->m_len = sizeof(u_int32_t);
1829 kprintf("Returning DEBUG LEVEL %x is set\n",
1830 (u_int)sctp_debug_on);
1832 #else /* SCTP_DEBUG */
1836 case SCTP_GET_STAT_LOG:
1837 #ifdef SCTP_STAT_LOGGING
1838 error = sctp_fill_stat_log(m);
1839 #else /* SCTP_DEBUG */
1846 if ((size_t)m->m_len < sizeof(sctp_pegs)) {
1850 pt = mtod(m, u_int32_t *);
1851 memcpy(pt, sctp_pegs, sizeof(sctp_pegs));
1852 m->m_len = sizeof(sctp_pegs);
/* Report which event notifications are subscribed, one flag each. */
1857 struct sctp_event_subscribe *events;
1859 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
1860 kprintf("get events\n");
1862 #endif /* SCTP_DEBUG */
1863 if ((size_t)m->m_len < sizeof(struct sctp_event_subscribe)) {
1865 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
1866 kprintf("M->M_LEN is %d not %d\n",
1868 (int)sizeof(struct sctp_event_subscribe));
1870 #endif /* SCTP_DEBUG */
1874 events = mtod(m, struct sctp_event_subscribe *);
1875 memset(events, 0, sizeof(*events));
1876 SCTP_INP_RLOCK(inp);
1877 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT)
1878 events->sctp_data_io_event = 1;
1880 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVASSOCEVNT)
1881 events->sctp_association_event = 1;
1883 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVPADDREVNT)
1884 events->sctp_address_event = 1;
1886 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVSENDFAILEVNT)
1887 events->sctp_send_failure_event = 1;
1889 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVPEERERR)
1890 events->sctp_peer_error_event = 1;
1892 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)
1893 events->sctp_shutdown_event = 1;
1895 if (inp->sctp_flags & SCTP_PCB_FLAGS_PDAPIEVNT)
1896 events->sctp_partial_delivery_event = 1;
1898 if (inp->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT)
1899 events->sctp_adaption_layer_event = 1;
1901 if (inp->sctp_flags & SCTP_PCB_FLAGS_STREAM_RESETEVNT)
1902 events->sctp_stream_reset_events = 1;
1903 SCTP_INP_RUNLOCK(inp);
1904 m->m_len = sizeof(struct sctp_event_subscribe);
1909 case SCTP_ADAPTION_LAYER:
1910 if ((size_t)m->m_len < sizeof(int)) {
1915 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1916 kprintf("getadaption ind\n");
1918 #endif /* SCTP_DEBUG */
1919 SCTP_INP_RLOCK(inp);
1920 *mtod(m, int *) = inp->sctp_ep.adaption_layer_indicator;
1921 SCTP_INP_RUNLOCK(inp);
1922 m->m_len = sizeof(int);
1924 case SCTP_SET_INITIAL_DBG_SEQ:
1925 if ((size_t)m->m_len < sizeof(int)) {
1930 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1931 kprintf("get initial dbg seq\n");
1933 #endif /* SCTP_DEBUG */
1934 SCTP_INP_RLOCK(inp);
1935 *mtod(m, int *) = inp->sctp_ep.initial_sequence_debug;
1936 SCTP_INP_RUNLOCK(inp);
1937 m->m_len = sizeof(int);
1939 case SCTP_GET_LOCAL_ADDR_SIZE:
1940 if ((size_t)m->m_len < sizeof(int)) {
1945 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1946 kprintf("get local sizes\n");
1948 #endif /* SCTP_DEBUG */
1949 SCTP_INP_RLOCK(inp);
1950 *mtod(m, int *) = sctp_count_max_addresses(inp);
1951 SCTP_INP_RUNLOCK(inp);
1952 m->m_len = sizeof(int);
/* Sum the sockaddr sizes of all the peer's destinations. */
1954 case SCTP_GET_REMOTE_ADDR_SIZE:
1956 sctp_assoc_t *assoc_id;
1958 struct sctp_nets *net;
1960 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1961 kprintf("get remote size\n");
1963 #endif /* SCTP_DEBUG */
1964 if ((size_t)m->m_len < sizeof(sctp_assoc_t)) {
1966 kprintf("m->m_len:%d not %zd\n",
1967 m->m_len, sizeof(sctp_assoc_t));
1968 #endif /* SCTP_DEBUG */
1973 val = mtod(m, u_int32_t *);
1974 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
1975 SCTP_INP_RLOCK(inp);
1976 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1978 SCTP_TCB_LOCK(stcb);
1979 SCTP_INP_RUNLOCK(inp);
1982 assoc_id = mtod(m, sctp_assoc_t *);
1983 stcb = sctp_findassociation_ep_asocid(inp, *assoc_id);
1992 /* Count the sizes */
1993 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1994 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) ||
1995 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
1996 sz += sizeof(struct sockaddr_in6);
1997 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
1998 sz += sizeof(struct sockaddr_in);
2004 SCTP_TCB_UNLOCK(stcb);
2006 m->m_len = sizeof(u_int32_t);
2009 case SCTP_GET_PEER_ADDRESSES:
2011 * Get the address information, an array
2012 * is passed in to fill up we pack it.
2016 struct sockaddr_storage *sas;
2017 struct sctp_nets *net;
2018 struct sctp_getaddresses *saddr;
2020 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2021 kprintf("get peer addresses\n");
2023 #endif /* SCTP_DEBUG */
2024 if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
2028 left = m->m_len - sizeof(struct sctp_getaddresses);
2029 saddr = mtod(m, struct sctp_getaddresses *);
2030 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2031 SCTP_INP_RLOCK(inp);
2032 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2034 SCTP_TCB_LOCK(stcb);
2035 SCTP_INP_RUNLOCK(inp);
2037 stcb = sctp_findassociation_ep_asocid(inp,
2038 saddr->sget_assoc_id);
2043 m->m_len = sizeof(struct sctp_getaddresses);
2044 sas = (struct sockaddr_storage *)&saddr->addr[0];
2046 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2047 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) ||
2048 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2049 cpsz = sizeof(struct sockaddr_in6);
2050 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2051 cpsz = sizeof(struct sockaddr_in);
2057 /* not enough room. */
2059 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2060 kprintf("Out of room\n");
2062 #endif /* SCTP_DEBUG */
2065 memcpy(sas, &net->ro._l_addr, cpsz);
/* sin_port shares its offset in sockaddr_in and sockaddr_in6. */
2066 ((struct sockaddr_in *)sas)->sin_port = stcb->rport;
2068 sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
2072 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
2073 kprintf("left now:%d mlen:%d\n",
2076 #endif /* SCTP_DEBUG */
2078 SCTP_TCB_UNLOCK(stcb);
2081 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2082 kprintf("All done\n");
2084 #endif /* SCTP_DEBUG */
2086 case SCTP_GET_LOCAL_ADDRESSES:
2089 struct sockaddr_storage *sas;
2090 struct sctp_getaddresses *saddr;
2092 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2093 kprintf("get local addresses\n");
2095 #endif /* SCTP_DEBUG */
2096 if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
2100 saddr = mtod(m, struct sctp_getaddresses *);
2102 if (saddr->sget_assoc_id) {
2103 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2104 SCTP_INP_RLOCK(inp);
2105 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2107 SCTP_TCB_LOCK(stcb);
2108 SCTP_INP_RUNLOCK(inp);
2110 stcb = sctp_findassociation_ep_asocid(inp, saddr->sget_assoc_id);
2116 * assure that the TCP model does not need a assoc id
2119 if ( (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) &&
2121 SCTP_INP_RLOCK(inp);
2122 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2124 SCTP_TCB_LOCK(stcb);
2125 SCTP_INP_RUNLOCK(inp);
2127 sas = (struct sockaddr_storage *)&saddr->addr[0];
2128 limit = m->m_len - sizeof(sctp_assoc_t);
2129 actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
2130 SCTP_TCB_UNLOCK(stcb);
2131 m->m_len = sizeof(struct sockaddr_storage) + actual;
/* Path parameters: per-destination if a net matches, else per-assoc
 * defaults, else endpoint defaults (spp_assoc_id == 0). */
2134 case SCTP_PEER_ADDR_PARAMS:
2136 struct sctp_paddrparams *paddrp;
2137 struct sctp_nets *net;
2140 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2141 kprintf("Getting peer_addr_params\n");
2143 #endif /* SCTP_DEBUG */
2144 if ((size_t)m->m_len < sizeof(struct sctp_paddrparams)) {
2146 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
2147 kprintf("Hmm m->m_len:%d is to small\n",
2150 #endif /* SCTP_DEBUG */
2154 paddrp = mtod(m, struct sctp_paddrparams *);
2157 if (paddrp->spp_assoc_id) {
2159 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2160 kprintf("In spp_assoc_id find type\n");
2162 #endif /* SCTP_DEBUG */
2163 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2164 SCTP_INP_RLOCK(inp);
2165 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2167 SCTP_TCB_LOCK(stcb);
2168 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
2170 SCTP_INP_RLOCK(inp);
2172 stcb = sctp_findassociation_ep_asocid(inp, paddrp->spp_assoc_id);
2179 if ( (stcb == NULL) &&
2180 ((((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET) ||
2181 (((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET6))) {
2182 /* Lookup via address */
2184 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2185 kprintf("Ok we need to lookup a param\n");
2187 #endif /* SCTP_DEBUG */
2188 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2189 SCTP_INP_RLOCK(inp);
2190 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2192 SCTP_TCB_LOCK(stcb);
2193 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
2195 SCTP_INP_RUNLOCK(inp);
/* Ref-count the inp around the unlocked ep_addr lookup. */
2197 SCTP_INP_WLOCK(inp);
2198 SCTP_INP_INCR_REF(inp);
2199 SCTP_INP_WUNLOCK(inp);
2200 stcb = sctp_findassociation_ep_addr(&inp,
2201 (struct sockaddr *)&paddrp->spp_address,
2204 SCTP_INP_WLOCK(inp);
2205 SCTP_INP_DECR_REF(inp);
2206 SCTP_INP_WUNLOCK(inp);
2215 /* Effects the Endpoint */
2217 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2218 kprintf("User wants EP level info\n");
2220 #endif /* SCTP_DEBUG */
2224 /* Applys to the specific association */
2226 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2227 kprintf("In TCB side\n");
2229 #endif /* SCTP_DEBUG */
2231 paddrp->spp_pathmaxrxt = net->failure_threshold;
2233 /* No destination so return default value */
2234 paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
2236 paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
2237 paddrp->spp_assoc_id = sctp_get_associd(stcb);
2238 SCTP_TCB_UNLOCK(stcb);
2240 /* Use endpoint defaults */
2241 SCTP_INP_RLOCK(inp);
2243 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2244 kprintf("In EP level info\n");
2246 #endif /* SCTP_DEBUG */
2247 paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
2248 paddrp->spp_hbinterval = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT];
2249 paddrp->spp_assoc_id = (sctp_assoc_t)0;
2250 SCTP_INP_RUNLOCK(inp);
2252 m->m_len = sizeof(struct sctp_paddrparams);
2255 case SCTP_GET_PEER_ADDR_INFO:
2257 struct sctp_paddrinfo *paddri;
2258 struct sctp_nets *net;
2260 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2261 kprintf("GetPEER ADDR_INFO\n");
2263 #endif /* SCTP_DEBUG */
2264 if ((size_t)m->m_len < sizeof(struct sctp_paddrinfo)) {
2268 paddri = mtod(m, struct sctp_paddrinfo *);
2270 if ((((struct sockaddr *)&paddri->spinfo_address)->sa_family == AF_INET) ||
2271 (((struct sockaddr *)&paddri->spinfo_address)->sa_family == AF_INET6)) {
2272 /* Lookup via address */
2273 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2274 SCTP_INP_RLOCK(inp);
2275 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2277 SCTP_TCB_LOCK(stcb);
2278 net = sctp_findnet(stcb,
2279 (struct sockaddr *)&paddri->spinfo_address);
2281 SCTP_INP_RUNLOCK(inp);
2283 SCTP_INP_WLOCK(inp);
2284 SCTP_INP_INCR_REF(inp);
2285 SCTP_INP_WUNLOCK(inp);
2286 stcb = sctp_findassociation_ep_addr(&inp,
2287 (struct sockaddr *)&paddri->spinfo_address,
2290 SCTP_INP_WLOCK(inp);
2291 SCTP_INP_DECR_REF(inp);
2292 SCTP_INP_WUNLOCK(inp);
2299 if ((stcb == NULL) || (net == NULL)) {
2303 m->m_len = sizeof(struct sctp_paddrinfo);
2304 paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK|SCTP_ADDR_NOHB);
2305 paddri->spinfo_cwnd = net->cwnd;
/* Smoothed RTT derived from the scaled lastsa/lastsv estimators. */
2306 paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
2307 paddri->spinfo_rto = net->RTO;
2308 paddri->spinfo_assoc_id = sctp_get_associd(stcb);
2309 SCTP_TCB_UNLOCK(stcb);
2312 case SCTP_PCB_STATUS:
2314 struct sctp_pcbinfo *spcb;
2316 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2317 kprintf("PCB status\n");
2319 #endif /* SCTP_DEBUG */
2320 if ((size_t)m->m_len < sizeof(struct sctp_pcbinfo)) {
2324 spcb = mtod(m, struct sctp_pcbinfo *);
2325 sctp_fill_pcbinfo(spcb);
2326 m->m_len = sizeof(struct sctp_pcbinfo);
/* SCTP_STATUS: association-wide counters plus primary-path info. */
2331 struct sctp_nets *net;
2332 struct sctp_status *sstat;
2334 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2335 kprintf("SCTP status\n");
2337 #endif /* SCTP_DEBUG */
2339 if ((size_t)m->m_len < sizeof(struct sctp_status)) {
2343 sstat = mtod(m, struct sctp_status *);
2345 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2346 SCTP_INP_RLOCK(inp);
2347 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2349 SCTP_TCB_LOCK(stcb);
2350 SCTP_INP_RUNLOCK(inp);
2352 stcb = sctp_findassociation_ep_asocid(inp, sstat->sstat_assoc_id);
2359 * I think passing the state is fine since
2360 * sctp_constants.h will be available to the user
2363 sstat->sstat_state = stcb->asoc.state;
2364 sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
2365 sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
2367 * We can't include chunks that have been passed
2368 * to the socket layer. Only things in queue.
2370 sstat->sstat_penddata = (stcb->asoc.cnt_on_delivery_queue +
2371 stcb->asoc.cnt_on_reasm_queue +
2372 stcb->asoc.cnt_on_all_streams);
2375 sstat->sstat_instrms = stcb->asoc.streamincnt;
2376 sstat->sstat_outstrms = stcb->asoc.streamoutcnt;
2377 sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc);
2378 memcpy(&sstat->sstat_primary.spinfo_address,
2379 &stcb->asoc.primary_destination->ro._l_addr,
2380 ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len);
2381 net = stcb->asoc.primary_destination;
2382 ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport;
2384 * Again the user can get info from sctp_constants.h
2385 * for what the state of the network is.
2387 sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK;
2388 sstat->sstat_primary.spinfo_cwnd = net->cwnd;
2389 sstat->sstat_primary.spinfo_srtt = net->lastsa;
2390 sstat->sstat_primary.spinfo_rto = net->RTO;
2391 sstat->sstat_primary.spinfo_mtu = net->mtu;
2392 sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb);
2393 SCTP_TCB_UNLOCK(stcb);
2394 m->m_len = sizeof(*sstat);
/* RTO bounds: endpoint values for id 0, else per-association values. */
2399 struct sctp_rtoinfo *srto;
2401 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2402 kprintf("RTO Info\n");
2404 #endif /* SCTP_DEBUG */
2405 if ((size_t)m->m_len < sizeof(struct sctp_rtoinfo)) {
2409 srto = mtod(m, struct sctp_rtoinfo *);
2410 if (srto->srto_assoc_id == 0) {
2411 /* Endpoint only please */
2412 SCTP_INP_RLOCK(inp);
2413 srto->srto_initial = inp->sctp_ep.initial_rto;
2414 srto->srto_max = inp->sctp_ep.sctp_maxrto;
2415 srto->srto_min = inp->sctp_ep.sctp_minrto;
2416 SCTP_INP_RUNLOCK(inp);
2419 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2420 SCTP_INP_RLOCK(inp);
2421 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2423 SCTP_TCB_LOCK(stcb);
2424 SCTP_INP_RUNLOCK(inp);
2426 stcb = sctp_findassociation_ep_asocid(inp, srto->srto_assoc_id);
2432 srto->srto_initial = stcb->asoc.initial_rto;
2433 srto->srto_max = stcb->asoc.maxrto;
2434 srto->srto_min = stcb->asoc.minrto;
2435 SCTP_TCB_UNLOCK(stcb);
2436 m->m_len = sizeof(*srto);
2439 case SCTP_ASSOCINFO:
2441 struct sctp_assocparams *sasoc;
2443 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2444 kprintf("Associnfo\n");
2446 #endif /* SCTP_DEBUG */
2447 if ((size_t)m->m_len < sizeof(struct sctp_assocparams)) {
2451 sasoc = mtod(m, struct sctp_assocparams *);
2454 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2455 SCTP_INP_RLOCK(inp);
2456 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2458 SCTP_TCB_LOCK(stcb);
2459 SCTP_INP_RUNLOCK(inp);
2461 if ((sasoc->sasoc_assoc_id) && (stcb == NULL)) {
2462 stcb = sctp_findassociation_ep_asocid(inp,
2463 sasoc->sasoc_assoc_id);
2473 sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
2474 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
2475 sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
2476 sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
2477 sasoc->sasoc_cookie_life = stcb->asoc.cookie_life;
2478 SCTP_TCB_UNLOCK(stcb);
2480 SCTP_INP_RLOCK(inp);
2481 sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
2482 sasoc->sasoc_number_peer_destinations = 0;
2483 sasoc->sasoc_peer_rwnd = 0;
2484 sasoc->sasoc_local_rwnd = ssb_space(&inp->sctp_socket->so_rcv);
2485 sasoc->sasoc_cookie_life = inp->sctp_ep.def_cookie_life;
2486 SCTP_INP_RUNLOCK(inp);
2488 m->m_len = sizeof(*sasoc);
2491 case SCTP_DEFAULT_SEND_PARAM:
2493 struct sctp_sndrcvinfo *s_info;
2495 if (m->m_len != sizeof(struct sctp_sndrcvinfo)) {
2499 s_info = mtod(m, struct sctp_sndrcvinfo *);
2500 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2501 SCTP_INP_RLOCK(inp);
2502 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2504 SCTP_TCB_LOCK(stcb);
2505 SCTP_INP_RUNLOCK(inp);
2507 stcb = sctp_findassociation_ep_asocid(inp, s_info->sinfo_assoc_id);
2514 *s_info = stcb->asoc.def_send;
2515 SCTP_TCB_UNLOCK(stcb);
2516 m->m_len = sizeof(*s_info);
2520 struct sctp_initmsg *sinit;
2522 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2523 kprintf("initmsg\n");
2525 #endif /* SCTP_DEBUG */
2526 if ((size_t)m->m_len < sizeof(struct sctp_initmsg)) {
2530 sinit = mtod(m, struct sctp_initmsg *);
2531 SCTP_INP_RLOCK(inp);
2532 sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count;
2533 sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome;
2534 sinit->sinit_max_attempts = inp->sctp_ep.max_init_times;
2535 sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max;
2536 SCTP_INP_RUNLOCK(inp);
2537 m->m_len = sizeof(*sinit);
2540 case SCTP_PRIMARY_ADDR:
2541 /* we allow a "get" operation on this */
2543 struct sctp_setprim *ssp;
2546 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2547 kprintf("setprimary\n");
2549 #endif /* SCTP_DEBUG */
2550 if ((size_t)m->m_len < sizeof(struct sctp_setprim)) {
2554 ssp = mtod(m, struct sctp_setprim *);
2555 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2556 SCTP_INP_RLOCK(inp);
2557 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2559 SCTP_TCB_LOCK(stcb);
2560 SCTP_INP_RUNLOCK(inp);
2562 stcb = sctp_findassociation_ep_asocid(inp, ssp->ssp_assoc_id);
2564 /* one last shot, try it by the address in */
2565 struct sctp_nets *net;
2567 SCTP_INP_WLOCK(inp);
2568 SCTP_INP_INCR_REF(inp);
2569 SCTP_INP_WUNLOCK(inp);
2570 stcb = sctp_findassociation_ep_addr(&inp,
2571 (struct sockaddr *)&ssp->ssp_addr,
2574 SCTP_INP_WLOCK(inp);
2575 SCTP_INP_DECR_REF(inp);
2576 SCTP_INP_WUNLOCK(inp);
2584 /* simply copy out the sockaddr_storage... */
2585 memcpy(&ssp->ssp_addr,
2586 &stcb->asoc.primary_destination->ro._l_addr,
2587 ((struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr)->sa_len);
2588 SCTP_TCB_UNLOCK(stcb);
2589 m->m_len = sizeof(*ssp);
2593 error = ENOPROTOOPT;
2596 } /* end switch (sopt->sopt_name) */
2601 sctp_optsset(struct socket *so,
2604 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
2611 int error, *mopt, set_opt;
2613 struct sctp_tcb *stcb = NULL;
2614 struct sctp_inpcb *inp;
2618 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2619 kprintf("optsset:MP is NULL EINVAL\n");
2621 #endif /* SCTP_DEBUG */
2628 inp = (struct sctp_inpcb *)so->so_pcb;
2635 case SCTP_AUTOCLOSE:
2636 case SCTP_AUTO_ASCONF:
2637 case SCTP_DISABLE_FRAGMENTS:
2638 case SCTP_I_WANT_MAPPED_V4_ADDR:
2639 /* copy in the option value */
2640 if ((size_t)m->m_len < sizeof(int)) {
2644 mopt = mtod(m, int *);
2649 case SCTP_DISABLE_FRAGMENTS:
2650 set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT;
2652 case SCTP_AUTO_ASCONF:
2653 set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
2656 case SCTP_I_WANT_MAPPED_V4_ADDR:
2657 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2658 return (EOPNOTSUPP);
2664 set_opt = SCTP_PCB_FLAGS_NODELAY;
2666 case SCTP_AUTOCLOSE:
2667 set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
2669 * The value is in ticks.
2670 * Note this does not effect old associations, only
2673 inp->sctp_ep.auto_close_time = (*mopt * hz);
2676 SCTP_INP_WLOCK(inp);
2678 inp->sctp_flags |= set_opt;
2680 inp->sctp_flags &= ~set_opt;
2682 SCTP_INP_WUNLOCK(inp);
2684 case SCTP_MY_PUBLIC_KEY: /* set my public key */
2685 case SCTP_SET_AUTH_CHUNKS: /* set the authenticated chunks required */
2686 case SCTP_SET_AUTH_SECRET: /* set the actual secret for the endpoint */
2687 /* not supported yet and until we refine the draft */
2691 case SCTP_CLR_STAT_LOG:
2692 #ifdef SCTP_STAT_LOGGING
2693 sctp_clr_stat_log();
2698 case SCTP_DELAYED_ACK_TIME:
2701 if ((size_t)m->m_len < sizeof(int32_t)) {
2705 tm = mtod(m, int32_t *);
2707 if ((*tm < 10) || (*tm > 500)) {
2708 /* can't be smaller than 10ms */
2709 /* MUST NOT be larger than 500ms */
2713 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(*tm);
2716 case SCTP_RESET_STREAMS:
2718 struct sctp_stream_reset *strrst;
2719 uint8_t two_way, not_peer;
2721 if ((size_t)m->m_len < sizeof(struct sctp_stream_reset)) {
2725 strrst = mtod(m, struct sctp_stream_reset *);
2727 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2728 SCTP_INP_RLOCK(inp);
2729 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2731 SCTP_TCB_LOCK(stcb);
2732 SCTP_INP_RUNLOCK(inp);
2734 stcb = sctp_findassociation_ep_asocid(inp, strrst->strrst_assoc_id);
2739 if (stcb->asoc.peer_supports_strreset == 0) {
2740 /* Peer does not support it,
2741 * we return protocol not supported since
2742 * this is true for this feature and this
2743 * peer, not the socket request in general.
2745 error = EPROTONOSUPPORT;
2746 SCTP_TCB_UNLOCK(stcb);
2750 /* Having re-thought this code I added as I write the I-D there
2751 * is NO need for it. The peer, if we are requesting a stream-reset
2752 * will send a request to us but will itself do what we do, take
2753 * and copy off the "reset information" we send and queue TSN's
2754 * larger than the send-next in our response message. Thus they
2757 /* if (stcb->asoc.sending_seq != (stcb->asoc.last_acked_seq + 1)) {*/
2758 /* Must have all sending data ack'd before we
2759 * start this procedure. This is a bit restrictive
2760 * and we SHOULD work on changing this so ONLY the
2761 * streams being RESET get held up. So, a reset-all
2762 * would require this.. but a reset specific just
2763 * needs to be sure that the ones being reset have
2764 * nothing on the send_queue. For now we will
2765 * skip this more detailed method and do a course
2766 * way.. i.e. nothing pending ... for future FIX ME!
2772 if (stcb->asoc.stream_reset_outstanding) {
2774 SCTP_TCB_UNLOCK(stcb);
2777 if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) {
2780 } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) {
2783 } else if (strrst->strrst_flags == SCTP_RESET_BOTH) {
2788 SCTP_TCB_UNLOCK(stcb);
2791 sctp_send_str_reset_req(stcb, strrst->strrst_num_streams,
2792 strrst->strrst_list, two_way, not_peer);
2793 sctp_chunk_output(inp, stcb, 12);
2794 SCTP_TCB_UNLOCK(stcb);
2798 case SCTP_RESET_PEGS:
2799 memset(sctp_pegs, 0, sizeof(sctp_pegs));
2802 case SCTP_CONNECT_X:
2803 if ((size_t)m->m_len < (sizeof(int) + sizeof(struct sockaddr_in))) {
2807 error = sctp_do_connect_x(so, inp, m, p, 0);
2810 case SCTP_CONNECT_X_DELAYED:
2811 if ((size_t)m->m_len < (sizeof(int) + sizeof(struct sockaddr_in))) {
2815 error = sctp_do_connect_x(so, inp, m, p, 1);
2818 case SCTP_CONNECT_X_COMPLETE:
2820 struct sockaddr *sa;
2821 struct sctp_nets *net;
2822 if ((size_t)m->m_len < sizeof(struct sockaddr_in)) {
2826 sa = mtod(m, struct sockaddr *);
2828 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2829 SCTP_INP_RLOCK(inp);
2830 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2832 SCTP_TCB_LOCK(stcb);
2833 net = sctp_findnet(stcb, sa);
2835 SCTP_INP_RUNLOCK(inp);
2837 SCTP_INP_WLOCK(inp);
2838 SCTP_INP_INCR_REF(inp);
2839 SCTP_INP_WUNLOCK(inp);
2840 stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL);
2842 SCTP_INP_WLOCK(inp);
2843 SCTP_INP_DECR_REF(inp);
2844 SCTP_INP_WUNLOCK(inp);
2852 if (stcb->asoc.delayed_connection == 1) {
2853 stcb->asoc.delayed_connection = 0;
2854 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
2855 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
2856 sctp_send_initiate(inp, stcb);
2858 /* already expired or did not use delayed connectx */
2861 SCTP_TCB_UNLOCK(stcb);
2867 SCTP_INP_WLOCK(inp);
2868 burst = mtod(m, u_int8_t *);
2870 inp->sctp_ep.max_burst = *burst;
2872 SCTP_INP_WUNLOCK(inp);
2879 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2880 ovh = SCTP_MED_OVERHEAD;
2882 ovh = SCTP_MED_V4_OVERHEAD;
2884 segsize = mtod(m, u_int32_t *);
2889 SCTP_INP_WLOCK(inp);
2890 inp->sctp_frag_point = (*segsize+ovh);
2891 if (inp->sctp_frag_point < MHLEN) {
2892 inp->sctp_frag_point = MHLEN;
2894 SCTP_INP_WUNLOCK(inp);
2897 case SCTP_SET_DEBUG_LEVEL:
2901 if ((size_t)m->m_len < sizeof(u_int32_t)) {
2905 level = mtod(m, u_int32_t *);
2907 sctp_debug_on = (*level & (SCTP_DEBUG_ALL |
2909 kprintf("SETTING DEBUG LEVEL to %x\n",
2910 (u_int)sctp_debug_on);
2915 #endif /* SCTP_DEBUG */
2919 struct sctp_event_subscribe *events;
2920 if ((size_t)m->m_len < sizeof(struct sctp_event_subscribe)) {
2924 SCTP_INP_WLOCK(inp);
2925 events = mtod(m, struct sctp_event_subscribe *);
2926 if (events->sctp_data_io_event) {
2927 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVDATAIOEVNT;
2929 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVDATAIOEVNT;
2932 if (events->sctp_association_event) {
2933 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVASSOCEVNT;
2935 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVASSOCEVNT;
2938 if (events->sctp_address_event) {
2939 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVPADDREVNT;
2941 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVPADDREVNT;
2944 if (events->sctp_send_failure_event) {
2945 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVSENDFAILEVNT;
2947 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVSENDFAILEVNT;
2950 if (events->sctp_peer_error_event) {
2951 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVPEERERR;
2953 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVPEERERR;
2956 if (events->sctp_shutdown_event) {
2957 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT;
2959 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT;
2962 if (events->sctp_partial_delivery_event) {
2963 inp->sctp_flags |= SCTP_PCB_FLAGS_PDAPIEVNT;
2965 inp->sctp_flags &= ~SCTP_PCB_FLAGS_PDAPIEVNT;
2968 if (events->sctp_adaption_layer_event) {
2969 inp->sctp_flags |= SCTP_PCB_FLAGS_ADAPTIONEVNT;
2971 inp->sctp_flags &= ~SCTP_PCB_FLAGS_ADAPTIONEVNT;
2974 if (events->sctp_stream_reset_events) {
2975 inp->sctp_flags |= SCTP_PCB_FLAGS_STREAM_RESETEVNT;
2977 inp->sctp_flags &= ~SCTP_PCB_FLAGS_STREAM_RESETEVNT;
2979 SCTP_INP_WUNLOCK(inp);
2983 case SCTP_ADAPTION_LAYER:
2985 struct sctp_setadaption *adap_bits;
2986 if ((size_t)m->m_len < sizeof(struct sctp_setadaption)) {
2990 SCTP_INP_WLOCK(inp);
2991 adap_bits = mtod(m, struct sctp_setadaption *);
2992 inp->sctp_ep.adaption_layer_indicator = adap_bits->ssb_adaption_ind;
2993 SCTP_INP_WUNLOCK(inp);
2996 case SCTP_SET_INITIAL_DBG_SEQ:
2999 if ((size_t)m->m_len < sizeof(u_int32_t)) {
3003 SCTP_INP_WLOCK(inp);
3004 vvv = mtod(m, u_int32_t *);
3005 inp->sctp_ep.initial_sequence_debug = *vvv;
3006 SCTP_INP_WUNLOCK(inp);
3009 case SCTP_DEFAULT_SEND_PARAM:
3011 struct sctp_sndrcvinfo *s_info;
3013 if (m->m_len != sizeof(struct sctp_sndrcvinfo)) {
3017 s_info = mtod(m, struct sctp_sndrcvinfo *);
3019 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3020 SCTP_INP_RLOCK(inp);
3021 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3023 SCTP_TCB_LOCK(stcb);
3024 SCTP_INP_RUNLOCK(inp);
3026 stcb = sctp_findassociation_ep_asocid(inp, s_info->sinfo_assoc_id);
3032 /* Validate things */
3033 if (s_info->sinfo_stream > stcb->asoc.streamoutcnt) {
3034 SCTP_TCB_UNLOCK(stcb);
3038 /* Mask off the flags that are allowed */
3039 s_info->sinfo_flags = (s_info->sinfo_flags &
3040 (MSG_UNORDERED | MSG_ADDR_OVER |
3041 MSG_PR_SCTP_TTL | MSG_PR_SCTP_BUF));
3043 stcb->asoc.def_send = *s_info;
3044 SCTP_TCB_UNLOCK(stcb);
3047 case SCTP_PEER_ADDR_PARAMS:
3049 struct sctp_paddrparams *paddrp;
3050 struct sctp_nets *net;
3051 if ((size_t)m->m_len < sizeof(struct sctp_paddrparams)) {
3055 paddrp = mtod(m, struct sctp_paddrparams *);
3057 if (paddrp->spp_assoc_id) {
3058 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3059 SCTP_INP_RLOCK(inp);
3060 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3062 SCTP_TCB_LOCK(stcb);
3063 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
3065 SCTP_INP_RUNLOCK(inp);
3067 stcb = sctp_findassociation_ep_asocid(inp, paddrp->spp_assoc_id);
3074 if ((stcb == NULL) &&
3075 ((((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET) ||
3076 (((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET6))) {
3077 /* Lookup via address */
3078 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3079 SCTP_INP_RLOCK(inp);
3080 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3082 SCTP_TCB_LOCK(stcb);
3083 net = sctp_findnet(stcb,
3084 (struct sockaddr *)&paddrp->spp_address);
3086 SCTP_INP_RUNLOCK(inp);
3088 SCTP_INP_WLOCK(inp);
3089 SCTP_INP_INCR_REF(inp);
3090 SCTP_INP_WUNLOCK(inp);
3091 stcb = sctp_findassociation_ep_addr(&inp,
3092 (struct sockaddr *)&paddrp->spp_address,
3095 SCTP_INP_WLOCK(inp);
3096 SCTP_INP_DECR_REF(inp);
3097 SCTP_INP_WUNLOCK(inp);
3101 /* Effects the Endpoint */
3105 /* Applies to the specific association */
3106 if (paddrp->spp_pathmaxrxt) {
3108 if (paddrp->spp_pathmaxrxt)
3109 net->failure_threshold = paddrp->spp_pathmaxrxt;
3111 if (paddrp->spp_pathmaxrxt)
3112 stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt;
3115 if ((paddrp->spp_hbinterval != 0) && (paddrp->spp_hbinterval != 0xffffffff)) {
3119 net->dest_state &= ~SCTP_ADDR_NOHB;
3121 old = stcb->asoc.heart_beat_delay;
3122 stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval;
3124 /* Turn back on the timer */
3125 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3128 } else if (paddrp->spp_hbinterval == 0xffffffff) {
3130 sctp_send_hb(stcb, 1, net);
3133 /* off on association */
3134 if (stcb->asoc.heart_beat_delay) {
3135 int cnt_of_unconf = 0;
3136 struct sctp_nets *lnet;
3137 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
3138 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
3142 /* stop the timer ONLY if we have no unconfirmed addresses
3144 if (cnt_of_unconf == 0)
3145 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3147 stcb->asoc.heart_beat_delay = 0;
3149 net->dest_state |= SCTP_ADDR_NOHB;
3152 SCTP_TCB_UNLOCK(stcb);
3154 /* Use endpoint defaults */
3155 SCTP_INP_WLOCK(inp);
3156 if (paddrp->spp_pathmaxrxt)
3157 inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt;
3158 if (paddrp->spp_hbinterval != SCTP_ISSUE_HB)
3159 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = paddrp->spp_hbinterval;
3160 SCTP_INP_WUNLOCK(inp);
3166 struct sctp_rtoinfo *srto;
3167 if ((size_t)m->m_len < sizeof(struct sctp_rtoinfo)) {
3171 srto = mtod(m, struct sctp_rtoinfo *);
3172 if (srto->srto_assoc_id == 0) {
3173 SCTP_INP_WLOCK(inp);
3174 /* If we have a null asoc, its default for the endpoint */
3175 if (srto->srto_initial > 10)
3176 inp->sctp_ep.initial_rto = srto->srto_initial;
3177 if (srto->srto_max > 10)
3178 inp->sctp_ep.sctp_maxrto = srto->srto_max;
3179 if (srto->srto_min > 10)
3180 inp->sctp_ep.sctp_minrto = srto->srto_min;
3181 SCTP_INP_WUNLOCK(inp);
3184 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3185 SCTP_INP_RLOCK(inp);
3186 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3188 SCTP_TCB_LOCK(stcb);
3189 SCTP_INP_RUNLOCK(inp);
3191 stcb = sctp_findassociation_ep_asocid(inp, srto->srto_assoc_id);
3196 /* Set in ms we hope :-) */
3197 if (srto->srto_initial > 10)
3198 stcb->asoc.initial_rto = srto->srto_initial;
3199 if (srto->srto_max > 10)
3200 stcb->asoc.maxrto = srto->srto_max;
3201 if (srto->srto_min > 10)
3202 stcb->asoc.minrto = srto->srto_min;
3203 SCTP_TCB_UNLOCK(stcb);
/*
 * SCTP_ASSOCINFO (set): apply sctp_assocparams either to one
 * association (when sasoc_assoc_id is given) or to the endpoint
 * defaults.  NOTE: surrounding lines are elided in this view.
 */
3206 case SCTP_ASSOCINFO:
3208 struct sctp_assocparams *sasoc;
3210 if ((size_t)m->m_len < sizeof(struct sctp_assocparams)) {
3214 sasoc = mtod(m, struct sctp_assocparams *);
3215 if (sasoc->sasoc_assoc_id) {
3216 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3217 SCTP_INP_RLOCK(inp);
3218 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3220 SCTP_TCB_LOCK(stcb);
3221 SCTP_INP_RUNLOCK(inp);
3223 stcb = sctp_findassociation_ep_asocid(inp,
3224 sasoc->sasoc_assoc_id);
3234 if (sasoc->sasoc_asocmaxrxt)
3235 stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
/* NOTE(review): on a SET path the next three lines write back into the
 * caller-supplied option buffer (read-only fields per the sockets API);
 * the kernel copy is not copied out on set, so these stores appear to
 * be dead -- verify against the optsget path. */
3236 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
3237 sasoc->sasoc_peer_rwnd = 0;
3238 sasoc->sasoc_local_rwnd = 0;
/* NOTE(review): this branch guards on the CURRENT value
 * (stcb->asoc.cookie_life) while the endpoint branch below guards on
 * the REQUESTED value (sasoc->sasoc_cookie_life); a cookie_life of 0
 * can therefore never be changed per-assoc.  Looks like a typo for
 * `if (sasoc->sasoc_cookie_life)` -- confirm against upstream. */
3239 if (stcb->asoc.cookie_life)
3240 stcb->asoc.cookie_life = sasoc->sasoc_cookie_life;
3241 SCTP_TCB_UNLOCK(stcb);
3243 SCTP_INP_WLOCK(inp);
3244 if (sasoc->sasoc_asocmaxrxt)
3245 inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt;
3246 sasoc->sasoc_number_peer_destinations = 0;
3247 sasoc->sasoc_peer_rwnd = 0;
3248 sasoc->sasoc_local_rwnd = 0;
3249 if (sasoc->sasoc_cookie_life)
3250 inp->sctp_ep.def_cookie_life = sasoc->sasoc_cookie_life;
3251 SCTP_INP_WUNLOCK(inp);
3257 struct sctp_initmsg *sinit;
3259 if ((size_t)m->m_len < sizeof(struct sctp_initmsg)) {
3263 sinit = mtod(m, struct sctp_initmsg *);
3264 SCTP_INP_WLOCK(inp);
3265 if (sinit->sinit_num_ostreams)
3266 inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams;
3268 if (sinit->sinit_max_instreams)
3269 inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams;
3271 if (sinit->sinit_max_attempts)
3272 inp->sctp_ep.max_init_times = sinit->sinit_max_attempts;
3274 if (sinit->sinit_max_init_timeo > 10)
3275 /* We must be at least a 100ms (we set in ticks) */
3276 inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo;
3277 SCTP_INP_WUNLOCK(inp);
3280 case SCTP_PRIMARY_ADDR:
3282 struct sctp_setprim *spa;
3283 struct sctp_nets *net, *lnet;
3284 if ((size_t)m->m_len < sizeof(struct sctp_setprim)) {
3288 spa = mtod(m, struct sctp_setprim *);
3290 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3291 SCTP_INP_RLOCK(inp);
3292 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3294 SCTP_TCB_LOCK(stcb);
3299 SCTP_INP_RUNLOCK(inp);
3301 stcb = sctp_findassociation_ep_asocid(inp, spa->ssp_assoc_id);
3304 SCTP_INP_WLOCK(inp);
3305 SCTP_INP_INCR_REF(inp);
3306 SCTP_INP_WUNLOCK(inp);
3307 stcb = sctp_findassociation_ep_addr(&inp,
3308 (struct sockaddr *)&spa->ssp_addr,
3311 SCTP_INP_WLOCK(inp);
3312 SCTP_INP_DECR_REF(inp);
3313 SCTP_INP_WUNLOCK(inp);
3318 /* find the net, associd or connected lookup type */
3319 net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr);
3321 SCTP_TCB_UNLOCK(stcb);
3326 if ((net != stcb->asoc.primary_destination) &&
3327 (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
3328 /* Ok we need to set it */
3329 lnet = stcb->asoc.primary_destination;
3330 lnet->next_tsn_at_change = net->next_tsn_at_change = stcb->asoc.sending_seq;
3331 if (sctp_set_primary_addr(stcb,
3334 if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3335 net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH;
3337 net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY;
3340 SCTP_TCB_UNLOCK(stcb);
3344 case SCTP_SET_PEER_PRIMARY_ADDR:
3346 struct sctp_setpeerprim *sspp;
3347 if ((size_t)m->m_len < sizeof(struct sctp_setpeerprim)) {
3351 sspp = mtod(m, struct sctp_setpeerprim *);
3354 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3355 SCTP_INP_RLOCK(inp);
3356 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3358 SCTP_TCB_UNLOCK(stcb);
3359 SCTP_INP_RUNLOCK(inp);
3361 stcb = sctp_findassociation_ep_asocid(inp, sspp->sspp_assoc_id);
3366 if (sctp_set_primary_ip_address_sa(stcb, (struct sockaddr *)&sspp->sspp_addr) != 0) {
3369 SCTP_TCB_UNLOCK(stcb);
3372 case SCTP_BINDX_ADD_ADDR:
3374 struct sctp_getaddresses *addrs;
3375 struct sockaddr *addr_touse;
3376 /* see if we're bound all already! */
3377 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3381 if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
3385 addrs = mtod(m, struct sctp_getaddresses *);
3386 addr_touse = addrs->addr;
3387 if (addrs->addr->sa_family == AF_INET6) {
3388 struct sockaddr_in6 *sin6;
3389 sin6 = (struct sockaddr_in6 *)addr_touse;
3390 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
3391 error = EADDRNOTAVAIL;
3395 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
3397 /* Can't get proc for Net/Open BSD */
3401 error = sctp_inpcb_bind(so, addr_touse, p);
3404 /* No locks required here since bind and mgmt_ep_sa all
3405 * do their own locking. If we do something for the FIX:
3406 * below we may need to lock in that case.
3408 if (addrs->sget_assoc_id == 0) {
3409 /* add the address */
3410 struct sctp_inpcb *lep;
3411 ((struct sockaddr_in *)addr_touse)->sin_port = inp->sctp_lport;
3412 lep = sctp_pcb_findep(addr_touse, 1, 0);
3414 /* We must decrement the refcount
3415 * since we have the ep already and
3416 * are binding. No remove going on
3419 SCTP_INP_WLOCK(inp);
3420 SCTP_INP_DECR_REF(inp);
3421 SCTP_INP_WUNLOCK(inp);
3424 /* already bound to it.. ok */
3426 } else if (lep == NULL) {
3427 ((struct sockaddr_in *)addr_touse)->sin_port = 0;
3428 error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
3429 SCTP_ADD_IP_ADDRESS);
3431 error = EADDRNOTAVAIL;
3437 /* FIX: decide whether we allow assoc based bindx */
3441 case SCTP_BINDX_REM_ADDR:
3443 struct sctp_getaddresses *addrs;
3444 struct sockaddr *addr_touse;
3445 /* see if we're bound all already! */
3446 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3450 if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
3454 addrs = mtod(m, struct sctp_getaddresses *);
3455 addr_touse = addrs->addr;
3456 if (addrs->addr->sa_family == AF_INET6) {
3457 struct sockaddr_in6 *sin6;
3458 sin6 = (struct sockaddr_in6 *)addr_touse;
3459 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
3460 error = EADDRNOTAVAIL;
3464 /* No lock required mgmt_ep_sa does its own locking. If
3465 * the FIX: below is ever changed we may need to
3466 * lock before calling association level binding.
3468 if (addrs->sget_assoc_id == 0) {
3469 /* delete the address */
3470 sctp_addr_mgmt_ep_sa(inp, addr_touse,
3471 SCTP_DEL_IP_ADDRESS);
3473 /* FIX: decide whether we allow assoc based bindx */
3478 error = ENOPROTOOPT;
3480 } /* end switch (opt) */
/*
 * sctp_ctloutput() - netmsg entry point for [gs]etsockopt on an SCTP
 * socket.  Copies the option value into an mbuf (capped at MCLBYTES),
 * dispatches to sctp_optsset()/sctp_optsget(), and for SOPT_GET copies
 * the result back to the user via sooptcopyout().  Options at a level
 * other than IPPROTO_SCTP are forwarded to the IP(v6) layer.
 * NOTE: error paths/frees between the visible lines are elided here.
 */
3485 sctp_ctloutput(netmsg_t msg)
3487 struct socket *so = msg->ctloutput.base.nm_so;
3488 struct sockopt *sopt = msg->ctloutput.nm_sopt;
3489 struct mbuf *m = NULL;
3490 struct sctp_inpcb *inp;
3493 inp = (struct sctp_inpcb *)so->so_pcb;
3496 /* I made the same as TCP since we are not setup? */
3500 if (sopt->sopt_level != IPPROTO_SCTP) {
3501 /* wrong proto level... send back up to IP */
3503 if (INP_CHECK_SOCKAF(so, AF_INET6))
3504 ip6_ctloutput_dispatch(msg);
3508 /* msg invalid now */
3511 if (sopt->sopt_valsize > MCLBYTES) {
3513 * Restrict us down to a cluster size, that's all we can
3514 * pass either way...
3516 sopt->sopt_valsize = MCLBYTES;
3518 if (sopt->sopt_valsize) {
3520 m = m_get(MB_WAIT, MT_DATA);
3521 if (sopt->sopt_valsize > MLEN) {
/* Value won't fit in a plain mbuf: attach a cluster (may fail). */
3522 MCLGET(m, MB_DONTWAIT);
3523 if ((m->m_flags & M_EXT) == 0) {
3529 error = sooptcopyin(sopt, mtod(m, caddr_t), sopt->sopt_valsize,
3530 sopt->sopt_valsize);
3535 m->m_len = sopt->sopt_valsize;
3537 if (sopt->sopt_dir == SOPT_SET) {
3538 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
3539 error = sctp_optsset(so, sopt->sopt_name, &m, sopt->sopt_td);
3541 error = sctp_optsset(so, sopt->sopt_name, &m, sopt->sopt_p);
3543 } else if (sopt->sopt_dir == SOPT_GET) {
3544 #if (defined (__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
3545 error = sctp_optsget(so, sopt->sopt_name, &m, sopt->sopt_td);
3547 error = sctp_optsget(so, sopt->sopt_name, &m, sopt->sopt_p);
3552 if ( (error == 0) && (m != NULL)) {
/* GET succeeded: copy the (possibly rewritten) mbuf back out. */
3553 error = sooptcopyout(sopt, mtod(m, caddr_t), m->m_len);
3555 } else if (m != NULL) {
3559 lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_connect() - netmsg entry point for connect(2).  Binds an
 * ephemeral port if still unbound, refuses a second connect in the
 * TCP model, refuses a duplicate association, then allocates a new
 * association, moves it to COOKIE_WAIT and sends the INIT.
 * NOTE: error assignments/returns between visible lines are elided.
 */
3563 sctp_connect(netmsg_t msg)
3565 struct socket *so = msg->connect.base.nm_so;
3566 struct sockaddr *addr = msg->connect.nm_nam;
3567 struct sctp_inpcb *inp;
3568 struct sctp_tcb *stcb;
3572 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
3573 kprintf("Connect called in SCTP to ");
3574 sctp_print_address(addr);
3575 kprintf("Port %d\n", ntohs(((struct sockaddr_in *)addr)->sin_port));
3577 #endif /* SCTP_DEBUG */
3578 inp = (struct sctp_inpcb *)so->so_pcb;
3580 /* I made the same as TCP since we are not setup? */
3584 SCTP_ASOC_CREATE_LOCK(inp);
3585 SCTP_INP_WLOCK(inp);
/* NOTE(review): both sides of this || test the SAME flag
 * (SCTP_PCB_FLAGS_SOCKET_GONE), making the second clause dead.
 * Upstream FreeBSD tests SCTP_PCB_FLAGS_SOCKET_ALLGONE in the
 * second clause -- looks like a copy/paste bug, verify and fix. */
3586 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3587 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3588 /* Should I really unlock ? */
3589 SCTP_INP_WUNLOCK(inp);
3590 SCTP_ASOC_CREATE_UNLOCK(inp);
/* v6 destination on a v4-only socket is rejected. */
3595 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
3596 (addr->sa_family == AF_INET6)) {
3597 SCTP_INP_WUNLOCK(inp);
3598 SCTP_ASOC_CREATE_UNLOCK(inp);
3603 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
3604 SCTP_PCB_FLAGS_UNBOUND) {
3605 /* Bind a ephemeral port */
3606 SCTP_INP_WUNLOCK(inp);
3607 error = sctp_inpcb_bind(so, NULL, msg->connect.nm_td);
3609 SCTP_ASOC_CREATE_UNLOCK(inp);
3612 SCTP_INP_WLOCK(inp);
3614 /* Now do we connect? */
3615 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3616 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
3617 /* We are already connected AND the TCP model */
3618 SCTP_INP_WUNLOCK(inp);
3619 SCTP_ASOC_CREATE_UNLOCK(inp);
3623 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3624 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3626 SCTP_TCB_UNLOCK(stcb);
3627 SCTP_INP_WUNLOCK(inp);
/* Hold a ref across the unlocked association lookup. */
3629 SCTP_INP_INCR_REF(inp);
3630 SCTP_INP_WUNLOCK(inp);
3631 stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
3633 SCTP_INP_WLOCK(inp);
3634 SCTP_INP_DECR_REF(inp);
3635 SCTP_INP_WUNLOCK(inp);
3639 /* Already have or am bring up an association */
3640 SCTP_ASOC_CREATE_UNLOCK(inp);
3641 SCTP_TCB_UNLOCK(stcb);
3645 /* We are GOOD to go */
3646 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0);
3648 /* Gak! no memory */
3651 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
3652 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
3653 /* Set the connected flag so we can queue data */
3656 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
3657 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
3658 sctp_send_initiate(inp, stcb);
3659 SCTP_ASOC_CREATE_UNLOCK(inp);
3660 SCTP_TCB_UNLOCK(stcb);
3662 lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_usr_recvd() - netmsg entry point called after the user has
 * consumed data from the receive buffer.  Updates rwnd bookkeeping,
 * services pending delivery/reassembly queues, and sends a window
 * update SACK when the receive window grew by enough (>= one smallest
 * MTU * SCTP_SEG_TO_RWND_UPD, or a fraction of the high-water mark).
 * Also maintains the sctp_queue_list used by the peeloff hack.
 * NOTE: several intermediate lines (declarations of incr/sq_cnt/
 * done_yet, braces, early returns) are elided in this view.
 */
3666 sctp_usr_recvd(netmsg_t msg)
3668 struct socket *so = msg->rcvd.base.nm_so;
3669 struct sctp_socket_q_list *sq = NULL;
3670 int flags = msg->rcvd.nm_flags;
3674 * The user has received some data, we may be able to stuff more
3675 * up the socket. And we need to possibly update the rwnd.
3677 struct sctp_inpcb *inp;
3678 struct sctp_tcb *stcb=NULL;
3680 inp = (struct sctp_inpcb *)so->so_pcb;
3682 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3683 kprintf("Read for so:%p inp:%p Flags:%x\n",
3684 so, inp, (u_int)flags);
3688 /* I made the same as TCP since we are not setup? */
3690 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3691 kprintf("Nope, connection reset\n");
3697 * Grab the first one on the list. It will re-insert itself if
3698 * it runs out of room
3700 SCTP_INP_WLOCK(inp);
/* UDP-model endpoint (not TCP pool, not connected) finishing a
 * record: refresh the pending vtag and peek the socket queue. */
3701 if ((flags & MSG_EOR) && ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0)
3702 && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3703 /* Ok the other part of our grubby tracking
3704 * stuff for our horrible layer violation that
3705 * the tsvwg thinks is ok for sctp_peeloff.. gak!
3706 * We must update the next vtag pending on the
3707 * socket buffer (if any).
3709 inp->sctp_vtag_first = sctp_get_first_vtag_from_sb(so);
3710 sq = TAILQ_FIRST(&inp->sctp_queue_list);
3717 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3720 SCTP_TCB_LOCK(stcb);
3723 /* all code in normal stcb path assumes
3724 * that you have a tcb_lock only. Thus
3725 * we must release the inp write lock.
3727 if (flags & MSG_EOR) {
3728 if (((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0)
3729 && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3730 stcb = sctp_remove_from_socket_q(inp);
3733 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3734 kprintf("remove from socket queue for inp:%p tcbret:%p\n",
/* Credit back the mbuf (and cmsg, if data-io events are on)
 * overhead that was charged against the rwnd control length. */
3738 stcb->asoc.my_rwnd_control_len = sctp_sbspace_sub(stcb->asoc.my_rwnd_control_len,
3739 sizeof(struct mbuf));
3740 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) {
3741 stcb->asoc.my_rwnd_control_len = sctp_sbspace_sub(stcb->asoc.my_rwnd_control_len,
3742 CMSG_LEN(sizeof(struct sctp_sndrcvinfo)));
3745 if ((TAILQ_EMPTY(&stcb->asoc.delivery_queue) == 0) ||
3746 (TAILQ_EMPTY(&stcb->asoc.reasmqueue) == 0)) {
3747 /* Deliver if there is something to be delivered */
3748 sctp_service_queues(stcb, &stcb->asoc, 1);
3750 sctp_set_rwnd(stcb, &stcb->asoc);
3751 /* if we increase by 1 or more MTU's (smallest MTUs of all
3752 * nets) we send a window update sack
3754 incr = stcb->asoc.my_rwnd - stcb->asoc.my_last_reported_rwnd;
3758 if (((uint32_t)incr >= (stcb->asoc.smallest_mtu * SCTP_SEG_TO_RWND_UPD)) ||
3759 ((((uint32_t)incr)*SCTP_SCALE_OF_RWND_TO_UPD) >= so->so_rcv.ssb_hiwat)) {
3760 if (callout_pending(&stcb->asoc.dack_timer.timer)) {
3761 /* If the timer is up, stop it */
3762 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
3763 stcb->sctp_ep, stcb, NULL);
3765 /* Send the sack, with the new rwnd */
3766 sctp_send_sack(stcb);
3767 /* Now do the output */
3768 sctp_chunk_output(inp, stcb, 10);
/* No-stcb path: same EOR/queue maintenance as above. */
3771 if ((( sq ) && (flags & MSG_EOR) && ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0))
3772 && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3773 stcb = sctp_remove_from_socket_q(inp);
3776 SOCKBUF_LOCK(&so->so_rcv);
3777 if (( so->so_rcv.ssb_mb == NULL ) &&
3778 (TAILQ_EMPTY(&inp->sctp_queue_list) == 0)) {
3781 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3782 kprintf("Something off, inp:%p so->so_rcv->ssb_mb is empty and sockq is not.. cleaning\n",
3785 if (((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0)
3786 && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
/* Drain stale socket-queue entries until the list is empty. */
3788 done_yet = TAILQ_EMPTY(&inp->sctp_queue_list);
3791 sctp_remove_from_socket_q(inp);
3792 done_yet = TAILQ_EMPTY(&inp->sctp_queue_list);
3796 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3797 kprintf("Cleaned up %d sockq's\n", sq_cnt);
3800 SOCKBUF_UNLOCK(&so->so_rcv);
3802 SCTP_TCB_UNLOCK(stcb);
3803 SCTP_INP_WUNLOCK(inp);
3806 lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_listen() - netmsg entry point for listen(2).  Rejects listen on
 * a connected TCP-model socket, binds an ephemeral port if unbound,
 * then translates the socket backlog into the ACCEPTING flag (UDP
 * model sockets never allow accept(), so SO_ACCEPTCONN is stripped).
 */
3810 sctp_listen(netmsg_t msg)
3812 struct socket *so = msg->listen.base.nm_so;
3816 * Note this module depends on the protocol processing being
3817 * called AFTER any socket level flags and backlog are applied
3818 * to the socket. The traditional way that the socket flags are
3819 * applied is AFTER protocol processing. We have made a change
3820 * to the sys/kern/uipc_socket.c module to reverse this but this
3821 * MUST be in place if the socket API for SCTP is to work properly.
3823 struct sctp_inpcb *inp;
3825 inp = (struct sctp_inpcb *)so->so_pcb;
3827 /* I made the same as TCP since we are not setup? */
3831 SCTP_INP_RLOCK(inp);
3832 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3833 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
3834 /* We are already connected AND the TCP model */
3835 SCTP_INP_RUNLOCK(inp);
3839 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
3840 /* We must do a bind. */
3841 SCTP_INP_RUNLOCK(inp);
3842 if ((error = sctp_inpcb_bind(so, NULL, msg->listen.nm_td))) {
3843 /* bind error, probably perm */
3847 SCTP_INP_RUNLOCK(inp);
3850 SCTP_INP_WLOCK(inp);
3851 if (inp->sctp_socket->so_qlimit) {
3852 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
3854 * For the UDP model we must TURN OFF the ACCEPT
3855 * flags since we do NOT allow the accept() call.
3856 * The TCP model (when present) will do accept which
3857 * then prohibits connect().
3859 inp->sctp_socket->so_options &= ~SO_ACCEPTCONN;
3861 inp->sctp_flags |= SCTP_PCB_FLAGS_ACCEPTING;
3863 if (inp->sctp_flags & SCTP_PCB_FLAGS_ACCEPTING) {
3865 * Turning off the listen flags if the backlog is
3866 * set to 0 (i.e. qlimit is 0).
3868 inp->sctp_flags &= ~SCTP_PCB_FLAGS_ACCEPTING;
3870 inp->sctp_socket->so_options &= ~SO_ACCEPTCONN;
3872 SCTP_INP_WUNLOCK(inp);
3876 lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_accept() - netmsg entry point for accept(2).  Returns the
 * primary destination address of the first association as the peer
 * name (kmalloc'd sockaddr on FreeBSD/APPLE/DragonFly, mbuf-based on
 * the other BSDs), then performs any wakeups that were deferred while
 * SCTP_PCB_FLAGS_DONT_WAKE was set.
 * NOTE: error assignments between visible lines are elided.
 */
3880 sctp_accept(netmsg_t msg)
3882 struct socket *so = msg->accept.base.nm_so;
3883 struct sockaddr **addr = msg->accept.nm_nam;
3884 struct sctp_tcb *stcb;
3885 struct sockaddr *prim;
3886 struct sctp_inpcb *inp;
3889 inp = (struct sctp_inpcb *)so->so_pcb;
3895 SCTP_INP_RLOCK(inp);
3896 if (so->so_state & SS_ISDISCONNECTED) {
3897 SCTP_INP_RUNLOCK(inp);
3898 error = ECONNABORTED;
3901 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3903 SCTP_INP_RUNLOCK(inp);
3907 SCTP_TCB_LOCK(stcb);
3908 SCTP_INP_RUNLOCK(inp);
3909 prim = (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr;
3910 if (prim->sa_family == AF_INET) {
3911 struct sockaddr_in *sin;
3912 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
3913 sin = kmalloc(sizeof *sin, M_SONAME, M_WAITOK | M_ZERO);
3915 sin = (struct sockaddr_in *)addr;
3916 bzero((caddr_t)sin, sizeof (*sin));
3918 sin->sin_family = AF_INET;
3919 sin->sin_len = sizeof(*sin);
3920 sin->sin_port = ((struct sockaddr_in *)prim)->sin_port;
3921 sin->sin_addr = ((struct sockaddr_in *)prim)->sin_addr;
3922 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
3923 *addr = (struct sockaddr *)sin;
3925 nam->m_len = sizeof(*sin);
/* Non-INET primary: build an IPv6 sockaddr instead. */
3928 struct sockaddr_in6 *sin6;
3929 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
3930 sin6 = kmalloc(sizeof *sin6, M_SONAME, M_WAITOK | M_ZERO);
3932 sin6 = (struct sockaddr_in6 *)addr;
3934 bzero((caddr_t)sin6, sizeof (*sin6));
3935 sin6->sin6_family = AF_INET6;
3936 sin6->sin6_len = sizeof(*sin6);
3937 sin6->sin6_port = ((struct sockaddr_in6 *)prim)->sin6_port;
3939 sin6->sin6_addr = ((struct sockaddr_in6 *)prim)->sin6_addr;
3940 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr))
3941 /* sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);*/
3942 in6_recoverscope(sin6, &sin6->sin6_addr, NULL); /* skip ifp check */
3944 sin6->sin6_scope_id = 0; /*XXX*/
3945 #if defined(__FreeBSD__) || defined (__APPLE__) || defined(__DragonFly__)
3946 *addr= (struct sockaddr *)sin6;
3948 nam->m_len = sizeof(*sin6);
3951 /* Wake any delayed sleep action */
3952 SCTP_TCB_UNLOCK(stcb);
3953 SCTP_INP_WLOCK(inp);
3954 if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
3955 inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
3956 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
3957 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
3958 #if defined(__NetBSD__)
3959 if (sowritable(inp->sctp_socket))
3960 sowwakeup(inp->sctp_socket);
3962 if (sowriteable(inp->sctp_socket))
3963 sowwakeup(inp->sctp_socket);
3966 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
3967 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
3968 if (soreadable(inp->sctp_socket))
3969 sorwakeup(inp->sctp_socket);
3973 SCTP_INP_WUNLOCK(inp);
3976 lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_ingetaddr() - netmsg wrapper for getsockname(2); delegates to
 * sctp_ingetaddr_oncpu() and replies with its error code.
 */
3981 sctp_ingetaddr(netmsg_t msg)
3985 error = sctp_ingetaddr_oncpu(msg->sockaddr.base.nm_so,
3986 msg->sockaddr.nm_nam);
3987 lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_ingetaddr_oncpu() - getsockname(2) worker: returns the local
 * IPv4 address/port in a freshly kmalloc'd sockaddr_in (*addr).
 * For a bound-all connected socket the address is chosen by source
 * address selection against the first association's first IPv4 net;
 * a bound-all unconnected socket gets INADDR_ANY (0); otherwise the
 * first IPv4 address on the endpoint's address list is used.
 * NOTE: early-return error paths between visible lines are elided.
 */
3991 sctp_ingetaddr_oncpu(struct socket *so, struct sockaddr **addr)
3993 struct sockaddr_in *sin;
3994 struct sctp_inpcb *inp;
3996 * Do the malloc first in case it blocks.
3998 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
3999 sin = kmalloc(sizeof *sin, M_SONAME, M_WAITOK | M_ZERO);
4001 nam->m_len = sizeof(*sin);
4002 memset(sin, 0, sizeof(*sin));
4004 sin->sin_family = AF_INET;
4005 sin->sin_len = sizeof(*sin);
4006 inp = (struct sctp_inpcb *)so->so_pcb;
/* PCB vanished while we (possibly) blocked: free and bail. */
4008 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4009 kfree(sin, M_SONAME);
4013 SCTP_INP_RLOCK(inp);
4014 sin->sin_port = inp->sctp_lport;
4015 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
4016 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
4017 struct sctp_tcb *stcb;
4018 struct sockaddr_in *sin_a;
4019 struct sctp_nets *net;
4022 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4028 SCTP_TCB_LOCK(stcb);
/* Find the first IPv4 destination net to select against. */
4029 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4030 sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4031 if (sin_a->sin_family == AF_INET) {
4036 if ((!fnd) || (sin_a == NULL)) {
4038 SCTP_TCB_UNLOCK(stcb);
4041 sin->sin_addr = sctp_ipv4_source_address_selection(inp,
4042 stcb, (struct route *)&net->ro, net, 0);
4043 SCTP_TCB_UNLOCK(stcb);
4045 /* For the bound all case you get back 0 */
4047 sin->sin_addr.s_addr = 0;
4051 /* Take the first IPv4 address in the list */
4052 struct sctp_laddr *laddr;
4054 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4055 if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
4056 struct sockaddr_in *sin_a;
4057 sin_a = (struct sockaddr_in *)laddr->ifa->ifa_addr;
4058 sin->sin_addr = sin_a->sin_addr;
/* No IPv4 address found on the endpoint: fail and free. */
4064 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4065 kfree(sin, M_SONAME);
4067 SCTP_INP_RUNLOCK(inp);
4071 SCTP_INP_RUNLOCK(inp);
4072 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4073 (*addr) = (struct sockaddr *)sin;
/*
 * sctp_peeraddr() - netmsg wrapper for getpeername(2); delegates to
 * sctp_peeraddr_oncpu() and replies with its error code.
 */
4079 sctp_peeraddr(netmsg_t msg)
4083 error = sctp_peeraddr_oncpu(msg->peeraddr.base.nm_so,
4084 msg->peeraddr.nm_nam);
4085 lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_peeraddr_oncpu() - getpeername(2) worker: returns the first
 * IPv4 destination address of the first association in a kmalloc'd
 * sockaddr_in.  Only valid on a connected (TCP-model) socket; UDP
 * model and listening sockets are rejected up front, and a peer with
 * no IPv4 address yields an error with the sockaddr freed.
 * NOTE: error assignments/returns between visible lines are elided.
 */
4089 sctp_peeraddr_oncpu(struct socket *so, struct sockaddr **addr)
4091 struct sockaddr_in *sin = (struct sockaddr_in *)*addr;
4092 struct sockaddr_in *sin_a;
4093 struct sctp_inpcb *inp;
4094 struct sctp_tcb *stcb;
4095 struct sctp_nets *net;
4099 /* Do the malloc first in case it blocks. */
4100 inp = (struct sctp_inpcb *)so->so_pcb;
4101 if ((inp == NULL) ||
4102 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
4103 /* UDP type and listeners will drop out here */
4108 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4109 /* XXX huh? why assign it above and then allocate it here? */
4110 sin = kmalloc(sizeof *sin, M_SONAME, M_WAITOK | M_ZERO);
4112 nam->m_len = sizeof(*sin);
4113 memset(sin, 0, sizeof(*sin));
4115 sin->sin_family = AF_INET;
4116 sin->sin_len = sizeof(*sin);
4118 /* We must recapture incase we blocked */
4119 inp = (struct sctp_inpcb *)so->so_pcb;
4121 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4122 kfree(sin, M_SONAME);
4127 SCTP_INP_RLOCK(inp);
4128 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4130 SCTP_TCB_LOCK(stcb);
4131 SCTP_INP_RUNLOCK(inp);
/* No association: free the sockaddr and fail. */
4133 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4134 kfree(sin, M_SONAME);
4140 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4141 sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4142 if (sin_a->sin_family == AF_INET) {
4144 sin->sin_port = stcb->rport;
4145 sin->sin_addr = sin_a->sin_addr;
4149 SCTP_TCB_UNLOCK(stcb);
4151 /* No IPv4 address */
4152 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4153 kfree(sin, M_SONAME);
/*
 * sctp_usrreqs - protocol user-request vector for SCTP on
 * FreeBSD/APPLE/DragonFly.  Wires each pru_* operation to its SCTP
 * implementation; operations SCTP does not support (connect2, rcvoob)
 * are routed to pr_generic_notsupp, and sense uses the null handler.
 *
 * NOTE(review): gapped listing — the closing `};` (orig. line 4184) is
 * elided.
 */
4163 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4164 struct pr_usrreqs sctp_usrreqs = {
4165 	.pru_abort = sctp_abort,
4166 	.pru_accept = sctp_accept,
4167 	.pru_attach = sctp_attach,
4168 	.pru_bind = sctp_bind,
4169 	.pru_connect = sctp_connect,
4170 	.pru_connect2 = pr_generic_notsupp,
4171 	.pru_control = in_control_dispatch,
4172 	.pru_detach = sctp_detach,
4173 	.pru_disconnect = sctp_disconnect,
4174 	.pru_listen = sctp_listen,
4175 	.pru_peeraddr = sctp_peeraddr,
4176 	.pru_rcvd = sctp_usr_recvd,
4177 	.pru_rcvoob = pr_generic_notsupp,
4178 	.pru_send = sctp_send,
4179 	.pru_sense = pru_sense_null,
4180 	.pru_shutdown = sctp_shutdown,
4181 	.pru_sockaddr = sctp_ingetaddr,
4182 	.pru_sosend = sctp_sosend,
4183 	.pru_soreceive = soreceive
/*
 * sctp_usrreq() - classic monolithic usrreq dispatcher used on
 * NetBSD/OpenBSD (NetBSD variant takes an explicit struct proc *;
 * the other variant derives it from curproc).  Handles PRU_CONTROL and
 * PRU_PURGEIF up front, then switches on `req` and forwards to the
 * corresponding sctp_* handler.
 *
 * NOTE(review): gapped listing — the return type, opening/closing braces,
 * several case labels and break statements are elided; comments describe
 * only the visible code.
 */
4187 #if defined(__NetBSD__)
4189 sctp_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam,
4190     struct mbuf *control, struct proc *p)
/* Non-NetBSD variant of the prototype (no proc argument). */
4195 sctp_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam,
4196     struct mbuf *control)
4198 	struct proc *p = curproc;
/* Address family decides which in{,6}_control to call below. */
4203 	family = so->so_proto->pr_domain->dom_family;
4205 	if (req == PRU_CONTROL) {
/* ioctl path: m is the command, nam the data, control the ifnet. */
4208 		error = in_control(so, (long)m, (caddr_t)nam,
4209 		    (struct ifnet *)control
4210 #if defined(__NetBSD__)
/* Elided: AF_INET6 case calls in6_control instead. */
4217 			error = in6_control(so, (long)m, (caddr_t)nam,
4218 			    (struct ifnet *)control, p);
4222 			error = EAFNOSUPPORT;
/* Interface purge: drop every SCTP-tracked address on the ifnet. */
4227 	if (req == PRU_PURGEIF) {
4230 		ifn = (struct ifnet *)control;
4231 		TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
4232 			if (ifa->ifa_addr->sa_family == family) {
4233 				sctp_delete_ip_address(ifa);
/* Elided: fall-through family check before the main switch. */
4246 		return (EAFNOSUPPORT);
/* Main request switch — each case forwards to the sctp_* handler. */
4253 		error = sctp_attach(so, family, p);
4256 		error = sctp_detach(so);
4262 		error = sctp_bind(so, nam, p);
4265 		error = sctp_listen(so, p);
4271 		error = sctp_connect(so, nam, p);
4273 	case PRU_DISCONNECT:
4274 		error = sctp_disconnect(so);
4280 		error = sctp_accept(so, nam);
4283 		error = sctp_shutdown(so);
4288 		 * For Open and Net BSD, this is real
4289 		 * ugly. The mbuf *nam that is passed
4290 		 * (by soreceive()) is the int flags c
4291 		 * ast as a (mbuf *) yuck!
4293 		error = sctp_usr_recvd(so, (int)((long)nam));
4297 		/* Flags are ignored */
4299 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
4300 			kprintf("Send called on V4 side\n");
4304 			struct sockaddr *addr;
/* Destination sockaddr arrives embedded in the nam mbuf. */
4308 				addr = mtod(nam, struct sockaddr *);
4310 			error = sctp_send(so, 0, m, addr, control, p);
4314 		error = sctp_abort(so);
/* Elided cases (e.g. OOB send/recv) are unsupported. */
4321 		error = EAFNOSUPPORT;
4324 		error = EAFNOSUPPORT;
4327 		error = sctp_peeraddr(so, nam);
4330 		error = sctp_ingetaddr(so, nam);
4342 /* #if defined(__NetBSD__) || defined(__OpenBSD__) */
4343 #if defined(__OpenBSD__)
4345  * Sysctl for sctp variables.
/*
 * sctp_sysctl() - OpenBSD-style sysctl handler for the net.inet.sctp
 * subtree.  Every name at this level is a terminal integer knob; each
 * case delegates to sysctl_int() against the matching global tunable.
 * Unknown names return ENOPROTOOPT.
 *
 * NOTE(review): gapped listing — the function's opening lines, the
 * namelen check, some case variable references (orig. 4360, 4363, 4369,
 * 4372) and the closing brace are elided.
 */
4348 sctp_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
4352 	/* All sysctl names at this level are terminal. */
4358 	case SCTPCTL_MAXDGRAM:
4359 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4361 	case SCTPCTL_RECVSPACE:
4362 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4364 	case SCTPCTL_AUTOASCONF:
4365 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4366 		    &sctp_auto_asconf));
4367 	case SCTPCTL_ECN_ENABLE:
4368 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4370 	case SCTPCTL_ECN_NONCE:
4371 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4373 	case SCTPCTL_STRICT_SACK:
4374 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4375 		    &sctp_strict_sacks));
4376 	case SCTPCTL_NOCSUM_LO:
4377 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4378 		    &sctp_no_csum_on_loopback));
4379 	case SCTPCTL_STRICT_INIT:
4380 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4381 		    &sctp_strict_init));
4382 	case SCTPCTL_PEER_CHK_OH:
4383 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4384 		    &sctp_peer_chunk_oh));
4385 	case SCTPCTL_MAXBURST:
4386 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4387 		    &sctp_max_burst_default));
4388 	case SCTPCTL_MAXCHUNKONQ:
4389 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4390 		    &sctp_max_chunks_on_queue));
4391 	case SCTPCTL_DELAYED_SACK:
4392 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4393 		    &sctp_delayed_sack_time_default));
4394 	case SCTPCTL_HB_INTERVAL:
4395 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4396 		    &sctp_heartbeat_interval_default));
4397 	case SCTPCTL_PMTU_RAISE:
4398 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4399 		    &sctp_pmtu_raise_time_default));
4400 	case SCTPCTL_SHUTDOWN_GUARD:
4401 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4402 		    &sctp_shutdown_guard_time_default));
4403 	case SCTPCTL_SECRET_LIFETIME:
4404 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4405 		    &sctp_secret_lifetime_default));
4406 	case SCTPCTL_RTO_MAX:
4407 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4408 		    &sctp_rto_max_default));
4409 	case SCTPCTL_RTO_MIN:
4410 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4411 		    &sctp_rto_min_default));
4412 	case SCTPCTL_RTO_INITIAL:
4413 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4414 		    &sctp_rto_initial_default));
4415 	case SCTPCTL_INIT_RTO_MAX:
4416 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4417 		    &sctp_init_rto_max_default));
4418 	case SCTPCTL_COOKIE_LIFE:
4419 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4420 		    &sctp_valid_cookie_life_default));
4421 	case SCTPCTL_INIT_RTX_MAX:
4422 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4423 		    &sctp_init_rtx_max_default));
4424 	case SCTPCTL_ASSOC_RTX_MAX:
4425 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4426 		    &sctp_assoc_rtx_max_default));
4427 	case SCTPCTL_PATH_RTX_MAX:
4428 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4429 		    &sctp_path_rtx_max_default));
4430 	case SCTPCTL_NR_OUTGOING_STREAMS:
4431 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4432 		    &sctp_nr_outgoing_streams_default));
/* Elided case label (orig. 4433-4434) precedes this final knob. */
4435 		return (sysctl_int(oldp, oldlenp, newp, newlen,
/* Unknown sysctl name at this level. */
4439 		return (ENOPROTOOPT);
4444 #if defined(__NetBSD__)
4446 * Sysctl for sctp variables.
4448 SYSCTL_SETUP(sysctl_net_inet_sctp_setup, "sysctl net.inet.sctp subtree setup")
4451 sysctl_createv(clog, 0, NULL, NULL,
4453 CTLTYPE_NODE, "net", NULL,
4456 sysctl_createv(clog, 0, NULL, NULL,
4458 CTLTYPE_NODE, "inet", NULL,
4460 CTL_NET, PF_INET, CTL_EOL);
4461 sysctl_createv(clog, 0, NULL, NULL,
4463 CTLTYPE_NODE, "sctp",
4464 SYSCTL_DESCR("sctp related settings"),
4466 CTL_NET, PF_INET, IPPROTO_SCTP, CTL_EOL);
4468 sysctl_createv(clog, 0, NULL, NULL,
4469 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4470 CTLTYPE_INT, "maxdgram",
4471 SYSCTL_DESCR("Maximum outgoing SCTP buffer size"),
4472 NULL, 0, &sctp_sendspace, 0,
4473 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_MAXDGRAM,
4476 sysctl_createv(clog, 0, NULL, NULL,
4477 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4478 CTLTYPE_INT, "recvspace",
4479 SYSCTL_DESCR("Maximum incoming SCTP buffer size"),
4480 NULL, 0, &sctp_recvspace, 0,
4481 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_RECVSPACE,
4484 sysctl_createv(clog, 0, NULL, NULL,
4485 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4486 CTLTYPE_INT, "autoasconf",
4487 SYSCTL_DESCR("Enable SCTP Auto-ASCONF"),
4488 NULL, 0, &sctp_auto_asconf, 0,
4489 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_AUTOASCONF,
4492 sysctl_createv(clog, 0, NULL, NULL,
4493 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4494 CTLTYPE_INT, "ecn_enable",
4495 SYSCTL_DESCR("Enable SCTP ECN"),
4496 NULL, 0, &sctp_ecn, 0,
4497 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_ECN_ENABLE,
4500 sysctl_createv(clog, 0, NULL, NULL,
4501 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4502 CTLTYPE_INT, "ecn_nonce",
4503 SYSCTL_DESCR("Enable SCTP ECN Nonce"),
4504 NULL, 0, &sctp_ecn_nonce, 0,
4505 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_ECN_NONCE,
4508 sysctl_createv(clog, 0, NULL, NULL,
4509 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4510 CTLTYPE_INT, "strict_sack",
4511 SYSCTL_DESCR("Enable SCTP Strict SACK checking"),
4512 NULL, 0, &sctp_strict_sacks, 0,
4513 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_STRICT_SACK,
4516 sysctl_createv(clog, 0, NULL, NULL,
4517 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4518 CTLTYPE_INT, "loopback_nocsum",
4519 SYSCTL_DESCR("Enable NO Csum on packets sent on loopback"),
4520 NULL, 0, &sctp_no_csum_on_loopback, 0,
4521 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_NOCSUM_LO,
4524 sysctl_createv(clog, 0, NULL, NULL,
4525 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4526 CTLTYPE_INT, "strict_init",
4527 SYSCTL_DESCR("Enable strict INIT/INIT-ACK singleton enforcement"),
4528 NULL, 0, &sctp_strict_init, 0,
4529 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_STRICT_INIT,
4532 sysctl_createv(clog, 0, NULL, NULL,
4533 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4534 CTLTYPE_INT, "peer_chkoh",
4535 SYSCTL_DESCR("Amount to debit peers rwnd per chunk sent"),
4536 NULL, 0, &sctp_peer_chunk_oh, 0,
4537 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_PEER_CHK_OH,
4540 sysctl_createv(clog, 0, NULL, NULL,
4541 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4542 CTLTYPE_INT, "maxburst",
4543 SYSCTL_DESCR("Default max burst for sctp endpoints"),
4544 NULL, 0, &sctp_max_burst_default, 0,
4545 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_MAXBURST,
4548 sysctl_createv(clog, 0, NULL, NULL,
4549 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4550 CTLTYPE_INT, "maxchunks",
4551 SYSCTL_DESCR("Default max chunks on queue per asoc"),
4552 NULL, 0, &sctp_max_chunks_on_queue, 0,
4553 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_MAXCHUNKONQ,