1 /* $KAME: sctp_usrreq.c,v 1.47 2005/03/06 16:04:18 itojun Exp $ */
4 * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Cisco Systems, Inc.
18 * 4. Neither the name of the project nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 #if !(defined(__OpenBSD__) || defined(__APPLE__))
35 #include "opt_ipsec.h"
37 #if defined(__FreeBSD__) || defined(__DragonFly__)
38 #include "opt_inet6.h"
41 #if defined(__NetBSD__)
47 #elif !defined(__OpenBSD__)
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/kernel.h>
54 #include <sys/malloc.h>
56 #include <sys/domain.h>
59 #include <sys/protosw.h>
60 #include <sys/socket.h>
61 #include <sys/socketvar.h>
62 #include <sys/socketvar2.h>
63 #include <sys/sysctl.h>
64 #include <sys/syslog.h>
66 #include <sys/thread2.h>
67 #include <sys/msgport2.h>
70 #include <net/if_types.h>
71 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
72 #include <net/if_var.h>
74 #include <net/route.h>
75 #include <netinet/in.h>
76 #include <netinet/in_systm.h>
77 #include <netinet/ip.h>
78 #include <netinet/ip6.h>
79 #include <netinet/in_pcb.h>
80 #include <netinet/in_var.h>
81 #include <netinet/ip_var.h>
82 #include <netinet6/ip6_var.h>
83 #include <netinet6/in6_var.h>
85 #include <netinet/ip_icmp.h>
86 #include <netinet/icmp_var.h>
87 #include <netinet/sctp_pcb.h>
88 #include <netinet/sctp_header.h>
89 #include <netinet/sctp_var.h>
90 #include <netinet/sctp_output.h>
91 #include <netinet/sctp_uio.h>
92 #include <netinet/sctp_asconf.h>
93 #include <netinet/sctputil.h>
94 #include <netinet/sctp_indata.h>
95 #include <netinet/sctp_asconf.h>
98 #include <netinet6/ipsec.h>
99 #include <netproto/key/key.h>
105 #include <net/net_osdep.h>
107 #if defined(HAVE_NRL_INPCB) || defined(__FreeBSD__) || defined(__DragonFly__)
112 #define sotoin6pcb sotoinpcb
117 extern u_int32_t sctp_debug_on;
118 #endif /* SCTP_DEBUG */
121 * sysctl tunable variables
/*
 * Global SCTP tunables, exported read/write through the SYSCTL_* entries
 * declared later in this file.  Defaults come from the SCTP_* constants
 * in the SCTP headers.
 * NOTE(review): this extract is missing interleaved lines (e.g. the
 * #ifdef arm selecting sockaddr_in6 vs sockaddr_in inside the
 * sctp_recvspace initializer below) -- verify against the full file.
 */
123 int sctp_auto_asconf = SCTP_DEFAULT_AUTO_ASCONF;
124 int sctp_max_burst_default = SCTP_DEF_MAX_BURST;
/* per-chunk receive-window debit charged for peer overhead */
125 int sctp_peer_chunk_oh = sizeof(struct mbuf);
126 int sctp_strict_init = 1;
/* checksum is skipped on loopback when non-zero */
127 int sctp_no_csum_on_loopback = 1;
128 unsigned int sctp_max_chunks_on_queue = SCTP_ASOC_MAX_CHUNKS_ON_QUEUE;
/* socket buffer sizes; sctp_init() below may recompute these */
129 int sctp_sendspace = (128 * 1024);
130 int sctp_recvspace = 128 * (1024 +
132 sizeof(struct sockaddr_in6)
134 sizeof(struct sockaddr_in)
137 int sctp_strict_sacks = 0;
139 int sctp_ecn_nonce = 0;
/* timer/retransmission defaults applied to new endpoints */
141 unsigned int sctp_delayed_sack_time_default = SCTP_RECV_MSEC;
142 unsigned int sctp_heartbeat_interval_default = SCTP_HB_DEFAULT_MSEC;
143 unsigned int sctp_pmtu_raise_time_default = SCTP_DEF_PMTU_RAISE_SEC;
144 unsigned int sctp_shutdown_guard_time_default = SCTP_DEF_MAX_SHUTDOWN_SEC;
145 unsigned int sctp_secret_lifetime_default = SCTP_DEFAULT_SECRET_LIFE_SEC;
146 unsigned int sctp_rto_max_default = SCTP_RTO_UPPER_BOUND;
147 unsigned int sctp_rto_min_default = SCTP_RTO_LOWER_BOUND;
148 unsigned int sctp_rto_initial_default = SCTP_RTO_INITIAL;
149 unsigned int sctp_init_rto_max_default = SCTP_RTO_UPPER_BOUND;
150 unsigned int sctp_valid_cookie_life_default = SCTP_DEFAULT_COOKIE_LIFE;
151 unsigned int sctp_init_rtx_max_default = SCTP_DEF_MAX_INIT;
152 unsigned int sctp_assoc_rtx_max_default = SCTP_DEF_MAX_SEND;
153 unsigned int sctp_path_rtx_max_default = SCTP_DEF_MAX_SEND/2;
154 unsigned int sctp_nr_outgoing_streams_default = SCTP_OSTREAM_INITIAL;
160 #define nmbclusters nmbclust
/*
 * Protocol initialization: clamp queue limits and derive the default
 * send/receive socket-buffer sizes from the cluster pool size.
 * NOTE(review): the function signature line is missing from this
 * extract; the statements below are the visible body.
 */
162 /* Init the SCTP pcb in sctp_pcb.c */
168 if (nmbclusters > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
169 sctp_max_chunks_on_queue = nmbclusters;
171 /* if (nmbclust > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
172 sctp_max_chunks_on_queue = nmbclust; FIX ME */
173 sctp_max_chunks_on_queue = nmbclust * 2;
176 * Allow a user to take no more than 1/2 the number of clusters
177 * or the SB_MAX whichever is smaller for the send window.
179 sb_max_adj = (u_long)((u_quad_t)(SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
180 sctp_sendspace = min((min(SB_MAX, sb_max_adj)),
182 ((nmbclusters/2) * SCTP_DEFAULT_MAXSEGMENT));
184 ((nmbclust/2) * SCTP_DEFAULT_MAXSEGMENT));
187 * Now for the recv window, should we take the same amount?
188 * or should I do 1/2 the SB_MAX instead in the SB_MAX min above.
189 * For now I will just copy.
191 sctp_recvspace = sctp_sendspace;
/*
 * Synthesize an IPv6 header from an IPv4 header: zero the ip6 header,
 * copy over version/payload length/next header/hop limit, and place the
 * v4 source and destination into words [3] of the v6 addresses.
 * NOTE(review): the line initializing s6_addr32[2] is truncated in this
 * extract (likely the v4-mapped prefix) -- confirm against the full file.
 */
199 ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip)
201 bzero(ip6, sizeof(*ip6));
203 ip6->ip6_vfc = IPV6_VERSION;
204 ip6->ip6_plen = ip->ip_len;
205 ip6->ip6_nxt = ip->ip_p;
206 ip6->ip6_hlim = ip->ip_ttl;
207 ip6->ip6_src.s6_addr32[2] = ip6->ip6_dst.s6_addr32[2] =
209 ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr;
210 ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr;
/*
 * Split a queued stream chunk roughly in half so each piece fits the
 * (reduced) path MTU.  Allocates a second tmit_chunk, m_split()s the
 * data, halves send/book sizes, fixes the FIRST/LAST fragment flags,
 * and inserts the new chunk immediately after the original.
 * On allocation failure the chunk is simply marked FRAGMENT_OK and
 * left whole (IP-level fragmentation will be allowed instead).
 */
215 sctp_split_chunks(struct sctp_association *asoc,
216 struct sctp_stream_out *strm,
217 struct sctp_tmit_chunk *chk)
219 struct sctp_tmit_chunk *new_chk;
221 /* First we need a chunk */
222 new_chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
223 if (new_chk == NULL) {
224 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
227 sctppcbinfo.ipi_count_chunk++;
228 sctppcbinfo.ipi_gencnt_chunk++;
/* NOTE(review): the copy of *chk into *new_chk is in lines missing
 * from this extract; new_chk is assumed initialized from chk here. */
232 new_chk->data = m_split(chk->data, (chk->send_size>>1), MB_DONTWAIT);
233 if (new_chk->data == NULL) {
/* m_split failed: undo the allocation and fall back to FRAGMENT_OK */
235 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
236 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, new_chk);
237 sctppcbinfo.ipi_count_chunk--;
238 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
239 panic("Chunk count is negative");
241 sctppcbinfo.ipi_gencnt_chunk++;
245 /* Data is now split adjust sizes */
246 chk->send_size >>= 1;
247 new_chk->send_size >>= 1;
249 chk->book_size >>= 1;
250 new_chk->book_size >>= 1;
252 /* now adjust the marks */
253 chk->rec.data.rcv_flags |= SCTP_DATA_FIRST_FRAG;
254 chk->rec.data.rcv_flags &= ~SCTP_DATA_LAST_FRAG;
256 new_chk->rec.data.rcv_flags &= ~SCTP_DATA_FIRST_FRAG;
257 new_chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
259 /* Increase ref count if dest is set */
261 new_chk->whoTo->ref_count++;
263 /* now drop it on the end of the list*/
264 asoc->stream_queue_cnt++;
265 TAILQ_INSERT_AFTER(&strm->outqueue, chk, new_chk, sctp_next);
/*
 * Handle an ICMP "fragmentation needed" (Path MTU) notification for an
 * association.  Validates arguments and the verification tag, extracts
 * the next-hop MTU from the ICMP header, shrinks the destination and
 * association MTU limits, marks/requeues chunks that no longer fit, and
 * restarts the PMTU-raise timer.  Caller passes in a locked stcb; every
 * exit path releases the TCB lock.
 */
269 sctp_notify_mbuf(struct sctp_inpcb *inp,
270 struct sctp_tcb *stcb,
271 struct sctp_nets *net,
281 if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
282 (ip == NULL) || (sh == NULL)) {
284 SCTP_TCB_UNLOCK(stcb);
287 /* First job is to verify the vtag matches what I would send */
288 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
289 SCTP_TCB_UNLOCK(stcb);
/* Back up from the inner IP header to the enclosing ICMP header */
292 icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
294 if (icmph->icmp_type != ICMP_UNREACH) {
295 /* We only care about unreachable */
296 SCTP_TCB_UNLOCK(stcb);
299 if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
300 /* not a unreachable message due to frag. */
301 SCTP_TCB_UNLOCK(stcb);
/* icmp_seq overlays the RFC 1191 next-hop MTU field here */
305 nxtsz = ntohs(icmph->icmp_seq);
308 * old type router that does not tell us what the next size
309 * mtu is. Rats we will have to guess (in a educated fashion
312 nxtsz = find_next_best_mtu(totsz);
315 /* Stop any PMTU timer */
316 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, NULL);
318 /* Adjust destination size limit */
319 if (net->mtu > nxtsz) {
322 /* now what about the ep? */
323 if (stcb->asoc.smallest_mtu > nxtsz) {
324 struct sctp_tmit_chunk *chk, *nchk;
325 struct sctp_stream_out *strm;
326 /* Adjust that too */
327 stcb->asoc.smallest_mtu = nxtsz;
328 /* now off to subtract IP_DF flag if needed */
330 TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
331 if ((chk->send_size+IP_HDR_SIZE) > nxtsz) {
332 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
335 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
336 if ((chk->send_size+IP_HDR_SIZE) > nxtsz) {
338 * For this guy we also mark for immediate
339 * resend since we sent to big of chunk
341 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
342 if (chk->sent != SCTP_DATAGRAM_RESEND) {
343 stcb->asoc.sent_queue_retran_cnt++;
345 chk->sent = SCTP_DATAGRAM_RESEND;
346 chk->rec.data.doing_fast_retransmit = 0;
348 /* Clear any time so NO RTT is being done */
/* Take the oversized chunk back out of the flight accounting,
 * clamping totals at zero. */
350 stcb->asoc.total_flight -= chk->book_size;
351 if (stcb->asoc.total_flight < 0) {
352 stcb->asoc.total_flight = 0;
354 stcb->asoc.total_flight_count--;
355 if (stcb->asoc.total_flight_count < 0) {
356 stcb->asoc.total_flight_count = 0;
358 net->flight_size -= chk->book_size;
359 if (net->flight_size < 0) {
360 net->flight_size = 0;
/* Split any unsent stream chunk that exceeds the new MTU budget */
364 TAILQ_FOREACH(strm, &stcb->asoc.out_wheel, next_spoke) {
365 chk = TAILQ_FIRST(&strm->outqueue);
367 nchk = TAILQ_NEXT(chk, sctp_next);
368 if ((chk->send_size+SCTP_MED_OVERHEAD) > nxtsz) {
369 sctp_split_chunks(&stcb->asoc, strm, chk);
375 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, NULL);
376 SCTP_TCB_UNLOCK(stcb);
/*
 * Deliver a translated ICMP error to an association.  Reachability
 * errors (EHOSTUNREACH/EHOSTDOWN) mark the destination unreachable and
 * notify the ULP; ECONNREFUSED/ENOPROTOOPT are treated as an
 * out-of-the-blue abort and tear down the association; all other
 * errors are posted to the owning socket.  Expects a locked stcb.
 */
381 sctp_notify(struct sctp_inpcb *inp,
385 struct sctp_tcb *stcb,
386 struct sctp_nets *net)
389 if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
390 (sh == NULL) || (to == NULL)) {
392 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
393 kprintf("sctp-notify, bad call\n");
395 #endif /* SCTP_DEBUG */
398 /* First job is to verify the vtag matches what I would send */
399 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
403 /* FIX ME FIX ME PROTOPT i.e. no SCTP should ALWAYS be an ABORT */
405 if ((error == EHOSTUNREACH) || /* Host is not reachable */
406 (error == EHOSTDOWN) || /* Host is down */
407 (error == ECONNREFUSED) || /* Host refused the connection, (not an abort?) */
408 (error == ENOPROTOOPT) /* SCTP is not present on host */
411 * Hmm reachablity problems we must examine closely.
412 * If its not reachable, we may have lost a network.
413 * Or if there is NO protocol at the other end named SCTP.
414 * well we consider it a OOTB abort.
416 if ((error == EHOSTUNREACH) || (error == EHOSTDOWN)) {
417 if (net->dest_state & SCTP_ADDR_REACHABLE) {
418 /* Ok that destination is NOT reachable */
419 net->dest_state &= ~SCTP_ADDR_REACHABLE;
420 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
/* push the error count past the threshold so the path stays down */
421 net->error_count = net->failure_threshold + 1;
422 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
423 stcb, SCTP_FAILED_THRESHOLD,
427 SCTP_TCB_UNLOCK(stcb);
430 * Here the peer is either playing tricks on us,
431 * including an address that belongs to someone who
432 * does not support SCTP OR was a userland
433 * implementation that shutdown and now is dead. In
434 * either case treat it like a OOTB abort with no TCB
436 sctp_abort_notification(stcb, SCTP_PEER_FAULTY);
437 sctp_free_assoc(inp, stcb);
438 /* no need to unlock here, since the TCB is gone */
441 /* Send all others to the app */
442 if (inp->sctp_socket) {
443 SOCK_LOCK(inp->sctp_socket);
444 inp->sctp_socket->so_error = error;
/* wake any writer blocked on the socket so it sees so_error */
445 sctp_sowwakeup(inp, inp->sctp_socket);
446 SOCK_UNLOCK(inp->sctp_socket);
449 SCTP_TCB_UNLOCK(stcb);
/*
 * Protocol control-input handler (DragonFly netmsg form): dispatch an
 * ICMP-derived event to the matching association.  Builds from/to
 * sockaddrs out of the failed packet's IP+SCTP headers, looks up the
 * association, then routes PRC_MSGSIZE to sctp_notify_mbuf() and other
 * commands (via inetctlerrmap) to sctp_notify().  Always replies to
 * the lwkt message before returning.
 */
454 sctp_ctlinput(netmsg_t msg)
456 int cmd = msg->ctlinput.nm_cmd;
457 struct sockaddr *sa = msg->ctlinput.nm_arg;
458 struct ip *ip = msg->ctlinput.nm_extra;
461 if (sa->sa_family != AF_INET ||
462 ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
466 if (PRC_IS_REDIRECT(cmd)) {
468 } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
472 struct sctp_inpcb *inp;
473 struct sctp_tcb *stcb;
474 struct sctp_nets *net;
475 struct sockaddr_in to, from;
/* SCTP header sits right after the (variable-length) IP header */
477 sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
478 bzero(&to, sizeof(to));
479 bzero(&from, sizeof(from));
480 from.sin_family = to.sin_family = AF_INET;
481 from.sin_len = to.sin_len = sizeof(to);
482 from.sin_port = sh->src_port;
483 from.sin_addr = ip->ip_src;
484 to.sin_port = sh->dest_port;
485 to.sin_addr = ip->ip_dst;
488 * 'to' holds the dest of the packet that failed to be sent.
489 * 'from' holds our local endpoint address.
490 * Thus we reverse the to and the from in the lookup.
492 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
493 (struct sockaddr *)&to,
495 if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
496 if (cmd != PRC_MSGSIZE) {
498 if (cmd == PRC_HOSTDEAD) {
501 cm = inetctlerrmap[cmd];
503 sctp_notify(inp, cm, sh,
504 (struct sockaddr *)&to, stcb,
507 /* handle possible ICMP size messages */
508 sctp_notify_mbuf(inp, stcb, net, ip, sh);
511 #if (defined(__FreeBSD__) && __FreeBSD_version < 500000) || defined(__DragonFly__)
512 /* XXX must be fixed for 5.x and higher, leave for 4.x */
513 if (PRC_IS_REDIRECT(cmd) && inp) {
514 in_rtchange((struct inpcb *)inp,
518 if ((stcb == NULL) && (inp != NULL)) {
519 /* reduce ref-count */
521 SCTP_INP_DECR_REF(inp);
522 SCTP_INP_WUNLOCK(inp);
528 lwkt_replymsg(&msg->lmsg, 0);
531 #if defined(__FreeBSD__) || defined(__DragonFly__)
/*
 * sysctl handler: given a pair of sockaddr_in (local, foreign) copied
 * in from userland, look up the matching association and copy out the
 * owning socket's ucred.  Root-only (priv_check/suser).  Drops the inp
 * reference it acquires when the lookup finds an inp but no stcb.
 */
533 sctp_getcred(SYSCTL_HANDLER_ARGS)
535 struct sockaddr_in addrs[2];
536 struct sctp_inpcb *inp;
537 struct sctp_nets *net;
538 struct sctp_tcb *stcb;
541 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
542 error = priv_check(req->td, PRIV_ROOT);
544 error = suser(req->p);
548 error = SYSCTL_IN(req, addrs, sizeof(addrs));
552 stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
555 if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
556 if ((inp != NULL) && (stcb == NULL)) {
557 /* reduce ref-count */
559 SCTP_INP_DECR_REF(inp);
560 SCTP_INP_WUNLOCK(inp);
565 error = SYSCTL_OUT(req, inp->sctp_socket->so_cred, sizeof(struct ucred));
566 SCTP_TCB_UNLOCK(stcb);
571 SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE|CTLFLAG_RW,
572 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");
573 #endif /* #if defined(__FreeBSD__) || defined(__DragonFly__) */
/*
 * net.inet.sctp sysctl tree: one read/write leaf per tunable declared
 * at the top of this file, plus the debug knob under SCTP_DEBUG.
 */
578 #if defined(__FreeBSD__) || defined (__APPLE__) || defined(__DragonFly__)
580 SYSCTL_DECL(_net_inet);
582 SYSCTL_NODE(_net_inet, OID_AUTO, sctp, CTLFLAG_RD, 0,
585 SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxdgram, CTLFLAG_RW,
586 &sctp_sendspace, 0, "Maximum outgoing SCTP buffer size");
588 SYSCTL_INT(_net_inet_sctp, OID_AUTO, recvspace, CTLFLAG_RW,
589 &sctp_recvspace, 0, "Maximum incoming SCTP buffer size");
591 SYSCTL_INT(_net_inet_sctp, OID_AUTO, auto_asconf, CTLFLAG_RW,
592 &sctp_auto_asconf, 0, "Enable SCTP Auto-ASCONF");
594 SYSCTL_INT(_net_inet_sctp, OID_AUTO, ecn_enable, CTLFLAG_RW,
595 &sctp_ecn, 0, "Enable SCTP ECN");
597 SYSCTL_INT(_net_inet_sctp, OID_AUTO, ecn_nonce, CTLFLAG_RW,
598 &sctp_ecn_nonce, 0, "Enable SCTP ECN Nonce");
600 SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_sacks, CTLFLAG_RW,
601 &sctp_strict_sacks, 0, "Enable SCTP Strict SACK checking");
603 SYSCTL_INT(_net_inet_sctp, OID_AUTO, loopback_nocsum, CTLFLAG_RW,
604 &sctp_no_csum_on_loopback, 0,
605 "Enable NO Csum on packets sent on loopback");
607 SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_init, CTLFLAG_RW,
608 &sctp_strict_init, 0,
609 "Enable strict INIT/INIT-ACK singleton enforcement");
611 SYSCTL_INT(_net_inet_sctp, OID_AUTO, peer_chkoh, CTLFLAG_RW,
612 &sctp_peer_chunk_oh, 0,
613 "Amount to debit peers rwnd per chunk sent");
615 SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxburst, CTLFLAG_RW,
616 &sctp_max_burst_default, 0,
617 "Default max burst for sctp endpoints");
619 SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxchunks, CTLFLAG_RW,
620 &sctp_max_chunks_on_queue, 0,
621 "Default max chunks on queue per asoc");
623 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, delayed_sack_time, CTLFLAG_RW,
624 &sctp_delayed_sack_time_default, 0,
625 "Default delayed SACK timer in msec");
627 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, heartbeat_interval, CTLFLAG_RW,
628 &sctp_heartbeat_interval_default, 0,
629 "Default heartbeat interval in msec");
631 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, pmtu_raise_time, CTLFLAG_RW,
632 &sctp_pmtu_raise_time_default, 0,
633 "Default PMTU raise timer in sec");
635 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, shutdown_guard_time, CTLFLAG_RW,
636 &sctp_shutdown_guard_time_default, 0,
637 "Default shutdown guard timer in sec");
639 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, secret_lifetime, CTLFLAG_RW,
640 &sctp_secret_lifetime_default, 0,
641 "Default secret lifetime in sec");
643 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_max, CTLFLAG_RW,
644 &sctp_rto_max_default, 0,
645 "Default maximum retransmission timeout in msec");
647 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_min, CTLFLAG_RW,
648 &sctp_rto_min_default, 0,
649 "Default minimum retransmission timeout in msec");
651 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_initial, CTLFLAG_RW,
652 &sctp_rto_initial_default, 0,
653 "Default initial retransmission timeout in msec");
655 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, init_rto_max, CTLFLAG_RW,
656 &sctp_init_rto_max_default, 0,
657 "Default maximum retransmission timeout during association setup in msec");
659 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, valid_cookie_life, CTLFLAG_RW,
660 &sctp_valid_cookie_life_default, 0,
661 "Default cookie lifetime in sec");
663 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, init_rtx_max, CTLFLAG_RW,
664 &sctp_init_rtx_max_default, 0,
665 "Default maximum number of retransmission for INIT chunks");
667 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, assoc_rtx_max, CTLFLAG_RW,
668 &sctp_assoc_rtx_max_default, 0,
669 "Default maximum number of retransmissions per association");
671 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, path_rtx_max, CTLFLAG_RW,
672 &sctp_path_rtx_max_default, 0,
673 "Default maximum of retransmissions per path");
675 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, nr_outgoing_streams, CTLFLAG_RW,
676 &sctp_nr_outgoing_streams_default, 0,
677 "Default number of outgoing streams");
680 SYSCTL_INT(_net_inet_sctp, OID_AUTO, debug, CTLFLAG_RW,
681 &sctp_debug_on, 0, "Configure debug output");
682 #endif /* SCTP_DEBUG */
/*
 * pru_abort handler: free the SCTP inpcb immediately (the '1' asks for
 * an abortive teardown).  The socket itself is freed by the caller --
 * see the NOTE below.
 */
686 * NOTE: (so) is referenced from soabort*() and netmsg_pru_abort()
687 * will sofree() it when we return.
690 sctp_abort(netmsg_t msg)
692 struct socket *so = msg->abort.base.nm_so;
693 struct sctp_inpcb *inp;
696 inp = (struct sctp_inpcb *)so->so_pcb;
698 sctp_inpcb_free(inp, 1);
703 lwkt_replymsg(&msg->lmsg, error);
/*
 * pru_attach handler: reserve socket buffers, allocate the SCTP inpcb,
 * mark it IPv4-only with the default TTL, and (when IPsec is built in)
 * initialize the inp security policy.  Replies with the first error
 * encountered.
 */
707 sctp_attach(netmsg_t msg)
709 struct socket *so = msg->attach.base.nm_so;
710 struct sctp_inpcb *inp;
711 struct inpcb *ip_inp;
714 inp = (struct sctp_inpcb *)so->so_pcb;
719 error = soreserve(so, sctp_sendspace, sctp_recvspace, NULL);
720 atomic_set_int(&so->so_rcv.ssb_flags, SSB_PREALLOC);
721 atomic_set_int(&so->so_snd.ssb_flags, SSB_PREALLOC);
725 error = sctp_inpcb_alloc(so);
728 inp = (struct sctp_inpcb *)so->so_pcb;
731 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */
732 ip_inp = &inp->ip_inp.inp;
733 ip_inp->inp_vflag |= INP_IPV4;
734 ip_inp->inp_ip_ttl = ip_defttl;
737 #if !(defined(__OpenBSD__) || defined(__APPLE__))
738 error = ipsec_init_policy(so, &ip_inp->inp_sp);
/* policy setup failed: undo the inpcb allocation */
740 sctp_inpcb_free(inp, 1);
745 SCTP_INP_WUNLOCK(inp);
746 #if defined(__NetBSD__)
747 so->so_send = sctp_sosend;
751 lwkt_replymsg(&msg->lmsg, error);
/*
 * pru_bind handler: reject non-AF_INET addresses (this is the v4 side
 * of the protocol switch), then delegate to sctp_inpcb_bind().
 */
755 sctp_bind(netmsg_t msg)
757 struct socket *so = msg->bind.base.nm_so;
758 struct sockaddr *addr = msg->bind.nm_nam;
759 thread_t td = msg->bind.nm_td;
760 struct sctp_inpcb *inp;
764 if (addr && addr->sa_family != AF_INET) {
765 /* must be a v4 address! */
771 inp = (struct sctp_inpcb *)so->so_pcb;
773 error = sctp_inpcb_bind(so, addr, td);
778 lwkt_replymsg(&msg->lmsg, error);
/*
 * pru_detach handler: free the inpcb.  An immediate (abortive) free is
 * used when SO_LINGER with zero timeout is set or unread receive data
 * remains; otherwise a graceful free is requested.
 */
783 sctp_detach(netmsg_t msg)
785 struct socket *so = msg->detach.base.nm_so;
786 struct sctp_inpcb *inp;
789 inp = (struct sctp_inpcb *)so->so_pcb;
794 if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
795 (so->so_rcv.ssb_cc > 0)) {
796 sctp_inpcb_free(inp, 1);
798 sctp_inpcb_free(inp, 0);
802 lwkt_replymsg(&msg->lmsg, error);
/*
 * pru_send handler: validate the destination (required unless the
 * socket is connected/TCP-style, and must be AF_INET), stash any
 * control mbufs on the inp, append the data to the pending packet
 * chain (inp->pkt/pkt_last), and -- unless "more to come" is signaled
 * -- hand the assembled packet to sctp_output().  Frees control/data
 * mbufs on the error paths and always replies to the netmsg.
 */
806 sctp_send(netmsg_t msg)
808 struct socket *so = msg->send.base.nm_so;
809 int flags = msg->send.nm_flags;
810 struct mbuf *m = msg->send.nm_m;
811 struct mbuf *control = msg->send.nm_control;
812 struct sockaddr *addr = msg->send.nm_addr;
813 struct thread *td = msg->send.nm_td;
815 struct sctp_inpcb *inp;
816 inp = (struct sctp_inpcb *)so->so_pcb;
819 sctp_m_freem(control);
826 /* Got to have an to address if we are NOT a connected socket */
827 if ((addr == NULL) &&
828 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
829 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
832 } else if (addr == NULL) {
833 error = EDESTADDRREQ;
836 sctp_m_freem(control);
842 if (addr->sa_family != AF_INET) {
843 /* must be a v4 address! */
846 sctp_m_freem(control);
849 error = EDESTADDRREQ; /* XXX huh? */
855 /* now what about control */
/* a previous unsent control mbuf is replaced (and logged) */
858 kprintf("huh? control set?\n");
859 sctp_m_freem(inp->control);
862 inp->control = control;
864 /* add it in possibly */
865 if ((inp->pkt) && (inp->pkt->m_flags & M_PKTHDR)) {
/* accumulate the new chain's length into the pkthdr total */
871 for (x=m;x;x = x->m_next) {
874 inp->pkt->m_pkthdr.len += c_len;
878 inp->pkt_last->m_next = m;
881 inp->pkt_last = inp->pkt = m;
884 #if defined (__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
885 /* FreeBSD uses a flag passed */
886 ((flags & PRUS_MORETOCOME) == 0)
887 #elif defined( __NetBSD__)
888 /* NetBSD uses the so_state field */
889 ((so->so_state & SS_MORETOCOME) == 0)
891 1 /* Open BSD does not have any "more to come" indication */
895 * note with the current version this code will only be used
896 * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for
897 * re-defining sosend to use the sctp_sosend. One can
898 * optionally switch back to this code (by changing back the
899 * definitions) but this is not advisable.
901 error = sctp_output(inp, inp->pkt, addr,
902 inp->control, td, flags);
/* free a sockaddr the message layer allocated on our behalf */
909 if (msg->send.nm_flags & PRUS_NAMALLOC) {
910 kfree(msg->send.nm_addr, M_LWKTMSG);
911 msg->send.nm_addr = NULL;
913 lwkt_replymsg(&msg->lmsg, error);
/*
 * pru_disconnect handler (TCP-model sockets only).  With SO_LINGER(0)
 * or unread data the association is aborted (a USER_INITIATED_ABT
 * error cause is attached when an mbuf is available); otherwise, if
 * nothing is queued, SHUTDOWN is sent and the shutdown timers started,
 * else the association is flagged SHUTDOWN_PENDING so data drains
 * first.  UDP-model sockets get an error reply.
 */
917 sctp_disconnect(netmsg_t msg)
919 struct socket *so = msg->disconnect.base.nm_so;
920 struct sctp_inpcb *inp;
923 inp = (struct sctp_inpcb *)so->so_pcb;
929 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
930 if (LIST_EMPTY(&inp->sctp_asoc_list)) {
/* no association to disconnect */
932 SCTP_INP_RUNLOCK(inp);
936 int some_on_streamwheel = 0;
937 struct sctp_association *asoc;
938 struct sctp_tcb *stcb;
940 stcb = LIST_FIRST(&inp->sctp_asoc_list);
942 SCTP_INP_RUNLOCK(inp);
948 if (((so->so_options & SO_LINGER) &&
949 (so->so_linger == 0)) ||
950 (so->so_rcv.ssb_cc > 0)) {
951 if (SCTP_GET_STATE(asoc) !=
952 SCTP_STATE_COOKIE_WAIT) {
953 /* Left with Data unread */
956 MGET(err, MB_DONTWAIT, MT_DATA);
958 /* Fill in the user initiated abort */
959 struct sctp_paramhdr *ph;
960 ph = mtod(err, struct sctp_paramhdr *);
961 err->m_len = sizeof(struct sctp_paramhdr);
962 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
963 ph->param_length = htons(err->m_len);
965 sctp_send_abort_tcb(stcb, err);
967 SCTP_INP_RUNLOCK(inp);
968 sctp_free_assoc(inp, stcb);
969 /* No unlock tcb assoc is gone */
973 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
974 /* Check to see if some data queued */
975 struct sctp_stream_out *outs;
976 TAILQ_FOREACH(outs, &asoc->out_wheel,
978 if (!TAILQ_EMPTY(&outs->outqueue)) {
979 some_on_streamwheel = 1;
985 if (TAILQ_EMPTY(&asoc->send_queue) &&
986 TAILQ_EMPTY(&asoc->sent_queue) &&
987 (some_on_streamwheel == 0)) {
988 /* there is nothing queued to send, so done */
989 if ((SCTP_GET_STATE(asoc) !=
990 SCTP_STATE_SHUTDOWN_SENT) &&
991 (SCTP_GET_STATE(asoc) !=
992 SCTP_STATE_SHUTDOWN_ACK_SENT)) {
993 /* only send SHUTDOWN 1st time thru */
995 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
996 kprintf("%s:%d sends a shutdown\n",
1002 sctp_send_shutdown(stcb,
1003 stcb->asoc.primary_destination);
1004 sctp_chunk_output(stcb->sctp_ep, stcb, 1);
1005 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
1006 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1007 stcb->sctp_ep, stcb,
1008 asoc->primary_destination);
1009 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1010 stcb->sctp_ep, stcb,
1011 asoc->primary_destination);
1015 * we still got (or just got) data to send,
1016 * so set SHUTDOWN_PENDING
1019 * XXX sockets draft says that MSG_EOF should
1020 * be sent with no data.
1021 * currently, we will allow user data to be
1022 * sent first and move to SHUTDOWN-PENDING
1024 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
1026 SCTP_TCB_UNLOCK(stcb);
1027 SCTP_INP_RUNLOCK(inp);
1031 /* UDP model does not support this */
1032 SCTP_INP_RUNLOCK(inp);
1036 lwkt_replymsg(&msg->lmsg, error);
/*
 * pru_shutdown handler.  For UDP-model (one-to-many) sockets shutdown
 * is meaningless, so the CANTRCVMORE state soshutdown() set is undone
 * and the call succeeds as a no-op.  For TCP-model sockets: if nothing
 * is queued, send SHUTDOWN and start the shutdown/guard timers;
 * otherwise set SHUTDOWN_PENDING so queued data drains first.
 * Mirrors the shutdown half of sctp_disconnect() above.
 */
1039 /* also called from ipv6 sctp code */
1041 sctp_shutdown(netmsg_t msg)
1043 struct socket *so = msg->shutdown.base.nm_so;
1044 struct sctp_inpcb *inp;
1047 inp = (struct sctp_inpcb *)so->so_pcb;
1052 SCTP_INP_RLOCK(inp);
1053 /* For UDP model this is a invalid call */
1054 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
1055 /* Restore the flags that the soshutdown took away. */
1056 #if defined(__FreeBSD__) && __FreeBSD_version >= 502115
1057 so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
1059 soclrstate(so, SS_CANTRCVMORE);
1061 /* This proc will wakeup for read and do nothing (I hope) */
1062 SCTP_INP_RUNLOCK(inp);
1067 * Ok if we reach here its the TCP model and it is either a SHUT_WR
1068 * or SHUT_RDWR. This means we put the shutdown flag against it.
1071 int some_on_streamwheel = 0;
1072 struct sctp_tcb *stcb;
1073 struct sctp_association *asoc;
1076 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1079 * Ok we hit the case that the shutdown call was made
1080 * after an abort or something. Nothing to do now.
1085 SCTP_TCB_LOCK(stcb);
1088 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
1089 /* Check to see if some data queued */
1090 struct sctp_stream_out *outs;
1091 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
1092 if (!TAILQ_EMPTY(&outs->outqueue)) {
1093 some_on_streamwheel = 1;
1098 if (TAILQ_EMPTY(&asoc->send_queue) &&
1099 TAILQ_EMPTY(&asoc->sent_queue) &&
1100 (some_on_streamwheel == 0)) {
1101 /* there is nothing queued to send, so I'm done... */
1102 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1103 /* only send SHUTDOWN the first time through */
1105 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
1106 kprintf("%s:%d sends a shutdown\n",
1112 sctp_send_shutdown(stcb,
1113 stcb->asoc.primary_destination);
1114 sctp_chunk_output(stcb->sctp_ep, stcb, 1);
1115 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
1116 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1117 stcb->sctp_ep, stcb,
1118 asoc->primary_destination);
1119 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1120 stcb->sctp_ep, stcb,
1121 asoc->primary_destination);
1125 * we still got (or just got) data to send, so
1126 * set SHUTDOWN_PENDING
1128 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
1130 SCTP_TCB_UNLOCK(stcb);
1132 SCTP_INP_RUNLOCK(inp);
1135 lwkt_replymsg(&msg->lmsg, error);
/*
 * Copy an address into a user-visible sockaddr_storage, first running
 * it through sctp_recover_scope() to strip/repair any embedded IPv6
 * scope information.
 */
1139 * copies a "user" presentable address and removes embedded scope, etc.
1140 * returns 0 on success, 1 on error
1143 sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
1145 struct sockaddr_in6 lsa6;
1146 sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
1148 memcpy(ss, sa, sa->sa_len);
1153 #if defined(__NetBSD__) || defined(__OpenBSD__)
1155 * On NetBSD and OpenBSD in6_sin_2_v4mapsin6() not used and not exported,
1156 * so we have to export it here.
1158 void in6_sin_2_v4mapsin6(struct sockaddr_in *sin, struct sockaddr_in6 *sin6);
/*
 * Fill the caller-supplied buffer (sas, bounded by 'limit') with the
 * endpoint's usable local addresses, honoring the association's (or,
 * with no stcb, the endpoint's) address scopes and the v4/v6 legality
 * implied by the PCB flags.  Bound-all endpoints walk the system
 * interface list; subset-bound endpoints walk their own address list
 * -- which is a negative (exclusion) list when ASCONF is enabled.
 * 'actual' accumulates the number of bytes written.
 */
1162 sctp_fill_up_addresses(struct sctp_inpcb *inp,
1163 struct sctp_tcb *stcb,
1165 struct sockaddr_storage *sas)
1168 int loopback_scope, ipv4_local_scope, local_scope, site_scope, actual;
1169 int ipv4_addr_legal, ipv6_addr_legal;
1175 /* Turn on all the appropriate scope */
1176 loopback_scope = stcb->asoc.loopback_scope;
1177 ipv4_local_scope = stcb->asoc.ipv4_local_scope;
1178 local_scope = stcb->asoc.local_scope;
1179 site_scope = stcb->asoc.site_scope;
1181 /* Turn on ALL scope, since we look at the EP */
1182 loopback_scope = ipv4_local_scope = local_scope =
/* Decide which address families this endpoint may report */
1185 ipv4_addr_legal = ipv6_addr_legal = 0;
1186 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1187 ipv6_addr_legal = 1;
1189 #if defined(__OpenBSD__)
1190 (0) /* we always do dual bind */
1191 #elif defined (__NetBSD__)
1192 (((struct in6pcb *)inp)->in6p_flags & IN6P_IPV6_V6ONLY)
1194 (((struct in6pcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY)
1197 ipv4_addr_legal = 1;
1200 ipv4_addr_legal = 1;
1203 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1204 TAILQ_FOREACH(ifn, &ifnet, if_list) {
1205 struct ifaddr_container *ifac;
1207 if ((loopback_scope == 0) &&
1208 (ifn->if_type == IFT_LOOP)) {
1209 /* Skip loopback if loopback_scope not set */
1212 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid],
1214 struct ifaddr *ifa = ifac->ifa;
1218 * For the BOUND-ALL case, the list
1219 * associated with a TCB is Always
1220 * considered a reverse list.. i.e.
1221 * it lists addresses that are NOT
1222 * part of the association. If this
1223 * is one of those we must skip it.
1225 if (sctp_is_addr_restricted(stcb,
1230 if ((ifa->ifa_addr->sa_family == AF_INET) &&
1231 (ipv4_addr_legal)) {
1232 struct sockaddr_in *sin;
1233 sin = (struct sockaddr_in *)ifa->ifa_addr;
1234 if (sin->sin_addr.s_addr == 0) {
1235 /* we skip unspecifed addresses */
1238 if ((ipv4_local_scope == 0) &&
1239 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
/* emit as v4-mapped v6 when the socket wants mapped addresses */
1242 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) {
1243 in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
1244 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1245 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
1246 actual += sizeof(struct sockaddr_in6);
1248 memcpy(sas, sin, sizeof(*sin));
1249 ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
1250 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
1251 actual += sizeof(*sin);
1253 if (actual >= limit) {
1256 } else if ((ifa->ifa_addr->sa_family == AF_INET6) &&
1257 (ipv6_addr_legal)) {
1258 struct sockaddr_in6 *sin6, lsa6;
1259 sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
1260 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1261 /* we skip unspecifed addresses */
1264 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
1265 if (local_scope == 0)
1267 if (sin6->sin6_scope_id == 0) {
1269 if (in6_recoverscope(&lsa6,
1272 /* bad link local address */
1277 if ((site_scope == 0) &&
1278 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1281 memcpy(sas, sin6, sizeof(*sin6));
1282 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1283 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
1284 actual += sizeof(*sin6);
1285 if (actual >= limit) {
1292 struct sctp_laddr *laddr;
1294 * If we have a TCB and we do NOT support ASCONF (it's
1295 * turned off or otherwise) then the list is always the
1296 * true list of addresses (the else case below). Otherwise
1297 * the list on the association is a list of addresses that
1298 * are NOT part of the association.
1300 if (inp->sctp_flags & SCTP_PCB_FLAGS_DO_ASCONF) {
1301 /* The list is a NEGATIVE list */
1302 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1304 if (sctp_is_addr_restricted(stcb, laddr->ifa->ifa_addr)) {
1308 if (sctp_fill_user_address(sas, laddr->ifa->ifa_addr))
/* NOTE(review): sin6_port and sin_port share the same offset,
 * so this v6-cast port store works for v4 entries too -- the
 * same idiom is used in the branches below. */
1311 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1312 sas = (struct sockaddr_storage *)((caddr_t)sas +
1313 laddr->ifa->ifa_addr->sa_len);
1314 actual += laddr->ifa->ifa_addr->sa_len;
1315 if (actual >= limit) {
1320 /* The list is a positive list if present */
1322 /* Must use the specific association list */
1323 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
1325 if (sctp_fill_user_address(sas,
1326 laddr->ifa->ifa_addr))
1328 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1329 sas = (struct sockaddr_storage *)((caddr_t)sas +
1330 laddr->ifa->ifa_addr->sa_len);
1331 actual += laddr->ifa->ifa_addr->sa_len;
1332 if (actual >= limit) {
1337 /* No endpoint so use the endpoints individual list */
1338 LIST_FOREACH(laddr, &inp->sctp_addr_list,
1340 if (sctp_fill_user_address(sas,
1341 laddr->ifa->ifa_addr))
1343 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1344 sas = (struct sockaddr_storage *)((caddr_t)sas +
1345 laddr->ifa->ifa_addr->sa_len);
1346 actual += laddr->ifa->ifa_addr->sa_len;
1347 if (actual >= limit) {
/*
 * sctp_count_max_addresses() -- compute an upper bound, in bytes of
 * sockaddr storage, on the address list this endpoint could report.
 * Bound-all endpoints walk every system interface address; sub-set
 * bound endpoints walk only the endpoint's own sctp_addr_list.
 * v4 addresses count as sockaddr_in6 when the PCB requests
 * v4-mapped-v6 reporting (SCTP_PCB_FLAGS_NEEDS_MAPPED_V4).
 *
 * NOTE(review): this extract is lossy -- the embedded original line
 * numbers skip (1358 -> 1362, 1366 -> 1368, ...), so declarations,
 * braces, else-arms and the final return are not visible here.
 */
1358 sctp_count_max_addresses(struct sctp_inpcb *inp)
1362 	 * In both sub-set bound an bound_all cases we return the MAXIMUM
1363 	 * number of addresses that you COULD get. In reality the sub-set
1364 	 * bound may have an exclusion list for a given TCB OR in the
1365 	 * bound-all case a TCB may NOT include the loopback or other
1366 	 * addresses as well.
/* Bound-all case: scan every interface address in the system. */
1368 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1371 		TAILQ_FOREACH(ifn, &ifnet, if_list) {
1372 			struct ifaddr_container *ifac;
1374 			TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
1375 				struct ifaddr *ifa = ifac->ifa;
1377 				/* Count them if they are the right type */
1378 				if (ifa->ifa_addr->sa_family == AF_INET) {
/* v4 addrs may be reported as v4-mapped v6, which is larger. */
1379 					if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
1380 						cnt += sizeof(struct sockaddr_in6);
1382 						cnt += sizeof(struct sockaddr_in);
1384 				} else if (ifa->ifa_addr->sa_family == AF_INET6)
1385 					cnt += sizeof(struct sockaddr_in6);
/* Sub-set bound case: only the endpoint's explicitly bound addresses. */
1389 		struct sctp_laddr *laddr;
1390 		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1391 			if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
1392 				if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
1393 					cnt += sizeof(struct sockaddr_in6);
1395 					cnt += sizeof(struct sockaddr_in);
1397 			} else if (laddr->ifa->ifa_addr->sa_family == AF_INET6)
1398 				cnt += sizeof(struct sockaddr_in6);
/*
 * sctp_do_connect_x() -- kernel side of the sctp_connectx()-style call.
 * The mbuf m carries an int count followed by a packed array of
 * sockaddrs (v4/v6 mixed).  The routine: rejects the call if the TCP
 * model is already connected, validates every address (size, family,
 * no v4-mapped v6, no existing association), allocates a new
 * association on the first address, adds the remaining addresses as
 * remote peers, then either arms the INIT timer (delayed connection)
 * or sends INIT immediately.
 *
 * NOTE(review): lossy extract -- the embedded line numbers skip, so
 * returns, braces, and some error paths are not visible here.
 */
1405 sctp_do_connect_x(struct socket *so,
1406 		  struct sctp_inpcb *inp,
1408 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
1417 	struct sctp_tcb *stcb = NULL;
1418 	struct sockaddr *sa;
1419 	int num_v6=0, num_v4=0, *totaddrp, totaddr, i, incr, at;
1421 	if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1422 		kprintf("Connectx called\n");
1424 #endif /* SCTP_DEBUG */
/* One-to-one (TCP model) socket that is already connected: refuse. */
1426 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1427 	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
1428 		/* We are already connected AND the TCP model */
1429 		return (EADDRINUSE);
1431 	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
1432 		SCTP_INP_RLOCK(inp);
1433 		stcb = LIST_FIRST(&inp->sctp_asoc_list);
1434 		SCTP_INP_RUNLOCK(inp);
1440 	SCTP_ASOC_CREATE_LOCK(inp);
/*
 * NOTE(review): both operands of this || test the SAME flag,
 * SCTP_PCB_FLAGS_SOCKET_GONE.  The second operand was almost
 * certainly intended to be a different teardown flag (upstream
 * FreeBSD tests SCTP_PCB_FLAGS_SOCKET_ALLGONE here) -- confirm
 * against upstream before changing.
 */
1441 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1442 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
1443 		SCTP_ASOC_CREATE_UNLOCK(inp);
/* The mbuf payload: leading int = address count, then packed sockaddrs. */
1447 	totaddrp = mtod(m, int *);
1448 	totaddr = *totaddrp;
1449 	sa = (struct sockaddr *)(totaddrp + 1);
1451 	/* account and validate addresses */
1452 	SCTP_INP_WLOCK(inp);
1453 	SCTP_INP_INCR_REF(inp);
1454 	SCTP_INP_WUNLOCK(inp);
1455 	for (i = 0; i < totaddr; i++) {
1456 		if (sa->sa_family == AF_INET) {
1458 			incr = sizeof(struct sockaddr_in);
1459 		} else if (sa->sa_family == AF_INET6) {
1460 			struct sockaddr_in6 *sin6;
1461 			sin6 = (struct sockaddr_in6 *)sa;
1462 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
1463 				/* Must be non-mapped for connectx */
1464 				SCTP_ASOC_CREATE_UNLOCK(inp);
1468 			incr = sizeof(struct sockaddr_in6);
/* An association to any of the given peers must not already exist. */
1473 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
1475 			/* Already have or am bring up an association */
1476 			SCTP_ASOC_CREATE_UNLOCK(inp);
1477 			SCTP_TCB_UNLOCK(stcb);
/* Guard against a count that walks past the end of the mbuf data. */
1480 		if ((at + incr) > m->m_len) {
1484 		sa = (struct sockaddr *)((caddr_t)sa + incr);
/* Rewind to the first address for the allocation pass below. */
1486 	sa = (struct sockaddr *)(totaddrp + 1);
1487 	SCTP_INP_WLOCK(inp);
1488 	SCTP_INP_DECR_REF(inp);
1489 	SCTP_INP_WUNLOCK(inp);
/* A v4-only endpoint cannot connect to any v6 destination. */
1491 	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
1493 		SCTP_INP_WUNLOCK(inp);
1494 		SCTP_ASOC_CREATE_UNLOCK(inp);
1497 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1499 		struct in6pcb *inp6;
1500 		inp6 = (struct in6pcb *)inp;
/* Per-OS spelling of the IPV6_V6ONLY test on the in6pcb. */
1502 #if defined(__OpenBSD__)
1503 		(0) /* we always do dual bind */
1504 #elif defined (__NetBSD__)
1505 		(inp6->in6p_flags & IN6P_IPV6_V6ONLY)
1507 		(inp6->inp_flags & IN6P_IPV6_V6ONLY)
1511 			 * if IPV6_V6ONLY flag, ignore connections
1512 			 * destined to a v4 addr or v4-mapped addr
1514 			SCTP_INP_WUNLOCK(inp);
1515 			SCTP_ASOC_CREATE_UNLOCK(inp);
1520 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
1521 	    SCTP_PCB_FLAGS_UNBOUND) {
1522 		/* Bind a ephemeral port */
1523 		SCTP_INP_WUNLOCK(inp);
1524 		error = sctp_inpcb_bind(so, NULL, p);
1526 			SCTP_ASOC_CREATE_UNLOCK(inp);
1530 		SCTP_INP_WUNLOCK(inp);
1532 	/* We are GOOD to go */
/* Allocate the association keyed on the first address. */
1533 	stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0);
1535 		/* Gak! no memory */
1536 		SCTP_ASOC_CREATE_UNLOCK(inp);
1539 	/* move to second address */
1540 	if (sa->sa_family == AF_INET)
1541 		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
1543 		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
/* Add each remaining address as a remote transport of the new assoc. */
1545 	for (i = 1; i < totaddr; i++) {
1546 		if (sa->sa_family == AF_INET) {
1547 			incr = sizeof(struct sockaddr_in);
1548 			if (sctp_add_remote_addr(stcb, sa, 0, 8)) {
1549 				/* assoc gone no un-lock */
1550 				sctp_free_assoc(inp, stcb);
1551 				SCTP_ASOC_CREATE_UNLOCK(inp);
1555 		} else if (sa->sa_family == AF_INET6) {
1556 			incr = sizeof(struct sockaddr_in6);
1557 			if (sctp_add_remote_addr(stcb, sa, 0, 8)) {
1558 				/* assoc gone no un-lock */
1559 				sctp_free_assoc(inp, stcb);
1560 				SCTP_ASOC_CREATE_UNLOCK(inp);
1564 		sa = (struct sockaddr *)((caddr_t)sa + incr);
1566 	stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
1568 		/* doing delayed connection */
1569 		stcb->asoc.delayed_connection = 1;
1570 		sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
1572 		SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1573 		sctp_send_initiate(inp, stcb);
/*
 * NOTE(review): stcb->sctp_ep is read/written after SCTP_TCB_UNLOCK;
 * presumably still safe under SCTP_ASOC_CREATE_LOCK -- confirm the
 * locking contract against upstream.
 */
1575 	SCTP_TCB_UNLOCK(stcb);
1576 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
1577 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1578 		/* Set the connected flag so we can queue data */
1581 	SCTP_ASOC_CREATE_UNLOCK(inp);
/*
 * sctp_optsget() -- getsockopt()-style handler for SCTP.  Dispatches on
 * the option identifier and writes the result back into the supplied
 * mbuf m (setting m->m_len to the size of what was written).  For
 * association-scoped options the association is located either from
 * the connected one-to-one socket (LIST_FIRST of sctp_asoc_list) or by
 * the caller-supplied assoc id via sctp_findassociation_ep_asocid().
 *
 * NOTE(review): lossy extract -- embedded original line numbers skip
 * throughout, so braces, breaks, error assignments and several error
 * paths are not visible here.
 */
1587 sctp_optsget(struct socket *so,
1590 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
1597 	struct sctp_inpcb *inp;
1599 	int error, optval=0;
1600 	struct sctp_tcb *stcb = NULL;
1602 	inp = (struct sctp_inpcb *)so->so_pcb;
1609 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1610 			kprintf("optsget:MP is NULL EINVAL\n");
1612 #endif /* SCTP_DEBUG */
1617 	/* Got to have a mbuf */
1619 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1620 			kprintf("Huh no mbuf\n");
1622 #endif /* SCTP_DEBUG */
1626 	if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
1627 		kprintf("optsget opt:%lxx sz:%u\n", (unsigned long)opt,
1630 #endif /* SCTP_DEBUG */
/* Boolean on/off endpoint flags, all returned as an int. */
1634 	case SCTP_AUTOCLOSE:
1635 	case SCTP_AUTO_ASCONF:
1636 	case SCTP_DISABLE_FRAGMENTS:
1637 	case SCTP_I_WANT_MAPPED_V4_ADDR:
1639 		if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
1640 			kprintf("other stuff\n");
1642 #endif /* SCTP_DEBUG */
1643 		SCTP_INP_RLOCK(inp);
1645 		case SCTP_DISABLE_FRAGMENTS:
1646 			optval = inp->sctp_flags & SCTP_PCB_FLAGS_NO_FRAGMENT;
1648 		case SCTP_I_WANT_MAPPED_V4_ADDR:
1649 			optval = inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
1651 		case SCTP_AUTO_ASCONF:
1652 			optval = inp->sctp_flags & SCTP_PCB_FLAGS_AUTO_ASCONF;
1655 			optval = inp->sctp_flags & SCTP_PCB_FLAGS_NODELAY;
1657 		case SCTP_AUTOCLOSE:
1658 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_AUTOCLOSE) ==
1659 			    SCTP_PCB_FLAGS_AUTOCLOSE)
1660 				optval = inp->sctp_ep.auto_close_time;
1666 			error = ENOPROTOOPT;
1667 		} /* end switch (sopt->sopt_name) */
1668 		if (opt != SCTP_AUTOCLOSE) {
1669 			/* make it an "on/off" value */
1670 			optval = (optval != 0);
1672 		if ((size_t)m->m_len < sizeof(int)) {
1675 		SCTP_INP_RUNLOCK(inp);
1677 		/* return the option value */
1678 		*mtod(m, int *) = optval;
1679 		m->m_len = sizeof(optval);
/* Page through the endpoint's association ids, MAX_ASOC_IDS_RET at a time. */
1682 	case SCTP_GET_ASOC_ID_LIST:
1684 		struct sctp_assoc_ids *ids;
1688 		if ((size_t)m->m_len < sizeof(struct sctp_assoc_ids)) {
1692 		ids = mtod(m, struct sctp_assoc_ids *);
1694 		SCTP_INP_RLOCK(inp);
1695 		stcb = LIST_FIRST(&inp->sctp_asoc_list);
1698 			ids->asls_numb_present = 0;
1699 			ids->asls_more_to_get = 0;
1700 			SCTP_INP_RUNLOCK(inp);
/* asls_assoc_start tells us how many entries to skip before filling. */
1703 		orig = ids->asls_assoc_start;
1704 		stcb = LIST_FIRST(&inp->sctp_asoc_list);
1706 			stcb = LIST_NEXT(stcb , sctp_tcblist);
1714 		ids->asls_numb_present = 0;
1715 		ids->asls_more_to_get = 1;
1716 		while(at < MAX_ASOC_IDS_RET) {
1717 			ids->asls_assoc_id[at] = sctp_get_associd(stcb);
1719 			ids->asls_numb_present++;
1720 			stcb = LIST_NEXT(stcb , sctp_tcblist);
1722 				ids->asls_more_to_get = 0;
1726 		SCTP_INP_RUNLOCK(inp);
/* Return the local and peer verification tags for one association. */
1729 	case SCTP_GET_NONCE_VALUES:
1731 		struct sctp_get_nonce_values *gnv;
1732 		if ((size_t)m->m_len < sizeof(struct sctp_get_nonce_values)) {
1736 		gnv = mtod(m, struct sctp_get_nonce_values *);
1737 		stcb = sctp_findassociation_ep_asocid(inp, gnv->gn_assoc_id);
1741 		gnv->gn_peers_tag = stcb->asoc.peer_vtag;
1742 		gnv->gn_local_tag = stcb->asoc.my_vtag;
1743 		SCTP_TCB_UNLOCK(stcb);
/* Authentication options: unimplemented pending the draft. */
1748 	case SCTP_PEER_PUBLIC_KEY:
1749 	case SCTP_MY_PUBLIC_KEY:
1750 	case SCTP_SET_AUTH_CHUNKS:
1751 	case SCTP_SET_AUTH_SECRET:
1752 		/* not supported yet and until we refine the draft */
/* Delayed SACK time, converted from kernel ticks to milliseconds. */
1756 	case SCTP_DELAYED_ACK_TIME:
1759 		if ((size_t)m->m_len < sizeof(int32_t)) {
1763 		tm = mtod(m, int32_t *);
1765 		*tm = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
/* Per-association send/receive buffer usage snapshot. */
1769 	case SCTP_GET_SNDBUF_USE:
1770 		if ((size_t)m->m_len < sizeof(struct sctp_sockstat)) {
1773 			struct sctp_sockstat *ss;
1774 			struct sctp_tcb *stcb;
1775 			struct sctp_association *asoc;
1776 			ss = mtod(m, struct sctp_sockstat *);
1777 			stcb = sctp_findassociation_ep_asocid(inp, ss->ss_assoc_id);
1782 			ss->ss_total_sndbuf = (u_int32_t)asoc->total_output_queue_size;
1783 			ss->ss_total_mbuf_sndbuf = (u_int32_t)asoc->total_output_mbuf_queue_size;
1784 			ss->ss_total_recv_buf = (u_int32_t)(asoc->size_on_delivery_queue +
1785 			    asoc->size_on_reasm_queue +
1786 			    asoc->size_on_all_streams);
1787 			SCTP_TCB_UNLOCK(stcb);
1789 			m->m_len = sizeof(struct sctp_sockstat);
/* Endpoint max-burst value (single byte). */
1796 		burst = mtod(m, u_int8_t *);
1797 		SCTP_INP_RLOCK(inp);
1798 		*burst = inp->sctp_ep.max_burst;
1799 		SCTP_INP_RUNLOCK(inp);
1800 		m->m_len = sizeof(u_int8_t);
/* Maximum segment / fragmentation point for an association (or default). */
1806 		sctp_assoc_t *assoc_id;
1809 		if ((size_t)m->m_len < sizeof(u_int32_t)) {
1813 		if ((size_t)m->m_len < sizeof(sctp_assoc_t)) {
/* The same mbuf region is read as an assoc id and rewritten as the size. */
1817 		assoc_id = mtod(m, sctp_assoc_t *);
1818 		segsize = mtod(m, u_int32_t *);
1819 		m->m_len = sizeof(u_int32_t);
1821 		if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1822 		     (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) ||
1823 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
1824 			struct sctp_tcb *stcb;
1825 			SCTP_INP_RLOCK(inp);
1826 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
1828 				SCTP_TCB_LOCK(stcb);
1829 				SCTP_INP_RUNLOCK(inp);
1830 				*segsize = sctp_get_frag_point(stcb, &stcb->asoc);
1831 				SCTP_TCB_UNLOCK(stcb);
1833 				SCTP_INP_RUNLOCK(inp);
1837 			stcb = sctp_findassociation_ep_asocid(inp, *assoc_id);
1839 				*segsize = sctp_get_frag_point(stcb, &stcb->asoc);
1840 				SCTP_TCB_UNLOCK(stcb);
1844 		/* default is to get the max, if I
1845 		 * can't calculate from an existing association.
1847 		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1848 			ovh = SCTP_MED_OVERHEAD;
1850 			ovh = SCTP_MED_V4_OVERHEAD;
1852 		*segsize = inp->sctp_frag_point - ovh;
/* Debug level: readable only when compiled with SCTP_DEBUG. */
1857 	case SCTP_SET_DEBUG_LEVEL:
1861 		if ((size_t)m->m_len < sizeof(u_int32_t)) {
1865 		level = mtod(m, u_int32_t *);
1867 		*level = sctp_debug_on;
1868 		m->m_len = sizeof(u_int32_t);
1869 		kprintf("Returning DEBUG LEVEL %x is set\n",
1870 		    (u_int)sctp_debug_on);
1872 #else /* SCTP_DEBUG */
1876 	case SCTP_GET_STAT_LOG:
1877 #ifdef SCTP_STAT_LOGGING
1878 		error = sctp_fill_stat_log(m);
1879 #else /* SCTP_DEBUG */
/* Copy out the global peg (statistics) counters. */
1886 		if ((size_t)m->m_len < sizeof(sctp_pegs)) {
1890 		pt = mtod(m, u_int32_t *);
1891 		memcpy(pt, sctp_pegs, sizeof(sctp_pegs));
1892 		m->m_len = sizeof(sctp_pegs);
/* Report which notification events this endpoint is subscribed to. */
1897 		struct sctp_event_subscribe *events;
1899 		if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
1900 			kprintf("get events\n");
1902 #endif /* SCTP_DEBUG */
1903 		if ((size_t)m->m_len < sizeof(struct sctp_event_subscribe)) {
1905 			if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
1906 				kprintf("M->M_LEN is %d not %d\n",
1908 				    (int)sizeof(struct sctp_event_subscribe));
1910 #endif /* SCTP_DEBUG */
1914 		events = mtod(m, struct sctp_event_subscribe *);
1915 		memset(events, 0, sizeof(*events));
1916 		SCTP_INP_RLOCK(inp);
1917 		if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT)
1918 			events->sctp_data_io_event = 1;
1920 		if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVASSOCEVNT)
1921 			events->sctp_association_event = 1;
1923 		if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVPADDREVNT)
1924 			events->sctp_address_event = 1;
1926 		if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVSENDFAILEVNT)
1927 			events->sctp_send_failure_event = 1;
1929 		if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVPEERERR)
1930 			events->sctp_peer_error_event = 1;
1932 		if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)
1933 			events->sctp_shutdown_event = 1;
1935 		if (inp->sctp_flags & SCTP_PCB_FLAGS_PDAPIEVNT)
1936 			events->sctp_partial_delivery_event = 1;
1938 		if (inp->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT)
1939 			events->sctp_adaption_layer_event = 1;
1941 		if (inp->sctp_flags & SCTP_PCB_FLAGS_STREAM_RESETEVNT)
1942 			events->sctp_stream_reset_events = 1;
1943 		SCTP_INP_RUNLOCK(inp);
1944 		m->m_len = sizeof(struct sctp_event_subscribe);
/* Adaption-layer indication value stored on the endpoint. */
1949 	case SCTP_ADAPTION_LAYER:
1950 		if ((size_t)m->m_len < sizeof(int)) {
1955 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1956 			kprintf("getadaption ind\n");
1958 #endif /* SCTP_DEBUG */
1959 		SCTP_INP_RLOCK(inp);
1960 		*mtod(m, int *) = inp->sctp_ep.adaption_layer_indicator;
1961 		SCTP_INP_RUNLOCK(inp);
1962 		m->m_len = sizeof(int);
1964 	case SCTP_SET_INITIAL_DBG_SEQ:
1965 		if ((size_t)m->m_len < sizeof(int)) {
1970 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1971 			kprintf("get initial dbg seq\n");
1973 #endif /* SCTP_DEBUG */
1974 		SCTP_INP_RLOCK(inp);
1975 		*mtod(m, int *) = inp->sctp_ep.initial_sequence_debug;
1976 		SCTP_INP_RUNLOCK(inp);
1977 		m->m_len = sizeof(int);
/* Buffer size the caller would need for SCTP_GET_LOCAL_ADDRESSES. */
1979 	case SCTP_GET_LOCAL_ADDR_SIZE:
1980 		if ((size_t)m->m_len < sizeof(int)) {
1985 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1986 			kprintf("get local sizes\n");
1988 #endif /* SCTP_DEBUG */
1989 		SCTP_INP_RLOCK(inp);
1990 		*mtod(m, int *) = sctp_count_max_addresses(inp);
1991 		SCTP_INP_RUNLOCK(inp);
1992 		m->m_len = sizeof(int);
/* Buffer size the caller would need for SCTP_GET_PEER_ADDRESSES. */
1994 	case SCTP_GET_REMOTE_ADDR_SIZE:
1996 		sctp_assoc_t *assoc_id;
1998 		struct sctp_nets *net;
2000 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2001 			kprintf("get remote size\n");
2003 #endif /* SCTP_DEBUG */
2004 		if ((size_t)m->m_len < sizeof(sctp_assoc_t)) {
2006 			kprintf("m->m_len:%d not %zd\n",
2007 			    m->m_len, sizeof(sctp_assoc_t));
2008 #endif /* SCTP_DEBUG */
2013 		val = mtod(m, u_int32_t *);
2014 		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2015 			SCTP_INP_RLOCK(inp);
2016 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
2018 				SCTP_TCB_LOCK(stcb);
2019 			SCTP_INP_RUNLOCK(inp);
2022 			assoc_id = mtod(m, sctp_assoc_t *);
2023 			stcb = sctp_findassociation_ep_asocid(inp, *assoc_id);
2032 		/* Count the sizes */
2033 		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2034 			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) ||
2035 			    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2036 				sz += sizeof(struct sockaddr_in6);
2037 			} else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2038 				sz += sizeof(struct sockaddr_in);
2044 		SCTP_TCB_UNLOCK(stcb);
2046 		m->m_len = sizeof(u_int32_t);
2049 	case SCTP_GET_PEER_ADDRESSES:
2051 		 * Get the address information, an array
2052 		 * is passed in to fill up we pack it.
2056 		struct sockaddr_storage *sas;
2057 		struct sctp_nets *net;
2058 		struct sctp_getaddresses *saddr;
2060 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2061 			kprintf("get peer addresses\n");
2063 #endif /* SCTP_DEBUG */
2064 		if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
2068 		left = m->m_len - sizeof(struct sctp_getaddresses);
2069 		saddr = mtod(m, struct sctp_getaddresses *);
2070 		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2071 			SCTP_INP_RLOCK(inp);
2072 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
2074 				SCTP_TCB_LOCK(stcb);
2075 			SCTP_INP_RUNLOCK(inp);
2077 			stcb = sctp_findassociation_ep_asocid(inp,
2078 			    saddr->sget_assoc_id);
2083 		m->m_len = sizeof(struct sctp_getaddresses);
2084 		sas = (struct sockaddr_storage *)&saddr->addr[0];
/* Pack one sockaddr per remote transport, honoring v4-mapped mode. */
2086 		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2087 			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) ||
2088 			    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2089 				cpsz = sizeof(struct sockaddr_in6);
2090 			} else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2091 				cpsz = sizeof(struct sockaddr_in);
2097 				/* not enough room. */
2099 				if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2100 					kprintf("Out of room\n");
2102 #endif /* SCTP_DEBUG */
2105 			if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2106 			    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) {
2107 				/* Must map the address */
2108 				in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr,
2109 				    (struct sockaddr_in6 *)sas);
2111 				memcpy(sas, &net->ro._l_addr, cpsz);
2113 			((struct sockaddr_in *)sas)->sin_port = stcb->rport;
2115 			sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
2119 			if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
2120 				kprintf("left now:%d mlen:%d\n",
2123 #endif /* SCTP_DEBUG */
2125 		SCTP_TCB_UNLOCK(stcb);
2128 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2129 			kprintf("All done\n");
2131 #endif /* SCTP_DEBUG */
2133 	case SCTP_GET_LOCAL_ADDRESSES:
2136 		struct sockaddr_storage *sas;
2137 		struct sctp_getaddresses *saddr;
2139 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2140 			kprintf("get local addresses\n");
2142 #endif /* SCTP_DEBUG */
2143 		if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
2147 		saddr = mtod(m, struct sctp_getaddresses *);
2149 		if (saddr->sget_assoc_id) {
2150 			if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2151 				SCTP_INP_RLOCK(inp);
2152 				stcb = LIST_FIRST(&inp->sctp_asoc_list);
2154 					SCTP_TCB_LOCK(stcb);
2155 				SCTP_INP_RUNLOCK(inp);
2157 				stcb = sctp_findassociation_ep_asocid(inp, saddr->sget_assoc_id);
2163 		 * assure that the TCP model does not need a assoc id
2166 		if ( (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) &&
2168 			SCTP_INP_RLOCK(inp);
2169 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
2171 				SCTP_TCB_LOCK(stcb);
2172 			SCTP_INP_RUNLOCK(inp);
/* Delegates the actual packing to sctp_fill_up_addresses(). */
2174 		sas = (struct sockaddr_storage *)&saddr->addr[0];
2175 		limit = m->m_len - sizeof(sctp_assoc_t);
2176 		actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
2177 		SCTP_TCB_UNLOCK(stcb);
2178 		m->m_len = sizeof(struct sockaddr_storage) + actual;
/* Heartbeat / path-max-retransmit parameters, per-net, per-assoc or EP default. */
2181 	case SCTP_PEER_ADDR_PARAMS:
2183 		struct sctp_paddrparams *paddrp;
2184 		struct sctp_nets *net;
2187 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2188 			kprintf("Getting peer_addr_params\n");
2190 #endif /* SCTP_DEBUG */
2191 		if ((size_t)m->m_len < sizeof(struct sctp_paddrparams)) {
2193 			if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
2194 				kprintf("Hmm m->m_len:%d is to small\n",
2197 #endif /* SCTP_DEBUG */
2201 		paddrp = mtod(m, struct sctp_paddrparams *);
2204 		if (paddrp->spp_assoc_id) {
2206 			if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2207 				kprintf("In spp_assoc_id find type\n");
2209 #endif /* SCTP_DEBUG */
2210 			if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2211 				SCTP_INP_RLOCK(inp);
2212 				stcb = LIST_FIRST(&inp->sctp_asoc_list);
2214 					SCTP_TCB_LOCK(stcb);
2215 					net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
2217 				SCTP_INP_RLOCK(inp);
2219 				stcb = sctp_findassociation_ep_asocid(inp, paddrp->spp_assoc_id);
/* No assoc id matched: fall back to looking up by the supplied address. */
2226 		if ( (stcb == NULL) &&
2227 		    ((((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET) ||
2228 		     (((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET6))) {
2229 			/* Lookup via address */
2231 			if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2232 				kprintf("Ok we need to lookup a param\n");
2234 #endif /* SCTP_DEBUG */
2235 			if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2236 				SCTP_INP_RLOCK(inp);
2237 				stcb = LIST_FIRST(&inp->sctp_asoc_list);
2239 					SCTP_TCB_LOCK(stcb);
2240 					net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
2242 				SCTP_INP_RUNLOCK(inp);
2244 				SCTP_INP_WLOCK(inp);
2245 				SCTP_INP_INCR_REF(inp);
2246 				SCTP_INP_WUNLOCK(inp);
2247 				stcb = sctp_findassociation_ep_addr(&inp,
2248 				    (struct sockaddr *)&paddrp->spp_address,
2251 					SCTP_INP_WLOCK(inp);
2252 					SCTP_INP_DECR_REF(inp);
2253 					SCTP_INP_WUNLOCK(inp);
2262 			/* Effects the Endpoint */
2264 			if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2265 				kprintf("User wants EP level info\n");
2267 #endif /* SCTP_DEBUG */
2271 			/* Applys to the specific association */
2273 			if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2274 				kprintf("In TCB side\n");
2276 #endif /* SCTP_DEBUG */
2278 				paddrp->spp_pathmaxrxt = net->failure_threshold;
2280 				/* No destination so return default value */
2281 				paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
2283 			paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
2284 			paddrp->spp_assoc_id = sctp_get_associd(stcb);
2285 			SCTP_TCB_UNLOCK(stcb);
2287 			/* Use endpoint defaults */
2288 			SCTP_INP_RLOCK(inp);
2290 			if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2291 				kprintf("In EP level info\n");
2293 #endif /* SCTP_DEBUG */
2294 			paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
2295 			paddrp->spp_hbinterval = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT];
2296 			paddrp->spp_assoc_id = (sctp_assoc_t)0;
2297 			SCTP_INP_RUNLOCK(inp);
2299 		m->m_len = sizeof(struct sctp_paddrparams);
/* Reachability / congestion snapshot for a single peer address. */
2302 	case SCTP_GET_PEER_ADDR_INFO:
2304 		struct sctp_paddrinfo *paddri;
2305 		struct sctp_nets *net;
2307 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2308 			kprintf("GetPEER ADDR_INFO\n");
2310 #endif /* SCTP_DEBUG */
2311 		if ((size_t)m->m_len < sizeof(struct sctp_paddrinfo)) {
2315 		paddri = mtod(m, struct sctp_paddrinfo *);
2317 		if ((((struct sockaddr *)&paddri->spinfo_address)->sa_family == AF_INET) ||
2318 		    (((struct sockaddr *)&paddri->spinfo_address)->sa_family == AF_INET6)) {
2319 			/* Lookup via address */
2320 			if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2321 				SCTP_INP_RLOCK(inp);
2322 				stcb = LIST_FIRST(&inp->sctp_asoc_list);
2324 					SCTP_TCB_LOCK(stcb);
2325 					net = sctp_findnet(stcb,
2326 					    (struct sockaddr *)&paddri->spinfo_address);
2328 				SCTP_INP_RUNLOCK(inp);
2330 				SCTP_INP_WLOCK(inp);
2331 				SCTP_INP_INCR_REF(inp);
2332 				SCTP_INP_WUNLOCK(inp);
2333 				stcb = sctp_findassociation_ep_addr(&inp,
2334 				    (struct sockaddr *)&paddri->spinfo_address,
2337 					SCTP_INP_WLOCK(inp);
2338 					SCTP_INP_DECR_REF(inp);
2339 					SCTP_INP_WUNLOCK(inp);
2346 		if ((stcb == NULL) || (net == NULL)) {
2350 		m->m_len = sizeof(struct sctp_paddrinfo);
2351 		paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK|SCTP_ADDR_NOHB);
2352 		paddri->spinfo_cwnd = net->cwnd;
2353 		paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
2354 		paddri->spinfo_rto = net->RTO;
2355 		paddri->spinfo_assoc_id = sctp_get_associd(stcb);
2356 		SCTP_TCB_UNLOCK(stcb);
/* Global PCB counters. */
2359 	case SCTP_PCB_STATUS:
2361 		struct sctp_pcbinfo *spcb;
2363 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2364 			kprintf("PCB status\n");
2366 #endif /* SCTP_DEBUG */
2367 		if ((size_t)m->m_len < sizeof(struct sctp_pcbinfo)) {
2371 		spcb = mtod(m, struct sctp_pcbinfo *);
2372 		sctp_fill_pcbinfo(spcb);
2373 		m->m_len = sizeof(struct sctp_pcbinfo);
/* SCTP_STATUS: association-wide state plus primary-path details. */
2378 		struct sctp_nets *net;
2379 		struct sctp_status *sstat;
2381 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2382 			kprintf("SCTP status\n");
2384 #endif /* SCTP_DEBUG */
2386 		if ((size_t)m->m_len < sizeof(struct sctp_status)) {
2390 		sstat = mtod(m, struct sctp_status *);
2392 		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2393 			SCTP_INP_RLOCK(inp);
2394 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
2396 				SCTP_TCB_LOCK(stcb);
2397 			SCTP_INP_RUNLOCK(inp);
2399 			stcb = sctp_findassociation_ep_asocid(inp, sstat->sstat_assoc_id);
2406 		 * I think passing the state is fine since
2407 		 * sctp_constants.h will be available to the user
2410 		sstat->sstat_state = stcb->asoc.state;
2411 		sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
2412 		sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
2414 		 * We can't include chunks that have been passed
2415 		 * to the socket layer. Only things in queue.
2417 		sstat->sstat_penddata = (stcb->asoc.cnt_on_delivery_queue +
2418 		    stcb->asoc.cnt_on_reasm_queue +
2419 		    stcb->asoc.cnt_on_all_streams);
2422 		sstat->sstat_instrms = stcb->asoc.streamincnt;
2423 		sstat->sstat_outstrms = stcb->asoc.streamoutcnt;
2424 		sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc);
2425 		memcpy(&sstat->sstat_primary.spinfo_address,
2426 		    &stcb->asoc.primary_destination->ro._l_addr,
2427 		    ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len);
2428 		net = stcb->asoc.primary_destination;
2429 		((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport;
2431 		 * Again the user can get info from sctp_constants.h
2432 		 * for what the state of the network is.
2434 		sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK;
2435 		sstat->sstat_primary.spinfo_cwnd = net->cwnd;
2436 		sstat->sstat_primary.spinfo_srtt = net->lastsa;
2437 		sstat->sstat_primary.spinfo_rto = net->RTO;
2438 		sstat->sstat_primary.spinfo_mtu = net->mtu;
2439 		sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb);
2440 		SCTP_TCB_UNLOCK(stcb);
2441 		m->m_len = sizeof(*sstat);
/* RTO parameters: endpoint defaults for assoc id 0, else per-assoc. */
2446 		struct sctp_rtoinfo *srto;
2448 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2449 			kprintf("RTO Info\n");
2451 #endif /* SCTP_DEBUG */
2452 		if ((size_t)m->m_len < sizeof(struct sctp_rtoinfo)) {
2456 		srto = mtod(m, struct sctp_rtoinfo *);
2457 		if (srto->srto_assoc_id == 0) {
2458 			/* Endpoint only please */
2459 			SCTP_INP_RLOCK(inp);
2460 			srto->srto_initial = inp->sctp_ep.initial_rto;
2461 			srto->srto_max = inp->sctp_ep.sctp_maxrto;
2462 			srto->srto_min = inp->sctp_ep.sctp_minrto;
2463 			SCTP_INP_RUNLOCK(inp);
2466 		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2467 			SCTP_INP_RLOCK(inp);
2468 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
2470 				SCTP_TCB_LOCK(stcb);
2471 			SCTP_INP_RUNLOCK(inp);
2473 			stcb = sctp_findassociation_ep_asocid(inp, srto->srto_assoc_id);
2479 		srto->srto_initial = stcb->asoc.initial_rto;
2480 		srto->srto_max = stcb->asoc.maxrto;
2481 		srto->srto_min = stcb->asoc.minrto;
2482 		SCTP_TCB_UNLOCK(stcb);
2483 		m->m_len = sizeof(*srto);
/* Association parameters: per-assoc values, or EP defaults if none found. */
2486 	case SCTP_ASSOCINFO:
2488 		struct sctp_assocparams *sasoc;
2490 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2491 			kprintf("Associnfo\n");
2493 #endif /* SCTP_DEBUG */
2494 		if ((size_t)m->m_len < sizeof(struct sctp_assocparams)) {
2498 		sasoc = mtod(m, struct sctp_assocparams *);
2501 		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2502 			SCTP_INP_RLOCK(inp);
2503 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
2505 				SCTP_TCB_LOCK(stcb);
2506 			SCTP_INP_RUNLOCK(inp);
2508 		if ((sasoc->sasoc_assoc_id) && (stcb == NULL)) {
2509 			stcb = sctp_findassociation_ep_asocid(inp,
2510 			    sasoc->sasoc_assoc_id);
2520 			sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
2521 			sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
2522 			sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
2523 			sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
2524 			sasoc->sasoc_cookie_life = stcb->asoc.cookie_life;
2525 			SCTP_TCB_UNLOCK(stcb);
2527 			SCTP_INP_RLOCK(inp);
2528 			sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
2529 			sasoc->sasoc_number_peer_destinations = 0;
2530 			sasoc->sasoc_peer_rwnd = 0;
2531 			sasoc->sasoc_local_rwnd = ssb_space(&inp->sctp_socket->so_rcv);
2532 			sasoc->sasoc_cookie_life = inp->sctp_ep.def_cookie_life;
2533 			SCTP_INP_RUNLOCK(inp);
2535 		m->m_len = sizeof(*sasoc);
/* Default sndrcvinfo used when the caller supplies none on send. */
2538 	case SCTP_DEFAULT_SEND_PARAM:
2540 		struct sctp_sndrcvinfo *s_info;
2542 		if (m->m_len != sizeof(struct sctp_sndrcvinfo)) {
2546 		s_info = mtod(m, struct sctp_sndrcvinfo *);
2547 		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2548 			SCTP_INP_RLOCK(inp);
2549 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
2551 				SCTP_TCB_LOCK(stcb);
2552 			SCTP_INP_RUNLOCK(inp);
2554 			stcb = sctp_findassociation_ep_asocid(inp, s_info->sinfo_assoc_id);
2561 		*s_info = stcb->asoc.def_send;
2562 		SCTP_TCB_UNLOCK(stcb);
2563 		m->m_len = sizeof(*s_info);
/* INIT defaults used for new associations on this endpoint. */
2567 		struct sctp_initmsg *sinit;
2569 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2570 			kprintf("initmsg\n");
2572 #endif /* SCTP_DEBUG */
2573 		if ((size_t)m->m_len < sizeof(struct sctp_initmsg)) {
2577 		sinit = mtod(m, struct sctp_initmsg *);
2578 		SCTP_INP_RLOCK(inp);
2579 		sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count;
2580 		sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome;
2581 		sinit->sinit_max_attempts = inp->sctp_ep.max_init_times;
2582 		sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max;
2583 		SCTP_INP_RUNLOCK(inp);
2584 		m->m_len = sizeof(*sinit);
2587 	case SCTP_PRIMARY_ADDR:
2588 		/* we allow a "get" operation on this */
2590 		struct sctp_setprim *ssp;
2593 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2594 			kprintf("setprimary\n");
2596 #endif /* SCTP_DEBUG */
2597 		if ((size_t)m->m_len < sizeof(struct sctp_setprim)) {
2601 		ssp = mtod(m, struct sctp_setprim *);
2602 		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2603 			SCTP_INP_RLOCK(inp);
2604 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
2606 				SCTP_TCB_LOCK(stcb);
2607 			SCTP_INP_RUNLOCK(inp);
2609 			stcb = sctp_findassociation_ep_asocid(inp, ssp->ssp_assoc_id);
2611 				/* one last shot, try it by the address in */
2612 				struct sctp_nets *net;
2614 				SCTP_INP_WLOCK(inp);
2615 				SCTP_INP_INCR_REF(inp);
2616 				SCTP_INP_WUNLOCK(inp);
2617 				stcb = sctp_findassociation_ep_addr(&inp,
2618 				    (struct sockaddr *)&ssp->ssp_addr,
2621 					SCTP_INP_WLOCK(inp);
2622 					SCTP_INP_DECR_REF(inp);
2623 					SCTP_INP_WUNLOCK(inp);
2631 		/* simply copy out the sockaddr_storage... */
2632 		memcpy(&ssp->ssp_addr,
2633 		    &stcb->asoc.primary_destination->ro._l_addr,
2634 		    ((struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr)->sa_len);
2635 		SCTP_TCB_UNLOCK(stcb);
2636 		m->m_len = sizeof(*ssp);
2640 		error = ENOPROTOOPT;
2643 	} /* end switch (sopt->sopt_name) */
/*
 * sctp_optsset() - handle SOPT_SET socket options for an SCTP endpoint.
 *
 * The option value arrives in mbuf *m (already copied in by the caller,
 * sctp_ctloutput).  Each case validates m->m_len against the expected
 * structure size before dereferencing via mtod().
 *
 * Locking convention visible throughout: endpoint fields are changed under
 * SCTP_INP_WLOCK; per-association fields are changed with only the TCB lock
 * held, acquired either via the "connected" fast path (first assoc on the
 * list under SCTP_INP_RLOCK) or via sctp_findassociation_ep_asocid()/
 * _ep_addr(), the latter bracketed by INP ref-count bumps so the inp cannot
 * go away during the unlocked lookup.
 *
 * NOTE(review): this file excerpt is fragmented; some error/break lines are
 * not visible here.  Comments below describe only the visible code.
 */
2648 sctp_optsset(struct socket *so,
2651 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
2658 	int error, *mopt, set_opt;
2660 	struct sctp_tcb *stcb = NULL;
2661 	struct sctp_inpcb *inp;
2665 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2666 			kprintf("optsset:MP is NULL EINVAL\n");
2668 #endif /* SCTP_DEBUG */
2675 	inp = (struct sctp_inpcb *)so->so_pcb;
	/* Boolean on/off endpoint flags share one copy-in path. */
2682 	case SCTP_AUTOCLOSE:
2683 	case SCTP_AUTO_ASCONF:
2684 	case SCTP_DISABLE_FRAGMENTS:
2685 	case SCTP_I_WANT_MAPPED_V4_ADDR:
2686 		/* copy in the option value */
2687 		if ((size_t)m->m_len < sizeof(int)) {
2691 		mopt = mtod(m, int *);
2696 		case SCTP_DISABLE_FRAGMENTS:
2697 			set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT;
2699 		case SCTP_AUTO_ASCONF:
2700 			set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
2703 		case SCTP_I_WANT_MAPPED_V4_ADDR:
			/* mapped-v4 only makes sense on a v6-bound socket */
2704 			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2705 				set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
2711 			set_opt = SCTP_PCB_FLAGS_NODELAY;
2713 		case SCTP_AUTOCLOSE:
2714 			set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
2716 			 * The value is in ticks.
2717 			 * Note this does not effect old associations, only
2720 			inp->sctp_ep.auto_close_time = (*mopt * hz);
		/* apply (or clear) the selected flag bit under the write lock */
2723 		SCTP_INP_WLOCK(inp);
2725 			inp->sctp_flags |= set_opt;
2727 			inp->sctp_flags &= ~set_opt;
2729 		SCTP_INP_WUNLOCK(inp);
2731 	case SCTP_MY_PUBLIC_KEY:    /* set my public key */
2732 	case SCTP_SET_AUTH_CHUNKS: /* set the authenticated chunks required */
2733 	case SCTP_SET_AUTH_SECRET: /* set the actual secret for the endpoint */
2734 		/* not supported yet and until we refine the draft */
2738 	case SCTP_CLR_STAT_LOG:
2739 #ifdef SCTP_STAT_LOGGING
2740 		sctp_clr_stat_log();
2745 	case SCTP_DELAYED_ACK_TIME:
2748 		if ((size_t)m->m_len < sizeof(int32_t)) {
2752 		tm = mtod(m, int32_t *);
2754 		if ((*tm < 10) || (*tm > 500)) {
2755 			/* can't be smaller than 10ms */
2756 			/* MUST NOT be larger than 500ms */
2760 		inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(*tm);
2763 	case SCTP_RESET_STREAMS:
2765 		struct sctp_stream_reset *strrst;
2766 		uint8_t two_way, not_peer;
2768 		if ((size_t)m->m_len < sizeof(struct sctp_stream_reset)) {
2772 		strrst = mtod(m, struct sctp_stream_reset *);
2774 		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2775 			SCTP_INP_RLOCK(inp);
2776 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
2778 				SCTP_TCB_LOCK(stcb);
2779 			SCTP_INP_RUNLOCK(inp);
2781 			stcb = sctp_findassociation_ep_asocid(inp, strrst->strrst_assoc_id);
2786 		if (stcb->asoc.peer_supports_strreset == 0) {
2787 			/* Peer does not support it,
2788 			 * we return protocol not supported since
2789 			 * this is true for this feature and this
2790 			 * peer, not the socket request in general.
2792 			error = EPROTONOSUPPORT;
2793 			SCTP_TCB_UNLOCK(stcb);
2797 		/* Having re-thought this code I added as I write the I-D there
2798 		 * is NO need for it. The peer, if we are requesting a stream-reset
2799 		 * will send a request to us but will itself do what we do, take
2800 		 * and copy off the "reset information" we send and queue TSN's
2801 		 * larger than the send-next in our response message. Thus they
2804 		/* if (stcb->asoc.sending_seq != (stcb->asoc.last_acked_seq + 1)) {*/
2805 		/* Must have all sending data ack'd before we
2806 		 * start this procedure. This is a bit restrictive
2807 		 * and we SHOULD work on changing this so ONLY the
2808 		 * streams being RESET get held up. So, a reset-all
2809 		 * would require this.. but a reset specific just
2810 		 * needs to be sure that the ones being reset have
2811 		 * nothing on the send_queue. For now we will
2812 		 * skip this more detailed method and do a course
2813 		 * way.. i.e. nothing pending ... for future FIX ME!
		/* only one stream-reset request may be outstanding at a time */
2819 		if (stcb->asoc.stream_reset_outstanding) {
2821 			SCTP_TCB_UNLOCK(stcb);
2824 		if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) {
2827 		} else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) {
2830 		} else if (strrst->strrst_flags == SCTP_RESET_BOTH) {
2835 			SCTP_TCB_UNLOCK(stcb);
2838 		sctp_send_str_reset_req(stcb, strrst->strrst_num_streams,
2839 		    strrst->strrst_list, two_way, not_peer);
2840 		sctp_chunk_output(inp, stcb, 12);
2841 		SCTP_TCB_UNLOCK(stcb);
2845 	case SCTP_RESET_PEGS:
		/* zero the global statistics counters */
2846 		memset(sctp_pegs, 0, sizeof(sctp_pegs));
2849 	case SCTP_CONNECT_X:
2850 		if ((size_t)m->m_len < (sizeof(int) + sizeof(struct sockaddr_in))) {
2854 		error = sctp_do_connect_x(so, inp, m, p, 0);
2857 	case SCTP_CONNECT_X_DELAYED:
2858 		if ((size_t)m->m_len < (sizeof(int) + sizeof(struct sockaddr_in))) {
2862 		error = sctp_do_connect_x(so, inp, m, p, 1);
2865 	case SCTP_CONNECT_X_COMPLETE:
2867 		struct sockaddr *sa;
2868 		struct sctp_nets *net;
2869 		if ((size_t)m->m_len < sizeof(struct sockaddr_in)) {
2873 		sa = mtod(m, struct sockaddr *);
2875 		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2876 			SCTP_INP_RLOCK(inp);
2877 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
2879 				SCTP_TCB_LOCK(stcb);
2880 				net = sctp_findnet(stcb, sa);
2882 			SCTP_INP_RUNLOCK(inp);
			/* hold a ref across the unlocked address lookup */
2884 			SCTP_INP_WLOCK(inp);
2885 			SCTP_INP_INCR_REF(inp);
2886 			SCTP_INP_WUNLOCK(inp);
2887 			stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL);
2889 				SCTP_INP_WLOCK(inp);
2890 				SCTP_INP_DECR_REF(inp);
2891 				SCTP_INP_WUNLOCK(inp);
2899 		if (stcb->asoc.delayed_connection == 1) {
			/* fire the INIT that SCTP_CONNECT_X_DELAYED held back */
2900 			stcb->asoc.delayed_connection = 0;
2901 			SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
2902 			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
2903 			sctp_send_initiate(inp, stcb);
2905 			/* already expired or did not use delayed connectx */
2908 		SCTP_TCB_UNLOCK(stcb);
2914 		SCTP_INP_WLOCK(inp);
2915 		burst = mtod(m, u_int8_t *);
2917 			inp->sctp_ep.max_burst = *burst;
2919 		SCTP_INP_WUNLOCK(inp);
		/* fragment-point: user supplies payload size, overhead is added */
2926 		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2927 			ovh = SCTP_MED_OVERHEAD;
2929 			ovh = SCTP_MED_V4_OVERHEAD;
2931 		segsize = mtod(m, u_int32_t *);
2936 		SCTP_INP_WLOCK(inp);
2937 		inp->sctp_frag_point = (*segsize+ovh);
2938 		if (inp->sctp_frag_point < MHLEN) {
2939 			inp->sctp_frag_point = MHLEN;
2941 		SCTP_INP_WUNLOCK(inp);
2944 	case SCTP_SET_DEBUG_LEVEL:
2948 		if ((size_t)m->m_len < sizeof(u_int32_t)) {
2952 		level = mtod(m, u_int32_t *);
2954 		sctp_debug_on = (*level & (SCTP_DEBUG_ALL |
2956 		kprintf("SETTING DEBUG LEVEL to %x\n",
2957 		    (u_int)sctp_debug_on);
2962 #endif /* SCTP_DEBUG */
		/* SCTP_EVENTS: translate each subscription bool to an inp flag */
2966 		struct sctp_event_subscribe *events;
2967 		if ((size_t)m->m_len < sizeof(struct sctp_event_subscribe)) {
2971 		SCTP_INP_WLOCK(inp);
2972 		events = mtod(m, struct sctp_event_subscribe *);
2973 		if (events->sctp_data_io_event) {
2974 			inp->sctp_flags |= SCTP_PCB_FLAGS_RECVDATAIOEVNT;
2976 			inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVDATAIOEVNT;
2979 		if (events->sctp_association_event) {
2980 			inp->sctp_flags |= SCTP_PCB_FLAGS_RECVASSOCEVNT;
2982 			inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVASSOCEVNT;
2985 		if (events->sctp_address_event) {
2986 			inp->sctp_flags |= SCTP_PCB_FLAGS_RECVPADDREVNT;
2988 			inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVPADDREVNT;
2991 		if (events->sctp_send_failure_event) {
2992 			inp->sctp_flags |= SCTP_PCB_FLAGS_RECVSENDFAILEVNT;
2994 			inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVSENDFAILEVNT;
2997 		if (events->sctp_peer_error_event) {
2998 			inp->sctp_flags |= SCTP_PCB_FLAGS_RECVPEERERR;
3000 			inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVPEERERR;
3003 		if (events->sctp_shutdown_event) {
3004 			inp->sctp_flags |= SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT;
3006 			inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT;
3009 		if (events->sctp_partial_delivery_event) {
3010 			inp->sctp_flags |= SCTP_PCB_FLAGS_PDAPIEVNT;
3012 			inp->sctp_flags &= ~SCTP_PCB_FLAGS_PDAPIEVNT;
3015 		if (events->sctp_adaption_layer_event) {
3016 			inp->sctp_flags |= SCTP_PCB_FLAGS_ADAPTIONEVNT;
3018 			inp->sctp_flags &= ~SCTP_PCB_FLAGS_ADAPTIONEVNT;
3021 		if (events->sctp_stream_reset_events) {
3022 			inp->sctp_flags |= SCTP_PCB_FLAGS_STREAM_RESETEVNT;
3024 			inp->sctp_flags &= ~SCTP_PCB_FLAGS_STREAM_RESETEVNT;
3026 		SCTP_INP_WUNLOCK(inp);
3030 	case SCTP_ADAPTION_LAYER:
3032 		struct sctp_setadaption *adap_bits;
3033 		if ((size_t)m->m_len < sizeof(struct sctp_setadaption)) {
3037 		SCTP_INP_WLOCK(inp);
3038 		adap_bits = mtod(m, struct sctp_setadaption *);
3039 		inp->sctp_ep.adaption_layer_indicator = adap_bits->ssb_adaption_ind;
3040 		SCTP_INP_WUNLOCK(inp);
3043 	case SCTP_SET_INITIAL_DBG_SEQ:
3046 		if ((size_t)m->m_len < sizeof(u_int32_t)) {
3050 		SCTP_INP_WLOCK(inp);
3051 		vvv = mtod(m, u_int32_t *);
3052 		inp->sctp_ep.initial_sequence_debug = *vvv;
3053 		SCTP_INP_WUNLOCK(inp);
3056 	case SCTP_DEFAULT_SEND_PARAM:
3058 		struct sctp_sndrcvinfo *s_info;
		/* exact-size check here (!=), unlike the < checks elsewhere */
3060 		if (m->m_len != sizeof(struct sctp_sndrcvinfo)) {
3064 		s_info = mtod(m, struct sctp_sndrcvinfo *);
3066 		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3067 			SCTP_INP_RLOCK(inp);
3068 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
3070 				SCTP_TCB_LOCK(stcb);
3071 			SCTP_INP_RUNLOCK(inp);
3073 			stcb = sctp_findassociation_ep_asocid(inp, s_info->sinfo_assoc_id);
3079 		/* Validate things */
3080 		if (s_info->sinfo_stream > stcb->asoc.streamoutcnt) {
3081 			SCTP_TCB_UNLOCK(stcb);
3085 		/* Mask off the flags that are allowed */
3086 		s_info->sinfo_flags = (s_info->sinfo_flags &
3087 		    (MSG_UNORDERED | MSG_ADDR_OVER |
3088 		     MSG_PR_SCTP_TTL | MSG_PR_SCTP_BUF));
3090 		stcb->asoc.def_send = *s_info;
3091 		SCTP_TCB_UNLOCK(stcb);
3094 	case SCTP_PEER_ADDR_PARAMS:
3096 		struct sctp_paddrparams *paddrp;
3097 		struct sctp_nets *net;
3098 		if ((size_t)m->m_len < sizeof(struct sctp_paddrparams)) {
3102 		paddrp = mtod(m, struct sctp_paddrparams *);
		/* three lookup strategies: assoc-id, then address, else endpoint */
3104 		if (paddrp->spp_assoc_id) {
3105 			if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3106 				SCTP_INP_RLOCK(inp);
3107 				stcb = LIST_FIRST(&inp->sctp_asoc_list);
3109 					SCTP_TCB_LOCK(stcb);
3110 					net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
3112 				SCTP_INP_RUNLOCK(inp);
3114 				stcb = sctp_findassociation_ep_asocid(inp, paddrp->spp_assoc_id);
3121 		if ((stcb == NULL) &&
3122 		    ((((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET) ||
3123 		     (((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET6))) {
3124 			/* Lookup via address */
3125 			if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3126 				SCTP_INP_RLOCK(inp);
3127 				stcb = LIST_FIRST(&inp->sctp_asoc_list);
3129 					SCTP_TCB_LOCK(stcb);
3130 					net = sctp_findnet(stcb,
3131 					    (struct sockaddr *)&paddrp->spp_address);
3133 				SCTP_INP_RUNLOCK(inp);
3135 				SCTP_INP_WLOCK(inp);
3136 				SCTP_INP_INCR_REF(inp);
3137 				SCTP_INP_WUNLOCK(inp);
3138 				stcb = sctp_findassociation_ep_addr(&inp,
3139 				    (struct sockaddr *)&paddrp->spp_address,
3142 					SCTP_INP_WLOCK(inp);
3143 					SCTP_INP_DECR_REF(inp);
3144 					SCTP_INP_WUNLOCK(inp);
3148 			/* Effects the Endpoint */
3152 			/* Applies to the specific association */
3153 			if (paddrp->spp_pathmaxrxt) {
				/* net-level threshold when a net matched, else assoc default */
3155 				if (paddrp->spp_pathmaxrxt)
3156 					net->failure_threshold = paddrp->spp_pathmaxrxt;
3158 				if (paddrp->spp_pathmaxrxt)
3159 					stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt;
			/* hbinterval: 0 = off, 0xffffffff = send one HB now, else set delay */
3162 			if ((paddrp->spp_hbinterval != 0) && (paddrp->spp_hbinterval != 0xffffffff)) {
3166 					net->dest_state &= ~SCTP_ADDR_NOHB;
3168 					old = stcb->asoc.heart_beat_delay;
3169 					stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval;
3171 						/* Turn back on the timer */
3172 						sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3175 			} else if (paddrp->spp_hbinterval == 0xffffffff) {
3177 					sctp_send_hb(stcb, 1, net);
3180 					/* off on association */
3181 					if (stcb->asoc.heart_beat_delay) {
3182 						int cnt_of_unconf = 0;
3183 						struct sctp_nets *lnet;
3184 						TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
3185 							if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
3189 						/* stop the timer ONLY if we have no unconfirmed addresses
3191 						if (cnt_of_unconf == 0)
3192 							sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3194 					stcb->asoc.heart_beat_delay = 0;
3196 					net->dest_state |= SCTP_ADDR_NOHB;
3199 			SCTP_TCB_UNLOCK(stcb);
3201 			/* Use endpoint defaults */
3202 			SCTP_INP_WLOCK(inp);
3203 			if (paddrp->spp_pathmaxrxt)
3204 				inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt;
3205 			if (paddrp->spp_hbinterval != SCTP_ISSUE_HB)
3206 				inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = paddrp->spp_hbinterval;
3207 			SCTP_INP_WUNLOCK(inp);
		/* SCTP_RTOINFO: values in ms; anything <= 10 is ignored */
3213 		struct sctp_rtoinfo *srto;
3214 		if ((size_t)m->m_len < sizeof(struct sctp_rtoinfo)) {
3218 		srto = mtod(m, struct sctp_rtoinfo *);
3219 		if (srto->srto_assoc_id == 0) {
3220 			SCTP_INP_WLOCK(inp);
3221 			/* If we have a null asoc, its default for the endpoint */
3222 			if (srto->srto_initial > 10)
3223 				inp->sctp_ep.initial_rto = srto->srto_initial;
3224 			if (srto->srto_max > 10)
3225 				inp->sctp_ep.sctp_maxrto = srto->srto_max;
3226 			if (srto->srto_min > 10)
3227 				inp->sctp_ep.sctp_minrto = srto->srto_min;
3228 			SCTP_INP_WUNLOCK(inp);
3231 		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3232 			SCTP_INP_RLOCK(inp);
3233 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
3235 				SCTP_TCB_LOCK(stcb);
3236 			SCTP_INP_RUNLOCK(inp);
3238 			stcb = sctp_findassociation_ep_asocid(inp, srto->srto_assoc_id);
3243 		/* Set in ms we hope :-) */
3244 		if (srto->srto_initial > 10)
3245 			stcb->asoc.initial_rto = srto->srto_initial;
3246 		if (srto->srto_max > 10)
3247 			stcb->asoc.maxrto = srto->srto_max;
3248 		if (srto->srto_min > 10)
3249 			stcb->asoc.minrto = srto->srto_min;
3250 		SCTP_TCB_UNLOCK(stcb);
3253 	case SCTP_ASSOCINFO:
3255 		struct sctp_assocparams *sasoc;
3257 		if ((size_t)m->m_len < sizeof(struct sctp_assocparams)) {
3261 		sasoc = mtod(m, struct sctp_assocparams *);
3262 		if (sasoc->sasoc_assoc_id) {
3263 			if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3264 				SCTP_INP_RLOCK(inp);
3265 				stcb = LIST_FIRST(&inp->sctp_asoc_list);
3267 					SCTP_TCB_LOCK(stcb);
3268 				SCTP_INP_RUNLOCK(inp);
3270 				stcb = sctp_findassociation_ep_asocid(inp,
3271 				    sasoc->sasoc_assoc_id);
3281 			if (sasoc->sasoc_asocmaxrxt)
3282 				stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
			/* read-only fields are echoed/zeroed back into the request */
3283 			sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
3284 			sasoc->sasoc_peer_rwnd = 0;
3285 			sasoc->sasoc_local_rwnd = 0;
			/* NOTE(review): gates on the *current* cookie_life, not on the
			 * user-supplied value (endpoint branch below checks the input
			 * instead) — looks asymmetric, confirm against upstream. */
3286 			if (stcb->asoc.cookie_life)
3287 				stcb->asoc.cookie_life = sasoc->sasoc_cookie_life;
3288 			SCTP_TCB_UNLOCK(stcb);
3290 			SCTP_INP_WLOCK(inp);
3291 			if (sasoc->sasoc_asocmaxrxt)
3292 				inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt;
3293 			sasoc->sasoc_number_peer_destinations = 0;
3294 			sasoc->sasoc_peer_rwnd = 0;
3295 			sasoc->sasoc_local_rwnd = 0;
3296 			if (sasoc->sasoc_cookie_life)
3297 				inp->sctp_ep.def_cookie_life = sasoc->sasoc_cookie_life;
3298 			SCTP_INP_WUNLOCK(inp);
		/* SCTP_INITMSG: defaults used when new associations are created */
3304 		struct sctp_initmsg *sinit;
3306 		if ((size_t)m->m_len < sizeof(struct sctp_initmsg)) {
3310 		sinit = mtod(m, struct sctp_initmsg *);
3311 		SCTP_INP_WLOCK(inp);
3312 		if (sinit->sinit_num_ostreams)
3313 			inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams;
3315 		if (sinit->sinit_max_instreams)
3316 			inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams;
3318 		if (sinit->sinit_max_attempts)
3319 			inp->sctp_ep.max_init_times = sinit->sinit_max_attempts;
3321 		if (sinit->sinit_max_init_timeo > 10)
3322 			/* We must be at least a 100ms (we set in ticks) */
3323 			inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo;
3324 		SCTP_INP_WUNLOCK(inp);
3327 	case SCTP_PRIMARY_ADDR:
3329 		struct sctp_setprim *spa;
3330 		struct sctp_nets *net, *lnet;
3331 		if ((size_t)m->m_len < sizeof(struct sctp_setprim)) {
3335 		spa = mtod(m, struct sctp_setprim *);
3337 		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3338 			SCTP_INP_RLOCK(inp);
3339 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
3341 				SCTP_TCB_LOCK(stcb);
3346 			SCTP_INP_RUNLOCK(inp);
3348 			stcb = sctp_findassociation_ep_asocid(inp, spa->ssp_assoc_id);
3351 				SCTP_INP_WLOCK(inp);
3352 				SCTP_INP_INCR_REF(inp);
3353 				SCTP_INP_WUNLOCK(inp);
3354 				stcb = sctp_findassociation_ep_addr(&inp,
3355 				    (struct sockaddr *)&spa->ssp_addr,
3358 					SCTP_INP_WLOCK(inp);
3359 					SCTP_INP_DECR_REF(inp);
3360 					SCTP_INP_WUNLOCK(inp);
3365 		/* find the net, associd or connected lookup type */
3366 		net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr);
3368 			SCTP_TCB_UNLOCK(stcb);
		/* only switch primaries to a confirmed, non-current destination */
3373 		if ((net != stcb->asoc.primary_destination) &&
3374 		    (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
3375 			/* Ok we need to set it */
3376 			lnet = stcb->asoc.primary_destination;
3377 			lnet->next_tsn_at_change = net->next_tsn_at_change = stcb->asoc.sending_seq;
3378 			if (sctp_set_primary_addr(stcb,
3381 				if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3382 					net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH;
3384 				net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY;
3387 		SCTP_TCB_UNLOCK(stcb);
3391 	case SCTP_SET_PEER_PRIMARY_ADDR:
3393 		struct sctp_setpeerprim *sspp;
3394 		if ((size_t)m->m_len < sizeof(struct sctp_setpeerprim)) {
3398 		sspp = mtod(m, struct sctp_setpeerprim *);
3401 		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3402 			SCTP_INP_RLOCK(inp);
3403 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
			/* NOTE(review): this path UNLOCKs the tcb where the sibling
			 * cases LOCK it — may be an excerpt artifact; verify. */
3405 				SCTP_TCB_UNLOCK(stcb);
3406 			SCTP_INP_RUNLOCK(inp);
3408 			stcb = sctp_findassociation_ep_asocid(inp, sspp->sspp_assoc_id);
3413 		if (sctp_set_primary_ip_address_sa(stcb, (struct sockaddr *)&sspp->sspp_addr) != 0) {
3416 		SCTP_TCB_UNLOCK(stcb);
3419 	case SCTP_BINDX_ADD_ADDR:
3421 		struct sctp_getaddresses *addrs;
3422 		struct sockaddr *addr_touse;
3423 		struct sockaddr_in sin;
3424 		/* see if we're bound all already! */
3425 		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3429 		if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
3433 		addrs = mtod(m, struct sctp_getaddresses *);
3434 		addr_touse = addrs->addr;
		/* unwrap v4-mapped v6 addresses into a plain sockaddr_in */
3435 		if (addrs->addr->sa_family == AF_INET6) {
3436 			struct sockaddr_in6 *sin6;
3437 			sin6 = (struct sockaddr_in6 *)addr_touse;
3438 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
3439 				in6_sin6_2_sin(&sin, sin6);
3440 				addr_touse = (struct sockaddr *)&sin;
3443 		if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
3445 			/* Can't get proc for Net/Open BSD */
3449 			error = sctp_inpcb_bind(so, addr_touse, p);
3452 		/* No locks required here since bind and mgmt_ep_sa all
3453 		 * do their own locking. If we do something for the FIX:
3454 		 * below we may need to lock in that case.
3456 		if (addrs->sget_assoc_id == 0) {
3457 			/* add the address */
3458 			struct sctp_inpcb *lep;
3459 			((struct sockaddr_in *)addr_touse)->sin_port = inp->sctp_lport;
3460 			lep = sctp_pcb_findep(addr_touse, 1, 0);
3462 				/* We must decrement the refcount
3463 				 * since we have the ep already and
3464 				 * are binding. No remove going on
3467 				SCTP_INP_WLOCK(inp);
3468 				SCTP_INP_DECR_REF(inp);
3469 				SCTP_INP_WUNLOCK(inp);
3472 				/* already bound to it.. ok */
3474 			} else if (lep == NULL) {
3475 				((struct sockaddr_in *)addr_touse)->sin_port = 0;
3476 				error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
3477 				    SCTP_ADD_IP_ADDRESS);
3479 				error = EADDRNOTAVAIL;
3485 			/* FIX: decide whether we allow assoc based bindx */
3489 	case SCTP_BINDX_REM_ADDR:
3491 		struct sctp_getaddresses *addrs;
3492 		struct sockaddr *addr_touse;
3493 		struct sockaddr_in sin;
3494 		/* see if we're bound all already! */
3495 		if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3499 		if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
3503 		addrs = mtod(m, struct sctp_getaddresses *);
3504 		addr_touse = addrs->addr;
3505 		if (addrs->addr->sa_family == AF_INET6) {
3506 			struct sockaddr_in6 *sin6;
3507 			sin6 = (struct sockaddr_in6 *)addr_touse;
3508 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
3509 				in6_sin6_2_sin(&sin, sin6);
3510 				addr_touse = (struct sockaddr *)&sin;
3513 		/* No lock required mgmt_ep_sa does its own locking. If
3514 		 * the FIX: below is ever changed we may need to
3515 		 * lock before calling association level binding.
3517 		if (addrs->sget_assoc_id == 0) {
3518 			/* delete the address */
3519 			sctp_addr_mgmt_ep_sa(inp, addr_touse,
3520 			    SCTP_DEL_IP_ADDRESS);
3522 			/* FIX: decide whether we allow assoc based bindx */
3527 		error = ENOPROTOOPT;
3529 	} /* end switch (opt) */
/*
 * sctp_ctloutput() - get/setsockopt entry point (netmsg handler).
 *
 * Copies the option value into a private mbuf (capped at one cluster,
 * MCLBYTES), dispatches to sctp_optsset()/sctp_optsget(), and for gets
 * copies the result back out.  Non-SCTP levels are forwarded to the IP
 * layer.  Replies to the lwkt message with the resulting errno.
 */
3534 sctp_ctloutput(netmsg_t msg)
3536 	struct socket *so = msg->ctloutput.base.nm_so;
3537 	struct sockopt *sopt = msg->ctloutput.nm_sopt;
3538 	struct mbuf *m = NULL;
3539 	struct sctp_inpcb *inp;
3542 	inp = (struct sctp_inpcb *)so->so_pcb;
3545 		/* I made the same as TCP since we are not setup? */
3549 	if (sopt->sopt_level != IPPROTO_SCTP) {
3550 		/* wrong proto level... send back up to IP */
3552 		if (INP_CHECK_SOCKAF(so, AF_INET6))
3553 			ip6_ctloutput_dispatch(msg);
3557 		/* msg invalid now */
3560 	if (sopt->sopt_valsize > MCLBYTES) {
3562 		 * Restrict us down to a cluster size, that's all we can
3563 		 * pass either way...
3565 		sopt->sopt_valsize = MCLBYTES;
3567 	if (sopt->sopt_valsize) {
3569 		m = m_get(MB_WAIT, MT_DATA);
3570 		if (sopt->sopt_valsize > MLEN) {
			/* need an mbuf cluster for values bigger than MLEN */
3571 			MCLGET(m, MB_DONTWAIT);
3572 			if ((m->m_flags & M_EXT) == 0) {
3578 		error = sooptcopyin(sopt, mtod(m, caddr_t), sopt->sopt_valsize,
3579 		    sopt->sopt_valsize);
3584 		m->m_len = sopt->sopt_valsize;
3586 	if (sopt->sopt_dir == SOPT_SET) {
3587 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
3588 		error = sctp_optsset(so, sopt->sopt_name, &m, sopt->sopt_td);
3590 		error = sctp_optsset(so, sopt->sopt_name, &m, sopt->sopt_p);
3592 	} else if (sopt->sopt_dir == SOPT_GET) {
3593 #if (defined (__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
3594 		error = sctp_optsget(so, sopt->sopt_name, &m, sopt->sopt_td);
3596 		error = sctp_optsget(so, sopt->sopt_name, &m, sopt->sopt_p);
	/* on successful get, copy the (possibly rewritten) mbuf back out */
3601 	if ( (error == 0) && (m != NULL)) {
3602 		error = sooptcopyout(sopt, mtod(m, caddr_t), m->m_len);
3604 	} else if (m != NULL) {
3608 	lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_connect() - connect(2) entry point (netmsg handler).
 *
 * Rejects connect on a dying socket, on a v6 address for a non-v6 endpoint,
 * and on an already-connected TCP-model socket.  Binds an ephemeral port if
 * unbound, refuses a duplicate association, then allocates a new TCB, enters
 * COOKIE_WAIT and sends the INIT.  Replies to the lwkt message with errno.
 *
 * Fix: the socket-gone guard tested SCTP_PCB_FLAGS_SOCKET_GONE twice; the
 * second arm of the || was dead.  It now tests SCTP_PCB_FLAGS_SOCKET_ALLGONE
 * as well (matching the upstream KAME/FreeBSD SCTP code), so a fully
 * torn-down socket is also rejected.
 */
3612 sctp_connect(netmsg_t msg)
3614 	struct socket *so = msg->connect.base.nm_so;
3615 	struct sockaddr *addr = msg->connect.nm_nam;
3616 	struct sctp_inpcb *inp;
3617 	struct sctp_tcb *stcb;
3621 	if (sctp_debug_on & SCTP_DEBUG_PCB1) {
3622 		kprintf("Connect called in SCTP to ");
3623 		sctp_print_address(addr);
3624 		kprintf("Port %d\n", ntohs(((struct sockaddr_in *)addr)->sin_port));
3626 #endif /* SCTP_DEBUG */
3627 	inp = (struct sctp_inpcb *)so->so_pcb;
3629 		/* I made the same as TCP since we are not setup? */
	/* hold the create lock so no other thread races assoc setup */
3633 	SCTP_ASOC_CREATE_LOCK(inp);
3634 	SCTP_INP_WLOCK(inp);
3635 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3636 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
3637 		/* Should I really unlock ? */
3638 		SCTP_INP_WUNLOCK(inp);
3639 		SCTP_ASOC_CREATE_UNLOCK(inp);
	/* a v6 destination requires a v6-bound endpoint */
3644 	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
3645 	    (addr->sa_family == AF_INET6)) {
3646 		SCTP_INP_WUNLOCK(inp);
3647 		SCTP_ASOC_CREATE_UNLOCK(inp);
3652 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
3653 	    SCTP_PCB_FLAGS_UNBOUND) {
3654 		/* Bind a ephemeral port */
3655 		SCTP_INP_WUNLOCK(inp);
3656 		error = sctp_inpcb_bind(so, NULL, msg->connect.nm_td);
3658 			SCTP_ASOC_CREATE_UNLOCK(inp);
3661 		SCTP_INP_WLOCK(inp);
3663 	/* Now do we connect? */
3664 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3665 	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
3666 		/* We are already connected AND the TCP model */
3667 		SCTP_INP_WUNLOCK(inp);
3668 		SCTP_ASOC_CREATE_UNLOCK(inp);
3672 	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3673 		stcb = LIST_FIRST(&inp->sctp_asoc_list);
3675 			SCTP_TCB_UNLOCK(stcb);
3676 		SCTP_INP_WUNLOCK(inp);
		/* ref the inp across the unlocked association lookup */
3678 		SCTP_INP_INCR_REF(inp);
3679 		SCTP_INP_WUNLOCK(inp);
3680 		stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
3682 			SCTP_INP_WLOCK(inp);
3683 			SCTP_INP_DECR_REF(inp);
3684 			SCTP_INP_WUNLOCK(inp);
3688 		/* Already have or am bring up an association */
3689 		SCTP_ASOC_CREATE_UNLOCK(inp);
3690 		SCTP_TCB_UNLOCK(stcb);
3694 	/* We are GOOD to go */
3695 	stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0);
3697 		/* Gak! no memory */
3700 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
3701 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
3702 		/* Set the connected flag so we can queue data */
3705 	stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
3706 	SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
3707 	sctp_send_initiate(inp, stcb);
3708 	SCTP_ASOC_CREATE_UNLOCK(inp);
3709 	SCTP_TCB_UNLOCK(stcb);
3711 	lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_usr_recvd() - pr_rcvd hook: the user consumed receive data.
 *
 * Updates the receive window (rwnd), services any pending delivery/reassembly
 * queues, and — if the rwnd grew by at least one MTU (or a scaled fraction of
 * the receive buffer) — sends an immediate window-update SACK.  Also performs
 * the peeloff "socket queue" bookkeeping for one-to-many sockets.
 */
3715 sctp_usr_recvd(netmsg_t msg)
3717 	struct socket *so = msg->rcvd.base.nm_so;
3718 	struct sctp_socket_q_list *sq = NULL;
3719 	int flags = msg->rcvd.nm_flags;
3723 	 * The user has received some data, we may be able to stuff more
3724 	 * up the socket. And we need to possibly update the rwnd.
3726 	struct sctp_inpcb *inp;
3727 	struct sctp_tcb *stcb=NULL;
3729 	inp = (struct sctp_inpcb *)so->so_pcb;
3731 	if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3732 		kprintf("Read for so:%p inp:%p Flags:%x\n",
3733 		    so, inp, (u_int)flags);
3737 		/* I made the same as TCP since we are not setup? */
3739 		if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3740 			kprintf("Nope, connection reset\n");
3746 	 * Grab the first one on the list. It will re-insert itself if
3747 	 * it runs out of room
3749 	SCTP_INP_WLOCK(inp);
	/* one-to-many (non-TCP-pool, non-connected) socket at end-of-record:
	 * refresh the pending vtag from the socket buffer */
3750 	if ((flags & MSG_EOR) && ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0)
3751 	    && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3752 		/* Ok the other part of our grubby tracking
3753 		 * stuff for our horrible layer violation that
3754 		 * the tsvwg thinks is ok for sctp_peeloff.. gak!
3755 		 * We must update the next vtag pending on the
3756 		 * socket buffer (if any).
3758 		inp->sctp_vtag_first = sctp_get_first_vtag_from_sb(so);
3759 		sq = TAILQ_FIRST(&inp->sctp_queue_list);
3766 		stcb = LIST_FIRST(&inp->sctp_asoc_list);
3769 			SCTP_TCB_LOCK(stcb);
3772 		/* all code in normal stcb path assumes
3773 		 * that you have a tcb_lock only. Thus
3774 		 * we must release the inp write lock.
3776 		if (flags & MSG_EOR) {
3777 			if (((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0)
3778 			    && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3779 				stcb = sctp_remove_from_socket_q(inp);
3782 			if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3783 				kprintf("remove from socket queue for inp:%p tcbret:%p\n",
			/* give back the control-overhead charged against rwnd */
3787 			stcb->asoc.my_rwnd_control_len = sctp_sbspace_sub(stcb->asoc.my_rwnd_control_len,
3788 			    sizeof(struct mbuf));
3789 			if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) {
3790 				stcb->asoc.my_rwnd_control_len = sctp_sbspace_sub(stcb->asoc.my_rwnd_control_len,
3791 				    CMSG_LEN(sizeof(struct sctp_sndrcvinfo)));
3794 		if ((TAILQ_EMPTY(&stcb->asoc.delivery_queue) == 0) ||
3795 		    (TAILQ_EMPTY(&stcb->asoc.reasmqueue) == 0)) {
3796 			/* Deliver if there is something to be delivered */
3797 			sctp_service_queues(stcb, &stcb->asoc, 1);
3799 		sctp_set_rwnd(stcb, &stcb->asoc);
3800 		/* if we increase by 1 or more MTU's (smallest MTUs of all
3801 		 * nets) we send a window update sack
3803 		incr = stcb->asoc.my_rwnd - stcb->asoc.my_last_reported_rwnd;
3807 			if (((uint32_t)incr >= (stcb->asoc.smallest_mtu * SCTP_SEG_TO_RWND_UPD)) ||
3808 			    ((((uint32_t)incr)*SCTP_SCALE_OF_RWND_TO_UPD) >= so->so_rcv.ssb_hiwat)) {
3809 				if (callout_pending(&stcb->asoc.dack_timer.timer)) {
3810 					/* If the timer is up, stop it */
3811 					sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
3812 					    stcb->sctp_ep, stcb, NULL);
3814 				/* Send the sack, with the new rwnd */
3815 				sctp_send_sack(stcb);
3816 				/* Now do the output */
3817 				sctp_chunk_output(inp, stcb, 10);
3820 		if ((( sq ) && (flags & MSG_EOR) && ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0))
3821 		    && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3822 			stcb = sctp_remove_from_socket_q(inp);
3825 	SOCKBUF_LOCK(&so->so_rcv);
	/* sanity: empty receive buffer but non-empty socket queue — drain it */
3826 	if (( so->so_rcv.ssb_mb == NULL ) &&
3827 	    (TAILQ_EMPTY(&inp->sctp_queue_list) == 0)) {
3830 		if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3831 			kprintf("Something off, inp:%p so->so_rcv->ssb_mb is empty and sockq is not.. cleaning\n",
3834 		if (((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0)
3835 		    && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3837 			done_yet = TAILQ_EMPTY(&inp->sctp_queue_list);
3840 				sctp_remove_from_socket_q(inp);
3841 				done_yet = TAILQ_EMPTY(&inp->sctp_queue_list);
3845 			if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3846 				kprintf("Cleaned up %d sockq's\n", sq_cnt);
3849 	SOCKBUF_UNLOCK(&so->so_rcv);
3851 		SCTP_TCB_UNLOCK(stcb);
3852 	SCTP_INP_WUNLOCK(inp);
3855 	lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_listen() - listen(2) entry point (netmsg handler).
 *
 * Fails on an already-connected TCP-model socket, binds an ephemeral port
 * if unbound, then sets or clears the ACCEPTING/SO_ACCEPTCONN state from
 * the socket's backlog (qlimit).  Replies to the lwkt message with errno.
 */
3859 sctp_listen(netmsg_t msg)
3861 	struct socket *so = msg->listen.base.nm_so;
3865 	 * Note this module depends on the protocol processing being
3866 	 * called AFTER any socket level flags and backlog are applied
3867 	 * to the socket. The traditional way that the socket flags are
3868 	 * applied is AFTER protocol processing. We have made a change
3869 	 * to the sys/kern/uipc_socket.c module to reverse this but this
3870 	 * MUST be in place if the socket API for SCTP is to work properly.
3872 	struct sctp_inpcb *inp;
3874 	inp = (struct sctp_inpcb *)so->so_pcb;
3876 		/* I made the same as TCP since we are not setup? */
3880 	SCTP_INP_RLOCK(inp);
3881 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3882 	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
3883 		/* We are already connected AND the TCP model */
3884 		SCTP_INP_RUNLOCK(inp);
3888 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
3889 		/* We must do a bind. */
3890 		SCTP_INP_RUNLOCK(inp);
3891 		if ((error = sctp_inpcb_bind(so, NULL, msg->listen.nm_td))) {
3892 			/* bind error, probably perm */
3896 		SCTP_INP_RUNLOCK(inp);
3899 	SCTP_INP_WLOCK(inp);
3900 	if (inp->sctp_socket->so_qlimit) {
3901 		if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
3903 			 * For the UDP model we must TURN OFF the ACCEPT
3904 			 * flags since we do NOT allow the accept() call.
3905 			 * The TCP model (when present) will do accept which
3906 			 * then prohibits connect().
3908 			inp->sctp_socket->so_options &= ~SO_ACCEPTCONN;
3910 		inp->sctp_flags |= SCTP_PCB_FLAGS_ACCEPTING;
3912 	if (inp->sctp_flags & SCTP_PCB_FLAGS_ACCEPTING) {
3914 		 * Turning off the listen flags if the backlog is
3915 		 * set to 0 (i.e. qlimit is 0).
3917 		inp->sctp_flags &= ~SCTP_PCB_FLAGS_ACCEPTING;
3919 		inp->sctp_socket->so_options &= ~SO_ACCEPTCONN;
3921 	SCTP_INP_WUNLOCK(inp);
3925 	lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_accept() - accept(2) entry point (netmsg handler).
 *
 * Returns the peer's primary-destination address for the first association
 * on the endpoint (allocated with M_SONAME on FreeBSD/APPLE/DragonFly),
 * handling both AF_INET and AF_INET6 (with link-local scope recovery).
 * Finishes by delivering any read/write wakeups that were deferred while
 * DONT_WAKE was set.  Replies to the lwkt message with errno.
 */
3929 sctp_accept(netmsg_t msg)
3931 	struct socket *so = msg->accept.base.nm_so;
3932 	struct sockaddr **addr = msg->accept.nm_nam;
3933 	struct sctp_tcb *stcb;
3934 	struct sockaddr *prim;
3935 	struct sctp_inpcb *inp;
3938 	inp = (struct sctp_inpcb *)so->so_pcb;
3944 	SCTP_INP_RLOCK(inp);
3945 	if (so->so_state & SS_ISDISCONNECTED) {
3946 		SCTP_INP_RUNLOCK(inp);
3947 		error = ECONNABORTED;
3950 	stcb = LIST_FIRST(&inp->sctp_asoc_list);
3952 		SCTP_INP_RUNLOCK(inp);
3956 	SCTP_TCB_LOCK(stcb);
3957 	SCTP_INP_RUNLOCK(inp);
3958 	prim = (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr;
3959 	if (prim->sa_family == AF_INET) {
3960 		struct sockaddr_in *sin;
3961 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
3962 		sin = kmalloc(sizeof *sin, M_SONAME, M_WAITOK | M_ZERO);
3964 		sin = (struct sockaddr_in *)addr;
3965 		bzero((caddr_t)sin, sizeof (*sin));
3967 		sin->sin_family = AF_INET;
3968 		sin->sin_len = sizeof(*sin);
3969 		sin->sin_port = ((struct sockaddr_in *)prim)->sin_port;
3970 		sin->sin_addr = ((struct sockaddr_in *)prim)->sin_addr;
3971 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
3972 		*addr = (struct sockaddr *)sin;
3974 		nam->m_len = sizeof(*sin);
3977 		struct sockaddr_in6 *sin6;
3978 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
3979 		sin6 = kmalloc(sizeof *sin6, M_SONAME, M_WAITOK | M_ZERO);
3981 		sin6 = (struct sockaddr_in6 *)addr;
3983 		bzero((caddr_t)sin6, sizeof (*sin6));
3984 		sin6->sin6_family = AF_INET6;
3985 		sin6->sin6_len = sizeof(*sin6);
3986 		sin6->sin6_port = ((struct sockaddr_in6 *)prim)->sin6_port;
3988 		sin6->sin6_addr = ((struct sockaddr_in6 *)prim)->sin6_addr;
		/* recover the embedded scope id for link-local addresses */
3989 		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr))
3990 			/* sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);*/
3991 			in6_recoverscope(sin6, &sin6->sin6_addr, NULL);  /* skip ifp check */
3993 			sin6->sin6_scope_id = 0;	/*XXX*/
3994 #if defined(__FreeBSD__) || defined (__APPLE__) || defined(__DragonFly__)
3995 		*addr= (struct sockaddr *)sin6;
3997 		nam->m_len = sizeof(*sin6);
4000 	/* Wake any delayed sleep action */
4001 	SCTP_TCB_UNLOCK(stcb);
4002 	SCTP_INP_WLOCK(inp);
4003 	if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
4004 		inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
4005 		if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
4006 			inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
4007 #if defined(__NetBSD__)
4008 			if (sowritable(inp->sctp_socket))
4009 				sowwakeup(inp->sctp_socket);
4011 			if (sowriteable(inp->sctp_socket))
4012 				sowwakeup(inp->sctp_socket);
4015 		if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
4016 			inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
4017 			if (soreadable(inp->sctp_socket))
4018 				sorwakeup(inp->sctp_socket);
4022 	SCTP_INP_WUNLOCK(inp);
4025 	lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_ingetaddr() - getsockname(2) netmsg wrapper.
 * Delegates to sctp_ingetaddr_oncpu() and replies with its errno.
 */
4030 sctp_ingetaddr(netmsg_t msg)
4034 	error = sctp_ingetaddr_oncpu(msg->sockaddr.base.nm_so,
4035 	    msg->sockaddr.nm_nam);
4036 	lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_ingetaddr_oncpu() - return the local IPv4 name of the socket.
 *
 * Allocates a sockaddr_in (M_SONAME on FreeBSD/APPLE/DragonFly) and fills
 * in the local port plus a local address: for a bound-all endpoint the
 * source-address selection result against the connected peer (or 0.0.0.0
 * when unconnected); otherwise the first IPv4 address on the endpoint's
 * bound-address list.
 */
4040 sctp_ingetaddr_oncpu(struct socket *so, struct sockaddr **addr)
4042 	struct sockaddr_in *sin;
4043 	struct sctp_inpcb *inp;
4045 	 * Do the malloc first in case it blocks.
4047 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4048 	sin = kmalloc(sizeof *sin, M_SONAME, M_WAITOK | M_ZERO);
4050 	nam->m_len = sizeof(*sin);
4051 	memset(sin, 0, sizeof(*sin));
4053 	sin->sin_family = AF_INET;
4054 	sin->sin_len = sizeof(*sin);
4055 	inp = (struct sctp_inpcb *)so->so_pcb;
4057 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4058 		kfree(sin, M_SONAME);
4062 	SCTP_INP_RLOCK(inp);
4063 	sin->sin_port = inp->sctp_lport;
4064 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
4065 		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
4066 			struct sctp_tcb *stcb;
4067 			struct sockaddr_in *sin_a;
4068 			struct sctp_nets *net;
4071 			stcb = LIST_FIRST(&inp->sctp_asoc_list);
4077 			SCTP_TCB_LOCK(stcb);
			/* find an IPv4 peer net to select a source address against */
4078 			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4079 				sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4080 				if (sin_a->sin_family == AF_INET) {
4085 			if ((!fnd) || (sin_a == NULL)) {
4087 				SCTP_TCB_UNLOCK(stcb);
4090 			sin->sin_addr = sctp_ipv4_source_address_selection(inp,
4091 			    stcb, (struct route *)&net->ro, net, 0);
4092 			SCTP_TCB_UNLOCK(stcb);
4094 			/* For the bound all case you get back 0 */
4096 			sin->sin_addr.s_addr = 0;
4100 		/* Take the first IPv4 address in the list */
4101 		struct sctp_laddr *laddr;
4103 		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4104 			if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
4105 				struct sockaddr_in *sin_a;
4106 				sin_a = (struct sockaddr_in *)laddr->ifa->ifa_addr;
4107 				sin->sin_addr = sin_a->sin_addr;
		/* no IPv4 address found: free the sockaddr and fail */
4113 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4114 			kfree(sin, M_SONAME);
4116 			SCTP_INP_RUNLOCK(inp);
4120 	SCTP_INP_RUNLOCK(inp);
4121 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4122 	(*addr) = (struct sockaddr *)sin;
/*
 * netmsg dispatch wrapper (DragonFly): resolve the peer address of a
 * connected SCTP socket via sctp_peeraddr_oncpu() and reply to the
 * originating lwkt message with the error code.
 * NOTE(review): line-sampled extract — return type, braces and local
 * declarations between the visible lines are missing from view.
 */
4128 sctp_peeraddr(netmsg_t msg)
4132 error = sctp_peeraddr_oncpu(msg->peeraddr.base.nm_so,
4133 msg->peeraddr.nm_nam);
4134 lwkt_replymsg(&msg->lmsg, error);
/*
 * Report the primary peer IPv4 address of a connected SCTP socket:
 * walks the first association's net list and copies the first AF_INET
 * destination (port = stcb->rport) into a freshly allocated
 * sockaddr_in returned through *addr.  UDP-style endpoints and
 * listeners bail out early since they carry no connected peer.
 * NOTE(review): line-sampled extract — error returns and closing
 * braces between the visible lines are missing from view.
 */
4140 sctp_peeraddr_oncpu(struct socket *so, struct sockaddr **addr)
4142 struct sockaddr_in *sin = (struct sockaddr_in *)*addr;
4143 struct sockaddr_in *sin_a;
4144 struct sctp_inpcb *inp;
4145 struct sctp_tcb *stcb;
4146 struct sctp_nets *net;
4148 /* Do the malloc first in case it blocks. */
4149 inp = (struct sctp_inpcb *)so->so_pcb;
4150 if ((inp == NULL) ||
4151 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
4152 /* UDP type and listeners will drop out here */
4157 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4158 /* XXX huh? why assign it above and then allocate it here? */
4159 sin = kmalloc(sizeof *sin, M_SONAME, M_WAITOK | M_ZERO);
4161 nam->m_len = sizeof(*sin);
4162 memset(sin, 0, sizeof(*sin));
4164 sin->sin_family = AF_INET;
4165 sin->sin_len = sizeof(*sin);
4167 /* We must recapture incase we blocked */
4168 inp = (struct sctp_inpcb *)so->so_pcb;
/* presumably a NULL-pcb error path here: free and return */
4170 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4171 kfree(sin, M_SONAME);
4176 SCTP_INP_RLOCK(inp);
4177 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4179 SCTP_TCB_LOCK(stcb);
4180 SCTP_INP_RUNLOCK(inp);
/* presumably a no-association error path: free and return */
4182 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4183 kfree(sin, M_SONAME);
/* Copy the first IPv4 destination of the association as the peer. */
4189 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4190 sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4191 if (sin_a->sin_family == AF_INET) {
4193 sin->sin_port = stcb->rport;
4194 sin->sin_addr = sin_a->sin_addr;
4198 SCTP_TCB_UNLOCK(stcb);
4200 /* No IPv4 address */
4201 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4202 kfree(sin, M_SONAME);
/*
 * Socket user-request dispatch table for SCTP on FreeBSD/APPLE/
 * DragonFly.  Unsupported operations (connect2, rcvoob) map to
 * pr_generic_notsupp; sense is a no-op via pru_sense_null.
 * NOTE(review): the table's closing "};" lies between sampled lines
 * and is not visible in this extract.
 */
4212 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4213 struct pr_usrreqs sctp_usrreqs = {
4214 .pru_abort = sctp_abort,
4215 .pru_accept = sctp_accept,
4216 .pru_attach = sctp_attach,
4217 .pru_bind = sctp_bind,
4218 .pru_connect = sctp_connect,
4219 .pru_connect2 = pr_generic_notsupp,
4220 .pru_control = in_control_dispatch,
4221 .pru_detach = sctp_detach,
4222 .pru_disconnect = sctp_disconnect,
4223 .pru_listen = sctp_listen,
4224 .pru_peeraddr = sctp_peeraddr,
4225 .pru_rcvd = sctp_usr_recvd,
4226 .pru_rcvoob = pr_generic_notsupp,
4227 .pru_send = sctp_send,
4228 .pru_sense = pru_sense_null,
4229 .pru_shutdown = sctp_shutdown,
4230 .pru_sockaddr = sctp_ingetaddr,
4231 .pru_sosend = sctp_sosend,
4232 .pru_soreceive = soreceive
/*
 * Legacy single-entry usrreq dispatcher for NetBSD/OpenBSD: one switch
 * over the PRU_* request code, forwarding to the per-operation sctp_*
 * handlers.  PRU_CONTROL and PRU_PURGEIF are special-cased before the
 * switch; on OpenBSD (no proc argument) curproc is substituted.
 * NOTE(review): line-sampled extract — many case labels, braces and
 * the final return are missing between the visible lines.
 */
4236 #if defined(__NetBSD__)
4238 sctp_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam,
4239 struct mbuf *control, struct proc *p)
4244 sctp_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam,
4245 struct mbuf *control)
4247 struct proc *p = curproc;
4252 family = so->so_proto->pr_domain->dom_family;
/* ioctl-style requests go straight to the per-family control hook. */
4254 if (req == PRU_CONTROL) {
4257 error = in_control(so, (long)m, (caddr_t)nam,
4258 (struct ifnet *)control
4259 #if defined(__NetBSD__)
4266 error = in6_control(so, (long)m, (caddr_t)nam,
4267 (struct ifnet *)control, p);
4271 error = EAFNOSUPPORT;
/* Interface teardown: drop every SCTP-known address on the ifnet. */
4276 if (req == PRU_PURGEIF) {
4279 ifn = (struct ifnet *)control;
4280 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
4281 if (ifa->ifa_addr->sa_family == family) {
4282 sctp_delete_ip_address(ifa);
4295 return (EAFNOSUPPORT);
4302 error = sctp_attach(so, family, p);
4305 error = sctp_detach(so);
4311 error = sctp_bind(so, nam, p);
4314 error = sctp_listen(so, p);
4320 error = sctp_connect(so, nam, p);
4322 case PRU_DISCONNECT:
4323 error = sctp_disconnect(so);
4329 error = sctp_accept(so, nam);
4332 error = sctp_shutdown(so);
4337 * For Open and Net BSD, this is real
4338 * ugly. The mbuf *nam that is passed
4339 * (by soreceive()) is the int flags c
4340 * ast as a (mbuf *) yuck!
4342 error = sctp_usr_recvd(so, (int)((long)nam));
4346 /* Flags are ignored */
4348 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
4349 kprintf("Send called on V4 side\n");
4353 struct sockaddr *addr;
4357 addr = mtod(nam, struct sockaddr *);
4359 error = sctp_send(so, 0, m, addr, control, p);
4363 error = sctp_abort(so);
4370 error = EAFNOSUPPORT;
4373 error = EAFNOSUPPORT;
4376 error = sctp_peeraddr(so, nam);
4379 error = sctp_ingetaddr(so, nam);
/*
 * OpenBSD sysctl handler for net.inet.sctp: every name at this level
 * is a terminal integer variable, so each case simply delegates to
 * sysctl_int() on the corresponding global tunable and unknown names
 * yield ENOPROTOOPT.
 * NOTE(review): line-sampled extract — some case bodies and the
 * enclosing switch/braces are missing between the visible lines.
 */
4392 #if defined(__OpenBSD__)
4394 * Sysctl for sctp variables.
4397 sctp_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
4401 /* All sysctl names at this level are terminal. */
4407 case SCTPCTL_MAXDGRAM:
4408 return (sysctl_int(oldp, oldlenp, newp, newlen,
4410 case SCTPCTL_RECVSPACE:
4411 return (sysctl_int(oldp, oldlenp, newp, newlen,
4413 case SCTPCTL_AUTOASCONF:
4414 return (sysctl_int(oldp, oldlenp, newp, newlen,
4415 &sctp_auto_asconf));
4416 case SCTPCTL_ECN_ENABLE:
4417 return (sysctl_int(oldp, oldlenp, newp, newlen,
4419 case SCTPCTL_ECN_NONCE:
4420 return (sysctl_int(oldp, oldlenp, newp, newlen,
4422 case SCTPCTL_STRICT_SACK:
4423 return (sysctl_int(oldp, oldlenp, newp, newlen,
4424 &sctp_strict_sacks));
4425 case SCTPCTL_NOCSUM_LO:
4426 return (sysctl_int(oldp, oldlenp, newp, newlen,
4427 &sctp_no_csum_on_loopback));
4428 case SCTPCTL_STRICT_INIT:
4429 return (sysctl_int(oldp, oldlenp, newp, newlen,
4430 &sctp_strict_init));
4431 case SCTPCTL_PEER_CHK_OH:
4432 return (sysctl_int(oldp, oldlenp, newp, newlen,
4433 &sctp_peer_chunk_oh));
4434 case SCTPCTL_MAXBURST:
4435 return (sysctl_int(oldp, oldlenp, newp, newlen,
4436 &sctp_max_burst_default));
4437 case SCTPCTL_MAXCHUNKONQ:
4438 return (sysctl_int(oldp, oldlenp, newp, newlen,
4439 &sctp_max_chunks_on_queue));
4440 case SCTPCTL_DELAYED_SACK:
4441 return (sysctl_int(oldp, oldlenp, newp, newlen,
4442 &sctp_delayed_sack_time_default));
4443 case SCTPCTL_HB_INTERVAL:
4444 return (sysctl_int(oldp, oldlenp, newp, newlen,
4445 &sctp_heartbeat_interval_default));
4446 case SCTPCTL_PMTU_RAISE:
4447 return (sysctl_int(oldp, oldlenp, newp, newlen,
4448 &sctp_pmtu_raise_time_default));
4449 case SCTPCTL_SHUTDOWN_GUARD:
4450 return (sysctl_int(oldp, oldlenp, newp, newlen,
4451 &sctp_shutdown_guard_time_default));
4452 case SCTPCTL_SECRET_LIFETIME:
4453 return (sysctl_int(oldp, oldlenp, newp, newlen,
4454 &sctp_secret_lifetime_default));
4455 case SCTPCTL_RTO_MAX:
4456 return (sysctl_int(oldp, oldlenp, newp, newlen,
4457 &sctp_rto_max_default));
4458 case SCTPCTL_RTO_MIN:
4459 return (sysctl_int(oldp, oldlenp, newp, newlen,
4460 &sctp_rto_min_default));
4461 case SCTPCTL_RTO_INITIAL:
4462 return (sysctl_int(oldp, oldlenp, newp, newlen,
4463 &sctp_rto_initial_default));
4464 case SCTPCTL_INIT_RTO_MAX:
4465 return (sysctl_int(oldp, oldlenp, newp, newlen,
4466 &sctp_init_rto_max_default));
4467 case SCTPCTL_COOKIE_LIFE:
4468 return (sysctl_int(oldp, oldlenp, newp, newlen,
4469 &sctp_valid_cookie_life_default));
4470 case SCTPCTL_INIT_RTX_MAX:
4471 return (sysctl_int(oldp, oldlenp, newp, newlen,
4472 &sctp_init_rtx_max_default));
4473 case SCTPCTL_ASSOC_RTX_MAX:
4474 return (sysctl_int(oldp, oldlenp, newp, newlen,
4475 &sctp_assoc_rtx_max_default));
4476 case SCTPCTL_PATH_RTX_MAX:
4477 return (sysctl_int(oldp, oldlenp, newp, newlen,
4478 &sctp_path_rtx_max_default));
4479 case SCTPCTL_NR_OUTGOING_STREAMS:
4480 return (sysctl_int(oldp, oldlenp, newp, newlen,
4481 &sctp_nr_outgoing_streams_default));
4484 return (sysctl_int(oldp, oldlenp, newp, newlen,
/* Unknown terminal name under net.inet.sctp. */
4488 return (ENOPROTOOPT);
4493 #if defined(__NetBSD__)
4495 * Sysctl for sctp variables.
4497 SYSCTL_SETUP(sysctl_net_inet_sctp_setup, "sysctl net.inet.sctp subtree setup")
4500 sysctl_createv(clog, 0, NULL, NULL,
4502 CTLTYPE_NODE, "net", NULL,
4505 sysctl_createv(clog, 0, NULL, NULL,
4507 CTLTYPE_NODE, "inet", NULL,
4509 CTL_NET, PF_INET, CTL_EOL);
4510 sysctl_createv(clog, 0, NULL, NULL,
4512 CTLTYPE_NODE, "sctp",
4513 SYSCTL_DESCR("sctp related settings"),
4515 CTL_NET, PF_INET, IPPROTO_SCTP, CTL_EOL);
4517 sysctl_createv(clog, 0, NULL, NULL,
4518 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4519 CTLTYPE_INT, "maxdgram",
4520 SYSCTL_DESCR("Maximum outgoing SCTP buffer size"),
4521 NULL, 0, &sctp_sendspace, 0,
4522 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_MAXDGRAM,
4525 sysctl_createv(clog, 0, NULL, NULL,
4526 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4527 CTLTYPE_INT, "recvspace",
4528 SYSCTL_DESCR("Maximum incoming SCTP buffer size"),
4529 NULL, 0, &sctp_recvspace, 0,
4530 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_RECVSPACE,
4533 sysctl_createv(clog, 0, NULL, NULL,
4534 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4535 CTLTYPE_INT, "autoasconf",
4536 SYSCTL_DESCR("Enable SCTP Auto-ASCONF"),
4537 NULL, 0, &sctp_auto_asconf, 0,
4538 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_AUTOASCONF,
4541 sysctl_createv(clog, 0, NULL, NULL,
4542 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4543 CTLTYPE_INT, "ecn_enable",
4544 SYSCTL_DESCR("Enable SCTP ECN"),
4545 NULL, 0, &sctp_ecn, 0,
4546 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_ECN_ENABLE,
4549 sysctl_createv(clog, 0, NULL, NULL,
4550 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4551 CTLTYPE_INT, "ecn_nonce",
4552 SYSCTL_DESCR("Enable SCTP ECN Nonce"),
4553 NULL, 0, &sctp_ecn_nonce, 0,
4554 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_ECN_NONCE,
4557 sysctl_createv(clog, 0, NULL, NULL,
4558 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4559 CTLTYPE_INT, "strict_sack",
4560 SYSCTL_DESCR("Enable SCTP Strict SACK checking"),
4561 NULL, 0, &sctp_strict_sacks, 0,
4562 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_STRICT_SACK,
4565 sysctl_createv(clog, 0, NULL, NULL,
4566 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4567 CTLTYPE_INT, "loopback_nocsum",
4568 SYSCTL_DESCR("Enable NO Csum on packets sent on loopback"),
4569 NULL, 0, &sctp_no_csum_on_loopback, 0,
4570 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_NOCSUM_LO,
4573 sysctl_createv(clog, 0, NULL, NULL,
4574 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4575 CTLTYPE_INT, "strict_init",
4576 SYSCTL_DESCR("Enable strict INIT/INIT-ACK singleton enforcement"),
4577 NULL, 0, &sctp_strict_init, 0,
4578 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_STRICT_INIT,
4581 sysctl_createv(clog, 0, NULL, NULL,
4582 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4583 CTLTYPE_INT, "peer_chkoh",
4584 SYSCTL_DESCR("Amount to debit peers rwnd per chunk sent"),
4585 NULL, 0, &sctp_peer_chunk_oh, 0,
4586 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_PEER_CHK_OH,
4589 sysctl_createv(clog, 0, NULL, NULL,
4590 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4591 CTLTYPE_INT, "maxburst",
4592 SYSCTL_DESCR("Default max burst for sctp endpoints"),
4593 NULL, 0, &sctp_max_burst_default, 0,
4594 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_MAXBURST,
4597 sysctl_createv(clog, 0, NULL, NULL,
4598 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4599 CTLTYPE_INT, "maxchunks",
4600 SYSCTL_DESCR("Default max chunks on queue per asoc"),
4601 NULL, 0, &sctp_max_chunks_on_queue, 0,
4602 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_MAXCHUNKONQ,