1 /* $KAME: sctp_usrreq.c,v 1.47 2005/03/06 16:04:18 itojun Exp $ */
2 /* $DragonFly: src/sys/netinet/sctp_usrreq.c,v 1.14 2008/04/20 13:44:25 swildner Exp $ */
5 * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Cisco Systems, Inc.
19 * 4. Neither the name of the project nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #if !(defined(__OpenBSD__) || defined(__APPLE__))
36 #include "opt_ipsec.h"
38 #if defined(__FreeBSD__) || defined(__DragonFly__)
39 #include "opt_inet6.h"
42 #if defined(__NetBSD__)
48 #elif !defined(__OpenBSD__)
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/kernel.h>
55 #include <sys/malloc.h>
57 #include <sys/domain.h>
60 #include <sys/protosw.h>
61 #include <sys/socket.h>
62 #include <sys/socketvar.h>
63 #include <sys/socketvar2.h>
64 #include <sys/sysctl.h>
65 #include <sys/syslog.h>
67 #include <sys/thread2.h>
68 #include <sys/msgport2.h>
71 #include <net/if_types.h>
72 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
73 #include <net/if_var.h>
75 #include <net/route.h>
76 #include <netinet/in.h>
77 #include <netinet/in_systm.h>
78 #include <netinet/ip.h>
79 #include <netinet/ip6.h>
80 #include <netinet/in_pcb.h>
81 #include <netinet/in_var.h>
82 #include <netinet/ip_var.h>
83 #include <netinet6/ip6_var.h>
84 #include <netinet6/in6_var.h>
86 #include <netinet/ip_icmp.h>
87 #include <netinet/icmp_var.h>
88 #include <netinet/sctp_pcb.h>
89 #include <netinet/sctp_header.h>
90 #include <netinet/sctp_var.h>
91 #include <netinet/sctp_output.h>
92 #include <netinet/sctp_uio.h>
93 #include <netinet/sctp_asconf.h>
94 #include <netinet/sctputil.h>
95 #include <netinet/sctp_indata.h>
96 #include <netinet/sctp_asconf.h>
99 #include <netinet6/ipsec.h>
100 #include <netproto/key/key.h>
106 #include <net/net_osdep.h>
108 #if defined(HAVE_NRL_INPCB) || defined(__FreeBSD__) || defined(__DragonFly__)
113 #define sotoin6pcb sotoinpcb
118 extern u_int32_t sctp_debug_on;
119 #endif /* SCTP_DEBUG */
/*
 * Boot-time defaults for the SCTP sysctl tunables exported further below
 * via SYSCTL_INT/SYSCTL_UINT.  The SCTP_* constants come from the sctp
 * headers included above.
 * NOTE(review): this extract has gaps (original line numbering jumps);
 * the recvspace initializer below is missing its #ifdef INET6 arms and
 * closing paren in this view — confirm against the complete file.
 */
122 * sysctl tunable variables
124 int sctp_auto_asconf = SCTP_DEFAULT_AUTO_ASCONF;
125 int sctp_max_burst_default = SCTP_DEF_MAX_BURST;
/* Per-chunk rwnd debit charged to the peer; uses local mbuf size as estimate. */
126 int sctp_peer_chunk_oh = sizeof(struct mbuf);
127 int sctp_strict_init = 1;
/* Skip checksum on loopback: safe because the packet never leaves the host. */
128 int sctp_no_csum_on_loopback = 1;
129 unsigned int sctp_max_chunks_on_queue = SCTP_ASOC_MAX_CHUNKS_ON_QUEUE;
130 int sctp_sendspace = (128 * 1024);
131 int sctp_recvspace = 128 * (1024 +
133 sizeof(struct sockaddr_in6)
135 sizeof(struct sockaddr_in)
138 int sctp_strict_sacks = 0;
140 int sctp_ecn_nonce = 0;
/* Timer defaults (msec/sec as noted in the sysctl descriptions below). */
142 unsigned int sctp_delayed_sack_time_default = SCTP_RECV_MSEC;
143 unsigned int sctp_heartbeat_interval_default = SCTP_HB_DEFAULT_MSEC;
144 unsigned int sctp_pmtu_raise_time_default = SCTP_DEF_PMTU_RAISE_SEC;
145 unsigned int sctp_shutdown_guard_time_default = SCTP_DEF_MAX_SHUTDOWN_SEC;
146 unsigned int sctp_secret_lifetime_default = SCTP_DEFAULT_SECRET_LIFE_SEC;
147 unsigned int sctp_rto_max_default = SCTP_RTO_UPPER_BOUND;
148 unsigned int sctp_rto_min_default = SCTP_RTO_LOWER_BOUND;
149 unsigned int sctp_rto_initial_default = SCTP_RTO_INITIAL;
150 unsigned int sctp_init_rto_max_default = SCTP_RTO_UPPER_BOUND;
151 unsigned int sctp_valid_cookie_life_default = SCTP_DEFAULT_COOKIE_LIFE;
152 unsigned int sctp_init_rtx_max_default = SCTP_DEF_MAX_INIT;
153 unsigned int sctp_assoc_rtx_max_default = SCTP_DEF_MAX_SEND;
154 unsigned int sctp_path_rtx_max_default = SCTP_DEF_MAX_SEND/2;
155 unsigned int sctp_nr_outgoing_streams_default = SCTP_OSTREAM_INITIAL;
/* OpenBSD names the cluster count nmbclust; alias it for the shared code. */
161 #define nmbclusters nmbclust
/*
 * Fragment of sctp_init(): scales the chunk-queue limit and the default
 * socket buffer sizes from the system's mbuf-cluster pool at boot.
 * NOTE(review): the function header, variable declarations and the
 * #ifdef arms selecting the nmbclusters vs. nmbclust variants are
 * missing from this view — confirm structure against the full file.
 */
163 /* Init the SCTP pcb in sctp_pcb.c */
169 if (nmbclusters > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
170 sctp_max_chunks_on_queue = nmbclusters;
172 /* if (nmbclust > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
173 sctp_max_chunks_on_queue = nmbclust; FIX ME */
174 sctp_max_chunks_on_queue = nmbclust * 2;
177 * Allow a user to take no more than 1/2 the number of clusters
178 * or the SB_MAX whichever is smaller for the send window.
/* sb_max_adj mirrors the kernel's sb_max wastage adjustment. */
180 sb_max_adj = (u_long)((u_quad_t)(SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
181 sctp_sendspace = min((min(SB_MAX, sb_max_adj)),
183 ((nmbclusters/2) * SCTP_DEFAULT_MAXSEGMENT));
185 ((nmbclust/2) * SCTP_DEFAULT_MAXSEGMENT));
188 * Now for the recv window, should we take the same amount?
189 * or should I do 1/2 the SB_MAX instead in the SB_MAX min above.
190 * For now I will just copy.
192 sctp_recvspace = sctp_sendspace;
/*
 * Synthesize an IPv6 header from an IPv4 one, placing the v4 addresses in
 * the low 32 bits of the v6 addresses (v4-mapped form).  Used so v4 ICMP
 * feedback can be processed by code expecting a v6 header.
 * NOTE(review): truncated view — the opening brace, the line assigning
 * the mapped-prefix constant into s6_addr32[2] (line 209) and the closing
 * brace are missing here.  ip_len/plen byte order is carried over as-is;
 * presumably the caller deals with host/net order — confirm.
 */
200 ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip)
202 bzero(ip6, sizeof(*ip6));
204 ip6->ip6_vfc = IPV6_VERSION;
205 ip6->ip6_plen = ip->ip_len;
206 ip6->ip6_nxt = ip->ip_p;
207 ip6->ip6_hlim = ip->ip_ttl;
208 ip6->ip6_src.s6_addr32[2] = ip6->ip6_dst.s6_addr32[2] =
210 ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr;
211 ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr;
/*
 * Split an outbound data chunk in half so both pieces fit under a reduced
 * path MTU.  Allocates a second tmit_chunk, m_split()s the mbuf chain at
 * the midpoint, halves the size accounting on both, fixes the FIRST/LAST
 * fragment flags, and inserts the new chunk right after the original on
 * the stream's outqueue.  On any allocation failure the chunk is instead
 * just marked CHUNK_FLAGS_FRAGMENT_OK (IP-level fragmentation allowed).
 * NOTE(review): truncated view — the early returns after the failure
 * marks, the memcpy of *chk into *new_chk, and the whoTo NULL check
 * guarding the ref_count bump (line 261) are missing here.
 */
216 sctp_split_chunks(struct sctp_association *asoc,
217 struct sctp_stream_out *strm,
218 struct sctp_tmit_chunk *chk)
220 struct sctp_tmit_chunk *new_chk;
222 /* First we need a chunk */
223 new_chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
224 if (new_chk == NULL) {
/* Zone exhausted: fall back to letting IP fragment this chunk. */
225 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
228 sctppcbinfo.ipi_count_chunk++;
229 sctppcbinfo.ipi_gencnt_chunk++;
/* Split the data mbuf chain at half the payload size (no blocking). */
233 new_chk->data = m_split(chk->data, (chk->send_size>>1), MB_DONTWAIT);
234 if (new_chk->data == NULL) {
/* m_split failed: undo the zone allocation and accounting. */
236 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
237 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, new_chk);
238 sctppcbinfo.ipi_count_chunk--;
239 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
240 panic("Chunk count is negative");
242 sctppcbinfo.ipi_gencnt_chunk++;
246 /* Data is now split adjust sizes */
247 chk->send_size >>= 1;
248 new_chk->send_size >>= 1;
250 chk->book_size >>= 1;
251 new_chk->book_size >>= 1;
253 /* now adjust the marks */
/* Original keeps FIRST, loses LAST; new chunk is the opposite. */
254 chk->rec.data.rcv_flags |= SCTP_DATA_FIRST_FRAG;
255 chk->rec.data.rcv_flags &= ~SCTP_DATA_LAST_FRAG;
257 new_chk->rec.data.rcv_flags &= ~SCTP_DATA_FIRST_FRAG;
258 new_chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
260 /* Increase ref count if dest is set */
262 new_chk->whoTo->ref_count++;
264 /* now drop it on the end of the list*/
265 asoc->stream_queue_cnt++;
266 TAILQ_INSERT_AFTER(&strm->outqueue, chk, new_chk, sctp_next);
/*
 * Handle an ICMP "fragmentation needed" (Path-MTU) message for an
 * association: validate the vtag, extract the next-hop MTU from the ICMP
 * header (or guess via find_next_best_mtu() for old routers that report
 * 0), lower net->mtu and asoc.smallest_mtu, mark queued chunks that no
 * longer fit as FRAGMENT_OK (and force RESEND for already-sent ones,
 * removing them from the flight-size accounting), split oversized chunks
 * still on the stream wheel, and restart the PMTU-raise timer.
 * Caller passes in a locked stcb; every path shown unlocks it on return.
 * NOTE(review): truncated view — the icmp/totsz/nxtsz declarations, the
 * sizeof(struct ip) term completing the icmph back-pointer cast (line
 * 294), the nxtsz==0 guess branch, and several closing braces/returns
 * are missing here; confirm against the full file.
 */
270 sctp_notify_mbuf(struct sctp_inpcb *inp,
271 struct sctp_tcb *stcb,
272 struct sctp_nets *net,
/* Defensive NULL checks: ICMP lookup can race with teardown. */
282 if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
283 (ip == NULL) || (sh == NULL)) {
285 SCTP_TCB_UNLOCK(stcb);
288 /* First job is to verify the vtag matches what I would send */
289 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
290 SCTP_TCB_UNLOCK(stcb);
/* Walk back from the embedded IP header to the enclosing ICMP header. */
293 icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
295 if (icmph->icmp_type != ICMP_UNREACH) {
296 /* We only care about unreachable */
297 SCTP_TCB_UNLOCK(stcb);
300 if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
301 /* not a unreachable message due to frag. */
302 SCTP_TCB_UNLOCK(stcb);
/* Next-hop MTU lives in the icmp_seq position (RFC 1191 layout). */
306 nxtsz = ntohs(icmph->icmp_seq);
309 * old type router that does not tell us what the next size
310 * mtu is. Rats we will have to guess (in a educated fashion
313 nxtsz = find_next_best_mtu(totsz);
316 /* Stop any PMTU timer */
317 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, NULL);
319 /* Adjust destination size limit */
320 if (net->mtu > nxtsz) {
323 /* now what about the ep? */
324 if (stcb->asoc.smallest_mtu > nxtsz) {
325 struct sctp_tmit_chunk *chk, *nchk;
326 struct sctp_stream_out *strm;
327 /* Adjust that too */
328 stcb->asoc.smallest_mtu = nxtsz;
329 /* now off to subtract IP_DF flag if needed */
331 TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
332 if ((chk->send_size+IP_HDR_SIZE) > nxtsz) {
333 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
336 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
337 if ((chk->send_size+IP_HDR_SIZE) > nxtsz) {
339 * For this guy we also mark for immediate
340 * resend since we sent to big of chunk
342 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
343 if (chk->sent != SCTP_DATAGRAM_RESEND) {
344 stcb->asoc.sent_queue_retran_cnt++;
346 chk->sent = SCTP_DATAGRAM_RESEND;
347 chk->rec.data.doing_fast_retransmit = 0;
349 /* Clear any time so NO RTT is being done */
/* Chunk leaves flight: undo byte and count accounting, clamped at 0. */
351 stcb->asoc.total_flight -= chk->book_size;
352 if (stcb->asoc.total_flight < 0) {
353 stcb->asoc.total_flight = 0;
355 stcb->asoc.total_flight_count--;
356 if (stcb->asoc.total_flight_count < 0) {
357 stcb->asoc.total_flight_count = 0;
359 net->flight_size -= chk->book_size;
360 if (net->flight_size < 0) {
361 net->flight_size = 0;
/* Chunks not yet on the send queue may be split to fit the new MTU. */
365 TAILQ_FOREACH(strm, &stcb->asoc.out_wheel, next_spoke) {
366 chk = TAILQ_FIRST(&strm->outqueue);
368 nchk = TAILQ_NEXT(chk, sctp_next);
369 if ((chk->send_size+SCTP_MED_OVERHEAD) > nxtsz) {
370 sctp_split_chunks(&stcb->asoc, strm, chk);
376 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, NULL);
377 SCTP_TCB_UNLOCK(stcb);
/*
 * Dispatch a non-PMTU ICMP error to an association.  After vtag
 * validation: reachability errors (EHOSTUNREACH/EHOSTDOWN) mark the
 * destination net down and notify the ULP; ECONNREFUSED/ENOPROTOOPT are
 * treated as an out-of-the-blue abort (peer has no SCTP), freeing the
 * association; anything else is posted as so_error with a write wakeup.
 * Caller passes in a locked stcb; each shown path unlocks it (or frees
 * the assoc, which consumes the lock).
 * NOTE(review): truncated view — the full parameter list (error, sh, to
 * arrive between lines 382 and 386), the #ifdef SCTP_DEBUG opener, and
 * several returns/closing braces are missing here.
 */
382 sctp_notify(struct sctp_inpcb *inp,
386 struct sctp_tcb *stcb,
387 struct sctp_nets *net)
390 if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
391 (sh == NULL) || (to == NULL)) {
393 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
394 kprintf("sctp-notify, bad call\n");
396 #endif /* SCTP_DEBUG */
399 /* First job is to verify the vtag matches what I would send */
400 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
404 /* FIX ME FIX ME PROTOPT i.e. no SCTP should ALWAYS be an ABORT */
406 if ((error == EHOSTUNREACH) || /* Host is not reachable */
407 (error == EHOSTDOWN) || /* Host is down */
408 (error == ECONNREFUSED) || /* Host refused the connection, (not an abort?) */
409 (error == ENOPROTOOPT) /* SCTP is not present on host */
412 * Hmm reachablity problems we must examine closely.
413 * If its not reachable, we may have lost a network.
414 * Or if there is NO protocol at the other end named SCTP.
415 * well we consider it a OOTB abort.
417 if ((error == EHOSTUNREACH) || (error == EHOSTDOWN)) {
418 if (net->dest_state & SCTP_ADDR_REACHABLE) {
419 /* Ok that destination is NOT reachable */
420 net->dest_state &= ~SCTP_ADDR_REACHABLE;
421 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
/* Push error count past threshold so the path is treated as failed. */
422 net->error_count = net->failure_threshold + 1;
423 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
424 stcb, SCTP_FAILED_THRESHOLD,
428 SCTP_TCB_UNLOCK(stcb);
431 * Here the peer is either playing tricks on us,
432 * including an address that belongs to someone who
433 * does not support SCTP OR was a userland
434 * implementation that shutdown and now is dead. In
435 * either case treat it like a OOTB abort with no TCB
437 sctp_abort_notification(stcb, SCTP_PEER_FAULTY);
438 sctp_free_assoc(inp, stcb);
439 /* no need to unlock here, since the TCB is gone */
442 /* Send all others to the app */
443 if (inp->sctp_socket) {
444 SOCK_LOCK(inp->sctp_socket);
445 inp->sctp_socket->so_error = error;
446 sctp_sowwakeup(inp, inp->sctp_socket);
447 SOCK_UNLOCK(inp->sctp_socket);
450 SCTP_TCB_UNLOCK(stcb);
/*
 * Protocol control-input handler (netmsg-based on DragonFly): receives
 * ICMP-derived notifications for SCTP.  Builds from/to sockaddrs from
 * the embedded IP+SCTP headers, looks up the association (note the
 * deliberately reversed to/from in the lookup), then routes PRC_MSGSIZE
 * to sctp_notify_mbuf() and other mapped errors to sctp_notify().
 * Always replies to the netmsg before returning.
 * NOTE(review): truncated view — the `goto out` style exits after the
 * early filters, the struct sctphdr *sh declaration, the inp/net output
 * arguments of the lookup call (line 495), and the redirect-handling
 * rtchange arguments are missing here.
 */
455 sctp_ctlinput(netmsg_t msg)
457 int cmd = msg->ctlinput.nm_cmd;
458 struct sockaddr *sa = msg->ctlinput.nm_arg;
459 struct ip *ip = msg->ctlinput.nm_extra;
/* Ignore notifications that are not v4 or carry a wildcard address. */
462 if (sa->sa_family != AF_INET ||
463 ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
467 if (PRC_IS_REDIRECT(cmd)) {
469 } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
473 struct sctp_inpcb *inp;
474 struct sctp_tcb *stcb;
475 struct sctp_nets *net;
476 struct sockaddr_in to, from;
/* SCTP header sits right after the (variable-length) IP header. */
478 sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
479 bzero(&to, sizeof(to));
480 bzero(&from, sizeof(from));
481 from.sin_family = to.sin_family = AF_INET;
482 from.sin_len = to.sin_len = sizeof(to);
483 from.sin_port = sh->src_port;
484 from.sin_addr = ip->ip_src;
485 to.sin_port = sh->dest_port;
486 to.sin_addr = ip->ip_dst;
489 * 'to' holds the dest of the packet that failed to be sent.
490 * 'from' holds our local endpoint address.
491 * Thus we reverse the to and the from in the lookup.
493 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
494 (struct sockaddr *)&to,
496 if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
497 if (cmd != PRC_MSGSIZE) {
499 if (cmd == PRC_HOSTDEAD) {
502 cm = inetctlerrmap[cmd];
504 sctp_notify(inp, cm, sh,
505 (struct sockaddr *)&to, stcb,
508 /* handle possible ICMP size messages */
509 sctp_notify_mbuf(inp, stcb, net, ip, sh);
512 #if (defined(__FreeBSD__) && __FreeBSD_version < 500000) || defined(__DragonFly__)
513 /* XXX must be fixed for 5.x and higher, leave for 4.x */
514 if (PRC_IS_REDIRECT(cmd) && inp) {
515 in_rtchange((struct inpcb *)inp,
/* Lookup bumped the inp refcount but found no TCB: drop the ref here. */
519 if ((stcb == NULL) && (inp != NULL)) {
520 /* reduce ref-count */
522 SCTP_INP_DECR_REF(inp);
523 SCTP_INP_WUNLOCK(inp);
529 lwkt_replymsg(&msg->lmsg, 0);
532 #if defined(__FreeBSD__) || defined(__DragonFly__)
/*
 * sysctl handler: given a pair {remote, local} of sockaddr_in, return
 * the ucred of the socket owning the matching association.  Restricted
 * to privileged callers (priv_check/suser).  On a TCB hit the TCB comes
 * back locked and is unlocked after copyout; on an inp-only hit the
 * reference the lookup took is dropped.
 * NOTE(review): truncated view — error-return statements after the priv
 * and SYSCTL_IN checks, the second lookup argument (&addrs[1]) and the
 * ENOENT path are missing here.
 */
534 sctp_getcred(SYSCTL_HANDLER_ARGS)
536 struct sockaddr_in addrs[2];
537 struct sctp_inpcb *inp;
538 struct sctp_nets *net;
539 struct sctp_tcb *stcb;
542 #if __FreeBSD_version >= 500000 || defined(__DragonFly__)
543 error = priv_check(req->td, PRIV_ROOT);
545 error = suser(req->p);
549 error = SYSCTL_IN(req, addrs, sizeof(addrs));
553 stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
556 if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
557 if ((inp != NULL) && (stcb == NULL)) {
558 /* reduce ref-count */
560 SCTP_INP_DECR_REF(inp);
561 SCTP_INP_WUNLOCK(inp);
566 error = SYSCTL_OUT(req, inp->sctp_socket->so_cred, sizeof(struct ucred));
567 SCTP_TCB_UNLOCK(stcb);
572 SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE|CTLFLAG_RW,
573 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");
574 #endif /* #if defined(__FreeBSD__) || defined(__DragonFly__) */
/*
 * sysctl OID tree: net.inet.sctp.* knobs backed by the tunable globals
 * defined near the top of this file.  Each entry's description string
 * documents its own semantics.
 * NOTE(review): the #ifdef SCTP_DEBUG opener for the final `debug` knob
 * is not visible in this extract (only its #endif is).
 */
579 #if defined(__FreeBSD__) || defined (__APPLE__) || defined(__DragonFly__)
581 SYSCTL_DECL(_net_inet);
583 SYSCTL_NODE(_net_inet, OID_AUTO, sctp, CTLFLAG_RD, 0,
586 SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxdgram, CTLFLAG_RW,
587 &sctp_sendspace, 0, "Maximum outgoing SCTP buffer size");
589 SYSCTL_INT(_net_inet_sctp, OID_AUTO, recvspace, CTLFLAG_RW,
590 &sctp_recvspace, 0, "Maximum incoming SCTP buffer size");
592 SYSCTL_INT(_net_inet_sctp, OID_AUTO, auto_asconf, CTLFLAG_RW,
593 &sctp_auto_asconf, 0, "Enable SCTP Auto-ASCONF");
595 SYSCTL_INT(_net_inet_sctp, OID_AUTO, ecn_enable, CTLFLAG_RW,
596 &sctp_ecn, 0, "Enable SCTP ECN");
598 SYSCTL_INT(_net_inet_sctp, OID_AUTO, ecn_nonce, CTLFLAG_RW,
599 &sctp_ecn_nonce, 0, "Enable SCTP ECN Nonce");
601 SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_sacks, CTLFLAG_RW,
602 &sctp_strict_sacks, 0, "Enable SCTP Strict SACK checking");
604 SYSCTL_INT(_net_inet_sctp, OID_AUTO, loopback_nocsum, CTLFLAG_RW,
605 &sctp_no_csum_on_loopback, 0,
606 "Enable NO Csum on packets sent on loopback");
608 SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_init, CTLFLAG_RW,
609 &sctp_strict_init, 0,
610 "Enable strict INIT/INIT-ACK singleton enforcement");
612 SYSCTL_INT(_net_inet_sctp, OID_AUTO, peer_chkoh, CTLFLAG_RW,
613 &sctp_peer_chunk_oh, 0,
614 "Amount to debit peers rwnd per chunk sent");
616 SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxburst, CTLFLAG_RW,
617 &sctp_max_burst_default, 0,
618 "Default max burst for sctp endpoints");
620 SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxchunks, CTLFLAG_RW,
621 &sctp_max_chunks_on_queue, 0,
622 "Default max chunks on queue per asoc");
624 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, delayed_sack_time, CTLFLAG_RW,
625 &sctp_delayed_sack_time_default, 0,
626 "Default delayed SACK timer in msec");
628 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, heartbeat_interval, CTLFLAG_RW,
629 &sctp_heartbeat_interval_default, 0,
630 "Default heartbeat interval in msec");
632 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, pmtu_raise_time, CTLFLAG_RW,
633 &sctp_pmtu_raise_time_default, 0,
634 "Default PMTU raise timer in sec");
636 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, shutdown_guard_time, CTLFLAG_RW,
637 &sctp_shutdown_guard_time_default, 0,
638 "Default shutdown guard timer in sec");
640 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, secret_lifetime, CTLFLAG_RW,
641 &sctp_secret_lifetime_default, 0,
642 "Default secret lifetime in sec");
644 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_max, CTLFLAG_RW,
645 &sctp_rto_max_default, 0,
646 "Default maximum retransmission timeout in msec");
648 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_min, CTLFLAG_RW,
649 &sctp_rto_min_default, 0,
650 "Default minimum retransmission timeout in msec");
652 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_initial, CTLFLAG_RW,
653 &sctp_rto_initial_default, 0,
654 "Default initial retransmission timeout in msec");
656 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, init_rto_max, CTLFLAG_RW,
657 &sctp_init_rto_max_default, 0,
658 "Default maximum retransmission timeout during association setup in msec");
660 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, valid_cookie_life, CTLFLAG_RW,
661 &sctp_valid_cookie_life_default, 0,
662 "Default cookie lifetime in sec");
664 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, init_rtx_max, CTLFLAG_RW,
665 &sctp_init_rtx_max_default, 0,
666 "Default maximum number of retransmission for INIT chunks");
668 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, assoc_rtx_max, CTLFLAG_RW,
669 &sctp_assoc_rtx_max_default, 0,
670 "Default maximum number of retransmissions per association");
672 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, path_rtx_max, CTLFLAG_RW,
673 &sctp_path_rtx_max_default, 0,
674 "Default maximum of retransmissions per path");
676 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, nr_outgoing_streams, CTLFLAG_RW,
677 &sctp_nr_outgoing_streams_default, 0,
678 "Default number of outgoing streams");
681 SYSCTL_INT(_net_inet_sctp, OID_AUTO, debug, CTLFLAG_RW,
682 &sctp_debug_on, 0, "Configure debug output");
683 #endif /* SCTP_DEBUG */
/*
 * pru_abort handler: tear down the SCTP inpcb immediately (the `1`
 * argument requests the immediate-free variant).  The socket itself is
 * freed by the netmsg framework after we reply, per the note below.
 * NOTE(review): truncated view — the inp NULL check / EINVAL path and
 * the error variable declaration are missing here.
 */
687 * NOTE: (so) is referenced from soabort*() and netmsg_pru_abort()
688 * will sofree() it when we return.
691 sctp_abort(netmsg_t msg)
693 struct socket *so = msg->abort.base.nm_so;
694 struct sctp_inpcb *inp;
697 inp = (struct sctp_inpcb *)so->so_pcb;
699 sctp_inpcb_free(inp, 1);
704 lwkt_replymsg(&msg->lmsg, error);
/*
 * pru_attach handler: create the SCTP inpcb for a new v4 socket.
 * Reserves socket buffers at the tunable defaults, allocates the pcb,
 * marks it IPv4-only, seeds the TTL, and (where compiled in) attaches an
 * IPSEC policy — undoing the pcb allocation if that fails.  Replies to
 * the netmsg with the resulting error code.
 * NOTE(review): truncated view — the EINVAL check for an already-present
 * inp, the early-exit branches after soreserve/sctp_inpcb_alloc, and the
 * #endif lines for the IPSEC and NetBSD sections are missing here.
 */
708 sctp_attach(netmsg_t msg)
710 struct socket *so = msg->attach.base.nm_so;
711 struct sctp_inpcb *inp;
712 struct inpcb *ip_inp;
715 inp = (struct sctp_inpcb *)so->so_pcb;
720 error = soreserve(so, sctp_sendspace, sctp_recvspace, NULL);
723 error = sctp_inpcb_alloc(so);
726 inp = (struct sctp_inpcb *)so->so_pcb;
729 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */
730 ip_inp = &inp->ip_inp.inp;
731 ip_inp->inp_vflag |= INP_IPV4;
732 ip_inp->inp_ip_ttl = ip_defttl;
735 #if !(defined(__OpenBSD__) || defined(__APPLE__))
736 error = ipsec_init_policy(so, &ip_inp->inp_sp);
/* IPSEC policy setup failed: discard the pcb we just created. */
738 sctp_inpcb_free(inp, 1);
743 SCTP_INP_WUNLOCK(inp);
744 #if defined(__NetBSD__)
745 so->so_send = sctp_sosend;
749 lwkt_replymsg(&msg->lmsg, error);
/*
 * pru_bind handler: bind the socket to a local v4 address/port.  A
 * non-NULL address of any other family is rejected (this is the v4
 * entry point; the v6 variant lives in the inet6 code).  Actual work is
 * delegated to sctp_inpcb_bind().
 * NOTE(review): truncated view — the EINVAL assignment + goto for the
 * wrong-family case and the inp NULL check are missing here.
 */
753 sctp_bind(netmsg_t msg)
755 struct socket *so = msg->bind.base.nm_so;
756 struct sockaddr *addr = msg->bind.nm_nam;
757 thread_t td = msg->bind.nm_td;
758 struct sctp_inpcb *inp;
762 if (addr && addr->sa_family != AF_INET) {
763 /* must be a v4 address! */
769 inp = (struct sctp_inpcb *)so->so_pcb;
771 error = sctp_inpcb_bind(so, addr, td);
776 lwkt_replymsg(&msg->lmsg, error);
/*
 * pru_detach handler: release the inpcb when the socket is closed.
 * If SO_LINGER-with-zero-timeout is set or unread data remains, free
 * immediately (argument 1 = abort associations); otherwise free
 * gracefully (argument 0), letting shutdown complete.
 * NOTE(review): truncated view — the inp NULL check and the error
 * variable declaration are missing here.
 */
781 sctp_detach(netmsg_t msg)
783 struct socket *so = msg->detach.base.nm_so;
784 struct sctp_inpcb *inp;
787 inp = (struct sctp_inpcb *)so->so_pcb;
792 if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
793 (so->so_rcv.ssb_cc > 0)) {
794 sctp_inpcb_free(inp, 1);
796 sctp_inpcb_free(inp, 0);
800 lwkt_replymsg(&msg->lmsg, error);
/*
 * pru_send handler: queue user data (and optional control mbufs) for
 * transmission.  Validates that an explicit destination is supplied for
 * unconnected one-to-many sockets (EDESTADDRREQ otherwise) and that it
 * is AF_INET; accumulates partial sends in inp->pkt / inp->pkt_last and
 * flushes via sctp_output() once no more data is coming (platform-
 * specific "more to come" test).  Frees nm_addr if the framework
 * allocated it (PRUS_NAMALLOC) and replies to the netmsg.
 * NOTE(review): heavily truncated view — the inp NULL check, the
 * sctp_m_freem(m) companions to the control frees, the c_len
 * computation inside the packet-header fixup loop, and several braces/
 * gotos are missing here; confirm control-mbuf ownership on each error
 * path against the full file.
 */
804 sctp_send(netmsg_t msg)
806 struct socket *so = msg->send.base.nm_so;
807 int flags = msg->send.nm_flags;
808 struct mbuf *m = msg->send.nm_m;
809 struct mbuf *control = msg->send.nm_control;
810 struct sockaddr *addr = msg->send.nm_addr;
811 struct thread *td = msg->send.nm_td;
813 struct sctp_inpcb *inp;
814 inp = (struct sctp_inpcb *)so->so_pcb;
817 sctp_m_freem(control);
824 /* Got to have an to address if we are NOT a connected socket */
825 if ((addr == NULL) &&
826 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
827 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
830 } else if (addr == NULL) {
831 error = EDESTADDRREQ;
834 sctp_m_freem(control);
840 if (addr->sa_family != AF_INET) {
841 /* must be a v4 address! */
844 sctp_m_freem(control);
847 error = EDESTADDRREQ; /* XXX huh? */
853 /* now what about control */
/* A control mbuf left over from a prior partial send is unexpected. */
856 kprintf("huh? control set?\n");
857 sctp_m_freem(inp->control);
860 inp->control = control;
862 /* add it in possibly */
863 if ((inp->pkt) && (inp->pkt->m_flags & M_PKTHDR)) {
/* Append to the pending chain and keep the pkthdr length in sync. */
869 for (x=m;x;x = x->m_next) {
872 inp->pkt->m_pkthdr.len += c_len;
876 inp->pkt_last->m_next = m;
879 inp->pkt_last = inp->pkt = m;
882 #if defined (__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
883 /* FreeBSD uses a flag passed */
884 ((flags & PRUS_MORETOCOME) == 0)
885 #elif defined( __NetBSD__)
886 /* NetBSD uses the so_state field */
887 ((so->so_state & SS_MORETOCOME) == 0)
889 1 /* Open BSD does not have any "more to come" indication */
893 * note with the current version this code will only be used
894 * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for
895 * re-defining sosend to use the sctp_sosend. One can
896 * optionally switch back to this code (by changing back the
897 * definitions) but this is not advisable.
899 error = sctp_output(inp, inp->pkt, addr,
900 inp->control, td, flags);
/* The framework allocated the sockaddr for us; release it. */
907 if (msg->send.nm_flags & PRUS_NAMALLOC) {
908 kfree(msg->send.nm_addr, M_LWKTMSG);
909 msg->send.nm_addr = NULL;
911 lwkt_replymsg(&msg->lmsg, error);
/*
 * pru_disconnect handler.  Only meaningful for one-to-one (TCP-model)
 * sockets: with no association it is a no-op; otherwise, if the socket
 * lingers with zero timeout or has unread data, the association is
 * aborted with a USER_INITIATED_ABT error cause, else a graceful
 * SHUTDOWN is sent once all queues drain (or SHUTDOWN_PENDING is set if
 * data is still queued).  UDP-model sockets get EOPNOTSUPP.
 * NOTE(review): truncated view — the inp NULL check, SCTP_INP_RLOCK,
 * SCTP_TCB_LOCK on the found stcb, the asoc assignment, the MGET
 * failure arm, and several returns/braces are missing here; lock
 * pairing cannot be fully audited from this extract.
 */
915 sctp_disconnect(netmsg_t msg)
917 struct socket *so = msg->disconnect.base.nm_so;
918 struct sctp_inpcb *inp;
921 inp = (struct sctp_inpcb *)so->so_pcb;
927 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
928 if (LIST_EMPTY(&inp->sctp_asoc_list)) {
/* No association: nothing to disconnect. */
930 SCTP_INP_RUNLOCK(inp);
934 int some_on_streamwheel = 0;
935 struct sctp_association *asoc;
936 struct sctp_tcb *stcb;
938 stcb = LIST_FIRST(&inp->sctp_asoc_list);
940 SCTP_INP_RUNLOCK(inp);
946 if (((so->so_options & SO_LINGER) &&
947 (so->so_linger == 0)) ||
948 (so->so_rcv.ssb_cc > 0)) {
949 if (SCTP_GET_STATE(asoc) !=
950 SCTP_STATE_COOKIE_WAIT) {
951 /* Left with Data unread */
954 MGET(err, MB_DONTWAIT, MT_DATA);
956 /* Fill in the user initiated abort */
957 struct sctp_paramhdr *ph;
958 ph = mtod(err, struct sctp_paramhdr *);
959 err->m_len = sizeof(struct sctp_paramhdr);
960 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
961 ph->param_length = htons(err->m_len);
963 sctp_send_abort_tcb(stcb, err);
965 SCTP_INP_RUNLOCK(inp);
966 sctp_free_assoc(inp, stcb);
967 /* No unlock tcb assoc is gone */
971 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
972 /* Check to see if some data queued */
973 struct sctp_stream_out *outs;
974 TAILQ_FOREACH(outs, &asoc->out_wheel,
976 if (!TAILQ_EMPTY(&outs->outqueue)) {
977 some_on_streamwheel = 1;
983 if (TAILQ_EMPTY(&asoc->send_queue) &&
984 TAILQ_EMPTY(&asoc->sent_queue) &&
985 (some_on_streamwheel == 0)) {
986 /* there is nothing queued to send, so done */
987 if ((SCTP_GET_STATE(asoc) !=
988 SCTP_STATE_SHUTDOWN_SENT) &&
989 (SCTP_GET_STATE(asoc) !=
990 SCTP_STATE_SHUTDOWN_ACK_SENT)) {
991 /* only send SHUTDOWN 1st time thru */
993 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
994 kprintf("%s:%d sends a shutdown\n",
1000 sctp_send_shutdown(stcb,
1001 stcb->asoc.primary_destination);
1002 sctp_chunk_output(stcb->sctp_ep, stcb, 1);
1003 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
/* Arm both the SHUTDOWN retransmit and the overall guard timer. */
1004 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1005 stcb->sctp_ep, stcb,
1006 asoc->primary_destination);
1007 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1008 stcb->sctp_ep, stcb,
1009 asoc->primary_destination);
1013 * we still got (or just got) data to send,
1014 * so set SHUTDOWN_PENDING
1017 * XXX sockets draft says that MSG_EOF should
1018 * be sent with no data.
1019 * currently, we will allow user data to be
1020 * sent first and move to SHUTDOWN-PENDING
1022 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
1024 SCTP_TCB_UNLOCK(stcb);
1025 SCTP_INP_RUNLOCK(inp);
1029 /* UDP model does not support this */
1030 SCTP_INP_RUNLOCK(inp);
1034 lwkt_replymsg(&msg->lmsg, error);
/*
 * pru_shutdown handler (shared with the IPv6 entry points).  For
 * one-to-many (UDP-model) sockets shutdown(2) is meaningless, so the
 * receive-side flags soshutdown() cleared are restored and the call
 * succeeds as a no-op.  For one-to-one sockets this initiates a graceful
 * SHUTDOWN exactly like the drain path of sctp_disconnect() above:
 * SHUTDOWN is sent when all outbound queues are empty, otherwise
 * SHUTDOWN_PENDING is flagged.
 * NOTE(review): truncated view — the inp NULL check, the stcb==NULL
 * unlock+return arm, the asoc assignment after SCTP_TCB_LOCK, and
 * several braces are missing here.
 */
1037 /* also called from ipv6 sctp code */
1039 sctp_shutdown(netmsg_t msg)
1041 struct socket *so = msg->shutdown.base.nm_so;
1042 struct sctp_inpcb *inp;
1045 inp = (struct sctp_inpcb *)so->so_pcb;
1050 SCTP_INP_RLOCK(inp);
1051 /* For UDP model this is a invalid call */
1052 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
1053 /* Restore the flags that the soshutdown took away. */
1054 #if defined(__FreeBSD__) && __FreeBSD_version >= 502115
1055 so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
1057 soclrstate(so, SS_CANTRCVMORE);
1059 /* This proc will wakeup for read and do nothing (I hope) */
1060 SCTP_INP_RUNLOCK(inp);
1065 * Ok if we reach here its the TCP model and it is either a SHUT_WR
1066 * or SHUT_RDWR. This means we put the shutdown flag against it.
1069 int some_on_streamwheel = 0;
1070 struct sctp_tcb *stcb;
1071 struct sctp_association *asoc;
1074 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1077 * Ok we hit the case that the shutdown call was made
1078 * after an abort or something. Nothing to do now.
1083 SCTP_TCB_LOCK(stcb);
1086 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
1087 /* Check to see if some data queued */
1088 struct sctp_stream_out *outs;
1089 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
1090 if (!TAILQ_EMPTY(&outs->outqueue)) {
1091 some_on_streamwheel = 1;
1096 if (TAILQ_EMPTY(&asoc->send_queue) &&
1097 TAILQ_EMPTY(&asoc->sent_queue) &&
1098 (some_on_streamwheel == 0)) {
1099 /* there is nothing queued to send, so I'm done... */
1100 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1101 /* only send SHUTDOWN the first time through */
1103 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
1104 kprintf("%s:%d sends a shutdown\n",
1110 sctp_send_shutdown(stcb,
1111 stcb->asoc.primary_destination);
1112 sctp_chunk_output(stcb->sctp_ep, stcb, 1);
1113 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
/* Arm both the SHUTDOWN retransmit and the overall guard timer. */
1114 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1115 stcb->sctp_ep, stcb,
1116 asoc->primary_destination);
1117 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1118 stcb->sctp_ep, stcb,
1119 asoc->primary_destination);
1123 * we still got (or just got) data to send, so
1124 * set SHUTDOWN_PENDING
1126 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
1128 SCTP_TCB_UNLOCK(stcb);
1130 SCTP_INP_RUNLOCK(inp);
1133 lwkt_replymsg(&msg->lmsg, error);
/*
 * Copy a sockaddr into a user-facing sockaddr_storage, letting
 * sctp_recover_scope() rewrite embedded v6 scope information into the
 * presentable form first.
 * NOTE(review): truncated view — the return type/line, the &lsa6 second
 * argument closing the recover_scope call, and the return statement(s)
 * implementing the documented 0/1 result are missing here.
 */
1137 * copies a "user" presentable address and removes embedded scope, etc.
1138 * returns 0 on success, 1 on error
1141 sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
1143 struct sockaddr_in6 lsa6;
1144 sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
1146 memcpy(ss, sa, sa->sa_len);
1151 #if defined(__NetBSD__) || defined(__OpenBSD__)
1153 * On NetBSD and OpenBSD in6_sin_2_v4mapsin6() not used and not exported,
1154 * so we have to export it here.
/* Prototype only; the implementation lives in the inet6 code. */
1156 void in6_sin_2_v4mapsin6(struct sockaddr_in *sin, struct sockaddr_in6 *sin6);
/*
 * Fill the caller-supplied buffer (sas, bounded by `limit` bytes) with
 * the local addresses associated with an endpoint, returning the number
 * of bytes written in `actual` (declaration/return not visible in this
 * extract).  For BOUND-ALL endpoints the interface list is walked,
 * applying the association's (or, with no stcb, all) scopes, skipping
 * unspecified addresses and any address the TCB restricts; v4 addresses
 * are emitted v4-mapped when the PCB requires mapped addresses.  For
 * subset-bound endpoints the PCB/association address lists are used —
 * a NEGATIVE (exclusion) list when ASCONF is enabled, positive otherwise.
 *
 * FIX(review): the mapped-v4 branch previously did
 *     actual += sizeof(sizeof(struct sockaddr_in6));
 * i.e. it added sizeof(size_t) (4/8 bytes) instead of
 * sizeof(struct sockaddr_in6) (28 bytes), while `sas` advanced by the
 * full structure.  `actual` therefore under-counted, the `limit` check
 * fired late (risking writes past the caller's buffer), and the byte
 * count reported to the caller was wrong.  Corrected below to match the
 * pointer advance on the preceding line (same fix as later upstream
 * SCTP code).
 *
 * NOTE(review): this extract is missing lines throughout (limit
 * parameter, ifn/limit declarations, continues, braces, returns);
 * all other tokens are preserved exactly as given.
 */
1160 sctp_fill_up_addresses(struct sctp_inpcb *inp,
1161 struct sctp_tcb *stcb,
1163 struct sockaddr_storage *sas)
1166 int loopback_scope, ipv4_local_scope, local_scope, site_scope, actual;
1167 int ipv4_addr_legal, ipv6_addr_legal;
1173 /* Turn on all the appropriate scope */
1174 loopback_scope = stcb->asoc.loopback_scope;
1175 ipv4_local_scope = stcb->asoc.ipv4_local_scope;
1176 local_scope = stcb->asoc.local_scope;
1177 site_scope = stcb->asoc.site_scope;
1179 /* Turn on ALL scope, since we look at the EP */
1180 loopback_scope = ipv4_local_scope = local_scope =
1183 ipv4_addr_legal = ipv6_addr_legal = 0;
1184 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1185 ipv6_addr_legal = 1;
/* v4 is also legal on a v6 socket unless IPV6_V6ONLY is in effect. */
1187 #if defined(__OpenBSD__)
1188 (0) /* we always do dual bind */
1189 #elif defined (__NetBSD__)
1190 (((struct in6pcb *)inp)->in6p_flags & IN6P_IPV6_V6ONLY)
1192 (((struct in6pcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY)
1195 ipv4_addr_legal = 1;
1198 ipv4_addr_legal = 1;
1201 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1202 TAILQ_FOREACH(ifn, &ifnet, if_list) {
1203 struct ifaddr_container *ifac;
1205 if ((loopback_scope == 0) &&
1206 (ifn->if_type == IFT_LOOP)) {
1207 /* Skip loopback if loopback_scope not set */
1210 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid],
1212 struct ifaddr *ifa = ifac->ifa;
1216 * For the BOUND-ALL case, the list
1217 * associated with a TCB is Always
1218 * considered a reverse list.. i.e.
1219 * it lists addresses that are NOT
1220 * part of the association. If this
1221 * is one of those we must skip it.
1223 if (sctp_is_addr_restricted(stcb,
1228 if ((ifa->ifa_addr->sa_family == AF_INET) &&
1229 (ipv4_addr_legal)) {
1230 struct sockaddr_in *sin;
1231 sin = (struct sockaddr_in *)ifa->ifa_addr;
1232 if (sin->sin_addr.s_addr == 0) {
1233 /* we skip unspecifed addresses */
1236 if ((ipv4_local_scope == 0) &&
1237 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1240 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) {
1241 in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
1242 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1243 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
/* was: actual += sizeof(sizeof(struct sockaddr_in6)); — under-counted */
1244 actual += sizeof(struct sockaddr_in6);
1246 memcpy(sas, sin, sizeof(*sin));
1247 ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
1248 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
1249 actual += sizeof(*sin);
1251 if (actual >= limit) {
1254 } else if ((ifa->ifa_addr->sa_family == AF_INET6) &&
1255 (ipv6_addr_legal)) {
1256 struct sockaddr_in6 *sin6, lsa6;
1257 sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
1258 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1259 /* we skip unspecifed addresses */
1262 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
1263 if (local_scope == 0)
1265 if (sin6->sin6_scope_id == 0) {
1267 if (in6_recoverscope(&lsa6,
1270 /* bad link local address */
1275 if ((site_scope == 0) &&
1276 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1279 memcpy(sas, sin6, sizeof(*sin6));
1280 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1281 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
1282 actual += sizeof(*sin6);
1283 if (actual >= limit) {
1290 struct sctp_laddr *laddr;
1292 * If we have a TCB and we do NOT support ASCONF (it's
1293 * turned off or otherwise) then the list is always the
1294 * true list of addresses (the else case below). Otherwise
1295 * the list on the association is a list of addresses that
1296 * are NOT part of the association.
1298 if (inp->sctp_flags & SCTP_PCB_FLAGS_DO_ASCONF) {
1299 /* The list is a NEGATIVE list */
1300 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1302 if (sctp_is_addr_restricted(stcb, laddr->ifa->ifa_addr)) {
1306 if (sctp_fill_user_address(sas, laddr->ifa->ifa_addr))
/* sin6_port aliases sin_port at the same offset, so this works for v4 too. */
1309 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1310 sas = (struct sockaddr_storage *)((caddr_t)sas +
1311 laddr->ifa->ifa_addr->sa_len);
1312 actual += laddr->ifa->ifa_addr->sa_len;
1313 if (actual >= limit) {
1318 /* The list is a positive list if present */
1320 /* Must use the specific association list */
1321 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
1323 if (sctp_fill_user_address(sas,
1324 laddr->ifa->ifa_addr))
1326 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1327 sas = (struct sockaddr_storage *)((caddr_t)sas +
1328 laddr->ifa->ifa_addr->sa_len);
1329 actual += laddr->ifa->ifa_addr->sa_len;
1330 if (actual >= limit) {
1335 /* No endpoint so use the endpoints individual list */
1336 LIST_FOREACH(laddr, &inp->sctp_addr_list,
1338 if (sctp_fill_user_address(sas,
1339 laddr->ifa->ifa_addr))
1341 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1342 sas = (struct sockaddr_storage *)((caddr_t)sas +
1343 laddr->ifa->ifa_addr->sa_len);
1344 actual += laddr->ifa->ifa_addr->sa_len;
1345 if (actual >= limit) {
/* sctp_count_max_addresses(): return an upper bound, in BYTES of sockaddr
 * storage, needed to hold every address this endpoint could report.
 * (Listing is elided; return type, 'cnt' declaration, else branches and the
 * return statement are on missing lines.) */
1356 sctp_count_max_addresses(struct sctp_inpcb *inp)
1360 * In both sub-set bound and bound_all cases we return the MAXIMUM
1361 * number of addresses that you COULD get. In reality the sub-set
1362 * bound may have an exclusion list for a given TCB OR in the
1363 * bound-all case a TCB may NOT include the loopback or other
1364 * addresses as well.
1366 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
/* Bound-all: walk every address on every interface (per-CPU head list). */
1369 TAILQ_FOREACH(ifn, &ifnet, if_list) {
1370 struct ifaddr_container *ifac;
1372 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
1373 struct ifaddr *ifa = ifac->ifa;
1375 /* Count them if they are the right type */
/* A v4 address reported as v4-mapped v6 costs a sockaddr_in6 slot. */
1376 if (ifa->ifa_addr->sa_family == AF_INET) {
1377 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
1378 cnt += sizeof(struct sockaddr_in6);
1380 cnt += sizeof(struct sockaddr_in);
1382 } else if (ifa->ifa_addr->sa_family == AF_INET6)
1383 cnt += sizeof(struct sockaddr_in6);
/* Sub-set bound: count only the endpoint's explicit local-address list. */
1387 struct sctp_laddr *laddr;
1388 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1389 if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
1390 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
1391 cnt += sizeof(struct sockaddr_in6);
1393 cnt += sizeof(struct sockaddr_in);
1395 } else if (laddr->ifa->ifa_addr->sa_family == AF_INET6)
1396 cnt += sizeof(struct sockaddr_in6);
/* sctp_do_connect_x(): implement connectx() — set up an association whose
 * peer is described by a packed array of sockaddrs in mbuf 'm' (count first,
 * then the addresses back to back).  (Listing is elided; the remaining
 * parameters and several declarations are on missing lines.) */
1403 sctp_do_connect_x(struct socket *so,
1404 struct sctp_inpcb *inp,
1406 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
1415 struct sctp_tcb *stcb = NULL;
1416 struct sockaddr *sa;
1417 int num_v6=0, num_v4=0, *totaddrp, totaddr, i, incr, at;
1419 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1420 kprintf("Connectx called\n");
1422 #endif /* SCTP_DEBUG */
/* One-to-one (TCP-model) socket that is already connected cannot connect
 * again: reject with EADDRINUSE, matching connect() semantics. */
1424 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1425 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
1426 /* We are already connected AND the TCP model */
1427 return (EADDRINUSE);
1429 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
1430 SCTP_INP_RLOCK(inp);
1431 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1432 SCTP_INP_RUNLOCK(inp);
1438 SCTP_ASOC_CREATE_LOCK(inp);
/* BUG FIX: both legs of this || tested SCTP_PCB_FLAGS_SOCKET_GONE — a
 * duplicated condition.  The intended second test (as in upstream SCTP
 * code) is SOCKET_ALLGONE, so a fully-gone socket aborts the connect too. */
1439 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1440 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
1441 SCTP_ASOC_CREATE_UNLOCK(inp);
/* Body of sctp_do_connect_x(): validate the packed address list, allocate
 * the association, add each remote address, then kick off the INIT exchange.
 * (Listing is elided; error assignments, closing braces and several else
 * branches are on missing lines.) */
1445 totaddrp = mtod(m, int *);
1446 totaddr = *totaddrp;
1447 sa = (struct sockaddr *)(totaddrp + 1);
1449 /* account and validate addresses */
/* Hold a ref on the inp across the lookup loop so it cannot go away. */
1450 SCTP_INP_WLOCK(inp);
1451 SCTP_INP_INCR_REF(inp);
1452 SCTP_INP_WUNLOCK(inp);
1453 for (i = 0; i < totaddr; i++) {
1454 if (sa->sa_family == AF_INET) {
1456 incr = sizeof(struct sockaddr_in);
1457 } else if (sa->sa_family == AF_INET6) {
1458 struct sockaddr_in6 *sin6;
1459 sin6 = (struct sockaddr_in6 *)sa;
1460 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
1461 /* Must be non-mapped for connectx */
1462 SCTP_ASOC_CREATE_UNLOCK(inp);
1466 incr = sizeof(struct sockaddr_in6);
/* Reject the whole call if any listed address already belongs to an
 * association on this endpoint. */
1471 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
1473 /* Already have or are bringing up an association */
1474 SCTP_ASOC_CREATE_UNLOCK(inp);
1475 SCTP_TCB_UNLOCK(stcb);
/* Bounds check: the next sockaddr must fit inside the mbuf data. */
1478 if ((at + incr) > m->m_len) {
1482 sa = (struct sockaddr *)((caddr_t)sa + incr);
1484 sa = (struct sockaddr *)(totaddrp + 1);
1485 SCTP_INP_WLOCK(inp);
1486 SCTP_INP_DECR_REF(inp);
1487 SCTP_INP_WUNLOCK(inp);
/* v6 addresses on a v4-only socket are not allowed. */
1489 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
1491 SCTP_INP_WUNLOCK(inp);
1492 SCTP_ASOC_CREATE_UNLOCK(inp);
1495 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1497 struct in6pcb *inp6;
1498 inp6 = (struct in6pcb *)inp;
1500 #if defined(__OpenBSD__)
1501 (0) /* we always do dual bind */
1502 #elif defined (__NetBSD__)
1503 (inp6->in6p_flags & IN6P_IPV6_V6ONLY)
1505 (inp6->inp_flags & IN6P_IPV6_V6ONLY)
1509 * if IPV6_V6ONLY flag, ignore connections
1510 * destined to a v4 addr or v4-mapped addr
1512 SCTP_INP_WUNLOCK(inp);
1513 SCTP_ASOC_CREATE_UNLOCK(inp);
1518 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
1519 SCTP_PCB_FLAGS_UNBOUND) {
1520 /* Bind an ephemeral port */
1521 SCTP_INP_WUNLOCK(inp);
1522 error = sctp_inpcb_bind(so, NULL, p);
1524 SCTP_ASOC_CREATE_UNLOCK(inp);
1528 SCTP_INP_WUNLOCK(inp);
1530 /* We are GOOD to go */
1531 stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0);
1533 /* Gak! no memory */
1534 SCTP_ASOC_CREATE_UNLOCK(inp);
1537 /* move to second address */
1538 if (sa->sa_family == AF_INET)
1539 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
1541 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
/* First address was consumed by sctp_aloc_assoc(); add the rest. */
1543 for (i = 1; i < totaddr; i++) {
1544 if (sa->sa_family == AF_INET) {
1545 incr = sizeof(struct sockaddr_in);
1546 if (sctp_add_remote_addr(stcb, sa, 0, 8)) {
1547 /* assoc gone no un-lock */
1548 sctp_free_assoc(inp, stcb);
1549 SCTP_ASOC_CREATE_UNLOCK(inp);
1553 } else if (sa->sa_family == AF_INET6) {
1554 incr = sizeof(struct sockaddr_in6);
1555 if (sctp_add_remote_addr(stcb, sa, 0, 8)) {
1556 /* assoc gone no un-lock */
1557 sctp_free_assoc(inp, stcb);
1558 SCTP_ASOC_CREATE_UNLOCK(inp);
1562 sa = (struct sockaddr *)((caddr_t)sa + incr);
1564 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
1566 /* doing delayed connection */
1567 stcb->asoc.delayed_connection = 1;
1568 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
1570 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1571 sctp_send_initiate(inp, stcb);
1573 SCTP_TCB_UNLOCK(stcb);
1574 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
1575 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1576 /* Set the connected flag so we can queue data */
1579 SCTP_ASOC_CREATE_UNLOCK(inp);
/* sctp_optsget(): getsockopt() handler for SCTP — dispatch on the option
 * code and fill the caller's mbuf 'm' with the requested value.
 * (Listing is elided; parameters, early returns and 'break's are on
 * missing lines.) */
1585 sctp_optsget(struct socket *so,
1588 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
1595 struct sctp_inpcb *inp;
1597 int error, optval=0;
1598 struct sctp_tcb *stcb = NULL;
1600 inp = (struct sctp_inpcb *)so->so_pcb;
1607 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1608 kprintf("optsget:MP is NULL EINVAL\n");
1610 #endif /* SCTP_DEBUG */
1615 /* Got to have a mbuf */
1617 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1618 kprintf("Huh no mbuf\n");
1620 #endif /* SCTP_DEBUG */
1624 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
1625 kprintf("optsget opt:%lxx sz:%u\n", (unsigned long)opt,
1628 #endif /* SCTP_DEBUG */
/* Boolean-style endpoint options: read the flag under the INP read lock. */
1632 case SCTP_AUTOCLOSE:
1633 case SCTP_AUTO_ASCONF:
1634 case SCTP_DISABLE_FRAGMENTS:
1635 case SCTP_I_WANT_MAPPED_V4_ADDR:
1637 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
1638 kprintf("other stuff\n");
1640 #endif /* SCTP_DEBUG */
1641 SCTP_INP_RLOCK(inp);
1643 case SCTP_DISABLE_FRAGMENTS:
1644 optval = inp->sctp_flags & SCTP_PCB_FLAGS_NO_FRAGMENT;
1646 case SCTP_I_WANT_MAPPED_V4_ADDR:
1647 optval = inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
1649 case SCTP_AUTO_ASCONF:
1650 optval = inp->sctp_flags & SCTP_PCB_FLAGS_AUTO_ASCONF;
1653 optval = inp->sctp_flags & SCTP_PCB_FLAGS_NODELAY;
1655 case SCTP_AUTOCLOSE:
1656 if ((inp->sctp_flags & SCTP_PCB_FLAGS_AUTOCLOSE) ==
1657 SCTP_PCB_FLAGS_AUTOCLOSE)
1658 optval = inp->sctp_ep.auto_close_time;
1664 error = ENOPROTOOPT;
1665 } /* end switch (sopt->sopt_name) */
/* AUTOCLOSE returns the raw time; everything else is normalized to 0/1. */
1666 if (opt != SCTP_AUTOCLOSE) {
1667 /* make it an "on/off" value */
1668 optval = (optval != 0);
1670 if ((size_t)m->m_len < sizeof(int)) {
1673 SCTP_INP_RUNLOCK(inp);
1675 /* return the option value */
1676 *mtod(m, int *) = optval;
1677 m->m_len = sizeof(optval);
/* sctp_optsget() cases: association-id list, nonce values, unsupported auth
 * options, and delayed-ACK time.  (Listing is elided.) */
1680 case SCTP_GET_ASOC_ID_LIST:
1682 struct sctp_assoc_ids *ids;
1686 if ((size_t)m->m_len < sizeof(struct sctp_assoc_ids)) {
1690 ids = mtod(m, struct sctp_assoc_ids *);
1692 SCTP_INP_RLOCK(inp);
1693 stcb = LIST_FIRST(&inp->sctp_asoc_list);
/* No associations at all: report an empty, complete list. */
1696 ids->asls_numb_present = 0;
1697 ids->asls_more_to_get = 0;
1698 SCTP_INP_RUNLOCK(inp);
/* Resume-style paging: skip forward to the caller's start id. */
1701 orig = ids->asls_assoc_start;
1702 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1704 stcb = LIST_NEXT(stcb , sctp_tcblist);
1712 ids->asls_numb_present = 0;
1713 ids->asls_more_to_get = 1;
1714 while(at < MAX_ASOC_IDS_RET) {
1715 ids->asls_assoc_id[at] = sctp_get_associd(stcb);
1717 ids->asls_numb_present++;
1718 stcb = LIST_NEXT(stcb , sctp_tcblist);
1720 ids->asls_more_to_get = 0;
1724 SCTP_INP_RUNLOCK(inp);
1727 case SCTP_GET_NONCE_VALUES:
1729 struct sctp_get_nonce_values *gnv;
1730 if ((size_t)m->m_len < sizeof(struct sctp_get_nonce_values)) {
1734 gnv = mtod(m, struct sctp_get_nonce_values *);
1735 stcb = sctp_findassociation_ep_asocid(inp, gnv->gn_assoc_id);
1739 gnv->gn_peers_tag = stcb->asoc.peer_vtag;
1740 gnv->gn_local_tag = stcb->asoc.my_vtag;
1741 SCTP_TCB_UNLOCK(stcb);
1746 case SCTP_PEER_PUBLIC_KEY:
1747 case SCTP_MY_PUBLIC_KEY:
1748 case SCTP_SET_AUTH_CHUNKS:
1749 case SCTP_SET_AUTH_SECRET:
1750 /* not supported yet and until we refine the draft */
1754 case SCTP_DELAYED_ACK_TIME:
1757 if ((size_t)m->m_len < sizeof(int32_t)) {
1761 tm = mtod(m, int32_t *);
/* Stored internally in ticks; reported to the user in milliseconds. */
1763 *tm = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
/* sctp_optsget() cases: send-buffer usage, max burst, MAXSEG, debug level,
 * stat log and peg counters, and the head of the events case.
 * (Listing is elided.) */
1767 case SCTP_GET_SNDBUF_USE:
1768 if ((size_t)m->m_len < sizeof(struct sctp_sockstat)) {
1771 struct sctp_sockstat *ss;
1772 struct sctp_tcb *stcb;
1773 struct sctp_association *asoc;
1774 ss = mtod(m, struct sctp_sockstat *);
1775 stcb = sctp_findassociation_ep_asocid(inp, ss->ss_assoc_id);
1780 ss->ss_total_sndbuf = (u_int32_t)asoc->total_output_queue_size;
1781 ss->ss_total_mbuf_sndbuf = (u_int32_t)asoc->total_output_mbuf_queue_size;
1782 ss->ss_total_recv_buf = (u_int32_t)(asoc->size_on_delivery_queue +
1783 asoc->size_on_reasm_queue +
1784 asoc->size_on_all_streams);
1785 SCTP_TCB_UNLOCK(stcb);
1787 m->m_len = sizeof(struct sctp_sockstat);
1794 burst = mtod(m, u_int8_t *);
1795 SCTP_INP_RLOCK(inp);
1796 *burst = inp->sctp_ep.max_burst;
1797 SCTP_INP_RUNLOCK(inp);
1798 m->m_len = sizeof(u_int8_t);
/* MAXSEG: assoc_id and segsize alias the same mbuf data — the id is read
 * first, then the answer is written over it. */
1804 sctp_assoc_t *assoc_id;
1807 if ((size_t)m->m_len < sizeof(u_int32_t)) {
1811 if ((size_t)m->m_len < sizeof(sctp_assoc_t)) {
1815 assoc_id = mtod(m, sctp_assoc_t *);
1816 segsize = mtod(m, u_int32_t *);
1817 m->m_len = sizeof(u_int32_t);
1819 if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1820 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) ||
1821 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
1822 struct sctp_tcb *stcb;
1823 SCTP_INP_RLOCK(inp);
1824 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1826 SCTP_TCB_LOCK(stcb);
1827 SCTP_INP_RUNLOCK(inp);
1828 *segsize = sctp_get_frag_point(stcb, &stcb->asoc);
1829 SCTP_TCB_UNLOCK(stcb);
1831 SCTP_INP_RUNLOCK(inp);
1835 stcb = sctp_findassociation_ep_asocid(inp, *assoc_id);
1837 *segsize = sctp_get_frag_point(stcb, &stcb->asoc);
1838 SCTP_TCB_UNLOCK(stcb);
1842 /* default is to get the max, if I
1843 * can't calculate from an existing association.
1845 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1846 ovh = SCTP_MED_OVERHEAD;
1848 ovh = SCTP_MED_V4_OVERHEAD;
1850 *segsize = inp->sctp_frag_point - ovh;
1855 case SCTP_SET_DEBUG_LEVEL:
1859 if ((size_t)m->m_len < sizeof(u_int32_t)) {
1863 level = mtod(m, u_int32_t *);
1865 *level = sctp_debug_on;
1866 m->m_len = sizeof(u_int32_t);
1867 kprintf("Returning DEBUG LEVEL %x is set\n",
1868 (u_int)sctp_debug_on);
1870 #else /* SCTP_DEBUG */
1874 case SCTP_GET_STAT_LOG:
1875 #ifdef SCTP_STAT_LOGGING
1876 error = sctp_fill_stat_log(m);
1877 #else /* SCTP_DEBUG */
1884 if ((size_t)m->m_len < sizeof(sctp_pegs)) {
1888 pt = mtod(m, u_int32_t *);
1889 memcpy(pt, sctp_pegs, sizeof(sctp_pegs));
1890 m->m_len = sizeof(sctp_pegs);
1895 struct sctp_event_subscribe *events;
1897 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
1898 kprintf("get events\n");
1900 #endif /* SCTP_DEBUG */
1901 if ((size_t)m->m_len < sizeof(struct sctp_event_subscribe)) {
1903 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
1904 kprintf("M->M_LEN is %d not %d\n",
1906 (int)sizeof(struct sctp_event_subscribe));
1908 #endif /* SCTP_DEBUG */
1912 events = mtod(m, struct sctp_event_subscribe *);
/* BUG FIX: was "memset(events, 0, sizeof(events));" — that zeroes only
 * sizeof(pointer) bytes (4/8), leaving most of the sctp_event_subscribe
 * struct uninitialized before the flag fields are filled in below.
 * Zero the whole pointed-to struct instead. */
1913 memset(events, 0, sizeof(*events));
1914 SCTP_INP_RLOCK(inp);
/* sctp_optsget() cases: translate the endpoint's event-notification flag
 * bits into the user-visible sctp_event_subscribe struct, then the
 * adaption-layer, initial-dbg-seq and local-addr-size cases.
 * (Listing is elided.) */
1915 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT)
1916 events->sctp_data_io_event = 1;
1918 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVASSOCEVNT)
1919 events->sctp_association_event = 1;
1921 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVPADDREVNT)
1922 events->sctp_address_event = 1;
1924 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVSENDFAILEVNT)
1925 events->sctp_send_failure_event = 1;
1927 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVPEERERR)
1928 events->sctp_peer_error_event = 1;
1930 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)
1931 events->sctp_shutdown_event = 1;
1933 if (inp->sctp_flags & SCTP_PCB_FLAGS_PDAPIEVNT)
1934 events->sctp_partial_delivery_event = 1;
1936 if (inp->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT)
1937 events->sctp_adaption_layer_event = 1;
1939 if (inp->sctp_flags & SCTP_PCB_FLAGS_STREAM_RESETEVNT)
1940 events->sctp_stream_reset_events = 1;
1941 SCTP_INP_RUNLOCK(inp);
1942 m->m_len = sizeof(struct sctp_event_subscribe);
1947 case SCTP_ADAPTION_LAYER:
1948 if ((size_t)m->m_len < sizeof(int)) {
1953 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1954 kprintf("getadaption ind\n");
1956 #endif /* SCTP_DEBUG */
1957 SCTP_INP_RLOCK(inp);
1958 *mtod(m, int *) = inp->sctp_ep.adaption_layer_indicator;
1959 SCTP_INP_RUNLOCK(inp);
1960 m->m_len = sizeof(int);
1962 case SCTP_SET_INITIAL_DBG_SEQ:
1963 if ((size_t)m->m_len < sizeof(int)) {
1968 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1969 kprintf("get initial dbg seq\n");
1971 #endif /* SCTP_DEBUG */
1972 SCTP_INP_RLOCK(inp);
1973 *mtod(m, int *) = inp->sctp_ep.initial_sequence_debug;
1974 SCTP_INP_RUNLOCK(inp);
1975 m->m_len = sizeof(int);
1977 case SCTP_GET_LOCAL_ADDR_SIZE:
1978 if ((size_t)m->m_len < sizeof(int)) {
1983 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1984 kprintf("get local sizes\n");
1986 #endif /* SCTP_DEBUG */
1987 SCTP_INP_RLOCK(inp);
1988 *mtod(m, int *) = sctp_count_max_addresses(inp);
1989 SCTP_INP_RUNLOCK(inp);
1990 m->m_len = sizeof(int);
/* sctp_optsget() cases: size of the peer-address set, and copying the peer
 * addresses out to the caller's buffer.  (Listing is elided.) */
1992 case SCTP_GET_REMOTE_ADDR_SIZE:
1994 sctp_assoc_t *assoc_id;
1996 struct sctp_nets *net;
1998 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1999 kprintf("get remote size\n");
2001 #endif /* SCTP_DEBUG */
2002 if ((size_t)m->m_len < sizeof(sctp_assoc_t)) {
2004 kprintf("m->m_len:%d not %d\n",
2005 m->m_len, sizeof(sctp_assoc_t));
2006 #endif /* SCTP_DEBUG */
2011 val = mtod(m, u_int32_t *);
/* TCP-model connected socket: implicit first (only) association. */
2012 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2013 SCTP_INP_RLOCK(inp);
2014 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2016 SCTP_TCB_LOCK(stcb);
2017 SCTP_INP_RUNLOCK(inp);
2020 assoc_id = mtod(m, sctp_assoc_t *);
2021 stcb = sctp_findassociation_ep_asocid(inp, *assoc_id);
2030 /* Count the sizes */
2031 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2032 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) ||
2033 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2034 sz += sizeof(struct sockaddr_in6);
2035 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2036 sz += sizeof(struct sockaddr_in);
2042 SCTP_TCB_UNLOCK(stcb);
2044 m->m_len = sizeof(u_int32_t);
2047 case SCTP_GET_PEER_ADDRESSES:
2049 * Get the address information, an array
2050 * is passed in to fill up we pack it.
2054 struct sockaddr_storage *sas;
2055 struct sctp_nets *net;
2056 struct sctp_getaddresses *saddr;
2058 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2059 kprintf("get peer addresses\n");
2061 #endif /* SCTP_DEBUG */
2062 if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
2066 left = m->m_len - sizeof(struct sctp_getaddresses);
2067 saddr = mtod(m, struct sctp_getaddresses *);
2068 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2069 SCTP_INP_RLOCK(inp);
2070 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2072 SCTP_TCB_LOCK(stcb);
2073 SCTP_INP_RUNLOCK(inp);
2075 stcb = sctp_findassociation_ep_asocid(inp,
2076 saddr->sget_assoc_id);
2081 m->m_len = sizeof(struct sctp_getaddresses);
2082 sas = (struct sockaddr_storage *)&saddr->addr[0];
2084 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2085 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) ||
2086 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2087 cpsz = sizeof(struct sockaddr_in6);
2088 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2089 cpsz = sizeof(struct sockaddr_in);
2095 /* not enough room. */
2097 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2098 kprintf("Out of room\n");
2100 #endif /* SCTP_DEBUG */
/* v4 peer on a mapped-v4 socket must be converted to a v4-mapped v6. */
2103 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2104 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) {
2105 /* Must map the address */
2106 in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr,
2107 (struct sockaddr_in6 *)sas);
2109 memcpy(sas, &net->ro._l_addr, cpsz);
2111 ((struct sockaddr_in *)sas)->sin_port = stcb->rport;
2113 sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
2117 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
2118 kprintf("left now:%d mlen:%d\n",
2121 #endif /* SCTP_DEBUG */
2123 SCTP_TCB_UNLOCK(stcb);
2126 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2127 kprintf("All done\n");
2129 #endif /* SCTP_DEBUG */
/* sctp_optsget() cases: copy out local addresses, and read per-peer address
 * parameters (path max retransmits / heartbeat interval).
 * (Listing is elided.) */
2131 case SCTP_GET_LOCAL_ADDRESSES:
2134 struct sockaddr_storage *sas;
2135 struct sctp_getaddresses *saddr;
2137 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2138 kprintf("get local addresses\n");
2140 #endif /* SCTP_DEBUG */
2141 if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
2145 saddr = mtod(m, struct sctp_getaddresses *);
2147 if (saddr->sget_assoc_id) {
2148 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2149 SCTP_INP_RLOCK(inp);
2150 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2152 SCTP_TCB_LOCK(stcb);
2153 SCTP_INP_RUNLOCK(inp);
2155 stcb = sctp_findassociation_ep_asocid(inp, saddr->sget_assoc_id);
2161 * assure that the TCP model does not need a assoc id
2164 if ( (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) &&
2166 SCTP_INP_RLOCK(inp);
2167 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2169 SCTP_TCB_LOCK(stcb);
2170 SCTP_INP_RUNLOCK(inp);
2172 sas = (struct sockaddr_storage *)&saddr->addr[0];
2173 limit = m->m_len - sizeof(sctp_assoc_t);
2174 actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
/* NOTE(review): stcb may still be NULL here (no assoc id given and not
 * connected) — presumably SCTP_TCB_UNLOCK tolerates NULL in this tree;
 * confirm against the lock macro definitions. */
2175 SCTP_TCB_UNLOCK(stcb);
2176 m->m_len = sizeof(struct sockaddr_storage) + actual;
2179 case SCTP_PEER_ADDR_PARAMS:
2181 struct sctp_paddrparams *paddrp;
2182 struct sctp_nets *net;
2185 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2186 kprintf("Getting peer_addr_params\n");
2188 #endif /* SCTP_DEBUG */
2189 if ((size_t)m->m_len < sizeof(struct sctp_paddrparams)) {
2191 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
2192 kprintf("Hmm m->m_len:%d is to small\n",
2195 #endif /* SCTP_DEBUG */
2199 paddrp = mtod(m, struct sctp_paddrparams *);
/* Resolution order: assoc id first, then by address, else endpoint-level. */
2202 if (paddrp->spp_assoc_id) {
2204 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2205 kprintf("In spp_assoc_id find type\n");
2207 #endif /* SCTP_DEBUG */
2208 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2209 SCTP_INP_RLOCK(inp);
2210 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2212 SCTP_TCB_LOCK(stcb);
2213 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
2215 SCTP_INP_RLOCK(inp);
2217 stcb = sctp_findassociation_ep_asocid(inp, paddrp->spp_assoc_id);
2224 if ( (stcb == NULL) &&
2225 ((((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET) ||
2226 (((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET6))) {
2227 /* Lookup via address */
2229 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2230 kprintf("Ok we need to lookup a param\n");
2232 #endif /* SCTP_DEBUG */
2233 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2234 SCTP_INP_RLOCK(inp);
2235 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2237 SCTP_TCB_LOCK(stcb);
2238 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
2240 SCTP_INP_RUNLOCK(inp);
/* Ref-count the inp across the by-address lookup, as in connect_x. */
2242 SCTP_INP_WLOCK(inp);
2243 SCTP_INP_INCR_REF(inp);
2244 SCTP_INP_WUNLOCK(inp);
2245 stcb = sctp_findassociation_ep_addr(&inp,
2246 (struct sockaddr *)&paddrp->spp_address,
2249 SCTP_INP_WLOCK(inp);
2250 SCTP_INP_DECR_REF(inp);
2251 SCTP_INP_WUNLOCK(inp);
2260 /* Effects the Endpoint */
2262 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2263 kprintf("User wants EP level info\n");
2265 #endif /* SCTP_DEBUG */
2269 /* Applys to the specific association */
2271 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2272 kprintf("In TCB side\n");
2274 #endif /* SCTP_DEBUG */
2276 paddrp->spp_pathmaxrxt = net->failure_threshold;
2278 /* No destination so return default value */
2279 paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
2281 paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
2282 paddrp->spp_assoc_id = sctp_get_associd(stcb);
2283 SCTP_TCB_UNLOCK(stcb);
2285 /* Use endpoint defaults */
2286 SCTP_INP_RLOCK(inp);
2288 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2289 kprintf("In EP level info\n");
2291 #endif /* SCTP_DEBUG */
2292 paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
2293 paddrp->spp_hbinterval = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT];
2294 paddrp->spp_assoc_id = (sctp_assoc_t)0;
2295 SCTP_INP_RUNLOCK(inp);
2297 m->m_len = sizeof(struct sctp_paddrparams);
/* sctp_optsget() cases: per-peer address runtime info, global PCB counters,
 * and the SCTP_STATUS snapshot of an association.  (Listing is elided.) */
2300 case SCTP_GET_PEER_ADDR_INFO:
2302 struct sctp_paddrinfo *paddri;
2303 struct sctp_nets *net;
2305 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2306 kprintf("GetPEER ADDR_INFO\n");
2308 #endif /* SCTP_DEBUG */
2309 if ((size_t)m->m_len < sizeof(struct sctp_paddrinfo)) {
2313 paddri = mtod(m, struct sctp_paddrinfo *);
2315 if ((((struct sockaddr *)&paddri->spinfo_address)->sa_family == AF_INET) ||
2316 (((struct sockaddr *)&paddri->spinfo_address)->sa_family == AF_INET6)) {
2317 /* Lookup via address */
2318 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2319 SCTP_INP_RLOCK(inp);
2320 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2322 SCTP_TCB_LOCK(stcb);
2323 net = sctp_findnet(stcb,
2324 (struct sockaddr *)&paddri->spinfo_address);
2326 SCTP_INP_RUNLOCK(inp);
2328 SCTP_INP_WLOCK(inp);
2329 SCTP_INP_INCR_REF(inp);
2330 SCTP_INP_WUNLOCK(inp);
2331 stcb = sctp_findassociation_ep_addr(&inp,
2332 (struct sockaddr *)&paddri->spinfo_address,
2335 SCTP_INP_WLOCK(inp);
2336 SCTP_INP_DECR_REF(inp);
2337 SCTP_INP_WUNLOCK(inp);
2344 if ((stcb == NULL) || (net == NULL)) {
2348 m->m_len = sizeof(struct sctp_paddrinfo);
2349 paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK|SCTP_ADDR_NOHB);
2350 paddri->spinfo_cwnd = net->cwnd;
/* Classic smoothed RTT estimate from the scaled lastsa/lastsv pair. */
2351 paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
2352 paddri->spinfo_rto = net->RTO;
2353 paddri->spinfo_assoc_id = sctp_get_associd(stcb);
2354 SCTP_TCB_UNLOCK(stcb);
2357 case SCTP_PCB_STATUS:
2359 struct sctp_pcbinfo *spcb;
2361 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2362 kprintf("PCB status\n");
2364 #endif /* SCTP_DEBUG */
2365 if ((size_t)m->m_len < sizeof(struct sctp_pcbinfo)) {
2369 spcb = mtod(m, struct sctp_pcbinfo *);
2370 sctp_fill_pcbinfo(spcb);
2371 m->m_len = sizeof(struct sctp_pcbinfo);
2376 struct sctp_nets *net;
2377 struct sctp_status *sstat;
2379 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2380 kprintf("SCTP status\n");
2382 #endif /* SCTP_DEBUG */
2384 if ((size_t)m->m_len < sizeof(struct sctp_status)) {
2388 sstat = mtod(m, struct sctp_status *);
2390 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2391 SCTP_INP_RLOCK(inp);
2392 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2394 SCTP_TCB_LOCK(stcb);
2395 SCTP_INP_RUNLOCK(inp);
2397 stcb = sctp_findassociation_ep_asocid(inp, sstat->sstat_assoc_id);
2404 * I think passing the state is fine since
2405 * sctp_constants.h will be available to the user
2408 sstat->sstat_state = stcb->asoc.state;
2409 sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
2410 sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
2412 * We can't include chunks that have been passed
2413 * to the socket layer. Only things in queue.
2415 sstat->sstat_penddata = (stcb->asoc.cnt_on_delivery_queue +
2416 stcb->asoc.cnt_on_reasm_queue +
2417 stcb->asoc.cnt_on_all_streams);
2420 sstat->sstat_instrms = stcb->asoc.streamincnt;
2421 sstat->sstat_outstrms = stcb->asoc.streamoutcnt;
2422 sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc);
2423 memcpy(&sstat->sstat_primary.spinfo_address,
2424 &stcb->asoc.primary_destination->ro._l_addr,
2425 ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len);
2426 net = stcb->asoc.primary_destination;
2427 ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport;
2429 * Again the user can get info from sctp_constants.h
2430 * for what the state of the network is.
2432 sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK;
2433 sstat->sstat_primary.spinfo_cwnd = net->cwnd;
2434 sstat->sstat_primary.spinfo_srtt = net->lastsa;
2435 sstat->sstat_primary.spinfo_rto = net->RTO;
2436 sstat->sstat_primary.spinfo_mtu = net->mtu;
2437 sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb);
2438 SCTP_TCB_UNLOCK(stcb);
2439 m->m_len = sizeof(*sstat);
/* sctp_optsget() tail cases: RTO info, association parameters, default send
 * parameters, INIT message defaults, and primary-address query; ends at the
 * dispatch switch's closing brace.  (Listing is elided.) */
2444 struct sctp_rtoinfo *srto;
2446 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2447 kprintf("RTO Info\n");
2449 #endif /* SCTP_DEBUG */
2450 if ((size_t)m->m_len < sizeof(struct sctp_rtoinfo)) {
2454 srto = mtod(m, struct sctp_rtoinfo *);
/* assoc_id == 0 selects endpoint defaults; otherwise per-association. */
2455 if (srto->srto_assoc_id == 0) {
2456 /* Endpoint only please */
2457 SCTP_INP_RLOCK(inp);
2458 srto->srto_initial = inp->sctp_ep.initial_rto;
2459 srto->srto_max = inp->sctp_ep.sctp_maxrto;
2460 srto->srto_min = inp->sctp_ep.sctp_minrto;
2461 SCTP_INP_RUNLOCK(inp);
2464 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2465 SCTP_INP_RLOCK(inp);
2466 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2468 SCTP_TCB_LOCK(stcb);
2469 SCTP_INP_RUNLOCK(inp);
2471 stcb = sctp_findassociation_ep_asocid(inp, srto->srto_assoc_id);
2477 srto->srto_initial = stcb->asoc.initial_rto;
2478 srto->srto_max = stcb->asoc.maxrto;
2479 srto->srto_min = stcb->asoc.minrto;
2480 SCTP_TCB_UNLOCK(stcb);
2481 m->m_len = sizeof(*srto);
2484 case SCTP_ASSOCINFO:
2486 struct sctp_assocparams *sasoc;
2488 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2489 kprintf("Associnfo\n");
2491 #endif /* SCTP_DEBUG */
2492 if ((size_t)m->m_len < sizeof(struct sctp_assocparams)) {
2496 sasoc = mtod(m, struct sctp_assocparams *);
2499 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2500 SCTP_INP_RLOCK(inp);
2501 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2503 SCTP_TCB_LOCK(stcb);
2504 SCTP_INP_RUNLOCK(inp);
2506 if ((sasoc->sasoc_assoc_id) && (stcb == NULL)) {
2507 stcb = sctp_findassociation_ep_asocid(inp,
2508 sasoc->sasoc_assoc_id);
2518 sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
2519 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
2520 sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
2521 sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
2522 sasoc->sasoc_cookie_life = stcb->asoc.cookie_life;
2523 SCTP_TCB_UNLOCK(stcb);
/* No association: report endpoint defaults; local rwnd from socket buffer. */
2525 SCTP_INP_RLOCK(inp);
2526 sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
2527 sasoc->sasoc_number_peer_destinations = 0;
2528 sasoc->sasoc_peer_rwnd = 0;
2529 sasoc->sasoc_local_rwnd = ssb_space(&inp->sctp_socket->so_rcv);
2530 sasoc->sasoc_cookie_life = inp->sctp_ep.def_cookie_life;
2531 SCTP_INP_RUNLOCK(inp);
2533 m->m_len = sizeof(*sasoc);
2536 case SCTP_DEFAULT_SEND_PARAM:
2538 struct sctp_sndrcvinfo *s_info;
2540 if (m->m_len != sizeof(struct sctp_sndrcvinfo)) {
2544 s_info = mtod(m, struct sctp_sndrcvinfo *);
2545 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2546 SCTP_INP_RLOCK(inp);
2547 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2549 SCTP_TCB_LOCK(stcb);
2550 SCTP_INP_RUNLOCK(inp);
2552 stcb = sctp_findassociation_ep_asocid(inp, s_info->sinfo_assoc_id);
2559 *s_info = stcb->asoc.def_send;
2560 SCTP_TCB_UNLOCK(stcb);
2561 m->m_len = sizeof(*s_info);
2565 struct sctp_initmsg *sinit;
2567 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2568 kprintf("initmsg\n");
2570 #endif /* SCTP_DEBUG */
2571 if ((size_t)m->m_len < sizeof(struct sctp_initmsg)) {
2575 sinit = mtod(m, struct sctp_initmsg *);
2576 SCTP_INP_RLOCK(inp);
2577 sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count;
2578 sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome;
2579 sinit->sinit_max_attempts = inp->sctp_ep.max_init_times;
2580 sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max;
2581 SCTP_INP_RUNLOCK(inp);
2582 m->m_len = sizeof(*sinit);
2585 case SCTP_PRIMARY_ADDR:
2586 /* we allow a "get" operation on this */
2588 struct sctp_setprim *ssp;
2591 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2592 kprintf("setprimary\n");
2594 #endif /* SCTP_DEBUG */
2595 if ((size_t)m->m_len < sizeof(struct sctp_setprim)) {
2599 ssp = mtod(m, struct sctp_setprim *);
2600 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2601 SCTP_INP_RLOCK(inp);
2602 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2604 SCTP_TCB_LOCK(stcb);
2605 SCTP_INP_RUNLOCK(inp);
2607 stcb = sctp_findassociation_ep_asocid(inp, ssp->ssp_assoc_id);
2609 /* one last shot, try it by the address in */
2610 struct sctp_nets *net;
2612 SCTP_INP_WLOCK(inp);
2613 SCTP_INP_INCR_REF(inp);
2614 SCTP_INP_WUNLOCK(inp);
2615 stcb = sctp_findassociation_ep_addr(&inp,
2616 (struct sockaddr *)&ssp->ssp_addr,
2619 SCTP_INP_WLOCK(inp);
2620 SCTP_INP_DECR_REF(inp);
2621 SCTP_INP_WUNLOCK(inp);
2629 /* simply copy out the sockaddr_storage... */
2630 memcpy(&ssp->ssp_addr,
2631 &stcb->asoc.primary_destination->ro._l_addr,
2632 ((struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr)->sa_len);
2633 SCTP_TCB_UNLOCK(stcb);
2634 m->m_len = sizeof(*ssp);
2638 error = ENOPROTOOPT;
2641 } /* end switch (sopt->sopt_name) */
2646 sctp_optsset(struct socket *so,
2649 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
2656 int error, *mopt, set_opt;
2658 struct sctp_tcb *stcb = NULL;
2659 struct sctp_inpcb *inp;
2663 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2664 kprintf("optsset:MP is NULL EINVAL\n");
2666 #endif /* SCTP_DEBUG */
2673 inp = (struct sctp_inpcb *)so->so_pcb;
2680 case SCTP_AUTOCLOSE:
2681 case SCTP_AUTO_ASCONF:
2682 case SCTP_DISABLE_FRAGMENTS:
2683 case SCTP_I_WANT_MAPPED_V4_ADDR:
2684 /* copy in the option value */
2685 if ((size_t)m->m_len < sizeof(int)) {
2689 mopt = mtod(m, int *);
2694 case SCTP_DISABLE_FRAGMENTS:
2695 set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT;
2697 case SCTP_AUTO_ASCONF:
2698 set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
2701 case SCTP_I_WANT_MAPPED_V4_ADDR:
2702 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2703 set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
2709 set_opt = SCTP_PCB_FLAGS_NODELAY;
2711 case SCTP_AUTOCLOSE:
2712 set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
2714 * The value is in ticks.
2715 * Note this does not effect old associations, only
2718 inp->sctp_ep.auto_close_time = (*mopt * hz);
2721 SCTP_INP_WLOCK(inp);
2723 inp->sctp_flags |= set_opt;
2725 inp->sctp_flags &= ~set_opt;
2727 SCTP_INP_WUNLOCK(inp);
2729 case SCTP_MY_PUBLIC_KEY: /* set my public key */
2730 case SCTP_SET_AUTH_CHUNKS: /* set the authenticated chunks required */
2731 case SCTP_SET_AUTH_SECRET: /* set the actual secret for the endpoint */
2732 /* not supported yet and until we refine the draft */
2736 case SCTP_CLR_STAT_LOG:
2737 #ifdef SCTP_STAT_LOGGING
2738 sctp_clr_stat_log();
2743 case SCTP_DELAYED_ACK_TIME:
2746 if ((size_t)m->m_len < sizeof(int32_t)) {
2750 tm = mtod(m, int32_t *);
2752 if ((*tm < 10) || (*tm > 500)) {
2753 /* can't be smaller than 10ms */
2754 /* MUST NOT be larger than 500ms */
2758 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(*tm);
2761 case SCTP_RESET_STREAMS:
2763 struct sctp_stream_reset *strrst;
2764 uint8_t two_way, not_peer;
2766 if ((size_t)m->m_len < sizeof(struct sctp_stream_reset)) {
2770 strrst = mtod(m, struct sctp_stream_reset *);
2772 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2773 SCTP_INP_RLOCK(inp);
2774 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2776 SCTP_TCB_LOCK(stcb);
2777 SCTP_INP_RUNLOCK(inp);
2779 stcb = sctp_findassociation_ep_asocid(inp, strrst->strrst_assoc_id);
2784 if (stcb->asoc.peer_supports_strreset == 0) {
2785 /* Peer does not support it,
2786 * we return protocol not supported since
2787 * this is true for this feature and this
2788 * peer, not the socket request in general.
2790 error = EPROTONOSUPPORT;
2791 SCTP_TCB_UNLOCK(stcb);
2795 /* Having re-thought this code I added as I write the I-D there
2796 * is NO need for it. The peer, if we are requesting a stream-reset
2797 * will send a request to us but will itself do what we do, take
2798 * and copy off the "reset information" we send and queue TSN's
2799 * larger than the send-next in our response message. Thus they
2802 /* if (stcb->asoc.sending_seq != (stcb->asoc.last_acked_seq + 1)) {*/
2803 /* Must have all sending data ack'd before we
2804 * start this procedure. This is a bit restrictive
2805 * and we SHOULD work on changing this so ONLY the
2806 * streams being RESET get held up. So, a reset-all
2807 * would require this.. but a reset specific just
2808 * needs to be sure that the ones being reset have
2809 * nothing on the send_queue. For now we will
2810 * skip this more detailed method and do a course
2811 * way.. i.e. nothing pending ... for future FIX ME!
2817 if (stcb->asoc.stream_reset_outstanding) {
2819 SCTP_TCB_UNLOCK(stcb);
2822 if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) {
2825 } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) {
2828 } else if (strrst->strrst_flags == SCTP_RESET_BOTH) {
2833 SCTP_TCB_UNLOCK(stcb);
2836 sctp_send_str_reset_req(stcb, strrst->strrst_num_streams,
2837 strrst->strrst_list, two_way, not_peer);
2838 sctp_chunk_output(inp, stcb, 12);
2839 SCTP_TCB_UNLOCK(stcb);
2843 case SCTP_RESET_PEGS:
2844 memset(sctp_pegs, 0, sizeof(sctp_pegs));
2847 case SCTP_CONNECT_X:
2848 if ((size_t)m->m_len < (sizeof(int) + sizeof(struct sockaddr_in))) {
2852 error = sctp_do_connect_x(so, inp, m, p, 0);
2855 case SCTP_CONNECT_X_DELAYED:
2856 if ((size_t)m->m_len < (sizeof(int) + sizeof(struct sockaddr_in))) {
2860 error = sctp_do_connect_x(so, inp, m, p, 1);
2863 case SCTP_CONNECT_X_COMPLETE:
2865 struct sockaddr *sa;
2866 struct sctp_nets *net;
2867 if ((size_t)m->m_len < sizeof(struct sockaddr_in)) {
2871 sa = mtod(m, struct sockaddr *);
2873 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2874 SCTP_INP_RLOCK(inp);
2875 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2877 SCTP_TCB_LOCK(stcb);
2878 net = sctp_findnet(stcb, sa);
2880 SCTP_INP_RUNLOCK(inp);
2882 SCTP_INP_WLOCK(inp);
2883 SCTP_INP_INCR_REF(inp);
2884 SCTP_INP_WUNLOCK(inp);
2885 stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL);
2887 SCTP_INP_WLOCK(inp);
2888 SCTP_INP_DECR_REF(inp);
2889 SCTP_INP_WUNLOCK(inp);
2897 if (stcb->asoc.delayed_connection == 1) {
2898 stcb->asoc.delayed_connection = 0;
2899 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
2900 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
2901 sctp_send_initiate(inp, stcb);
2903 /* already expired or did not use delayed connectx */
2906 SCTP_TCB_UNLOCK(stcb);
2912 SCTP_INP_WLOCK(inp);
2913 burst = mtod(m, u_int8_t *);
2915 inp->sctp_ep.max_burst = *burst;
2917 SCTP_INP_WUNLOCK(inp);
2924 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2925 ovh = SCTP_MED_OVERHEAD;
2927 ovh = SCTP_MED_V4_OVERHEAD;
2929 segsize = mtod(m, u_int32_t *);
2934 SCTP_INP_WLOCK(inp);
2935 inp->sctp_frag_point = (*segsize+ovh);
2936 if (inp->sctp_frag_point < MHLEN) {
2937 inp->sctp_frag_point = MHLEN;
2939 SCTP_INP_WUNLOCK(inp);
2942 case SCTP_SET_DEBUG_LEVEL:
2946 if ((size_t)m->m_len < sizeof(u_int32_t)) {
2950 level = mtod(m, u_int32_t *);
2952 sctp_debug_on = (*level & (SCTP_DEBUG_ALL |
2954 kprintf("SETTING DEBUG LEVEL to %x\n",
2955 (u_int)sctp_debug_on);
2960 #endif /* SCTP_DEBUG */
2964 struct sctp_event_subscribe *events;
2965 if ((size_t)m->m_len < sizeof(struct sctp_event_subscribe)) {
2969 SCTP_INP_WLOCK(inp);
2970 events = mtod(m, struct sctp_event_subscribe *);
2971 if (events->sctp_data_io_event) {
2972 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVDATAIOEVNT;
2974 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVDATAIOEVNT;
2977 if (events->sctp_association_event) {
2978 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVASSOCEVNT;
2980 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVASSOCEVNT;
2983 if (events->sctp_address_event) {
2984 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVPADDREVNT;
2986 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVPADDREVNT;
2989 if (events->sctp_send_failure_event) {
2990 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVSENDFAILEVNT;
2992 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVSENDFAILEVNT;
2995 if (events->sctp_peer_error_event) {
2996 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVPEERERR;
2998 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVPEERERR;
3001 if (events->sctp_shutdown_event) {
3002 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT;
3004 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT;
3007 if (events->sctp_partial_delivery_event) {
3008 inp->sctp_flags |= SCTP_PCB_FLAGS_PDAPIEVNT;
3010 inp->sctp_flags &= ~SCTP_PCB_FLAGS_PDAPIEVNT;
3013 if (events->sctp_adaption_layer_event) {
3014 inp->sctp_flags |= SCTP_PCB_FLAGS_ADAPTIONEVNT;
3016 inp->sctp_flags &= ~SCTP_PCB_FLAGS_ADAPTIONEVNT;
3019 if (events->sctp_stream_reset_events) {
3020 inp->sctp_flags |= SCTP_PCB_FLAGS_STREAM_RESETEVNT;
3022 inp->sctp_flags &= ~SCTP_PCB_FLAGS_STREAM_RESETEVNT;
3024 SCTP_INP_WUNLOCK(inp);
3028 case SCTP_ADAPTION_LAYER:
3030 struct sctp_setadaption *adap_bits;
3031 if ((size_t)m->m_len < sizeof(struct sctp_setadaption)) {
3035 SCTP_INP_WLOCK(inp);
3036 adap_bits = mtod(m, struct sctp_setadaption *);
3037 inp->sctp_ep.adaption_layer_indicator = adap_bits->ssb_adaption_ind;
3038 SCTP_INP_WUNLOCK(inp);
3041 case SCTP_SET_INITIAL_DBG_SEQ:
3044 if ((size_t)m->m_len < sizeof(u_int32_t)) {
3048 SCTP_INP_WLOCK(inp);
3049 vvv = mtod(m, u_int32_t *);
3050 inp->sctp_ep.initial_sequence_debug = *vvv;
3051 SCTP_INP_WUNLOCK(inp);
3054 case SCTP_DEFAULT_SEND_PARAM:
3056 struct sctp_sndrcvinfo *s_info;
3058 if (m->m_len != sizeof(struct sctp_sndrcvinfo)) {
3062 s_info = mtod(m, struct sctp_sndrcvinfo *);
3064 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3065 SCTP_INP_RLOCK(inp);
3066 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3068 SCTP_TCB_LOCK(stcb);
3069 SCTP_INP_RUNLOCK(inp);
3071 stcb = sctp_findassociation_ep_asocid(inp, s_info->sinfo_assoc_id);
3077 /* Validate things */
3078 if (s_info->sinfo_stream > stcb->asoc.streamoutcnt) {
3079 SCTP_TCB_UNLOCK(stcb);
3083 /* Mask off the flags that are allowed */
3084 s_info->sinfo_flags = (s_info->sinfo_flags &
3085 (MSG_UNORDERED | MSG_ADDR_OVER |
3086 MSG_PR_SCTP_TTL | MSG_PR_SCTP_BUF));
3088 stcb->asoc.def_send = *s_info;
3089 SCTP_TCB_UNLOCK(stcb);
3092 case SCTP_PEER_ADDR_PARAMS:
3094 struct sctp_paddrparams *paddrp;
3095 struct sctp_nets *net;
3096 if ((size_t)m->m_len < sizeof(struct sctp_paddrparams)) {
3100 paddrp = mtod(m, struct sctp_paddrparams *);
3102 if (paddrp->spp_assoc_id) {
3103 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3104 SCTP_INP_RLOCK(inp);
3105 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3107 SCTP_TCB_LOCK(stcb);
3108 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
3110 SCTP_INP_RUNLOCK(inp);
3112 stcb = sctp_findassociation_ep_asocid(inp, paddrp->spp_assoc_id);
3119 if ((stcb == NULL) &&
3120 ((((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET) ||
3121 (((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET6))) {
3122 /* Lookup via address */
3123 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3124 SCTP_INP_RLOCK(inp);
3125 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3127 SCTP_TCB_LOCK(stcb);
3128 net = sctp_findnet(stcb,
3129 (struct sockaddr *)&paddrp->spp_address);
3131 SCTP_INP_RUNLOCK(inp);
3133 SCTP_INP_WLOCK(inp);
3134 SCTP_INP_INCR_REF(inp);
3135 SCTP_INP_WUNLOCK(inp);
3136 stcb = sctp_findassociation_ep_addr(&inp,
3137 (struct sockaddr *)&paddrp->spp_address,
3140 SCTP_INP_WLOCK(inp);
3141 SCTP_INP_DECR_REF(inp);
3142 SCTP_INP_WUNLOCK(inp);
3146 /* Effects the Endpoint */
3150 /* Applies to the specific association */
3151 if (paddrp->spp_pathmaxrxt) {
3153 if (paddrp->spp_pathmaxrxt)
3154 net->failure_threshold = paddrp->spp_pathmaxrxt;
3156 if (paddrp->spp_pathmaxrxt)
3157 stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt;
3160 if ((paddrp->spp_hbinterval != 0) && (paddrp->spp_hbinterval != 0xffffffff)) {
3164 net->dest_state &= ~SCTP_ADDR_NOHB;
3166 old = stcb->asoc.heart_beat_delay;
3167 stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval;
3169 /* Turn back on the timer */
3170 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3173 } else if (paddrp->spp_hbinterval == 0xffffffff) {
3175 sctp_send_hb(stcb, 1, net);
3178 /* off on association */
3179 if (stcb->asoc.heart_beat_delay) {
3180 int cnt_of_unconf = 0;
3181 struct sctp_nets *lnet;
3182 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
3183 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
3187 /* stop the timer ONLY if we have no unconfirmed addresses
3189 if (cnt_of_unconf == 0)
3190 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3192 stcb->asoc.heart_beat_delay = 0;
3194 net->dest_state |= SCTP_ADDR_NOHB;
3197 SCTP_TCB_UNLOCK(stcb);
3199 /* Use endpoint defaults */
3200 SCTP_INP_WLOCK(inp);
3201 if (paddrp->spp_pathmaxrxt)
3202 inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt;
3203 if (paddrp->spp_hbinterval != SCTP_ISSUE_HB)
3204 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = paddrp->spp_hbinterval;
3205 SCTP_INP_WUNLOCK(inp);
3211 struct sctp_rtoinfo *srto;
3212 if ((size_t)m->m_len < sizeof(struct sctp_rtoinfo)) {
3216 srto = mtod(m, struct sctp_rtoinfo *);
3217 if (srto->srto_assoc_id == 0) {
3218 SCTP_INP_WLOCK(inp);
3219 /* If we have a null asoc, its default for the endpoint */
3220 if (srto->srto_initial > 10)
3221 inp->sctp_ep.initial_rto = srto->srto_initial;
3222 if (srto->srto_max > 10)
3223 inp->sctp_ep.sctp_maxrto = srto->srto_max;
3224 if (srto->srto_min > 10)
3225 inp->sctp_ep.sctp_minrto = srto->srto_min;
3226 SCTP_INP_WUNLOCK(inp);
3229 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3230 SCTP_INP_RLOCK(inp);
3231 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3233 SCTP_TCB_LOCK(stcb);
3234 SCTP_INP_RUNLOCK(inp);
3236 stcb = sctp_findassociation_ep_asocid(inp, srto->srto_assoc_id);
3241 /* Set in ms we hope :-) */
3242 if (srto->srto_initial > 10)
3243 stcb->asoc.initial_rto = srto->srto_initial;
3244 if (srto->srto_max > 10)
3245 stcb->asoc.maxrto = srto->srto_max;
3246 if (srto->srto_min > 10)
3247 stcb->asoc.minrto = srto->srto_min;
3248 SCTP_TCB_UNLOCK(stcb);
3251 case SCTP_ASSOCINFO:
3253 struct sctp_assocparams *sasoc;
3255 if ((size_t)m->m_len < sizeof(struct sctp_assocparams)) {
3259 sasoc = mtod(m, struct sctp_assocparams *);
3260 if (sasoc->sasoc_assoc_id) {
3261 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3262 SCTP_INP_RLOCK(inp);
3263 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3265 SCTP_TCB_LOCK(stcb);
3266 SCTP_INP_RUNLOCK(inp);
3268 stcb = sctp_findassociation_ep_asocid(inp,
3269 sasoc->sasoc_assoc_id);
3279 if (sasoc->sasoc_asocmaxrxt)
3280 stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
3281 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
3282 sasoc->sasoc_peer_rwnd = 0;
3283 sasoc->sasoc_local_rwnd = 0;
3284 if (stcb->asoc.cookie_life)
3285 stcb->asoc.cookie_life = sasoc->sasoc_cookie_life;
3286 SCTP_TCB_UNLOCK(stcb);
3288 SCTP_INP_WLOCK(inp);
3289 if (sasoc->sasoc_asocmaxrxt)
3290 inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt;
3291 sasoc->sasoc_number_peer_destinations = 0;
3292 sasoc->sasoc_peer_rwnd = 0;
3293 sasoc->sasoc_local_rwnd = 0;
3294 if (sasoc->sasoc_cookie_life)
3295 inp->sctp_ep.def_cookie_life = sasoc->sasoc_cookie_life;
3296 SCTP_INP_WUNLOCK(inp);
3302 struct sctp_initmsg *sinit;
3304 if ((size_t)m->m_len < sizeof(struct sctp_initmsg)) {
3308 sinit = mtod(m, struct sctp_initmsg *);
3309 SCTP_INP_WLOCK(inp);
3310 if (sinit->sinit_num_ostreams)
3311 inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams;
3313 if (sinit->sinit_max_instreams)
3314 inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams;
3316 if (sinit->sinit_max_attempts)
3317 inp->sctp_ep.max_init_times = sinit->sinit_max_attempts;
3319 if (sinit->sinit_max_init_timeo > 10)
3320 /* We must be at least a 100ms (we set in ticks) */
3321 inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo;
3322 SCTP_INP_WUNLOCK(inp);
3325 case SCTP_PRIMARY_ADDR:
3327 struct sctp_setprim *spa;
3328 struct sctp_nets *net, *lnet;
3329 if ((size_t)m->m_len < sizeof(struct sctp_setprim)) {
3333 spa = mtod(m, struct sctp_setprim *);
3335 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3336 SCTP_INP_RLOCK(inp);
3337 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3339 SCTP_TCB_LOCK(stcb);
3344 SCTP_INP_RUNLOCK(inp);
3346 stcb = sctp_findassociation_ep_asocid(inp, spa->ssp_assoc_id);
3349 SCTP_INP_WLOCK(inp);
3350 SCTP_INP_INCR_REF(inp);
3351 SCTP_INP_WUNLOCK(inp);
3352 stcb = sctp_findassociation_ep_addr(&inp,
3353 (struct sockaddr *)&spa->ssp_addr,
3356 SCTP_INP_WLOCK(inp);
3357 SCTP_INP_DECR_REF(inp);
3358 SCTP_INP_WUNLOCK(inp);
3363 /* find the net, associd or connected lookup type */
3364 net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr);
3366 SCTP_TCB_UNLOCK(stcb);
3371 if ((net != stcb->asoc.primary_destination) &&
3372 (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
3373 /* Ok we need to set it */
3374 lnet = stcb->asoc.primary_destination;
3375 lnet->next_tsn_at_change = net->next_tsn_at_change = stcb->asoc.sending_seq;
3376 if (sctp_set_primary_addr(stcb,
3379 if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3380 net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH;
3382 net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY;
3385 SCTP_TCB_UNLOCK(stcb);
3389 case SCTP_SET_PEER_PRIMARY_ADDR:
3391 struct sctp_setpeerprim *sspp;
3392 if ((size_t)m->m_len < sizeof(struct sctp_setpeerprim)) {
3396 sspp = mtod(m, struct sctp_setpeerprim *);
3399 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3400 SCTP_INP_RLOCK(inp);
3401 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3403 SCTP_TCB_UNLOCK(stcb);
3404 SCTP_INP_RUNLOCK(inp);
3406 stcb = sctp_findassociation_ep_asocid(inp, sspp->sspp_assoc_id);
3411 if (sctp_set_primary_ip_address_sa(stcb, (struct sockaddr *)&sspp->sspp_addr) != 0) {
3414 SCTP_TCB_UNLOCK(stcb);
3417 case SCTP_BINDX_ADD_ADDR:
3419 struct sctp_getaddresses *addrs;
3420 struct sockaddr *addr_touse;
3421 struct sockaddr_in sin;
3422 /* see if we're bound all already! */
3423 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3427 if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
3431 addrs = mtod(m, struct sctp_getaddresses *);
3432 addr_touse = addrs->addr;
3433 if (addrs->addr->sa_family == AF_INET6) {
3434 struct sockaddr_in6 *sin6;
3435 sin6 = (struct sockaddr_in6 *)addr_touse;
3436 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
3437 in6_sin6_2_sin(&sin, sin6);
3438 addr_touse = (struct sockaddr *)&sin;
3441 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
3443 /* Can't get proc for Net/Open BSD */
3447 error = sctp_inpcb_bind(so, addr_touse, p);
3450 /* No locks required here since bind and mgmt_ep_sa all
3451 * do their own locking. If we do something for the FIX:
3452 * below we may need to lock in that case.
3454 if (addrs->sget_assoc_id == 0) {
3455 /* add the address */
3456 struct sctp_inpcb *lep;
3457 ((struct sockaddr_in *)addr_touse)->sin_port = inp->sctp_lport;
3458 lep = sctp_pcb_findep(addr_touse, 1, 0);
3460 /* We must decrement the refcount
3461 * since we have the ep already and
3462 * are binding. No remove going on
3465 SCTP_INP_WLOCK(inp);
3466 SCTP_INP_DECR_REF(inp);
3467 SCTP_INP_WUNLOCK(inp);
3470 /* already bound to it.. ok */
3472 } else if (lep == NULL) {
3473 ((struct sockaddr_in *)addr_touse)->sin_port = 0;
3474 error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
3475 SCTP_ADD_IP_ADDRESS);
3477 error = EADDRNOTAVAIL;
3483 /* FIX: decide whether we allow assoc based bindx */
3487 case SCTP_BINDX_REM_ADDR:
3489 struct sctp_getaddresses *addrs;
3490 struct sockaddr *addr_touse;
3491 struct sockaddr_in sin;
3492 /* see if we're bound all already! */
3493 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3497 if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
3501 addrs = mtod(m, struct sctp_getaddresses *);
3502 addr_touse = addrs->addr;
3503 if (addrs->addr->sa_family == AF_INET6) {
3504 struct sockaddr_in6 *sin6;
3505 sin6 = (struct sockaddr_in6 *)addr_touse;
3506 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
3507 in6_sin6_2_sin(&sin, sin6);
3508 addr_touse = (struct sockaddr *)&sin;
3511 /* No lock required mgmt_ep_sa does its own locking. If
3512 * the FIX: below is ever changed we may need to
3513 * lock before calling association level binding.
3515 if (addrs->sget_assoc_id == 0) {
3516 /* delete the address */
3517 sctp_addr_mgmt_ep_sa(inp, addr_touse,
3518 SCTP_DEL_IP_ADDRESS);
3520 /* FIX: decide whether we allow assoc based bindx */
3525 error = ENOPROTOOPT;
3527 } /* end switch (opt) */
/*
 * sctp_ctloutput() - netmsg handler for [gs]etsockopt() on an SCTP socket.
 * Copies the user's option value into a private mbuf (at most one cluster,
 * MCLBYTES), dispatches to sctp_optsset()/sctp_optsget(), copies any result
 * back out for GET, and replies to the lwkt message with the errno.
 * Non-IPPROTO_SCTP levels are forwarded to the IP/IPv6 ctloutput path.
 */
3532 sctp_ctloutput(netmsg_t msg)
3534 struct socket *so = msg->ctloutput.base.nm_so;
3535 struct sockopt *sopt = msg->ctloutput.nm_sopt;
3536 struct mbuf *m = NULL;
3537 struct sctp_inpcb *inp;
3540 inp = (struct sctp_inpcb *)so->so_pcb;
3543 /* I made the same as TCP since we are not setup? */
3547 if (sopt->sopt_level != IPPROTO_SCTP) {
3548 /* wrong proto level... send back up to IP */
3550 if (INP_CHECK_SOCKAF(so, AF_INET6))
3551 ip6_ctloutput_dispatch(msg);
/* after dispatching to the IP layer the netmsg belongs to it */
3555 /* msg invalid now */
3558 if (sopt->sopt_valsize > MCLBYTES) {
3560 * Restrict us down to a cluster size, that's all we can
3561 * pass either way...
3563 sopt->sopt_valsize = MCLBYTES;
3565 if (sopt->sopt_valsize) {
3567 m = m_get(MB_WAIT, MT_DATA);
3568 if (sopt->sopt_valsize > MLEN) {
/* value larger than a plain mbuf: attach a cluster (may fail) */
3569 MCLGET(m, MB_DONTWAIT);
3570 if ((m->m_flags & M_EXT) == 0) {
3576 error = sooptcopyin(sopt, mtod(m, caddr_t), sopt->sopt_valsize,
3577 sopt->sopt_valsize);
3582 m->m_len = sopt->sopt_valsize;
3584 if (sopt->sopt_dir == SOPT_SET) {
3585 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
3586 error = sctp_optsset(so, sopt->sopt_name, &m, sopt->sopt_td);
3588 error = sctp_optsset(so, sopt->sopt_name, &m, sopt->sopt_p);
3590 } else if (sopt->sopt_dir == SOPT_GET) {
3591 #if (defined (__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
3592 error = sctp_optsget(so, sopt->sopt_name, &m, sopt->sopt_td);
3594 error = sctp_optsget(so, sopt->sopt_name, &m, sopt->sopt_p);
/* GET success: copy the (possibly rewritten) mbuf back to the user. */
3599 if ( (error == 0) && (m != NULL)) {
3600 error = sooptcopyout(sopt, mtod(m, caddr_t), m->m_len);
3602 } else if (m != NULL) {
3606 lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_connect() - netmsg handler for connect(2) on an SCTP socket.
 * Validates the socket state (not torn down, address family compatible,
 * not already connected in the one-to-one model), binds an ephemeral
 * port if still unbound, refuses if an association to 'addr' already
 * exists, then allocates a new association, marks it COOKIE_WAIT and
 * sends the INIT.  Replies to the lwkt message with the errno.
 *
 * Fix: the "socket gone" guard tested SCTP_PCB_FLAGS_SOCKET_GONE twice,
 * making the second operand of the || dead; the companion flag
 * SCTP_PCB_FLAGS_SOCKET_ALLGONE (the other teardown state used by this
 * stack's PCB code) is the intended second test.
 */
3610 sctp_connect(netmsg_t msg)
3612 struct socket *so = msg->connect.base.nm_so;
3613 struct sockaddr *addr = msg->connect.nm_nam;
3614 struct sctp_inpcb *inp;
3615 struct sctp_tcb *stcb;
3619 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
3620 kprintf("Connect called in SCTP to ");
3621 sctp_print_address(addr);
3622 kprintf("Port %d\n", ntohs(((struct sockaddr_in *)addr)->sin_port));
3624 #endif /* SCTP_DEBUG */
3625 inp = (struct sctp_inpcb *)so->so_pcb;
3627 /* I made the same as TCP since we are not setup? */
3631 SCTP_ASOC_CREATE_LOCK(inp);
3632 SCTP_INP_WLOCK(inp);
/* Refuse to connect on an endpoint that is being torn down. */
3633 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3634 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
3635 /* Should I really unlock ? */
3636 SCTP_INP_WUNLOCK(inp);
3637 SCTP_ASOC_CREATE_UNLOCK(inp);
/* A v4-bound socket cannot connect to an IPv6 destination. */
3642 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
3643 (addr->sa_family == AF_INET6)) {
3644 SCTP_INP_WUNLOCK(inp);
3645 SCTP_ASOC_CREATE_UNLOCK(inp);
3650 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
3651 SCTP_PCB_FLAGS_UNBOUND) {
3652 /* Bind a ephemeral port */
3653 SCTP_INP_WUNLOCK(inp);
3654 error = sctp_inpcb_bind(so, NULL, msg->connect.nm_td);
3656 SCTP_ASOC_CREATE_UNLOCK(inp);
3659 SCTP_INP_WLOCK(inp);
3661 /* Now do we connect? */
3662 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3663 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
3664 /* We are already connected AND the TCP model */
3665 SCTP_INP_WUNLOCK(inp);
3666 SCTP_ASOC_CREATE_UNLOCK(inp);
3670 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3671 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3673 SCTP_TCB_UNLOCK(stcb);
3674 SCTP_INP_WUNLOCK(inp);
/* Hold an inp ref across the address-based lookup (it may block). */
3676 SCTP_INP_INCR_REF(inp);
3677 SCTP_INP_WUNLOCK(inp);
3678 stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
3680 SCTP_INP_WLOCK(inp);
3681 SCTP_INP_DECR_REF(inp);
3682 SCTP_INP_WUNLOCK(inp);
3686 /* Already have or am bring up an association */
3687 SCTP_ASOC_CREATE_UNLOCK(inp);
3688 SCTP_TCB_UNLOCK(stcb);
3692 /* We are GOOD to go */
3693 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0);
3695 /* Gak! no memory */
3698 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
3699 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
3700 /* Set the connected flag so we can queue data */
3703 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
3704 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
3705 sctp_send_initiate(inp, stcb);
3706 SCTP_ASOC_CREATE_UNLOCK(inp);
3707 SCTP_TCB_UNLOCK(stcb);
3709 lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_usr_recvd() - netmsg handler invoked after the user reads data
 * from the socket.  Adjusts the receive-window accounting, services the
 * delivery/reassembly queues if data is pending, and sends a window-update
 * SACK when the rwnd has grown enough.  Also performs the socket-queue
 * bookkeeping used by sctp_peeloff().  Replies with errno via lwkt.
 * NOTE(review): this extract has lines elided; comments describe only
 * what the retained lines establish.
 */
3713 sctp_usr_recvd(netmsg_t msg)
3715 struct socket *so = msg->rcvd.base.nm_so;
3716 struct sctp_socket_q_list *sq = NULL;
3717 int flags = msg->rcvd.nm_flags;
3721 * The user has received some data, we may be able to stuff more
3722 * up the socket. And we need to possibly update the rwnd.
3724 struct sctp_inpcb *inp;
3725 struct sctp_tcb *stcb=NULL;
3727 inp = (struct sctp_inpcb *)so->so_pcb;
3729 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3730 kprintf("Read for so:%x inp:%x Flags:%x\n",
3731 (u_int)so, (u_int)inp, (u_int)flags);
3735 /* I made the same as TCP since we are not setup? */
3737 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3738 kprintf("Nope, connection reset\n");
3744 * Grab the first one on the list. It will re-insert itself if
3745 * it runs out of room
3747 SCTP_INP_WLOCK(inp);
/* One-to-many (non-TCP-pool, unconnected) socket at message boundary:
 * update the peeloff vtag bookkeeping. */
3748 if ((flags & MSG_EOR) && ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0)
3749 && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3750 /* Ok the other part of our grubby tracking
3751 * stuff for our horrible layer violation that
3752 * the tsvwg thinks is ok for sctp_peeloff.. gak!
3753 * We must update the next vtag pending on the
3754 * socket buffer (if any).
3756 inp->sctp_vtag_first = sctp_get_first_vtag_from_sb(so);
3757 sq = TAILQ_FIRST(&inp->sctp_queue_list);
3764 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3767 SCTP_TCB_LOCK(stcb);
3770 /* all code in normal stcb path assumes
3771 * that you have a tcb_lock only. Thus
3772 * we must release the inp write lock.
3774 if (flags & MSG_EOR) {
3775 if (((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0)
3776 && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3777 stcb = sctp_remove_from_socket_q(inp);
3780 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3781 kprintf("remove from socket queue for inp:%x tcbret:%x\n",
3782 (u_int)inp, (u_int)stcb);
/* Give back the control-mbuf overhead that the read just released. */
3785 stcb->asoc.my_rwnd_control_len = sctp_sbspace_sub(stcb->asoc.my_rwnd_control_len,
3786 sizeof(struct mbuf));
3787 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) {
3788 stcb->asoc.my_rwnd_control_len = sctp_sbspace_sub(stcb->asoc.my_rwnd_control_len,
3789 CMSG_LEN(sizeof(struct sctp_sndrcvinfo)));
3792 if ((TAILQ_EMPTY(&stcb->asoc.delivery_queue) == 0) ||
3793 (TAILQ_EMPTY(&stcb->asoc.reasmqueue) == 0)) {
3794 /* Deliver if there is something to be delivered */
3795 sctp_service_queues(stcb, &stcb->asoc, 1);
3797 sctp_set_rwnd(stcb, &stcb->asoc);
3798 /* if we increase by 1 or more MTU's (smallest MTUs of all
3799 * nets) we send a window update sack
3801 incr = stcb->asoc.my_rwnd - stcb->asoc.my_last_reported_rwnd;
3805 if (((uint32_t)incr >= (stcb->asoc.smallest_mtu * SCTP_SEG_TO_RWND_UPD)) ||
3806 ((((uint32_t)incr)*SCTP_SCALE_OF_RWND_TO_UPD) >= so->so_rcv.ssb_hiwat)) {
3807 if (callout_pending(&stcb->asoc.dack_timer.timer)) {
3808 /* If the timer is up, stop it */
3809 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
3810 stcb->sctp_ep, stcb, NULL);
3812 /* Send the sack, with the new rwnd */
3813 sctp_send_sack(stcb);
3814 /* Now do the output */
3815 sctp_chunk_output(inp, stcb, 10);
3818 if ((( sq ) && (flags & MSG_EOR) && ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0))
3819 && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3820 stcb = sctp_remove_from_socket_q(inp);
3823 SOCKBUF_LOCK(&so->so_rcv);
/* Consistency repair: receive buffer empty but socket queue not --
 * drain the stale queue entries. */
3824 if (( so->so_rcv.ssb_mb == NULL ) &&
3825 (TAILQ_EMPTY(&inp->sctp_queue_list) == 0)) {
3828 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3829 kprintf("Something off, inp:%x so->so_rcv->ssb_mb is empty and sockq is not.. cleaning\n",
3832 if (((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0)
3833 && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3835 done_yet = TAILQ_EMPTY(&inp->sctp_queue_list);
3838 sctp_remove_from_socket_q(inp);
3839 done_yet = TAILQ_EMPTY(&inp->sctp_queue_list);
3843 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3844 kprintf("Cleaned up %d sockq's\n", sq_cnt);
3847 SOCKBUF_UNLOCK(&so->so_rcv);
3849 SCTP_TCB_UNLOCK(stcb);
3850 SCTP_INP_WUNLOCK(inp);
3853 lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_listen() - netmsg handler for listen(2).  Rejects a one-to-one
 * socket that is already connected, binds an ephemeral port if still
 * unbound, then adjusts the accept-related flags according to the
 * socket's backlog (so_qlimit) and the UDP vs TCP model.  Replies with
 * errno via lwkt.
 */
3857 sctp_listen(netmsg_t msg)
3859 struct socket *so = msg->listen.base.nm_so;
3863 * Note this module depends on the protocol processing being
3864 * called AFTER any socket level flags and backlog are applied
3865 * to the socket. The traditional way that the socket flags are
3866 * applied is AFTER protocol processing. We have made a change
3867 * to the sys/kern/uipc_socket.c module to reverse this but this
3868 * MUST be in place if the socket API for SCTP is to work properly.
3870 struct sctp_inpcb *inp;
3872 inp = (struct sctp_inpcb *)so->so_pcb;
3874 /* I made the same as TCP since we are not setup? */
3878 SCTP_INP_RLOCK(inp);
3879 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3880 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
3881 /* We are already connected AND the TCP model */
3882 SCTP_INP_RUNLOCK(inp);
3886 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
3887 /* We must do a bind. */
3888 SCTP_INP_RUNLOCK(inp);
3889 if ((error = sctp_inpcb_bind(so, NULL, msg->listen.nm_td))) {
3890 /* bind error, probably perm */
3894 SCTP_INP_RUNLOCK(inp);
3897 SCTP_INP_WLOCK(inp);
3898 if (inp->sctp_socket->so_qlimit) {
3899 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
3901 * For the UDP model we must TURN OFF the ACCEPT
3902 * flags since we do NOT allow the accept() call.
3903 * The TCP model (when present) will do accept which
3904 * then prohibits connect().
3906 inp->sctp_socket->so_options &= ~SO_ACCEPTCONN;
3908 inp->sctp_flags |= SCTP_PCB_FLAGS_ACCEPTING;
3910 if (inp->sctp_flags & SCTP_PCB_FLAGS_ACCEPTING) {
3912 * Turning off the listen flags if the backlog is
3913 * set to 0 (i.e. qlimit is 0).
3915 inp->sctp_flags &= ~SCTP_PCB_FLAGS_ACCEPTING;
3917 inp->sctp_socket->so_options &= ~SO_ACCEPTCONN;
3919 SCTP_INP_WUNLOCK(inp);
3923 lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_accept() - netmsg handler for accept(2).  Returns (via *addr)
 * a freshly allocated sockaddr describing the peer's primary destination
 * of the first association on the endpoint, then clears any deferred
 * wakeup flags and performs the pending read/write wakeups.  Replies
 * with errno via lwkt.
 */
3927 sctp_accept(netmsg_t msg)
3929 struct socket *so = msg->accept.base.nm_so;
3930 struct sockaddr **addr = msg->accept.nm_nam;
3931 struct sctp_tcb *stcb;
3932 struct sockaddr *prim;
3933 struct sctp_inpcb *inp;
3936 inp = (struct sctp_inpcb *)so->so_pcb;
3942 SCTP_INP_RLOCK(inp);
3943 if (so->so_state & SS_ISDISCONNECTED) {
3944 SCTP_INP_RUNLOCK(inp);
3945 error = ECONNABORTED;
3948 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3950 SCTP_INP_RUNLOCK(inp);
3954 SCTP_TCB_LOCK(stcb);
3955 SCTP_INP_RUNLOCK(inp);
/* Report the peer's primary path address to the caller. */
3956 prim = (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr;
3957 if (prim->sa_family == AF_INET) {
3958 struct sockaddr_in *sin;
3959 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
3960 MALLOC(sin, struct sockaddr_in *, sizeof *sin, M_SONAME,
3963 sin = (struct sockaddr_in *)addr;
3964 bzero((caddr_t)sin, sizeof (*sin));
3966 sin->sin_family = AF_INET;
3967 sin->sin_len = sizeof(*sin);
3968 sin->sin_port = ((struct sockaddr_in *)prim)->sin_port;
3969 sin->sin_addr = ((struct sockaddr_in *)prim)->sin_addr;
3970 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
3971 *addr = (struct sockaddr *)sin;
3973 nam->m_len = sizeof(*sin);
3976 struct sockaddr_in6 *sin6;
3977 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
3978 MALLOC(sin6, struct sockaddr_in6 *, sizeof *sin6, M_SONAME,
3981 sin6 = (struct sockaddr_in6 *)addr;
3983 bzero((caddr_t)sin6, sizeof (*sin6));
3984 sin6->sin6_family = AF_INET6;
3985 sin6->sin6_len = sizeof(*sin6);
3986 sin6->sin6_port = ((struct sockaddr_in6 *)prim)->sin6_port;
3988 sin6->sin6_addr = ((struct sockaddr_in6 *)prim)->sin6_addr;
/* Recover the embedded scope id for link-local addresses. */
3989 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr))
3990 /* sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);*/
3991 in6_recoverscope(sin6, &sin6->sin6_addr, NULL); /* skip ifp check */
3993 sin6->sin6_scope_id = 0; /*XXX*/
3994 #if defined(__FreeBSD__) || defined (__APPLE__) || defined(__DragonFly__)
3995 *addr= (struct sockaddr *)sin6;
3997 nam->m_len = sizeof(*sin6);
4000 /* Wake any delayed sleep action */
4001 SCTP_TCB_UNLOCK(stcb);
4002 SCTP_INP_WLOCK(inp);
4003 if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
4004 inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
4005 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
4006 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
4007 #if defined(__NetBSD__)
4008 if (sowritable(inp->sctp_socket))
4009 sowwakeup(inp->sctp_socket);
4011 if (sowriteable(inp->sctp_socket))
4012 sowwakeup(inp->sctp_socket);
4015 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
4016 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
4017 if (soreadable(inp->sctp_socket))
4018 sorwakeup(inp->sctp_socket);
4022 SCTP_INP_WUNLOCK(inp);
4025 lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_ingetaddr() - netmsg wrapper for getsockname(2): delegates to
 * sctp_ingetaddr_oncpu() and replies to the lwkt message with its errno.
 */
4030 sctp_ingetaddr(netmsg_t msg)
4034 error = sctp_ingetaddr_oncpu(msg->sockaddr.base.nm_so,
4035 msg->sockaddr.nm_nam);
4036 lwkt_replymsg(&msg->lmsg, error);
/*
 * sctp_ingetaddr_oncpu() - getsockname(2) work function.  Allocates a
 * sockaddr_in, fills in the local port and an appropriate local IPv4
 * address (source-address selection for a bound-all connected socket,
 * 0 for bound-all unconnected, else the first IPv4 address on the
 * endpoint's address list) and returns it through *addr.
 */
4040 sctp_ingetaddr_oncpu(struct socket *so, struct sockaddr **addr)
4042 struct sockaddr_in *sin;
4043 struct sctp_inpcb *inp;
4045 * Do the malloc first in case it blocks.
4047 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4048 MALLOC(sin, struct sockaddr_in *, sizeof *sin, M_SONAME, M_WAITOK |
4051 nam->m_len = sizeof(*sin);
4052 memset(sin, 0, sizeof(*sin));
4054 sin->sin_family = AF_INET;
4055 sin->sin_len = sizeof(*sin);
4056 inp = (struct sctp_inpcb *)so->so_pcb;
/* PCB gone: release the sockaddr and fail. */
4058 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4059 FREE(sin, M_SONAME);
4063 SCTP_INP_RLOCK(inp);
4064 sin->sin_port = inp->sctp_lport;
4065 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
4066 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
4067 struct sctp_tcb *stcb;
4068 struct sockaddr_in *sin_a;
4069 struct sctp_nets *net;
4072 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4078 SCTP_TCB_LOCK(stcb);
/* Find any IPv4 destination to drive source-address selection. */
4079 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4080 sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4081 if (sin_a->sin_family == AF_INET) {
4086 if ((!fnd) || (sin_a == NULL)) {
4088 SCTP_TCB_UNLOCK(stcb);
4091 sin->sin_addr = sctp_ipv4_source_address_selection(inp,
4092 stcb, (struct route *)&net->ro, net, 0);
4093 SCTP_TCB_UNLOCK(stcb);
4095 /* For the bound all case you get back 0 */
4097 sin->sin_addr.s_addr = 0;
4101 /* Take the first IPv4 address in the list */
4102 struct sctp_laddr *laddr;
4104 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4105 if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
4106 struct sockaddr_in *sin_a;
4107 sin_a = (struct sockaddr_in *)laddr->ifa->ifa_addr;
4108 sin->sin_addr = sin_a->sin_addr;
/* No usable local address found: free and fail. */
4114 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4115 FREE(sin, M_SONAME);
4117 SCTP_INP_RUNLOCK(inp);
4121 SCTP_INP_RUNLOCK(inp);
4122 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4123 (*addr) = (struct sockaddr *)sin;
/*
 * Netmsg handler for the peer-address (getpeername) request: unpack
 * the socket and sockaddr return slot from the lwkt message, delegate
 * to sctp_peeraddr_oncpu(), and reply with its error code to release
 * the waiting thread.
 */
4129 sctp_peeraddr(netmsg_t msg)
4133 	error = sctp_peeraddr_oncpu(msg->peeraddr.base.nm_so,
4134 	    msg->peeraddr.nm_nam);
4135 	lwkt_replymsg(&msg->lmsg, error);
/*
 * Backend for getpeername(2) on an SCTP socket: return the first IPv4
 * address of the peer (taken from the association's transport-address
 * list) plus the remote port.  Only meaningful for a connected,
 * TCP-model socket; UDP-model sockets and listeners fail early.
 * On success the allocated sockaddr_in is handed to the caller via
 * *addr; every error path frees it first.
 *
 * NOTE(review): interior lines (error returns, the fnd/"found" flag,
 * the final *addr assignment) are elided in this view; comments on
 * those paths are inferred and should be confirmed in the full file.
 */
4139 sctp_peeraddr_oncpu(struct socket *so, struct sockaddr **addr)
4141 	struct sockaddr_in *sin = (struct sockaddr_in *)*addr;
4142 	struct sockaddr_in *sin_a;
4143 	struct sctp_inpcb *inp;
4144 	struct sctp_tcb *stcb;
4145 	struct sctp_nets *net;
4149 	/* Do the malloc first in case it blocks. */
4150 	inp = (struct sctp_inpcb *)so->so_pcb;
4151 	if ((inp == NULL) ||
4152 	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
4153 		/* UDP type and listeners will drop out here */
4158 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4159 	/* XXX huh? why assign it above and then allocate it here? */
4160 	MALLOC(sin, struct sockaddr_in *, sizeof *sin, M_SONAME, M_WAITOK |
	/* NetBSD/OpenBSD branch: the result lives in the nam mbuf. */
4163 	nam->m_len = sizeof(*sin);
4164 	memset(sin, 0, sizeof(*sin));
4166 	sin->sin_family = AF_INET;
4167 	sin->sin_len = sizeof(*sin);
4169 	/* We must recapture incase we blocked */
4170 	inp = (struct sctp_inpcb *)so->so_pcb;
	/* PCB vanished while we slept in MALLOC: free and fail. */
4172 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4173 		FREE(sin, M_SONAME);
4178 	SCTP_INP_RLOCK(inp);
4179 	stcb = LIST_FIRST(&inp->sctp_asoc_list);
4181 		SCTP_TCB_LOCK(stcb);
4182 	SCTP_INP_RUNLOCK(inp);
	/* No association on a "connected" socket: free and fail. */
4184 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4185 		FREE(sin, M_SONAME);
	/* Copy the first IPv4 transport address of the peer. */
4191 	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4192 		sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4193 		if (sin_a->sin_family == AF_INET) {
4195 			sin->sin_port = stcb->rport;
4196 			sin->sin_addr = sin_a->sin_addr;
4200 	SCTP_TCB_UNLOCK(stcb);
	/* Peer has no IPv4 address at all: free and fail. */
4202 		/* No IPv4 address */
4203 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4204 		FREE(sin, M_SONAME);
/*
 * Protocol user-request dispatch table for FreeBSD-family kernels.
 * The socket layer indirects every socket operation on an SCTP socket
 * (bind, connect, send, ...) through these entry points; operations
 * SCTP does not support are wired to pr_generic_notsupp, and generic
 * stand-ins (pru_sense_null, soreceive) are used where SCTP needs no
 * special handling.
 */
4214 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4215 struct pr_usrreqs sctp_usrreqs = {
4216 	.pru_abort = sctp_abort,
4217 	.pru_accept = sctp_accept,
4218 	.pru_attach = sctp_attach,
4219 	.pru_bind = sctp_bind,
4220 	.pru_connect = sctp_connect,
4221 	.pru_connect2 = pr_generic_notsupp,
	/* ioctl(2) address management is handled by the generic IPv4 code. */
4222 	.pru_control = in_control_dispatch,
4223 	.pru_detach = sctp_detach,
4224 	.pru_disconnect = sctp_disconnect,
4225 	.pru_listen = sctp_listen,
4226 	.pru_peeraddr = sctp_peeraddr,
4227 	.pru_rcvd = sctp_usr_recvd,
4228 	.pru_rcvoob = pr_generic_notsupp,
4229 	.pru_send = sctp_send,
4230 	.pru_sense = pru_sense_null,
4231 	.pru_shutdown = sctp_shutdown,
4232 	.pru_sockaddr = sctp_ingetaddr,
4233 	.pru_sosend = sctp_sosend,
4234 	.pru_soreceive = soreceive
/*
 * Monolithic user-request entry point for NetBSD/OpenBSD kernels,
 * which use a single usrreq(so, req, ...) switch instead of the
 * per-operation pr_usrreqs table above.  Dispatches PRU_* requests
 * to the same sctp_* worker functions.
 *
 * NOTE(review): the switch labels, braces and several returns are
 * elided in this view; only the visible dispatch lines are annotated.
 */
4238 #if defined(__NetBSD__)
4240 sctp_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam,
4241     struct mbuf *control, struct proc *p)
	/* OpenBSD variant lacks the proc argument ... */
4246 sctp_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam,
4247     struct mbuf *control)
	/* ... so it substitutes the current process. */
4249 	struct proc *p = curproc;
4254 	family = so->so_proto->pr_domain->dom_family;
	/* Interface ioctls never reach the PRU switch: hand them to the
	 * address-family control routine directly. */
4256 	if (req == PRU_CONTROL) {
4259 			error = in_control(so, (long)m, (caddr_t)nam,
4260 			    (struct ifnet *)control
4261 #if defined(__NetBSD__)
4268 			error = in6_control(so, (long)m, (caddr_t)nam,
4269 			    (struct ifnet *)control, p);
4273 			error = EAFNOSUPPORT;
	/* Interface going away: forget every SCTP binding to its
	 * addresses of this socket's family. */
4278 	if (req == PRU_PURGEIF) {
4281 			ifn = (struct ifnet *)control;
4282 			TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
4283 				if (ifa->ifa_addr->sa_family == family) {
4284 					sctp_delete_ip_address(ifa);
4297 			return (EAFNOSUPPORT);
	/* Main PRU_* dispatch (case labels elided in this view). */
4304 		error = sctp_attach(so, family, p);
4307 		error = sctp_detach(so);
4313 		error = sctp_bind(so, nam, p);
4316 		error = sctp_listen(so, p);
4322 		error = sctp_connect(so, nam, p);
4324 	case PRU_DISCONNECT:
4325 		error = sctp_disconnect(so);
4331 		error = sctp_accept(so, nam);
4334 		error = sctp_shutdown(so);
4339 		 * For Open and Net BSD, this is real
4340 		 * ugly. The mbuf *nam that is passed
4341 		 * (by soreceive()) is the int flags c
4342 		 * ast as a (mbuf *) yuck!
4344 		error = sctp_usr_recvd(so, (int)((long)nam));
4348 		/* Flags are ignored */
4350 		if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
4351 			kprintf("Send called on V4 side\n");
4355 			struct sockaddr *addr;
			/* Destination rides in the nam mbuf for PRU_SEND. */
4359 				addr = mtod(nam, struct sockaddr *);
4361 		error = sctp_send(so, 0, m, addr, control, p);
4365 		error = sctp_abort(so);
	/* Unsupported PRU_* requests (labels elided). */
4372 		error = EAFNOSUPPORT;
4375 		error = EAFNOSUPPORT;
4378 		error = sctp_peeraddr(so, nam);
4381 		error = sctp_ingetaddr(so, nam);
4393 /* #if defined(__NetBSD__) || defined(__OpenBSD__) */
4396  * Sysctl for sctp variables.
/*
 * NetBSD/OpenBSD-style sysctl handler: every net.inet.sctp node is a
 * terminal integer, so each SCTPCTL_* name simply maps to one global
 * tunable via sysctl_int() (which handles both reads and writes).
 * Unknown names return ENOPROTOOPT.
 *
 * NOTE(review): the namelen check, surrounding switch header and some
 * argument continuation lines are elided in this view.
 */
4399 sctp_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
4403 	/* All sysctl names at this level are terminal. */
4409 	case SCTPCTL_MAXDGRAM:
4410 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4412 	case SCTPCTL_RECVSPACE:
4413 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4415 	case SCTPCTL_AUTOASCONF:
4416 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4417 		    &sctp_auto_asconf));
4418 	case SCTPCTL_ECN_ENABLE:
4419 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4421 	case SCTPCTL_ECN_NONCE:
4422 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4424 	case SCTPCTL_STRICT_SACK:
4425 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4426 		    &sctp_strict_sacks));
4427 	case SCTPCTL_NOCSUM_LO:
4428 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4429 		    &sctp_no_csum_on_loopback));
4430 	case SCTPCTL_STRICT_INIT:
4431 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4432 		    &sctp_strict_init));
4433 	case SCTPCTL_PEER_CHK_OH:
4434 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4435 		    &sctp_peer_chunk_oh));
4436 	case SCTPCTL_MAXBURST:
4437 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4438 		    &sctp_max_burst_default));
4439 	case SCTPCTL_MAXCHUNKONQ:
4440 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4441 		    &sctp_max_chunks_on_queue));
4442 	case SCTPCTL_DELAYED_SACK:
4443 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4444 		    &sctp_delayed_sack_time_default));
4445 	case SCTPCTL_HB_INTERVAL:
4446 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4447 		    &sctp_heartbeat_interval_default));
4448 	case SCTPCTL_PMTU_RAISE:
4449 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4450 		    &sctp_pmtu_raise_time_default));
4451 	case SCTPCTL_SHUTDOWN_GUARD:
4452 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4453 		    &sctp_shutdown_guard_time_default));
4454 	case SCTPCTL_SECRET_LIFETIME:
4455 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4456 		    &sctp_secret_lifetime_default));
4457 	case SCTPCTL_RTO_MAX:
4458 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4459 		    &sctp_rto_max_default));
4460 	case SCTPCTL_RTO_MIN:
4461 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4462 		    &sctp_rto_min_default));
4463 	case SCTPCTL_RTO_INITIAL:
4464 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4465 		    &sctp_rto_initial_default));
4466 	case SCTPCTL_INIT_RTO_MAX:
4467 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4468 		    &sctp_init_rto_max_default));
4469 	case SCTPCTL_COOKIE_LIFE:
4470 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4471 		    &sctp_valid_cookie_life_default));
4472 	case SCTPCTL_INIT_RTX_MAX:
4473 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4474 		    &sctp_init_rtx_max_default));
4475 	case SCTPCTL_ASSOC_RTX_MAX:
4476 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4477 		    &sctp_assoc_rtx_max_default));
4478 	case SCTPCTL_PATH_RTX_MAX:
4479 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4480 		    &sctp_path_rtx_max_default));
4481 	case SCTPCTL_NR_OUTGOING_STREAMS:
4482 		return (sysctl_int(oldp, oldlenp, newp, newlen,
4483 		    &sctp_nr_outgoing_streams_default));
	/* Additional case (label elided in this view). */
4486 		return (sysctl_int(oldp, oldlenp, newp, newlen,
	/* Unknown sysctl name. */
4490 		return (ENOPROTOOPT);
4497 * Sysctl for sctp variables.
4499 SYSCTL_SETUP(sysctl_net_inet_sctp_setup, "sysctl net.inet.sctp subtree setup")
4502 sysctl_createv(clog, 0, NULL, NULL,
4504 CTLTYPE_NODE, "net", NULL,
4507 sysctl_createv(clog, 0, NULL, NULL,
4509 CTLTYPE_NODE, "inet", NULL,
4511 CTL_NET, PF_INET, CTL_EOL);
4512 sysctl_createv(clog, 0, NULL, NULL,
4514 CTLTYPE_NODE, "sctp",
4515 SYSCTL_DESCR("sctp related settings"),
4517 CTL_NET, PF_INET, IPPROTO_SCTP, CTL_EOL);
4519 sysctl_createv(clog, 0, NULL, NULL,
4520 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4521 CTLTYPE_INT, "maxdgram",
4522 SYSCTL_DESCR("Maximum outgoing SCTP buffer size"),
4523 NULL, 0, &sctp_sendspace, 0,
4524 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_MAXDGRAM,
4527 sysctl_createv(clog, 0, NULL, NULL,
4528 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4529 CTLTYPE_INT, "recvspace",
4530 SYSCTL_DESCR("Maximum incoming SCTP buffer size"),
4531 NULL, 0, &sctp_recvspace, 0,
4532 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_RECVSPACE,
4535 sysctl_createv(clog, 0, NULL, NULL,
4536 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4537 CTLTYPE_INT, "autoasconf",
4538 SYSCTL_DESCR("Enable SCTP Auto-ASCONF"),
4539 NULL, 0, &sctp_auto_asconf, 0,
4540 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_AUTOASCONF,
4543 sysctl_createv(clog, 0, NULL, NULL,
4544 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4545 CTLTYPE_INT, "ecn_enable",
4546 SYSCTL_DESCR("Enable SCTP ECN"),
4547 NULL, 0, &sctp_ecn, 0,
4548 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_ECN_ENABLE,
4551 sysctl_createv(clog, 0, NULL, NULL,
4552 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4553 CTLTYPE_INT, "ecn_nonce",
4554 SYSCTL_DESCR("Enable SCTP ECN Nonce"),
4555 NULL, 0, &sctp_ecn_nonce, 0,
4556 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_ECN_NONCE,
4559 sysctl_createv(clog, 0, NULL, NULL,
4560 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4561 CTLTYPE_INT, "strict_sack",
4562 SYSCTL_DESCR("Enable SCTP Strict SACK checking"),
4563 NULL, 0, &sctp_strict_sacks, 0,
4564 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_STRICT_SACK,
4567 sysctl_createv(clog, 0, NULL, NULL,
4568 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4569 CTLTYPE_INT, "loopback_nocsum",
4570 SYSCTL_DESCR("Enable NO Csum on packets sent on loopback"),
4571 NULL, 0, &sctp_no_csum_on_loopback, 0,
4572 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_NOCSUM_LO,
4575 sysctl_createv(clog, 0, NULL, NULL,
4576 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4577 CTLTYPE_INT, "strict_init",
4578 SYSCTL_DESCR("Enable strict INIT/INIT-ACK singleton enforcement"),
4579 NULL, 0, &sctp_strict_init, 0,
4580 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_STRICT_INIT,
4583 sysctl_createv(clog, 0, NULL, NULL,
4584 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4585 CTLTYPE_INT, "peer_chkoh",
4586 SYSCTL_DESCR("Amount to debit peers rwnd per chunk sent"),
4587 NULL, 0, &sctp_peer_chunk_oh, 0,
4588 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_PEER_CHK_OH,
4591 sysctl_createv(clog, 0, NULL, NULL,
4592 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4593 CTLTYPE_INT, "maxburst",
4594 SYSCTL_DESCR("Default max burst for sctp endpoints"),
4595 NULL, 0, &sctp_max_burst_default, 0,
4596 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_MAXBURST,
4599 sysctl_createv(clog, 0, NULL, NULL,
4600 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4601 CTLTYPE_INT, "maxchunks",
4602 SYSCTL_DESCR("Default max chunks on queue per asoc"),
4603 NULL, 0, &sctp_max_chunks_on_queue, 0,
4604 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_MAXCHUNKONQ,