1 /* $KAME: sctp_usrreq.c,v 1.47 2005/03/06 16:04:18 itojun Exp $ */
2 /* $DragonFly: src/sys/netinet/sctp_usrreq.c,v 1.14 2008/04/20 13:44:25 swildner Exp $ */
5 * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Cisco Systems, Inc.
19 * 4. Neither the name of the project nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #if !(defined(__OpenBSD__) || defined(__APPLE__))
36 #include "opt_ipsec.h"
38 #if defined(__FreeBSD__) || defined(__DragonFly__)
39 #include "opt_inet6.h"
42 #if defined(__NetBSD__)
48 #elif !defined(__OpenBSD__)
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/kernel.h>
55 #include <sys/malloc.h>
57 #include <sys/domain.h>
60 #include <sys/protosw.h>
61 #include <sys/socket.h>
62 #include <sys/socketvar.h>
63 #include <sys/sysctl.h>
64 #include <sys/syslog.h>
65 #include <sys/thread2.h>
67 #include <net/if_types.h>
68 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
69 #include <net/if_var.h>
71 #include <net/route.h>
72 #include <netinet/in.h>
73 #include <netinet/in_systm.h>
74 #include <netinet/ip.h>
75 #include <netinet/ip6.h>
76 #include <netinet/in_pcb.h>
77 #include <netinet/in_var.h>
78 #include <netinet/ip_var.h>
79 #include <netinet6/ip6_var.h>
80 #include <netinet6/in6_var.h>
82 #include <netinet/ip_icmp.h>
83 #include <netinet/icmp_var.h>
84 #include <netinet/sctp_pcb.h>
85 #include <netinet/sctp_header.h>
86 #include <netinet/sctp_var.h>
87 #include <netinet/sctp_output.h>
88 #include <netinet/sctp_uio.h>
89 #include <netinet/sctp_asconf.h>
90 #include <netinet/sctputil.h>
91 #include <netinet/sctp_indata.h>
92 #include <netinet/sctp_asconf.h>
95 #include <netinet6/ipsec.h>
96 #include <netproto/key/key.h>
102 #include <net/net_osdep.h>
104 #if defined(HAVE_NRL_INPCB) || defined(__FreeBSD__) || defined(__DragonFly__)
109 #define sotoin6pcb sotoinpcb
114 extern u_int32_t sctp_debug_on;
115 #endif /* SCTP_DEBUG */
118 * sysctl tunable variables
/*
 * Sysctl-tunable defaults for the SCTP stack; exported through the
 * SYSCTL_* declarations later in this file (see their description
 * strings for units and meaning).
 * NOTE(review): this listing elides lines (e.g. the #ifdef arms inside
 * the sctp_recvspace initializer between original lines 127-134);
 * verify exact expressions against the complete file.
 */
120 int sctp_auto_asconf = SCTP_DEFAULT_AUTO_ASCONF;
121 int sctp_max_burst_default = SCTP_DEF_MAX_BURST;
/* per-chunk rwnd debit charged to the peer: one mbuf header's worth */
122 int sctp_peer_chunk_oh = sizeof(struct mbuf);
123 int sctp_strict_init = 1;
124 int sctp_no_csum_on_loopback = 1;
125 unsigned int sctp_max_chunks_on_queue = SCTP_ASOC_MAX_CHUNKS_ON_QUEUE;
126 int sctp_sendspace = (128 * 1024);
127 int sctp_recvspace = 128 * (1024 +
129 sizeof(struct sockaddr_in6)
131 sizeof(struct sockaddr_in)
134 int sctp_strict_sacks = 0;
136 int sctp_ecn_nonce = 0;
/* timer/retransmission defaults; units are given in the sysctl strings */
138 unsigned int sctp_delayed_sack_time_default = SCTP_RECV_MSEC;
139 unsigned int sctp_heartbeat_interval_default = SCTP_HB_DEFAULT_MSEC;
140 unsigned int sctp_pmtu_raise_time_default = SCTP_DEF_PMTU_RAISE_SEC;
141 unsigned int sctp_shutdown_guard_time_default = SCTP_DEF_MAX_SHUTDOWN_SEC;
142 unsigned int sctp_secret_lifetime_default = SCTP_DEFAULT_SECRET_LIFE_SEC;
143 unsigned int sctp_rto_max_default = SCTP_RTO_UPPER_BOUND;
144 unsigned int sctp_rto_min_default = SCTP_RTO_LOWER_BOUND;
145 unsigned int sctp_rto_initial_default = SCTP_RTO_INITIAL;
146 unsigned int sctp_init_rto_max_default = SCTP_RTO_UPPER_BOUND;
147 unsigned int sctp_valid_cookie_life_default = SCTP_DEFAULT_COOKIE_LIFE;
148 unsigned int sctp_init_rtx_max_default = SCTP_DEF_MAX_INIT;
149 unsigned int sctp_assoc_rtx_max_default = SCTP_DEF_MAX_SEND;
150 unsigned int sctp_path_rtx_max_default = SCTP_DEF_MAX_SEND/2;
151 unsigned int sctp_nr_outgoing_streams_default = SCTP_OSTREAM_INITIAL;
157 #define nmbclusters nmbclust
159 /* Init the SCTP pcb in sctp_pcb.c */
165 if (nmbclusters > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
166 sctp_max_chunks_on_queue = nmbclusters;
168 /* if (nmbclust > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
169 sctp_max_chunks_on_queue = nmbclust; FIX ME */
170 sctp_max_chunks_on_queue = nmbclust * 2;
173 * Allow a user to take no more than 1/2 the number of clusters
174 * or the SB_MAX whichever is smaller for the send window.
176 sb_max_adj = (u_long)((u_quad_t)(SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
177 sctp_sendspace = min((min(SB_MAX, sb_max_adj)),
179 ((nmbclusters/2) * SCTP_DEFAULT_MAXSEGMENT));
181 ((nmbclust/2) * SCTP_DEFAULT_MAXSEGMENT));
184 * Now for the recv window, should we take the same amount?
185 * or should I do 1/2 the SB_MAX instead in the SB_MAX min above.
186 * For now I will just copy.
188 sctp_recvspace = sctp_sendspace;
/*
 * Build a skeletal IPv6 header from an IPv4 header: zero the target,
 * then copy payload length, next-protocol and TTL->hop-limit, and put
 * the v4 source/destination addresses in s6_addr32[3] (v4-mapped
 * address layout).
 * NOTE(review): the value assigned to s6_addr32[2] (original line 205)
 * is elided from this listing — confirm in the full source.
 */
196 ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip)
198 	bzero(ip6, sizeof(*ip6));
200 	ip6->ip6_vfc = IPV6_VERSION;
201 	ip6->ip6_plen = ip->ip_len;
202 	ip6->ip6_nxt = ip->ip_p;
203 	ip6->ip6_hlim = ip->ip_ttl;
204 	ip6->ip6_src.s6_addr32[2] = ip6->ip6_dst.s6_addr32[2] =
206 	ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr;
207 	ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr;
/*
 * Split an outbound data chunk in half (used when the path MTU shrank
 * below the chunk size).  Allocates a second tmit_chunk, m_split()s the
 * mbuf chain at the midpoint, halves the size accounting on both
 * halves, fixes the FIRST/LAST fragment flags, and inserts the new
 * chunk right after the original on the stream's outqueue.
 * On allocation failure the original chunk is simply marked
 * CHUNK_FLAGS_FRAGMENT_OK and left alone (best-effort degradation).
 */
212 sctp_split_chunks(struct sctp_association *asoc,
213 struct sctp_stream_out *strm,
214 struct sctp_tmit_chunk *chk)
216 struct sctp_tmit_chunk *new_chk;
218 /* First we need a chunk */
219 new_chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
220 if (new_chk == NULL) {
221 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
224 sctppcbinfo.ipi_count_chunk++;
225 sctppcbinfo.ipi_gencnt_chunk++;
/* split the mbuf chain at the midpoint of the payload */
229 new_chk->data = m_split(chk->data, (chk->send_size>>1), MB_DONTWAIT);
230 if (new_chk->data == NULL) {
/* m_split failed: undo the allocation and fall back to fragmenting later */
232 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
233 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, new_chk);
234 sctppcbinfo.ipi_count_chunk--;
235 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
236 panic("Chunk count is negative");
238 sctppcbinfo.ipi_gencnt_chunk++;
242 /* Data is now split adjust sizes */
243 chk->send_size >>= 1;
244 new_chk->send_size >>= 1;
246 chk->book_size >>= 1;
247 new_chk->book_size >>= 1;
249 /* now adjust the marks */
250 chk->rec.data.rcv_flags |= SCTP_DATA_FIRST_FRAG;
251 chk->rec.data.rcv_flags &= ~SCTP_DATA_LAST_FRAG;
253 new_chk->rec.data.rcv_flags &= ~SCTP_DATA_FIRST_FRAG;
254 new_chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
256 /* Increase ref count if dest is set */
258 new_chk->whoTo->ref_count++;
260 /* now drop it on the end of the list*/
261 asoc->stream_queue_cnt++;
262 TAILQ_INSERT_AFTER(&strm->outqueue, chk, new_chk, sctp_next);
/*
 * Handle an ICMP "fragmentation needed" (path-MTU) notification for an
 * association.  Validates the arguments and the verification tag,
 * checks the ICMP type/code, derives the next-hop MTU (from the ICMP
 * payload, or via find_next_best_mtu() when the router supplied none),
 * lowers the per-net and per-association MTU, marks queued chunks that
 * now exceed the MTU as fragmentation-OK (resending those already
 * sent), pulls oversized un-wheeled stream chunks through
 * sctp_split_chunks(), and restarts the PMTU-raise timer.
 * Caller holds the TCB lock; every exit path releases it.
 */
266 sctp_notify_mbuf(struct sctp_inpcb *inp,
267 struct sctp_tcb *stcb,
268 struct sctp_nets *net,
278 if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
279 (ip == NULL) || (sh == NULL)) {
281 SCTP_TCB_UNLOCK(stcb);
284 /* First job is to verify the vtag matches what I would send */
285 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
286 SCTP_TCB_UNLOCK(stcb);
/* back up from the quoted IP header to the enclosing ICMP header */
289 icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
291 if (icmph->icmp_type != ICMP_UNREACH) {
292 /* We only care about unreachable */
293 SCTP_TCB_UNLOCK(stcb);
296 if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
297 /* not a unreachable message due to frag. */
298 SCTP_TCB_UNLOCK(stcb);
/* next-hop MTU is carried in the second 16 bits of the ICMP header */
302 nxtsz = ntohs(icmph->icmp_seq);
305 * old type router that does not tell us what the next size
306 * mtu is. Rats we will have to guess (in a educated fashion
309 nxtsz = find_next_best_mtu(totsz);
312 /* Stop any PMTU timer */
313 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, NULL);
315 /* Adjust destination size limit */
316 if (net->mtu > nxtsz) {
319 /* now what about the ep? */
320 if (stcb->asoc.smallest_mtu > nxtsz) {
321 struct sctp_tmit_chunk *chk, *nchk;
322 struct sctp_stream_out *strm;
323 /* Adjust that too */
324 stcb->asoc.smallest_mtu = nxtsz;
325 /* now off to subtract IP_DF flag if needed */
327 TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
328 if ((chk->send_size+IP_HDR_SIZE) > nxtsz) {
329 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
332 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
333 if ((chk->send_size+IP_HDR_SIZE) > nxtsz) {
335 * For this guy we also mark for immediate
336 * resend since we sent to big of chunk
338 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
339 if (chk->sent != SCTP_DATAGRAM_RESEND) {
340 stcb->asoc.sent_queue_retran_cnt++;
342 chk->sent = SCTP_DATAGRAM_RESEND;
343 chk->rec.data.doing_fast_retransmit = 0;
345 /* Clear any time so NO RTT is being done */
/* back the resent chunk out of the in-flight accounting, clamped at 0 */
347 stcb->asoc.total_flight -= chk->book_size;
348 if (stcb->asoc.total_flight < 0) {
349 stcb->asoc.total_flight = 0;
351 stcb->asoc.total_flight_count--;
352 if (stcb->asoc.total_flight_count < 0) {
353 stcb->asoc.total_flight_count = 0;
355 net->flight_size -= chk->book_size;
356 if (net->flight_size < 0) {
357 net->flight_size = 0;
/* split any queued-but-unsent stream chunk that no longer fits the MTU */
361 TAILQ_FOREACH(strm, &stcb->asoc.out_wheel, next_spoke) {
362 chk = TAILQ_FIRST(&strm->outqueue);
364 nchk = TAILQ_NEXT(chk, sctp_next);
365 if ((chk->send_size+SCTP_MED_OVERHEAD) > nxtsz) {
366 sctp_split_chunks(&stcb->asoc, strm, chk);
372 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, NULL);
373 SCTP_TCB_UNLOCK(stcb);
/*
 * Deliver a translated ICMP error to an association.  After argument
 * and verification-tag checks: reachability errors (EHOSTUNREACH /
 * EHOSTDOWN) mark the destination net unreachable and notify the ULP;
 * ECONNREFUSED / ENOPROTOOPT are treated as an out-of-the-blue abort
 * and tear the association down; all other errors are posted to the
 * socket's so_error and the writer is woken.
 * Caller holds the TCB lock; released on every path except the
 * free-assoc path, where the TCB itself is gone.
 */
378 sctp_notify(struct sctp_inpcb *inp,
382 struct sctp_tcb *stcb,
383 struct sctp_nets *net)
386 if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
387 (sh == NULL) || (to == NULL)) {
389 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
390 kprintf("sctp-notify, bad call\n");
392 #endif /* SCTP_DEBUG */
395 /* First job is to verify the vtag matches what I would send */
396 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
400 /* FIX ME FIX ME PROTOPT i.e. no SCTP should ALWAYS be an ABORT */
402 if ((error == EHOSTUNREACH) || /* Host is not reachable */
403 (error == EHOSTDOWN) || /* Host is down */
404 (error == ECONNREFUSED) || /* Host refused the connection, (not an abort?) */
405 (error == ENOPROTOOPT) /* SCTP is not present on host */
408 * Hmm reachablity problems we must examine closely.
409 * If its not reachable, we may have lost a network.
410 * Or if there is NO protocol at the other end named SCTP.
411 * well we consider it a OOTB abort.
413 if ((error == EHOSTUNREACH) || (error == EHOSTDOWN)) {
414 if (net->dest_state & SCTP_ADDR_REACHABLE) {
415 /* Ok that destination is NOT reachable */
416 net->dest_state &= ~SCTP_ADDR_REACHABLE;
417 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
/* force the error count past the threshold so the path stays down */
418 net->error_count = net->failure_threshold + 1;
419 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
420 stcb, SCTP_FAILED_THRESHOLD,
424 SCTP_TCB_UNLOCK(stcb);
427 * Here the peer is either playing tricks on us,
428 * including an address that belongs to someone who
429 * does not support SCTP OR was a userland
430 * implementation that shutdown and now is dead. In
431 * either case treat it like a OOTB abort with no TCB
433 sctp_abort_notification(stcb, SCTP_PEER_FAULTY);
434 sctp_free_assoc(inp, stcb);
435 /* no need to unlock here, since the TCB is gone */
438 /* Send all others to the app */
439 if (inp->sctp_socket) {
440 SOCK_LOCK(inp->sctp_socket);
441 inp->sctp_socket->so_error = error;
442 sctp_sowwakeup(inp, inp->sctp_socket);
443 SOCK_UNLOCK(inp->sctp_socket);
446 SCTP_TCB_UNLOCK(stcb);
/*
 * Protocol control-input entry point: invoked by the IP layer for ICMP
 * errors aimed at SCTP traffic.  Rejects non-IPv4 / unspecified
 * addresses and unmapped commands, rebuilds the (reversed) from/to
 * sockaddr pair from the quoted IP+SCTP headers, looks up the matching
 * association, then dispatches PRC_MSGSIZE to sctp_notify_mbuf() and
 * everything else (via inetctlerrmap) to sctp_notify().
 * NOTE(review): several lines are elided in this listing (e.g. the
 * 'ip'/'sh' declarations and the inner-block brackets); verify control
 * flow against the full source.
 */
450 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
455 sctp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
461 if (sa->sa_family != AF_INET ||
462 ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
463 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
470 if (PRC_IS_REDIRECT(cmd)) {
472 } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
473 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
480 struct sctp_inpcb *inp;
481 struct sctp_tcb *stcb;
482 struct sctp_nets *net;
483 struct sockaddr_in to, from;
/* SCTP common header sits right after the quoted IP header */
485 sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
486 bzero(&to, sizeof(to));
487 bzero(&from, sizeof(from));
488 from.sin_family = to.sin_family = AF_INET;
489 from.sin_len = to.sin_len = sizeof(to);
490 from.sin_port = sh->src_port;
491 from.sin_addr = ip->ip_src;
492 to.sin_port = sh->dest_port;
493 to.sin_addr = ip->ip_dst;
496 * 'to' holds the dest of the packet that failed to be sent.
497 * 'from' holds our local endpoint address.
498 * Thus we reverse the to and the from in the lookup.
501 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
502 (struct sockaddr *)&to,
504 if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
505 if (cmd != PRC_MSGSIZE) {
507 if (cmd == PRC_HOSTDEAD) {
510 cm = inetctlerrmap[cmd];
512 sctp_notify(inp, cm, sh,
513 (struct sockaddr *)&to, stcb,
516 /* handle possible ICMP size messages */
517 sctp_notify_mbuf(inp, stcb, net, ip, sh);
520 #if (defined(__FreeBSD__) && __FreeBSD_version < 500000) || defined(__DragonFly__)
521 /* XXX must be fixed for 5.x and higher, leave for 4.x */
522 if (PRC_IS_REDIRECT(cmd) && inp) {
523 in_rtchange((struct inpcb *)inp,
527 if ((stcb == NULL) && (inp != NULL)) {
528 /* reduce ref-count */
530 SCTP_INP_DECR_REF(inp);
531 SCTP_INP_WUNLOCK(inp);
537 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
/*
 * Sysctl handler: given a pair of sockaddr_in (remote, local) from
 * userland, look up the matching association and copy out the owning
 * socket's ucred.  Requires root privilege (priv_check / suser
 * depending on platform).  Drops the inp reference taken by the lookup
 * when only the inp (and not the TCB) was found.
 */
544 #if defined(__FreeBSD__) || defined(__DragonFly__)
546 sctp_getcred(SYSCTL_HANDLER_ARGS)
548 struct sockaddr_in addrs[2];
549 struct sctp_inpcb *inp;
550 struct sctp_nets *net;
551 struct sctp_tcb *stcb;
554 #if __FreeBSD_version >= 500000 || defined(__DragonFly__)
555 error = priv_check(req->td, PRIV_ROOT);
557 error = suser(req->p);
561 error = SYSCTL_IN(req, addrs, sizeof(addrs));
566 stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
569 if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
570 if ((inp != NULL) && (stcb == NULL)) {
571 /* reduce ref-count */
573 SCTP_INP_DECR_REF(inp);
574 SCTP_INP_WUNLOCK(inp);
579 error = SYSCTL_OUT(req, inp->sctp_socket->so_cred, sizeof(struct ucred));
580 SCTP_TCB_UNLOCK(stcb);
586 SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE|CTLFLAG_RW,
587 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");
588 #endif /* #if defined(__FreeBSD__) || defined(__DragonFly__) */
/*
 * net.inet.sctp sysctl tree: exposes the tunable defaults declared
 * near the top of this file.  All entries are read-write; the debug
 * knob is only compiled in under SCTP_DEBUG.
 */
593 #if defined(__FreeBSD__) || defined (__APPLE__) || defined(__DragonFly__)
595 SYSCTL_DECL(_net_inet);
597 SYSCTL_NODE(_net_inet, OID_AUTO, sctp, CTLFLAG_RD, 0,
600 SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxdgram, CTLFLAG_RW,
601 &sctp_sendspace, 0, "Maximum outgoing SCTP buffer size");
603 SYSCTL_INT(_net_inet_sctp, OID_AUTO, recvspace, CTLFLAG_RW,
604 &sctp_recvspace, 0, "Maximum incoming SCTP buffer size");
606 SYSCTL_INT(_net_inet_sctp, OID_AUTO, auto_asconf, CTLFLAG_RW,
607 &sctp_auto_asconf, 0, "Enable SCTP Auto-ASCONF");
609 SYSCTL_INT(_net_inet_sctp, OID_AUTO, ecn_enable, CTLFLAG_RW,
610 &sctp_ecn, 0, "Enable SCTP ECN");
612 SYSCTL_INT(_net_inet_sctp, OID_AUTO, ecn_nonce, CTLFLAG_RW,
613 &sctp_ecn_nonce, 0, "Enable SCTP ECN Nonce");
615 SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_sacks, CTLFLAG_RW,
616 &sctp_strict_sacks, 0, "Enable SCTP Strict SACK checking");
618 SYSCTL_INT(_net_inet_sctp, OID_AUTO, loopback_nocsum, CTLFLAG_RW,
619 &sctp_no_csum_on_loopback, 0,
620 "Enable NO Csum on packets sent on loopback");
622 SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_init, CTLFLAG_RW,
623 &sctp_strict_init, 0,
624 "Enable strict INIT/INIT-ACK singleton enforcement");
626 SYSCTL_INT(_net_inet_sctp, OID_AUTO, peer_chkoh, CTLFLAG_RW,
627 &sctp_peer_chunk_oh, 0,
628 "Amount to debit peers rwnd per chunk sent");
630 SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxburst, CTLFLAG_RW,
631 &sctp_max_burst_default, 0,
632 "Default max burst for sctp endpoints");
634 SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxchunks, CTLFLAG_RW,
635 &sctp_max_chunks_on_queue, 0,
636 "Default max chunks on queue per asoc");
638 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, delayed_sack_time, CTLFLAG_RW,
639 &sctp_delayed_sack_time_default, 0,
640 "Default delayed SACK timer in msec");
642 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, heartbeat_interval, CTLFLAG_RW,
643 &sctp_heartbeat_interval_default, 0,
644 "Default heartbeat interval in msec");
646 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, pmtu_raise_time, CTLFLAG_RW,
647 &sctp_pmtu_raise_time_default, 0,
648 "Default PMTU raise timer in sec");
650 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, shutdown_guard_time, CTLFLAG_RW,
651 &sctp_shutdown_guard_time_default, 0,
652 "Default shutdown guard timer in sec");
654 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, secret_lifetime, CTLFLAG_RW,
655 &sctp_secret_lifetime_default, 0,
656 "Default secret lifetime in sec");
658 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_max, CTLFLAG_RW,
659 &sctp_rto_max_default, 0,
660 "Default maximum retransmission timeout in msec");
662 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_min, CTLFLAG_RW,
663 &sctp_rto_min_default, 0,
664 "Default minimum retransmission timeout in msec");
666 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_initial, CTLFLAG_RW,
667 &sctp_rto_initial_default, 0,
668 "Default initial retransmission timeout in msec");
670 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, init_rto_max, CTLFLAG_RW,
671 &sctp_init_rto_max_default, 0,
672 "Default maximum retransmission timeout during association setup in msec");
674 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, valid_cookie_life, CTLFLAG_RW,
675 &sctp_valid_cookie_life_default, 0,
676 "Default cookie lifetime in sec");
678 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, init_rtx_max, CTLFLAG_RW,
679 &sctp_init_rtx_max_default, 0,
680 "Default maximum number of retransmission for INIT chunks");
682 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, assoc_rtx_max, CTLFLAG_RW,
683 &sctp_assoc_rtx_max_default, 0,
684 "Default maximum number of retransmissions per association");
686 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, path_rtx_max, CTLFLAG_RW,
687 &sctp_path_rtx_max_default, 0,
688 "Default maximum of retransmissions per path");
690 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, nr_outgoing_streams, CTLFLAG_RW,
691 &sctp_nr_outgoing_streams_default, 0,
692 "Default number of outgoing streams");
695 SYSCTL_INT(_net_inet_sctp, OID_AUTO, debug, CTLFLAG_RW,
696 &sctp_debug_on, 0, "Configure debug output");
697 #endif /* SCTP_DEBUG */
/*
 * pru_abort handler: tear the PCB down immediately (sctp_inpcb_free
 * with the "immediate" flag set).  The socket itself is not freed
 * here — see the NOTE below.
 */
701 * NOTE: (so) is referenced from soabort*() and netmsg_pru_abort()
702 * will sofree() it when we return.
705 sctp_abort(struct socket *so)
707 struct sctp_inpcb *inp;
710 inp = (struct sctp_inpcb *)so->so_pcb;
712 sctp_inpcb_free(inp, 1);
/*
 * pru_attach handler: reserve socket buffer space from the sysctl
 * defaults, allocate the SCTP inpcb, mark it IPv4-only, seed the IP
 * TTL, initialize the IPsec policy where compiled in, and (on NetBSD)
 * install sctp_sosend as the socket's send routine.  Frees the inpcb
 * again if IPsec policy setup fails.
 * NOTE(review): the early-return error paths after soreserve()/
 * sctp_inpcb_alloc() are elided from this listing — verify against
 * the full source.
 */
721 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
722 sctp_attach(struct socket *so, int proto, struct thread *p)
723 #elif defined(__DragonFly__)
724 sctp_attach(struct socket *so, int proto, struct pru_attach_info *ai)
726 sctp_attach(struct socket *so, int proto, struct proc *p)
729 struct sctp_inpcb *inp;
730 struct inpcb *ip_inp;
734 inp = (struct sctp_inpcb *)so->so_pcb;
739 error = soreserve(so, sctp_sendspace, sctp_recvspace, NULL);
744 error = sctp_inpcb_alloc(so);
749 inp = (struct sctp_inpcb *)so->so_pcb;
752 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */
753 ip_inp = &inp->ip_inp.inp;
754 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
755 ip_inp->inp_vflag |= INP_IPV4;
756 ip_inp->inp_ip_ttl = ip_defttl;
758 inp->inp_vflag |= INP_IPV4;
759 inp->inp_ip_ttl = ip_defttl;
763 #if !(defined(__OpenBSD__) || defined(__APPLE__))
764 error = ipsec_init_policy(so, &ip_inp->inp_sp);
766 sctp_inpcb_free(inp, 1);
771 SCTP_INP_WUNLOCK(inp);
772 #if defined(__NetBSD__)
773 so->so_send = sctp_sosend;
/*
 * pru_bind handler: extract the sockaddr (from an mbuf on the older
 * BSD signature), insist on AF_INET (this is the v4 usrreq layer),
 * and hand off to sctp_inpcb_bind().
 */
780 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
781 sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
783 #elif defined(__FreeBSD__) || defined(__APPLE__)
784 sctp_bind(struct socket *so, struct sockaddr *addr, struct proc *p)
787 sctp_bind(struct socket *so, struct mbuf *nam, struct proc *p)
789 struct sockaddr *addr = nam ? mtod(nam, struct sockaddr *) : NULL;
791 struct sctp_inpcb *inp;
795 if (addr && addr->sa_family != AF_INET)
796 /* must be a v4 address! */
800 inp = (struct sctp_inpcb *)so->so_pcb;
805 error = sctp_inpcb_bind(so, addr, p);
/*
 * pru_detach handler: free the PCB.  If SO_LINGER with zero timeout is
 * set, or unread data remains in the receive buffer, free immediately
 * (flag 1, aborting associations); otherwise allow a graceful close
 * (flag 0).
 */
812 sctp_detach(struct socket *so)
814 struct sctp_inpcb *inp;
816 inp = (struct sctp_inpcb *)so->so_pcb;
820 if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
821 (so->so_rcv.ssb_cc > 0)) {
822 sctp_inpcb_free(inp, 1);
824 sctp_inpcb_free(inp, 0);
831 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
832 sctp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
833 struct mbuf *control, struct thread *p);
835 sctp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
836 struct mbuf *control, struct proc *p);
840 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
841 sctp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
842 struct mbuf *control, struct thread *p)
845 sctp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
846 struct mbuf *control, struct proc *p)
849 struct sctp_inpcb *inp;
851 inp = (struct sctp_inpcb *)so->so_pcb;
854 sctp_m_freem(control);
860 /* Got to have an to address if we are NOT a connected socket */
861 if ((addr == NULL) &&
862 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
863 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
866 } else if (addr == NULL) {
867 error = EDESTADDRREQ;
870 sctp_m_freem(control);
876 if (addr->sa_family != AF_INET) {
877 /* must be a v4 address! */
880 sctp_m_freem(control);
883 error = EDESTADDRREQ;
888 /* now what about control */
891 kprintf("huh? control set?\n");
892 sctp_m_freem(inp->control);
895 inp->control = control;
897 /* add it in possibly */
898 if ((inp->pkt) && (inp->pkt->m_flags & M_PKTHDR)) {
904 for (x=m;x;x = x->m_next) {
907 inp->pkt->m_pkthdr.len += c_len;
911 inp->pkt_last->m_next = m;
914 inp->pkt_last = inp->pkt = m;
917 #if defined (__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
918 /* FreeBSD uses a flag passed */
919 ((flags & PRUS_MORETOCOME) == 0)
920 #elif defined( __NetBSD__)
921 /* NetBSD uses the so_state field */
922 ((so->so_state & SS_MORETOCOME) == 0)
924 1 /* Open BSD does not have any "more to come" indication */
928 * note with the current version this code will only be used
929 * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for
930 * re-defining sosend to use the sctp_sosend. One can
931 * optionally switch back to this code (by changing back the
932 * definitions) but this is not advisable.
935 ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
/*
 * pru_disconnect handler.  Only meaningful for TCP-style (one-to-one)
 * sockets: with no association it's a no-op; with SO_LINGER-0 or
 * unread receive data it sends a user-initiated ABORT and frees the
 * association; otherwise, if nothing is queued to send it initiates a
 * graceful SHUTDOWN (starting the shutdown and shutdown-guard timers),
 * and if data is still queued it sets SHUTDOWN_PENDING.  UDP-style
 * (one-to-many) sockets get an error — disconnect is unsupported there.
 * NOTE(review): various returns/braces/#endifs are elided from this
 * listing; verify the lock-release pairing in the full source.
 */
945 sctp_disconnect(struct socket *so)
947 struct sctp_inpcb *inp;
950 inp = (struct sctp_inpcb *)so->so_pcb;
956 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
957 if (LIST_EMPTY(&inp->sctp_asoc_list)) {
960 SCTP_INP_RUNLOCK(inp);
963 int some_on_streamwheel = 0;
964 struct sctp_association *asoc;
965 struct sctp_tcb *stcb;
967 stcb = LIST_FIRST(&inp->sctp_asoc_list);
970 SCTP_INP_RUNLOCK(inp);
/* abort path: linger-0 close or unread data still pending */
975 if (((so->so_options & SO_LINGER) &&
976 (so->so_linger == 0)) ||
977 (so->so_rcv.ssb_cc > 0)) {
978 if (SCTP_GET_STATE(asoc) !=
979 SCTP_STATE_COOKIE_WAIT) {
980 /* Left with Data unread */
983 MGET(err, MB_DONTWAIT, MT_DATA);
985 /* Fill in the user initiated abort */
986 struct sctp_paramhdr *ph;
987 ph = mtod(err, struct sctp_paramhdr *);
988 err->m_len = sizeof(struct sctp_paramhdr);
989 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
990 ph->param_length = htons(err->m_len);
992 sctp_send_abort_tcb(stcb, err);
994 SCTP_INP_RUNLOCK(inp);
995 sctp_free_assoc(inp, stcb);
996 /* No unlock tcb assoc is gone */
/* graceful path: see whether anything is still queued on any stream */
1000 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
1001 /* Check to see if some data queued */
1002 struct sctp_stream_out *outs;
1003 TAILQ_FOREACH(outs, &asoc->out_wheel,
1005 if (!TAILQ_EMPTY(&outs->outqueue)) {
1006 some_on_streamwheel = 1;
1012 if (TAILQ_EMPTY(&asoc->send_queue) &&
1013 TAILQ_EMPTY(&asoc->sent_queue) &&
1014 (some_on_streamwheel == 0)) {
1015 /* there is nothing queued to send, so done */
1016 if ((SCTP_GET_STATE(asoc) !=
1017 SCTP_STATE_SHUTDOWN_SENT) &&
1018 (SCTP_GET_STATE(asoc) !=
1019 SCTP_STATE_SHUTDOWN_ACK_SENT)) {
1020 /* only send SHUTDOWN 1st time thru */
1022 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
1023 kprintf("%s:%d sends a shutdown\n",
1029 sctp_send_shutdown(stcb,
1030 stcb->asoc.primary_destination);
1031 sctp_chunk_output(stcb->sctp_ep, stcb, 1);
1032 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
1033 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1034 stcb->sctp_ep, stcb,
1035 asoc->primary_destination);
1036 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1037 stcb->sctp_ep, stcb,
1038 asoc->primary_destination);
1042 * we still got (or just got) data to send,
1043 * so set SHUTDOWN_PENDING
1046 * XXX sockets draft says that MSG_EOF should
1047 * be sent with no data.
1048 * currently, we will allow user data to be
1049 * sent first and move to SHUTDOWN-PENDING
1051 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
1053 SCTP_TCB_UNLOCK(stcb);
1054 SCTP_INP_RUNLOCK(inp);
1060 /* UDP model does not support this */
1061 SCTP_INP_RUNLOCK(inp);
/*
 * pru_shutdown handler.  For UDP-style (one-to-many) sockets shutdown
 * is invalid: the receive-side state soshutdown() already set is
 * restored and EOPNOTSUPP returned.  For TCP-style sockets this
 * implements SHUT_WR/SHUT_RDWR: if nothing is queued to send, emit a
 * SHUTDOWN chunk (first time through only) and start the shutdown and
 * shutdown-guard timers; if data is still queued, just set
 * SHUTDOWN_PENDING so the shutdown proceeds once the queues drain.
 * Mirrors the graceful-close path in sctp_disconnect().
 */
1068 sctp_shutdown(struct socket *so)
1070 struct sctp_inpcb *inp;
1073 inp = (struct sctp_inpcb *)so->so_pcb;
1078 SCTP_INP_RLOCK(inp);
1079 /* For UDP model this is a invalid call */
1080 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
1081 /* Restore the flags that the soshutdown took away. */
1082 #if defined(__FreeBSD__) && __FreeBSD_version >= 502115
1083 so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
1085 soclrstate(so, SS_CANTRCVMORE);
1087 /* This proc will wakeup for read and do nothing (I hope) */
1089 SCTP_INP_RUNLOCK(inp);
1090 return (EOPNOTSUPP);
1093 * Ok if we reach here its the TCP model and it is either a SHUT_WR
1094 * or SHUT_RDWR. This means we put the shutdown flag against it.
1097 int some_on_streamwheel = 0;
1098 struct sctp_tcb *stcb;
1099 struct sctp_association *asoc;
1102 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1105 * Ok we hit the case that the shutdown call was made
1106 * after an abort or something. Nothing to do now.
1111 SCTP_TCB_LOCK(stcb);
/* is anything still queued on any outbound stream? */
1114 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
1115 /* Check to see if some data queued */
1116 struct sctp_stream_out *outs;
1117 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
1118 if (!TAILQ_EMPTY(&outs->outqueue)) {
1119 some_on_streamwheel = 1;
1124 if (TAILQ_EMPTY(&asoc->send_queue) &&
1125 TAILQ_EMPTY(&asoc->sent_queue) &&
1126 (some_on_streamwheel == 0)) {
1127 /* there is nothing queued to send, so I'm done... */
1128 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1129 /* only send SHUTDOWN the first time through */
1131 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
1132 kprintf("%s:%d sends a shutdown\n",
1138 sctp_send_shutdown(stcb,
1139 stcb->asoc.primary_destination);
1140 sctp_chunk_output(stcb->sctp_ep, stcb, 1);
1141 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
1142 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1143 stcb->sctp_ep, stcb,
1144 asoc->primary_destination);
1145 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1146 stcb->sctp_ep, stcb,
1147 asoc->primary_destination);
1151 * we still got (or just got) data to send, so
1152 * set SHUTDOWN_PENDING
1154 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
1156 SCTP_TCB_UNLOCK(stcb);
1158 SCTP_INP_RUNLOCK(inp);
/*
 * Copy a sockaddr into caller storage in user-presentable form:
 * sctp_recover_scope() rewrites an IPv6 link-local address to remove
 * the embedded scope-id (using the local lsa6 scratch), then the
 * result is memcpy'd by its sa_len.
 */
1164 * copies a "user" presentable address and removes embedded scope, etc.
1165 * returns 0 on success, 1 on error
1168 sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
1170 struct sockaddr_in6 lsa6;
1171 sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
1173 memcpy(ss, sa, sa->sa_len);
1178 #if defined(__NetBSD__) || defined(__OpenBSD__)
1180 * On NetBSD and OpenBSD in6_sin_2_v4mapsin6() not used and not exported,
1181 * so we have to export it here.
1183 void in6_sin_2_v4mapsin6(struct sockaddr_in *sin, struct sockaddr_in6 *sin6);
1187 sctp_fill_up_addresses(struct sctp_inpcb *inp,
1188 struct sctp_tcb *stcb,
1190 struct sockaddr_storage *sas)
1193 int loopback_scope, ipv4_local_scope, local_scope, site_scope, actual;
1194 int ipv4_addr_legal, ipv6_addr_legal;
1200 /* Turn on all the appropriate scope */
1201 loopback_scope = stcb->asoc.loopback_scope;
1202 ipv4_local_scope = stcb->asoc.ipv4_local_scope;
1203 local_scope = stcb->asoc.local_scope;
1204 site_scope = stcb->asoc.site_scope;
1206 /* Turn on ALL scope, since we look at the EP */
1207 loopback_scope = ipv4_local_scope = local_scope =
1210 ipv4_addr_legal = ipv6_addr_legal = 0;
1211 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1212 ipv6_addr_legal = 1;
1214 #if defined(__OpenBSD__)
1215 (0) /* we always do dual bind */
1216 #elif defined (__NetBSD__)
1217 (((struct in6pcb *)inp)->in6p_flags & IN6P_IPV6_V6ONLY)
1219 (((struct in6pcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY)
1222 ipv4_addr_legal = 1;
1225 ipv4_addr_legal = 1;
1228 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1229 TAILQ_FOREACH(ifn, &ifnet, if_list) {
1230 struct ifaddr_container *ifac;
1232 if ((loopback_scope == 0) &&
1233 (ifn->if_type == IFT_LOOP)) {
1234 /* Skip loopback if loopback_scope not set */
1237 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid],
1239 struct ifaddr *ifa = ifac->ifa;
1243 * For the BOUND-ALL case, the list
1244 * associated with a TCB is Always
1245 * considered a reverse list.. i.e.
1246 * it lists addresses that are NOT
1247 * part of the association. If this
1248 * is one of those we must skip it.
1250 if (sctp_is_addr_restricted(stcb,
1255 if ((ifa->ifa_addr->sa_family == AF_INET) &&
1256 (ipv4_addr_legal)) {
1257 struct sockaddr_in *sin;
1258 sin = (struct sockaddr_in *)ifa->ifa_addr;
1259 if (sin->sin_addr.s_addr == 0) {
1260 /* we skip unspecifed addresses */
1263 if ((ipv4_local_scope == 0) &&
1264 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1267 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) {
1268 in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
1269 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1270 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
1271 actual += sizeof(sizeof(struct sockaddr_in6));
1273 memcpy(sas, sin, sizeof(*sin));
1274 ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
1275 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
1276 actual += sizeof(*sin);
1278 if (actual >= limit) {
1281 } else if ((ifa->ifa_addr->sa_family == AF_INET6) &&
1282 (ipv6_addr_legal)) {
1283 struct sockaddr_in6 *sin6, lsa6;
1284 sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
1285 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1286 /* we skip unspecifed addresses */
1289 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
1290 if (local_scope == 0)
1292 if (sin6->sin6_scope_id == 0) {
1294 if (in6_recoverscope(&lsa6,
1297 /* bad link local address */
1302 if ((site_scope == 0) &&
1303 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1306 memcpy(sas, sin6, sizeof(*sin6));
1307 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1308 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
1309 actual += sizeof(*sin6);
1310 if (actual >= limit) {
/*
 * Subset-bound fill path (fragment): the endpoint is bound to specific
 * addresses, so copy from the pcb/association laddr lists instead of
 * walking interfaces.  sctp_fill_user_address() does the per-entry copy;
 * each entry advances 'sas' and 'actual' by the sockaddr's sa_len.
 */
struct sctp_laddr *laddr;
* If we have a TCB and we do NOT support ASCONF (it's
* turned off or otherwise) then the list is always the
* true list of addresses (the else case below). Otherwise
* the list on the association is a list of addresses that
* are NOT part of the association.
if (inp->sctp_flags & SCTP_PCB_FLAGS_DO_ASCONF) {
/* The list is a NEGATIVE list: skip restricted entries */
LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
if (sctp_is_addr_restricted(stcb, laddr->ifa->ifa_addr)) {
if (sctp_fill_user_address(sas, laddr->ifa->ifa_addr))
((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
sas = (struct sockaddr_storage *)((caddr_t)sas +
laddr->ifa->ifa_addr->sa_len);
actual += laddr->ifa->ifa_addr->sa_len;
if (actual >= limit) {
/* The list is a positive list if present */
/* Must use the specific association list */
LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
if (sctp_fill_user_address(sas,
laddr->ifa->ifa_addr))
((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
sas = (struct sockaddr_storage *)((caddr_t)sas +
laddr->ifa->ifa_addr->sa_len);
actual += laddr->ifa->ifa_addr->sa_len;
if (actual >= limit) {
/* No endpoint so use the endpoints individual list */
LIST_FOREACH(laddr, &inp->sctp_addr_list,
if (sctp_fill_user_address(sas,
laddr->ifa->ifa_addr))
((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
sas = (struct sockaddr_storage *)((caddr_t)sas +
laddr->ifa->ifa_addr->sa_len);
actual += laddr->ifa->ifa_addr->sa_len;
if (actual >= limit) {
/*
 * Despite the name, this returns a BYTE count, not an address count:
 * it sums sizeof(sockaddr_in)/sizeof(sockaddr_in6) for every address
 * the endpoint could report, sizing a worst-case buffer for
 * SCTP_GET_LOCAL_ADDR_SIZE.  V4 addresses count as sockaddr_in6 when
 * the pcb wants mapped-v4 output.
 */
sctp_count_max_addresses(struct sctp_inpcb *inp)
* In both sub-set bound and bound_all cases we return the MAXIMUM
* number of addresses that you COULD get. In reality the sub-set
* bound may have an exclusion list for a given TCB OR in the
* bound-all case a TCB may NOT include the loopback or other
* addresses as well.
if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
/* bound-all: every address on every interface could be reported */
TAILQ_FOREACH(ifn, &ifnet, if_list) {
struct ifaddr_container *ifac;
TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
struct ifaddr *ifa = ifac->ifa;
/* Count them if they are the right type */
if (ifa->ifa_addr->sa_family == AF_INET) {
if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
cnt += sizeof(struct sockaddr_in6);
cnt += sizeof(struct sockaddr_in);
} else if (ifa->ifa_addr->sa_family == AF_INET6)
cnt += sizeof(struct sockaddr_in6);
/* subset-bound: only the explicitly bound laddr list counts */
struct sctp_laddr *laddr;
LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
cnt += sizeof(struct sockaddr_in6);
cnt += sizeof(struct sockaddr_in);
} else if (laddr->ifa->ifa_addr->sa_family == AF_INET6)
cnt += sizeof(struct sockaddr_in6);
/*
 * sctp_do_connect_x (head): implements connectx() — connect an SCTP
 * endpoint to a peer described by an ARRAY of addresses packed in mbuf
 * 'm' (an int count followed by sockaddrs).  Fails with EADDRINUSE if
 * the 1-to-1 (TCP-model) socket is already connected.
 */
sctp_do_connect_x(struct socket *so,
struct sctp_inpcb *inp,
#if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
struct sctp_tcb *stcb = NULL;
struct sockaddr *sa;
int num_v6=0, num_v4=0, *totaddrp, totaddr, i, incr, at;
if (sctp_debug_on & SCTP_DEBUG_PCB1) {
kprintf("Connectx called\n");
#endif /* SCTP_DEBUG */
if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
(inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
/* We are already connected AND the TCP model */
return (EADDRINUSE);
/* TCP model: grab the single existing assoc, if any, under read lock */
if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
SCTP_INP_RLOCK(inp);
stcb = LIST_FIRST(&inp->sctp_asoc_list);
SCTP_INP_RUNLOCK(inp);
1468 SCTP_ASOC_CREATE_LOCK(inp);
1469 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1470 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
1471 SCTP_ASOC_CREATE_UNLOCK(inp);
/*
 * Body (fragment): parse the packed address array out of the mbuf,
 * validate each address (rejecting v4-mapped v6 and duplicates of an
 * existing association), bind an ephemeral port if needed, allocate
 * the assoc, add the remaining peer addresses, then kick off INIT.
 */
totaddrp = mtod(m, int *);
totaddr = *totaddrp;
sa = (struct sockaddr *)(totaddrp + 1);
/* account and validate addresses */
SCTP_INP_WLOCK(inp);
SCTP_INP_INCR_REF(inp);
SCTP_INP_WUNLOCK(inp);
for (i = 0; i < totaddr; i++) {
if (sa->sa_family == AF_INET) {
incr = sizeof(struct sockaddr_in);
} else if (sa->sa_family == AF_INET6) {
struct sockaddr_in6 *sin6;
sin6 = (struct sockaddr_in6 *)sa;
if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
/* Must be non-mapped for connectx */
SCTP_ASOC_CREATE_UNLOCK(inp);
incr = sizeof(struct sockaddr_in6);
stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
/* Already have or am bringing up an association */
SCTP_ASOC_CREATE_UNLOCK(inp);
SCTP_TCB_UNLOCK(stcb);
/* bounds-check the next sockaddr against the mbuf length */
if ((at + incr) > m->m_len) {
sa = (struct sockaddr *)((caddr_t)sa + incr);
sa = (struct sockaddr *)(totaddrp + 1);
SCTP_INP_WLOCK(inp);
SCTP_INP_DECR_REF(inp);
SCTP_INP_WUNLOCK(inp);
/* v6 addresses on a v4-only pcb are an error */
if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
SCTP_INP_WUNLOCK(inp);
SCTP_ASOC_CREATE_UNLOCK(inp);
if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
struct in6pcb *inp6;
inp6 = (struct in6pcb *)inp;
#if defined(__OpenBSD__)
(0) /* we always do dual bind */
#elif defined (__NetBSD__)
(inp6->in6p_flags & IN6P_IPV6_V6ONLY)
(inp6->inp_flags & IN6P_IPV6_V6ONLY)
* if IPV6_V6ONLY flag, ignore connections
* destined to a v4 addr or v4-mapped addr
SCTP_INP_WUNLOCK(inp);
SCTP_ASOC_CREATE_UNLOCK(inp);
if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
SCTP_PCB_FLAGS_UNBOUND) {
/* Bind an ephemeral port */
SCTP_INP_WUNLOCK(inp);
error = sctp_inpcb_bind(so, NULL, p);
SCTP_ASOC_CREATE_UNLOCK(inp);
SCTP_INP_WUNLOCK(inp);
/* We are GOOD to go */
stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0);
/* Gak! no memory */
SCTP_ASOC_CREATE_UNLOCK(inp);
/* move to second address */
if (sa->sa_family == AF_INET)
sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
for (i = 1; i < totaddr; i++) {
if (sa->sa_family == AF_INET) {
incr = sizeof(struct sockaddr_in);
if (sctp_add_remote_addr(stcb, sa, 0, 8)) {
/* assoc gone no un-lock */
sctp_free_assoc(inp, stcb);
SCTP_ASOC_CREATE_UNLOCK(inp);
} else if (sa->sa_family == AF_INET6) {
incr = sizeof(struct sockaddr_in6);
if (sctp_add_remote_addr(stcb, sa, 0, 8)) {
/* assoc gone no un-lock */
sctp_free_assoc(inp, stcb);
SCTP_ASOC_CREATE_UNLOCK(inp);
sa = (struct sockaddr *)((caddr_t)sa + incr);
stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
/* doing delayed connection */
stcb->asoc.delayed_connection = 1;
sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
sctp_send_initiate(inp, stcb);
SCTP_TCB_UNLOCK(stcb);
if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
/* Set the connected flag so we can queue data */
SCTP_ASOC_CREATE_UNLOCK(inp);
/*
 * sctp_optsget (head, fragment): getsockopt() handler.  The option value
 * is returned in mbuf 'm'; each case validates m->m_len before writing.
 * This first group covers simple on/off options read from inp->sctp_flags.
 */
sctp_optsget(struct socket *so,
#if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
struct sctp_inpcb *inp;
int error, optval=0;
struct sctp_tcb *stcb = NULL;
inp = (struct sctp_inpcb *)so->so_pcb;
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("optsget:MP is NULL EINVAL\n");
#endif /* SCTP_DEBUG */
/* Got to have a mbuf */
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("Huh no mbuf\n");
#endif /* SCTP_DEBUG */
if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
kprintf("optsget opt:%lxx sz:%u\n", (unsigned long)opt,
#endif /* SCTP_DEBUG */
case SCTP_AUTOCLOSE:
case SCTP_AUTO_ASCONF:
case SCTP_DISABLE_FRAGMENTS:
case SCTP_I_WANT_MAPPED_V4_ADDR:
if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
kprintf("other stuff\n");
#endif /* SCTP_DEBUG */
SCTP_INP_RLOCK(inp);
case SCTP_DISABLE_FRAGMENTS:
optval = inp->sctp_flags & SCTP_PCB_FLAGS_NO_FRAGMENT;
case SCTP_I_WANT_MAPPED_V4_ADDR:
optval = inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
case SCTP_AUTO_ASCONF:
optval = inp->sctp_flags & SCTP_PCB_FLAGS_AUTO_ASCONF;
optval = inp->sctp_flags & SCTP_PCB_FLAGS_NODELAY;
case SCTP_AUTOCLOSE:
/* AUTOCLOSE returns the configured time, not a boolean */
if ((inp->sctp_flags & SCTP_PCB_FLAGS_AUTOCLOSE) ==
SCTP_PCB_FLAGS_AUTOCLOSE)
optval = inp->sctp_ep.auto_close_time;
error = ENOPROTOOPT;
} /* end switch (sopt->sopt_name) */
if (opt != SCTP_AUTOCLOSE) {
/* make it an "on/off" value */
optval = (optval != 0);
if ((size_t)m->m_len < sizeof(int)) {
SCTP_INP_RUNLOCK(inp);
/* return the option value */
*mtod(m, int *) = optval;
m->m_len = sizeof(optval);
/*
 * sctp_optsget (continued, fragment): assoc-id enumeration, nonce values,
 * delayed-ack time, sndbuf usage, max burst, segment size, and debug/peg
 * counters.  Each case writes its result into mbuf 'm' and sets m->m_len.
 */
case SCTP_GET_ASOC_ID_LIST:
struct sctp_assoc_ids *ids;
if ((size_t)m->m_len < sizeof(struct sctp_assoc_ids)) {
ids = mtod(m, struct sctp_assoc_ids *);
SCTP_INP_RLOCK(inp);
stcb = LIST_FIRST(&inp->sctp_asoc_list);
ids->asls_numb_present = 0;
ids->asls_more_to_get = 0;
SCTP_INP_RUNLOCK(inp);
/* resume the walk from the caller-supplied start id */
orig = ids->asls_assoc_start;
stcb = LIST_FIRST(&inp->sctp_asoc_list);
stcb = LIST_NEXT(stcb , sctp_tcblist);
ids->asls_numb_present = 0;
ids->asls_more_to_get = 1;
while(at < MAX_ASOC_IDS_RET) {
ids->asls_assoc_id[at] = sctp_get_associd(stcb);
ids->asls_numb_present++;
stcb = LIST_NEXT(stcb , sctp_tcblist);
ids->asls_more_to_get = 0;
SCTP_INP_RUNLOCK(inp);
case SCTP_GET_NONCE_VALUES:
struct sctp_get_nonce_values *gnv;
if ((size_t)m->m_len < sizeof(struct sctp_get_nonce_values)) {
gnv = mtod(m, struct sctp_get_nonce_values *);
stcb = sctp_findassociation_ep_asocid(inp, gnv->gn_assoc_id);
gnv->gn_peers_tag = stcb->asoc.peer_vtag;
gnv->gn_local_tag = stcb->asoc.my_vtag;
SCTP_TCB_UNLOCK(stcb);
case SCTP_PEER_PUBLIC_KEY:
case SCTP_MY_PUBLIC_KEY:
case SCTP_SET_AUTH_CHUNKS:
case SCTP_SET_AUTH_SECRET:
/* not supported yet and until we refine the draft */
case SCTP_DELAYED_ACK_TIME:
if ((size_t)m->m_len < sizeof(int32_t)) {
tm = mtod(m, int32_t *);
/* stored in ticks; reported to userland in milliseconds */
*tm = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
case SCTP_GET_SNDBUF_USE:
if ((size_t)m->m_len < sizeof(struct sctp_sockstat)) {
struct sctp_sockstat *ss;
struct sctp_tcb *stcb;
struct sctp_association *asoc;
ss = mtod(m, struct sctp_sockstat *);
stcb = sctp_findassociation_ep_asocid(inp, ss->ss_assoc_id);
ss->ss_total_sndbuf = (u_int32_t)asoc->total_output_queue_size;
ss->ss_total_mbuf_sndbuf = (u_int32_t)asoc->total_output_mbuf_queue_size;
ss->ss_total_recv_buf = (u_int32_t)(asoc->size_on_delivery_queue +
asoc->size_on_reasm_queue +
asoc->size_on_all_streams);
SCTP_TCB_UNLOCK(stcb);
m->m_len = sizeof(struct sctp_sockstat);
burst = mtod(m, u_int8_t *);
SCTP_INP_RLOCK(inp);
*burst = inp->sctp_ep.max_burst;
SCTP_INP_RUNLOCK(inp);
m->m_len = sizeof(u_int8_t);
sctp_assoc_t *assoc_id;
if ((size_t)m->m_len < sizeof(u_int32_t)) {
if ((size_t)m->m_len < sizeof(sctp_assoc_t)) {
/* input assoc id and output segsize share the mbuf data area */
assoc_id = mtod(m, sctp_assoc_t *);
segsize = mtod(m, u_int32_t *);
m->m_len = sizeof(u_int32_t);
if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
(inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) ||
(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
struct sctp_tcb *stcb;
SCTP_INP_RLOCK(inp);
stcb = LIST_FIRST(&inp->sctp_asoc_list);
SCTP_TCB_LOCK(stcb);
SCTP_INP_RUNLOCK(inp);
*segsize = sctp_get_frag_point(stcb, &stcb->asoc);
SCTP_TCB_UNLOCK(stcb);
SCTP_INP_RUNLOCK(inp);
stcb = sctp_findassociation_ep_asocid(inp, *assoc_id);
*segsize = sctp_get_frag_point(stcb, &stcb->asoc);
SCTP_TCB_UNLOCK(stcb);
/* default is to get the max, if I
* can't calculate from an existing association.
if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
ovh = SCTP_MED_OVERHEAD;
ovh = SCTP_MED_V4_OVERHEAD;
*segsize = inp->sctp_frag_point - ovh;
case SCTP_SET_DEBUG_LEVEL:
if ((size_t)m->m_len < sizeof(u_int32_t)) {
level = mtod(m, u_int32_t *);
*level = sctp_debug_on;
m->m_len = sizeof(u_int32_t);
kprintf("Returning DEBUG LEVEL %x is set\n",
(u_int)sctp_debug_on);
#else /* SCTP_DEBUG */
case SCTP_GET_STAT_LOG:
#ifdef SCTP_STAT_LOGGING
error = sctp_fill_stat_log(m);
#else /* SCTP_DEBUG */
if ((size_t)m->m_len < sizeof(sctp_pegs)) {
pt = mtod(m, u_int32_t *);
memcpy(pt, sctp_pegs, sizeof(sctp_pegs));
m->m_len = sizeof(sctp_pegs);
1935 struct sctp_event_subscribe *events;
1937 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
1938 kprintf("get events\n");
1940 #endif /* SCTP_DEBUG */
1941 if ((size_t)m->m_len < sizeof(struct sctp_event_subscribe)) {
1943 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
1944 kprintf("M->M_LEN is %d not %d\n",
1946 (int)sizeof(struct sctp_event_subscribe));
1948 #endif /* SCTP_DEBUG */
1952 events = mtod(m, struct sctp_event_subscribe *);
1953 memset(events, 0, sizeof(events));
1954 SCTP_INP_RLOCK(inp);
1955 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT)
1956 events->sctp_data_io_event = 1;
1958 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVASSOCEVNT)
1959 events->sctp_association_event = 1;
1961 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVPADDREVNT)
1962 events->sctp_address_event = 1;
1964 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVSENDFAILEVNT)
1965 events->sctp_send_failure_event = 1;
1967 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVPEERERR)
1968 events->sctp_peer_error_event = 1;
1970 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)
1971 events->sctp_shutdown_event = 1;
1973 if (inp->sctp_flags & SCTP_PCB_FLAGS_PDAPIEVNT)
1974 events->sctp_partial_delivery_event = 1;
1976 if (inp->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT)
1977 events->sctp_adaption_layer_event = 1;
1979 if (inp->sctp_flags & SCTP_PCB_FLAGS_STREAM_RESETEVNT)
1980 events->sctp_stream_reset_events = 1;
1981 SCTP_INP_RUNLOCK(inp);
1982 m->m_len = sizeof(struct sctp_event_subscribe);
/*
 * sctp_optsget (continued, fragment): adaption layer indicator, debug
 * sequence, local/remote address-size queries, and the peer/local
 * address enumeration options that pack sockaddrs into the mbuf.
 */
case SCTP_ADAPTION_LAYER:
if ((size_t)m->m_len < sizeof(int)) {
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("getadaption ind\n");
#endif /* SCTP_DEBUG */
SCTP_INP_RLOCK(inp);
*mtod(m, int *) = inp->sctp_ep.adaption_layer_indicator;
SCTP_INP_RUNLOCK(inp);
m->m_len = sizeof(int);
case SCTP_SET_INITIAL_DBG_SEQ:
if ((size_t)m->m_len < sizeof(int)) {
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("get initial dbg seq\n");
#endif /* SCTP_DEBUG */
SCTP_INP_RLOCK(inp);
*mtod(m, int *) = inp->sctp_ep.initial_sequence_debug;
SCTP_INP_RUNLOCK(inp);
m->m_len = sizeof(int);
case SCTP_GET_LOCAL_ADDR_SIZE:
if ((size_t)m->m_len < sizeof(int)) {
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("get local sizes\n");
#endif /* SCTP_DEBUG */
SCTP_INP_RLOCK(inp);
/* byte count needed to hold all local addresses (see helper) */
*mtod(m, int *) = sctp_count_max_addresses(inp);
SCTP_INP_RUNLOCK(inp);
m->m_len = sizeof(int);
case SCTP_GET_REMOTE_ADDR_SIZE:
sctp_assoc_t *assoc_id;
struct sctp_nets *net;
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("get remote size\n");
#endif /* SCTP_DEBUG */
if ((size_t)m->m_len < sizeof(sctp_assoc_t)) {
kprintf("m->m_len:%d not %d\n",
m->m_len, sizeof(sctp_assoc_t));
#endif /* SCTP_DEBUG */
val = mtod(m, u_int32_t *);
if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
SCTP_INP_RLOCK(inp);
stcb = LIST_FIRST(&inp->sctp_asoc_list);
SCTP_TCB_LOCK(stcb);
SCTP_INP_RUNLOCK(inp);
assoc_id = mtod(m, sctp_assoc_t *);
stcb = sctp_findassociation_ep_asocid(inp, *assoc_id);
/* Count the sizes */
TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) ||
(((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
sz += sizeof(struct sockaddr_in6);
} else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
sz += sizeof(struct sockaddr_in);
SCTP_TCB_UNLOCK(stcb);
m->m_len = sizeof(u_int32_t);
case SCTP_GET_PEER_ADDRESSES:
* Get the address information, an array
* is passed in to fill up we pack it.
struct sockaddr_storage *sas;
struct sctp_nets *net;
struct sctp_getaddresses *saddr;
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("get peer addresses\n");
#endif /* SCTP_DEBUG */
if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
left = m->m_len - sizeof(struct sctp_getaddresses);
saddr = mtod(m, struct sctp_getaddresses *);
if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
SCTP_INP_RLOCK(inp);
stcb = LIST_FIRST(&inp->sctp_asoc_list);
SCTP_TCB_LOCK(stcb);
SCTP_INP_RUNLOCK(inp);
stcb = sctp_findassociation_ep_asocid(inp,
saddr->sget_assoc_id);
m->m_len = sizeof(struct sctp_getaddresses);
sas = (struct sockaddr_storage *)&saddr->addr[0];
TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) ||
(((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
cpsz = sizeof(struct sockaddr_in6);
} else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
cpsz = sizeof(struct sockaddr_in);
/* not enough room. */
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("Out of room\n");
#endif /* SCTP_DEBUG */
if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
(((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) {
/* Must map the address */
in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr,
(struct sockaddr_in6 *)sas);
memcpy(sas, &net->ro._l_addr, cpsz);
((struct sockaddr_in *)sas)->sin_port = stcb->rport;
sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
kprintf("left now:%d mlen:%d\n",
#endif /* SCTP_DEBUG */
SCTP_TCB_UNLOCK(stcb);
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("All done\n");
#endif /* SCTP_DEBUG */
case SCTP_GET_LOCAL_ADDRESSES:
struct sockaddr_storage *sas;
struct sctp_getaddresses *saddr;
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("get local addresses\n");
#endif /* SCTP_DEBUG */
if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
saddr = mtod(m, struct sctp_getaddresses *);
if (saddr->sget_assoc_id) {
if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
SCTP_INP_RLOCK(inp);
stcb = LIST_FIRST(&inp->sctp_asoc_list);
SCTP_TCB_LOCK(stcb);
SCTP_INP_RUNLOCK(inp);
stcb = sctp_findassociation_ep_asocid(inp, saddr->sget_assoc_id);
* assure that the TCP model does not need an assoc id
if ( (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) &&
SCTP_INP_RLOCK(inp);
stcb = LIST_FIRST(&inp->sctp_asoc_list);
SCTP_TCB_LOCK(stcb);
SCTP_INP_RUNLOCK(inp);
/* delegate packing to sctp_fill_up_addresses() */
sas = (struct sockaddr_storage *)&saddr->addr[0];
limit = m->m_len - sizeof(sctp_assoc_t);
actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
SCTP_TCB_UNLOCK(stcb);
m->m_len = sizeof(struct sockaddr_storage) + actual;
/*
 * sctp_optsget (continued, fragment): per-peer-address parameters
 * (SCTP_PEER_ADDR_PARAMS) and per-peer-address state/RTT/cwnd info
 * (SCTP_GET_PEER_ADDR_INFO).  Both resolve a TCB (and optionally a
 * specific net) from assoc id or address before reading values.
 */
case SCTP_PEER_ADDR_PARAMS:
struct sctp_paddrparams *paddrp;
struct sctp_nets *net;
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("Getting peer_addr_params\n");
#endif /* SCTP_DEBUG */
if ((size_t)m->m_len < sizeof(struct sctp_paddrparams)) {
if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
kprintf("Hmm m->m_len:%d is to small\n",
#endif /* SCTP_DEBUG */
paddrp = mtod(m, struct sctp_paddrparams *);
if (paddrp->spp_assoc_id) {
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("In spp_assoc_id find type\n");
#endif /* SCTP_DEBUG */
if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
SCTP_INP_RLOCK(inp);
stcb = LIST_FIRST(&inp->sctp_asoc_list);
SCTP_TCB_LOCK(stcb);
net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
SCTP_INP_RLOCK(inp);
stcb = sctp_findassociation_ep_asocid(inp, paddrp->spp_assoc_id);
if ( (stcb == NULL) &&
((((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET) ||
(((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET6))) {
/* Lookup via address */
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("Ok we need to lookup a param\n");
#endif /* SCTP_DEBUG */
if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
SCTP_INP_RLOCK(inp);
stcb = LIST_FIRST(&inp->sctp_asoc_list);
SCTP_TCB_LOCK(stcb);
net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
SCTP_INP_RUNLOCK(inp);
/* hold an inp ref across the unlocked address lookup */
SCTP_INP_WLOCK(inp);
SCTP_INP_INCR_REF(inp);
SCTP_INP_WUNLOCK(inp);
stcb = sctp_findassociation_ep_addr(&inp,
(struct sockaddr *)&paddrp->spp_address,
SCTP_INP_WLOCK(inp);
SCTP_INP_DECR_REF(inp);
SCTP_INP_WUNLOCK(inp);
/* Effects the Endpoint */
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("User wants EP level info\n");
#endif /* SCTP_DEBUG */
/* Applies to the specific association */
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("In TCB side\n");
#endif /* SCTP_DEBUG */
paddrp->spp_pathmaxrxt = net->failure_threshold;
/* No destination so return default value */
paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
paddrp->spp_assoc_id = sctp_get_associd(stcb);
SCTP_TCB_UNLOCK(stcb);
/* Use endpoint defaults */
SCTP_INP_RLOCK(inp);
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("In EP level info\n");
#endif /* SCTP_DEBUG */
paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
paddrp->spp_hbinterval = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT];
paddrp->spp_assoc_id = (sctp_assoc_t)0;
SCTP_INP_RUNLOCK(inp);
m->m_len = sizeof(struct sctp_paddrparams);
case SCTP_GET_PEER_ADDR_INFO:
struct sctp_paddrinfo *paddri;
struct sctp_nets *net;
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("GetPEER ADDR_INFO\n");
#endif /* SCTP_DEBUG */
if ((size_t)m->m_len < sizeof(struct sctp_paddrinfo)) {
paddri = mtod(m, struct sctp_paddrinfo *);
if ((((struct sockaddr *)&paddri->spinfo_address)->sa_family == AF_INET) ||
(((struct sockaddr *)&paddri->spinfo_address)->sa_family == AF_INET6)) {
/* Lookup via address */
if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
SCTP_INP_RLOCK(inp);
stcb = LIST_FIRST(&inp->sctp_asoc_list);
SCTP_TCB_LOCK(stcb);
net = sctp_findnet(stcb,
(struct sockaddr *)&paddri->spinfo_address);
SCTP_INP_RUNLOCK(inp);
SCTP_INP_WLOCK(inp);
SCTP_INP_INCR_REF(inp);
SCTP_INP_WUNLOCK(inp);
stcb = sctp_findassociation_ep_addr(&inp,
(struct sockaddr *)&paddri->spinfo_address,
SCTP_INP_WLOCK(inp);
SCTP_INP_DECR_REF(inp);
SCTP_INP_WUNLOCK(inp);
if ((stcb == NULL) || (net == NULL)) {
m->m_len = sizeof(struct sctp_paddrinfo);
paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK|SCTP_ADDR_NOHB);
paddri->spinfo_cwnd = net->cwnd;
/* smoothed RTT from the scaled lastsa/lastsv pair */
paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
paddri->spinfo_rto = net->RTO;
paddri->spinfo_assoc_id = sctp_get_associd(stcb);
SCTP_TCB_UNLOCK(stcb);
/*
 * sctp_optsget (tail, fragment): PCB status, association status, RTO
 * info, assoc params, default send params, init msg, and primary
 * address.  Pattern throughout: resolve stcb (TCP-model first assoc or
 * by assoc id), copy fields out under the TCB lock, set m->m_len.
 */
case SCTP_PCB_STATUS:
struct sctp_pcbinfo *spcb;
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("PCB status\n");
#endif /* SCTP_DEBUG */
if ((size_t)m->m_len < sizeof(struct sctp_pcbinfo)) {
spcb = mtod(m, struct sctp_pcbinfo *);
sctp_fill_pcbinfo(spcb);
m->m_len = sizeof(struct sctp_pcbinfo);
struct sctp_nets *net;
struct sctp_status *sstat;
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("SCTP status\n");
#endif /* SCTP_DEBUG */
if ((size_t)m->m_len < sizeof(struct sctp_status)) {
sstat = mtod(m, struct sctp_status *);
if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
SCTP_INP_RLOCK(inp);
stcb = LIST_FIRST(&inp->sctp_asoc_list);
SCTP_TCB_LOCK(stcb);
SCTP_INP_RUNLOCK(inp);
stcb = sctp_findassociation_ep_asocid(inp, sstat->sstat_assoc_id);
* I think passing the state is fine since
* sctp_constants.h will be available to the user
sstat->sstat_state = stcb->asoc.state;
sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
* We can't include chunks that have been passed
* to the socket layer. Only things in queue.
sstat->sstat_penddata = (stcb->asoc.cnt_on_delivery_queue +
stcb->asoc.cnt_on_reasm_queue +
stcb->asoc.cnt_on_all_streams);
sstat->sstat_instrms = stcb->asoc.streamincnt;
sstat->sstat_outstrms = stcb->asoc.streamoutcnt;
sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc);
memcpy(&sstat->sstat_primary.spinfo_address,
&stcb->asoc.primary_destination->ro._l_addr,
((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len);
net = stcb->asoc.primary_destination;
((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport;
* Again the user can get info from sctp_constants.h
* for what the state of the network is.
sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK;
sstat->sstat_primary.spinfo_cwnd = net->cwnd;
/*
 * NOTE(review): raw lastsa here, while SCTP_GET_PEER_ADDR_INFO
 * reports ((lastsa >> 2) + lastsv) >> 1 — possibly inconsistent
 * scaling; confirm against upstream before changing.
 */
sstat->sstat_primary.spinfo_srtt = net->lastsa;
sstat->sstat_primary.spinfo_rto = net->RTO;
sstat->sstat_primary.spinfo_mtu = net->mtu;
sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb);
SCTP_TCB_UNLOCK(stcb);
m->m_len = sizeof(*sstat);
struct sctp_rtoinfo *srto;
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("RTO Info\n");
#endif /* SCTP_DEBUG */
if ((size_t)m->m_len < sizeof(struct sctp_rtoinfo)) {
srto = mtod(m, struct sctp_rtoinfo *);
if (srto->srto_assoc_id == 0) {
/* Endpoint only please */
SCTP_INP_RLOCK(inp);
srto->srto_initial = inp->sctp_ep.initial_rto;
srto->srto_max = inp->sctp_ep.sctp_maxrto;
srto->srto_min = inp->sctp_ep.sctp_minrto;
SCTP_INP_RUNLOCK(inp);
if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
SCTP_INP_RLOCK(inp);
stcb = LIST_FIRST(&inp->sctp_asoc_list);
SCTP_TCB_LOCK(stcb);
SCTP_INP_RUNLOCK(inp);
stcb = sctp_findassociation_ep_asocid(inp, srto->srto_assoc_id);
srto->srto_initial = stcb->asoc.initial_rto;
srto->srto_max = stcb->asoc.maxrto;
srto->srto_min = stcb->asoc.minrto;
SCTP_TCB_UNLOCK(stcb);
m->m_len = sizeof(*srto);
case SCTP_ASSOCINFO:
struct sctp_assocparams *sasoc;
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("Associnfo\n");
#endif /* SCTP_DEBUG */
if ((size_t)m->m_len < sizeof(struct sctp_assocparams)) {
sasoc = mtod(m, struct sctp_assocparams *);
if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
SCTP_INP_RLOCK(inp);
stcb = LIST_FIRST(&inp->sctp_asoc_list);
SCTP_TCB_LOCK(stcb);
SCTP_INP_RUNLOCK(inp);
if ((sasoc->sasoc_assoc_id) && (stcb == NULL)) {
stcb = sctp_findassociation_ep_asocid(inp,
sasoc->sasoc_assoc_id);
sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
sasoc->sasoc_cookie_life = stcb->asoc.cookie_life;
SCTP_TCB_UNLOCK(stcb);
SCTP_INP_RLOCK(inp);
sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
sasoc->sasoc_number_peer_destinations = 0;
sasoc->sasoc_peer_rwnd = 0;
sasoc->sasoc_local_rwnd = ssb_space(&inp->sctp_socket->so_rcv);
sasoc->sasoc_cookie_life = inp->sctp_ep.def_cookie_life;
SCTP_INP_RUNLOCK(inp);
m->m_len = sizeof(*sasoc);
case SCTP_DEFAULT_SEND_PARAM:
struct sctp_sndrcvinfo *s_info;
if (m->m_len != sizeof(struct sctp_sndrcvinfo)) {
s_info = mtod(m, struct sctp_sndrcvinfo *);
if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
SCTP_INP_RLOCK(inp);
stcb = LIST_FIRST(&inp->sctp_asoc_list);
SCTP_TCB_LOCK(stcb);
SCTP_INP_RUNLOCK(inp);
stcb = sctp_findassociation_ep_asocid(inp, s_info->sinfo_assoc_id);
/* whole-struct copy of the association's default send info */
*s_info = stcb->asoc.def_send;
SCTP_TCB_UNLOCK(stcb);
m->m_len = sizeof(*s_info);
struct sctp_initmsg *sinit;
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("initmsg\n");
#endif /* SCTP_DEBUG */
if ((size_t)m->m_len < sizeof(struct sctp_initmsg)) {
sinit = mtod(m, struct sctp_initmsg *);
SCTP_INP_RLOCK(inp);
sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count;
sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome;
sinit->sinit_max_attempts = inp->sctp_ep.max_init_times;
sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max;
SCTP_INP_RUNLOCK(inp);
m->m_len = sizeof(*sinit);
case SCTP_PRIMARY_ADDR:
/* we allow a "get" operation on this */
struct sctp_setprim *ssp;
if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
kprintf("setprimary\n");
#endif /* SCTP_DEBUG */
if ((size_t)m->m_len < sizeof(struct sctp_setprim)) {
ssp = mtod(m, struct sctp_setprim *);
if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
SCTP_INP_RLOCK(inp);
stcb = LIST_FIRST(&inp->sctp_asoc_list);
SCTP_TCB_LOCK(stcb);
SCTP_INP_RUNLOCK(inp);
stcb = sctp_findassociation_ep_asocid(inp, ssp->ssp_assoc_id);
/* one last shot, try it by the address in */
struct sctp_nets *net;
SCTP_INP_WLOCK(inp);
SCTP_INP_INCR_REF(inp);
SCTP_INP_WUNLOCK(inp);
stcb = sctp_findassociation_ep_addr(&inp,
(struct sockaddr *)&ssp->ssp_addr,
SCTP_INP_WLOCK(inp);
SCTP_INP_DECR_REF(inp);
SCTP_INP_WUNLOCK(inp);
/* simply copy out the sockaddr_storage... */
memcpy(&ssp->ssp_addr,
&stcb->asoc.primary_destination->ro._l_addr,
((struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr)->sa_len);
SCTP_TCB_UNLOCK(stcb);
m->m_len = sizeof(*ssp);
error = ENOPROTOOPT;
} /* end switch (sopt->sopt_name) */
/*
 * sctp_optsset() - setsockopt() handler for IPPROTO_SCTP level options.
 *
 * NOTE(review): this listing has elided lines (the embedded source line
 * numbers at the left are non-contiguous), so several branches, braces and
 * error paths are not visible here.  Comments below describe only what the
 * visible lines establish; anything else is flagged as an assumption.
 *
 * General shape: look up the option value in the mbuf 'm', validate its
 * length against the expected structure size, then apply it either to the
 * endpoint (inp, under SCTP_INP_WLOCK) or to a specific association
 * (stcb, under SCTP_TCB_LOCK) found via the connected-socket shortcut,
 * an assoc id, or an address lookup.
 */
2686 sctp_optsset(struct socket *so,
2689 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
2696 int error, *mopt, set_opt;
2698 struct sctp_tcb *stcb = NULL;
2699 struct sctp_inpcb *inp;
2703 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2704 kprintf("optsset:MP is NULL EINVAL\n");
2706 #endif /* SCTP_DEBUG */
2713 inp = (struct sctp_inpcb *)so->so_pcb;
/*
 * Boolean-flag options: each maps to a bit in inp->sctp_flags that is
 * set or cleared below depending on the copied-in int value.
 */
2720 case SCTP_AUTOCLOSE:
2721 case SCTP_AUTO_ASCONF:
2722 case SCTP_DISABLE_FRAGMENTS:
2723 case SCTP_I_WANT_MAPPED_V4_ADDR:
2724 /* copy in the option value */
2725 if ((size_t)m->m_len < sizeof(int)) {
2729 mopt = mtod(m, int *);
2734 case SCTP_DISABLE_FRAGMENTS:
2735 set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT;
2737 case SCTP_AUTO_ASCONF:
2738 set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
2741 case SCTP_I_WANT_MAPPED_V4_ADDR:
/* mapped-v4 only makes sense on a v6-bound endpoint */
2742 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2743 set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
2749 set_opt = SCTP_PCB_FLAGS_NODELAY;
2751 case SCTP_AUTOCLOSE:
2752 set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
2754 * The value is in ticks.
2755 * Note this does not effect old associations, only
2758 inp->sctp_ep.auto_close_time = (*mopt * hz);
2761 SCTP_INP_WLOCK(inp);
2763 inp->sctp_flags |= set_opt;
2765 inp->sctp_flags &= ~set_opt;
2767 SCTP_INP_WUNLOCK(inp);
2769 case SCTP_MY_PUBLIC_KEY: /* set my public key */
2770 case SCTP_SET_AUTH_CHUNKS: /* set the authenticated chunks required */
2771 case SCTP_SET_AUTH_SECRET: /* set the actual secret for the endpoint */
2772 /* not supported yet and until we refine the draft */
2776 case SCTP_CLR_STAT_LOG:
2777 #ifdef SCTP_STAT_LOGGING
2778 sctp_clr_stat_log();
2783 case SCTP_DELAYED_ACK_TIME:
2786 if ((size_t)m->m_len < sizeof(int32_t)) {
2790 tm = mtod(m, int32_t *);
/* delayed-ack must lie in [10ms, 500ms] */
2792 if ((*tm < 10) || (*tm > 500)) {
2793 /* can't be smaller than 10ms */
2794 /* MUST NOT be larger than 500ms */
2798 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(*tm);
2801 case SCTP_RESET_STREAMS:
2803 struct sctp_stream_reset *strrst;
2804 uint8_t two_way, not_peer;
2806 if ((size_t)m->m_len < sizeof(struct sctp_stream_reset)) {
2810 strrst = mtod(m, struct sctp_stream_reset *);
/* find the association: connected shortcut first, then by assoc id */
2812 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2813 SCTP_INP_RLOCK(inp);
2814 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2816 SCTP_TCB_LOCK(stcb);
2817 SCTP_INP_RUNLOCK(inp);
2819 stcb = sctp_findassociation_ep_asocid(inp, strrst->strrst_assoc_id);
2824 if (stcb->asoc.peer_supports_strreset == 0) {
2825 /* Peer does not support it,
2826 * we return protocol not supported since
2827 * this is true for this feature and this
2828 * peer, not the socket request in general.
2830 error = EPROTONOSUPPORT;
2831 SCTP_TCB_UNLOCK(stcb);
2835 /* Having re-thought this code I added as I write the I-D there
2836 * is NO need for it. The peer, if we are requesting a stream-reset
2837 * will send a request to us but will itself do what we do, take
2838 * and copy off the "reset information" we send and queue TSN's
2839 * larger than the send-next in our response message. Thus they
2842 /* if (stcb->asoc.sending_seq != (stcb->asoc.last_acked_seq + 1)) {*/
2843 /* Must have all sending data ack'd before we
2844 * start this procedure. This is a bit restrictive
2845 * and we SHOULD work on changing this so ONLY the
2846 * streams being RESET get held up. So, a reset-all
2847 * would require this.. but a reset specific just
2848 * needs to be sure that the ones being reset have
2849 * nothing on the send_queue. For now we will
2850 * skip this more detailed method and do a course
2851 * way.. i.e. nothing pending ... for future FIX ME!
/* only one outstanding stream-reset request at a time */
2857 if (stcb->asoc.stream_reset_outstanding) {
2859 SCTP_TCB_UNLOCK(stcb);
/* map the request flags to the two_way/not_peer arguments below */
2862 if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) {
2865 } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) {
2868 } else if (strrst->strrst_flags == SCTP_RESET_BOTH) {
2873 SCTP_TCB_UNLOCK(stcb);
2876 sctp_send_str_reset_req(stcb, strrst->strrst_num_streams,
2877 strrst->strrst_list, two_way, not_peer);
2879 sctp_chunk_output(inp, stcb, 12);
2880 SCTP_TCB_UNLOCK(stcb);
2885 case SCTP_RESET_PEGS:
/* zero the global SCTP statistics counters */
2886 memset(sctp_pegs, 0, sizeof(sctp_pegs));
2889 case SCTP_CONNECT_X:
2890 if ((size_t)m->m_len < (sizeof(int) + sizeof(struct sockaddr_in))) {
2894 error = sctp_do_connect_x(so, inp, m, p, 0);
2897 case SCTP_CONNECT_X_DELAYED:
2898 if ((size_t)m->m_len < (sizeof(int) + sizeof(struct sockaddr_in))) {
/* last arg 1 = delay the INIT until SCTP_CONNECT_X_COMPLETE */
2902 error = sctp_do_connect_x(so, inp, m, p, 1);
2905 case SCTP_CONNECT_X_COMPLETE:
2907 struct sockaddr *sa;
2908 struct sctp_nets *net;
2909 if ((size_t)m->m_len < sizeof(struct sockaddr_in)) {
2913 sa = mtod(m, struct sockaddr *);
2915 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2916 SCTP_INP_RLOCK(inp);
2917 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2919 SCTP_TCB_LOCK(stcb);
2920 net = sctp_findnet(stcb, sa);
2922 SCTP_INP_RUNLOCK(inp);
/* hold an inp ref across the address lookup (it can unlock inp) */
2924 SCTP_INP_WLOCK(inp);
2925 SCTP_INP_INCR_REF(inp);
2926 SCTP_INP_WUNLOCK(inp);
2927 stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL);
2929 SCTP_INP_WLOCK(inp);
2930 SCTP_INP_DECR_REF(inp);
2931 SCTP_INP_WUNLOCK(inp);
/* fire the deferred INIT exactly once for a delayed connect-x */
2939 if (stcb->asoc.delayed_connection == 1) {
2940 stcb->asoc.delayed_connection = 0;
2941 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
2942 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
2943 sctp_send_initiate(inp, stcb);
2945 /* already expired or did not use delayed connectx */
2948 SCTP_TCB_UNLOCK(stcb);
/* (case label elided) set endpoint max-burst from a u_int8_t value */
2954 SCTP_INP_WLOCK(inp);
2955 burst = mtod(m, u_int8_t *);
2957 inp->sctp_ep.max_burst = *burst;
2959 SCTP_INP_WUNLOCK(inp);
/*
 * (case label elided) set the fragmentation point; 'segsize' is the
 * user payload size, 'ovh' the per-PDU overhead for the address family.
 */
2966 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2967 ovh = SCTP_MED_OVERHEAD;
2969 ovh = SCTP_MED_V4_OVERHEAD;
2971 segsize = mtod(m, u_int32_t *);
2976 SCTP_INP_WLOCK(inp);
2977 inp->sctp_frag_point = (*segsize+ovh);
2978 if (inp->sctp_frag_point < MHLEN) {
2979 inp->sctp_frag_point = MHLEN;
2981 SCTP_INP_WUNLOCK(inp);
2984 case SCTP_SET_DEBUG_LEVEL:
2988 if ((size_t)m->m_len < sizeof(u_int32_t)) {
2992 level = mtod(m, u_int32_t *);
2994 sctp_debug_on = (*level & (SCTP_DEBUG_ALL |
2996 kprintf("SETTING DEBUG LEVEL to %x\n",
2997 (u_int)sctp_debug_on);
3002 #endif /* SCTP_DEBUG */
/* (case label elided; presumably SCTP_EVENTS) toggle per-event
 * notification flags on the endpoint from sctp_event_subscribe. */
3006 struct sctp_event_subscribe *events;
3007 if ((size_t)m->m_len < sizeof(struct sctp_event_subscribe)) {
3011 SCTP_INP_WLOCK(inp);
3012 events = mtod(m, struct sctp_event_subscribe *);
3013 if (events->sctp_data_io_event) {
3014 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVDATAIOEVNT;
3016 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVDATAIOEVNT;
3019 if (events->sctp_association_event) {
3020 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVASSOCEVNT;
3022 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVASSOCEVNT;
3025 if (events->sctp_address_event) {
3026 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVPADDREVNT;
3028 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVPADDREVNT;
3031 if (events->sctp_send_failure_event) {
3032 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVSENDFAILEVNT;
3034 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVSENDFAILEVNT;
3037 if (events->sctp_peer_error_event) {
3038 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVPEERERR;
3040 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVPEERERR;
3043 if (events->sctp_shutdown_event) {
3044 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT;
3046 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT;
3049 if (events->sctp_partial_delivery_event) {
3050 inp->sctp_flags |= SCTP_PCB_FLAGS_PDAPIEVNT;
3052 inp->sctp_flags &= ~SCTP_PCB_FLAGS_PDAPIEVNT;
3055 if (events->sctp_adaption_layer_event) {
3056 inp->sctp_flags |= SCTP_PCB_FLAGS_ADAPTIONEVNT;
3058 inp->sctp_flags &= ~SCTP_PCB_FLAGS_ADAPTIONEVNT;
3061 if (events->sctp_stream_reset_events) {
3062 inp->sctp_flags |= SCTP_PCB_FLAGS_STREAM_RESETEVNT;
3064 inp->sctp_flags &= ~SCTP_PCB_FLAGS_STREAM_RESETEVNT;
3066 SCTP_INP_WUNLOCK(inp);
3070 case SCTP_ADAPTION_LAYER:
3072 struct sctp_setadaption *adap_bits;
3073 if ((size_t)m->m_len < sizeof(struct sctp_setadaption)) {
3077 SCTP_INP_WLOCK(inp);
3078 adap_bits = mtod(m, struct sctp_setadaption *);
3079 inp->sctp_ep.adaption_layer_indicator = adap_bits->ssb_adaption_ind;
3080 SCTP_INP_WUNLOCK(inp);
3083 case SCTP_SET_INITIAL_DBG_SEQ:
3086 if ((size_t)m->m_len < sizeof(u_int32_t)) {
3090 SCTP_INP_WLOCK(inp);
3091 vvv = mtod(m, u_int32_t *);
3092 inp->sctp_ep.initial_sequence_debug = *vvv;
3093 SCTP_INP_WUNLOCK(inp);
3096 case SCTP_DEFAULT_SEND_PARAM:
3098 struct sctp_sndrcvinfo *s_info;
/* note: exact-size check here (!=), unlike the < checks elsewhere */
3100 if (m->m_len != sizeof(struct sctp_sndrcvinfo)) {
3104 s_info = mtod(m, struct sctp_sndrcvinfo *);
3106 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3107 SCTP_INP_RLOCK(inp);
3108 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3110 SCTP_TCB_LOCK(stcb);
3111 SCTP_INP_RUNLOCK(inp);
3113 stcb = sctp_findassociation_ep_asocid(inp, s_info->sinfo_assoc_id);
3119 /* Validate things */
3120 if (s_info->sinfo_stream > stcb->asoc.streamoutcnt) {
3121 SCTP_TCB_UNLOCK(stcb);
3125 /* Mask off the flags that are allowed */
3126 s_info->sinfo_flags = (s_info->sinfo_flags &
3127 (MSG_UNORDERED | MSG_ADDR_OVER |
3128 MSG_PR_SCTP_TTL | MSG_PR_SCTP_BUF));
/* Copy it in as the association default send parameters */
3130 stcb->asoc.def_send = *s_info;
3131 SCTP_TCB_UNLOCK(stcb);
3134 case SCTP_PEER_ADDR_PARAMS:
3136 struct sctp_paddrparams *paddrp;
3137 struct sctp_nets *net;
3138 if ((size_t)m->m_len < sizeof(struct sctp_paddrparams)) {
3142 paddrp = mtod(m, struct sctp_paddrparams *);
3144 if (paddrp->spp_assoc_id) {
3145 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3146 SCTP_INP_RLOCK(inp);
3147 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3149 SCTP_TCB_LOCK(stcb);
3150 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
3152 SCTP_INP_RUNLOCK(inp);
3154 stcb = sctp_findassociation_ep_asocid(inp, paddrp->spp_assoc_id);
/* fall back to lookup by the supplied address if no assoc id hit */
3161 if ((stcb == NULL) &&
3162 ((((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET) ||
3163 (((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET6))) {
3164 /* Lookup via address */
3165 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3166 SCTP_INP_RLOCK(inp);
3167 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3169 SCTP_TCB_LOCK(stcb);
3170 net = sctp_findnet(stcb,
3171 (struct sockaddr *)&paddrp->spp_address);
3173 SCTP_INP_RUNLOCK(inp);
3175 SCTP_INP_WLOCK(inp);
3176 SCTP_INP_INCR_REF(inp);
3177 SCTP_INP_WUNLOCK(inp);
3178 stcb = sctp_findassociation_ep_addr(&inp,
3179 (struct sockaddr *)&paddrp->spp_address,
3182 SCTP_INP_WLOCK(inp);
3183 SCTP_INP_DECR_REF(inp);
3184 SCTP_INP_WUNLOCK(inp);
3188 /* Effects the Endpoint */
3192 /* Applies to the specific association */
3193 if (paddrp->spp_pathmaxrxt) {
/* per-destination threshold if 'net' was found, else assoc default */
3195 if (paddrp->spp_pathmaxrxt)
3196 net->failure_threshold = paddrp->spp_pathmaxrxt;
3198 if (paddrp->spp_pathmaxrxt)
3199 stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt;
/* spp_hbinterval: 0 = disable HB, 0xffffffff = send HB now, else set delay */
3202 if ((paddrp->spp_hbinterval != 0) && (paddrp->spp_hbinterval != 0xffffffff)) {
3206 net->dest_state &= ~SCTP_ADDR_NOHB;
3208 old = stcb->asoc.heart_beat_delay;
3209 stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval;
3211 /* Turn back on the timer */
3212 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3215 } else if (paddrp->spp_hbinterval == 0xffffffff) {
3217 sctp_send_hb(stcb, 1, net);
3220 /* off on association */
3221 if (stcb->asoc.heart_beat_delay) {
3222 int cnt_of_unconf = 0;
3223 struct sctp_nets *lnet;
3224 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
3225 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
3229 /* stop the timer ONLY if we have no unconfirmed addresses
3231 if (cnt_of_unconf == 0)
3232 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3234 stcb->asoc.heart_beat_delay = 0;
3236 net->dest_state |= SCTP_ADDR_NOHB;
3239 SCTP_TCB_UNLOCK(stcb);
3241 /* Use endpoint defaults */
3242 SCTP_INP_WLOCK(inp);
3243 if (paddrp->spp_pathmaxrxt)
3244 inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt;
3245 if (paddrp->spp_hbinterval != SCTP_ISSUE_HB)
3246 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = paddrp->spp_hbinterval;
3247 SCTP_INP_WUNLOCK(inp);
/* (case label elided; presumably SCTP_RTOINFO) set RTO bounds.
 * Values appear to be in ms; anything <= 10 is ignored. */
3253 struct sctp_rtoinfo *srto;
3254 if ((size_t)m->m_len < sizeof(struct sctp_rtoinfo)) {
3258 srto = mtod(m, struct sctp_rtoinfo *);
3259 if (srto->srto_assoc_id == 0) {
3260 SCTP_INP_WLOCK(inp);
3261 /* If we have a null asoc, its default for the endpoint */
3262 if (srto->srto_initial > 10)
3263 inp->sctp_ep.initial_rto = srto->srto_initial;
3264 if (srto->srto_max > 10)
3265 inp->sctp_ep.sctp_maxrto = srto->srto_max;
3266 if (srto->srto_min > 10)
3267 inp->sctp_ep.sctp_minrto = srto->srto_min;
3268 SCTP_INP_WUNLOCK(inp);
3271 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3272 SCTP_INP_RLOCK(inp);
3273 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3275 SCTP_TCB_LOCK(stcb);
3276 SCTP_INP_RUNLOCK(inp);
3278 stcb = sctp_findassociation_ep_asocid(inp, srto->srto_assoc_id);
3283 /* Set in ms we hope :-) */
3284 if (srto->srto_initial > 10)
3285 stcb->asoc.initial_rto = srto->srto_initial;
3286 if (srto->srto_max > 10)
3287 stcb->asoc.maxrto = srto->srto_max;
3288 if (srto->srto_min > 10)
3289 stcb->asoc.minrto = srto->srto_min;
3290 SCTP_TCB_UNLOCK(stcb);
3293 case SCTP_ASSOCINFO:
3295 struct sctp_assocparams *sasoc;
3297 if ((size_t)m->m_len < sizeof(struct sctp_assocparams)) {
3301 sasoc = mtod(m, struct sctp_assocparams *);
3302 if (sasoc->sasoc_assoc_id) {
3303 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3304 SCTP_INP_RLOCK(inp);
3305 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3307 SCTP_TCB_LOCK(stcb);
3308 SCTP_INP_RUNLOCK(inp);
3310 stcb = sctp_findassociation_ep_asocid(inp,
3311 sasoc->sasoc_assoc_id);
3321 if (sasoc->sasoc_asocmaxrxt)
3322 stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
/* NOTE(review): a setsockopt path writing back into the user struct
 * (number_peer_destinations / rwnds) looks like get-side behavior;
 * also the cookie_life update is gated on the OLD value, unlike the
 * endpoint branch below which gates on the NEW value — verify. */
3323 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
3324 sasoc->sasoc_peer_rwnd = 0;
3325 sasoc->sasoc_local_rwnd = 0;
3326 if (stcb->asoc.cookie_life)
3327 stcb->asoc.cookie_life = sasoc->sasoc_cookie_life;
3328 SCTP_TCB_UNLOCK(stcb);
3330 SCTP_INP_WLOCK(inp);
3331 if (sasoc->sasoc_asocmaxrxt)
3332 inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt;
3333 sasoc->sasoc_number_peer_destinations = 0;
3334 sasoc->sasoc_peer_rwnd = 0;
3335 sasoc->sasoc_local_rwnd = 0;
3336 if (sasoc->sasoc_cookie_life)
3337 inp->sctp_ep.def_cookie_life = sasoc->sasoc_cookie_life;
3338 SCTP_INP_WUNLOCK(inp);
/* (case label elided; presumably SCTP_INITMSG) default INIT params */
3344 struct sctp_initmsg *sinit;
3346 if ((size_t)m->m_len < sizeof(struct sctp_initmsg)) {
3350 sinit = mtod(m, struct sctp_initmsg *);
3351 SCTP_INP_WLOCK(inp);
3352 if (sinit->sinit_num_ostreams)
3353 inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams;
3355 if (sinit->sinit_max_instreams)
3356 inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams;
3358 if (sinit->sinit_max_attempts)
3359 inp->sctp_ep.max_init_times = sinit->sinit_max_attempts;
3361 if (sinit->sinit_max_init_timeo > 10)
3362 /* We must be at least a 100ms (we set in ticks) */
3363 inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo;
3364 SCTP_INP_WUNLOCK(inp);
3367 case SCTP_PRIMARY_ADDR:
3369 struct sctp_setprim *spa;
3370 struct sctp_nets *net, *lnet;
3371 if ((size_t)m->m_len < sizeof(struct sctp_setprim)) {
3375 spa = mtod(m, struct sctp_setprim *);
3377 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3378 SCTP_INP_RLOCK(inp);
3379 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3381 SCTP_TCB_LOCK(stcb);
3386 SCTP_INP_RUNLOCK(inp);
3388 stcb = sctp_findassociation_ep_asocid(inp, spa->ssp_assoc_id);
3391 SCTP_INP_WLOCK(inp);
3392 SCTP_INP_INCR_REF(inp);
3393 SCTP_INP_WUNLOCK(inp);
3394 stcb = sctp_findassociation_ep_addr(&inp,
3395 (struct sockaddr *)&spa->ssp_addr,
3398 SCTP_INP_WLOCK(inp);
3399 SCTP_INP_DECR_REF(inp);
3400 SCTP_INP_WUNLOCK(inp);
3405 /* find the net, associd or connected lookup type */
3406 net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr);
3408 SCTP_TCB_UNLOCK(stcb);
/* switch primary only if different and the target is confirmed */
3413 if ((net != stcb->asoc.primary_destination) &&
3414 (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
3415 /* Ok we need to set it */
3416 lnet = stcb->asoc.primary_destination;
3417 lnet->next_tsn_at_change = net->next_tsn_at_change = stcb->asoc.sending_seq;
3418 if (sctp_set_primary_addr(stcb,
3421 if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3422 net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH;
3424 net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY;
3427 SCTP_TCB_UNLOCK(stcb);
3431 case SCTP_SET_PEER_PRIMARY_ADDR:
3433 struct sctp_setpeerprim *sspp;
3434 if ((size_t)m->m_len < sizeof(struct sctp_setpeerprim)) {
3438 sspp = mtod(m, struct sctp_setpeerprim *);
3441 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3442 SCTP_INP_RLOCK(inp);
3443 stcb = LIST_FIRST(&inp->sctp_asoc_list);
/* NOTE(review): this arm UNLOCKs the tcb right after fetching it,
 * whereas the parallel lookups above take SCTP_TCB_LOCK — looks
 * inconsistent, but intervening lines are elided; verify upstream. */
3445 SCTP_TCB_UNLOCK(stcb);
3446 SCTP_INP_RUNLOCK(inp);
3448 stcb = sctp_findassociation_ep_asocid(inp, sspp->sspp_assoc_id);
3453 if (sctp_set_primary_ip_address_sa(stcb, (struct sockaddr *)&sspp->sspp_addr) != 0) {
3456 SCTP_TCB_UNLOCK(stcb);
3459 case SCTP_BINDX_ADD_ADDR:
3461 struct sctp_getaddresses *addrs;
3462 struct sockaddr *addr_touse;
3463 struct sockaddr_in sin;
3464 /* see if we're bound all already! */
3465 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3469 if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
3473 addrs = mtod(m, struct sctp_getaddresses *);
3474 addr_touse = addrs->addr;
/* unwrap a v4-mapped v6 address into a plain sockaddr_in */
3475 if (addrs->addr->sa_family == AF_INET6) {
3476 struct sockaddr_in6 *sin6;
3477 sin6 = (struct sockaddr_in6 *)addr_touse;
3478 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
3479 in6_sin6_2_sin(&sin, sin6);
3480 addr_touse = (struct sockaddr *)&sin;
3483 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
3485 /* Can't get proc for Net/Open BSD */
3489 error = sctp_inpcb_bind(so, addr_touse, p);
3492 /* No locks required here since bind and mgmt_ep_sa all
3493 * do their own locking. If we do something for the FIX:
3494 * below we may need to lock in that case.
3496 if (addrs->sget_assoc_id == 0) {
3497 /* add the address */
3498 struct sctp_inpcb *lep;
3499 ((struct sockaddr_in *)addr_touse)->sin_port = inp->sctp_lport;
3500 lep = sctp_pcb_findep(addr_touse, 1, 0);
3502 /* We must decrement the refcount
3503 * since we have the ep already and
3504 * are binding. No remove going on
3507 SCTP_INP_WLOCK(inp);
3508 SCTP_INP_DECR_REF(inp);
3509 SCTP_INP_WUNLOCK(inp);
3512 /* already bound to it.. ok */
3514 } else if (lep == NULL) {
3515 ((struct sockaddr_in *)addr_touse)->sin_port = 0;
3516 error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
3517 SCTP_ADD_IP_ADDRESS);
3519 error = EADDRNOTAVAIL;
3525 /* FIX: decide whether we allow assoc based bindx */
3529 case SCTP_BINDX_REM_ADDR:
3531 struct sctp_getaddresses *addrs;
3532 struct sockaddr *addr_touse;
3533 struct sockaddr_in sin;
3534 /* see if we're bound all already! */
3535 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3539 if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
3543 addrs = mtod(m, struct sctp_getaddresses *);
3544 addr_touse = addrs->addr;
3545 if (addrs->addr->sa_family == AF_INET6) {
3546 struct sockaddr_in6 *sin6;
3547 sin6 = (struct sockaddr_in6 *)addr_touse;
3548 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
3549 in6_sin6_2_sin(&sin, sin6);
3550 addr_touse = (struct sockaddr *)&sin;
3553 /* No lock required mgmt_ep_sa does its own locking. If
3554 * the FIX: below is ever changed we may need to
3555 * lock before calling association level binding.
3557 if (addrs->sget_assoc_id == 0) {
3558 /* delete the address */
/* NOTE(review): return value of sctp_addr_mgmt_ep_sa() is ignored
 * here but captured in the ADD case above — confirm intentional. */
3559 sctp_addr_mgmt_ep_sa(inp, addr_touse,
3560 SCTP_DEL_IP_ADDRESS);
3562 /* FIX: decide whether we allow assoc based bindx */
3567 error = ENOPROTOOPT;
3569 } /* end switch (opt) */
3574 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
/*
 * sctp_ctloutput() - sockopt entry point (FreeBSD/APPLE/DragonFly shape).
 * Copies the option value into a temporary mbuf (capped at one cluster),
 * dispatches to sctp_optsset()/sctp_optsget(), and for GET copies the
 * result back to the user via sooptcopyout().
 * Non-SCTP levels are passed down to ip_ctloutput()/ip6_ctloutput().
 * NOTE(review): lines are elided in this listing (e.g. the null-inp check
 * around 3586 and the m_free path after 3644 are only partially visible).
 */
3576 sctp_ctloutput(struct socket *so, struct sockopt *sopt)
3578 struct mbuf *m = NULL;
3579 struct sctp_inpcb *inp;
3582 inp = (struct sctp_inpcb *)so->so_pcb;
3586 /* I made the same as TCP since we are not setup? */
3587 return (ECONNRESET);
3589 if (sopt->sopt_level != IPPROTO_SCTP) {
3590 /* wrong proto level... send back up to IP */
3592 if (INP_CHECK_SOCKAF(so, AF_INET6))
3593 error = ip6_ctloutput(so, sopt);
3596 error = ip_ctloutput(so, sopt);
3600 if (sopt->sopt_valsize > MCLBYTES) {
3602 * Restrict us down to a cluster size, that's all we can
3603 * pass either way...
3605 sopt->sopt_valsize = MCLBYTES;
3607 if (sopt->sopt_valsize) {
3609 m = m_get(MB_WAIT, MT_DATA);
/* need an mbuf cluster when the value exceeds a plain mbuf's data area */
3610 if (sopt->sopt_valsize > MLEN) {
3611 MCLGET(m, MB_DONTWAIT);
3612 if ((m->m_flags & M_EXT) == 0) {
3618 error = sooptcopyin(sopt, mtod(m, caddr_t), sopt->sopt_valsize,
3619 sopt->sopt_valsize);
3624 m->m_len = sopt->sopt_valsize;
3626 if (sopt->sopt_dir == SOPT_SET) {
3627 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
3628 error = sctp_optsset(so, sopt->sopt_name, &m, sopt->sopt_td);
3630 error = sctp_optsset(so, sopt->sopt_name, &m, sopt->sopt_p);
3632 } else if (sopt->sopt_dir == SOPT_GET) {
3633 #if (defined (__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
3634 error = sctp_optsget(so, sopt->sopt_name, &m, sopt->sopt_td)&#59;
3636 error = sctp_optsget(so, sopt->sopt_name, &m, sopt->sopt_p);
3641 if ( (error == 0) && (m != NULL)) {
/* GET succeeded: hand the (possibly rewritten) value back to the user */
3642 error = sooptcopyout(sopt, mtod(m, caddr_t), m->m_len);
3644 } else if (m != NULL) {
3653 /* NetBSD and OpenBSD */
/*
 * sctp_ctloutput() - PRCO-style sockopt entry point (NetBSD/OpenBSD shape).
 * Resolves the per-family pcb (inet via sotoinpcb, inet6 via sotoin6pcb),
 * routes non-SCTP levels to ip_ctloutput()/ip6_ctloutput(), and otherwise
 * dispatches PRCO_SETOPT/PRCO_GETOPT to sctp_optsset()/sctp_optsget().
 * NOTE(review): several lines are elided; the error path around 3692-3694
 * (freeing *mp on failure and returning ECONNRESET) is only partly visible.
 */
3655 sctp_ctloutput(int op, struct socket *so, int level, int optname,
3661 struct in6pcb *in6p;
3663 int family; /* family of the socket */
3665 family = so->so_proto->pr_domain->dom_family;
3670 inp = sotoinpcb(so);
3678 in6p = sotoin6pcb(so);
3683 return EAFNOSUPPORT;
/* no pcb of either family: connection is not set up */
3688 if (inp == NULL && in6p == NULL)
3692 if (op == PRCO_SETOPT && *mp)
3694 return (ECONNRESET);
3696 if (level != IPPROTO_SCTP) {
3699 error = ip_ctloutput(op, so, level, optname, mp);
3703 error = ip6_ctloutput(op, so, level, optname, mp);
3710 /* Ok if we reach here it is a SCTP option we hope */
3711 if (op == PRCO_SETOPT) {
3712 error = sctp_optsset(so, optname, mp, NULL);
3715 } else if (op == PRCO_GETOPT) {
3716 error = sctp_optsget(so, optname, mp, NULL);
3727 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
/*
 * sctp_connect() - connect(2) handler.  Binds an ephemeral port if the
 * endpoint is still unbound, rejects a second connect on a one-to-one
 * (TCP-model) socket that is already connected, refuses a duplicate
 * association to the same peer, then allocates a new association and
 * sends the INIT chunk.
 * Locking: takes SCTP_ASOC_CREATE_LOCK + SCTP_INP_WLOCK up front; both
 * are released on every visible exit path.
 * NOTE(review): elided lines hide some error returns (e.g. after the
 * sctp_aloc_assoc failure at 3823).
 */
3728 sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
3731 #if defined(__FreeBSD__) || defined(__APPLE__)
3732 sctp_connect(struct socket *so, struct sockaddr *addr, struct proc *p)
3735 sctp_connect(struct socket *so, struct mbuf *nam, struct proc *p)
3737 struct sockaddr *addr = mtod(nam, struct sockaddr *);
3741 struct sctp_inpcb *inp;
3742 struct sctp_tcb *stcb;
3745 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
3746 kprintf("Connect called in SCTP to ");
3747 sctp_print_address(addr);
3748 kprintf("Port %d\n", ntohs(((struct sockaddr_in *)addr)->sin_port));
3750 #endif /* SCTP_DEBUG */
3752 inp = (struct sctp_inpcb *)so->so_pcb;
3755 /* I made the same as TCP since we are not setup? */
3756 return (ECONNRESET);
3758 SCTP_ASOC_CREATE_LOCK(inp);
3759 SCTP_INP_WLOCK(inp);
/* NOTE(review): both sides of this || test the SAME flag
 * (SCTP_PCB_FLAGS_SOCKET_GONE) — looks like a copy/paste slip where the
 * second was probably meant to be SOCKET_ALLGONE; verify upstream. */
3760 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3761 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3762 /* Should I really unlock ? */
3763 SCTP_INP_WUNLOCK(inp);
3764 SCTP_ASOC_CREATE_UNLOCK(inp);
/* cannot connect to a v6 peer from a v4-only-bound endpoint */
3769 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
3770 (addr->sa_family == AF_INET6)) {
3771 SCTP_INP_WUNLOCK(inp);
3772 SCTP_ASOC_CREATE_UNLOCK(inp);
3777 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
3778 SCTP_PCB_FLAGS_UNBOUND) {
3779 /* Bind a ephemeral port */
3780 SCTP_INP_WUNLOCK(inp);
3781 error = sctp_inpcb_bind(so, NULL, p);
3783 SCTP_ASOC_CREATE_UNLOCK(inp);
3787 SCTP_INP_WLOCK(inp);
3789 /* Now do we connect? */
3790 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3791 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
3792 /* We are already connected AND the TCP model */
3794 SCTP_INP_WUNLOCK(inp);
3795 SCTP_ASOC_CREATE_UNLOCK(inp);
3796 return (EADDRINUSE);
3798 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3799 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3801 SCTP_TCB_UNLOCK(stcb);
3802 SCTP_INP_WUNLOCK(inp);
/* hold an inp ref while the address lookup may drop the inp lock */
3804 SCTP_INP_INCR_REF(inp);
3805 SCTP_INP_WUNLOCK(inp);
3806 stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
3808 SCTP_INP_WLOCK(inp);
3809 SCTP_INP_DECR_REF(inp);
3810 SCTP_INP_WUNLOCK(inp);
3814 /* Already have or am bring up an association */
3815 SCTP_ASOC_CREATE_UNLOCK(inp);
3816 SCTP_TCB_UNLOCK(stcb);
3820 /* We are GOOD to go */
3821 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0);
3823 /* Gak! no memory */
3827 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
3828 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
3829 /* Set the connected flag so we can queue data */
3832 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
3833 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
3834 sctp_send_initiate(inp, stcb);
3835 SCTP_ASOC_CREATE_UNLOCK(inp);
3836 SCTP_TCB_UNLOCK(stcb);
/*
 * sctp_usr_recvd() - PRU_RCVD handler: the application has consumed data
 * from the receive buffer.  Updates our advertised rwnd, delivers any
 * queued/reassembled data that now fits, and sends a window-update SACK
 * when the window grew by at least one MTU (or a scaled fraction of the
 * socket buffer).  Also maintains the sctp_queue_list used by the
 * sctp_peeloff layer-violation tracking (see comment at 3875-3879).
 * NOTE(review): many lines are elided; loop/branch structure around the
 * cleanup path (3949-3969) is only partially visible.
 */
3842 sctp_usr_recvd(struct socket *so, int flags)
3844 struct sctp_socket_q_list *sq=NULL;
3846 * The user has received some data, we may be able to stuff more
3847 * up the socket. And we need to possibly update the rwnd.
3849 struct sctp_inpcb *inp;
3850 struct sctp_tcb *stcb=NULL;
3852 inp = (struct sctp_inpcb *)so->so_pcb;
3854 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3855 kprintf("Read for so:%x inp:%x Flags:%x\n",
3856 (u_int)so, (u_int)inp, (u_int)flags);
3860 /* I made the same as TCP since we are not setup? */
3862 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3863 kprintf("Nope, connection reset\n");
3865 return (ECONNRESET);
3869 * Grab the first one on the list. It will re-insert itself if
3870 * it runs out of room
3872 SCTP_INP_WLOCK(inp);
3873 if ((flags & MSG_EOR) && ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0)
3874 && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3875 /* Ok the other part of our grubby tracking
3876 * stuff for our horrible layer violation that
3877 * the tsvwg thinks is ok for sctp_peeloff.. gak!
3878 * We must update the next vtag pending on the
3879 * socket buffer (if any).
3881 inp->sctp_vtag_first = sctp_get_first_vtag_from_sb(so);
3882 sq = TAILQ_FIRST(&inp->sctp_queue_list);
3889 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3892 SCTP_TCB_LOCK(stcb);
3895 /* all code in normal stcb path assumes
3896 * that you have a tcb_lock only. Thus
3897 * we must release the inp write lock.
3899 if (flags & MSG_EOR) {
3900 if (((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0)
3901 && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3902 stcb = sctp_remove_from_socket_q(inp);
3905 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3906 kprintf("remove from socket queue for inp:%x tcbret:%x\n",
3907 (u_int)inp, (u_int)stcb);
/* give back the control-overhead accounting for the consumed record */
3910 stcb->asoc.my_rwnd_control_len = sctp_sbspace_sub(stcb->asoc.my_rwnd_control_len,
3911 sizeof(struct mbuf));
3912 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) {
3913 stcb->asoc.my_rwnd_control_len = sctp_sbspace_sub(stcb->asoc.my_rwnd_control_len,
3914 CMSG_LEN(sizeof(struct sctp_sndrcvinfo)));
3917 if ((TAILQ_EMPTY(&stcb->asoc.delivery_queue) == 0) ||
3918 (TAILQ_EMPTY(&stcb->asoc.reasmqueue) == 0)) {
3919 /* Deliver if there is something to be delivered */
3920 sctp_service_queues(stcb, &stcb->asoc, 1);
3922 sctp_set_rwnd(stcb, &stcb->asoc);
3923 /* if we increase by 1 or more MTU's (smallest MTUs of all
3924 * nets) we send a window update sack
3926 incr = stcb->asoc.my_rwnd - stcb->asoc.my_last_reported_rwnd;
3930 if (((uint32_t)incr >= (stcb->asoc.smallest_mtu * SCTP_SEG_TO_RWND_UPD)) ||
3931 ((((uint32_t)incr)*SCTP_SCALE_OF_RWND_TO_UPD) >= so->so_rcv.ssb_hiwat)) {
3932 if (callout_pending(&stcb->asoc.dack_timer.timer)) {
3933 /* If the timer is up, stop it */
3934 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
3935 stcb->sctp_ep, stcb, NULL);
3937 /* Send the sack, with the new rwnd */
3938 sctp_send_sack(stcb);
3939 /* Now do the output */
3940 sctp_chunk_output(inp, stcb, 10);
3943 if ((( sq ) && (flags & MSG_EOR) && ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0))
3944 && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3945 stcb = sctp_remove_from_socket_q(inp);
3948 SOCKBUF_LOCK(&so->so_rcv);
/* receive buffer drained but socket-queue entries remain: clean them */
3949 if (( so->so_rcv.ssb_mb == NULL ) &&
3950 (TAILQ_EMPTY(&inp->sctp_queue_list) == 0)) {
3953 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3954 kprintf("Something off, inp:%x so->so_rcv->ssb_mb is empty and sockq is not.. cleaning\n",
3957 if (((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0)
3958 && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3960 done_yet = TAILQ_EMPTY(&inp->sctp_queue_list);
3963 sctp_remove_from_socket_q(inp);
3964 done_yet = TAILQ_EMPTY(&inp->sctp_queue_list);
3968 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3969 kprintf("Cleaned up %d sockq's\n", sq_cnt);
3972 SOCKBUF_UNLOCK(&so->so_rcv);
3974 SCTP_TCB_UNLOCK(stcb);
3975 SCTP_INP_WUNLOCK(inp);
3981 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
/*
 * sctp_listen() - listen(2) handler.  Binds an ephemeral port if needed,
 * rejects listen on an already-connected one-to-one (TCP-model) socket,
 * and translates the socket backlog into the SCTP_PCB_FLAGS_ACCEPTING
 * flag: qlimit > 0 enables accepting (UDP-model sockets additionally get
 * SO_ACCEPTCONN cleared, since accept() is not allowed on them),
 * qlimit == 0 turns accepting back off.
 */
3982 sctp_listen(struct socket *so, struct thread *p)
3984 sctp_listen(struct socket *so, struct proc *p)
3988 * Note this module depends on the protocol processing being
3989 * called AFTER any socket level flags and backlog are applied
3990 * to the socket. The traditional way that the socket flags are
3991 * applied is AFTER protocol processing. We have made a change
3992 * to the sys/kern/uipc_socket.c module to reverse this but this
3993 * MUST be in place if the socket API for SCTP is to work properly.
3996 struct sctp_inpcb *inp;
3999 inp = (struct sctp_inpcb *)so->so_pcb;
4002 /* I made the same as TCP since we are not setup? */
4003 return (ECONNRESET);
4005 SCTP_INP_RLOCK(inp);
4006 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4007 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
4008 /* We are already connected AND the TCP model */
4010 SCTP_INP_RUNLOCK(inp);
4011 return (EADDRINUSE);
4013 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
4014 /* We must do a bind. */
4015 SCTP_INP_RUNLOCK(inp);
4016 if ((error = sctp_inpcb_bind(so, NULL, p))) {
4017 /* bind error, probably perm */
4022 SCTP_INP_RUNLOCK(inp);
4025 SCTP_INP_WLOCK(inp);
4026 if (inp->sctp_socket->so_qlimit) {
4027 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
4029 * For the UDP model we must TURN OFF the ACCEPT
4030 * flags since we do NOT allow the accept() call.
4031 * The TCP model (when present) will do accept which
4032 * then prohibits connect().
4034 inp->sctp_socket->so_options &= ~SO_ACCEPTCONN;
4036 inp->sctp_flags |= SCTP_PCB_FLAGS_ACCEPTING;
4038 if (inp->sctp_flags & SCTP_PCB_FLAGS_ACCEPTING) {
4040 * Turning off the listen flags if the backlog is
4041 * set to 0 (i.e. qlimit is 0).
4043 inp->sctp_flags &= ~SCTP_PCB_FLAGS_ACCEPTING;
4045 inp->sctp_socket->so_options &= ~SO_ACCEPTCONN;
4047 SCTP_INP_WUNLOCK(inp);
4054 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
/*
 * sctp_accept() - accept(2) handler.  Returns the peer's primary
 * destination address for the first association on the endpoint.
 * FreeBSD-family: the sockaddr is MALLOC'd (M_SONAME) and returned via
 * *addr; NetBSD/OpenBSD-style: it is written into the caller's mbuf.
 * Afterwards, any wakeups deferred via SCTP_PCB_FLAGS_DONT_WAKE are
 * delivered (write and/or read wakeups as flagged).
 */
4055 sctp_accept(struct socket *so, struct sockaddr **addr)
4058 sctp_accept(struct socket *so, struct mbuf *nam)
4060 struct sockaddr *addr = mtod(nam, struct sockaddr *);
4062 struct sctp_tcb *stcb;
4063 struct sockaddr *prim;
4064 struct sctp_inpcb *inp;
4067 inp = (struct sctp_inpcb *)so->so_pcb;
4071 return (ECONNRESET);
4073 SCTP_INP_RLOCK(inp);
4074 if (so->so_state & SS_ISDISCONNECTED) {
4076 SCTP_INP_RUNLOCK(inp);
4077 return (ECONNABORTED);
4079 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4082 SCTP_INP_RUNLOCK(inp);
4083 return (ECONNRESET);
4085 SCTP_TCB_LOCK(stcb);
4086 SCTP_INP_RUNLOCK(inp);
4087 prim = (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr;
4088 if (prim->sa_family == AF_INET) {
4089 struct sockaddr_in *sin;
4090 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4091 MALLOC(sin, struct sockaddr_in *, sizeof *sin, M_SONAME,
4094 sin = (struct sockaddr_in *)addr;
4095 bzero((caddr_t)sin, sizeof (*sin));
4097 sin->sin_family = AF_INET;
4098 sin->sin_len = sizeof(*sin);
4099 sin->sin_port = ((struct sockaddr_in *)prim)->sin_port;
4100 sin->sin_addr = ((struct sockaddr_in *)prim)->sin_addr;
4101 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4102 *addr = (struct sockaddr *)sin;
4104 nam->m_len = sizeof(*sin);
/* non-AF_INET primary: treat as IPv6 */
4107 struct sockaddr_in6 *sin6;
4108 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4109 MALLOC(sin6, struct sockaddr_in6 *, sizeof *sin6, M_SONAME,
4112 sin6 = (struct sockaddr_in6 *)addr;
4114 bzero((caddr_t)sin6, sizeof (*sin6));
4115 sin6->sin6_family = AF_INET6;
4116 sin6->sin6_len = sizeof(*sin6);
4117 sin6->sin6_port = ((struct sockaddr_in6 *)prim)->sin6_port;
4119 sin6->sin6_addr = ((struct sockaddr_in6 *)prim)->sin6_addr;
/* recover the embedded scope id for link-local addresses */
4120 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr))
4121 /* sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);*/
4122 in6_recoverscope(sin6, &sin6->sin6_addr, NULL); /* skip ifp check */
4124 sin6->sin6_scope_id = 0; /*XXX*/
4125 #if defined(__FreeBSD__) || defined (__APPLE__) || defined(__DragonFly__)
4126 *addr= (struct sockaddr *)sin6;
4128 nam->m_len = sizeof(*sin6);
4131 /* Wake any delayed sleep action */
4132 SCTP_TCB_UNLOCK(stcb);
4133 SCTP_INP_WLOCK(inp);
4134 if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
4135 inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
4136 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
4137 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
4138 #if defined(__NetBSD__)
/* NetBSD spells the predicate sowritable(); others sowriteable() */
4139 if (sowritable(inp->sctp_socket))
4140 sowwakeup(inp->sctp_socket);
4142 if (sowriteable(inp->sctp_socket))
4143 sowwakeup(inp->sctp_socket);
4146 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
4147 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
4148 if (soreadable(inp->sctp_socket))
4149 sorwakeup(inp->sctp_socket);
4153 SCTP_INP_WUNLOCK(inp);
4159 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
/*
 * sctp_ingetaddr() - PRU_SOCKADDR handler: report the local IPv4
 * address/port bound to an SCTP socket.
 *
 * Two alternative signatures sit behind the platform #if: the
 * FreeBSD-style variant hands back a MALLOC'd sockaddr via *addr,
 * while the mbuf-style variant fills the caller-supplied mbuf *nam.
 * NOTE(review): the #else/#endif arms of these conditionals are elided
 * in this excerpt; the two declarations below are alternatives chosen
 * by the preprocessor, not sequential code.
 */
4160 sctp_ingetaddr(struct socket *so, struct sockaddr **addr)
4162 sctp_ingetaddr(struct socket *so, struct mbuf *nam)
4165 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4166 struct sockaddr_in *sin;
4168 struct sockaddr_in *sin = mtod(nam, struct sockaddr_in *);
4170 struct sctp_inpcb *inp;
4172 * Do the malloc first in case it blocks.
4174 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
/* M_WAITOK allocation may sleep; done before touching the PCB. */
4175 MALLOC(sin, struct sockaddr_in *, sizeof *sin, M_SONAME, M_WAITOK |
4178 nam->m_len = sizeof(*sin);
4179 memset(sin, 0, sizeof(*sin));
4181 sin->sin_family = AF_INET;
4182 sin->sin_len = sizeof(*sin);
/* Re-read so_pcb after the (possibly sleeping) allocation above. */
4184 inp = (struct sctp_inpcb *)so->so_pcb;
4187 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
/* No PCB: release the sockaddr before bailing out (error path). */
4188 FREE(sin, M_SONAME);
4192 SCTP_INP_RLOCK(inp);
/* sctp_lport is copied as-is; presumably already network byte order
 * per BSD pcb convention -- TODO confirm against sctp_pcb.h. */
4193 sin->sin_port = inp->sctp_lport;
4194 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
4195 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
4196 struct sctp_tcb *stcb;
4197 struct sockaddr_in *sin_a;
4198 struct sctp_nets *net;
/* Bound-all + connected: pick the source address we would use
 * toward the first association's first IPv4 destination. */
4201 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4207 SCTP_TCB_LOCK(stcb);
4208 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4209 sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4210 if (sin_a->sin_family == AF_INET) {
/* NOTE(review): 'fnd' is declared/set in lines elided from this
 * excerpt; the test guards against no IPv4 destination found. */
4215 if ((!fnd) || (sin_a == NULL)) {
4217 SCTP_TCB_UNLOCK(stcb);
4220 sin->sin_addr = sctp_ipv4_source_address_selection(inp,
4221 stcb, (struct route *)&net->ro, net, 0);
4222 SCTP_TCB_UNLOCK(stcb);
4224 /* For the bound all case you get back 0 */
4226 sin->sin_addr.s_addr = 0;
4230 /* Take the first IPv4 address in the list */
4231 struct sctp_laddr *laddr;
/* Specific-bind case: walk the endpoint's bound-address list. */
4233 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4234 if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
4235 struct sockaddr_in *sin_a;
4236 sin_a = (struct sockaddr_in *)laddr->ifa->ifa_addr;
4237 sin->sin_addr = sin_a->sin_addr;
4244 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
/* No usable IPv4 address found: free and fail (error path). */
4245 FREE(sin, M_SONAME);
4247 SCTP_INP_RUNLOCK(inp);
4251 SCTP_INP_RUNLOCK(inp);
4253 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
/* Success: hand ownership of the malloc'd sockaddr to the caller. */
4254 (*addr) = (struct sockaddr *)sin;
4260 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
/*
 * sctp_peeraddr() - PRU_PEERADDR handler: report the first IPv4
 * destination address of the first association on a connected socket.
 *
 * As with sctp_ingetaddr(), two alternative signatures exist behind the
 * platform #if (malloc'd *addr vs caller-supplied mbuf *nam).
 * NOTE(review): #else/#endif arms and several statements are elided in
 * this excerpt.
 */
4261 sctp_peeraddr(struct socket *so, struct sockaddr **addr)
4263 struct sockaddr_in *sin = (struct sockaddr_in *)*addr;
4265 sctp_peeraddr(struct socket *so, struct mbuf *nam)
4267 struct sockaddr_in *sin = mtod(nam, struct sockaddr_in *);
4270 struct sockaddr_in *sin_a;
4271 struct sctp_inpcb *inp;
4272 struct sctp_tcb *stcb;
4273 struct sctp_nets *net;
4275 /* Do the malloc first in case it blocks. */
4276 inp = (struct sctp_inpcb *)so->so_pcb;
4277 if ((inp == NULL) ||
4278 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
4279 /* UDP type and listeners will drop out here */
4284 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
/* M_WAITOK allocation may sleep. */
4285 MALLOC(sin, struct sockaddr_in *, sizeof *sin, M_SONAME, M_WAITOK |
4288 nam->m_len = sizeof(*sin);
4289 memset(sin, 0, sizeof(*sin));
4291 sin->sin_family = AF_INET;
4292 sin->sin_len = sizeof(*sin);
4294 /* We must recapture incase we blocked */
4295 inp = (struct sctp_inpcb *)so->so_pcb;
4298 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
/* PCB went away while we slept: free and fail (error path). */
4299 FREE(sin, M_SONAME);
4303 SCTP_INP_RLOCK(inp);
4304 stcb = LIST_FIRST(&inp->sctp_asoc_list);
/* Lock order: take the TCB lock, then drop the INP read lock. */
4306 SCTP_TCB_LOCK(stcb);
4307 SCTP_INP_RUNLOCK(inp);
4310 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
/* No association: free and fail (error path). */
4311 FREE(sin, M_SONAME);
/* Return the first IPv4 destination of the association. */
4316 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4317 sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4318 if (sin_a->sin_family == AF_INET) {
/* rport copied as-is; presumably network byte order -- TODO confirm. */
4320 sin->sin_port = stcb->rport;
4321 sin->sin_addr = sin_a->sin_addr;
4325 SCTP_TCB_UNLOCK(stcb);
4327 /* No IPv4 address */
4329 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4330 FREE(sin, M_SONAME);
4338 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
/*
 * Protocol user-request switch for SCTP on FreeBSD-style stacks:
 * maps each socket-layer operation to its SCTP handler. Unsupported
 * operations point at the generic pru_*_notsupp/pru_sense_null stubs.
 * NOTE(review): the closing "};" is elided from this excerpt.
 */
4339 struct pr_usrreqs sctp_usrreqs = {
4340 .pru_abort = sctp_abort,
4341 .pru_accept = sctp_accept,
4342 .pru_attach = sctp_attach,
4343 .pru_bind = sctp_bind,
4344 .pru_connect = sctp_connect,
4345 .pru_connect2 = pru_connect2_notsupp,
4346 .pru_control = in_control,	/* generic inet ioctl handling */
4347 .pru_detach = sctp_detach,
4348 .pru_disconnect = sctp_disconnect,
4349 .pru_listen = sctp_listen,
4350 .pru_peeraddr = sctp_peeraddr,
4351 .pru_rcvd = sctp_usr_recvd,
4352 .pru_rcvoob = pru_rcvoob_notsupp,	/* SCTP has no OOB data */
4353 .pru_send = sctp_send,
4354 .pru_sense = pru_sense_null,
4355 .pru_shutdown = sctp_shutdown,
4356 .pru_sockaddr = sctp_ingetaddr,
4357 .pru_sosend = sctp_sosend,
4358 .pru_soreceive = soreceive	/* generic socket receive path */
4362 #if defined(__NetBSD__)
/*
 * sctp_usrreq() - monolithic user-request entry point for the
 * NetBSD/OpenBSD mbuf-style socket interface: dispatches on 'req'
 * to the per-operation sctp_* handlers.
 *
 * NOTE(review): large parts of the dispatch skeleton (switch header,
 * case labels, break statements, #else/#endif arms) are elided from
 * this excerpt; only representative lines of each case remain.
 */
4364 sctp_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam,
4365 struct mbuf *control, struct proc *p)
4369 sctp_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam,
4370 struct mbuf *control)
/* Non-NetBSD variant has no proc argument; use the current process. */
4372 struct proc *p = curproc;
4377 family = so->so_proto->pr_domain->dom_family;
/* ioctl requests are forwarded to the per-family control routine. */
4380 if (req == PRU_CONTROL) {
4383 error = in_control(so, (long)m, (caddr_t)nam,
4384 (struct ifnet *)control
4385 #if defined(__NetBSD__)
4392 error = in6_control(so, (long)m, (caddr_t)nam,
4393 (struct ifnet *)control, p);
4397 error = EAFNOSUPPORT;
/* Interface purge: drop every SCTP address bound on this ifnet. */
4403 if (req == PRU_PURGEIF) {
4406 ifn = (struct ifnet *)control;
4407 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
4408 if (ifa->ifa_addr->sa_family == family) {
4409 sctp_delete_ip_address(ifa);
4423 return (EAFNOSUPPORT);
4431 error = sctp_attach(so, family, p);
4434 error = sctp_detach(so);
4441 error = sctp_bind(so, nam, p);
4444 error = sctp_listen(so, p);
4451 error = sctp_connect(so, nam, p);
4453 case PRU_DISCONNECT:
4454 error = sctp_disconnect(so);
4461 error = sctp_accept(so, nam);
4464 error = sctp_shutdown(so);
4469 * For Open and Net BSD, this is real
4470 * ugly. The mbuf *nam that is passed
4471 * (by soreceive()) is the int flags c
4472 * ast as a (mbuf *) yuck!
4474 error = sctp_usr_recvd(so, (int)((long)nam));
4478 /* Flags are ignored */
4480 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
4481 kprintf("Send called on V4 side\n");
4485 struct sockaddr *addr;
/* nam carries the destination sockaddr for PRU_SEND. */
4489 addr = mtod(nam, struct sockaddr *);
4491 error = sctp_send(so, 0, m, addr, control, p);
4495 error = sctp_abort(so);
/* OOB send/receive are not supported by SCTP. */
4502 error = EAFNOSUPPORT;
4505 error = EAFNOSUPPORT;
4508 error = sctp_peeraddr(so, nam);
4511 error = sctp_ingetaddr(so, nam);
4524 /* #if defined(__NetBSD__) || defined(__OpenBSD__) */
4527 * Sysctl for sctp variables.
/*
 * sctp_sysctl() - classic BSD name-vector sysctl handler: each
 * SCTPCTL_* leaf maps one-to-one onto a global tunable via
 * sysctl_int(). Unknown names return ENOPROTOOPT.
 * NOTE(review): the switch header, some argument continuation lines
 * and the final default label are elided from this excerpt.
 */
4530 sctp_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
4534 /* All sysctl names at this level are terminal. */
4540 case SCTPCTL_MAXDGRAM:
4541 return (sysctl_int(oldp, oldlenp, newp, newlen,
4543 case SCTPCTL_RECVSPACE:
4544 return (sysctl_int(oldp, oldlenp, newp, newlen,
4546 case SCTPCTL_AUTOASCONF:
4547 return (sysctl_int(oldp, oldlenp, newp, newlen,
4548 &sctp_auto_asconf));
4549 case SCTPCTL_ECN_ENABLE:
4550 return (sysctl_int(oldp, oldlenp, newp, newlen,
4552 case SCTPCTL_ECN_NONCE:
4553 return (sysctl_int(oldp, oldlenp, newp, newlen,
4555 case SCTPCTL_STRICT_SACK:
4556 return (sysctl_int(oldp, oldlenp, newp, newlen,
4557 &sctp_strict_sacks));
4558 case SCTPCTL_NOCSUM_LO:
4559 return (sysctl_int(oldp, oldlenp, newp, newlen,
4560 &sctp_no_csum_on_loopback));
4561 case SCTPCTL_STRICT_INIT:
4562 return (sysctl_int(oldp, oldlenp, newp, newlen,
4563 &sctp_strict_init));
4564 case SCTPCTL_PEER_CHK_OH:
4565 return (sysctl_int(oldp, oldlenp, newp, newlen,
4566 &sctp_peer_chunk_oh));
4567 case SCTPCTL_MAXBURST:
4568 return (sysctl_int(oldp, oldlenp, newp, newlen,
4569 &sctp_max_burst_default));
4570 case SCTPCTL_MAXCHUNKONQ:
4571 return (sysctl_int(oldp, oldlenp, newp, newlen,
4572 &sctp_max_chunks_on_queue));
/* Timer defaults for new associations (units defined elsewhere). */
4573 case SCTPCTL_DELAYED_SACK:
4574 return (sysctl_int(oldp, oldlenp, newp, newlen,
4575 &sctp_delayed_sack_time_default));
4576 case SCTPCTL_HB_INTERVAL:
4577 return (sysctl_int(oldp, oldlenp, newp, newlen,
4578 &sctp_heartbeat_interval_default));
4579 case SCTPCTL_PMTU_RAISE:
4580 return (sysctl_int(oldp, oldlenp, newp, newlen,
4581 &sctp_pmtu_raise_time_default));
4582 case SCTPCTL_SHUTDOWN_GUARD:
4583 return (sysctl_int(oldp, oldlenp, newp, newlen,
4584 &sctp_shutdown_guard_time_default));
4585 case SCTPCTL_SECRET_LIFETIME:
4586 return (sysctl_int(oldp, oldlenp, newp, newlen,
4587 &sctp_secret_lifetime_default));
/* RTO bounds and initial value defaults. */
4588 case SCTPCTL_RTO_MAX:
4589 return (sysctl_int(oldp, oldlenp, newp, newlen,
4590 &sctp_rto_max_default));
4591 case SCTPCTL_RTO_MIN:
4592 return (sysctl_int(oldp, oldlenp, newp, newlen,
4593 &sctp_rto_min_default));
4594 case SCTPCTL_RTO_INITIAL:
4595 return (sysctl_int(oldp, oldlenp, newp, newlen,
4596 &sctp_rto_initial_default));
4597 case SCTPCTL_INIT_RTO_MAX:
4598 return (sysctl_int(oldp, oldlenp, newp, newlen,
4599 &sctp_init_rto_max_default));
4600 case SCTPCTL_COOKIE_LIFE:
4601 return (sysctl_int(oldp, oldlenp, newp, newlen,
4602 &sctp_valid_cookie_life_default));
/* Retransmission-limit defaults. */
4603 case SCTPCTL_INIT_RTX_MAX:
4604 return (sysctl_int(oldp, oldlenp, newp, newlen,
4605 &sctp_init_rtx_max_default));
4606 case SCTPCTL_ASSOC_RTX_MAX:
4607 return (sysctl_int(oldp, oldlenp, newp, newlen,
4608 &sctp_assoc_rtx_max_default));
4609 case SCTPCTL_PATH_RTX_MAX:
4610 return (sysctl_int(oldp, oldlenp, newp, newlen,
4611 &sctp_path_rtx_max_default));
4612 case SCTPCTL_NR_OUTGOING_STREAMS:
4613 return (sysctl_int(oldp, oldlenp, newp, newlen,
4614 &sctp_nr_outgoing_streams_default));
4617 return (sysctl_int(oldp, oldlenp, newp, newlen,
4621 return (ENOPROTOOPT);
4628 * Sysctl for sctp variables.
4630 SYSCTL_SETUP(sysctl_net_inet_sctp_setup, "sysctl net.inet.sctp subtree setup")
4633 sysctl_createv(clog, 0, NULL, NULL,
4635 CTLTYPE_NODE, "net", NULL,
4638 sysctl_createv(clog, 0, NULL, NULL,
4640 CTLTYPE_NODE, "inet", NULL,
4642 CTL_NET, PF_INET, CTL_EOL);
4643 sysctl_createv(clog, 0, NULL, NULL,
4645 CTLTYPE_NODE, "sctp",
4646 SYSCTL_DESCR("sctp related settings"),
4648 CTL_NET, PF_INET, IPPROTO_SCTP, CTL_EOL);
4650 sysctl_createv(clog, 0, NULL, NULL,
4651 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4652 CTLTYPE_INT, "maxdgram",
4653 SYSCTL_DESCR("Maximum outgoing SCTP buffer size"),
4654 NULL, 0, &sctp_sendspace, 0,
4655 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_MAXDGRAM,
4658 sysctl_createv(clog, 0, NULL, NULL,
4659 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4660 CTLTYPE_INT, "recvspace",
4661 SYSCTL_DESCR("Maximum incoming SCTP buffer size"),
4662 NULL, 0, &sctp_recvspace, 0,
4663 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_RECVSPACE,
4666 sysctl_createv(clog, 0, NULL, NULL,
4667 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4668 CTLTYPE_INT, "autoasconf",
4669 SYSCTL_DESCR("Enable SCTP Auto-ASCONF"),
4670 NULL, 0, &sctp_auto_asconf, 0,
4671 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_AUTOASCONF,
4674 sysctl_createv(clog, 0, NULL, NULL,
4675 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4676 CTLTYPE_INT, "ecn_enable",
4677 SYSCTL_DESCR("Enable SCTP ECN"),
4678 NULL, 0, &sctp_ecn, 0,
4679 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_ECN_ENABLE,
4682 sysctl_createv(clog, 0, NULL, NULL,
4683 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4684 CTLTYPE_INT, "ecn_nonce",
4685 SYSCTL_DESCR("Enable SCTP ECN Nonce"),
4686 NULL, 0, &sctp_ecn_nonce, 0,
4687 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_ECN_NONCE,
4690 sysctl_createv(clog, 0, NULL, NULL,
4691 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4692 CTLTYPE_INT, "strict_sack",
4693 SYSCTL_DESCR("Enable SCTP Strict SACK checking"),
4694 NULL, 0, &sctp_strict_sacks, 0,
4695 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_STRICT_SACK,
4698 sysctl_createv(clog, 0, NULL, NULL,
4699 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4700 CTLTYPE_INT, "loopback_nocsum",
4701 SYSCTL_DESCR("Enable NO Csum on packets sent on loopback"),
4702 NULL, 0, &sctp_no_csum_on_loopback, 0,
4703 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_NOCSUM_LO,
4706 sysctl_createv(clog, 0, NULL, NULL,
4707 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4708 CTLTYPE_INT, "strict_init",
4709 SYSCTL_DESCR("Enable strict INIT/INIT-ACK singleton enforcement"),
4710 NULL, 0, &sctp_strict_init, 0,
4711 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_STRICT_INIT,
4714 sysctl_createv(clog, 0, NULL, NULL,
4715 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4716 CTLTYPE_INT, "peer_chkoh",
4717 SYSCTL_DESCR("Amount to debit peers rwnd per chunk sent"),
4718 NULL, 0, &sctp_peer_chunk_oh, 0,
4719 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_PEER_CHK_OH,
4722 sysctl_createv(clog, 0, NULL, NULL,
4723 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4724 CTLTYPE_INT, "maxburst",
4725 SYSCTL_DESCR("Default max burst for sctp endpoints"),
4726 NULL, 0, &sctp_max_burst_default, 0,
4727 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_MAXBURST,
4730 sysctl_createv(clog, 0, NULL, NULL,
4731 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4732 CTLTYPE_INT, "maxchunks",
4733 SYSCTL_DESCR("Default max chunks on queue per asoc"),
4734 NULL, 0, &sctp_max_chunks_on_queue, 0,
4735 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_MAXCHUNKONQ,