1 /* $KAME: sctp_usrreq.c,v 1.47 2005/03/06 16:04:18 itojun Exp $ */
2 /* $DragonFly: src/sys/netinet/sctp_usrreq.c,v 1.14 2008/04/20 13:44:25 swildner Exp $ */
5 * Copyright (c) 2001, 2002, 2003, 2004 Cisco Systems, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Cisco Systems, Inc.
19 * 4. Neither the name of the project nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #if !(defined(__OpenBSD__) || defined(__APPLE__))
36 #include "opt_ipsec.h"
38 #if defined(__FreeBSD__) || defined(__DragonFly__)
39 #include "opt_inet6.h"
42 #if defined(__NetBSD__)
48 #elif !defined(__OpenBSD__)
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/kernel.h>
55 #include <sys/malloc.h>
57 #include <sys/domain.h>
60 #include <sys/protosw.h>
61 #include <sys/socket.h>
62 #include <sys/socketvar.h>
63 #include <sys/socketvar2.h>
64 #include <sys/sysctl.h>
65 #include <sys/syslog.h>
66 #include <sys/thread2.h>
68 #include <net/if_types.h>
69 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
70 #include <net/if_var.h>
72 #include <net/route.h>
73 #include <netinet/in.h>
74 #include <netinet/in_systm.h>
75 #include <netinet/ip.h>
76 #include <netinet/ip6.h>
77 #include <netinet/in_pcb.h>
78 #include <netinet/in_var.h>
79 #include <netinet/ip_var.h>
80 #include <netinet6/ip6_var.h>
81 #include <netinet6/in6_var.h>
83 #include <netinet/ip_icmp.h>
84 #include <netinet/icmp_var.h>
85 #include <netinet/sctp_pcb.h>
86 #include <netinet/sctp_header.h>
87 #include <netinet/sctp_var.h>
88 #include <netinet/sctp_output.h>
89 #include <netinet/sctp_uio.h>
90 #include <netinet/sctp_asconf.h>
91 #include <netinet/sctputil.h>
92 #include <netinet/sctp_indata.h>
93 #include <netinet/sctp_asconf.h>
96 #include <netinet6/ipsec.h>
97 #include <netproto/key/key.h>
103 #include <net/net_osdep.h>
105 #if defined(HAVE_NRL_INPCB) || defined(__FreeBSD__) || defined(__DragonFly__)
110 #define sotoin6pcb sotoinpcb
115 extern u_int32_t sctp_debug_on;
116 #endif /* SCTP_DEBUG */
119 * sysctl tunable variables
/*
 * Compile-time defaults for the SCTP sysctl knobs; the SYSCTL_INT/UINT
 * declarations further down in this file expose them under net.inet.sctp.
 */
121 int sctp_auto_asconf = SCTP_DEFAULT_AUTO_ASCONF;
122 int sctp_max_burst_default = SCTP_DEF_MAX_BURST;
/* Overhead debited from the peer's advertised rwnd for each chunk sent. */
123 int sctp_peer_chunk_oh = sizeof(struct mbuf);
124 int sctp_strict_init = 1;
/* Default is to skip checksumming on loopback (see loopback_nocsum knob). */
125 int sctp_no_csum_on_loopback = 1;
126 unsigned int sctp_max_chunks_on_queue = SCTP_ASOC_MAX_CHUNKS_ON_QUEUE;
/* Socket-buffer defaults; the init code below rescales them to nmbclusters. */
127 int sctp_sendspace = (128 * 1024);
128 int sctp_recvspace = 128 * (1024 +
130 sizeof(struct sockaddr_in6)
132 sizeof(struct sockaddr_in)
135 int sctp_strict_sacks = 0;
137 int sctp_ecn_nonce = 0;
/*
 * Timer/retransmission defaults.  Units (msec vs sec) follow the sysctl
 * description strings declared below.
 */
139 unsigned int sctp_delayed_sack_time_default = SCTP_RECV_MSEC;
140 unsigned int sctp_heartbeat_interval_default = SCTP_HB_DEFAULT_MSEC;
141 unsigned int sctp_pmtu_raise_time_default = SCTP_DEF_PMTU_RAISE_SEC;
142 unsigned int sctp_shutdown_guard_time_default = SCTP_DEF_MAX_SHUTDOWN_SEC;
143 unsigned int sctp_secret_lifetime_default = SCTP_DEFAULT_SECRET_LIFE_SEC;
144 unsigned int sctp_rto_max_default = SCTP_RTO_UPPER_BOUND;
145 unsigned int sctp_rto_min_default = SCTP_RTO_LOWER_BOUND;
146 unsigned int sctp_rto_initial_default = SCTP_RTO_INITIAL;
147 unsigned int sctp_init_rto_max_default = SCTP_RTO_UPPER_BOUND;
148 unsigned int sctp_valid_cookie_life_default = SCTP_DEFAULT_COOKIE_LIFE;
149 unsigned int sctp_init_rtx_max_default = SCTP_DEF_MAX_INIT;
150 unsigned int sctp_assoc_rtx_max_default = SCTP_DEF_MAX_SEND;
151 unsigned int sctp_path_rtx_max_default = SCTP_DEF_MAX_SEND/2;
152 unsigned int sctp_nr_outgoing_streams_default = SCTP_OSTREAM_INITIAL;
158 #define nmbclusters nmbclust
/*
 * Initialization fragment (the SCTP pcb proper is initialized in
 * sctp_pcb.c).  Scales the chunk-queue limit and the default socket
 * buffer sizes to the number of mbuf clusters configured on this system.
 */
160 /* Init the SCTP pcb in sctp_pcb.c */
166 if (nmbclusters > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
167 sctp_max_chunks_on_queue = nmbclusters;
169 /* if (nmbclust > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
170 sctp_max_chunks_on_queue = nmbclust; FIX ME */
171 sctp_max_chunks_on_queue = nmbclust * 2;
174 * Allow a user to take no more than 1/2 the number of clusters
175 * or the SB_MAX whichever is smaller for the send window.
/* Mirror the kernel's sb_max adjustment for per-mbuf bookkeeping overhead. */
177 sb_max_adj = (u_long)((u_quad_t)(SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
178 sctp_sendspace = min((min(SB_MAX, sb_max_adj)),
180 ((nmbclusters/2) * SCTP_DEFAULT_MAXSEGMENT));
182 ((nmbclust/2) * SCTP_DEFAULT_MAXSEGMENT));
185 * Now for the recv window, should we take the same amount?
186 * or should I do 1/2 the SB_MAX instead in the SB_MAX min above.
187 * For now I will just copy.
189 sctp_recvspace = sctp_sendspace;
197 ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip)
/*
 * Build a v4-mapped IPv6 header from an IPv4 header: zero the header,
 * carry over payload length, next protocol and TTL->hop limit, and
 * embed the v4 source/destination in the low word of each v6 address.
 */
199 bzero(ip6, sizeof(*ip6));
201 ip6->ip6_vfc = IPV6_VERSION;
202 ip6->ip6_plen = ip->ip_len;
203 ip6->ip6_nxt = ip->ip_p;
204 ip6->ip6_hlim = ip->ip_ttl;
/* Word [3] carries the IPv4 address (word [2] assignment continues below). */
205 ip6->ip6_src.s6_addr32[2] = ip6->ip6_dst.s6_addr32[2] =
207 ip6->ip6_src.s6_addr32[3] = ip->ip_src.s_addr;
208 ip6->ip6_dst.s6_addr32[3] = ip->ip_dst.s_addr;
213 sctp_split_chunks(struct sctp_association *asoc,
214 struct sctp_stream_out *strm,
215 struct sctp_tmit_chunk *chk)
/*
 * Split an outbound stream chunk that is too large for the (reduced)
 * path MTU into two halves, inserting the new half directly after the
 * original on the stream outqueue.  On any allocation failure the
 * original chunk is simply flagged CHUNK_FLAGS_FRAGMENT_OK and left
 * intact, letting the output path fragment it later.
 */
217 struct sctp_tmit_chunk *new_chk;
219 /* First we need a chunk */
220 new_chk = (struct sctp_tmit_chunk *)SCTP_ZONE_GET(sctppcbinfo.ipi_zone_chunk);
221 if (new_chk == NULL) {
222 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
225 sctppcbinfo.ipi_count_chunk++;
226 sctppcbinfo.ipi_gencnt_chunk++;
/* Split the mbuf chain at the midpoint of the payload. */
230 new_chk->data = m_split(chk->data, (chk->send_size>>1), MB_DONTWAIT);
231 if (new_chk->data == NULL) {
233 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
/* m_split() failed: release the freshly allocated chunk and its count. */
234 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, new_chk);
235 sctppcbinfo.ipi_count_chunk--;
236 if ((int)sctppcbinfo.ipi_count_chunk < 0) {
237 panic("Chunk count is negative");
239 sctppcbinfo.ipi_gencnt_chunk++;
243 /* Data is now split adjust sizes */
244 chk->send_size >>= 1;
245 new_chk->send_size >>= 1;
247 chk->book_size >>= 1;
248 new_chk->book_size >>= 1;
250 /* now adjust the marks */
/* Original half keeps FIRST_FRAG; the new half carries LAST_FRAG. */
251 chk->rec.data.rcv_flags |= SCTP_DATA_FIRST_FRAG;
252 chk->rec.data.rcv_flags &= ~SCTP_DATA_LAST_FRAG;
254 new_chk->rec.data.rcv_flags &= ~SCTP_DATA_FIRST_FRAG;
255 new_chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
257 /* Increase ref count if dest is set */
259 new_chk->whoTo->ref_count++;
261 /* now drop it on the end of the list*/
262 asoc->stream_queue_cnt++;
263 TAILQ_INSERT_AFTER(&strm->outqueue, chk, new_chk, sctp_next);
267 sctp_notify_mbuf(struct sctp_inpcb *inp,
268 struct sctp_tcb *stcb,
269 struct sctp_nets *net,
/*
 * Handle an ICMP "fragmentation needed" (path-MTU) indication for an
 * association: verify the vtag, determine the next-hop MTU, shrink the
 * destination/association MTU, and re-mark queued chunks so oversized
 * ones are fragmented or resent.  Returns with the TCB unlocked on
 * every path.
 */
279 if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
280 (ip == NULL) || (sh == NULL)) {
282 SCTP_TCB_UNLOCK(stcb);
285 /* First job is to verify the vtag matches what I would send */
286 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
287 SCTP_TCB_UNLOCK(stcb);
/* Step back from the embedded inner IP header to the ICMP header. */
290 icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
292 if (icmph->icmp_type != ICMP_UNREACH) {
293 /* We only care about unreachable */
294 SCTP_TCB_UNLOCK(stcb);
297 if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
298 /* not a unreachable message due to frag. */
299 SCTP_TCB_UNLOCK(stcb);
/* Next-hop MTU read from the icmp_seq slot (overlays the nextmtu field
 * in this header layout) -- NOTE(review): verify against ip_icmp.h. */
303 nxtsz = ntohs(icmph->icmp_seq);
306 * old type router that does not tell us what the next size
307 * mtu is. Rats we will have to guess (in a educated fashion
310 nxtsz = find_next_best_mtu(totsz);
313 /* Stop any PMTU timer */
314 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, NULL);
316 /* Adjust destination size limit */
317 if (net->mtu > nxtsz) {
320 /* now what about the ep? */
321 if (stcb->asoc.smallest_mtu > nxtsz) {
322 struct sctp_tmit_chunk *chk, *nchk;
323 struct sctp_stream_out *strm;
324 /* Adjust that too */
325 stcb->asoc.smallest_mtu = nxtsz;
326 /* now off to subtract IP_DF flag if needed */
328 TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
329 if ((chk->send_size+IP_HDR_SIZE) > nxtsz) {
330 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
333 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
334 if ((chk->send_size+IP_HDR_SIZE) > nxtsz) {
336 * For this guy we also mark for immediate
337 * resend since we sent to big of chunk
339 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
340 if (chk->sent != SCTP_DATAGRAM_RESEND) {
341 stcb->asoc.sent_queue_retran_cnt++;
343 chk->sent = SCTP_DATAGRAM_RESEND;
344 chk->rec.data.doing_fast_retransmit = 0;
346 /* Clear any time so NO RTT is being done */
/* Drop the chunk from the in-flight accounting, clamping at zero. */
348 stcb->asoc.total_flight -= chk->book_size;
349 if (stcb->asoc.total_flight < 0) {
350 stcb->asoc.total_flight = 0;
352 stcb->asoc.total_flight_count--;
353 if (stcb->asoc.total_flight_count < 0) {
354 stcb->asoc.total_flight_count = 0;
356 net->flight_size -= chk->book_size;
357 if (net->flight_size < 0) {
358 net->flight_size = 0;
362 TAILQ_FOREACH(strm, &stcb->asoc.out_wheel, next_spoke) {
363 chk = TAILQ_FIRST(&strm->outqueue);
365 nchk = TAILQ_NEXT(chk, sctp_next);
366 if ((chk->send_size+SCTP_MED_OVERHEAD) > nxtsz) {
/* Still-unsent stream chunks exceeding the new MTU are split in two. */
367 sctp_split_chunks(&stcb->asoc, strm, chk);
373 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, NULL);
374 SCTP_TCB_UNLOCK(stcb);
379 sctp_notify(struct sctp_inpcb *inp,
383 struct sctp_tcb *stcb,
384 struct sctp_nets *net)
/*
 * Translate a (non-PMTU) ICMP-derived errno into SCTP state changes:
 * EHOSTUNREACH/EHOSTDOWN mark the path unreachable and notify the ULP;
 * ECONNREFUSED/ENOPROTOOPT abort the association (treated like an OOTB
 * abort); anything else is posted to the owning socket as so_error.
 */
387 if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
388 (sh == NULL) || (to == NULL)) {
390 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
391 kprintf("sctp-notify, bad call\n");
393 #endif /* SCTP_DEBUG */
396 /* First job is to verify the vtag matches what I would send */
397 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
401 /* FIX ME FIX ME PROTOPT i.e. no SCTP should ALWAYS be an ABORT */
403 if ((error == EHOSTUNREACH) || /* Host is not reachable */
404 (error == EHOSTDOWN) || /* Host is down */
405 (error == ECONNREFUSED) || /* Host refused the connection, (not an abort?) */
406 (error == ENOPROTOOPT) /* SCTP is not present on host */
409 * Hmm reachablity problems we must examine closely.
410 * If its not reachable, we may have lost a network.
411 * Or if there is NO protocol at the other end named SCTP.
412 * well we consider it a OOTB abort.
414 if ((error == EHOSTUNREACH) || (error == EHOSTDOWN)) {
415 if (net->dest_state & SCTP_ADDR_REACHABLE) {
416 /* Ok that destination is NOT reachable */
417 net->dest_state &= ~SCTP_ADDR_REACHABLE;
418 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
/* Force the error count past the threshold so the path stays down. */
419 net->error_count = net->failure_threshold + 1;
420 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
421 stcb, SCTP_FAILED_THRESHOLD,
425 SCTP_TCB_UNLOCK(stcb);
428 * Here the peer is either playing tricks on us,
429 * including an address that belongs to someone who
430 * does not support SCTP OR was a userland
431 * implementation that shutdown and now is dead. In
432 * either case treat it like a OOTB abort with no TCB
434 sctp_abort_notification(stcb, SCTP_PEER_FAULTY);
435 sctp_free_assoc(inp, stcb);
436 /* no need to unlock here, since the TCB is gone */
439 /* Send all others to the app */
440 if (inp->sctp_socket) {
441 SOCK_LOCK(inp->sctp_socket);
442 inp->sctp_socket->so_error = error;
/* Wake any writers so they observe so_error. */
443 sctp_sowwakeup(inp, inp->sctp_socket);
444 SOCK_UNLOCK(inp->sctp_socket);
447 SCTP_TCB_UNLOCK(stcb);
451 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
/*
 * Protocol control-input entry point: receives ICMP error indications
 * for SCTP, looks up the association for the failed packet (note the
 * reversed to/from in the lookup), and dispatches to sctp_notify() or,
 * for PRC_MSGSIZE, to sctp_notify_mbuf().
 */
456 sctp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
462 if (sa->sa_family != AF_INET ||
463 ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
464 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
471 if (PRC_IS_REDIRECT(cmd)) {
473 } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
474 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
481 struct sctp_inpcb *inp;
482 struct sctp_tcb *stcb;
483 struct sctp_nets *net;
484 struct sockaddr_in to, from;
/* The SCTP common header sits just past the returned inner IP header. */
486 sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
487 bzero(&to, sizeof(to));
488 bzero(&from, sizeof(from));
489 from.sin_family = to.sin_family = AF_INET;
490 from.sin_len = to.sin_len = sizeof(to);
491 from.sin_port = sh->src_port;
492 from.sin_addr = ip->ip_src;
493 to.sin_port = sh->dest_port;
494 to.sin_addr = ip->ip_dst;
497 * 'to' holds the dest of the packet that failed to be sent.
498 * 'from' holds our local endpoint address.
499 * Thus we reverse the to and the from in the lookup.
502 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
503 (struct sockaddr *)&to,
505 if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
506 if (cmd != PRC_MSGSIZE) {
508 if (cmd == PRC_HOSTDEAD) {
/* Map the PRC_* command to an errno via the standard inet table. */
511 cm = inetctlerrmap[cmd];
513 sctp_notify(inp, cm, sh,
514 (struct sockaddr *)&to, stcb,
517 /* handle possible ICMP size messages */
518 sctp_notify_mbuf(inp, stcb, net, ip, sh);
521 #if (defined(__FreeBSD__) && __FreeBSD_version < 500000) || defined(__DragonFly__)
522 /* XXX must be fixed for 5.x and higher, leave for 4.x */
523 if (PRC_IS_REDIRECT(cmd) && inp) {
524 in_rtchange((struct inpcb *)inp,
528 if ((stcb == NULL) && (inp != NULL)) {
529 /* reduce ref-count */
531 SCTP_INP_DECR_REF(inp);
532 SCTP_INP_WUNLOCK(inp);
538 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
545 #if defined(__FreeBSD__) || defined(__DragonFly__)
/*
 * sysctl handler: given a pair of sockaddr_in (foreign, local) copied
 * in from userland, return the ucred of the socket that owns the
 * matching association.  Requires root privilege.
 */
547 sctp_getcred(SYSCTL_HANDLER_ARGS)
549 struct sockaddr_in addrs[2];
550 struct sctp_inpcb *inp;
551 struct sctp_nets *net;
552 struct sctp_tcb *stcb;
555 #if __FreeBSD_version >= 500000 || defined(__DragonFly__)
556 error = priv_check(req->td, PRIV_ROOT);
558 error = suser(req->p);
562 error = SYSCTL_IN(req, addrs, sizeof(addrs));
567 stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
570 if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
571 if ((inp != NULL) && (stcb == NULL)) {
572 /* reduce ref-count */
574 SCTP_INP_DECR_REF(inp);
575 SCTP_INP_WUNLOCK(inp);
580 error = SYSCTL_OUT(req, inp->sctp_socket->so_cred, sizeof(struct ucred));
581 SCTP_TCB_UNLOCK(stcb);
587 SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE|CTLFLAG_RW,
588 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");
589 #endif /* #if defined(__FreeBSD__) || defined(__DragonFly__) */
594 #if defined(__FreeBSD__) || defined (__APPLE__) || defined(__DragonFly__)
/*
 * sysctl declarations exposing the tunables defined near the top of
 * this file under the net.inet.sctp node.
 */
596 SYSCTL_DECL(_net_inet);
598 SYSCTL_NODE(_net_inet, OID_AUTO, sctp, CTLFLAG_RD, 0,
601 SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxdgram, CTLFLAG_RW,
602 &sctp_sendspace, 0, "Maximum outgoing SCTP buffer size");
604 SYSCTL_INT(_net_inet_sctp, OID_AUTO, recvspace, CTLFLAG_RW,
605 &sctp_recvspace, 0, "Maximum incoming SCTP buffer size");
607 SYSCTL_INT(_net_inet_sctp, OID_AUTO, auto_asconf, CTLFLAG_RW,
608 &sctp_auto_asconf, 0, "Enable SCTP Auto-ASCONF");
610 SYSCTL_INT(_net_inet_sctp, OID_AUTO, ecn_enable, CTLFLAG_RW,
611 &sctp_ecn, 0, "Enable SCTP ECN");
613 SYSCTL_INT(_net_inet_sctp, OID_AUTO, ecn_nonce, CTLFLAG_RW,
614 &sctp_ecn_nonce, 0, "Enable SCTP ECN Nonce");
616 SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_sacks, CTLFLAG_RW,
617 &sctp_strict_sacks, 0, "Enable SCTP Strict SACK checking");
619 SYSCTL_INT(_net_inet_sctp, OID_AUTO, loopback_nocsum, CTLFLAG_RW,
620 &sctp_no_csum_on_loopback, 0,
621 "Enable NO Csum on packets sent on loopback");
623 SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_init, CTLFLAG_RW,
624 &sctp_strict_init, 0,
625 "Enable strict INIT/INIT-ACK singleton enforcement");
627 SYSCTL_INT(_net_inet_sctp, OID_AUTO, peer_chkoh, CTLFLAG_RW,
628 &sctp_peer_chunk_oh, 0,
629 "Amount to debit peers rwnd per chunk sent");
631 SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxburst, CTLFLAG_RW,
632 &sctp_max_burst_default, 0,
633 "Default max burst for sctp endpoints");
635 SYSCTL_INT(_net_inet_sctp, OID_AUTO, maxchunks, CTLFLAG_RW,
636 &sctp_max_chunks_on_queue, 0,
637 "Default max chunks on queue per asoc");
639 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, delayed_sack_time, CTLFLAG_RW,
640 &sctp_delayed_sack_time_default, 0,
641 "Default delayed SACK timer in msec");
643 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, heartbeat_interval, CTLFLAG_RW,
644 &sctp_heartbeat_interval_default, 0,
645 "Default heartbeat interval in msec");
647 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, pmtu_raise_time, CTLFLAG_RW,
648 &sctp_pmtu_raise_time_default, 0,
649 "Default PMTU raise timer in sec");
651 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, shutdown_guard_time, CTLFLAG_RW,
652 &sctp_shutdown_guard_time_default, 0,
653 "Default shutdown guard timer in sec");
655 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, secret_lifetime, CTLFLAG_RW,
656 &sctp_secret_lifetime_default, 0,
657 "Default secret lifetime in sec");
659 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_max, CTLFLAG_RW,
660 &sctp_rto_max_default, 0,
661 "Default maximum retransmission timeout in msec");
663 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_min, CTLFLAG_RW,
664 &sctp_rto_min_default, 0,
665 "Default minimum retransmission timeout in msec");
667 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, rto_initial, CTLFLAG_RW,
668 &sctp_rto_initial_default, 0,
669 "Default initial retransmission timeout in msec");
671 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, init_rto_max, CTLFLAG_RW,
672 &sctp_init_rto_max_default, 0,
673 "Default maximum retransmission timeout during association setup in msec");
675 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, valid_cookie_life, CTLFLAG_RW,
676 &sctp_valid_cookie_life_default, 0,
677 "Default cookie lifetime in sec");
679 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, init_rtx_max, CTLFLAG_RW,
680 &sctp_init_rtx_max_default, 0,
681 "Default maximum number of retransmission for INIT chunks");
683 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, assoc_rtx_max, CTLFLAG_RW,
684 &sctp_assoc_rtx_max_default, 0,
685 "Default maximum number of retransmissions per association");
687 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, path_rtx_max, CTLFLAG_RW,
688 &sctp_path_rtx_max_default, 0,
689 "Default maximum of retransmissions per path");
691 SYSCTL_UINT(_net_inet_sctp, OID_AUTO, nr_outgoing_streams, CTLFLAG_RW,
692 &sctp_nr_outgoing_streams_default, 0,
693 "Default number of outgoing streams");
/* Debug bitmask knob only exists when SCTP_DEBUG is compiled in. */
696 SYSCTL_INT(_net_inet_sctp, OID_AUTO, debug, CTLFLAG_RW,
697 &sctp_debug_on, 0, "Configure debug output");
698 #endif /* SCTP_DEBUG */
702 * NOTE: (so) is referenced from soabort*() and netmsg_pru_abort()
703 * will sofree() it when we return.
/* pru_abort handler: tear down the SCTP inpcb immediately (mode 1). */
706 sctp_abort(struct socket *so)
708 struct sctp_inpcb *inp;
711 inp = (struct sctp_inpcb *)so->so_pcb;
713 sctp_inpcb_free(inp, 1);
722 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
723 sctp_attach(struct socket *so, int proto, struct thread *p)
724 #elif defined(__DragonFly__)
725 sctp_attach(struct socket *so, int proto, struct pru_attach_info *ai)
727 sctp_attach(struct socket *so, int proto, struct proc *p)
/*
 * pru_attach handler: reserve send/receive socket buffers, allocate the
 * SCTP inpcb, and mark it IPv4-only.  If IPsec policy initialization
 * fails the inpcb is freed again before returning the error.
 */
730 struct sctp_inpcb *inp;
731 struct inpcb *ip_inp;
735 inp = (struct sctp_inpcb *)so->so_pcb;
740 error = soreserve(so, sctp_sendspace, sctp_recvspace, NULL);
745 error = sctp_inpcb_alloc(so);
750 inp = (struct sctp_inpcb *)so->so_pcb;
753 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */
754 ip_inp = &inp->ip_inp.inp;
755 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
756 ip_inp->inp_vflag |= INP_IPV4;
757 ip_inp->inp_ip_ttl = ip_defttl;
759 inp->inp_vflag |= INP_IPV4;
760 inp->inp_ip_ttl = ip_defttl;
764 #if !(defined(__OpenBSD__) || defined(__APPLE__))
765 error = ipsec_init_policy(so, &ip_inp->inp_sp);
767 sctp_inpcb_free(inp, 1);
772 SCTP_INP_WUNLOCK(inp);
773 #if defined(__NetBSD__)
/* NetBSD hooks the SCTP-aware sosend directly onto the socket. */
774 so->so_send = sctp_sosend;
781 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
782 sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
784 #elif defined(__FreeBSD__) || defined(__APPLE__)
785 sctp_bind(struct socket *so, struct sockaddr *addr, struct proc *p)
788 sctp_bind(struct socket *so, struct mbuf *nam, struct proc *p)
/*
 * pru_bind handler for the v4 SCTP socket: only AF_INET addresses are
 * accepted; the actual port/address binding is done by sctp_inpcb_bind().
 */
790 struct sockaddr *addr = nam ? mtod(nam, struct sockaddr *) : NULL;
792 struct sctp_inpcb *inp;
796 if (addr && addr->sa_family != AF_INET)
797 /* must be a v4 address! */
801 inp = (struct sctp_inpcb *)so->so_pcb;
806 error = sctp_inpcb_bind(so, addr, p);
813 sctp_detach(struct socket *so)
/*
 * pru_detach handler: free the inpcb immediately (mode 1) when
 * SO_LINGER with a zero timeout is set or unread receive data remains;
 * otherwise use the deferred-free path (mode 0).
 */
815 struct sctp_inpcb *inp;
817 inp = (struct sctp_inpcb *)so->so_pcb;
821 if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
822 (so->so_rcv.ssb_cc > 0)) {
823 sctp_inpcb_free(inp, 1);
825 sctp_inpcb_free(inp, 0);
832 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
833 sctp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
834 struct mbuf *control, struct thread *p);
836 sctp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
837 struct mbuf *control, struct proc *p);
/*
 * pru_send handler: accumulate the outgoing mbuf chain on the inpcb
 * (inp->pkt / inp->pkt_last), stash any control mbufs on inp->control,
 * and hand the assembled message to sctp_output() once the caller
 * signals no more data is to come.
 */
841 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
842 sctp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
843 struct mbuf *control, struct thread *p)
846 sctp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
847 struct mbuf *control, struct proc *p)
850 struct sctp_inpcb *inp;
852 inp = (struct sctp_inpcb *)so->so_pcb;
855 sctp_m_freem(control);
861 /* Got to have an to address if we are NOT a connected socket */
862 if ((addr == NULL) &&
863 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
864 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
867 } else if (addr == NULL) {
868 error = EDESTADDRREQ;
871 sctp_m_freem(control);
877 if (addr->sa_family != AF_INET) {
878 /* must be a v4 address! */
881 sctp_m_freem(control);
884 error = EDESTADDRREQ;
889 /* now what about control */
892 kprintf("huh? control set?\n");
/* Any previously stashed control chain is dropped for the new one. */
893 sctp_m_freem(inp->control);
896 inp->control = control;
898 /* add it in possibly */
899 if ((inp->pkt) && (inp->pkt->m_flags & M_PKTHDR)) {
/* Walk the appended chain to keep the packet-header length accurate. */
905 for (x=m;x;x = x->m_next) {
908 inp->pkt->m_pkthdr.len += c_len;
912 inp->pkt_last->m_next = m;
915 inp->pkt_last = inp->pkt = m;
918 #if defined (__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
919 /* FreeBSD uses a flag passed */
920 ((flags & PRUS_MORETOCOME) == 0)
921 #elif defined( __NetBSD__)
922 /* NetBSD uses the so_state field */
923 ((so->so_state & SS_MORETOCOME) == 0)
925 1 /* Open BSD does not have any "more to come" indication */
929 * note with the current version this code will only be used
930 * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for
931 * re-defining sosend to use the sctp_sosend. One can
932 * optionally switch back to this code (by changing back the
933 * definitions) but this is not advisable.
936 ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
946 sctp_disconnect(struct socket *so)
/*
 * pru_disconnect handler.  Meaningful only for TCP-model sockets: when
 * SO_LINGER with zero timeout is set or unread data remains, the
 * association is aborted with a user-initiated-abort cause; otherwise a
 * graceful SHUTDOWN is sent once all queued data has drained, or
 * SHUTDOWN_PENDING is set until it does.  UDP-model sockets reject the
 * call.
 */
948 struct sctp_inpcb *inp;
951 inp = (struct sctp_inpcb *)so->so_pcb;
957 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
958 if (LIST_EMPTY(&inp->sctp_asoc_list)) {
961 SCTP_INP_RUNLOCK(inp);
964 int some_on_streamwheel = 0;
965 struct sctp_association *asoc;
966 struct sctp_tcb *stcb;
/* TCP model carries at most one association; take the first. */
968 stcb = LIST_FIRST(&inp->sctp_asoc_list);
971 SCTP_INP_RUNLOCK(inp);
976 if (((so->so_options & SO_LINGER) &&
977 (so->so_linger == 0)) ||
978 (so->so_rcv.ssb_cc > 0)) {
979 if (SCTP_GET_STATE(asoc) !=
980 SCTP_STATE_COOKIE_WAIT) {
981 /* Left with Data unread */
984 MGET(err, MB_DONTWAIT, MT_DATA);
986 /* Fill in the user initiated abort */
987 struct sctp_paramhdr *ph;
988 ph = mtod(err, struct sctp_paramhdr *);
989 err->m_len = sizeof(struct sctp_paramhdr);
990 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
991 ph->param_length = htons(err->m_len);
993 sctp_send_abort_tcb(stcb, err);
995 SCTP_INP_RUNLOCK(inp);
996 sctp_free_assoc(inp, stcb);
997 /* No unlock tcb assoc is gone */
1001 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
1002 /* Check to see if some data queued */
1003 struct sctp_stream_out *outs;
1004 TAILQ_FOREACH(outs, &asoc->out_wheel,
1006 if (!TAILQ_EMPTY(&outs->outqueue)) {
1007 some_on_streamwheel = 1;
1013 if (TAILQ_EMPTY(&asoc->send_queue) &&
1014 TAILQ_EMPTY(&asoc->sent_queue) &&
1015 (some_on_streamwheel == 0)) {
1016 /* there is nothing queued to send, so done */
1017 if ((SCTP_GET_STATE(asoc) !=
1018 SCTP_STATE_SHUTDOWN_SENT) &&
1019 (SCTP_GET_STATE(asoc) !=
1020 SCTP_STATE_SHUTDOWN_ACK_SENT)) {
1021 /* only send SHUTDOWN 1st time thru */
1023 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
1024 kprintf("%s:%d sends a shutdown\n",
1030 sctp_send_shutdown(stcb,
1031 stcb->asoc.primary_destination);
1032 sctp_chunk_output(stcb->sctp_ep, stcb, 1);
1033 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
/* Arm both the SHUTDOWN retransmit and the overall guard timers. */
1034 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1035 stcb->sctp_ep, stcb,
1036 asoc->primary_destination);
1037 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1038 stcb->sctp_ep, stcb,
1039 asoc->primary_destination);
1043 * we still got (or just got) data to send,
1044 * so set SHUTDOWN_PENDING
1047 * XXX sockets draft says that MSG_EOF should
1048 * be sent with no data.
1049 * currently, we will allow user data to be
1050 * sent first and move to SHUTDOWN-PENDING
1052 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
1054 SCTP_TCB_UNLOCK(stcb);
1055 SCTP_INP_RUNLOCK(inp);
1061 /* UDP model does not support this */
1062 SCTP_INP_RUNLOCK(inp);
1069 sctp_shutdown(struct socket *so)
/*
 * pru_shutdown handler.  Invalid for UDP-model sockets: the receive
 * state cleared by soshutdown() is restored and EOPNOTSUPP returned.
 * For TCP-model sockets, send a SHUTDOWN once all queued data has
 * drained, or mark the association SHUTDOWN_PENDING until it does.
 */
1071 struct sctp_inpcb *inp;
1074 inp = (struct sctp_inpcb *)so->so_pcb;
1079 SCTP_INP_RLOCK(inp);
1080 /* For UDP model this is a invalid call */
1081 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
1082 /* Restore the flags that the soshutdown took away. */
1083 #if defined(__FreeBSD__) && __FreeBSD_version >= 502115
1084 so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
1086 soclrstate(so, SS_CANTRCVMORE);
1088 /* This proc will wakeup for read and do nothing (I hope) */
1090 SCTP_INP_RUNLOCK(inp);
1091 return (EOPNOTSUPP);
1094 * Ok if we reach here its the TCP model and it is either a SHUT_WR
1095 * or SHUT_RDWR. This means we put the shutdown flag against it.
1098 int some_on_streamwheel = 0;
1099 struct sctp_tcb *stcb;
1100 struct sctp_association *asoc;
1103 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1106 * Ok we hit the case that the shutdown call was made
1107 * after an abort or something. Nothing to do now.
1112 SCTP_TCB_LOCK(stcb);
1115 if (!TAILQ_EMPTY(&asoc->out_wheel)) {
1116 /* Check to see if some data queued */
1117 struct sctp_stream_out *outs;
1118 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
1119 if (!TAILQ_EMPTY(&outs->outqueue)) {
1120 some_on_streamwheel = 1;
1125 if (TAILQ_EMPTY(&asoc->send_queue) &&
1126 TAILQ_EMPTY(&asoc->sent_queue) &&
1127 (some_on_streamwheel == 0)) {
1128 /* there is nothing queued to send, so I'm done... */
1129 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1130 /* only send SHUTDOWN the first time through */
1132 if (sctp_debug_on & SCTP_DEBUG_OUTPUT4) {
1133 kprintf("%s:%d sends a shutdown\n",
1139 sctp_send_shutdown(stcb,
1140 stcb->asoc.primary_destination);
1141 sctp_chunk_output(stcb->sctp_ep, stcb, 1);
1142 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
/* Arm both the SHUTDOWN retransmit and the overall guard timers. */
1143 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1144 stcb->sctp_ep, stcb,
1145 asoc->primary_destination);
1146 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1147 stcb->sctp_ep, stcb,
1148 asoc->primary_destination);
1152 * we still got (or just got) data to send, so
1153 * set SHUTDOWN_PENDING
1155 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
1157 SCTP_TCB_UNLOCK(stcb);
1159 SCTP_INP_RUNLOCK(inp);
1165 * copies a "user" presentable address and removes embedded scope, etc.
1166 * returns 0 on success, 1 on error
1169 sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
1171 struct sockaddr_in6 lsa6;
/* sctp_recover_scope() may substitute a scope-stripped copy of `sa'. */
1172 sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
1174 memcpy(ss, sa, sa->sa_len);
1179 #if defined(__NetBSD__) || defined(__OpenBSD__)
1181 * On NetBSD and OpenBSD in6_sin_2_v4mapsin6() not used and not exported,
1182 * so we have to export it here.
/* Forward declaration only; the definition lives in the inet6 code. */
1184 void in6_sin_2_v4mapsin6(struct sockaddr_in *sin, struct sockaddr_in6 *sin6);
1188 sctp_fill_up_addresses(struct sctp_inpcb *inp,
1189 struct sctp_tcb *stcb,
1191 struct sockaddr_storage *sas)
/*
 * Copy the endpoint's (or association's) usable local addresses into
 * the caller-supplied buffer `sas', honouring the relevant address
 * scopes and the association's restricted-address list, accounting the
 * bytes written in `actual' and stopping at `limit'.
 *
 * Fix: the IPv4 -> mapped-IPv6 branch previously advanced `actual' by
 * sizeof(sizeof(struct sockaddr_in6)) -- i.e. sizeof(size_t), 4 or 8
 * bytes -- instead of the sizeof(struct sockaddr_in6) actually written
 * to the buffer.  That undercount defeats the `actual >= limit' bound
 * check and can overrun the caller's buffer.
 */
1194 int loopback_scope, ipv4_local_scope, local_scope, site_scope, actual;
1195 int ipv4_addr_legal, ipv6_addr_legal;
1201 /* Turn on all the appropriate scope */
1202 loopback_scope = stcb->asoc.loopback_scope;
1203 ipv4_local_scope = stcb->asoc.ipv4_local_scope;
1204 local_scope = stcb->asoc.local_scope;
1205 site_scope = stcb->asoc.site_scope;
1207 /* Turn on ALL scope, since we look at the EP */
1208 loopback_scope = ipv4_local_scope = local_scope =
1211 ipv4_addr_legal = ipv6_addr_legal = 0;
1212 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1213 ipv6_addr_legal = 1;
1215 #if defined(__OpenBSD__)
1216 (0) /* we always do dual bind */
1217 #elif defined (__NetBSD__)
1218 (((struct in6pcb *)inp)->in6p_flags & IN6P_IPV6_V6ONLY)
1220 (((struct in6pcb *)inp)->inp_flags & IN6P_IPV6_V6ONLY)
1223 ipv4_addr_legal = 1;
1226 ipv4_addr_legal = 1;
1229 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1230 TAILQ_FOREACH(ifn, &ifnet, if_list) {
1231 struct ifaddr_container *ifac;
1233 if ((loopback_scope == 0) &&
1234 (ifn->if_type == IFT_LOOP)) {
1235 /* Skip loopback if loopback_scope not set */
1238 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid],
1240 struct ifaddr *ifa = ifac->ifa;
1244 * For the BOUND-ALL case, the list
1245 * associated with a TCB is Always
1246 * considered a reverse list.. i.e.
1247 * it lists addresses that are NOT
1248 * part of the association. If this
1249 * is one of those we must skip it.
1251 if (sctp_is_addr_restricted(stcb,
1256 if ((ifa->ifa_addr->sa_family == AF_INET) &&
1257 (ipv4_addr_legal)) {
1258 struct sockaddr_in *sin;
1259 sin = (struct sockaddr_in *)ifa->ifa_addr;
1260 if (sin->sin_addr.s_addr == 0) {
1261 /* we skip unspecified addresses */
1264 if ((ipv4_local_scope == 0) &&
1265 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1268 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) {
1269 in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
1270 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1271 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
/* Account the full mapped-v6 sockaddr written (was sizeof(sizeof(...))). */
1272 actual += sizeof(struct sockaddr_in6);
1274 memcpy(sas, sin, sizeof(*sin));
1275 ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
1276 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
1277 actual += sizeof(*sin);
1279 if (actual >= limit) {
1282 } else if ((ifa->ifa_addr->sa_family == AF_INET6) &&
1283 (ipv6_addr_legal)) {
1284 struct sockaddr_in6 *sin6, lsa6;
1285 sin6 = (struct sockaddr_in6 *)ifa->ifa_addr;
1286 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1287 /* we skip unspecified addresses */
1290 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
1291 if (local_scope == 0)
1293 if (sin6->sin6_scope_id == 0) {
1295 if (in6_recoverscope(&lsa6,
1298 /* bad link local address */
1303 if ((site_scope == 0) &&
1304 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1307 memcpy(sas, sin6, sizeof(*sin6));
1308 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1309 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
1310 actual += sizeof(*sin6);
1311 if (actual >= limit) {
1318 struct sctp_laddr *laddr;
1320 * If we have a TCB and we do NOT support ASCONF (it's
1321 * turned off or otherwise) then the list is always the
1322 * true list of addresses (the else case below). Otherwise
1323 * the list on the association is a list of addresses that
1324 * are NOT part of the association.
1326 if (inp->sctp_flags & SCTP_PCB_FLAGS_DO_ASCONF) {
1327 /* The list is a NEGATIVE list */
1328 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1330 if (sctp_is_addr_restricted(stcb, laddr->ifa->ifa_addr)) {
1334 if (sctp_fill_user_address(sas, laddr->ifa->ifa_addr))
1337 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1338 sas = (struct sockaddr_storage *)((caddr_t)sas +
1339 laddr->ifa->ifa_addr->sa_len);
1340 actual += laddr->ifa->ifa_addr->sa_len;
1341 if (actual >= limit) {
1346 /* The list is a positive list if present */
1348 /* Must use the specific association list */
1349 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list,
1351 if (sctp_fill_user_address(sas,
1352 laddr->ifa->ifa_addr))
1354 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1355 sas = (struct sockaddr_storage *)((caddr_t)sas +
1356 laddr->ifa->ifa_addr->sa_len);
1357 actual += laddr->ifa->ifa_addr->sa_len;
1358 if (actual >= limit) {
1363 /* No endpoint so use the endpoints individual list */
1364 LIST_FOREACH(laddr, &inp->sctp_addr_list,
1366 if (sctp_fill_user_address(sas,
1367 laddr->ifa->ifa_addr))
1369 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1370 sas = (struct sockaddr_storage *)((caddr_t)sas +
1371 laddr->ifa->ifa_addr->sa_len);
1372 actual += laddr->ifa->ifa_addr->sa_len;
1373 if (actual >= limit) {
1384 sctp_count_max_addresses(struct sctp_inpcb *inp)
/*
 * Despite the name, the visible code accumulates BYTE sizes
 * (sizeof(struct sockaddr_in{,6})) per address, i.e. an upper bound on
 * the buffer space needed to report this endpoint's local addresses.
 */
1388 * In both sub-set bound an bound_all cases we return the MAXIMUM
1389 * number of addresses that you COULD get. In reality the sub-set
1390 * bound may have an exclusion list for a given TCB OR in the
1391 * bound-all case a TCB may NOT include the loopback or other
1392 * addresses as well.
1394 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
/* bound-all: every address on every interface could be reported */
1397 TAILQ_FOREACH(ifn, &ifnet, if_list) {
1398 struct ifaddr_container *ifac;
1400 TAILQ_FOREACH(ifac, &ifn->if_addrheads[mycpuid], ifa_link) {
1401 struct ifaddr *ifa = ifac->ifa;
1403 /* Count them if they are the right type */
1404 if (ifa->ifa_addr->sa_family == AF_INET) {
/* a v4 address is reported as a mapped v6 sockaddr when requested */
1405 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
1406 cnt += sizeof(struct sockaddr_in6);
1408 cnt += sizeof(struct sockaddr_in);
1410 } else if (ifa->ifa_addr->sa_family == AF_INET6)
1411 cnt += sizeof(struct sockaddr_in6);
1415 struct sctp_laddr *laddr;
/* sub-set bound: only the endpoint's explicitly-bound address list */
1416 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1417 if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
1418 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
1419 cnt += sizeof(struct sockaddr_in6);
1421 cnt += sizeof(struct sockaddr_in);
1423 } else if (laddr->ifa->ifa_addr->sa_family == AF_INET6)
1424 cnt += sizeof(struct sockaddr_in6);
/*
 * sctp_do_connect_x() - implement sctp_connectx(): validate the packed
 * array of destination sockaddrs in mbuf `m`, create a new association,
 * add each address as a remote destination, then kick off (or delay)
 * the INIT exchange.
 */
1431 sctp_do_connect_x(struct socket *so,
1432 struct sctp_inpcb *inp,
1434 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
1443 struct sctp_tcb *stcb = NULL;
1444 struct sockaddr *sa;
1445 int num_v6=0, num_v4=0, *totaddrp, totaddr, i, incr, at;
1447 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1448 kprintf("Connectx called\n");
1450 #endif /* SCTP_DEBUG */
1453 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1454 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
1455 /* We are already connected AND the TCP model */
1457 return (EADDRINUSE);
1459 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
1460 SCTP_INP_RLOCK(inp);
1461 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1462 SCTP_INP_RUNLOCK(inp);
1469 SCTP_ASOC_CREATE_LOCK(inp);
1470 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
/*
 * FIX: the second operand duplicated SCTP_PCB_FLAGS_SOCKET_GONE, making
 * the test redundant; it is meant to also reject an inp whose socket is
 * ALL gone (as in the FreeBSD SCTP stack).
 */
1471 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
1472 SCTP_ASOC_CREATE_UNLOCK(inp);
1477 totaddrp = mtod(m, int *);
1478 totaddr = *totaddrp;
1479 sa = (struct sockaddr *)(totaddrp + 1);
1481 /* account and validate addresses */
1482 SCTP_INP_WLOCK(inp);
1483 SCTP_INP_INCR_REF(inp);
1484 SCTP_INP_WUNLOCK(inp);
1485 for (i = 0; i < totaddr; i++) {
1486 if (sa->sa_family == AF_INET) {
1488 incr = sizeof(struct sockaddr_in);
1489 } else if (sa->sa_family == AF_INET6) {
1490 struct sockaddr_in6 *sin6;
1491 sin6 = (struct sockaddr_in6 *)sa;
1492 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
1493 /* Must be non-mapped for connectx */
1494 SCTP_ASOC_CREATE_UNLOCK(inp);
1499 incr = sizeof(struct sockaddr_in6);
1504 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
1506 /* Already have or am bring up an association */
1507 SCTP_ASOC_CREATE_UNLOCK(inp);
1508 SCTP_TCB_UNLOCK(stcb);
1512 if ((at + incr) > m->m_len) {
1516 sa = (struct sockaddr *)((caddr_t)sa + incr);
1518 sa = (struct sockaddr *)(totaddrp + 1);
1519 SCTP_INP_WLOCK(inp);
1520 SCTP_INP_DECR_REF(inp);
1521 SCTP_INP_WUNLOCK(inp);
1523 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
1526 SCTP_INP_WUNLOCK(inp);
1527 SCTP_ASOC_CREATE_UNLOCK(inp);
1530 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1532 struct in6pcb *inp6;
1533 inp6 = (struct in6pcb *)inp;
1535 #if defined(__OpenBSD__)
1536 (0) /* we always do dual bind */
1537 #elif defined (__NetBSD__)
1538 (inp6->in6p_flags & IN6P_IPV6_V6ONLY)
1540 (inp6->inp_flags & IN6P_IPV6_V6ONLY)
1544 * if IPV6_V6ONLY flag, ignore connections
1545 * destined to a v4 addr or v4-mapped addr
1547 SCTP_INP_WUNLOCK(inp);
1548 SCTP_ASOC_CREATE_UNLOCK(inp);
1554 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
1555 SCTP_PCB_FLAGS_UNBOUND) {
1556 /* Bind a ephemeral port */
1557 SCTP_INP_WUNLOCK(inp);
1558 error = sctp_inpcb_bind(so, NULL, p);
1560 SCTP_ASOC_CREATE_UNLOCK(inp);
1565 SCTP_INP_WUNLOCK(inp);
1567 /* We are GOOD to go */
1568 stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0);
1570 /* Gak! no memory */
1571 SCTP_ASOC_CREATE_UNLOCK(inp);
1575 /* move to second address */
1576 if (sa->sa_family == AF_INET)
1577 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
1579 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
1581 for (i = 1; i < totaddr; i++) {
1582 if (sa->sa_family == AF_INET) {
1583 incr = sizeof(struct sockaddr_in);
1584 if (sctp_add_remote_addr(stcb, sa, 0, 8)) {
1585 /* assoc gone no un-lock */
1586 sctp_free_assoc(inp, stcb);
1587 SCTP_ASOC_CREATE_UNLOCK(inp);
1592 } else if (sa->sa_family == AF_INET6) {
1593 incr = sizeof(struct sockaddr_in6);
1594 if (sctp_add_remote_addr(stcb, sa, 0, 8)) {
1595 /* assoc gone no un-lock */
1596 sctp_free_assoc(inp, stcb);
1597 SCTP_ASOC_CREATE_UNLOCK(inp);
1602 sa = (struct sockaddr *)((caddr_t)sa + incr);
1604 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
1606 /* doing delayed connection */
1607 stcb->asoc.delayed_connection = 1;
1608 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
1610 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1611 sctp_send_initiate(inp, stcb);
1613 SCTP_TCB_UNLOCK(stcb);
1614 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
1615 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1616 /* Set the connected flag so we can queue data */
1619 SCTP_ASOC_CREATE_UNLOCK(inp);
/*
 * sctp_optsget() - handle getsockopt() for SCTP-level options.  The
 * option value is exchanged through mbuf `m`; each case validates
 * m->m_len, fills the caller's structure in place, and sets m->m_len
 * to the size of the data returned.
 */
1626 sctp_optsget(struct socket *so,
1629 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
1636 struct sctp_inpcb *inp;
1638 int error, optval=0;
1639 struct sctp_tcb *stcb = NULL;
1641 inp = (struct sctp_inpcb *)so->so_pcb;
1648 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1649 kprintf("optsget:MP is NULL EINVAL\n");
1651 #endif /* SCTP_DEBUG */
1656 /* Got to have a mbuf */
1658 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1659 kprintf("Huh no mbuf\n");
1661 #endif /* SCTP_DEBUG */
1665 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
1666 kprintf("optsget opt:%lxx sz:%u\n", (unsigned long)opt,
1669 #endif /* SCTP_DEBUG */
/* Simple boolean (and autoclose) options share one code path */
1673 case SCTP_AUTOCLOSE:
1674 case SCTP_AUTO_ASCONF:
1675 case SCTP_DISABLE_FRAGMENTS:
1676 case SCTP_I_WANT_MAPPED_V4_ADDR:
1678 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
1679 kprintf("other stuff\n");
1681 #endif /* SCTP_DEBUG */
1682 SCTP_INP_RLOCK(inp);
1684 case SCTP_DISABLE_FRAGMENTS:
1685 optval = inp->sctp_flags & SCTP_PCB_FLAGS_NO_FRAGMENT;
1687 case SCTP_I_WANT_MAPPED_V4_ADDR:
1688 optval = inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
1690 case SCTP_AUTO_ASCONF:
1691 optval = inp->sctp_flags & SCTP_PCB_FLAGS_AUTO_ASCONF;
1694 optval = inp->sctp_flags & SCTP_PCB_FLAGS_NODELAY;
1696 case SCTP_AUTOCLOSE:
1697 if ((inp->sctp_flags & SCTP_PCB_FLAGS_AUTOCLOSE) ==
1698 SCTP_PCB_FLAGS_AUTOCLOSE)
1699 optval = inp->sctp_ep.auto_close_time;
1705 error = ENOPROTOOPT;
1706 } /* end switch (sopt->sopt_name) */
1707 if (opt != SCTP_AUTOCLOSE) {
1708 /* make it an "on/off" value */
1709 optval = (optval != 0);
1711 if ((size_t)m->m_len < sizeof(int)) {
1714 SCTP_INP_RUNLOCK(inp);
1716 /* return the option value */
1717 *mtod(m, int *) = optval;
1718 m->m_len = sizeof(optval);
1721 case SCTP_GET_ASOC_ID_LIST:
1723 struct sctp_assoc_ids *ids;
1727 if ((size_t)m->m_len < sizeof(struct sctp_assoc_ids)) {
1731 ids = mtod(m, struct sctp_assoc_ids *);
1733 SCTP_INP_RLOCK(inp);
1734 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1737 ids->asls_numb_present = 0;
1738 ids->asls_more_to_get = 0;
1739 SCTP_INP_RUNLOCK(inp);
/* resume the walk at the association id the caller last saw */
1742 orig = ids->asls_assoc_start;
1743 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1745 stcb = LIST_NEXT(stcb , sctp_tcblist);
1753 ids->asls_numb_present = 0;
1754 ids->asls_more_to_get = 1;
1755 while(at < MAX_ASOC_IDS_RET) {
1756 ids->asls_assoc_id[at] = sctp_get_associd(stcb);
1758 ids->asls_numb_present++;
1759 stcb = LIST_NEXT(stcb , sctp_tcblist);
1761 ids->asls_more_to_get = 0;
1765 SCTP_INP_RUNLOCK(inp);
1768 case SCTP_GET_NONCE_VALUES:
1770 struct sctp_get_nonce_values *gnv;
1771 if ((size_t)m->m_len < sizeof(struct sctp_get_nonce_values)) {
1775 gnv = mtod(m, struct sctp_get_nonce_values *);
1776 stcb = sctp_findassociation_ep_asocid(inp, gnv->gn_assoc_id);
1780 gnv->gn_peers_tag = stcb->asoc.peer_vtag;
1781 gnv->gn_local_tag = stcb->asoc.my_vtag;
1782 SCTP_TCB_UNLOCK(stcb);
1787 case SCTP_PEER_PUBLIC_KEY:
1788 case SCTP_MY_PUBLIC_KEY:
1789 case SCTP_SET_AUTH_CHUNKS:
1790 case SCTP_SET_AUTH_SECRET:
1791 /* not supported yet and until we refine the draft */
1795 case SCTP_DELAYED_ACK_TIME:
1798 if ((size_t)m->m_len < sizeof(int32_t)) {
1802 tm = mtod(m, int32_t *);
/* stored internally in ticks; reported to the user in milliseconds */
1804 *tm = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1808 case SCTP_GET_SNDBUF_USE:
1809 if ((size_t)m->m_len < sizeof(struct sctp_sockstat)) {
1812 struct sctp_sockstat *ss;
1813 struct sctp_tcb *stcb;
1814 struct sctp_association *asoc;
1815 ss = mtod(m, struct sctp_sockstat *);
1816 stcb = sctp_findassociation_ep_asocid(inp, ss->ss_assoc_id);
1821 ss->ss_total_sndbuf = (u_int32_t)asoc->total_output_queue_size;
1822 ss->ss_total_mbuf_sndbuf = (u_int32_t)asoc->total_output_mbuf_queue_size;
1823 ss->ss_total_recv_buf = (u_int32_t)(asoc->size_on_delivery_queue +
1824 asoc->size_on_reasm_queue +
1825 asoc->size_on_all_streams);
1826 SCTP_TCB_UNLOCK(stcb);
1828 m->m_len = sizeof(struct sctp_sockstat);
1835 burst = mtod(m, u_int8_t *);
1836 SCTP_INP_RLOCK(inp);
1837 *burst = inp->sctp_ep.max_burst;
1838 SCTP_INP_RUNLOCK(inp);
1839 m->m_len = sizeof(u_int8_t);
1845 sctp_assoc_t *assoc_id;
1848 if ((size_t)m->m_len < sizeof(u_int32_t)) {
1852 if ((size_t)m->m_len < sizeof(sctp_assoc_t)) {
/* in and out share the mbuf: assoc id in, segment size out */
1856 assoc_id = mtod(m, sctp_assoc_t *);
1857 segsize = mtod(m, u_int32_t *);
1858 m->m_len = sizeof(u_int32_t);
1860 if (((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1861 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) ||
1862 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
1863 struct sctp_tcb *stcb;
1864 SCTP_INP_RLOCK(inp);
1865 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1867 SCTP_TCB_LOCK(stcb);
1868 SCTP_INP_RUNLOCK(inp);
1869 *segsize = sctp_get_frag_point(stcb, &stcb->asoc);
1870 SCTP_TCB_UNLOCK(stcb);
1872 SCTP_INP_RUNLOCK(inp);
1876 stcb = sctp_findassociation_ep_asocid(inp, *assoc_id);
1878 *segsize = sctp_get_frag_point(stcb, &stcb->asoc);
1879 SCTP_TCB_UNLOCK(stcb);
1883 /* default is to get the max, if I
1884 * can't calculate from an existing association.
1886 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1887 ovh = SCTP_MED_OVERHEAD;
1889 ovh = SCTP_MED_V4_OVERHEAD;
1891 *segsize = inp->sctp_frag_point - ovh;
1896 case SCTP_SET_DEBUG_LEVEL:
1900 if ((size_t)m->m_len < sizeof(u_int32_t)) {
1904 level = mtod(m, u_int32_t *);
1906 *level = sctp_debug_on;
1907 m->m_len = sizeof(u_int32_t);
1908 kprintf("Returning DEBUG LEVEL %x is set\n",
1909 (u_int)sctp_debug_on);
1911 #else /* SCTP_DEBUG */
1915 case SCTP_GET_STAT_LOG:
1916 #ifdef SCTP_STAT_LOGGING
1917 error = sctp_fill_stat_log(m);
1918 #else /* SCTP_DEBUG */
1925 if ((size_t)m->m_len < sizeof(sctp_pegs)) {
1929 pt = mtod(m, u_int32_t *);
1930 memcpy(pt, sctp_pegs, sizeof(sctp_pegs));
1931 m->m_len = sizeof(sctp_pegs);
/* SCTP_EVENTS: report which notification events the socket subscribed to */
1936 struct sctp_event_subscribe *events;
1938 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
1939 kprintf("get events\n");
1941 #endif /* SCTP_DEBUG */
1942 if ((size_t)m->m_len < sizeof(struct sctp_event_subscribe)) {
1944 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
1945 kprintf("M->M_LEN is %d not %d\n",
1947 (int)sizeof(struct sctp_event_subscribe));
1949 #endif /* SCTP_DEBUG */
1953 events = mtod(m, struct sctp_event_subscribe *);
/*
 * FIX: was "memset(events, 0, sizeof(events));" -- sizeof a POINTER,
 * which clears only 4/8 bytes of the structure.  The whole struct must
 * be zeroed, otherwise fields not explicitly set to 1 below are copied
 * back to userland uninitialized (a kernel-memory disclosure).
 */
1954 memset(events, 0, sizeof(*events));
1955 SCTP_INP_RLOCK(inp);
1956 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT)
1957 events->sctp_data_io_event = 1;
1959 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVASSOCEVNT)
1960 events->sctp_association_event = 1;
1962 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVPADDREVNT)
1963 events->sctp_address_event = 1;
1965 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVSENDFAILEVNT)
1966 events->sctp_send_failure_event = 1;
1968 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVPEERERR)
1969 events->sctp_peer_error_event = 1;
1971 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)
1972 events->sctp_shutdown_event = 1;
1974 if (inp->sctp_flags & SCTP_PCB_FLAGS_PDAPIEVNT)
1975 events->sctp_partial_delivery_event = 1;
1977 if (inp->sctp_flags & SCTP_PCB_FLAGS_ADAPTIONEVNT)
1978 events->sctp_adaption_layer_event = 1;
1980 if (inp->sctp_flags & SCTP_PCB_FLAGS_STREAM_RESETEVNT)
1981 events->sctp_stream_reset_events = 1;
1982 SCTP_INP_RUNLOCK(inp);
1983 m->m_len = sizeof(struct sctp_event_subscribe);
1988 case SCTP_ADAPTION_LAYER:
1989 if ((size_t)m->m_len < sizeof(int)) {
1994 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
1995 kprintf("getadaption ind\n");
1997 #endif /* SCTP_DEBUG */
1998 SCTP_INP_RLOCK(inp);
1999 *mtod(m, int *) = inp->sctp_ep.adaption_layer_indicator;
2000 SCTP_INP_RUNLOCK(inp);
2001 m->m_len = sizeof(int);
2003 case SCTP_SET_INITIAL_DBG_SEQ:
2004 if ((size_t)m->m_len < sizeof(int)) {
2009 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2010 kprintf("get initial dbg seq\n");
2012 #endif /* SCTP_DEBUG */
2013 SCTP_INP_RLOCK(inp);
2014 *mtod(m, int *) = inp->sctp_ep.initial_sequence_debug;
2015 SCTP_INP_RUNLOCK(inp);
2016 m->m_len = sizeof(int);
2018 case SCTP_GET_LOCAL_ADDR_SIZE:
2019 if ((size_t)m->m_len < sizeof(int)) {
2024 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2025 kprintf("get local sizes\n");
2027 #endif /* SCTP_DEBUG */
2028 SCTP_INP_RLOCK(inp);
2029 *mtod(m, int *) = sctp_count_max_addresses(inp);
2030 SCTP_INP_RUNLOCK(inp);
2031 m->m_len = sizeof(int);
2033 case SCTP_GET_REMOTE_ADDR_SIZE:
2035 sctp_assoc_t *assoc_id;
2037 struct sctp_nets *net;
2039 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2040 kprintf("get remote size\n");
2042 #endif /* SCTP_DEBUG */
2043 if ((size_t)m->m_len < sizeof(sctp_assoc_t)) {
/* NOTE(review): %d for a size_t argument is a format mismatch on LP64;
 * should be %zu or an (int) cast like the kprintf at line 1945. */
2045 kprintf("m->m_len:%d not %d\n",
2046 m->m_len, sizeof(sctp_assoc_t));
2047 #endif /* SCTP_DEBUG */
2052 val = mtod(m, u_int32_t *);
2053 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2054 SCTP_INP_RLOCK(inp);
2055 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2057 SCTP_TCB_LOCK(stcb);
2058 SCTP_INP_RUNLOCK(inp);
2061 assoc_id = mtod(m, sctp_assoc_t *);
2062 stcb = sctp_findassociation_ep_asocid(inp, *assoc_id);
2071 /* Count the sizes */
2072 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2073 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) ||
2074 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2075 sz += sizeof(struct sockaddr_in6);
2076 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2077 sz += sizeof(struct sockaddr_in);
2083 SCTP_TCB_UNLOCK(stcb);
2085 m->m_len = sizeof(u_int32_t);
2088 case SCTP_GET_PEER_ADDRESSES:
2090 * Get the address information, an array
2091 * is passed in to fill up we pack it.
2095 struct sockaddr_storage *sas;
2096 struct sctp_nets *net;
2097 struct sctp_getaddresses *saddr;
2099 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2100 kprintf("get peer addresses\n");
2102 #endif /* SCTP_DEBUG */
2103 if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
2107 left = m->m_len - sizeof(struct sctp_getaddresses);
2108 saddr = mtod(m, struct sctp_getaddresses *);
2109 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2110 SCTP_INP_RLOCK(inp);
2111 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2113 SCTP_TCB_LOCK(stcb);
2114 SCTP_INP_RUNLOCK(inp);
2116 stcb = sctp_findassociation_ep_asocid(inp,
2117 saddr->sget_assoc_id);
2122 m->m_len = sizeof(struct sctp_getaddresses);
2123 sas = (struct sockaddr_storage *)&saddr->addr[0];
2125 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
/* v4 peers are reported as mapped v6 when the socket requested it */
2126 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) ||
2127 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2128 cpsz = sizeof(struct sockaddr_in6);
2129 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2130 cpsz = sizeof(struct sockaddr_in);
2136 /* not enough room. */
2138 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2139 kprintf("Out of room\n");
2141 #endif /* SCTP_DEBUG */
2144 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
2145 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) {
2146 /* Must map the address */
2147 in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr,
2148 (struct sockaddr_in6 *)sas);
2150 memcpy(sas, &net->ro._l_addr, cpsz);
2152 ((struct sockaddr_in *)sas)->sin_port = stcb->rport;
2154 sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
2158 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
2159 kprintf("left now:%d mlen:%d\n",
2162 #endif /* SCTP_DEBUG */
2164 SCTP_TCB_UNLOCK(stcb);
2167 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2168 kprintf("All done\n");
2170 #endif /* SCTP_DEBUG */
2172 case SCTP_GET_LOCAL_ADDRESSES:
2175 struct sockaddr_storage *sas;
2176 struct sctp_getaddresses *saddr;
2178 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2179 kprintf("get local addresses\n");
2181 #endif /* SCTP_DEBUG */
2182 if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
2186 saddr = mtod(m, struct sctp_getaddresses *);
2188 if (saddr->sget_assoc_id) {
2189 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2190 SCTP_INP_RLOCK(inp);
2191 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2193 SCTP_TCB_LOCK(stcb);
2194 SCTP_INP_RUNLOCK(inp);
2196 stcb = sctp_findassociation_ep_asocid(inp, saddr->sget_assoc_id);
2202 * assure that the TCP model does not need a assoc id
2205 if ( (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) &&
2207 SCTP_INP_RLOCK(inp);
2208 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2210 SCTP_TCB_LOCK(stcb);
2211 SCTP_INP_RUNLOCK(inp);
2213 sas = (struct sockaddr_storage *)&saddr->addr[0];
2214 limit = m->m_len - sizeof(sctp_assoc_t);
2215 actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
2216 SCTP_TCB_UNLOCK(stcb);
2217 m->m_len = sizeof(struct sockaddr_storage) + actual;
2220 case SCTP_PEER_ADDR_PARAMS:
2222 struct sctp_paddrparams *paddrp;
2223 struct sctp_nets *net;
2226 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2227 kprintf("Getting peer_addr_params\n");
2229 #endif /* SCTP_DEBUG */
2230 if ((size_t)m->m_len < sizeof(struct sctp_paddrparams)) {
2232 if (sctp_debug_on & SCTP_DEBUG_USRREQ2) {
2233 kprintf("Hmm m->m_len:%d is to small\n",
2236 #endif /* SCTP_DEBUG */
2240 paddrp = mtod(m, struct sctp_paddrparams *);
/* Resolve the target TCB: first by assoc id, then by address below */
2243 if (paddrp->spp_assoc_id) {
2245 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2246 kprintf("In spp_assoc_id find type\n");
2248 #endif /* SCTP_DEBUG */
2249 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2250 SCTP_INP_RLOCK(inp);
2251 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2253 SCTP_TCB_LOCK(stcb);
2254 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
2256 SCTP_INP_RLOCK(inp);
2258 stcb = sctp_findassociation_ep_asocid(inp, paddrp->spp_assoc_id);
2265 if ( (stcb == NULL) &&
2266 ((((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET) ||
2267 (((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET6))) {
2268 /* Lookup via address */
2270 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2271 kprintf("Ok we need to lookup a param\n");
2273 #endif /* SCTP_DEBUG */
2274 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2275 SCTP_INP_RLOCK(inp);
2276 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2278 SCTP_TCB_LOCK(stcb);
2279 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
2281 SCTP_INP_RUNLOCK(inp);
/* hold a ref on the inp across the unlocked address lookup */
2283 SCTP_INP_WLOCK(inp);
2284 SCTP_INP_INCR_REF(inp);
2285 SCTP_INP_WUNLOCK(inp);
2286 stcb = sctp_findassociation_ep_addr(&inp,
2287 (struct sockaddr *)&paddrp->spp_address,
2290 SCTP_INP_WLOCK(inp);
2291 SCTP_INP_DECR_REF(inp);
2292 SCTP_INP_WUNLOCK(inp);
2301 /* Effects the Endpoint */
2303 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2304 kprintf("User wants EP level info\n");
2306 #endif /* SCTP_DEBUG */
2310 /* Applys to the specific association */
2312 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2313 kprintf("In TCB side\n");
2315 #endif /* SCTP_DEBUG */
2317 paddrp->spp_pathmaxrxt = net->failure_threshold;
2319 /* No destination so return default value */
2320 paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
2322 paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
2323 paddrp->spp_assoc_id = sctp_get_associd(stcb);
2324 SCTP_TCB_UNLOCK(stcb);
2326 /* Use endpoint defaults */
2327 SCTP_INP_RLOCK(inp);
2329 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2330 kprintf("In EP level info\n");
2332 #endif /* SCTP_DEBUG */
2333 paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
2334 paddrp->spp_hbinterval = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT];
2335 paddrp->spp_assoc_id = (sctp_assoc_t)0;
2336 SCTP_INP_RUNLOCK(inp);
2338 m->m_len = sizeof(struct sctp_paddrparams);
2341 case SCTP_GET_PEER_ADDR_INFO:
2343 struct sctp_paddrinfo *paddri;
2344 struct sctp_nets *net;
2346 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2347 kprintf("GetPEER ADDR_INFO\n");
2349 #endif /* SCTP_DEBUG */
2350 if ((size_t)m->m_len < sizeof(struct sctp_paddrinfo)) {
2354 paddri = mtod(m, struct sctp_paddrinfo *);
2356 if ((((struct sockaddr *)&paddri->spinfo_address)->sa_family == AF_INET) ||
2357 (((struct sockaddr *)&paddri->spinfo_address)->sa_family == AF_INET6)) {
2358 /* Lookup via address */
2359 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2360 SCTP_INP_RLOCK(inp);
2361 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2363 SCTP_TCB_LOCK(stcb);
2364 net = sctp_findnet(stcb,
2365 (struct sockaddr *)&paddri->spinfo_address);
2367 SCTP_INP_RUNLOCK(inp);
2369 SCTP_INP_WLOCK(inp);
2370 SCTP_INP_INCR_REF(inp);
2371 SCTP_INP_WUNLOCK(inp);
2372 stcb = sctp_findassociation_ep_addr(&inp,
2373 (struct sockaddr *)&paddri->spinfo_address,
2376 SCTP_INP_WLOCK(inp);
2377 SCTP_INP_DECR_REF(inp);
2378 SCTP_INP_WUNLOCK(inp);
2385 if ((stcb == NULL) || (net == NULL)) {
2389 m->m_len = sizeof(struct sctp_paddrinfo);
2390 paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK|SCTP_ADDR_NOHB);
2391 paddri->spinfo_cwnd = net->cwnd;
/* smoothed RTT derived from the scaled srtt/rttvar pair */
2392 paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
2393 paddri->spinfo_rto = net->RTO;
2394 paddri->spinfo_assoc_id = sctp_get_associd(stcb);
2395 SCTP_TCB_UNLOCK(stcb);
2398 case SCTP_PCB_STATUS:
2400 struct sctp_pcbinfo *spcb;
2402 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2403 kprintf("PCB status\n");
2405 #endif /* SCTP_DEBUG */
2406 if ((size_t)m->m_len < sizeof(struct sctp_pcbinfo)) {
2410 spcb = mtod(m, struct sctp_pcbinfo *);
2411 sctp_fill_pcbinfo(spcb);
2412 m->m_len = sizeof(struct sctp_pcbinfo);
2417 struct sctp_nets *net;
2418 struct sctp_status *sstat;
2420 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2421 kprintf("SCTP status\n");
2423 #endif /* SCTP_DEBUG */
2425 if ((size_t)m->m_len < sizeof(struct sctp_status)) {
2429 sstat = mtod(m, struct sctp_status *);
2431 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2432 SCTP_INP_RLOCK(inp);
2433 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2435 SCTP_TCB_LOCK(stcb);
2436 SCTP_INP_RUNLOCK(inp);
2438 stcb = sctp_findassociation_ep_asocid(inp, sstat->sstat_assoc_id);
2445 * I think passing the state is fine since
2446 * sctp_constants.h will be available to the user
2449 sstat->sstat_state = stcb->asoc.state;
2450 sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
2451 sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
2453 * We can't include chunks that have been passed
2454 * to the socket layer. Only things in queue.
2456 sstat->sstat_penddata = (stcb->asoc.cnt_on_delivery_queue +
2457 stcb->asoc.cnt_on_reasm_queue +
2458 stcb->asoc.cnt_on_all_streams);
2461 sstat->sstat_instrms = stcb->asoc.streamincnt;
2462 sstat->sstat_outstrms = stcb->asoc.streamoutcnt;
2463 sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc);
2464 memcpy(&sstat->sstat_primary.spinfo_address,
2465 &stcb->asoc.primary_destination->ro._l_addr,
2466 ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len);
2467 net = stcb->asoc.primary_destination;
2468 ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport;
2470 * Again the user can get info from sctp_constants.h
2471 * for what the state of the network is.
2473 sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK;
2474 sstat->sstat_primary.spinfo_cwnd = net->cwnd;
2475 sstat->sstat_primary.spinfo_srtt = net->lastsa;
2476 sstat->sstat_primary.spinfo_rto = net->RTO;
2477 sstat->sstat_primary.spinfo_mtu = net->mtu;
2478 sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb);
2479 SCTP_TCB_UNLOCK(stcb);
2480 m->m_len = sizeof(*sstat);
2485 struct sctp_rtoinfo *srto;
2487 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2488 kprintf("RTO Info\n");
2490 #endif /* SCTP_DEBUG */
2491 if ((size_t)m->m_len < sizeof(struct sctp_rtoinfo)) {
2495 srto = mtod(m, struct sctp_rtoinfo *);
2496 if (srto->srto_assoc_id == 0) {
2497 /* Endpoint only please */
2498 SCTP_INP_RLOCK(inp);
2499 srto->srto_initial = inp->sctp_ep.initial_rto;
2500 srto->srto_max = inp->sctp_ep.sctp_maxrto;
2501 srto->srto_min = inp->sctp_ep.sctp_minrto;
2502 SCTP_INP_RUNLOCK(inp);
2505 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2506 SCTP_INP_RLOCK(inp);
2507 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2509 SCTP_TCB_LOCK(stcb);
2510 SCTP_INP_RUNLOCK(inp);
2512 stcb = sctp_findassociation_ep_asocid(inp, srto->srto_assoc_id);
2518 srto->srto_initial = stcb->asoc.initial_rto;
2519 srto->srto_max = stcb->asoc.maxrto;
2520 srto->srto_min = stcb->asoc.minrto;
2521 SCTP_TCB_UNLOCK(stcb);
2522 m->m_len = sizeof(*srto);
2525 case SCTP_ASSOCINFO:
2527 struct sctp_assocparams *sasoc;
2529 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2530 kprintf("Associnfo\n");
2532 #endif /* SCTP_DEBUG */
2533 if ((size_t)m->m_len < sizeof(struct sctp_assocparams)) {
2537 sasoc = mtod(m, struct sctp_assocparams *);
2540 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2541 SCTP_INP_RLOCK(inp);
2542 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2544 SCTP_TCB_LOCK(stcb);
2545 SCTP_INP_RUNLOCK(inp);
2547 if ((sasoc->sasoc_assoc_id) && (stcb == NULL)) {
2548 stcb = sctp_findassociation_ep_asocid(inp,
2549 sasoc->sasoc_assoc_id);
2559 sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
2560 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
2561 sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
2562 sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
2563 sasoc->sasoc_cookie_life = stcb->asoc.cookie_life;
2564 SCTP_TCB_UNLOCK(stcb);
2566 SCTP_INP_RLOCK(inp);
2567 sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
2568 sasoc->sasoc_number_peer_destinations = 0;
2569 sasoc->sasoc_peer_rwnd = 0;
2570 sasoc->sasoc_local_rwnd = ssb_space(&inp->sctp_socket->so_rcv);
2571 sasoc->sasoc_cookie_life = inp->sctp_ep.def_cookie_life;
2572 SCTP_INP_RUNLOCK(inp);
2574 m->m_len = sizeof(*sasoc);
2577 case SCTP_DEFAULT_SEND_PARAM:
2579 struct sctp_sndrcvinfo *s_info;
2581 if (m->m_len != sizeof(struct sctp_sndrcvinfo)) {
2585 s_info = mtod(m, struct sctp_sndrcvinfo *);
2586 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2587 SCTP_INP_RLOCK(inp);
2588 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2590 SCTP_TCB_LOCK(stcb);
2591 SCTP_INP_RUNLOCK(inp);
2593 stcb = sctp_findassociation_ep_asocid(inp, s_info->sinfo_assoc_id);
2600 *s_info = stcb->asoc.def_send;
2601 SCTP_TCB_UNLOCK(stcb);
2602 m->m_len = sizeof(*s_info);
2606 struct sctp_initmsg *sinit;
2608 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2609 kprintf("initmsg\n");
2611 #endif /* SCTP_DEBUG */
2612 if ((size_t)m->m_len < sizeof(struct sctp_initmsg)) {
2616 sinit = mtod(m, struct sctp_initmsg *);
2617 SCTP_INP_RLOCK(inp);
2618 sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count;
2619 sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome;
2620 sinit->sinit_max_attempts = inp->sctp_ep.max_init_times;
2621 sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max;
2622 SCTP_INP_RUNLOCK(inp);
2623 m->m_len = sizeof(*sinit);
2626 case SCTP_PRIMARY_ADDR:
2627 /* we allow a "get" operation on this */
2629 struct sctp_setprim *ssp;
2632 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2633 kprintf("setprimary\n");
2635 #endif /* SCTP_DEBUG */
2636 if ((size_t)m->m_len < sizeof(struct sctp_setprim)) {
2640 ssp = mtod(m, struct sctp_setprim *);
2641 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2642 SCTP_INP_RLOCK(inp);
2643 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2645 SCTP_TCB_LOCK(stcb);
2646 SCTP_INP_RUNLOCK(inp);
2648 stcb = sctp_findassociation_ep_asocid(inp, ssp->ssp_assoc_id);
2650 /* one last shot, try it by the address in */
2651 struct sctp_nets *net;
2653 SCTP_INP_WLOCK(inp);
2654 SCTP_INP_INCR_REF(inp);
2655 SCTP_INP_WUNLOCK(inp);
2656 stcb = sctp_findassociation_ep_addr(&inp,
2657 (struct sockaddr *)&ssp->ssp_addr,
2660 SCTP_INP_WLOCK(inp);
2661 SCTP_INP_DECR_REF(inp);
2662 SCTP_INP_WUNLOCK(inp);
2670 /* simply copy out the sockaddr_storage... */
2671 memcpy(&ssp->ssp_addr,
2672 &stcb->asoc.primary_destination->ro._l_addr,
2673 ((struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr)->sa_len);
2674 SCTP_TCB_UNLOCK(stcb);
2675 m->m_len = sizeof(*ssp);
2679 error = ENOPROTOOPT;
2682 } /* end switch (sopt->sopt_name) */
/*
 * sctp_optsset() - handle setsockopt(IPPROTO_SCTP, ...) on an SCTP socket.
 *
 * The option payload arrives in the mbuf *m; each case first validates
 * m->m_len against the expected structure size, then applies the value
 * either to the endpoint (inp, under SCTP_INP_WLOCK) or to a specific
 * association (stcb, under SCTP_TCB_LOCK).  Association lookup follows a
 * recurring pattern: for a connected TCP-model socket take the first (only)
 * tcb on inp->sctp_asoc_list; otherwise look up by assoc id and, for some
 * options, fall back to a lookup by peer address with the inp refcount
 * bumped around the unlocked search.
 *
 * NOTE(review): this view of the file has lines elided between the numbered
 * statements; comments below describe only what the visible lines show.
 */
2687 sctp_optsset(struct socket *so,
2690 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
2697 int error, *mopt, set_opt;
2699 struct sctp_tcb *stcb = NULL;
2700 struct sctp_inpcb *inp;
2704 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
2705 kprintf("optsset:MP is NULL EINVAL\n");
2707 #endif /* SCTP_DEBUG */
2714 inp = (struct sctp_inpcb *)so->so_pcb;
/*
 * Boolean endpoint-flag options: all map an int value onto a single bit
 * (set_opt) in inp->sctp_flags, toggled under the inp write lock below.
 */
2721 case SCTP_AUTOCLOSE:
2722 case SCTP_AUTO_ASCONF:
2723 case SCTP_DISABLE_FRAGMENTS:
2724 case SCTP_I_WANT_MAPPED_V4_ADDR:
2725 /* copy in the option value */
2726 if ((size_t)m->m_len < sizeof(int)) {
2730 mopt = mtod(m, int *);
2735 case SCTP_DISABLE_FRAGMENTS:
2736 set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT;
2738 case SCTP_AUTO_ASCONF:
2739 set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
2742 case SCTP_I_WANT_MAPPED_V4_ADDR:
/* mapped-v4 only makes sense on a v6-bound endpoint */
2743 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2744 set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
2750 set_opt = SCTP_PCB_FLAGS_NODELAY;
2752 case SCTP_AUTOCLOSE:
2753 set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
2755 * The value is in ticks.
2756 * Note this does not effect old associations, only
2759 inp->sctp_ep.auto_close_time = (*mopt * hz);
2762 SCTP_INP_WLOCK(inp);
2764 inp->sctp_flags |= set_opt;
2766 inp->sctp_flags &= ~set_opt;
2768 SCTP_INP_WUNLOCK(inp);
2770 case SCTP_MY_PUBLIC_KEY: /* set my public key */
2771 case SCTP_SET_AUTH_CHUNKS: /* set the authenticated chunks required */
2772 case SCTP_SET_AUTH_SECRET: /* set the actual secret for the endpoint */
2773 /* not supported yet and until we refine the draft */
2777 case SCTP_CLR_STAT_LOG:
2778 #ifdef SCTP_STAT_LOGGING
2779 sctp_clr_stat_log();
/* delayed-SACK timer: value is in milliseconds, clamped to [10, 500] */
2784 case SCTP_DELAYED_ACK_TIME:
2787 if ((size_t)m->m_len < sizeof(int32_t)) {
2791 tm = mtod(m, int32_t *);
2793 if ((*tm < 10) || (*tm > 500)) {
2794 /* can't be smaller than 10ms */
2795 /* MUST NOT be larger than 500ms */
2799 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(*tm);
/*
 * Stream reset: requires peer support (peer_supports_strreset) and no
 * reset already outstanding; builds a request via sctp_send_str_reset_req().
 */
2802 case SCTP_RESET_STREAMS:
2804 struct sctp_stream_reset *strrst;
2805 uint8_t two_way, not_peer;
2807 if ((size_t)m->m_len < sizeof(struct sctp_stream_reset)) {
2811 strrst = mtod(m, struct sctp_stream_reset *);
2813 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2814 SCTP_INP_RLOCK(inp);
2815 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2817 SCTP_TCB_LOCK(stcb);
2818 SCTP_INP_RUNLOCK(inp);
2820 stcb = sctp_findassociation_ep_asocid(inp, strrst->strrst_assoc_id);
2825 if (stcb->asoc.peer_supports_strreset == 0) {
2826 /* Peer does not support it,
2827 * we return protocol not supported since
2828 * this is true for this feature and this
2829 * peer, not the socket request in general.
2831 error = EPROTONOSUPPORT;
2832 SCTP_TCB_UNLOCK(stcb);
2836 /* Having re-thought this code I added as I write the I-D there
2837 * is NO need for it. The peer, if we are requesting a stream-reset
2838 * will send a request to us but will itself do what we do, take
2839 * and copy off the "reset information" we send and queue TSN's
2840 * larger than the send-next in our response message. Thus they
2843 /* if (stcb->asoc.sending_seq != (stcb->asoc.last_acked_seq + 1)) {*/
2844 /* Must have all sending data ack'd before we
2845 * start this procedure. This is a bit restrictive
2846 * and we SHOULD work on changing this so ONLY the
2847 * streams being RESET get held up. So, a reset-all
2848 * would require this.. but a reset specific just
2849 * needs to be sure that the ones being reset have
2850 * nothing on the send_queue. For now we will
2851 * skip this more detailed method and do a course
2852 * way.. i.e. nothing pending ... for future FIX ME!
2858 if (stcb->asoc.stream_reset_outstanding) {
2860 SCTP_TCB_UNLOCK(stcb);
2863 if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) {
2866 } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) {
2869 } else if (strrst->strrst_flags == SCTP_RESET_BOTH) {
2874 SCTP_TCB_UNLOCK(stcb);
2877 sctp_send_str_reset_req(stcb, strrst->strrst_num_streams,
2878 strrst->strrst_list, two_way, not_peer);
2880 sctp_chunk_output(inp, stcb, 12);
2881 SCTP_TCB_UNLOCK(stcb);
2886 case SCTP_RESET_PEGS:
2887 memset(sctp_pegs, 0, sizeof(sctp_pegs));
/*
 * connectx family: payload is an address list; the real work is done in
 * sctp_do_connect_x() (last arg selects immediate vs. delayed INIT).
 */
2890 case SCTP_CONNECT_X:
2891 if ((size_t)m->m_len < (sizeof(int) + sizeof(struct sockaddr_in))) {
2895 error = sctp_do_connect_x(so, inp, m, p, 0);
2898 case SCTP_CONNECT_X_DELAYED:
2899 if ((size_t)m->m_len < (sizeof(int) + sizeof(struct sockaddr_in))) {
2903 error = sctp_do_connect_x(so, inp, m, p, 1);
2906 case SCTP_CONNECT_X_COMPLETE:
2908 struct sockaddr *sa;
2909 struct sctp_nets *net;
2910 if ((size_t)m->m_len < sizeof(struct sockaddr_in)) {
2914 sa = mtod(m, struct sockaddr *);
2916 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2917 SCTP_INP_RLOCK(inp);
2918 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2920 SCTP_TCB_LOCK(stcb);
2921 net = sctp_findnet(stcb, sa);
2923 SCTP_INP_RUNLOCK(inp);
/* address lookup: hold a ref on inp while searching unlocked */
2925 SCTP_INP_WLOCK(inp);
2926 SCTP_INP_INCR_REF(inp);
2927 SCTP_INP_WUNLOCK(inp);
2928 stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL);
2930 SCTP_INP_WLOCK(inp);
2931 SCTP_INP_DECR_REF(inp);
2932 SCTP_INP_WUNLOCK(inp);
/* fire the INIT that a CONNECT_X_DELAYED left pending */
2940 if (stcb->asoc.delayed_connection == 1) {
2941 stcb->asoc.delayed_connection = 0;
2942 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
2943 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
2944 sctp_send_initiate(inp, stcb);
2946 /* already expired or did not use delayed connectx */
2949 SCTP_TCB_UNLOCK(stcb);
2955 SCTP_INP_WLOCK(inp);
2956 burst = mtod(m, u_int8_t *);
2958 inp->sctp_ep.max_burst = *burst;
2960 SCTP_INP_WUNLOCK(inp);
/* segment size: user value + per-family overhead, floored at MHLEN */
2967 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2968 ovh = SCTP_MED_OVERHEAD;
2970 ovh = SCTP_MED_V4_OVERHEAD;
2972 segsize = mtod(m, u_int32_t *);
2977 SCTP_INP_WLOCK(inp);
2978 inp->sctp_frag_point = (*segsize+ovh);
2979 if (inp->sctp_frag_point < MHLEN) {
2980 inp->sctp_frag_point = MHLEN;
2982 SCTP_INP_WUNLOCK(inp);
2985 case SCTP_SET_DEBUG_LEVEL:
2989 if ((size_t)m->m_len < sizeof(u_int32_t)) {
2993 level = mtod(m, u_int32_t *);
2995 sctp_debug_on = (*level & (SCTP_DEBUG_ALL |
2997 kprintf("SETTING DEBUG LEVEL to %x\n",
2998 (u_int)sctp_debug_on);
3003 #endif /* SCTP_DEBUG */
/*
 * SCTP_EVENTS: each sctp_event_subscribe member toggles the matching
 * notification flag on the endpoint, all under one inp write lock.
 */
3007 struct sctp_event_subscribe *events;
3008 if ((size_t)m->m_len < sizeof(struct sctp_event_subscribe)) {
3012 SCTP_INP_WLOCK(inp);
3013 events = mtod(m, struct sctp_event_subscribe *);
3014 if (events->sctp_data_io_event) {
3015 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVDATAIOEVNT;
3017 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVDATAIOEVNT;
3020 if (events->sctp_association_event) {
3021 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVASSOCEVNT;
3023 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVASSOCEVNT;
3026 if (events->sctp_address_event) {
3027 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVPADDREVNT;
3029 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVPADDREVNT;
3032 if (events->sctp_send_failure_event) {
3033 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVSENDFAILEVNT;
3035 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVSENDFAILEVNT;
3038 if (events->sctp_peer_error_event) {
3039 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVPEERERR;
3041 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVPEERERR;
3044 if (events->sctp_shutdown_event) {
3045 inp->sctp_flags |= SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT;
3047 inp->sctp_flags &= ~SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT;
3050 if (events->sctp_partial_delivery_event) {
3051 inp->sctp_flags |= SCTP_PCB_FLAGS_PDAPIEVNT;
3053 inp->sctp_flags &= ~SCTP_PCB_FLAGS_PDAPIEVNT;
3056 if (events->sctp_adaption_layer_event) {
3057 inp->sctp_flags |= SCTP_PCB_FLAGS_ADAPTIONEVNT;
3059 inp->sctp_flags &= ~SCTP_PCB_FLAGS_ADAPTIONEVNT;
3062 if (events->sctp_stream_reset_events) {
3063 inp->sctp_flags |= SCTP_PCB_FLAGS_STREAM_RESETEVNT;
3065 inp->sctp_flags &= ~SCTP_PCB_FLAGS_STREAM_RESETEVNT;
3067 SCTP_INP_WUNLOCK(inp);
3071 case SCTP_ADAPTION_LAYER:
3073 struct sctp_setadaption *adap_bits;
3074 if ((size_t)m->m_len < sizeof(struct sctp_setadaption)) {
3078 SCTP_INP_WLOCK(inp);
3079 adap_bits = mtod(m, struct sctp_setadaption *);
3080 inp->sctp_ep.adaption_layer_indicator = adap_bits->ssb_adaption_ind;
3081 SCTP_INP_WUNLOCK(inp);
3084 case SCTP_SET_INITIAL_DBG_SEQ:
3087 if ((size_t)m->m_len < sizeof(u_int32_t)) {
3091 SCTP_INP_WLOCK(inp);
3092 vvv = mtod(m, u_int32_t *);
3093 inp->sctp_ep.initial_sequence_debug = *vvv;
3094 SCTP_INP_WUNLOCK(inp);
/*
 * Default send parameters for an association: validated against the
 * stream count, then flags are masked to the allowed MSG_* set.
 * Note the exact-size check (!=) here, unlike the < checks elsewhere.
 */
3097 case SCTP_DEFAULT_SEND_PARAM:
3099 struct sctp_sndrcvinfo *s_info;
3101 if (m->m_len != sizeof(struct sctp_sndrcvinfo)) {
3105 s_info = mtod(m, struct sctp_sndrcvinfo *);
3107 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3108 SCTP_INP_RLOCK(inp);
3109 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3111 SCTP_TCB_LOCK(stcb);
3112 SCTP_INP_RUNLOCK(inp);
3114 stcb = sctp_findassociation_ep_asocid(inp, s_info->sinfo_assoc_id);
3120 /* Validate things */
3121 if (s_info->sinfo_stream > stcb->asoc.streamoutcnt) {
3122 SCTP_TCB_UNLOCK(stcb);
3126 /* Mask off the flags that are allowed */
3127 s_info->sinfo_flags = (s_info->sinfo_flags &
3128 (MSG_UNORDERED | MSG_ADDR_OVER |
3129 MSG_PR_SCTP_TTL | MSG_PR_SCTP_BUF));
3131 stcb->asoc.def_send = *s_info;
3132 SCTP_TCB_UNLOCK(stcb);
/*
 * Per-destination parameters (path max retransmit, heartbeat interval).
 * With an stcb+net this adjusts one path; with an stcb only it adjusts
 * the association default; with neither it sets endpoint defaults.
 */
3135 case SCTP_PEER_ADDR_PARAMS:
3137 struct sctp_paddrparams *paddrp;
3138 struct sctp_nets *net;
3139 if ((size_t)m->m_len < sizeof(struct sctp_paddrparams)) {
3143 paddrp = mtod(m, struct sctp_paddrparams *);
3145 if (paddrp->spp_assoc_id) {
3146 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3147 SCTP_INP_RLOCK(inp);
3148 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3150 SCTP_TCB_LOCK(stcb);
3151 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
3153 SCTP_INP_RUNLOCK(inp);
3155 stcb = sctp_findassociation_ep_asocid(inp, paddrp->spp_assoc_id);
3162 if ((stcb == NULL) &&
3163 ((((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET) ||
3164 (((struct sockaddr *)&paddrp->spp_address)->sa_family == AF_INET6))) {
3165 /* Lookup via address */
3166 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3167 SCTP_INP_RLOCK(inp);
3168 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3170 SCTP_TCB_LOCK(stcb);
3171 net = sctp_findnet(stcb,
3172 (struct sockaddr *)&paddrp->spp_address);
3174 SCTP_INP_RUNLOCK(inp);
3176 SCTP_INP_WLOCK(inp);
3177 SCTP_INP_INCR_REF(inp);
3178 SCTP_INP_WUNLOCK(inp);
3179 stcb = sctp_findassociation_ep_addr(&inp,
3180 (struct sockaddr *)&paddrp->spp_address,
3183 SCTP_INP_WLOCK(inp);
3184 SCTP_INP_DECR_REF(inp);
3185 SCTP_INP_WUNLOCK(inp);
3189 /* Effects the Endpoint */
3193 /* Applies to the specific association */
3194 if (paddrp->spp_pathmaxrxt) {
3196 if (paddrp->spp_pathmaxrxt)
3197 net->failure_threshold = paddrp->spp_pathmaxrxt;
3199 if (paddrp->spp_pathmaxrxt)
3200 stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt;
/* spp_hbinterval: 0 = heartbeats off, 0xffffffff = send one on demand */
3203 if ((paddrp->spp_hbinterval != 0) && (paddrp->spp_hbinterval != 0xffffffff)) {
3207 net->dest_state &= ~SCTP_ADDR_NOHB;
3209 old = stcb->asoc.heart_beat_delay;
3210 stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval;
3212 /* Turn back on the timer */
3213 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3216 } else if (paddrp->spp_hbinterval == 0xffffffff) {
3218 sctp_send_hb(stcb, 1, net);
3221 /* off on association */
3222 if (stcb->asoc.heart_beat_delay) {
3223 int cnt_of_unconf = 0;
3224 struct sctp_nets *lnet;
3225 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
3226 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
3230 /* stop the timer ONLY if we have no unconfirmed addresses
3232 if (cnt_of_unconf == 0)
3233 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3235 stcb->asoc.heart_beat_delay = 0;
3237 net->dest_state |= SCTP_ADDR_NOHB;
3240 SCTP_TCB_UNLOCK(stcb);
3242 /* Use endpoint defaults */
3243 SCTP_INP_WLOCK(inp);
3244 if (paddrp->spp_pathmaxrxt)
3245 inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt;
3246 if (paddrp->spp_hbinterval != SCTP_ISSUE_HB)
3247 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = paddrp->spp_hbinterval;
3248 SCTP_INP_WUNLOCK(inp);
/*
 * RTO bounds: values below an apparent 10ms floor are ignored;
 * assoc id 0 sets the endpoint defaults, otherwise the association's.
 */
3254 struct sctp_rtoinfo *srto;
3255 if ((size_t)m->m_len < sizeof(struct sctp_rtoinfo)) {
3259 srto = mtod(m, struct sctp_rtoinfo *);
3260 if (srto->srto_assoc_id == 0) {
3261 SCTP_INP_WLOCK(inp);
3262 /* If we have a null asoc, its default for the endpoint */
3263 if (srto->srto_initial > 10)
3264 inp->sctp_ep.initial_rto = srto->srto_initial;
3265 if (srto->srto_max > 10)
3266 inp->sctp_ep.sctp_maxrto = srto->srto_max;
3267 if (srto->srto_min > 10)
3268 inp->sctp_ep.sctp_minrto = srto->srto_min;
3269 SCTP_INP_WUNLOCK(inp);
3272 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3273 SCTP_INP_RLOCK(inp);
3274 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3276 SCTP_TCB_LOCK(stcb);
3277 SCTP_INP_RUNLOCK(inp);
3279 stcb = sctp_findassociation_ep_asocid(inp, srto->srto_assoc_id);
3284 /* Set in ms we hope :-) */
3285 if (srto->srto_initial > 10)
3286 stcb->asoc.initial_rto = srto->srto_initial;
3287 if (srto->srto_max > 10)
3288 stcb->asoc.maxrto = srto->srto_max;
3289 if (srto->srto_min > 10)
3290 stcb->asoc.minrto = srto->srto_min;
3291 SCTP_TCB_UNLOCK(stcb);
/*
 * SCTP_ASSOCINFO: note this also writes back into the user's sasoc
 * structure (number_peer_destinations, rwnd fields) while setting
 * max_send_times / cookie_life from it.
 */
3294 case SCTP_ASSOCINFO:
3296 struct sctp_assocparams *sasoc;
3298 if ((size_t)m->m_len < sizeof(struct sctp_assocparams)) {
3302 sasoc = mtod(m, struct sctp_assocparams *);
3303 if (sasoc->sasoc_assoc_id) {
3304 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3305 SCTP_INP_RLOCK(inp);
3306 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3308 SCTP_TCB_LOCK(stcb);
3309 SCTP_INP_RUNLOCK(inp);
3311 stcb = sctp_findassociation_ep_asocid(inp,
3312 sasoc->sasoc_assoc_id);
3322 if (sasoc->sasoc_asocmaxrxt)
3323 stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
3324 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
3325 sasoc->sasoc_peer_rwnd = 0;
3326 sasoc->sasoc_local_rwnd = 0;
/* NOTE(review): guard tests stcb->asoc.cookie_life, not
 * sasoc->sasoc_cookie_life as the endpoint branch below does —
 * looks asymmetric; verify intent against the upstream sources. */
3327 if (stcb->asoc.cookie_life)
3328 stcb->asoc.cookie_life = sasoc->sasoc_cookie_life;
3329 SCTP_TCB_UNLOCK(stcb);
3331 SCTP_INP_WLOCK(inp);
3332 if (sasoc->sasoc_asocmaxrxt)
3333 inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt;
3334 sasoc->sasoc_number_peer_destinations = 0;
3335 sasoc->sasoc_peer_rwnd = 0;
3336 sasoc->sasoc_local_rwnd = 0;
3337 if (sasoc->sasoc_cookie_life)
3338 inp->sctp_ep.def_cookie_life = sasoc->sasoc_cookie_life;
3339 SCTP_INP_WUNLOCK(inp);
/* SCTP_INITMSG: defaults used when new associations are created */
3345 struct sctp_initmsg *sinit;
3347 if ((size_t)m->m_len < sizeof(struct sctp_initmsg)) {
3351 sinit = mtod(m, struct sctp_initmsg *);
3352 SCTP_INP_WLOCK(inp);
3353 if (sinit->sinit_num_ostreams)
3354 inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams;
3356 if (sinit->sinit_max_instreams)
3357 inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams;
3359 if (sinit->sinit_max_attempts)
3360 inp->sctp_ep.max_init_times = sinit->sinit_max_attempts;
3362 if (sinit->sinit_max_init_timeo > 10)
3363 /* We must be at least a 100ms (we set in ticks) */
3364 inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo;
3365 SCTP_INP_WUNLOCK(inp);
/*
 * Select our primary destination for an association; only allowed for
 * a confirmed address, and flags the old/new nets so in-flight data is
 * accounted to the right path across the switch.
 */
3368 case SCTP_PRIMARY_ADDR:
3370 struct sctp_setprim *spa;
3371 struct sctp_nets *net, *lnet;
3372 if ((size_t)m->m_len < sizeof(struct sctp_setprim)) {
3376 spa = mtod(m, struct sctp_setprim *);
3378 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3379 SCTP_INP_RLOCK(inp);
3380 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3382 SCTP_TCB_LOCK(stcb);
3387 SCTP_INP_RUNLOCK(inp);
3389 stcb = sctp_findassociation_ep_asocid(inp, spa->ssp_assoc_id);
3392 SCTP_INP_WLOCK(inp);
3393 SCTP_INP_INCR_REF(inp);
3394 SCTP_INP_WUNLOCK(inp);
3395 stcb = sctp_findassociation_ep_addr(&inp,
3396 (struct sockaddr *)&spa->ssp_addr,
3399 SCTP_INP_WLOCK(inp);
3400 SCTP_INP_DECR_REF(inp);
3401 SCTP_INP_WUNLOCK(inp);
3406 /* find the net, associd or connected lookup type */
3407 net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr);
3409 SCTP_TCB_UNLOCK(stcb);
3414 if ((net != stcb->asoc.primary_destination) &&
3415 (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
3416 /* Ok we need to set it */
3417 lnet = stcb->asoc.primary_destination;
3418 lnet->next_tsn_at_change = net->next_tsn_at_change = stcb->asoc.sending_seq;
3419 if (sctp_set_primary_addr(stcb,
3422 if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3423 net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH;
3425 net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY;
3428 SCTP_TCB_UNLOCK(stcb);
/* Ask the PEER (via ASCONF) to use one of our addresses as its primary */
3432 case SCTP_SET_PEER_PRIMARY_ADDR:
3434 struct sctp_setpeerprim *sspp;
3435 if ((size_t)m->m_len < sizeof(struct sctp_setpeerprim)) {
3439 sspp = mtod(m, struct sctp_setpeerprim *);
3442 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3443 SCTP_INP_RLOCK(inp);
3444 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3446 SCTP_TCB_UNLOCK(stcb);
3447 SCTP_INP_RUNLOCK(inp);
3449 stcb = sctp_findassociation_ep_asocid(inp, sspp->sspp_assoc_id);
3454 if (sctp_set_primary_ip_address_sa(stcb, (struct sockaddr *)&sspp->sspp_addr) != 0) {
3457 SCTP_TCB_UNLOCK(stcb);
/*
 * bindx add: not valid on a bound-all socket.  V4-mapped v6 addresses
 * are converted to plain sockaddr_in first.  An unbound socket binds
 * directly to the address; otherwise the address is added to the
 * endpoint after checking no other endpoint owns it (sctp_pcb_findep).
 */
3460 case SCTP_BINDX_ADD_ADDR:
3462 struct sctp_getaddresses *addrs;
3463 struct sockaddr *addr_touse;
3464 struct sockaddr_in sin;
3465 /* see if we're bound all already! */
3466 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3470 if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
3474 addrs = mtod(m, struct sctp_getaddresses *);
3475 addr_touse = addrs->addr;
3476 if (addrs->addr->sa_family == AF_INET6) {
3477 struct sockaddr_in6 *sin6;
3478 sin6 = (struct sockaddr_in6 *)addr_touse;
3479 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
3480 in6_sin6_2_sin(&sin, sin6);
3481 addr_touse = (struct sockaddr *)&sin;
3484 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
3486 /* Can't get proc for Net/Open BSD */
3490 error = sctp_inpcb_bind(so, addr_touse, p);
3493 /* No locks required here since bind and mgmt_ep_sa all
3494 * do their own locking. If we do something for the FIX:
3495 * below we may need to lock in that case.
3497 if (addrs->sget_assoc_id == 0) {
3498 /* add the address */
3499 struct sctp_inpcb *lep;
3500 ((struct sockaddr_in *)addr_touse)->sin_port = inp->sctp_lport;
3501 lep = sctp_pcb_findep(addr_touse, 1, 0);
3503 /* We must decrement the refcount
3504 * since we have the ep already and
3505 * are binding. No remove going on
3508 SCTP_INP_WLOCK(inp);
3509 SCTP_INP_DECR_REF(inp);
3510 SCTP_INP_WUNLOCK(inp);
3513 /* already bound to it.. ok */
3515 } else if (lep == NULL) {
3516 ((struct sockaddr_in *)addr_touse)->sin_port = 0;
3517 error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
3518 SCTP_ADD_IP_ADDRESS);
3520 error = EADDRNOTAVAIL;
3526 /* FIX: decide whether we allow assoc based bindx */
/* bindx remove: mirror of the add case, deleting the address instead */
3530 case SCTP_BINDX_REM_ADDR:
3532 struct sctp_getaddresses *addrs;
3533 struct sockaddr *addr_touse;
3534 struct sockaddr_in sin;
3535 /* see if we're bound all already! */
3536 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3540 if ((size_t)m->m_len < sizeof(struct sctp_getaddresses)) {
3544 addrs = mtod(m, struct sctp_getaddresses *);
3545 addr_touse = addrs->addr;
3546 if (addrs->addr->sa_family == AF_INET6) {
3547 struct sockaddr_in6 *sin6;
3548 sin6 = (struct sockaddr_in6 *)addr_touse;
3549 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
3550 in6_sin6_2_sin(&sin, sin6);
3551 addr_touse = (struct sockaddr *)&sin;
3554 /* No lock required mgmt_ep_sa does its own locking. If
3555 * the FIX: below is ever changed we may need to
3556 * lock before calling association level binding.
3558 if (addrs->sget_assoc_id == 0) {
3559 /* delete the address */
3560 sctp_addr_mgmt_ep_sa(inp, addr_touse,
3561 SCTP_DEL_IP_ADDRESS);
3563 /* FIX: decide whether we allow assoc based bindx */
3568 error = ENOPROTOOPT;
3570 } /* end switch (opt) */
3575 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
/*
 * sctp_ctloutput() - sockopt entry point (FreeBSD/APPLE/DragonFly style).
 *
 * Non-SCTP levels are punted to ip_ctloutput()/ip6_ctloutput().  For SCTP
 * options the payload is staged through a private mbuf (cluster-backed when
 * larger than MLEN, capped at MCLBYTES), handed to sctp_optsset()/optsget(),
 * and for GET the (possibly rewritten) mbuf is copied back to the user.
 */
3577 sctp_ctloutput(struct socket *so, struct sockopt *sopt)
3579 struct mbuf *m = NULL;
3580 struct sctp_inpcb *inp;
3583 inp = (struct sctp_inpcb *)so->so_pcb;
3587 /* I made the same as TCP since we are not setup? */
3588 return (ECONNRESET);
3590 if (sopt->sopt_level != IPPROTO_SCTP) {
3591 /* wrong proto level... send back up to IP */
3593 if (INP_CHECK_SOCKAF(so, AF_INET6))
3594 error = ip6_ctloutput(so, sopt);
3597 error = ip_ctloutput(so, sopt);
3601 if (sopt->sopt_valsize > MCLBYTES) {
3603 * Restrict us down to a cluster size, that's all we can
3604 * pass either way...
3606 sopt->sopt_valsize = MCLBYTES;
3608 if (sopt->sopt_valsize) {
3610 m = m_get(MB_WAIT, MT_DATA);
3611 if (sopt->sopt_valsize > MLEN) {
/* need a cluster; bail if MCLGET could not attach one */
3612 MCLGET(m, MB_DONTWAIT);
3613 if ((m->m_flags & M_EXT) == 0) {
3619 error = sooptcopyin(sopt, mtod(m, caddr_t), sopt->sopt_valsize,
3620 sopt->sopt_valsize);
3625 m->m_len = sopt->sopt_valsize;
3627 if (sopt->sopt_dir == SOPT_SET) {
3628 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
3629 error = sctp_optsset(so, sopt->sopt_name, &m, sopt->sopt_td);
3631 error = sctp_optsset(so, sopt->sopt_name, &m, sopt->sopt_p);
3633 } else if (sopt->sopt_dir == SOPT_GET) {
3634 #if (defined (__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
3635 error = sctp_optsget(so, sopt->sopt_name, &m, sopt->sopt_td);
3637 error = sctp_optsget(so, sopt->sopt_name, &m, sopt->sopt_p);
/* on successful GET, copy the result back out to the caller */
3642 if ( (error == 0) && (m != NULL)) {
3643 error = sooptcopyout(sopt, mtod(m, caddr_t), m->m_len);
3645 } else if (m != NULL) {
3654 /* NetBSD and OpenBSD */
/*
 * sctp_ctloutput() - PRCO_SETOPT/PRCO_GETOPT entry point for the
 * NetBSD/OpenBSD pcb layout: a v4 socket carries an inpcb, a v6 socket an
 * in6pcb; non-SCTP levels are forwarded to the matching ip(6)_ctloutput().
 */
3656 sctp_ctloutput(int op, struct socket *so, int level, int optname,
3662 struct in6pcb *in6p;
3664 int family; /* family of the socket */
3666 family = so->so_proto->pr_domain->dom_family;
3671 inp = sotoinpcb(so);
3679 in6p = sotoin6pcb(so);
3684 return EAFNOSUPPORT;
/* no pcb of either family means the socket is not set up */
3689 if (inp == NULL && in6p == NULL)
3693 if (op == PRCO_SETOPT && *mp)
3695 return (ECONNRESET);
3697 if (level != IPPROTO_SCTP) {
3700 error = ip_ctloutput(op, so, level, optname, mp);
3704 error = ip6_ctloutput(op, so, level, optname, mp);
3711 /* Ok if we reach here it is a SCTP option we hope */
3712 if (op == PRCO_SETOPT) {
3713 error = sctp_optsset(so, optname, mp, NULL);
3716 } else if (op == PRCO_GETOPT) {
3717 error = sctp_optsget(so, optname, mp, NULL);
3728 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
/*
 * sctp_connect() - connect(2) handler for an SCTP socket.
 *
 * Takes the association-create lock plus the inp write lock, rejects a
 * socket that is being torn down, a v6 peer on a non-v6 endpoint, and a
 * second connect on an already-connected TCP-model socket; binds an
 * ephemeral port if still unbound; then allocates a new association and
 * fires the INIT via sctp_send_initiate().
 *
 * Returns 0 on success or an errno (ECONNRESET, EADDRINUSE, bind errors...).
 */
3729 sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
3732 #if defined(__FreeBSD__) || defined(__APPLE__)
3733 sctp_connect(struct socket *so, struct sockaddr *addr, struct proc *p)
3736 sctp_connect(struct socket *so, struct mbuf *nam, struct proc *p)
3738 struct sockaddr *addr = mtod(nam, struct sockaddr *);
3742 struct sctp_inpcb *inp;
3743 struct sctp_tcb *stcb;
3746 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
3747 kprintf("Connect called in SCTP to ");
3748 sctp_print_address(addr);
3749 kprintf("Port %d\n", ntohs(((struct sockaddr_in *)addr)->sin_port));
3751 #endif /* SCTP_DEBUG */
3753 inp = (struct sctp_inpcb *)so->so_pcb;
3756 /* I made the same as TCP since we are not setup? */
3757 return (ECONNRESET);
3759 SCTP_ASOC_CREATE_LOCK(inp);
3760 SCTP_INP_WLOCK(inp);
/*
 * FIX(review): the second arm of this || previously re-tested
 * SCTP_PCB_FLAGS_SOCKET_GONE, making it a no-op duplicate; it was
 * clearly meant to be the companion teardown flag SOCKET_ALLGONE
 * (as later corrected upstream), so connect() is refused while the
 * socket is going away in either state.
 */
3761 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3762 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
3763 /* Should I really unlock ? */
3764 SCTP_INP_WUNLOCK(inp);
3765 SCTP_ASOC_CREATE_UNLOCK(inp);
/* a v6 destination requires a v6-bound endpoint */
3770 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
3771 (addr->sa_family == AF_INET6)) {
3772 SCTP_INP_WUNLOCK(inp);
3773 SCTP_ASOC_CREATE_UNLOCK(inp);
3778 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
3779 SCTP_PCB_FLAGS_UNBOUND) {
3780 /* Bind a ephemeral port */
3781 SCTP_INP_WUNLOCK(inp);
3782 error = sctp_inpcb_bind(so, NULL, p);
3784 SCTP_ASOC_CREATE_UNLOCK(inp);
3788 SCTP_INP_WLOCK(inp);
3790 /* Now do we connect? */
3791 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3792 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
3793 /* We are already connected AND the TCP model */
3795 SCTP_INP_WUNLOCK(inp);
3796 SCTP_ASOC_CREATE_UNLOCK(inp);
3797 return (EADDRINUSE);
3799 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3800 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3802 SCTP_TCB_UNLOCK(stcb);
3803 SCTP_INP_WUNLOCK(inp);
/* hold a ref on inp while searching for an existing association unlocked */
3805 SCTP_INP_INCR_REF(inp);
3806 SCTP_INP_WUNLOCK(inp);
3807 stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
3809 SCTP_INP_WLOCK(inp);
3810 SCTP_INP_DECR_REF(inp);
3811 SCTP_INP_WUNLOCK(inp);
3815 /* Already have or am bring up an association */
3816 SCTP_ASOC_CREATE_UNLOCK(inp);
3817 SCTP_TCB_UNLOCK(stcb);
3821 /* We are GOOD to go */
3822 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0);
3824 /* Gak! no memory */
3828 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
3829 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
3830 /* Set the connected flag so we can queue data */
3833 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
3834 SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
3835 sctp_send_initiate(inp, stcb);
3836 SCTP_ASOC_CREATE_UNLOCK(inp);
3837 SCTP_TCB_UNLOCK(stcb);
/*
 * sctp_usr_recvd() - pr_usrreqs "user has read data" hook.
 *
 * After the application drains the receive buffer this: (1) for the
 * UDP-model peeloff bookkeeping (not TCPPOOL, not CONNECTED) refreshes the
 * next pending vtag and pops the socket-queue entry; (2) credits the
 * control-mbuf overhead back into my_rwnd_control_len; (3) re-runs the
 * delivery/reassembly queues, recomputes the rwnd, and if the window grew
 * by enough (MTU-multiple or a fraction of so_rcv hiwat) sends an
 * immediate window-update SACK; (4) sanity-cleans stale socket-queue
 * entries when the receive buffer is empty.
 */
3843 sctp_usr_recvd(struct socket *so, int flags)
3845 struct sctp_socket_q_list *sq=NULL;
3847 * The user has received some data, we may be able to stuff more
3848 * up the socket. And we need to possibly update the rwnd.
3850 struct sctp_inpcb *inp;
3851 struct sctp_tcb *stcb=NULL;
3853 inp = (struct sctp_inpcb *)so->so_pcb;
3855 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3856 kprintf("Read for so:%x inp:%x Flags:%x\n",
3857 (u_int)so, (u_int)inp, (u_int)flags);
3861 /* I made the same as TCP since we are not setup? */
3863 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3864 kprintf("Nope, connection reset\n");
3866 return (ECONNRESET);
3870 * Grab the first one on the list. It will re-insert itself if
3871 * it runs out of room
3873 SCTP_INP_WLOCK(inp);
3874 if ((flags & MSG_EOR) && ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0)
3875 && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3876 /* Ok the other part of our grubby tracking
3877 * stuff for our horrible layer violation that
3878 * the tsvwg thinks is ok for sctp_peeloff.. gak!
3879 * We must update the next vtag pending on the
3880 * socket buffer (if any).
3882 inp->sctp_vtag_first = sctp_get_first_vtag_from_sb(so);
3883 sq = TAILQ_FIRST(&inp->sctp_queue_list);
3890 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3893 SCTP_TCB_LOCK(stcb);
3896 /* all code in normal stcb path assumes
3897 * that you have a tcb_lock only. Thus
3898 * we must release the inp write lock.
3900 if (flags & MSG_EOR) {
3901 if (((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0)
3902 && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3903 stcb = sctp_remove_from_socket_q(inp);
3906 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3907 kprintf("remove from socket queue for inp:%x tcbret:%x\n",
3908 (u_int)inp, (u_int)stcb);
/* return the per-record mbuf/cmsg overhead to the rwnd accounting */
3911 stcb->asoc.my_rwnd_control_len = sctp_sbspace_sub(stcb->asoc.my_rwnd_control_len,
3912 sizeof(struct mbuf));
3913 if (inp->sctp_flags & SCTP_PCB_FLAGS_RECVDATAIOEVNT) {
3914 stcb->asoc.my_rwnd_control_len = sctp_sbspace_sub(stcb->asoc.my_rwnd_control_len,
3915 CMSG_LEN(sizeof(struct sctp_sndrcvinfo)));
3918 if ((TAILQ_EMPTY(&stcb->asoc.delivery_queue) == 0) ||
3919 (TAILQ_EMPTY(&stcb->asoc.reasmqueue) == 0)) {
3920 /* Deliver if there is something to be delivered */
3921 sctp_service_queues(stcb, &stcb->asoc, 1);
3923 sctp_set_rwnd(stcb, &stcb->asoc);
3924 /* if we increase by 1 or more MTU's (smallest MTUs of all
3925 * nets) we send a window update sack
3927 incr = stcb->asoc.my_rwnd - stcb->asoc.my_last_reported_rwnd;
3931 if (((uint32_t)incr >= (stcb->asoc.smallest_mtu * SCTP_SEG_TO_RWND_UPD)) ||
3932 ((((uint32_t)incr)*SCTP_SCALE_OF_RWND_TO_UPD) >= so->so_rcv.ssb_hiwat)) {
3933 if (callout_pending(&stcb->asoc.dack_timer.timer)) {
3934 /* If the timer is up, stop it */
3935 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
3936 stcb->sctp_ep, stcb, NULL);
3938 /* Send the sack, with the new rwnd */
3939 sctp_send_sack(stcb);
3940 /* Now do the output */
3941 sctp_chunk_output(inp, stcb, 10);
3944 if ((( sq ) && (flags & MSG_EOR) && ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0))
3945 && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3946 stcb = sctp_remove_from_socket_q(inp);
3949 SOCKBUF_LOCK(&so->so_rcv);
/* empty receive buffer but non-empty socket queue: stale entries, purge */
3950 if (( so->so_rcv.ssb_mb == NULL ) &&
3951 (TAILQ_EMPTY(&inp->sctp_queue_list) == 0)) {
3954 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3955 kprintf("Something off, inp:%x so->so_rcv->ssb_mb is empty and sockq is not.. cleaning\n",
3958 if (((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) == 0)
3959 && ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3961 done_yet = TAILQ_EMPTY(&inp->sctp_queue_list);
3964 sctp_remove_from_socket_q(inp);
3965 done_yet = TAILQ_EMPTY(&inp->sctp_queue_list);
3969 if (sctp_debug_on & SCTP_DEBUG_USRREQ2)
3970 kprintf("Cleaned up %d sockq's\n", sq_cnt);
3973 SOCKBUF_UNLOCK(&so->so_rcv);
3975 SCTP_TCB_UNLOCK(stcb);
3976 SCTP_INP_WUNLOCK(inp);
3982 #if (defined(__FreeBSD__) && __FreeBSD_version >= 500000) || defined(__DragonFly__)
/*
 * sctp_listen() - listen(2) handler.
 *
 * Rejects listen on an already-connected TCP-model socket, binds an
 * ephemeral port if still unbound, then translates the socket backlog
 * into the ACCEPTING pcb flag (cleared again, along with SO_ACCEPTCONN,
 * when the backlog is zero or the endpoint is UDP-model).
 */
3983 sctp_listen(struct socket *so, struct thread *p)
3985 sctp_listen(struct socket *so, struct proc *p)
3989 * Note this module depends on the protocol processing being
3990 * called AFTER any socket level flags and backlog are applied
3991 * to the socket. The traditional way that the socket flags are
3992 * applied is AFTER protocol processing. We have made a change
3993 * to the sys/kern/uipc_socket.c module to reverse this but this
3994 * MUST be in place if the socket API for SCTP is to work properly.
3997 struct sctp_inpcb *inp;
4000 inp = (struct sctp_inpcb *)so->so_pcb;
4003 /* I made the same as TCP since we are not setup? */
4004 return (ECONNRESET);
4006 SCTP_INP_RLOCK(inp);
4007 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4008 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
4009 /* We are already connected AND the TCP model */
4011 SCTP_INP_RUNLOCK(inp);
4012 return (EADDRINUSE);
4014 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
4015 /* We must do a bind. */
4016 SCTP_INP_RUNLOCK(inp);
4017 if ((error = sctp_inpcb_bind(so, NULL, p))) {
4018 /* bind error, probably perm */
4023 SCTP_INP_RUNLOCK(inp);
4026 SCTP_INP_WLOCK(inp);
4027 if (inp->sctp_socket->so_qlimit) {
4028 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
4030 * For the UDP model we must TURN OFF the ACCEPT
4031 * flags since we do NOT allow the accept() call.
4032 * The TCP model (when present) will do accept which
4033 * then prohibits connect().
4035 inp->sctp_socket->so_options &= ~SO_ACCEPTCONN;
4037 inp->sctp_flags |= SCTP_PCB_FLAGS_ACCEPTING;
4039 if (inp->sctp_flags & SCTP_PCB_FLAGS_ACCEPTING) {
4041 * Turning off the listen flags if the backlog is
4042 * set to 0 (i.e. qlimit is 0).
4044 inp->sctp_flags &= ~SCTP_PCB_FLAGS_ACCEPTING;
4046 inp->sctp_socket->so_options &= ~SO_ACCEPTCONN;
4048 SCTP_INP_WUNLOCK(inp);
4055 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
/*
 * sctp_accept() - accept(2) handler.
 *
 * Returns the peer's primary-destination address for the (single, first)
 * association on this endpoint.  On FreeBSD-family the address is
 * MALLOC'd into *addr (caller frees, M_SONAME); on the mbuf-based ABI it
 * is written into nam.  Afterwards, any read/write wakeups that were
 * suppressed via DONT_WAKE are replayed.
 */
4056 sctp_accept(struct socket *so, struct sockaddr **addr)
4059 sctp_accept(struct socket *so, struct mbuf *nam)
4061 struct sockaddr *addr = mtod(nam, struct sockaddr *);
4063 struct sctp_tcb *stcb;
4064 struct sockaddr *prim;
4065 struct sctp_inpcb *inp;
4068 inp = (struct sctp_inpcb *)so->so_pcb;
4072 return (ECONNRESET);
4074 SCTP_INP_RLOCK(inp);
4075 if (so->so_state & SS_ISDISCONNECTED) {
4077 SCTP_INP_RUNLOCK(inp);
4078 return (ECONNABORTED);
4080 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4083 SCTP_INP_RUNLOCK(inp);
4084 return (ECONNRESET);
4086 SCTP_TCB_LOCK(stcb);
4087 SCTP_INP_RUNLOCK(inp);
4088 prim = (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr;
4089 if (prim->sa_family == AF_INET) {
4090 struct sockaddr_in *sin;
4091 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4092 MALLOC(sin, struct sockaddr_in *, sizeof *sin, M_SONAME,
4095 sin = (struct sockaddr_in *)addr;
4096 bzero((caddr_t)sin, sizeof (*sin));
4098 sin->sin_family = AF_INET;
4099 sin->sin_len = sizeof(*sin);
4100 sin->sin_port = ((struct sockaddr_in *)prim)->sin_port;
4101 sin->sin_addr = ((struct sockaddr_in *)prim)->sin_addr;
4102 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4103 *addr = (struct sockaddr *)sin;
4105 nam->m_len = sizeof(*sin);
4108 struct sockaddr_in6 *sin6;
4109 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4110 MALLOC(sin6, struct sockaddr_in6 *, sizeof *sin6, M_SONAME,
4113 sin6 = (struct sockaddr_in6 *)addr;
4115 bzero((caddr_t)sin6, sizeof (*sin6));
4116 sin6->sin6_family = AF_INET6;
4117 sin6->sin6_len = sizeof(*sin6);
4118 sin6->sin6_port = ((struct sockaddr_in6 *)prim)->sin6_port;
4120 sin6->sin6_addr = ((struct sockaddr_in6 *)prim)->sin6_addr;
/* recover the embedded scope id for link-local addresses */
4121 if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr))
4122 /* sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);*/
4123 in6_recoverscope(sin6, &sin6->sin6_addr, NULL); /* skip ifp check */
4125 sin6->sin6_scope_id = 0; /*XXX*/
4126 #if defined(__FreeBSD__) || defined (__APPLE__) || defined(__DragonFly__)
4127 *addr= (struct sockaddr *)sin6;
4129 nam->m_len = sizeof(*sin6);
4132 /* Wake any delayed sleep action */
4133 SCTP_TCB_UNLOCK(stcb);
4134 SCTP_INP_WLOCK(inp);
4135 if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
4136 inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
4137 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
4138 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
4139 #if defined(__NetBSD__)
4140 if (sowritable(inp->sctp_socket))
4141 sowwakeup(inp->sctp_socket);
4143 if (sowriteable(inp->sctp_socket))
4144 sowwakeup(inp->sctp_socket);
4147 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
4148 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
4149 if (soreadable(inp->sctp_socket))
4150 sorwakeup(inp->sctp_socket);
4154 SCTP_INP_WUNLOCK(inp);
/*
 * sctp_ingetaddr() - pru_sockaddr handler: report the local IPv4
 * address/port bound to this SCTP socket.
 *
 * Two platform variants share one body: on FreeBSD/APPLE/DragonFly the
 * result is MALLOC'd and handed back via **addr; on NetBSD/OpenBSD it is
 * written into the caller-supplied mbuf *nam.
 *
 * Returns 0 on success; frees the sockaddr and fails when the PCB is
 * gone (error paths partially elided in this listing).
 *
 * NOTE(review): this listing has interior lines elided (original line
 * numbers jump); #else/#endif arms, error returns, and the closing
 * brace are not shown here.
 */
4160 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4161 sctp_ingetaddr(struct socket *so, struct sockaddr **addr)
4163 sctp_ingetaddr(struct socket *so, struct mbuf *nam)
4166 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4167 struct sockaddr_in *sin;
/* NetBSD/OpenBSD variant: sockaddr lives inside the caller's mbuf */
4169 struct sockaddr_in *sin = mtod(nam, struct sockaddr_in *);
4171 struct sctp_inpcb *inp;
4173 * Do the malloc first in case it blocks.
4175 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4176 MALLOC(sin, struct sockaddr_in *, sizeof *sin, M_SONAME, M_WAITOK |
4179 nam->m_len = sizeof(*sin);
4180 memset(sin, 0, sizeof(*sin));
4182 sin->sin_family = AF_INET;
4183 sin->sin_len = sizeof(*sin);
/* Re-fetch the PCB: the M_WAITOK allocation above may have slept */
4185 inp = (struct sctp_inpcb *)so->so_pcb;
4188 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4189 FREE(sin, M_SONAME);
4193 SCTP_INP_RLOCK(inp);
4194 sin->sin_port = inp->sctp_lport;
4195 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
4196 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
/*
 * Bound-all + connected: pick the source address the stack
 * would use toward the peer's first AF_INET net.
 */
4197 struct sctp_tcb *stcb;
4198 struct sockaddr_in *sin_a;
4199 struct sctp_nets *net;
4202 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4208 SCTP_TCB_LOCK(stcb);
4209 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4210 sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4211 if (sin_a->sin_family == AF_INET) {
/* No IPv4 destination found: nothing sensible to report */
4216 if ((!fnd) || (sin_a == NULL)) {
4218 SCTP_TCB_UNLOCK(stcb);
4221 sin->sin_addr = sctp_ipv4_source_address_selection(inp,
4222 stcb, (struct route *)&net->ro, net, 0);
4223 SCTP_TCB_UNLOCK(stcb);
4225 /* For the bound all case you get back 0 */
4227 sin->sin_addr.s_addr = 0;
4231 /* Take the first IPv4 address in the list */
4232 struct sctp_laddr *laddr;
4234 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4235 if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
4236 struct sockaddr_in *sin_a;
4237 sin_a = (struct sockaddr_in *)laddr->ifa->ifa_addr;
4238 sin->sin_addr = sin_a->sin_addr;
/* Specifically-bound endpoint with no IPv4 address: error out */
4245 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4246 FREE(sin, M_SONAME);
4248 SCTP_INP_RUNLOCK(inp);
4252 SCTP_INP_RUNLOCK(inp);
4254 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
/* Hand ownership of the MALLOC'd sockaddr to the caller */
4255 (*addr) = (struct sockaddr *)sin;
/*
 * sctp_peeraddr() - pru_peeraddr handler: report the IPv4 address/port
 * of the connected peer (first AF_INET net of the first association).
 *
 * FreeBSD/APPLE/DragonFly return a MALLOC'd sockaddr via **addr;
 * NetBSD/OpenBSD fill the caller's mbuf *nam.  Fails for unconnected
 * (UDP-style / listening) sockets and for peers with no IPv4 address.
 *
 * NOTE(review): interior lines (error returns, #else/#endif arms,
 * closing brace) are elided from this listing.
 */
4261 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4262 sctp_peeraddr(struct socket *so, struct sockaddr **addr)
4264 struct sockaddr_in *sin = (struct sockaddr_in *)*addr;
4266 sctp_peeraddr(struct socket *so, struct mbuf *nam)
4268 struct sockaddr_in *sin = mtod(nam, struct sockaddr_in *);
4271 struct sockaddr_in *sin_a;
4272 struct sctp_inpcb *inp;
4273 struct sctp_tcb *stcb;
4274 struct sctp_nets *net;
4276 /* Do the malloc first in case it blocks. */
4277 inp = (struct sctp_inpcb *)so->so_pcb;
4278 if ((inp == NULL) ||
4279 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
4280 /* UDP type and listeners will drop out here */
4285 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4286 MALLOC(sin, struct sockaddr_in *, sizeof *sin, M_SONAME, M_WAITOK |
4289 nam->m_len = sizeof(*sin);
4290 memset(sin, 0, sizeof(*sin));
4292 sin->sin_family = AF_INET;
4293 sin->sin_len = sizeof(*sin);
4295 /* We must recapture the PCB in case the allocation blocked */
4296 inp = (struct sctp_inpcb *)so->so_pcb;
4299 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4300 FREE(sin, M_SONAME);
4304 SCTP_INP_RLOCK(inp);
4305 stcb = LIST_FIRST(&inp->sctp_asoc_list);
/* Lock ordering: take the TCB lock, then drop the INP read lock */
4307 SCTP_TCB_LOCK(stcb);
4308 SCTP_INP_RUNLOCK(inp);
4311 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4312 FREE(sin, M_SONAME);
/* Copy out the first IPv4 destination address of the association */
4317 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4318 sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4319 if (sin_a->sin_family == AF_INET) {
4321 sin->sin_port = stcb->rport;
4322 sin->sin_addr = sin_a->sin_addr;
4326 SCTP_TCB_UNLOCK(stcb);
4328 /* No IPv4 address */
4330 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4331 FREE(sin, M_SONAME);
/*
 * Protocol user-request switch for SCTP on FreeBSD/APPLE/DragonFly:
 * maps each socket-layer operation to its SCTP handler.  Out-of-band
 * data and connect2 are unsupported; control ioctls go to the generic
 * in_control(); receive uses the stock soreceive().
 *
 * NOTE(review): the initializer's closing "};" is elided from this
 * listing.
 */
4339 #if defined(__FreeBSD__) || defined(__APPLE__) || defined(__DragonFly__)
4340 struct pr_usrreqs sctp_usrreqs = {
4341 .pru_abort = sctp_abort,
4342 .pru_accept = sctp_accept,
4343 .pru_attach = sctp_attach,
4344 .pru_bind = sctp_bind,
4345 .pru_connect = sctp_connect,
4346 .pru_connect2 = pru_connect2_notsupp,
4347 .pru_control = in_control,
4348 .pru_detach = sctp_detach,
4349 .pru_disconnect = sctp_disconnect,
4350 .pru_listen = sctp_listen,
4351 .pru_peeraddr = sctp_peeraddr,
4352 .pru_rcvd = sctp_usr_recvd,
4353 .pru_rcvoob = pru_rcvoob_notsupp,
4354 .pru_send = sctp_send,
4355 .pru_sense = pru_sense_null,
4356 .pru_shutdown = sctp_shutdown,
4357 .pru_sockaddr = sctp_ingetaddr,
4358 .pru_sosend = sctp_sosend,
4359 .pru_soreceive = soreceive
/*
 * sctp_usrreq() - classic single-entry usrreq dispatcher for
 * NetBSD/OpenBSD: decodes the PRU_* request code and forwards to the
 * individual sctp_* handlers used as pr_usrreqs members on the other
 * platforms.  PRU_CONTROL and PRU_PURGEIF are handled up front before
 * the main switch.
 *
 * NOTE(review): this listing elides most case labels, braces and
 * #else/#endif arms; the surviving lines show only the handler calls.
 */
4363 #if defined(__NetBSD__)
4365 sctp_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam,
4366 struct mbuf *control, struct proc *p)
4370 sctp_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam,
4371 struct mbuf *control)
/* OpenBSD variant has no proc argument; use the current process */
4373 struct proc *p = curproc;
4378 family = so->so_proto->pr_domain->dom_family;
/* ioctl-style requests: route to the per-family control handler */
4381 if (req == PRU_CONTROL) {
4384 error = in_control(so, (long)m, (caddr_t)nam,
4385 (struct ifnet *)control
4386 #if defined(__NetBSD__)
4393 error = in6_control(so, (long)m, (caddr_t)nam,
4394 (struct ifnet *)control, p);
4398 error = EAFNOSUPPORT;
/* Interface going away: drop every SCTP address on it */
4404 if (req == PRU_PURGEIF) {
4407 ifn = (struct ifnet *)control;
4408 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
4409 if (ifa->ifa_addr->sa_family == family) {
4410 sctp_delete_ip_address(ifa);
4424 return (EAFNOSUPPORT);
4432 error = sctp_attach(so, family, p);
4435 error = sctp_detach(so);
4442 error = sctp_bind(so, nam, p);
4445 error = sctp_listen(so, p);
4452 error = sctp_connect(so, nam, p);
4454 case PRU_DISCONNECT:
4455 error = sctp_disconnect(so);
4462 error = sctp_accept(so, nam);
4465 error = sctp_shutdown(so);
4470 * For Open and Net BSD, this is real
4471 * ugly: the mbuf *nam that is passed
4472 * (by soreceive()) is the int flags,
4473 * cast as an (mbuf *).  Yuck!
4475 error = sctp_usr_recvd(so, (int)((long)nam));
4479 /* Flags are ignored */
4481 if (sctp_debug_on & SCTP_DEBUG_USRREQ1) {
4482 kprintf("Send called on V4 side\n");
4486 struct sockaddr *addr;
4490 addr = mtod(nam, struct sockaddr *);
4492 error = sctp_send(so, 0, m, addr, control, p);
4496 error = sctp_abort(so);
/* Out-of-band send/receive are not supported by SCTP */
4503 error = EAFNOSUPPORT;
4506 error = EAFNOSUPPORT;
4509 error = sctp_peeraddr(so, nam);
4512 error = sctp_ingetaddr(so, nam);
4525 /* #if defined(__NetBSD__) || defined(__OpenBSD__) */
4528 * Sysctl for sctp variables.
/*
 * sctp_sysctl() - old-style (name-vector) sysctl handler: maps each
 * terminal SCTPCTL_* node to its backing global via sysctl_int().
 * Unknown names yield ENOPROTOOPT.
 *
 * NOTE(review): several variable references and the function's closing
 * braces are elided from this listing (e.g. the targets of
 * SCTPCTL_MAXDGRAM/RECVSPACE and the final case before the default).
 */
4531 sctp_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
4535 /* All sysctl names at this level are terminal. */
4541 case SCTPCTL_MAXDGRAM:
4542 return (sysctl_int(oldp, oldlenp, newp, newlen,
4544 case SCTPCTL_RECVSPACE:
4545 return (sysctl_int(oldp, oldlenp, newp, newlen,
4547 case SCTPCTL_AUTOASCONF:
4548 return (sysctl_int(oldp, oldlenp, newp, newlen,
4549 &sctp_auto_asconf));
4550 case SCTPCTL_ECN_ENABLE:
4551 return (sysctl_int(oldp, oldlenp, newp, newlen,
4553 case SCTPCTL_ECN_NONCE:
4554 return (sysctl_int(oldp, oldlenp, newp, newlen,
4556 case SCTPCTL_STRICT_SACK:
4557 return (sysctl_int(oldp, oldlenp, newp, newlen,
4558 &sctp_strict_sacks));
4559 case SCTPCTL_NOCSUM_LO:
4560 return (sysctl_int(oldp, oldlenp, newp, newlen,
4561 &sctp_no_csum_on_loopback));
4562 case SCTPCTL_STRICT_INIT:
4563 return (sysctl_int(oldp, oldlenp, newp, newlen,
4564 &sctp_strict_init));
4565 case SCTPCTL_PEER_CHK_OH:
4566 return (sysctl_int(oldp, oldlenp, newp, newlen,
4567 &sctp_peer_chunk_oh));
4568 case SCTPCTL_MAXBURST:
4569 return (sysctl_int(oldp, oldlenp, newp, newlen,
4570 &sctp_max_burst_default));
4571 case SCTPCTL_MAXCHUNKONQ:
4572 return (sysctl_int(oldp, oldlenp, newp, newlen,
4573 &sctp_max_chunks_on_queue));
4574 case SCTPCTL_DELAYED_SACK:
4575 return (sysctl_int(oldp, oldlenp, newp, newlen,
4576 &sctp_delayed_sack_time_default));
4577 case SCTPCTL_HB_INTERVAL:
4578 return (sysctl_int(oldp, oldlenp, newp, newlen,
4579 &sctp_heartbeat_interval_default));
4580 case SCTPCTL_PMTU_RAISE:
4581 return (sysctl_int(oldp, oldlenp, newp, newlen,
4582 &sctp_pmtu_raise_time_default));
4583 case SCTPCTL_SHUTDOWN_GUARD:
4584 return (sysctl_int(oldp, oldlenp, newp, newlen,
4585 &sctp_shutdown_guard_time_default));
4586 case SCTPCTL_SECRET_LIFETIME:
4587 return (sysctl_int(oldp, oldlenp, newp, newlen,
4588 &sctp_secret_lifetime_default));
4589 case SCTPCTL_RTO_MAX:
4590 return (sysctl_int(oldp, oldlenp, newp, newlen,
4591 &sctp_rto_max_default));
4592 case SCTPCTL_RTO_MIN:
4593 return (sysctl_int(oldp, oldlenp, newp, newlen,
4594 &sctp_rto_min_default));
4595 case SCTPCTL_RTO_INITIAL:
4596 return (sysctl_int(oldp, oldlenp, newp, newlen,
4597 &sctp_rto_initial_default));
4598 case SCTPCTL_INIT_RTO_MAX:
4599 return (sysctl_int(oldp, oldlenp, newp, newlen,
4600 &sctp_init_rto_max_default));
4601 case SCTPCTL_COOKIE_LIFE:
4602 return (sysctl_int(oldp, oldlenp, newp, newlen,
4603 &sctp_valid_cookie_life_default));
4604 case SCTPCTL_INIT_RTX_MAX:
4605 return (sysctl_int(oldp, oldlenp, newp, newlen,
4606 &sctp_init_rtx_max_default));
4607 case SCTPCTL_ASSOC_RTX_MAX:
4608 return (sysctl_int(oldp, oldlenp, newp, newlen,
4609 &sctp_assoc_rtx_max_default));
4610 case SCTPCTL_PATH_RTX_MAX:
4611 return (sysctl_int(oldp, oldlenp, newp, newlen,
4612 &sctp_path_rtx_max_default));
4613 case SCTPCTL_NR_OUTGOING_STREAMS:
4614 return (sysctl_int(oldp, oldlenp, newp, newlen,
4615 &sctp_nr_outgoing_streams_default));
4618 return (sysctl_int(oldp, oldlenp, newp, newlen,
/* Unrecognized sysctl name at this level */
4622 return (ENOPROTOOPT);
4629 * Sysctl for sctp variables.
4631 SYSCTL_SETUP(sysctl_net_inet_sctp_setup, "sysctl net.inet.sctp subtree setup")
4634 sysctl_createv(clog, 0, NULL, NULL,
4636 CTLTYPE_NODE, "net", NULL,
4639 sysctl_createv(clog, 0, NULL, NULL,
4641 CTLTYPE_NODE, "inet", NULL,
4643 CTL_NET, PF_INET, CTL_EOL);
4644 sysctl_createv(clog, 0, NULL, NULL,
4646 CTLTYPE_NODE, "sctp",
4647 SYSCTL_DESCR("sctp related settings"),
4649 CTL_NET, PF_INET, IPPROTO_SCTP, CTL_EOL);
4651 sysctl_createv(clog, 0, NULL, NULL,
4652 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4653 CTLTYPE_INT, "maxdgram",
4654 SYSCTL_DESCR("Maximum outgoing SCTP buffer size"),
4655 NULL, 0, &sctp_sendspace, 0,
4656 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_MAXDGRAM,
4659 sysctl_createv(clog, 0, NULL, NULL,
4660 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4661 CTLTYPE_INT, "recvspace",
4662 SYSCTL_DESCR("Maximum incoming SCTP buffer size"),
4663 NULL, 0, &sctp_recvspace, 0,
4664 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_RECVSPACE,
4667 sysctl_createv(clog, 0, NULL, NULL,
4668 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4669 CTLTYPE_INT, "autoasconf",
4670 SYSCTL_DESCR("Enable SCTP Auto-ASCONF"),
4671 NULL, 0, &sctp_auto_asconf, 0,
4672 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_AUTOASCONF,
4675 sysctl_createv(clog, 0, NULL, NULL,
4676 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4677 CTLTYPE_INT, "ecn_enable",
4678 SYSCTL_DESCR("Enable SCTP ECN"),
4679 NULL, 0, &sctp_ecn, 0,
4680 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_ECN_ENABLE,
4683 sysctl_createv(clog, 0, NULL, NULL,
4684 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4685 CTLTYPE_INT, "ecn_nonce",
4686 SYSCTL_DESCR("Enable SCTP ECN Nonce"),
4687 NULL, 0, &sctp_ecn_nonce, 0,
4688 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_ECN_NONCE,
4691 sysctl_createv(clog, 0, NULL, NULL,
4692 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4693 CTLTYPE_INT, "strict_sack",
4694 SYSCTL_DESCR("Enable SCTP Strict SACK checking"),
4695 NULL, 0, &sctp_strict_sacks, 0,
4696 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_STRICT_SACK,
4699 sysctl_createv(clog, 0, NULL, NULL,
4700 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4701 CTLTYPE_INT, "loopback_nocsum",
4702 SYSCTL_DESCR("Enable NO Csum on packets sent on loopback"),
4703 NULL, 0, &sctp_no_csum_on_loopback, 0,
4704 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_NOCSUM_LO,
4707 sysctl_createv(clog, 0, NULL, NULL,
4708 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4709 CTLTYPE_INT, "strict_init",
4710 SYSCTL_DESCR("Enable strict INIT/INIT-ACK singleton enforcement"),
4711 NULL, 0, &sctp_strict_init, 0,
4712 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_STRICT_INIT,
4715 sysctl_createv(clog, 0, NULL, NULL,
4716 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4717 CTLTYPE_INT, "peer_chkoh",
4718 SYSCTL_DESCR("Amount to debit peers rwnd per chunk sent"),
4719 NULL, 0, &sctp_peer_chunk_oh, 0,
4720 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_PEER_CHK_OH,
4723 sysctl_createv(clog, 0, NULL, NULL,
4724 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4725 CTLTYPE_INT, "maxburst",
4726 SYSCTL_DESCR("Default max burst for sctp endpoints"),
4727 NULL, 0, &sctp_max_burst_default, 0,
4728 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_MAXBURST,
4731 sysctl_createv(clog, 0, NULL, NULL,
4732 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4733 CTLTYPE_INT, "maxchunks",
4734 SYSCTL_DESCR("Default max chunks on queue per asoc"),
4735 NULL, 0, &sctp_max_chunks_on_queue, 0,
4736 CTL_NET, PF_INET, IPPROTO_SCTP, SCTPCTL_MAXCHUNKONQ,