984263bc 1/*
6ea1e9b9 2 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved.
3 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
4 *
5 * This code is derived from software contributed to The DragonFly Project
6 * by Jeffrey M. Hsu.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of The DragonFly Project nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific, prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 */
33
34/*
35 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved.
36 *
37 * License terms: all terms for the DragonFly license above plus the following:
38 *
39 * 4. All advertising materials mentioning features or use of this software
40 * must display the following acknowledgement:
41 *
42 * This product includes software developed by Jeffrey M. Hsu
43 * for the DragonFly Project.
44 *
45 * This requirement may be waived with permission from Jeffrey Hsu.
46 * This requirement will sunset and may be removed on July 8 2005,
47 * after which the standard DragonFly license (as shown above) will
48 * apply.
49 */
50
51/*
52 * Copyright (c) 1982, 1986, 1988, 1990, 1993
53 * The Regents of the University of California. All rights reserved.
54 *
55 * Redistribution and use in source and binary forms, with or without
56 * modification, are permitted provided that the following conditions
57 * are met:
58 * 1. Redistributions of source code must retain the above copyright
59 * notice, this list of conditions and the following disclaimer.
60 * 2. Redistributions in binary form must reproduce the above copyright
61 * notice, this list of conditions and the following disclaimer in the
62 * documentation and/or other materials provided with the distribution.
63 * 3. All advertising materials mentioning features or use of this software
64 * must display the following acknowledgement:
65 * This product includes software developed by the University of
66 * California, Berkeley and its contributors.
67 * 4. Neither the name of the University nor the names of its contributors
68 * may be used to endorse or promote products derived from this software
69 * without specific prior written permission.
70 *
71 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
72 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
73 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
74 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
75 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
76 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
77 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
78 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
79 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
80 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
81 * SUCH DAMAGE.
82 *
83 * @(#)uipc_socket.c 8.3 (Berkeley) 4/15/94
7405c902 84 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.24 2003/11/11 17:18:18 silby Exp $
9ba76b73 85 * $DragonFly: src/sys/kern/uipc_socket.c,v 1.38 2006/06/13 08:12:03 dillon Exp $
86 */
87
88#include "opt_inet.h"
78812139 89#include "opt_sctp.h"
90
91#include <sys/param.h>
92#include <sys/systm.h>
93#include <sys/fcntl.h>
94#include <sys/malloc.h>
95#include <sys/mbuf.h>
96#include <sys/domain.h>
97#include <sys/file.h> /* for struct knote */
98#include <sys/kernel.h>
99#include <sys/malloc.h>
100#include <sys/event.h>
101#include <sys/poll.h>
102#include <sys/proc.h>
103#include <sys/protosw.h>
104#include <sys/socket.h>
105#include <sys/socketvar.h>
6b6e0885 106#include <sys/socketops.h>
107#include <sys/resourcevar.h>
108#include <sys/signalvar.h>
109#include <sys/sysctl.h>
110#include <sys/uio.h>
111#include <sys/jail.h>
112#include <vm/vm_zone.h>
113
114#include <sys/thread2.h>
115
116#include <machine/limits.h>
117
118#ifdef INET
119static int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
120#endif /* INET */
121
122static void filt_sordetach(struct knote *kn);
123static int filt_soread(struct knote *kn, long hint);
124static void filt_sowdetach(struct knote *kn);
125static int filt_sowrite(struct knote *kn, long hint);
126static int filt_solisten(struct knote *kn, long hint);
127
128static struct filterops solisten_filtops =
129 { 1, NULL, filt_sordetach, filt_solisten };
130static struct filterops soread_filtops =
131 { 1, NULL, filt_sordetach, filt_soread };
132static struct filterops sowrite_filtops =
133 { 1, NULL, filt_sowdetach, filt_sowrite };
134
135struct vm_zone *socket_zone;
136
137MALLOC_DEFINE(M_SONAME, "soname", "socket name");
138MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");
139
140
141static int somaxconn = SOMAXCONN;
142SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
143 &somaxconn, 0, "Maximum pending socket connection queue size");
144
145/*
146 * Socket operation routines.
147 * These routines are called by the routines in
148 * sys_socket.c or from a system process, and
149 * implement the semantics of socket operations by
150 * switching out to the protocol specific routines.
151 */
152
153/*
154 * Get a socket structure from our zone, and initialize it.
155 * We don't implement `waitok' yet (see comments in uipc_domain.c).
156 * Note that it would probably be better to allocate socket
157 * and PCB at the same time, but I'm not convinced that all
158 * the protocols can be easily modified to do this.
159 */
160struct socket *
161soalloc(waitok)
162 int waitok;
163{
164 struct socket *so;
165
8a8d5d85 166 so = zalloc(socket_zone);
167 if (so) {
168 /* XXX race condition for reentrant kernel */
169 bzero(so, sizeof *so);
984263bc 170 TAILQ_INIT(&so->so_aiojobq);
171 TAILQ_INIT(&so->so_rcv.sb_sel.si_mlist);
172 TAILQ_INIT(&so->so_snd.sb_sel.si_mlist);
173 }
174 return so;
175}
176
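/*
 * Create a new socket: look up the protocol switch entry for the
 * requested domain/type/protocol, allocate a socket with soalloc()
 * and let the protocol attach to it via so_pru_attach().  Jailed
 * processes may be restricted to PF_LOCAL, PF_INET and PF_ROUTE
 * sockets (jail_socket_unixiproute_only).
 */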
177int
178socreate(int dom, struct socket **aso, int type,
179 int proto, struct thread *td)
984263bc 180{
181 struct proc *p = td->td_proc;
182 struct protosw *prp;
183 struct socket *so;
e4700d00 184 struct pru_attach_info ai;
dadab5e9 185 int error;
186
187 if (proto)
188 prp = pffindproto(dom, proto, type);
189 else
190 prp = pffindtype(dom, type);
191
192 if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
193 return (EPROTONOSUPPORT);
194
41c20dac 195 if (p->p_ucred->cr_prison && jail_socket_unixiproute_only &&
196 prp->pr_domain->dom_family != PF_LOCAL &&
197 prp->pr_domain->dom_family != PF_INET &&
198 prp->pr_domain->dom_family != PF_ROUTE) {
199 return (EPROTONOSUPPORT);
200 }
201
202 if (prp->pr_type != type)
203 return (EPROTOTYPE);
204 so = soalloc(p != 0);
205 if (so == 0)
206 return (ENOBUFS);
207
208 TAILQ_INIT(&so->so_incomp);
209 TAILQ_INIT(&so->so_comp);
210 so->so_type = type;
e9a372eb 211 so->so_cred = crhold(p->p_ucred);
984263bc 212 so->so_proto = prp;
213 ai.sb_rlimit = &p->p_rlimit[RLIMIT_SBSIZE];
214 ai.p_ucred = p->p_ucred;
215 ai.fd_rdir = p->p_fd->fd_rdir;
216 error = so_pru_attach(so, proto, &ai);
217 if (error) {
218 so->so_state |= SS_NOFDREF;
219 sofree(so);
220 return (error);
221 }
222 *aso = so;
223 return (0);
224}
225
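/*
 * Bind a local address to the socket by handing the request to the
 * protocol (so_pru_bind()) from within a critical section.
 */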
226int
dadab5e9 227sobind(struct socket *so, struct sockaddr *nam, struct thread *td)
984263bc 228{
229 int error;
230
e43a034f 231 crit_enter();
6b6e0885 232 error = so_pru_bind(so, nam, td);
e43a034f 233 crit_exit();
234 return (error);
235}
236
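/*
 * Release a socket produced by soalloc(): return the sockbuf space
 * accounting with chgsbsize(), detach any accept filter, drop the
 * credential reference and give the structure back to socket_zone.
 */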
237void
dadab5e9 238sodealloc(struct socket *so)
984263bc 239{
240 if (so->so_rcv.sb_hiwat)
241 (void)chgsbsize(so->so_cred->cr_uidinfo,
242 &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
243 if (so->so_snd.sb_hiwat)
244 (void)chgsbsize(so->so_cred->cr_uidinfo,
245 &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
246#ifdef INET
247 /* remove accept filter if present */
248 if (so->so_accf != NULL)
249 do_setopt_accept_filter(so, NULL);
250#endif /* INET */
251 crfree(so->so_cred);
8a8d5d85 252 zfree(socket_zone, so);
253}
254
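/*
 * Put the socket into listening state: mark it SO_ACCEPTCONN, clamp
 * the requested backlog to somaxconn and let the protocol finish the
 * job via so_pru_listen().
 */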
255int
dadab5e9 256solisten(struct socket *so, int backlog, struct thread *td)
984263bc 257{
e43a034f 258 int error;
259#ifdef SCTP
260 short oldopt, oldqlimit;
261#endif /* SCTP */
984263bc 262
e43a034f 263 crit_enter();
78812139 264 if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) {
e43a034f 265 crit_exit();
78812139 266 return (EINVAL);
984263bc 267 }
268
269#ifdef SCTP
270 oldopt = so->so_options;
271 oldqlimit = so->so_qlimit;
272#endif /* SCTP */
273
274 if (TAILQ_EMPTY(&so->so_comp))
275 so->so_options |= SO_ACCEPTCONN;
276 if (backlog < 0 || backlog > somaxconn)
277 backlog = somaxconn;
278 so->so_qlimit = backlog;
 279	/* SCTP needs to tweak both the inbound backlog parameter AND
 280	 * the so_options (in the UDP model a socket both connects and
 281	 * accepts inbound connections, implicitly).
 282	 */
283 error = so_pru_listen(so, td);
284 if (error) {
285#ifdef SCTP
286 /* Restore the params */
287 so->so_options = oldopt;
288 so->so_qlimit = oldqlimit;
289#endif /* SCTP */
290 crit_exit();
291 return (error);
292 }
e43a034f 293 crit_exit();
294 return (0);
295}
296
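/*
 * Release a socket that has no protocol control block left and no
 * file descriptor reference.  A socket still sitting on a listen
 * socket's incomplete queue is unlinked first; one already on the
 * completed (accept) queue is deliberately left alone.
 */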
297void
dadab5e9 298sofree(struct socket *so)
299{
300 struct socket *head = so->so_head;
301
302 if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
303 return;
304 if (head != NULL) {
305 if (so->so_state & SS_INCOMP) {
306 TAILQ_REMOVE(&head->so_incomp, so, so_list);
307 head->so_incqlen--;
308 } else if (so->so_state & SS_COMP) {
309 /*
310 * We must not decommission a socket that's
311 * on the accept(2) queue. If we do, then
312 * accept(2) may hang after select(2) indicated
313 * that the listening socket was ready.
314 */
315 return;
316 } else {
317 panic("sofree: not queued");
318 }
319 so->so_state &= ~SS_INCOMP;
320 so->so_head = NULL;
321 }
322 sbrelease(&so->so_snd, so);
323 sorflush(so);
324 sodealloc(so);
325}
326
327/*
328 * Close a socket on last file table reference removal.
329 * Initiate disconnect if connected.
330 * Free socket when disconnect complete.
331 */
332int
9ba76b73 333soclose(struct socket *so, int fflag)
984263bc 334{
335 int error = 0;
336
e43a034f 337 crit_enter();
984263bc 338 funsetown(so->so_sigio);
19be7d32 339 if (so->so_pcb == NULL)
340 goto discard;
341 if (so->so_state & SS_ISCONNECTED) {
342 if ((so->so_state & SS_ISDISCONNECTING) == 0) {
343 error = sodisconnect(so);
344 if (error)
345 goto drop;
346 }
347 if (so->so_options & SO_LINGER) {
348 if ((so->so_state & SS_ISDISCONNECTING) &&
9ba76b73 349 (fflag & FNONBLOCK))
350 goto drop;
351 while (so->so_state & SS_ISCONNECTED) {
352 error = tsleep((caddr_t)&so->so_timeo,
377d4740 353 PCATCH, "soclos", so->so_linger * hz);
354 if (error)
355 break;
356 }
357 }
358 }
359drop:
360 if (so->so_pcb) {
361 int error2;
362
363 error2 = so_pru_detach(so);
364 if (error == 0)
365 error = error2;
366 }
367discard:
368 if (so->so_options & SO_ACCEPTCONN) {
369 struct socket *sp, *sonext;
370
371 sp = TAILQ_FIRST(&so->so_incomp);
372 for (; sp != NULL; sp = sonext) {
373 sonext = TAILQ_NEXT(sp, so_list);
374 (void) soabort(sp);
375 }
376 for (sp = TAILQ_FIRST(&so->so_comp); sp != NULL; sp = sonext) {
377 sonext = TAILQ_NEXT(sp, so_list);
378 /* Dequeue from so_comp since sofree() won't do it */
379 TAILQ_REMOVE(&so->so_comp, sp, so_list);
380 so->so_qlen--;
381 sp->so_state &= ~SS_COMP;
382 sp->so_head = NULL;
383 (void) soabort(sp);
384 }
385 }
386 if (so->so_state & SS_NOFDREF)
387 panic("soclose: NOFDREF");
388 so->so_state |= SS_NOFDREF;
389 sofree(so);
e43a034f 390 crit_exit();
391 return (error);
392}
393
394/*
e43a034f 395 * Must be called from a critical section.
396 */
397int
398soabort(so)
399 struct socket *so;
400{
401 int error;
402
6b6e0885 403 error = so_pru_abort(so);
404 if (error) {
405 sofree(so);
406 return error;
407 }
408 return (0);
409}
410
411int
dadab5e9 412soaccept(struct socket *so, struct sockaddr **nam)
984263bc 413{
414 int error;
415
e43a034f 416 crit_enter();
417 if ((so->so_state & SS_NOFDREF) == 0)
418 panic("soaccept: !NOFDREF");
419 so->so_state &= ~SS_NOFDREF;
6b6e0885 420 error = so_pru_accept(so, nam);
e43a034f 421 crit_exit();
422 return (error);
423}
424
425int
dadab5e9 426soconnect(struct socket *so, struct sockaddr *nam, struct thread *td)
984263bc 427{
428 int error;
429
430 if (so->so_options & SO_ACCEPTCONN)
431 return (EOPNOTSUPP);
e43a034f 432 crit_enter();
433 /*
434 * If protocol is connection-based, can only connect once.
435 * Otherwise, if connected, try to disconnect first.
436 * This allows user to disconnect by connecting to, e.g.,
437 * a null address.
438 */
439 if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
440 ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
59429d28 441 (error = sodisconnect(so)))) {
984263bc 442 error = EISCONN;
443 } else {
444 /*
445 * Prevent accumulated error from previous connection
446 * from biting us.
447 */
448 so->so_error = 0;
6b6e0885 449 error = so_pru_connect(so, nam, td);
59429d28 450 }
e43a034f 451 crit_exit();
452 return (error);
453}
454
455int
dadab5e9 456soconnect2(struct socket *so1, struct socket *so2)
984263bc 457{
458 int error;
459
e43a034f 460 crit_enter();
6b6e0885 461 error = so_pru_connect2(so1, so2);
e43a034f 462 crit_exit();
463 return (error);
464}
465
466int
dadab5e9 467sodisconnect(struct socket *so)
984263bc 468{
469 int error;
470
e43a034f 471 crit_enter();
472 if ((so->so_state & SS_ISCONNECTED) == 0) {
473 error = ENOTCONN;
474 goto bad;
475 }
476 if (so->so_state & SS_ISDISCONNECTING) {
477 error = EALREADY;
478 goto bad;
479 }
6b6e0885 480 error = so_pru_disconnect(so);
984263bc 481bad:
e43a034f 482 crit_exit();
483 return (error);
484}
485
486#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
487/*
488 * Send on a socket.
489 * If send must go all at once and message is larger than
490 * send buffering, then hard error.
491 * Lock against other senders.
492 * If must go all at once and not enough room now, then
493 * inform user that this would block and do nothing.
494 * Otherwise, if nonblocking, send as much as possible.
495 * The data to be sent is described by "uio" if nonzero,
496 * otherwise by the mbuf chain "top" (which must be null
497 * if uio is not). Data provided in mbuf chain must be small
498 * enough to send all at once.
499 *
500 * Returns nonzero on error, timeout or signal; callers
501 * must check for short counts if EINTR/ERESTART are returned.
502 * Data and control buffers are freed on return.
503 */
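/*
 * Rough shape of the send loop below (illustrative sketch only):
 *
 *	sblock(&so->so_snd, ...);
 *	do {
 *		wait until sbspace() can hold the (atomic) request;
 *		build an mbuf chain from the uio with uiomove();
 *		so_pru_send(so, pru_flags, top, addr, control, td);
 *	} while (resid);
 *	sbunlock(&so->so_snd);
 */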
504int
505sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
506 struct mbuf *top, struct mbuf *control, int flags,
507 struct thread *td)
508{
509 struct mbuf **mp;
510 struct mbuf *m;
511 long space, len, resid;
e43a034f 512 int clen = 0, error, dontroute, mlen;
984263bc 513 int atomic = sosendallatonce(so) || top;
6b6e0885 514 int pru_flags;
515
516 if (uio)
517 resid = uio->uio_resid;
518 else
519 resid = top->m_pkthdr.len;
520 /*
521 * In theory resid should be unsigned.
522 * However, space must be signed, as it might be less than 0
523 * if we over-committed, and we must use a signed comparison
524 * of space and resid. On the other hand, a negative resid
525 * causes us to loop sending 0-length segments to the protocol.
526 *
527 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
528 * type sockets since that's an error.
529 */
530 if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
531 error = EINVAL;
532 goto out;
533 }
534
535 dontroute =
536 (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
537 (so->so_proto->pr_flags & PR_ATOMIC);
538 if (td->td_proc && td->td_proc->p_stats)
539 td->td_proc->p_stats->p_ru.ru_msgsnd++;
540 if (control)
541 clen = control->m_len;
e43a034f 542#define gotoerr(errno) { error = errno; crit_exit(); goto release; }
543
544restart:
545 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
546 if (error)
547 goto out;
548 do {
e43a034f 549 crit_enter();
984263bc 550 if (so->so_state & SS_CANTSENDMORE)
6ea1e9b9 551 gotoerr(EPIPE);
552 if (so->so_error) {
553 error = so->so_error;
554 so->so_error = 0;
e43a034f 555 crit_exit();
556 goto release;
557 }
558 if ((so->so_state & SS_ISCONNECTED) == 0) {
559 /*
 560	 * `sendto' and `sendmsg' are allowed on a connection-
561 * based socket if it supports implied connect.
562 * Return ENOTCONN if not connected and no address is
563 * supplied.
564 */
565 if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
566 (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
567 if ((so->so_state & SS_ISCONFIRMING) == 0 &&
568 !(resid == 0 && clen != 0))
6ea1e9b9 569 gotoerr(ENOTCONN);
984263bc 570 } else if (addr == 0)
6ea1e9b9 571 gotoerr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
572 ENOTCONN : EDESTADDRREQ);
573 }
574 space = sbspace(&so->so_snd);
575 if (flags & MSG_OOB)
576 space += 1024;
577 if ((atomic && resid > so->so_snd.sb_hiwat) ||
578 clen > so->so_snd.sb_hiwat)
6ea1e9b9 579 gotoerr(EMSGSIZE);
976ec718 580 if (space < resid + clen && uio &&
984263bc 581 (atomic || space < so->so_snd.sb_lowat || space < clen)) {
9ba76b73 582 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
6ea1e9b9 583 gotoerr(EWOULDBLOCK);
584 sbunlock(&so->so_snd);
585 error = sbwait(&so->so_snd);
e43a034f 586 crit_exit();
587 if (error)
588 goto out;
589 goto restart;
590 }
e43a034f 591 crit_exit();
592 mp = &top;
593 space -= clen;
594 do {
595 if (uio == NULL) {
596 /*
597 * Data is prepackaged in "top".
598 */
599 resid = 0;
600 if (flags & MSG_EOR)
601 top->m_flags |= M_EOR;
602 } else do {
603 m = m_getl(resid, MB_WAIT, MT_DATA,
604 top == NULL ? M_PKTHDR : 0, &mlen);
605 if (top == NULL) {
606 m->m_pkthdr.len = 0;
607 m->m_pkthdr.rcvif = (struct ifnet *)0;
984263bc 608 }
609 len = min(min(mlen, resid), space);
610 if (resid < MINCLSIZE) {
611 /*
612 * For datagram protocols, leave room
613 * for protocol headers in first mbuf.
614 */
615 if (atomic && top == 0 && len < mlen)
616 MH_ALIGN(m, len);
617 }
618 space -= len;
619 error = uiomove(mtod(m, caddr_t), (int)len, uio);
620 resid = uio->uio_resid;
621 m->m_len = len;
622 *mp = m;
623 top->m_pkthdr.len += len;
624 if (error)
625 goto release;
626 mp = &m->m_next;
627 if (resid <= 0) {
628 if (flags & MSG_EOR)
629 top->m_flags |= M_EOR;
630 break;
631 }
632 } while (space > 0 && atomic);
633 if (dontroute)
634 so->so_options |= SO_DONTROUTE;
635 if (flags & MSG_OOB) {
636 pru_flags = PRUS_OOB;
637 } else if ((flags & MSG_EOF) &&
638 (so->so_proto->pr_flags & PR_IMPLOPCL) &&
639 (resid <= 0)) {
640 /*
641 * If the user set MSG_EOF, the protocol
642 * understands this flag and nothing left to
643 * send then use PRU_SEND_EOF instead of PRU_SEND.
644 */
645 pru_flags = PRUS_EOF;
646 } else if (resid > 0 && space > 0) {
647 /* If there is more to send, set PRUS_MORETOCOME */
648 pru_flags = PRUS_MORETOCOME;
649 } else {
650 pru_flags = 0;
651 }
e43a034f 652 crit_enter();
653 /*
654 * XXX all the SS_CANTSENDMORE checks previously
 655	 * done could be out of date. We could have received
656 * a reset packet in an interrupt or maybe we slept
657 * while doing page faults in uiomove() etc. We could
 658	 * probably recheck again inside the critical section
659 * here, but there are probably other places that this
660 * also happens. We must rethink this.
661 */
6b6e0885 662 error = so_pru_send(so, pru_flags, top, addr, control, td);
e43a034f 663 crit_exit();
664 if (dontroute)
665 so->so_options &= ~SO_DONTROUTE;
666 clen = 0;
667 control = 0;
668 top = 0;
669 mp = &top;
670 if (error)
6b6e0885 671 goto release;
672 } while (resid && space > 0);
673 } while (resid);
674
675release:
676 sbunlock(&so->so_snd);
677out:
678 if (top)
679 m_freem(top);
680 if (control)
681 m_freem(control);
682 return (error);
683}
684
685/*
686 * A specialization of sosend() for UDP based on protocol-specific knowledge:
687 * so->so_proto->pr_flags has the PR_ATOMIC field set. This means that
688 * sosendallatonce() returns true,
689 * the "atomic" variable is true,
690 * and sosendudp() blocks until space is available for the entire send.
691 * so->so_proto->pr_flags does not have the PR_CONNREQUIRED or
692 * PR_IMPLOPCL flags set.
693 * UDP has no out-of-band data.
694 * UDP has no control data.
695 * UDP does not support MSG_EOR.
696 */
697int
698sosendudp(struct socket *so, struct sockaddr *addr, struct uio *uio,
699 struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
700{
e43a034f 701 int resid, error;
702 boolean_t dontroute; /* temporary SO_DONTROUTE setting */
703
704 if (td->td_proc && td->td_proc->p_stats)
705 td->td_proc->p_stats->p_ru.ru_msgsnd++;
706 if (control)
707 m_freem(control);
708
709 KASSERT((uio && !top) || (top && !uio), ("bad arguments to sosendudp"));
710 resid = uio ? uio->uio_resid : top->m_pkthdr.len;
711
712restart:
713 error = sblock(&so->so_snd, SBLOCKWAIT(flags));
714 if (error)
715 goto out;
716
e43a034f 717 crit_enter();
718 if (so->so_state & SS_CANTSENDMORE)
719 gotoerr(EPIPE);
720 if (so->so_error) {
721 error = so->so_error;
722 so->so_error = 0;
e43a034f 723 crit_exit();
724 goto release;
725 }
726 if (!(so->so_state & SS_ISCONNECTED) && addr == NULL)
727 gotoerr(EDESTADDRREQ);
728 if (resid > so->so_snd.sb_hiwat)
729 gotoerr(EMSGSIZE);
730 if (uio && sbspace(&so->so_snd) < resid) {
9ba76b73 731 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT))
732 gotoerr(EWOULDBLOCK);
733 sbunlock(&so->so_snd);
734 error = sbwait(&so->so_snd);
e43a034f 735 crit_exit();
736 if (error)
737 goto out;
738 goto restart;
739 }
e43a034f 740 crit_exit();
741
742 if (uio) {
e12241e1 743 top = m_uiomove(uio);
744 if (top == NULL)
745 goto release;
746 }
747
748 dontroute = (flags & MSG_DONTROUTE) && !(so->so_options & SO_DONTROUTE);
749 if (dontroute)
750 so->so_options |= SO_DONTROUTE;
751
752 error = so_pru_send(so, 0, top, addr, NULL, td);
753 top = NULL; /* sent or freed in lower layer */
754
755 if (dontroute)
756 so->so_options &= ~SO_DONTROUTE;
757
758release:
759 sbunlock(&so->so_snd);
760out:
761 if (top)
762 m_freem(top);
763 return (error);
764}
765
766/*
767 * Implement receive operations on a socket.
768 * We depend on the way that records are added to the sockbuf
769 * by sbappend*. In particular, each record (mbufs linked through m_next)
770 * must begin with an address if the protocol so specifies,
771 * followed by an optional mbuf or mbufs containing ancillary data,
772 * and then zero or more mbufs of data.
773 * In order to avoid blocking network interrupts for the entire time here,
e43a034f 774 * we exit the critical section while doing the actual copy to user space.
775 * Although the sockbuf is locked, new data may still be appended,
776 * and thus we must maintain consistency of the sockbuf during that time.
777 *
778 * The caller may receive the data as a single mbuf chain by supplying
779 * an mbuf **mp0 for use in returning the chain. The uio is then used
780 * only for the count in uio_resid.
781 */
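/*
 * Illustrative layout of one record in so_rcv as it is consumed
 * below (the MT_SONAME mbuf is only present for PR_ADDR protocols):
 *
 *	MT_SONAME -> MT_CONTROL ... -> MT_DATA ...	(m_next links)
 *	    |
 *	    m_nextpkt -> next record
 */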
782int
783soreceive(so, psa, uio, mp0, controlp, flagsp)
1fd87d54 784 struct socket *so;
785 struct sockaddr **psa;
786 struct uio *uio;
787 struct mbuf **mp0;
788 struct mbuf **controlp;
789 int *flagsp;
790{
791 struct mbuf *m, *n, **mp;
792 struct mbuf *free_chain = NULL;
e43a034f 793 int flags, len, error, offset;
984263bc 794 struct protosw *pr = so->so_proto;
795 int moff, type = 0;
796 int orig_resid = uio->uio_resid;
797
798 mp = mp0;
799 if (psa)
857caa4a 800 *psa = NULL;
984263bc 801 if (controlp)
857caa4a 802 *controlp = NULL;
803 if (flagsp)
804 flags = *flagsp &~ MSG_EOR;
805 else
806 flags = 0;
807 if (flags & MSG_OOB) {
74f1caca 808 m = m_get(MB_WAIT, MT_DATA);
809 if (m == NULL)
810 return (ENOBUFS);
6b6e0885 811 error = so_pru_rcvoob(so, m, flags & MSG_PEEK);
812 if (error)
813 goto bad;
814 do {
815 error = uiomove(mtod(m, caddr_t),
816 (int) min(uio->uio_resid, m->m_len), uio);
817 m = m_free(m);
818 } while (uio->uio_resid && error == 0 && m);
819bad:
820 if (m)
821 m_freem(m);
822 return (error);
823 }
824 if (mp)
857caa4a 825 *mp = NULL;
984263bc 826 if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
6b6e0885 827 so_pru_rcvd(so, 0);
828
829restart:
857caa4a 830 crit_enter();
831 error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
832 if (error)
857caa4a 833 goto done;
834
835 m = so->so_rcv.sb_mb;
836 /*
837 * If we have less data than requested, block awaiting more
838 * (subject to any timeout) if:
839 * 1. the current count is less than the low water mark, or
840 * 2. MSG_WAITALL is set, and it is possible to do the entire
841 * receive operation at once if we block (resid <= hiwat).
842 * 3. MSG_DONTWAIT is not set
843 * If MSG_WAITALL is set but resid is larger than the receive buffer,
844 * we have to do the receive in sections, and thus risk returning
845 * a short count if a timeout or signal occurs after we start.
846 */
857caa4a 847 if (m == NULL || (((flags & MSG_DONTWAIT) == 0 &&
848 so->so_rcv.sb_cc < uio->uio_resid) &&
849 (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
850 ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
851 m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
857caa4a 852 KASSERT(m != NULL || !so->so_rcv.sb_cc, ("receive 1"));
853 if (so->so_error) {
854 if (m)
855 goto dontblock;
856 error = so->so_error;
857 if ((flags & MSG_PEEK) == 0)
858 so->so_error = 0;
859 goto release;
860 }
861 if (so->so_state & SS_CANTRCVMORE) {
862 if (m)
863 goto dontblock;
864 else
865 goto release;
866 }
857caa4a 867 for (; m; m = m->m_next) {
868 if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
869 m = so->so_rcv.sb_mb;
870 goto dontblock;
871 }
857caa4a 872 }
984263bc 873 if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
6b6e0885 874 (pr->pr_flags & PR_CONNREQUIRED)) {
875 error = ENOTCONN;
876 goto release;
877 }
878 if (uio->uio_resid == 0)
879 goto release;
9ba76b73 880 if (flags & (MSG_FNONBLOCKING|MSG_DONTWAIT)) {
881 error = EWOULDBLOCK;
882 goto release;
883 }
884 sbunlock(&so->so_rcv);
885 error = sbwait(&so->so_rcv);
984263bc 886 if (error)
887 goto done;
888 crit_exit();
889 goto restart;
890 }
891dontblock:
892 if (uio->uio_td && uio->uio_td->td_proc)
893 uio->uio_td->td_proc->p_stats->p_ru.ru_msgrcv++;
894
895 /*
896 * note: m should be == sb_mb here. Cache the next record while
897 * cleaning up. Note that calling m_free*() will break out critical
898 * section.
899 */
900 KKASSERT(m == so->so_rcv.sb_mb);
901
902 /*
903 * Skip any address mbufs prepending the record.
904 */
905 if (pr->pr_flags & PR_ADDR) {
906 KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
907 orig_resid = 0;
908 if (psa)
cfa2ba21 909 *psa = dup_sockaddr(mtod(m, struct sockaddr *));
857caa4a 910 if (flags & MSG_PEEK)
984263bc 911 m = m->m_next;
912 else
913 m = sbunlinkmbuf(&so->so_rcv, m, &free_chain);
984263bc 914 }
915
916 /*
917 * Skip any control mbufs prepending the record.
918 */
919#ifdef SCTP
920 if (pr->pr_flags & PR_ADDR_OPT) {
921 /*
922 * For SCTP we may be getting a
923 * whole message OR a partial delivery.
924 */
857caa4a 925 if (m && m->m_type == MT_SONAME) {
926 orig_resid = 0;
927 if (psa)
928 *psa = dup_sockaddr(mtod(m, struct sockaddr *));
857caa4a 929 if (flags & MSG_PEEK)
78812139 930 m = m->m_next;
931 else
932 m = sbunlinkmbuf(&so->so_rcv, m, &free_chain);
933 }
934 }
935#endif /* SCTP */
936 while (m && m->m_type == MT_CONTROL && error == 0) {
937 if (flags & MSG_PEEK) {
938 if (controlp)
939 *controlp = m_copy(m, 0, m->m_len);
857caa4a 940 m = m->m_next; /* XXX race */
984263bc 941 } else {
984263bc 942 if (controlp) {
857caa4a 943 n = sbunlinkmbuf(&so->so_rcv, m, NULL);
944 if (pr->pr_domain->dom_externalize &&
945 mtod(m, struct cmsghdr *)->cmsg_type ==
946 SCM_RIGHTS)
947 error = (*pr->pr_domain->dom_externalize)(m);
948 *controlp = m;
857caa4a 949 m = n;
984263bc 950 } else {
857caa4a 951 m = sbunlinkmbuf(&so->so_rcv, m, &free_chain);
952 }
953 }
857caa4a 954 if (controlp && *controlp) {
955 orig_resid = 0;
956 controlp = &(*controlp)->m_next;
957 }
958 }
959
960 /*
961 * flag OOB data.
962 */
984263bc 963 if (m) {
964 type = m->m_type;
965 if (type == MT_OOBDATA)
966 flags |= MSG_OOB;
967 }
968
969 /*
970 * Copy to the UIO or mbuf return chain (*mp).
971 */
972 moff = 0;
973 offset = 0;
974 while (m && uio->uio_resid > 0 && error == 0) {
975 if (m->m_type == MT_OOBDATA) {
976 if (type != MT_OOBDATA)
977 break;
978 } else if (type == MT_OOBDATA)
979 break;
980 else
981 KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
982 ("receive 3"));
983 so->so_state &= ~SS_RCVATMARK;
984 len = uio->uio_resid;
985 if (so->so_oobmark && len > so->so_oobmark - offset)
986 len = so->so_oobmark - offset;
987 if (len > m->m_len - moff)
988 len = m->m_len - moff;
989 /*
990 * If mp is set, just pass back the mbufs.
991 * Otherwise copy them out via the uio, then free.
992 * Sockbuf must be consistent here (points to current mbuf,
993 * it points to next record) when we drop priority;
994 * we must note any additions to the sockbuf when we
995 * block interrupts again.
996 */
857caa4a 997 if (mp == NULL) {
e43a034f 998 crit_exit();
984263bc 999 error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
e43a034f 1000 crit_enter();
1001 if (error)
1002 goto release;
857caa4a 1003 } else {
984263bc 1004 uio->uio_resid -= len;
1005 }
1006
1007 /*
1008 * Eat the entire mbuf or just a piece of it
1009 */
1010 if (len == m->m_len - moff) {
1011 if (m->m_flags & M_EOR)
1012 flags |= MSG_EOR;
1013#ifdef SCTP
1014 if (m->m_flags & M_NOTIFICATION)
1015 flags |= MSG_NOTIFICATION;
1016#endif /* SCTP */
1017 if (flags & MSG_PEEK) {
1018 m = m->m_next;
1019 moff = 0;
1020 } else {
984263bc 1021 if (mp) {
857caa4a 1022 n = sbunlinkmbuf(&so->so_rcv, m, NULL);
1023 *mp = m;
1024 mp = &m->m_next;
857caa4a 1025 m = n;
984263bc 1026 } else {
857caa4a 1027 m = sbunlinkmbuf(&so->so_rcv, m, &free_chain);
984263bc 1028 }
1029 }
1030 } else {
857caa4a 1031 if (flags & MSG_PEEK) {
984263bc 1032 moff += len;
857caa4a 1033 } else {
984263bc 1034 if (mp)
74f1caca 1035 *mp = m_copym(m, 0, len, MB_WAIT);
1036 m->m_data += len;
1037 m->m_len -= len;
1038 so->so_rcv.sb_cc -= len;
1039 }
1040 }
1041 if (so->so_oobmark) {
1042 if ((flags & MSG_PEEK) == 0) {
1043 so->so_oobmark -= len;
1044 if (so->so_oobmark == 0) {
1045 so->so_state |= SS_RCVATMARK;
1046 break;
1047 }
1048 } else {
1049 offset += len;
1050 if (offset == so->so_oobmark)
1051 break;
1052 }
1053 }
1054 if (flags & MSG_EOR)
1055 break;
1056 /*
1057 * If the MSG_WAITALL flag is set (for non-atomic socket),
1058 * we must not quit until "uio->uio_resid == 0" or an error
1059 * termination. If a signal/timeout occurs, return
1060 * with a short count but without error.
1061 * Keep sockbuf locked against other readers.
1062 */
1063 while (flags & MSG_WAITALL && m == NULL &&
1064 uio->uio_resid > 0 && !sosendallatonce(so) &&
1065 so->so_rcv.sb_mb == NULL) {
1066 if (so->so_error || so->so_state & SS_CANTRCVMORE)
1067 break;
1068 /*
1069 * The window might have closed to zero, make
1070 * sure we send an ack now that we've drained
1071 * the buffer or we might end up blocking until
1072 * the idle takes over (5 seconds).
1073 */
1074 if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
6b6e0885 1075 so_pru_rcvd(so, flags);
1076 error = sbwait(&so->so_rcv);
1077 if (error) {
1078 sbunlock(&so->so_rcv);
1079 error = 0;
1080 goto done;
1081 }
1082 m = so->so_rcv.sb_mb;
1083 }
1084 }
1085
1086 /*
1087 * If an atomic read was requested but unread data still remains
1088 * in the record, set MSG_TRUNC.
1089 */
bf8a9a6f 1090 if (m && pr->pr_flags & PR_ATOMIC)
984263bc 1091 flags |= MSG_TRUNC;
1092
1093 /*
1094 * Cleanup. If an atomic read was requested drop any unread data.
1095 */
1096 if ((flags & MSG_PEEK) == 0) {
1097 if (m && (pr->pr_flags & PR_ATOMIC))
1098 sbdroprecord(&so->so_rcv);
1099 if ((pr->pr_flags & PR_WANTRCVD) && so->so_pcb)
6b6e0885 1100 so_pru_rcvd(so, flags);
984263bc 1101 }
bf8a9a6f 1102
1103 if (orig_resid == uio->uio_resid && orig_resid &&
1104 (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
1105 sbunlock(&so->so_rcv);
e43a034f 1106 crit_exit();
1107 goto restart;
1108 }
1109
1110 if (flagsp)
1111 *flagsp |= flags;
1112release:
1113 sbunlock(&so->so_rcv);
857caa4a 1114done:
e43a034f 1115 crit_exit();
1116 if (free_chain)
1117 m_freem(free_chain);
1118 return (error);
1119}
1120
1121int
1122soshutdown(so, how)
1123 struct socket *so;
1124 int how;
984263bc 1125{
1126 if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
1127 return (EINVAL);
1128
1129 if (how != SHUT_WR)
1130 sorflush(so);
1131 if (how != SHUT_RD)
6b6e0885 1132 return (so_pru_shutdown(so));
1133 return (0);
1134}
1135
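/*
 * Flush the receive buffer: mark the socket unable to receive more,
 * move the sockbuf contents aside into a local copy, let the protocol
 * dispose of any rights (SCM_RIGHTS) riding in it and finally release
 * the mbufs and space accounting.
 */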
1136void
1137sorflush(so)
1fd87d54 1138 struct socket *so;
984263bc 1139{
1140 struct sockbuf *sb = &so->so_rcv;
1141 struct protosw *pr = so->so_proto;
1142 struct sockbuf asb;
1143
1144 sb->sb_flags |= SB_NOINTR;
1145 (void) sblock(sb, M_WAITOK);
1146
1147 crit_enter();
1148 socantrcvmore(so);
1149 sbunlock(sb);
1150 asb = *sb;
1151 bzero((caddr_t)sb, sizeof (*sb));
1152 if (asb.sb_flags & SB_KNOTE) {
1153 sb->sb_sel.si_note = asb.sb_sel.si_note;
1154 sb->sb_flags = SB_KNOTE;
1155 }
1156 crit_exit();
1157
1158 if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
1159 (*pr->pr_domain->dom_dispose)(asb.sb_mb);
1160 sbrelease(&asb, so);
1161}
1162
1163#ifdef INET
1164static int
1165do_setopt_accept_filter(so, sopt)
1166 struct socket *so;
1167 struct sockopt *sopt;
1168{
1169 struct accept_filter_arg *afap = NULL;
1170 struct accept_filter *afp;
1171 struct so_accf *af = so->so_accf;
1172 int error = 0;
1173
1174 /* do not set/remove accept filters on non listen sockets */
1175 if ((so->so_options & SO_ACCEPTCONN) == 0) {
1176 error = EINVAL;
1177 goto out;
1178 }
1179
1180 /* removing the filter */
1181 if (sopt == NULL) {
1182 if (af != NULL) {
1183 if (af->so_accept_filter != NULL &&
1184 af->so_accept_filter->accf_destroy != NULL) {
1185 af->so_accept_filter->accf_destroy(so);
1186 }
1187 if (af->so_accept_filter_str != NULL) {
1188 FREE(af->so_accept_filter_str, M_ACCF);
1189 }
1190 FREE(af, M_ACCF);
1191 so->so_accf = NULL;
1192 }
1193 so->so_options &= ~SO_ACCEPTFILTER;
1194 return (0);
1195 }
1196 /* adding a filter */
1197 /* must remove previous filter first */
1198 if (af != NULL) {
1199 error = EINVAL;
1200 goto out;
1201 }
1202 /* don't put large objects on the kernel stack */
1203 MALLOC(afap, struct accept_filter_arg *, sizeof(*afap), M_TEMP, M_WAITOK);
1204 error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
1205 afap->af_name[sizeof(afap->af_name)-1] = '\0';
1206 afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
1207 if (error)
1208 goto out;
1209 afp = accept_filt_get(afap->af_name);
1210 if (afp == NULL) {
1211 error = ENOENT;
1212 goto out;
1213 }
1214 MALLOC(af, struct so_accf *, sizeof(*af), M_ACCF, M_WAITOK);
1215 bzero(af, sizeof(*af));
1216 if (afp->accf_create != NULL) {
1217 if (afap->af_name[0] != '\0') {
1218 int len = strlen(afap->af_name) + 1;
1219
1220 MALLOC(af->so_accept_filter_str, char *, len, M_ACCF, M_WAITOK);
1221 strcpy(af->so_accept_filter_str, afap->af_name);
1222 }
1223 af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
1224 if (af->so_accept_filter_arg == NULL) {
1225 FREE(af->so_accept_filter_str, M_ACCF);
1226 FREE(af, M_ACCF);
1227 so->so_accf = NULL;
1228 error = EINVAL;
1229 goto out;
1230 }
1231 }
1232 af->so_accept_filter = afp;
1233 so->so_accf = af;
1234 so->so_options |= SO_ACCEPTFILTER;
1235out:
1236 if (afap != NULL)
1237 FREE(afap, M_TEMP);
1238 return (error);
1239}
1240#endif /* INET */
1241
1242/*
1243 * Perhaps this routine, and sooptcopyout(), below, ought to come in
1244 * an additional variant to handle the case where the option value needs
1245 * to be some kind of integer, but not a specific size.
1246 * In addition to their use here, these functions are also called by the
1247 * protocol-level pr_ctloutput() routines.
1248 */
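/*
 * Typical use from an option handler (illustrative only):
 *
 *	int optval;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
 *	if (error)
 *		return (error);
 */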
1249int
1250sooptcopyin(sopt, buf, len, minlen)
1251 struct sockopt *sopt;
1252 void *buf;
1253 size_t len;
1254 size_t minlen;
1255{
1256 size_t valsize;
1257
1258 /*
1259 * If the user gives us more than we wanted, we ignore it,
1260 * but if we don't get the minimum length the caller
1261 * wants, we return EINVAL. On success, sopt->sopt_valsize
1262 * is set to however much we actually retrieved.
1263 */
1264 if ((valsize = sopt->sopt_valsize) < minlen)
1265 return EINVAL;
1266 if (valsize > len)
1267 sopt->sopt_valsize = valsize = len;
1268
dadab5e9 1269 if (sopt->sopt_td != NULL)
1270 return (copyin(sopt->sopt_val, buf, valsize));
1271
1272 bcopy(sopt->sopt_val, buf, valsize);
1273 return 0;
1274}
1275
1276int
1277sosetopt(so, sopt)
1278 struct socket *so;
1279 struct sockopt *sopt;
1280{
1281 int error, optval;
1282 struct linger l;
1283 struct timeval tv;
1284 u_long val;
1285
1286 error = 0;
e79d388f 1287 sopt->sopt_dir = SOPT_SET;
984263bc 1288 if (sopt->sopt_level != SOL_SOCKET) {
1289 if (so->so_proto && so->so_proto->pr_ctloutput) {
1290 return (so_pr_ctloutput(so, sopt));
1291 }
1292 error = ENOPROTOOPT;
1293 } else {
1294 switch (sopt->sopt_name) {
1295#ifdef INET
1296 case SO_ACCEPTFILTER:
1297 error = do_setopt_accept_filter(so, sopt);
1298 if (error)
1299 goto bad;
1300 break;
1301#endif /* INET */
1302 case SO_LINGER:
1303 error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
1304 if (error)
1305 goto bad;
1306
1307 so->so_linger = l.l_linger;
1308 if (l.l_onoff)
1309 so->so_options |= SO_LINGER;
1310 else
1311 so->so_options &= ~SO_LINGER;
1312 break;
1313
1314 case SO_DEBUG:
1315 case SO_KEEPALIVE:
1316 case SO_DONTROUTE:
1317 case SO_USELOOPBACK:
1318 case SO_BROADCAST:
1319 case SO_REUSEADDR:
1320 case SO_REUSEPORT:
1321 case SO_OOBINLINE:
1322 case SO_TIMESTAMP:
1323 error = sooptcopyin(sopt, &optval, sizeof optval,
1324 sizeof optval);
1325 if (error)
1326 goto bad;
1327 if (optval)
1328 so->so_options |= sopt->sopt_name;
1329 else
1330 so->so_options &= ~sopt->sopt_name;
1331 break;
1332
1333 case SO_SNDBUF:
1334 case SO_RCVBUF:
1335 case SO_SNDLOWAT:
1336 case SO_RCVLOWAT:
1337 error = sooptcopyin(sopt, &optval, sizeof optval,
1338 sizeof optval);
1339 if (error)
1340 goto bad;
1341
1342 /*
1343 * Values < 1 make no sense for any of these
1344 * options, so disallow them.
1345 */
1346 if (optval < 1) {
1347 error = EINVAL;
1348 goto bad;
1349 }
1350
1351 switch (sopt->sopt_name) {
1352 case SO_SNDBUF:
1353 case SO_RCVBUF:
1354 if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
1355 &so->so_snd : &so->so_rcv, (u_long)optval,
1356 so,
1357 &curproc->p_rlimit[RLIMIT_SBSIZE]) == 0) {
1358 error = ENOBUFS;
1359 goto bad;
1360 }
1361 break;
1362
1363 /*
1364 * Make sure the low-water is never greater than
1365 * the high-water.
1366 */
1367 case SO_SNDLOWAT:
1368 so->so_snd.sb_lowat =
1369 (optval > so->so_snd.sb_hiwat) ?
1370 so->so_snd.sb_hiwat : optval;
1371 break;
1372 case SO_RCVLOWAT:
1373 so->so_rcv.sb_lowat =
1374 (optval > so->so_rcv.sb_hiwat) ?
1375 so->so_rcv.sb_hiwat : optval;
1376 break;
1377 }
1378 break;
1379
1380 case SO_SNDTIMEO:
1381 case SO_RCVTIMEO:
1382 error = sooptcopyin(sopt, &tv, sizeof tv,
1383 sizeof tv);
1384 if (error)
1385 goto bad;
1386
1387 /* assert(hz > 0); */
1388 if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz ||
1389 tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
1390 error = EDOM;
1391 goto bad;
1392 }
1393 /* assert(tick > 0); */
1394 /* assert(ULONG_MAX - SHRT_MAX >= 1000000); */
1395 val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
1396 if (val > SHRT_MAX) {
1397 error = EDOM;
1398 goto bad;
1399 }
1400 if (val == 0 && tv.tv_usec != 0)
1401 val = 1;
1402
1403 switch (sopt->sopt_name) {
1404 case SO_SNDTIMEO:
1405 so->so_snd.sb_timeo = val;
1406 break;
1407 case SO_RCVTIMEO:
1408 so->so_rcv.sb_timeo = val;
1409 break;
1410 }
1411 break;
1412 default:
1413 error = ENOPROTOOPT;
1414 break;
1415 }
1416 if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
6b6e0885 1417 (void) so_pr_ctloutput(so, sopt);
1418 }
1419 }
1420bad:
1421 return (error);
1422}
1423
1424/* Helper routine for getsockopt */
1425int
f1f552f6 1426sooptcopyout(struct sockopt *sopt, const void *buf, size_t len)
1427{
1428 int error;
1429 size_t valsize;
1430
1431 error = 0;
1432
1433 /*
1434 * Documented get behavior is that we always return a value,
1435 * possibly truncated to fit in the user's buffer.
1436 * Traditional behavior is that we always tell the user
1437 * precisely how much we copied, rather than something useful
1438 * like the total amount we had available for her.
1439 * Note that this interface is not idempotent; the entire answer must
1440 * generated ahead of time.
1441 */
1442 valsize = min(len, sopt->sopt_valsize);
1443 sopt->sopt_valsize = valsize;
1444 if (sopt->sopt_val != 0) {
dadab5e9 1445 if (sopt->sopt_td != NULL)
1446 error = copyout(buf, sopt->sopt_val, valsize);
1447 else
1448 bcopy(buf, sopt->sopt_val, valsize);
1449 }
1450 return error;
1451}
1452
1453int
1454sogetopt(so, sopt)
1455 struct socket *so;
1456 struct sockopt *sopt;
1457{
1458 int error, optval;
1459 struct linger l;
1460 struct timeval tv;
51f4ca92 1461#ifdef INET
984263bc 1462 struct accept_filter_arg *afap;
51f4ca92 1463#endif
1464
1465 error = 0;
e79d388f 1466 sopt->sopt_dir = SOPT_GET;
1467 if (sopt->sopt_level != SOL_SOCKET) {
1468 if (so->so_proto && so->so_proto->pr_ctloutput) {
6b6e0885 1469 return (so_pr_ctloutput(so, sopt));
1470 } else
1471 return (ENOPROTOOPT);
1472 } else {
1473 switch (sopt->sopt_name) {
1474#ifdef INET
1475 case SO_ACCEPTFILTER:
1476 if ((so->so_options & SO_ACCEPTCONN) == 0)
1477 return (EINVAL);
1478 MALLOC(afap, struct accept_filter_arg *, sizeof(*afap),
1479 M_TEMP, M_WAITOK);
1480 bzero(afap, sizeof(*afap));
1481 if ((so->so_options & SO_ACCEPTFILTER) != 0) {
1482 strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
1483 if (so->so_accf->so_accept_filter_str != NULL)
1484 strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
1485 }
1486 error = sooptcopyout(sopt, afap, sizeof(*afap));
1487 FREE(afap, M_TEMP);
1488 break;
1489#endif /* INET */
1490
1491 case SO_LINGER:
1492 l.l_onoff = so->so_options & SO_LINGER;
1493 l.l_linger = so->so_linger;
1494 error = sooptcopyout(sopt, &l, sizeof l);
1495 break;
1496
1497 case SO_USELOOPBACK:
1498 case SO_DONTROUTE:
1499 case SO_DEBUG:
1500 case SO_KEEPALIVE:
1501 case SO_REUSEADDR:
1502 case SO_REUSEPORT:
1503 case SO_BROADCAST:
1504 case SO_OOBINLINE:
1505 case SO_TIMESTAMP:
1506 optval = so->so_options & sopt->sopt_name;
1507integer:
1508 error = sooptcopyout(sopt, &optval, sizeof optval);
1509 break;
1510
1511 case SO_TYPE:
1512 optval = so->so_type;
1513 goto integer;
1514
1515 case SO_ERROR:
1516 optval = so->so_error;
1517 so->so_error = 0;
1518 goto integer;
1519
1520 case SO_SNDBUF:
1521 optval = so->so_snd.sb_hiwat;
1522 goto integer;
1523
1524 case SO_RCVBUF:
1525 optval = so->so_rcv.sb_hiwat;
1526 goto integer;
1527
1528 case SO_SNDLOWAT:
1529 optval = so->so_snd.sb_lowat;
1530 goto integer;
1531
1532 case SO_RCVLOWAT:
1533 optval = so->so_rcv.sb_lowat;
1534 goto integer;
1535
1536 case SO_SNDTIMEO:
1537 case SO_RCVTIMEO:
1538 optval = (sopt->sopt_name == SO_SNDTIMEO ?
1539 so->so_snd.sb_timeo : so->so_rcv.sb_timeo);
1540
1541 tv.tv_sec = optval / hz;
1542 tv.tv_usec = (optval % hz) * tick;
1543 error = sooptcopyout(sopt, &tv, sizeof tv);
1544 break;
1545
1546 default:
1547 error = ENOPROTOOPT;
1548 break;
1549 }
1550 return (error);
1551 }
1552}
1553
1554/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
1555int
1556soopt_getm(struct sockopt *sopt, struct mbuf **mp)
1557{
1558 struct mbuf *m, *m_prev;
1559 int sopt_size = sopt->sopt_valsize, msize;
1560
1561 m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT, MT_DATA,
1562 0, &msize);
1563 if (m == NULL)
1564 return (ENOBUFS);
1565 m->m_len = min(msize, sopt_size);
1566 sopt_size -= m->m_len;
1567 *mp = m;
1568 m_prev = m;
1569
1570 while (sopt_size > 0) {
1571 m = m_getl(sopt_size, sopt->sopt_td ? MB_WAIT : MB_DONTWAIT,
1572 MT_DATA, 0, &msize);
1573 if (m == NULL) {
984263bc 1574 m_freem(*mp);
bf6ac9fa 1575 return (ENOBUFS);
984263bc 1576 }
bf6ac9fa 1577 m->m_len = min(msize, sopt_size);
1578 sopt_size -= m->m_len;
1579 m_prev->m_next = m;
1580 m_prev = m;
1581 }
bf6ac9fa 1582 return (0);
1583}
1584
1585/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
1586int
1587soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
1588{
1589 struct mbuf *m0 = m;
1590
1591 if (sopt->sopt_val == NULL)
1592 return 0;
1593 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
dadab5e9 1594 if (sopt->sopt_td != NULL) {
1595 int error;
1596
1597 error = copyin(sopt->sopt_val, mtod(m, char *),
1598 m->m_len);
1599 if (error != 0) {
1600 m_freem(m0);
bf6ac9fa 1601 return (error);
1602 }
1603 } else
1604 bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
1605 sopt->sopt_valsize -= m->m_len;
107a6e94 1606 sopt->sopt_val = (caddr_t)sopt->sopt_val + m->m_len;
1607 m = m->m_next;
1608 }
 1609	if (m != NULL) /* enough space should have been allocated at ip6_sooptmcopyin() */
1610 panic("ip6_sooptmcopyin");
1611 return 0;
1612}
1613
1614/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
1615int
1616soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
1617{
1618 struct mbuf *m0 = m;
1619 size_t valsize = 0;
1620
1621 if (sopt->sopt_val == NULL)
1622 return 0;
1623 while (m != NULL && sopt->sopt_valsize >= m->m_len) {
dadab5e9 1624 if (sopt->sopt_td != NULL) {
1625 int error;
1626
1627 error = copyout(mtod(m, char *), sopt->sopt_val,
1628 m->m_len);
1629 if (error != 0) {
1630 m_freem(m0);
bf6ac9fa 1631 return (error);
1632 }
1633 } else
1634 bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
1635 sopt->sopt_valsize -= m->m_len;
107a6e94 1636 sopt->sopt_val = (caddr_t)sopt->sopt_val + m->m_len;
1637 valsize += m->m_len;
1638 m = m->m_next;
1639 }
1640 if (m != NULL) {
1641 /* enough soopt buffer should be given from user-land */
1642 m_freem(m0);
bf6ac9fa 1643 return (EINVAL);
1644 }
1645 sopt->sopt_valsize = valsize;
1646 return 0;
1647}
1648
1649void
1650sohasoutofband(so)
1fd87d54 1651 struct socket *so;
1652{
1653 if (so->so_sigio != NULL)
1654 pgsigio(so->so_sigio, SIGURG, 0);
1655 selwakeup(&so->so_rcv.sb_sel);
1656}
1657
1658int
dadab5e9 1659sopoll(struct socket *so, int events, struct ucred *cred, struct thread *td)
1660{
1661 int revents = 0;
1662
1663 crit_enter();
1664
1665 if (events & (POLLIN | POLLRDNORM))
1666 if (soreadable(so))
1667 revents |= events & (POLLIN | POLLRDNORM);
1668
1669 if (events & POLLINIGNEOF)
1670 if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
1671 !TAILQ_EMPTY(&so->so_comp) || so->so_error)
1672 revents |= POLLINIGNEOF;
1673
1674 if (events & (POLLOUT | POLLWRNORM))
1675 if (sowriteable(so))
1676 revents |= events & (POLLOUT | POLLWRNORM);
1677
1678 if (events & (POLLPRI | POLLRDBAND))
1679 if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
1680 revents |= events & (POLLPRI | POLLRDBAND);
1681
1682 if (revents == 0) {
d08a3c4d 1683 if (events &
d99a0cbe 1684 (POLLIN | POLLINIGNEOF | POLLPRI | POLLRDNORM |
d08a3c4d 1685 POLLRDBAND)) {
dadab5e9 1686 selrecord(td, &so->so_rcv.sb_sel);
1687 so->so_rcv.sb_flags |= SB_SEL;
1688 }
1689
1690 if (events & (POLLOUT | POLLWRNORM)) {
dadab5e9 1691 selrecord(td, &so->so_snd.sb_sel);
1692 so->so_snd.sb_flags |= SB_SEL;
1693 }
1694 }
1695
e43a034f 1696 crit_exit();
1697 return (revents);
1698}
1699
1700int
1701sokqfilter(struct file *fp, struct knote *kn)
1702{
1703 struct socket *so = (struct socket *)kn->kn_fp->f_data;
1704 struct sockbuf *sb;
1705
1706 switch (kn->kn_filter) {
1707 case EVFILT_READ:
1708 if (so->so_options & SO_ACCEPTCONN)
1709 kn->kn_fop = &solisten_filtops;
1710 else
1711 kn->kn_fop = &soread_filtops;
1712 sb = &so->so_rcv;
1713 break;
1714 case EVFILT_WRITE:
1715 kn->kn_fop = &sowrite_filtops;
1716 sb = &so->so_snd;
1717 break;
1718 default:
1719 return (1);
1720 }
1721
e43a034f 1722 crit_enter();
1723 SLIST_INSERT_HEAD(&sb->sb_sel.si_note, kn, kn_selnext);
1724 sb->sb_flags |= SB_KNOTE;
e43a034f 1725 crit_exit();
1726 return (0);
1727}
1728
1729static void
1730filt_sordetach(struct knote *kn)
1731{
1732 struct socket *so = (struct socket *)kn->kn_fp->f_data;
984263bc 1733
e43a034f 1734 crit_enter();
1735 SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
1736 if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
1737 so->so_rcv.sb_flags &= ~SB_KNOTE;
e43a034f 1738 crit_exit();
1739}
1740
1741/*ARGSUSED*/
1742static int
1743filt_soread(struct knote *kn, long hint)
1744{
1745 struct socket *so = (struct socket *)kn->kn_fp->f_data;
1746
1747 kn->kn_data = so->so_rcv.sb_cc;
1748 if (so->so_state & SS_CANTRCVMORE) {
1749 kn->kn_flags |= EV_EOF;
1750 kn->kn_fflags = so->so_error;
1751 return (1);
1752 }
1753 if (so->so_error) /* temporary udp error */
1754 return (1);
1755 if (kn->kn_sfflags & NOTE_LOWAT)
1756 return (kn->kn_data >= kn->kn_sdata);
1757 return (kn->kn_data >= so->so_rcv.sb_lowat);
1758}
1759
1760static void
1761filt_sowdetach(struct knote *kn)
1762{
1763 struct socket *so = (struct socket *)kn->kn_fp->f_data;
984263bc 1764
e43a034f 1765 crit_enter();
1766 SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
1767 if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
1768 so->so_snd.sb_flags &= ~SB_KNOTE;
e43a034f 1769 crit_exit();
1770}
1771
1772/*ARGSUSED*/
1773static int
1774filt_sowrite(struct knote *kn, long hint)
1775{
1776 struct socket *so = (struct socket *)kn->kn_fp->f_data;
1777
1778 kn->kn_data = sbspace(&so->so_snd);
1779 if (so->so_state & SS_CANTSENDMORE) {
1780 kn->kn_flags |= EV_EOF;
1781 kn->kn_fflags = so->so_error;
1782 return (1);
1783 }
1784 if (so->so_error) /* temporary udp error */
1785 return (1);
1786 if (((so->so_state & SS_ISCONNECTED) == 0) &&
1787 (so->so_proto->pr_flags & PR_CONNREQUIRED))
1788 return (0);
1789 if (kn->kn_sfflags & NOTE_LOWAT)
1790 return (kn->kn_data >= kn->kn_sdata);
1791 return (kn->kn_data >= so->so_snd.sb_lowat);
1792}
1793
1794/*ARGSUSED*/
1795static int
1796filt_solisten(struct knote *kn, long hint)
1797{
1798 struct socket *so = (struct socket *)kn->kn_fp->f_data;
1799
1800 kn->kn_data = so->so_qlen;
1801 return (! TAILQ_EMPTY(&so->so_comp));
1802}