Initial import from FreeBSD RELENG_4:
[dragonfly.git] / sys / kern / uipc_socket.c
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.22 2002/12/15 09:24:23 maxim Exp $
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <vm/vm_zone.h>

#include <machine/limits.h>

#ifdef INET
static int	do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif /* INET */

static void	filt_sordetach(struct knote *kn);
static int	filt_soread(struct knote *kn, long hint);
static void	filt_sowdetach(struct knote *kn);
static int	filt_sowrite(struct knote *kn, long hint);
static int	filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };

struct vm_zone *socket_zone;
so_gen_t	so_gencnt;	/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

SYSCTL_DECL(_kern_ipc);

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
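
/*
 * Illustrative sketch (hypothetical protocol "foo"; not from the
 * original sources): a protocol publishes its entry points in a
 * struct pr_usrreqs hung off its protosw entry, which is what lets
 * the wrappers below dispatch without knowing the protocol:
 *
 *	static struct pr_usrreqs foo_usrreqs;	-- supplies pru_attach,
 *						   pru_bind, pru_send, ...
 *	...
 *	foosw.pr_usrreqs = &foo_usrreqs;
 *
 * sobind(), for instance, then reduces to
 *	(*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p);
 */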

/*
 * Get a socket structure from our zone, and initialize it.
 * We don't implement `waitok' yet (see comments in uipc_domain.c).
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(waitok)
	int waitok;
{
	struct socket *so;

	so = zalloci(socket_zone);
	if (so) {
		/* XXX race condition for reentrant kernel */
		bzero(so, sizeof *so);
		so->so_gencnt = ++so_gencnt;
		TAILQ_INIT(&so->so_aiojobq);
	}
	return so;
}

int
socreate(dom, aso, type, proto, p)
	int dom;
	struct socket **aso;
	register int type;
	int proto;
	struct proc *p;
{
	register struct protosw *prp;
	register struct socket *so;
	register int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (p->p_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != 0);
	if (so == 0)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = p->p_ucred;
	crhold(so->so_cred);
	so->so_proto = prp;
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, p);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}
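
/*
 * Usage sketch (condensed from the socket(2) syscall path in
 * uipc_syscalls.c; error handling abridged): callers hand the address
 * family, type and protocol straight through and get back an attached
 * socket:
 *
 *	struct socket *so;
 *	int error;
 *
 *	error = socreate(AF_INET, &so, SOCK_STREAM, IPPROTO_TCP, p);
 *	if (error == 0)
 *		error = sobind(so, nam, p);	-- nam: struct sockaddr *
 */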

int
sobind(so, nam, p)
	struct socket *so;
	struct sockaddr *nam;
	struct proc *p;
{
	int s = splnet();
	int error;

	error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p);
	splx(s);
	return (error);
}

void
sodealloc(so)
	struct socket *so;
{

	so->so_gencnt = ++so_gencnt;
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	if (so->so_accf != NULL) {
		if (so->so_accf->so_accept_filter != NULL &&
		    so->so_accf->so_accept_filter->accf_destroy != NULL) {
			so->so_accf->so_accept_filter->accf_destroy(so);
		}
		if (so->so_accf->so_accept_filter_str != NULL)
			FREE(so->so_accf->so_accept_filter_str, M_ACCF);
		FREE(so->so_accf, M_ACCF);
	}
#endif /* INET */
	crfree(so->so_cred);
	zfreei(socket_zone, so);
}

int
solisten(so, backlog, p)
	register struct socket *so;
	int backlog;
	struct proc *p;
{
	int s, error;

	s = splnet();
	error = (*so->so_proto->pr_usrreqs->pru_listen)(so, p);
	if (error) {
		splx(s);
		return (error);
	}
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	splx(s);
	return (0);
}

void
sofree(so)
	register struct socket *so;
{
	struct socket *head = so->so_head;

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (head != NULL) {
		if (so->so_state & SS_INCOMP) {
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			return;
		} else {
			panic("sofree: not queued");
		}
		so->so_state &= ~SS_INCOMP;
		so->so_head = NULL;
	}
	sbrelease(&so->so_snd, so);
	sorflush(so);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;

	funsetown(so->so_sigio);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp, *sonext;

		sp = TAILQ_FIRST(&so->so_incomp);
		for (; sp != NULL; sp = sonext) {
			sonext = TAILQ_NEXT(sp, so_list);
			(void) soabort(sp);
		}
		for (sp = TAILQ_FIRST(&so->so_comp); sp != NULL; sp = sonext) {
			sonext = TAILQ_NEXT(sp, so_list);
			/* Dequeue from so_comp since sofree() won't do it */
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_state &= ~SS_COMP;
			sp->so_head = NULL;
			(void) soabort(sp);
		}
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep((caddr_t)&so->so_timeo,
				    PSOCK | PCATCH, "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}
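
/*
 * Userland view of the SO_LINGER handling above (example; assumes a
 * connected TCP socket): with l_onoff set, close(2) reaches soclose()
 * and sleeps in the tsleep() loop for up to l_linger seconds
 * (so_linger * hz ticks) waiting for SS_ISCONNECTED to clear:
 *
 *	struct linger l;
 *
 *	l.l_onoff = 1;
 *	l.l_linger = 5;
 *	setsockopt(s, SOL_SOCKET, SO_LINGER, &l, sizeof l);
 *	close(s);			-- may block up to ~5 seconds
 */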

/*
 * Must be called at splnet...
 */
int
soabort(so)
	struct socket *so;
{
	int error;

	error = (*so->so_proto->pr_usrreqs->pru_abort)(so);
	if (error) {
		sofree(so);
		return error;
	}
	return (0);
}

int
soaccept(so, nam)
	register struct socket *so;
	struct sockaddr **nam;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
	splx(s);
	return (error);
}

int
soconnect(so, nam, p)
	register struct socket *so;
	struct sockaddr *nam;
	struct proc *p;
{
	int s;
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, p);
	splx(s);
	return (error);
}

int
soconnect2(so1, so2)
	register struct socket *so1;
	struct socket *so2;
{
	int s = splnet();
	int error;

	error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
	splx(s);
	return (error);
}

int
sodisconnect(so)
	register struct socket *so;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
bad:
	splx(s);
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
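
/*
 * Usage sketch (kernel caller, loosely following sendit() in
 * uipc_syscalls.c, which reaches this function through pru_sosend for
 * most protocols; details abridged): data normally arrives via a uio
 * with "top" left NULL:
 *
 *	struct uio auio;
 *	...				-- fill in iovecs and uio_resid
 *	error = sosend(so, (struct sockaddr *)0, &auio,
 *	    (struct mbuf *)0, (struct mbuf *)0, 0, p);
 *
 * For a PR_ATOMIC (datagram) protocol the EMSGSIZE check below fires
 * instead whenever the residual exceeds so->so_snd.sb_hiwat.
 */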
int
sosend(so, addr, uio, top, control, flags, p)
	register struct socket *so;
	struct sockaddr *addr;
	struct uio *uio;
	struct mbuf *top;
	struct mbuf *control;
	int flags;
	struct proc *p;
{
	struct mbuf **mp;
	register struct mbuf *m;
	register long space, len, resid;
	int clen = 0, error, s, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (p)
		p->p_stats->p_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' is allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == 0)
				snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
				    ENOTCONN : EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat)
			snderr(EMSGSIZE);
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (top == 0) {
					MGETHDR(m, M_WAIT, MT_DATA);
					if (m == NULL) {
						error = ENOBUFS;
						goto release;
					}
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = (struct ifnet *)0;
				} else {
					MGET(m, M_WAIT, MT_DATA);
					if (m == NULL) {
						error = ENOBUFS;
						goto release;
					}
					mlen = MLEN;
				}
				if (resid >= MINCLSIZE) {
					MCLGET(m, M_WAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
					len = min(min(mlen, resid), space);
				} else {
nopages:
					len = min(min(mlen, resid), space);
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t), (int)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			s = splnet();				/* XXX */
			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			error = (*so->so_proto->pr_usrreqs->pru_send)(so,
			    (flags & MSG_OOB) ? PRUS_OOB :
			    /*
			     * If the user set MSG_EOF, the protocol
			     * understands this flag, and there is nothing
			     * left to send, then use PRU_SEND_EOF instead
			     * of PRU_SEND.
			     */
			    ((flags & MSG_EOF) &&
			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			     (resid <= 0)) ?
				PRUS_EOF :
			    /* If there is more to send set PRUS_MORETOCOME */
			    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
			    top, addr, control, p);
			splx(s);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = 0;
			top = 0;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
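
/*
 * Record layout sketch implied by the contract above, for a PR_ADDR
 * protocol such as UDP (records chained through m_nextpkt, mbufs
 * within a record through m_next):
 *
 *	sb_mb -> MT_SONAME -> MT_CONTROL ... -> MT_DATA -> MT_DATA ...
 *	            |
 *	        m_nextpkt
 *	            |
 *	            v
 *	         MT_SONAME -> ...		-- next record
 */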
int
soreceive(so, psa, uio, mp0, controlp, flagsp)
	register struct socket *so;
	struct sockaddr **psa;
	struct uio *uio;
	struct mbuf **mp0;
	struct mbuf **controlp;
	int *flagsp;
{
	register struct mbuf *m, **mp;
	register int flags, len, error, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;

	mp = mp0;
	if (psa)
		*psa = 0;
	if (controlp)
		*controlp = 0;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    (int) min(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = (struct mbuf *)0;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);

restart:
	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		return (error);
	s = splnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat).
	 *   3. MSG_DONTWAIT is not set
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != 0 || !so->so_rcv.sb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error)
			return (error);
		goto restart;
	}
dontblock:
	if (uio->uio_procp)
		uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *),
			    mp0 == 0);
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m_free(m);
			m = so->so_rcv.sb_mb;
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (controlp) {
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				so->so_rcv.sb_mb = m_free(m);
				m = so->so_rcv.sb_mb;
			}
		}
		if (controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}
	if (m) {
		if ((flags & MSG_PEEK) == 0)
			m->m_nextpkt = nextrecord;
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("receive 3"));
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * The sockbuf must stay consistent (sb_mb points at the
		 * current mbuf, m_nextpkt at the next record) while we
		 * drop priority; we must note any additions to the
		 * sockbuf when we block interrupts again.
		 */
		if (mp == 0) {
			splx(s);
			error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
			s = splnet();
			if (error)
				goto release;
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = (struct mbuf *)0;
				} else {
					so->so_rcv.sb_mb = m = m_free(m);
				}
				if (m)
					m->m_nextpkt = nextrecord;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero, make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				(*pr->pr_usrreqs->pru_rcvd)(so, flags);
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				splx(s);
				return (0);
			}
			m = so->so_rcv.sb_mb;
			if (m)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		splx(s);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}

int
soshutdown(so, how)
	register struct socket *so;
	register int how;
{
	register struct protosw *pr = so->so_proto;

	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR)
		sorflush(so);
	if (how != SHUT_RD)
		return ((*pr->pr_usrreqs->pru_shutdown)(so));
	return (0);
}

void
sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	register int s;
	struct sockbuf asb;

	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	if (asb.sb_flags & SB_KNOTE) {
		sb->sb_sel.si_note = asb.sb_sel.si_note;
		sb->sb_flags = SB_KNOTE;
	}
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb, so);
}

#ifdef INET
static int
do_setopt_accept_filter(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				FREE(af->so_accept_filter_str, M_ACCF);
			}
			FREE(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}
	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	MALLOC(afap, struct accept_filter_arg *, sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	MALLOC(af, struct so_accf *, sizeof(*af), M_ACCF, M_WAITOK);
	bzero(af, sizeof(*af));
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			MALLOC(af->so_accept_filter_str, char *, len, M_ACCF, M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			FREE(af->so_accept_filter_str, M_ACCF);
			FREE(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		FREE(afap, M_TEMP);
	return (error);
}
#endif /* INET */
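
/*
 * Userland view of the setter above (example; the named filter module,
 * e.g. accf_data's "dataready", must be present and the socket must
 * already be listening):
 *
 *	struct accept_filter_arg afa;
 *
 *	bzero(&afa, sizeof afa);
 *	strcpy(afa.af_name, "dataready");
 *	setsockopt(s, SOL_SOCKET, SO_ACCEPTFILTER, &afa, sizeof afa);
 *
 * The sopt == NULL branch above is the removal path; a filter must be
 * removed before a different one can be installed.
 */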

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
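
/*
 * Usage sketch (hypothetical pr_ctloutput() handler; abridged): a
 * protocol typically round-trips a fixed-size option value through
 * these two helpers, keyed on the direction of the request:
 *
 *	int optval;
 *
 *	if (sopt->sopt_dir == SOPT_SET) {
 *		error = sooptcopyin(sopt, &optval, sizeof optval,
 *		    sizeof optval);
 *		if (error == 0)
 *			...			-- apply optval
 *	} else {
 *		optval = ...;			-- fetch current value
 *		error = sooptcopyout(sopt, &optval, sizeof optval);
 *	}
 */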
int
sooptcopyin(sopt, buf, len, minlen)
	struct sockopt *sopt;
	void *buf;
	size_t len;
	size_t minlen;
{
	size_t valsize;

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	if (sopt->sopt_p != 0)
		return (copyin(sopt->sopt_val, buf, valsize));

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}

int
sosetopt(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)
				(so, sopt));
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif /* INET */
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so, curproc) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.sb_lowat =
				    (optval > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : optval;
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat =
				    (optval > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : optval;
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
			    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - SHRT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
			if (val > SHRT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) ((*so->so_proto->pr_ctloutput)
				(so, sopt));
		}
	}
bad:
	return (error);
}

/* Helper routine for getsockopt */
int
sooptcopyout(sopt, buf, len)
	struct sockopt *sopt;
	void *buf;
	size_t len;
{
	int error;
	size_t valsize;

	error = 0;

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer
	 * must be generated ahead of time.
	 */
	valsize = min(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		if (sopt->sopt_p != 0)
			error = copyout(buf, sopt->sopt_val, valsize);
		else
			bcopy(buf, sopt->sopt_val, valsize);
	}
	return error;
}

int
sogetopt(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	struct accept_filter_arg *afap;

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)
				(so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			MALLOC(afap, struct accept_filter_arg *, sizeof(*afap),
			    M_TEMP, M_WAITOK);
			bzero(afap, sizeof(*afap));
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			FREE(afap, M_TEMP);
			break;
#endif /* INET */

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.sb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.sb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.sb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.sb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
			    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * tick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		return (error);
	}
}

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize;

	MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
	if (m == 0)
		return ENOBUFS;
	if (sopt_size > MLEN) {
		MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return ENOBUFS;
		}
		m->m_len = min(MCLBYTES, sopt_size);
	} else {
		m->m_len = min(MLEN, sopt_size);
	}
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size) {
		MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
		if (m == 0) {
			m_freem(*mp);
			return ENOBUFS;
		}
		if (sopt_size > MLEN) {
			MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(*mp);
				return ENOBUFS;
			}
			m->m_len = min(MCLBYTES, sopt_size);
		} else {
			m->m_len = min(MLEN, sopt_size);
		}
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return 0;
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;

	if (sopt->sopt_val == NULL)
		return 0;
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_p != NULL) {
			int error;

			error = copyin(sopt->sopt_val, mtod(m, char *),
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (caddr_t)sopt->sopt_val + m->m_len;
		m = m->m_next;
	}
	if (m != NULL)	/* the chain should have been sized in ip6_sooptmcopyin() */
		panic("ip6_sooptmcopyin");
	return 0;
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;

	if (sopt->sopt_val == NULL)
		return 0;
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_p != NULL) {
			int error;

			error = copyout(mtod(m, char *), sopt->sopt_val,
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
		sopt->sopt_valsize -= m->m_len;
		sopt->sopt_val = (caddr_t)sopt->sopt_val + m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* the caller should have supplied a large enough soopt buffer */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}
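
/*
 * Usage sketch for the three compatibility helpers above (loosely
 * following how the netinet6 ctloutput code drives them; abridged).
 * On a set, a chain sized from sopt_valsize is built and filled from
 * the user buffer; on a get, the legacy routine's result chain is
 * copied back out:
 *
 *	struct mbuf *m = NULL;
 *
 *	if (sopt->sopt_dir == SOPT_SET) {
 *		error = soopt_getm(sopt, &m);	-- allocate the chain
 *		if (error == 0)
 *			error = soopt_mcopyin(sopt, m);
 *	} else {
 *		...				-- legacy routine fills m
 *		error = soopt_mcopyout(sopt, m);
 *	}
 */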

void
sohasoutofband(so)
	register struct socket *so;
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	selwakeup(&so->so_rcv.sb_sel);
}

int
sopoll(struct socket *so, int events, struct ucred *cred, struct proc *p)
{
	int revents = 0;
	int s = splnet();

	if (events & (POLLIN | POLLRDNORM))
		if (soreadable(so))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (sowriteable(so))
			revents |= events & (POLLOUT | POLLWRNORM);

	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);

	if (revents == 0) {
		if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
			selrecord(p, &so->so_rcv.sb_sel);
			so->so_rcv.sb_flags |= SB_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(p, &so->so_snd.sb_sel);
			so->so_snd.sb_flags |= SB_SEL;
		}
	}

	splx(s);
	return (revents);
}

int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct sockbuf *sb;
	int s;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	default:
		return (1);
	}

	s = splnet();
	SLIST_INSERT_HEAD(&sb->sb_sel.si_note, kn, kn_selnext);
	sb->sb_flags |= SB_KNOTE;
	splx(s);
	return (0);
}
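
/*
 * Userland view of the registration above (example): an EVFILT_READ
 * knote on a listening socket is routed to solisten_filtops, so
 * kevent(2) reports readiness in terms of completed connections
 * (kn_data = so_qlen):
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	-- register only
 */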

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	int s = splnet();

	SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
		so->so_rcv.sb_flags &= ~SB_KNOTE;
	splx(s);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_rcv.sb_cc;
	if (so->so_state & SS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_rcv.sb_lowat);
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	int s = splnet();

	SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
		so->so_snd.sb_flags &= ~SB_KNOTE;
	splx(s);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = sbspace(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.sb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (! TAILQ_EMPTY(&so->so_comp));
}