/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket.c	8.3 (Berkeley) 4/15/94
 * $FreeBSD: src/sys/kern/uipc_socket.c,v 1.68.2.22 2002/12/15 09:24:23 maxim Exp $
 * $DragonFly: src/sys/kern/uipc_socket.c,v 1.2 2003/06/17 04:28:41 dillon Exp $
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/file.h>			/* for struct knote */
#include <sys/kernel.h>
#include <sys/event.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/jail.h>
#include <vm/vm_zone.h>

#include <machine/limits.h>

#ifdef INET
static int do_setopt_accept_filter(struct socket *so, struct sockopt *sopt);
#endif /* INET */

static void filt_sordetach(struct knote *kn);
static int filt_soread(struct knote *kn, long hint);
static void filt_sowdetach(struct knote *kn);
static int filt_sowrite(struct knote *kn, long hint);
static int filt_solisten(struct knote *kn, long hint);

static struct filterops solisten_filtops =
	{ 1, NULL, filt_sordetach, filt_solisten };
static struct filterops soread_filtops =
	{ 1, NULL, filt_sordetach, filt_soread };
static struct filterops sowrite_filtops =
	{ 1, NULL, filt_sowdetach, filt_sowrite };

struct vm_zone *socket_zone;
so_gen_t so_gencnt;		/* generation count for sockets */

MALLOC_DEFINE(M_SONAME, "soname", "socket name");
MALLOC_DEFINE(M_PCB, "pcb", "protocol control block");

SYSCTL_DECL(_kern_ipc);

static int somaxconn = SOMAXCONN;
SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, CTLFLAG_RW,
    &somaxconn, 0, "Maximum pending socket connection queue size");

/*
 * Socket operation routines.
 * These routines are called by the routines in
 * sys_socket.c or from a system process, and
 * implement the semantics of socket operations by
 * switching out to the protocol specific routines.
 */
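
/*
 * Editor's note (not part of the original source): every wrapper in
 * this file reduces to a dispatch through the protocol's user-request
 * vector, usually while blocking network interrupts, i.e. the
 * recurring shape is
 *
 *	s = splnet();
 *	error = (*so->so_proto->pr_usrreqs->pru_xxx)(so, ...);
 *	splx(s);
 *
 * where pru_xxx is a placeholder for pru_bind, pru_listen, and so on.
 */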

/*
 * Get a socket structure from our zone, and initialize it.
 * We don't implement `waitok' yet (see comments in uipc_domain.c).
 * Note that it would probably be better to allocate socket
 * and PCB at the same time, but I'm not convinced that all
 * the protocols can be easily modified to do this.
 */
struct socket *
soalloc(waitok)
	int waitok;
{
	struct socket *so;

	so = zalloci(socket_zone);
	if (so) {
		/* XXX race condition for reentrant kernel */
		bzero(so, sizeof *so);
		so->so_gencnt = ++so_gencnt;
		TAILQ_INIT(&so->so_aiojobq);
	}
	return so;
}

int
socreate(dom, aso, type, proto, p)
	int dom;
	struct socket **aso;
	register int type;
	int proto;
	struct proc *p;
{
	register struct protosw *prp;
	register struct socket *so;
	register int error;

	if (proto)
		prp = pffindproto(dom, proto, type);
	else
		prp = pffindtype(dom, type);

	if (prp == 0 || prp->pr_usrreqs->pru_attach == 0)
		return (EPROTONOSUPPORT);

	if (p->p_prison && jail_socket_unixiproute_only &&
	    prp->pr_domain->dom_family != PF_LOCAL &&
	    prp->pr_domain->dom_family != PF_INET &&
	    prp->pr_domain->dom_family != PF_ROUTE) {
		return (EPROTONOSUPPORT);
	}

	if (prp->pr_type != type)
		return (EPROTOTYPE);
	so = soalloc(p != 0);
	if (so == 0)
		return (ENOBUFS);

	TAILQ_INIT(&so->so_incomp);
	TAILQ_INIT(&so->so_comp);
	so->so_type = type;
	so->so_cred = p->p_ucred;
	crhold(so->so_cred);
	so->so_proto = prp;
	error = (*prp->pr_usrreqs->pru_attach)(so, proto, p);
	if (error) {
		so->so_state |= SS_NOFDREF;
		sofree(so);
		return (error);
	}
	*aso = so;
	return (0);
}

int
sobind(so, nam, p)
	struct socket *so;
	struct sockaddr *nam;
	struct proc *p;
{
	int s = splnet();
	int error;

	error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p);
	splx(s);
	return (error);
}

void
sodealloc(so)
	struct socket *so;
{

	so->so_gencnt = ++so_gencnt;
	if (so->so_rcv.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_rcv.sb_hiwat, 0, RLIM_INFINITY);
	if (so->so_snd.sb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo,
		    &so->so_snd.sb_hiwat, 0, RLIM_INFINITY);
#ifdef INET
	if (so->so_accf != NULL) {
		if (so->so_accf->so_accept_filter != NULL &&
		    so->so_accf->so_accept_filter->accf_destroy != NULL) {
			so->so_accf->so_accept_filter->accf_destroy(so);
		}
		if (so->so_accf->so_accept_filter_str != NULL)
			FREE(so->so_accf->so_accept_filter_str, M_ACCF);
		FREE(so->so_accf, M_ACCF);
	}
#endif /* INET */
	crfree(so->so_cred);
	zfreei(socket_zone, so);
}

int
solisten(so, backlog, p)
	register struct socket *so;
	int backlog;
	struct proc *p;
{
	int s, error;

	s = splnet();
	error = (*so->so_proto->pr_usrreqs->pru_listen)(so, p);
	if (error) {
		splx(s);
		return (error);
	}
	if (TAILQ_EMPTY(&so->so_comp))
		so->so_options |= SO_ACCEPTCONN;
	if (backlog < 0 || backlog > somaxconn)
		backlog = somaxconn;
	so->so_qlimit = backlog;
	splx(s);
	return (0);
}
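
/*
 * Usage sketch (editor's addition, not in the original source): a
 * kernel consumer sets up a listening socket by chaining the calls
 * above, e.g. for a hypothetical in-kernel TCP listener where `sin'
 * is a sockaddr_in the caller filled in:
 *
 *	struct socket *lso;
 *	int error;
 *
 *	error = socreate(AF_INET, &lso, SOCK_STREAM, IPPROTO_TCP, p);
 *	if (error == 0)
 *		error = sobind(lso, (struct sockaddr *)&sin, p);
 *	if (error == 0)
 *		error = solisten(lso, SOMAXCONN, p);
 */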

void
sofree(so)
	register struct socket *so;
{
	struct socket *head = so->so_head;

	if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
		return;
	if (head != NULL) {
		if (so->so_state & SS_INCOMP) {
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
		} else if (so->so_state & SS_COMP) {
			/*
			 * We must not decommission a socket that's
			 * on the accept(2) queue.  If we do, then
			 * accept(2) may hang after select(2) indicated
			 * that the listening socket was ready.
			 */
			return;
		} else {
			panic("sofree: not queued");
		}
		so->so_state &= ~SS_INCOMP;
		so->so_head = NULL;
	}
	sbrelease(&so->so_snd, so);
	sorflush(so);
	sodealloc(so);
}

/*
 * Close a socket on last file table reference removal.
 * Initiate disconnect if connected.
 * Free socket when disconnect complete.
 */
int
soclose(so)
	register struct socket *so;
{
	int s = splnet();		/* conservative */
	int error = 0;

	funsetown(so->so_sigio);
	if (so->so_options & SO_ACCEPTCONN) {
		struct socket *sp, *sonext;

		sp = TAILQ_FIRST(&so->so_incomp);
		for (; sp != NULL; sp = sonext) {
			sonext = TAILQ_NEXT(sp, so_list);
			(void) soabort(sp);
		}
		for (sp = TAILQ_FIRST(&so->so_comp); sp != NULL; sp = sonext) {
			sonext = TAILQ_NEXT(sp, so_list);
			/* Dequeue from so_comp since sofree() won't do it */
			TAILQ_REMOVE(&so->so_comp, sp, so_list);
			so->so_qlen--;
			sp->so_state &= ~SS_COMP;
			sp->so_head = NULL;
			(void) soabort(sp);
		}
	}
	if (so->so_pcb == 0)
		goto discard;
	if (so->so_state & SS_ISCONNECTED) {
		if ((so->so_state & SS_ISDISCONNECTING) == 0) {
			error = sodisconnect(so);
			if (error)
				goto drop;
		}
		if (so->so_options & SO_LINGER) {
			if ((so->so_state & SS_ISDISCONNECTING) &&
			    (so->so_state & SS_NBIO))
				goto drop;
			while (so->so_state & SS_ISCONNECTED) {
				error = tsleep((caddr_t)&so->so_timeo,
				    PSOCK | PCATCH, "soclos", so->so_linger * hz);
				if (error)
					break;
			}
		}
	}
drop:
	if (so->so_pcb) {
		int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so);
		if (error == 0)
			error = error2;
	}
discard:
	if (so->so_state & SS_NOFDREF)
		panic("soclose: NOFDREF");
	so->so_state |= SS_NOFDREF;
	sofree(so);
	splx(s);
	return (error);
}

/*
 * Must be called at splnet...
 */
int
soabort(so)
	struct socket *so;
{
	int error;

	error = (*so->so_proto->pr_usrreqs->pru_abort)(so);
	if (error) {
		sofree(so);
		return error;
	}
	return (0);
}

int
soaccept(so, nam)
	register struct socket *so;
	struct sockaddr **nam;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_NOFDREF) == 0)
		panic("soaccept: !NOFDREF");
	so->so_state &= ~SS_NOFDREF;
	error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam);
	splx(s);
	return (error);
}

int
soconnect(so, nam, p)
	register struct socket *so;
	struct sockaddr *nam;
	struct proc *p;
{
	int s;
	int error;

	if (so->so_options & SO_ACCEPTCONN)
		return (EOPNOTSUPP);
	s = splnet();
	/*
	 * If protocol is connection-based, can only connect once.
	 * Otherwise, if connected, try to disconnect first.
	 * This allows user to disconnect by connecting to, e.g.,
	 * a null address.
	 */
	if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) ||
	    (error = sodisconnect(so))))
		error = EISCONN;
	else
		error = (*so->so_proto->pr_usrreqs->pru_connect)(so, nam, p);
	splx(s);
	return (error);
}

int
soconnect2(so1, so2)
	register struct socket *so1;
	struct socket *so2;
{
	int s = splnet();
	int error;

	error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2);
	splx(s);
	return (error);
}

int
sodisconnect(so)
	register struct socket *so;
{
	int s = splnet();
	int error;

	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto bad;
	}
	if (so->so_state & SS_ISDISCONNECTING) {
		error = EALREADY;
		goto bad;
	}
	error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so);
bad:
	splx(s);
	return (error);
}

#define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? M_NOWAIT : M_WAITOK)
/*
 * Send on a socket.
 * If send must go all at once and message is larger than
 * send buffering, then hard error.
 * Lock against other senders.
 * If must go all at once and not enough room now, then
 * inform user that this would block and do nothing.
 * Otherwise, if nonblocking, send as much as possible.
 * The data to be sent is described by "uio" if nonzero,
 * otherwise by the mbuf chain "top" (which must be null
 * if uio is not).  Data provided in mbuf chain must be small
 * enough to send all at once.
 *
 * Returns nonzero on error, timeout or signal; callers
 * must check for short counts if EINTR/ERESTART are returned.
 * Data and control buffers are freed on return.
 */
int
sosend(so, addr, uio, top, control, flags, p)
	register struct socket *so;
	struct sockaddr *addr;
	struct uio *uio;
	struct mbuf *top;
	struct mbuf *control;
	int flags;
	struct proc *p;
{
	struct mbuf **mp;
	register struct mbuf *m;
	register long space, len, resid;
	int clen = 0, error, s, dontroute, mlen;
	int atomic = sosendallatonce(so) || top;

	if (uio)
		resid = uio->uio_resid;
	else
		resid = top->m_pkthdr.len;
	/*
	 * In theory resid should be unsigned.
	 * However, space must be signed, as it might be less than 0
	 * if we over-committed, and we must use a signed comparison
	 * of space and resid.  On the other hand, a negative resid
	 * causes us to loop sending 0-length segments to the protocol.
	 *
	 * Also check to make sure that MSG_EOR isn't used on SOCK_STREAM
	 * type sockets since that's an error.
	 */
	if (resid < 0 || (so->so_type == SOCK_STREAM && (flags & MSG_EOR))) {
		error = EINVAL;
		goto out;
	}

	dontroute =
	    (flags & MSG_DONTROUTE) && (so->so_options & SO_DONTROUTE) == 0 &&
	    (so->so_proto->pr_flags & PR_ATOMIC);
	if (p)
		p->p_stats->p_ru.ru_msgsnd++;
	if (control)
		clen = control->m_len;
#define	snderr(errno)	{ error = errno; splx(s); goto release; }

restart:
	error = sblock(&so->so_snd, SBLOCKWAIT(flags));
	if (error)
		goto out;
	do {
		s = splnet();
		if (so->so_state & SS_CANTSENDMORE)
			snderr(EPIPE);
		if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			splx(s);
			goto release;
		}
		if ((so->so_state & SS_ISCONNECTED) == 0) {
			/*
			 * `sendto' and `sendmsg' are allowed on a connection-
			 * based socket if it supports implied connect.
			 * Return ENOTCONN if not connected and no address is
			 * supplied.
			 */
			if ((so->so_proto->pr_flags & PR_CONNREQUIRED) &&
			    (so->so_proto->pr_flags & PR_IMPLOPCL) == 0) {
				if ((so->so_state & SS_ISCONFIRMING) == 0 &&
				    !(resid == 0 && clen != 0))
					snderr(ENOTCONN);
			} else if (addr == 0)
				snderr(so->so_proto->pr_flags & PR_CONNREQUIRED ?
				    ENOTCONN : EDESTADDRREQ);
		}
		space = sbspace(&so->so_snd);
		if (flags & MSG_OOB)
			space += 1024;
		if ((atomic && resid > so->so_snd.sb_hiwat) ||
		    clen > so->so_snd.sb_hiwat)
			snderr(EMSGSIZE);
		if (space < resid + clen &&
		    (atomic || space < so->so_snd.sb_lowat || space < clen)) {
			if (so->so_state & SS_NBIO)
				snderr(EWOULDBLOCK);
			sbunlock(&so->so_snd);
			error = sbwait(&so->so_snd);
			splx(s);
			if (error)
				goto out;
			goto restart;
		}
		splx(s);
		mp = &top;
		space -= clen;
		do {
			if (uio == NULL) {
				/*
				 * Data is prepackaged in "top".
				 */
				resid = 0;
				if (flags & MSG_EOR)
					top->m_flags |= M_EOR;
			} else do {
				if (top == 0) {
					MGETHDR(m, M_WAIT, MT_DATA);
					if (m == NULL) {
						error = ENOBUFS;
						goto release;
					}
					mlen = MHLEN;
					m->m_pkthdr.len = 0;
					m->m_pkthdr.rcvif = (struct ifnet *)0;
				} else {
					MGET(m, M_WAIT, MT_DATA);
					if (m == NULL) {
						error = ENOBUFS;
						goto release;
					}
					mlen = MLEN;
				}
				if (resid >= MINCLSIZE) {
					MCLGET(m, M_WAIT);
					if ((m->m_flags & M_EXT) == 0)
						goto nopages;
					mlen = MCLBYTES;
					len = min(min(mlen, resid), space);
				} else {
nopages:
					len = min(min(mlen, resid), space);
					/*
					 * For datagram protocols, leave room
					 * for protocol headers in first mbuf.
					 */
					if (atomic && top == 0 && len < mlen)
						MH_ALIGN(m, len);
				}
				space -= len;
				error = uiomove(mtod(m, caddr_t), (int)len, uio);
				resid = uio->uio_resid;
				m->m_len = len;
				*mp = m;
				top->m_pkthdr.len += len;
				if (error)
					goto release;
				mp = &m->m_next;
				if (resid <= 0) {
					if (flags & MSG_EOR)
						top->m_flags |= M_EOR;
					break;
				}
			} while (space > 0 && atomic);
			if (dontroute)
				so->so_options |= SO_DONTROUTE;
			s = splnet();				/* XXX */
			/*
			 * XXX all the SS_CANTSENDMORE checks previously
			 * done could be out of date.  We could have received
			 * a reset packet in an interrupt or maybe we slept
			 * while doing page faults in uiomove() etc.  We could
			 * probably recheck again inside the splnet() protection
			 * here, but there are probably other places that this
			 * also happens.  We must rethink this.
			 */
			error = (*so->so_proto->pr_usrreqs->pru_send)(so,
			    (flags & MSG_OOB) ? PRUS_OOB :
			    /*
			     * If the user set MSG_EOF, the protocol
			     * understands this flag, and there is nothing
			     * left to send, then use PRU_SEND_EOF instead
			     * of PRU_SEND.
			     */
			    ((flags & MSG_EOF) &&
			     (so->so_proto->pr_flags & PR_IMPLOPCL) &&
			     (resid <= 0)) ?
				PRUS_EOF :
			    /* If there is more to send set PRUS_MORETOCOME */
			    (resid > 0 && space > 0) ? PRUS_MORETOCOME : 0,
			    top, addr, control, p);
			splx(s);
			if (dontroute)
				so->so_options &= ~SO_DONTROUTE;
			clen = 0;
			control = 0;
			top = 0;
			mp = &top;
			if (error)
				goto release;
		} while (resid && space > 0);
	} while (resid);

release:
	sbunlock(&so->so_snd);
out:
	if (top)
		m_freem(top);
	if (control)
		m_freem(control);
	return (error);
}
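
/*
 * Usage sketch (editor's addition, not in the original source): per the
 * comment above sosend(), the payload comes either from a uio or from a
 * prebuilt mbuf chain, never both:
 *
 *	error = sosend(so, NULL, uio, NULL, NULL, 0, p);    (uio form)
 *	error = sosend(so, NULL, NULL, top, NULL, 0, p);    (mbuf form)
 *
 * A datagram send supplies the destination in `addr' and must fit the
 * whole message in the send buffer at once (else EMSGSIZE).
 */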

/*
 * Implement receive operations on a socket.
 * We depend on the way that records are added to the sockbuf
 * by sbappend*.  In particular, each record (mbufs linked through m_next)
 * must begin with an address if the protocol so specifies,
 * followed by an optional mbuf or mbufs containing ancillary data,
 * and then zero or more mbufs of data.
 * In order to avoid blocking network interrupts for the entire time here,
 * we splx() while doing the actual copy to user space.
 * Although the sockbuf is locked, new data may still be appended,
 * and thus we must maintain consistency of the sockbuf during that time.
 *
 * The caller may receive the data as a single mbuf chain by supplying
 * an mbuf **mp0 for use in returning the chain.  The uio is then used
 * only for the count in uio_resid.
 */
int
soreceive(so, psa, uio, mp0, controlp, flagsp)
	register struct socket *so;
	struct sockaddr **psa;
	struct uio *uio;
	struct mbuf **mp0;
	struct mbuf **controlp;
	int *flagsp;
{
	register struct mbuf *m, **mp;
	register int flags, len, error, s, offset;
	struct protosw *pr = so->so_proto;
	struct mbuf *nextrecord;
	int moff, type = 0;
	int orig_resid = uio->uio_resid;

	mp = mp0;
	if (psa)
		*psa = 0;
	if (controlp)
		*controlp = 0;
	if (flagsp)
		flags = *flagsp &~ MSG_EOR;
	else
		flags = 0;
	if (flags & MSG_OOB) {
		m = m_get(M_WAIT, MT_DATA);
		if (m == NULL)
			return (ENOBUFS);
		error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK);
		if (error)
			goto bad;
		do {
			error = uiomove(mtod(m, caddr_t),
			    (int) min(uio->uio_resid, m->m_len), uio);
			m = m_free(m);
		} while (uio->uio_resid && error == 0 && m);
bad:
		if (m)
			m_freem(m);
		return (error);
	}
	if (mp)
		*mp = (struct mbuf *)0;
	if (so->so_state & SS_ISCONFIRMING && uio->uio_resid)
		(*pr->pr_usrreqs->pru_rcvd)(so, 0);

restart:
	error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
	if (error)
		return (error);
	s = splnet();

	m = so->so_rcv.sb_mb;
	/*
	 * If we have less data than requested, block awaiting more
	 * (subject to any timeout) if:
	 *   1. the current count is less than the low water mark, or
	 *   2. MSG_WAITALL is set, and it is possible to do the entire
	 *	receive operation at once if we block (resid <= hiwat), or
	 *   3. MSG_DONTWAIT is not set.
	 * If MSG_WAITALL is set but resid is larger than the receive buffer,
	 * we have to do the receive in sections, and thus risk returning
	 * a short count if a timeout or signal occurs after we start.
	 */
	if (m == 0 || (((flags & MSG_DONTWAIT) == 0 &&
	    so->so_rcv.sb_cc < uio->uio_resid) &&
	    (so->so_rcv.sb_cc < so->so_rcv.sb_lowat ||
	    ((flags & MSG_WAITALL) && uio->uio_resid <= so->so_rcv.sb_hiwat)) &&
	    m->m_nextpkt == 0 && (pr->pr_flags & PR_ATOMIC) == 0)) {
		KASSERT(m != 0 || !so->so_rcv.sb_cc, ("receive 1"));
		if (so->so_error) {
			if (m)
				goto dontblock;
			error = so->so_error;
			if ((flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto release;
		}
		if (so->so_state & SS_CANTRCVMORE) {
			if (m)
				goto dontblock;
			else
				goto release;
		}
		for (; m; m = m->m_next)
			if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) {
				m = so->so_rcv.sb_mb;
				goto dontblock;
			}
		if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 &&
		    (so->so_proto->pr_flags & PR_CONNREQUIRED)) {
			error = ENOTCONN;
			goto release;
		}
		if (uio->uio_resid == 0)
			goto release;
		if ((so->so_state & SS_NBIO) || (flags & MSG_DONTWAIT)) {
			error = EWOULDBLOCK;
			goto release;
		}
		sbunlock(&so->so_rcv);
		error = sbwait(&so->so_rcv);
		splx(s);
		if (error)
			return (error);
		goto restart;
	}
dontblock:
	if (uio->uio_procp)
		uio->uio_procp->p_stats->p_ru.ru_msgrcv++;
	nextrecord = m->m_nextpkt;
	if (pr->pr_flags & PR_ADDR) {
		KASSERT(m->m_type == MT_SONAME, ("receive 1a"));
		orig_resid = 0;
		if (psa)
			*psa = dup_sockaddr(mtod(m, struct sockaddr *),
			    mp0 == 0);
		if (flags & MSG_PEEK) {
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			so->so_rcv.sb_mb = m_free(m);
			m = so->so_rcv.sb_mb;
		}
	}
	while (m && m->m_type == MT_CONTROL && error == 0) {
		if (flags & MSG_PEEK) {
			if (controlp)
				*controlp = m_copy(m, 0, m->m_len);
			m = m->m_next;
		} else {
			sbfree(&so->so_rcv, m);
			if (controlp) {
				if (pr->pr_domain->dom_externalize &&
				    mtod(m, struct cmsghdr *)->cmsg_type ==
				    SCM_RIGHTS)
					error = (*pr->pr_domain->dom_externalize)(m);
				*controlp = m;
				so->so_rcv.sb_mb = m->m_next;
				m->m_next = 0;
				m = so->so_rcv.sb_mb;
			} else {
				so->so_rcv.sb_mb = m_free(m);
				m = so->so_rcv.sb_mb;
			}
		}
		if (controlp) {
			orig_resid = 0;
			controlp = &(*controlp)->m_next;
		}
	}
	if (m) {
		if ((flags & MSG_PEEK) == 0)
			m->m_nextpkt = nextrecord;
		type = m->m_type;
		if (type == MT_OOBDATA)
			flags |= MSG_OOB;
	}
	moff = 0;
	offset = 0;
	while (m && uio->uio_resid > 0 && error == 0) {
		if (m->m_type == MT_OOBDATA) {
			if (type != MT_OOBDATA)
				break;
		} else if (type == MT_OOBDATA)
			break;
		else
			KASSERT(m->m_type == MT_DATA || m->m_type == MT_HEADER,
			    ("receive 3"));
		so->so_state &= ~SS_RCVATMARK;
		len = uio->uio_resid;
		if (so->so_oobmark && len > so->so_oobmark - offset)
			len = so->so_oobmark - offset;
		if (len > m->m_len - moff)
			len = m->m_len - moff;
		/*
		 * If mp is set, just pass back the mbufs.
		 * Otherwise copy them out via the uio, then free.
		 * Sockbuf must be consistent here (points to current mbuf,
		 * it points to next record) when we drop priority;
		 * we must note any additions to the sockbuf when we
		 * block interrupts again.
		 */
		if (mp == 0) {
			splx(s);
			error = uiomove(mtod(m, caddr_t) + moff, (int)len, uio);
			s = splnet();
			if (error)
				goto release;
		} else
			uio->uio_resid -= len;
		if (len == m->m_len - moff) {
			if (m->m_flags & M_EOR)
				flags |= MSG_EOR;
			if (flags & MSG_PEEK) {
				m = m->m_next;
				moff = 0;
			} else {
				nextrecord = m->m_nextpkt;
				sbfree(&so->so_rcv, m);
				if (mp) {
					*mp = m;
					mp = &m->m_next;
					so->so_rcv.sb_mb = m = m->m_next;
					*mp = (struct mbuf *)0;
				} else {
					so->so_rcv.sb_mb = m = m_free(m);
				}
				if (m)
					m->m_nextpkt = nextrecord;
			}
		} else {
			if (flags & MSG_PEEK)
				moff += len;
			else {
				if (mp)
					*mp = m_copym(m, 0, len, M_WAIT);
				m->m_data += len;
				m->m_len -= len;
				so->so_rcv.sb_cc -= len;
			}
		}
		if (so->so_oobmark) {
			if ((flags & MSG_PEEK) == 0) {
				so->so_oobmark -= len;
				if (so->so_oobmark == 0) {
					so->so_state |= SS_RCVATMARK;
					break;
				}
			} else {
				offset += len;
				if (offset == so->so_oobmark)
					break;
			}
		}
		if (flags & MSG_EOR)
			break;
		/*
		 * If the MSG_WAITALL flag is set (for non-atomic socket),
		 * we must not quit until "uio->uio_resid == 0" or an error
		 * termination.  If a signal/timeout occurs, return
		 * with a short count but without error.
		 * Keep sockbuf locked against other readers.
		 */
		while (flags & MSG_WAITALL && m == 0 && uio->uio_resid > 0 &&
		    !sosendallatonce(so) && !nextrecord) {
			if (so->so_error || so->so_state & SS_CANTRCVMORE)
				break;
			/*
			 * The window might have closed to zero; make
			 * sure we send an ack now that we've drained
			 * the buffer or we might end up blocking until
			 * the idle takes over (5 seconds).
			 */
			if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
				(*pr->pr_usrreqs->pru_rcvd)(so, flags);
			error = sbwait(&so->so_rcv);
			if (error) {
				sbunlock(&so->so_rcv);
				splx(s);
				return (0);
			}
			m = so->so_rcv.sb_mb;
			if (m)
				nextrecord = m->m_nextpkt;
		}
	}

	if (m && pr->pr_flags & PR_ATOMIC) {
		flags |= MSG_TRUNC;
		if ((flags & MSG_PEEK) == 0)
			(void) sbdroprecord(&so->so_rcv);
	}
	if ((flags & MSG_PEEK) == 0) {
		if (m == 0)
			so->so_rcv.sb_mb = nextrecord;
		if (pr->pr_flags & PR_WANTRCVD && so->so_pcb)
			(*pr->pr_usrreqs->pru_rcvd)(so, flags);
	}
	if (orig_resid == uio->uio_resid && orig_resid &&
	    (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) {
		sbunlock(&so->so_rcv);
		splx(s);
		goto restart;
	}

	if (flagsp)
		*flagsp |= flags;
release:
	sbunlock(&so->so_rcv);
	splx(s);
	return (error);
}
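
/*
 * Usage sketch (editor's addition, not in the original source):
 *
 *	flags = MSG_PEEK;
 *	error = soreceive(so, NULL, uio, NULL, NULL, &flags);
 *
 * peeks at data without consuming it, while passing a non-NULL mp0
 * returns the raw mbuf chain and uses the uio only for the count in
 * uio_resid, as the comment above soreceive() describes.
 */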

int
soshutdown(so, how)
	register struct socket *so;
	register int how;
{
	register struct protosw *pr = so->so_proto;

	if (!(how == SHUT_RD || how == SHUT_WR || how == SHUT_RDWR))
		return (EINVAL);

	if (how != SHUT_WR)
		sorflush(so);
	if (how != SHUT_RD)
		return ((*pr->pr_usrreqs->pru_shutdown)(so));
	return (0);
}

void
sorflush(so)
	register struct socket *so;
{
	register struct sockbuf *sb = &so->so_rcv;
	register struct protosw *pr = so->so_proto;
	register int s;
	struct sockbuf asb;

	sb->sb_flags |= SB_NOINTR;
	(void) sblock(sb, M_WAITOK);
	s = splimp();
	socantrcvmore(so);
	sbunlock(sb);
	asb = *sb;
	bzero((caddr_t)sb, sizeof (*sb));
	if (asb.sb_flags & SB_KNOTE) {
		sb->sb_sel.si_note = asb.sb_sel.si_note;
		sb->sb_flags = SB_KNOTE;
	}
	splx(s);
	if (pr->pr_flags & PR_RIGHTS && pr->pr_domain->dom_dispose)
		(*pr->pr_domain->dom_dispose)(asb.sb_mb);
	sbrelease(&asb, so);
}

#ifdef INET
static int
do_setopt_accept_filter(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	struct accept_filter_arg *afap = NULL;
	struct accept_filter *afp;
	struct so_accf *af = so->so_accf;
	int error = 0;

	/* do not set/remove accept filters on non listen sockets */
	if ((so->so_options & SO_ACCEPTCONN) == 0) {
		error = EINVAL;
		goto out;
	}

	/* removing the filter */
	if (sopt == NULL) {
		if (af != NULL) {
			if (af->so_accept_filter != NULL &&
			    af->so_accept_filter->accf_destroy != NULL) {
				af->so_accept_filter->accf_destroy(so);
			}
			if (af->so_accept_filter_str != NULL) {
				FREE(af->so_accept_filter_str, M_ACCF);
			}
			FREE(af, M_ACCF);
			so->so_accf = NULL;
		}
		so->so_options &= ~SO_ACCEPTFILTER;
		return (0);
	}
	/* adding a filter */
	/* must remove previous filter first */
	if (af != NULL) {
		error = EINVAL;
		goto out;
	}
	/* don't put large objects on the kernel stack */
	MALLOC(afap, struct accept_filter_arg *, sizeof(*afap), M_TEMP, M_WAITOK);
	error = sooptcopyin(sopt, afap, sizeof *afap, sizeof *afap);
	afap->af_name[sizeof(afap->af_name)-1] = '\0';
	afap->af_arg[sizeof(afap->af_arg)-1] = '\0';
	if (error)
		goto out;
	afp = accept_filt_get(afap->af_name);
	if (afp == NULL) {
		error = ENOENT;
		goto out;
	}
	MALLOC(af, struct so_accf *, sizeof(*af), M_ACCF, M_WAITOK);
	bzero(af, sizeof(*af));
	if (afp->accf_create != NULL) {
		if (afap->af_name[0] != '\0') {
			int len = strlen(afap->af_name) + 1;

			MALLOC(af->so_accept_filter_str, char *, len, M_ACCF, M_WAITOK);
			strcpy(af->so_accept_filter_str, afap->af_name);
		}
		af->so_accept_filter_arg = afp->accf_create(so, afap->af_arg);
		if (af->so_accept_filter_arg == NULL) {
			FREE(af->so_accept_filter_str, M_ACCF);
			FREE(af, M_ACCF);
			so->so_accf = NULL;
			error = EINVAL;
			goto out;
		}
	}
	af->so_accept_filter = afp;
	so->so_accf = af;
	so->so_options |= SO_ACCEPTFILTER;
out:
	if (afap != NULL)
		FREE(afap, M_TEMP);
	return (error);
}
#endif /* INET */
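
/*
 * Usage sketch (editor's addition, not in the original source): from
 * userland an accept filter reaches do_setopt_accept_filter() via
 * setsockopt(2) on a listening socket:
 *
 *	struct accept_filter_arg afa;
 *
 *	bzero(&afa, sizeof(afa));
 *	strcpy(afa.af_name, "dataready");
 *	setsockopt(s, SOL_SOCKET, SO_ACCEPTFILTER, &afa, sizeof(afa));
 *
 * The name must match a filter registered with the kernel; "dataready"
 * is the name the stock accf_data filter registers under.
 */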

/*
 * Perhaps this routine, and sooptcopyout(), below, ought to come in
 * an additional variant to handle the case where the option value needs
 * to be some kind of integer, but not a specific size.
 * In addition to their use here, these functions are also called by the
 * protocol-level pr_ctloutput() routines.
 */
int
sooptcopyin(sopt, buf, len, minlen)
	struct sockopt *sopt;
	void *buf;
	size_t len;
	size_t minlen;
{
	size_t valsize;

	/*
	 * If the user gives us more than we wanted, we ignore it,
	 * but if we don't get the minimum length the caller
	 * wants, we return EINVAL.  On success, sopt->sopt_valsize
	 * is set to however much we actually retrieved.
	 */
	if ((valsize = sopt->sopt_valsize) < minlen)
		return EINVAL;
	if (valsize > len)
		sopt->sopt_valsize = valsize = len;

	if (sopt->sopt_p != 0)
		return (copyin(sopt->sopt_val, buf, valsize));

	bcopy(sopt->sopt_val, buf, valsize);
	return 0;
}
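
/*
 * Usage sketch (editor's addition, not in the original source): a
 * pr_ctloutput() handler typically fetches a fixed-size integer
 * option with
 *
 *	int optval;
 *
 *	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
 *
 * exactly the pattern sosetopt() below uses for SO_SNDBUF and friends.
 */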

int
sosetopt(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	u_long val;

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput)
			return ((*so->so_proto->pr_ctloutput)(so, sopt));
		error = ENOPROTOOPT;
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			error = do_setopt_accept_filter(so, sopt);
			if (error)
				goto bad;
			break;
#endif /* INET */
		case SO_LINGER:
			error = sooptcopyin(sopt, &l, sizeof l, sizeof l);
			if (error)
				goto bad;

			so->so_linger = l.l_linger;
			if (l.l_onoff)
				so->so_options |= SO_LINGER;
			else
				so->so_options &= ~SO_LINGER;
			break;

		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_DONTROUTE:
		case SO_USELOOPBACK:
		case SO_BROADCAST:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;
			if (optval)
				so->so_options |= sopt->sopt_name;
			else
				so->so_options &= ~sopt->sopt_name;
			break;

		case SO_SNDBUF:
		case SO_RCVBUF:
		case SO_SNDLOWAT:
		case SO_RCVLOWAT:
			error = sooptcopyin(sopt, &optval, sizeof optval,
			    sizeof optval);
			if (error)
				goto bad;

			/*
			 * Values < 1 make no sense for any of these
			 * options, so disallow them.
			 */
			if (optval < 1) {
				error = EINVAL;
				goto bad;
			}

			switch (sopt->sopt_name) {
			case SO_SNDBUF:
			case SO_RCVBUF:
				if (sbreserve(sopt->sopt_name == SO_SNDBUF ?
				    &so->so_snd : &so->so_rcv, (u_long)optval,
				    so, curproc) == 0) {
					error = ENOBUFS;
					goto bad;
				}
				break;

			/*
			 * Make sure the low-water is never greater than
			 * the high-water.
			 */
			case SO_SNDLOWAT:
				so->so_snd.sb_lowat =
				    (optval > so->so_snd.sb_hiwat) ?
				    so->so_snd.sb_hiwat : optval;
				break;
			case SO_RCVLOWAT:
				so->so_rcv.sb_lowat =
				    (optval > so->so_rcv.sb_hiwat) ?
				    so->so_rcv.sb_hiwat : optval;
				break;
			}
			break;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			error = sooptcopyin(sopt, &tv, sizeof tv,
			    sizeof tv);
			if (error)
				goto bad;

			/* assert(hz > 0); */
			if (tv.tv_sec < 0 || tv.tv_sec > SHRT_MAX / hz ||
			    tv.tv_usec < 0 || tv.tv_usec >= 1000000) {
				error = EDOM;
				goto bad;
			}
			/* assert(tick > 0); */
			/* assert(ULONG_MAX - SHRT_MAX >= 1000000); */
			val = (u_long)(tv.tv_sec * hz) + tv.tv_usec / tick;
			if (val > SHRT_MAX) {
				error = EDOM;
				goto bad;
			}
			if (val == 0 && tv.tv_usec != 0)
				val = 1;

			switch (sopt->sopt_name) {
			case SO_SNDTIMEO:
				so->so_snd.sb_timeo = val;
				break;
			case SO_RCVTIMEO:
				so->so_rcv.sb_timeo = val;
				break;
			}
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		if (error == 0 && so->so_proto && so->so_proto->pr_ctloutput) {
			(void) ((*so->so_proto->pr_ctloutput)(so, sopt));
		}
	}
bad:
	return (error);
}

/* Helper routine for getsockopt */
int
sooptcopyout(sopt, buf, len)
	struct sockopt *sopt;
	void *buf;
	size_t len;
{
	int error;
	size_t valsize;

	error = 0;

	/*
	 * Documented get behavior is that we always return a value,
	 * possibly truncated to fit in the user's buffer.
	 * Traditional behavior is that we always tell the user
	 * precisely how much we copied, rather than something useful
	 * like the total amount we had available for her.
	 * Note that this interface is not idempotent; the entire answer
	 * must be generated ahead of time.
	 */
	valsize = min(len, sopt->sopt_valsize);
	sopt->sopt_valsize = valsize;
	if (sopt->sopt_val != 0) {
		if (sopt->sopt_p != 0)
			error = copyout(buf, sopt->sopt_val, valsize);
		else
			bcopy(buf, sopt->sopt_val, valsize);
	}
	return error;
}
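
/*
 * Usage sketch (editor's addition, not in the original source): the
 * get side mirrors sooptcopyin(); returning an integer option is just
 *
 *	optval = so->so_type;
 *	error = sooptcopyout(sopt, &optval, sizeof optval);
 *
 * with the copy truncated to the user's buffer as documented above.
 * sogetopt() below uses this pattern for every integer-valued option.
 */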

int
sogetopt(so, sopt)
	struct socket *so;
	struct sockopt *sopt;
{
	int error, optval;
	struct linger l;
	struct timeval tv;
	struct accept_filter_arg *afap;

	error = 0;
	if (sopt->sopt_level != SOL_SOCKET) {
		if (so->so_proto && so->so_proto->pr_ctloutput) {
			return ((*so->so_proto->pr_ctloutput)(so, sopt));
		} else
			return (ENOPROTOOPT);
	} else {
		switch (sopt->sopt_name) {
#ifdef INET
		case SO_ACCEPTFILTER:
			if ((so->so_options & SO_ACCEPTCONN) == 0)
				return (EINVAL);
			MALLOC(afap, struct accept_filter_arg *, sizeof(*afap),
			    M_TEMP, M_WAITOK);
			bzero(afap, sizeof(*afap));
			if ((so->so_options & SO_ACCEPTFILTER) != 0) {
				strcpy(afap->af_name, so->so_accf->so_accept_filter->accf_name);
				if (so->so_accf->so_accept_filter_str != NULL)
					strcpy(afap->af_arg, so->so_accf->so_accept_filter_str);
			}
			error = sooptcopyout(sopt, afap, sizeof(*afap));
			FREE(afap, M_TEMP);
			break;
#endif /* INET */

		case SO_LINGER:
			l.l_onoff = so->so_options & SO_LINGER;
			l.l_linger = so->so_linger;
			error = sooptcopyout(sopt, &l, sizeof l);
			break;

		case SO_USELOOPBACK:
		case SO_DONTROUTE:
		case SO_DEBUG:
		case SO_KEEPALIVE:
		case SO_REUSEADDR:
		case SO_REUSEPORT:
		case SO_BROADCAST:
		case SO_OOBINLINE:
		case SO_TIMESTAMP:
			optval = so->so_options & sopt->sopt_name;
integer:
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case SO_TYPE:
			optval = so->so_type;
			goto integer;

		case SO_ERROR:
			optval = so->so_error;
			so->so_error = 0;
			goto integer;

		case SO_SNDBUF:
			optval = so->so_snd.sb_hiwat;
			goto integer;

		case SO_RCVBUF:
			optval = so->so_rcv.sb_hiwat;
			goto integer;

		case SO_SNDLOWAT:
			optval = so->so_snd.sb_lowat;
			goto integer;

		case SO_RCVLOWAT:
			optval = so->so_rcv.sb_lowat;
			goto integer;

		case SO_SNDTIMEO:
		case SO_RCVTIMEO:
			optval = (sopt->sopt_name == SO_SNDTIMEO ?
			    so->so_snd.sb_timeo : so->so_rcv.sb_timeo);

			tv.tv_sec = optval / hz;
			tv.tv_usec = (optval % hz) * tick;
			error = sooptcopyout(sopt, &tv, sizeof tv);
			break;

		default:
			error = ENOPROTOOPT;
			break;
		}
		return (error);
	}
}

/* XXX; prepare mbuf for (__FreeBSD__ < 3) routines. */
int
soopt_getm(struct sockopt *sopt, struct mbuf **mp)
{
	struct mbuf *m, *m_prev;
	int sopt_size = sopt->sopt_valsize;

	MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
	if (m == 0)
		return ENOBUFS;
	if (sopt_size > MLEN) {
		MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return ENOBUFS;
		}
		m->m_len = min(MCLBYTES, sopt_size);
	} else {
		m->m_len = min(MLEN, sopt_size);
	}
	sopt_size -= m->m_len;
	*mp = m;
	m_prev = m;

	while (sopt_size) {
		MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
		if (m == 0) {
			m_freem(*mp);
			return ENOBUFS;
		}
		if (sopt_size > MLEN) {
			MCLGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(*mp);
				return ENOBUFS;
			}
			m->m_len = min(MCLBYTES, sopt_size);
		} else {
			m->m_len = min(MLEN, sopt_size);
		}
		sopt_size -= m->m_len;
		m_prev->m_next = m;
		m_prev = m;
	}
	return 0;
}

/* XXX; copyin sopt data into mbuf chain for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyin(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;

	if (sopt->sopt_val == NULL)
		return 0;
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_p != NULL) {
			int error;

			error = copyin(sopt->sopt_val, mtod(m, char *),
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(sopt->sopt_val, mtod(m, char *), m->m_len);
		sopt->sopt_valsize -= m->m_len;
		(caddr_t)sopt->sopt_val += m->m_len;
		m = m->m_next;
	}
	if (m != NULL)	/* enough mbufs should have been allocated by soopt_getm() */
		panic("ip6_sooptmcopyin");
	return 0;
}

/* XXX; copyout mbuf chain data into soopt for (__FreeBSD__ < 3) routines. */
int
soopt_mcopyout(struct sockopt *sopt, struct mbuf *m)
{
	struct mbuf *m0 = m;
	size_t valsize = 0;

	if (sopt->sopt_val == NULL)
		return 0;
	while (m != NULL && sopt->sopt_valsize >= m->m_len) {
		if (sopt->sopt_p != NULL) {
			int error;

			error = copyout(mtod(m, char *), sopt->sopt_val,
			    m->m_len);
			if (error != 0) {
				m_freem(m0);
				return (error);
			}
		} else
			bcopy(mtod(m, char *), sopt->sopt_val, m->m_len);
		sopt->sopt_valsize -= m->m_len;
		(caddr_t)sopt->sopt_val += m->m_len;
		valsize += m->m_len;
		m = m->m_next;
	}
	if (m != NULL) {
		/* the user-supplied soopt buffer was not large enough */
		m_freem(m0);
		return (EINVAL);
	}
	sopt->sopt_valsize = valsize;
	return 0;
}
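
/*
 * Usage sketch (editor's addition, not in the original source): the
 * three helpers above shuttle option data through an mbuf chain for
 * the older mbuf-based ctloutput interfaces:
 *
 *	struct mbuf *m;
 *
 *	error = soopt_getm(sopt, &m);		-- size a chain from sopt
 *	if (error == 0)
 *		error = soopt_mcopyin(sopt, m);	-- fill it from sopt_val
 *	...protocol consumes or rewrites the chain...
 *	error = soopt_mcopyout(sopt, m);	-- copy results back out
 */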

void
sohasoutofband(so)
	register struct socket *so;
{
	if (so->so_sigio != NULL)
		pgsigio(so->so_sigio, SIGURG, 0);
	selwakeup(&so->so_rcv.sb_sel);
}

int
sopoll(struct socket *so, int events, struct ucred *cred, struct proc *p)
{
	int revents = 0;
	int s = splnet();

	if (events & (POLLIN | POLLRDNORM))
		if (soreadable(so))
			revents |= events & (POLLIN | POLLRDNORM);

	if (events & (POLLOUT | POLLWRNORM))
		if (sowriteable(so))
			revents |= events & (POLLOUT | POLLWRNORM);

	if (events & (POLLPRI | POLLRDBAND))
		if (so->so_oobmark || (so->so_state & SS_RCVATMARK))
			revents |= events & (POLLPRI | POLLRDBAND);

	if (revents == 0) {
		if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
			selrecord(p, &so->so_rcv.sb_sel);
			so->so_rcv.sb_flags |= SB_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(p, &so->so_snd.sb_sel);
			so->so_snd.sb_flags |= SB_SEL;
		}
	}

	splx(s);
	return (revents);
}

int
sokqfilter(struct file *fp, struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	struct sockbuf *sb;
	int s;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		if (so->so_options & SO_ACCEPTCONN)
			kn->kn_fop = &solisten_filtops;
		else
			kn->kn_fop = &soread_filtops;
		sb = &so->so_rcv;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &sowrite_filtops;
		sb = &so->so_snd;
		break;
	default:
		return (1);
	}

	s = splnet();
	SLIST_INSERT_HEAD(&sb->sb_sel.si_note, kn, kn_selnext);
	sb->sb_flags |= SB_KNOTE;
	splx(s);
	return (0);
}
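
/*
 * Usage sketch (editor's addition, not in the original source): a
 * userland kevent(2) registration such as
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, s, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * lands in sokqfilter() above, which selects solisten_filtops for a
 * listening socket and soread_filtops otherwise.
 */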

static void
filt_sordetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	int s = splnet();

	SLIST_REMOVE(&so->so_rcv.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_rcv.sb_sel.si_note))
		so->so_rcv.sb_flags &= ~SB_KNOTE;
	splx(s);
}

/*ARGSUSED*/
static int
filt_soread(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_rcv.sb_cc;
	if (so->so_state & SS_CANTRCVMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_rcv.sb_lowat);
}

static void
filt_sowdetach(struct knote *kn)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;
	int s = splnet();

	SLIST_REMOVE(&so->so_snd.sb_sel.si_note, kn, knote, kn_selnext);
	if (SLIST_EMPTY(&so->so_snd.sb_sel.si_note))
		so->so_snd.sb_flags &= ~SB_KNOTE;
	splx(s);
}

/*ARGSUSED*/
static int
filt_sowrite(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = sbspace(&so->so_snd);
	if (so->so_state & SS_CANTSENDMORE) {
		kn->kn_flags |= EV_EOF;
		kn->kn_fflags = so->so_error;
		return (1);
	}
	if (so->so_error)	/* temporary udp error */
		return (1);
	if (((so->so_state & SS_ISCONNECTED) == 0) &&
	    (so->so_proto->pr_flags & PR_CONNREQUIRED))
		return (0);
	if (kn->kn_sfflags & NOTE_LOWAT)
		return (kn->kn_data >= kn->kn_sdata);
	return (kn->kn_data >= so->so_snd.sb_lowat);
}

/*ARGSUSED*/
static int
filt_solisten(struct knote *kn, long hint)
{
	struct socket *so = (struct socket *)kn->kn_fp->f_data;

	kn->kn_data = so->so_qlen;
	return (! TAILQ_EMPTY(&so->so_comp));
}