/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)sys_socket.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/aio.h>
#include <sys/domain.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/sigio.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/filio.h>			/* XXX */
#include <sys/sockio.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <sys/ucred.h>
#include <sys/un.h>
#include <sys/unpcb.h>
#include <sys/user.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

static SYSCTL_NODE(_kern_ipc, OID_AUTO, aio, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "socket AIO stats");

static int empty_results;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, empty_results, CTLFLAG_RD, &empty_results,
    0, "socket operation returned EAGAIN");

static int empty_retries;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, empty_retries, CTLFLAG_RD, &empty_retries,
    0, "socket operation retries");
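
/*
 * Both counters are maintained by soaio_process_job() below:
 * empty_results is bumped when a dequeued job's socket I/O returns
 * EWOULDBLOCK without progress, and empty_retries when such a job is
 * retried at once because the socket turned ready again in the
 * meantime.
 */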

static fo_rdwr_t soo_read;
static fo_rdwr_t soo_write;
static fo_ioctl_t soo_ioctl;
static fo_poll_t soo_poll;
extern fo_kqfilter_t soo_kqfilter;
static fo_stat_t soo_stat;
static fo_close_t soo_close;
static fo_fill_kinfo_t soo_fill_kinfo;
static fo_aio_queue_t soo_aio_queue;

static void soo_aio_cancel(struct kaiocb *job);
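
/*
 * Every socket file descriptor uses this fileops vector, so plain
 * read(2), write(2), ioctl(2), poll(2) and kevent(2) on a socket are
 * dispatched to the soo_*() handlers below.  Operations that make no
 * sense on a socket (ftruncate(2), fchmod(2), fchown(2), acting as a
 * sendfile(2) source) are pointed at the invfo_*() stubs, which simply
 * fail.  DFLAG_PASSABLE marks the descriptor as one that may be passed
 * to another process over a unix(4) domain socket.
 */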

struct fileops socketops = {
	.fo_read = soo_read,
	.fo_write = soo_write,
	.fo_truncate = invfo_truncate,
	.fo_ioctl = soo_ioctl,
	.fo_poll = soo_poll,
	.fo_kqfilter = soo_kqfilter,
	.fo_stat = soo_stat,
	.fo_close = soo_close,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_fill_kinfo = soo_fill_kinfo,
	.fo_aio_queue = soo_aio_queue,
	.fo_flags = DFLAG_PASSABLE
};

static int
soo_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct socket *so = fp->f_data;
	int error;

#ifdef MAC
	error = mac_socket_check_receive(active_cred, so);
	if (error)
		return (error);
#endif
	error = soreceive(so, 0, uio, 0, 0, 0);
	return (error);
}

static int
soo_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct socket *so = fp->f_data;
	int error;

#ifdef MAC
	error = mac_socket_check_send(active_cred, so);
	if (error)
		return (error);
#endif
	error = sosend(so, 0, uio, 0, 0, 0, uio->uio_td);
	if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0) {
		PROC_LOCK(uio->uio_td->td_proc);
		tdsignal(uio->uio_td, SIGPIPE);
		PROC_UNLOCK(uio->uio_td->td_proc);
	}
	return (error);
}
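
/*
 * soo_write() preserves the historic write(2) semantics for sockets:
 * writing on a connection that is shut down for writing fails with
 * EPIPE and additionally raises SIGPIPE in the writing thread, unless
 * the application opted out via the SO_NOSIGPIPE socket option (or by
 * using send(2) with MSG_NOSIGNAL instead of write(2)).
 */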

static int
soo_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *active_cred,
    struct thread *td)
{
	struct socket *so = fp->f_data;
	int error = 0;

	switch (cmd) {
	case FIONBIO:
		SOCK_LOCK(so);
		if (*(int *)data)
			so->so_state |= SS_NBIO;
		else
			so->so_state &= ~SS_NBIO;
		SOCK_UNLOCK(so);
		break;
	case FIOASYNC:
		if (*(int *)data) {
			SOCK_LOCK(so);
			so->so_state |= SS_ASYNC;
			if (SOLISTENING(so)) {
				so->sol_sbrcv_flags |= SB_ASYNC;
				so->sol_sbsnd_flags |= SB_ASYNC;
			} else {
				SOCK_RECVBUF_LOCK(so);
				so->so_rcv.sb_flags |= SB_ASYNC;
				SOCK_RECVBUF_UNLOCK(so);
				SOCK_SENDBUF_LOCK(so);
				so->so_snd.sb_flags |= SB_ASYNC;
				SOCK_SENDBUF_UNLOCK(so);
			}
			SOCK_UNLOCK(so);
		} else {
			SOCK_LOCK(so);
			so->so_state &= ~SS_ASYNC;
			if (SOLISTENING(so)) {
				so->sol_sbrcv_flags &= ~SB_ASYNC;
				so->sol_sbsnd_flags &= ~SB_ASYNC;
			} else {
				SOCK_RECVBUF_LOCK(so);
				so->so_rcv.sb_flags &= ~SB_ASYNC;
				SOCK_RECVBUF_UNLOCK(so);
				SOCK_SENDBUF_LOCK(so);
				so->so_snd.sb_flags &= ~SB_ASYNC;
				SOCK_SENDBUF_UNLOCK(so);
			}
			SOCK_UNLOCK(so);
		}
		break;
	case FIONREAD:
		SOCK_RECVBUF_LOCK(so);
		if (SOLISTENING(so)) {
			error = EINVAL;
		} else {
			*(int *)data = sbavail(&so->so_rcv) - so->so_rcv.sb_ctl;
		}
		SOCK_RECVBUF_UNLOCK(so);
		break;
	case FIONWRITE:
		/* Unlocked read. */
		if (SOLISTENING(so)) {
			error = EINVAL;
		} else {
			*(int *)data = sbavail(&so->so_snd);
		}
		break;
	case FIONSPACE:
		/* Unlocked read. */
		if (SOLISTENING(so)) {
			error = EINVAL;
		} else {
			if ((so->so_snd.sb_hiwat < sbused(&so->so_snd)) ||
			    (so->so_snd.sb_mbmax < so->so_snd.sb_mbcnt)) {
				*(int *)data = 0;
			} else {
				*(int *)data = sbspace(&so->so_snd);
			}
		}
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &so->so_sigio);
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(&so->so_sigio);
		break;
	case SIOCSPGRP:
		error = fsetown(-(*(int *)data), &so->so_sigio);
		break;
	case SIOCGPGRP:
		*(int *)data = -fgetown(&so->so_sigio);
		break;
	case SIOCATMARK:
		/* Unlocked read. */
		if (SOLISTENING(so)) {
			error = EINVAL;
		} else {
			*(int *)data = (so->so_rcv.sb_state & SBS_RCVATMARK) != 0;
		}
		break;
	default:
		/*
		 * Interface/routing/protocol specific ioctls: interface and
		 * routing ioctls should have a different entry since a
		 * socket is unnecessary.
		 */
		if (IOCGROUP(cmd) == 'i')
			error = ifioctl(so, cmd, data, td);
		else if (IOCGROUP(cmd) == 'r') {
			CURVNET_SET(so->so_vnet);
			error = rtioctl_fib(cmd, data, so->so_fibnum);
			CURVNET_RESTORE();
		} else {
			CURVNET_SET(so->so_vnet);
			error = so->so_proto->pr_control(so, cmd, data, 0, td);
			CURVNET_RESTORE();
		}
		break;
	}
	return (error);
}
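
/*
 * An illustrative userland sketch of the generic socket ioctls handled
 * above (standard APIs, not part of this file):
 *
 *	int nread, atmark;
 *	ioctl(s, FIONREAD, &nread);	// data bytes queued in so_rcv
 *	ioctl(s, SIOCATMARK, &atmark);	// at the urgent-data mark?
 *
 * Requests outside this generic set fall through to the interface
 * ('i' group), routing ('r' group) or protocol control paths in the
 * default case above.
 */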

static int
soo_poll(struct file *fp, int events, struct ucred *active_cred,
    struct thread *td)
{
	struct socket *so = fp->f_data;
#ifdef MAC
	int error;

	error = mac_socket_check_poll(active_cred, so);
	if (error)
		return (error);
#endif
	return (sopoll(so, events, fp->f_cred, td));
}

static int
soo_stat(struct file *fp, struct stat *ub, struct ucred *active_cred)
{
	struct socket *so = fp->f_data;
	int error = 0;

	bzero((caddr_t)ub, sizeof (*ub));
	ub->st_mode = S_IFSOCK;
#ifdef MAC
	error = mac_socket_check_stat(active_cred, so);
	if (error)
		return (error);
#endif
	SOCK_LOCK(so);
	if (!SOLISTENING(so)) {
		struct sockbuf *sb;

		/*
		 * If SBS_CANTRCVMORE is set, but there's still data left
		 * in the receive buffer, the socket is still readable.
		 */
		sb = &so->so_rcv;
		SOCK_RECVBUF_LOCK(so);
		if ((sb->sb_state & SBS_CANTRCVMORE) == 0 || sbavail(sb))
			ub->st_mode |= S_IRUSR | S_IRGRP | S_IROTH;
		ub->st_size = sbavail(sb) - sb->sb_ctl;
		SOCK_RECVBUF_UNLOCK(so);

		sb = &so->so_snd;
		SOCK_SENDBUF_LOCK(so);
		if ((sb->sb_state & SBS_CANTSENDMORE) == 0)
			ub->st_mode |= S_IWUSR | S_IWGRP | S_IWOTH;
		SOCK_SENDBUF_UNLOCK(so);
	}
	ub->st_uid = so->so_cred->cr_uid;
	ub->st_gid = so->so_cred->cr_gid;
	if (so->so_proto->pr_sense)
		error = so->so_proto->pr_sense(so, ub);
	SOCK_UNLOCK(so);
	return (error);
}
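
/*
 * The net effect for userland: fstat(2) on a socket reports S_IFSOCK,
 * read/write permission bits that track whether each direction of the
 * connection can still move data, and st_size set to the number of
 * unread data bytes in the receive buffer.
 */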

/*
 * API socket close on file pointer.  We call soclose() to close the socket
 * (including initiating closing protocols).  soclose() will sorele() the
 * file reference but the actual socket will not go away until the socket's
 * ref count hits 0.
 */
static int
soo_close(struct file *fp, struct thread *td)
{
	int error = 0;
	struct socket *so;

	so = fp->f_data;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;

	if (so)
		error = soclose(so);
	return (error);
}

static int
soo_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
{
	struct sockaddr *sa;
	struct inpcb *inpcb;
	struct unpcb *unpcb;
	struct socket *so;
	int error;

	kif->kf_type = KF_TYPE_SOCKET;
	so = fp->f_data;
	CURVNET_SET(so->so_vnet);
	kif->kf_un.kf_sock.kf_sock_domain0 =
	    so->so_proto->pr_domain->dom_family;
	kif->kf_un.kf_sock.kf_sock_type0 = so->so_type;
	kif->kf_un.kf_sock.kf_sock_protocol0 = so->so_proto->pr_protocol;
	kif->kf_un.kf_sock.kf_sock_pcb = (uintptr_t)so->so_pcb;
	switch (kif->kf_un.kf_sock.kf_sock_domain0) {
	case AF_INET:
	case AF_INET6:
		if (so->so_pcb != NULL) {
			inpcb = (struct inpcb *)(so->so_pcb);
			kif->kf_un.kf_sock.kf_sock_inpcb =
			    (uintptr_t)inpcb->inp_ppcb;
		}
		kif->kf_un.kf_sock.kf_sock_rcv_sb_state =
		    so->so_rcv.sb_state;
		kif->kf_un.kf_sock.kf_sock_snd_sb_state =
		    so->so_snd.sb_state;
		kif->kf_un.kf_sock.kf_sock_sendq =
		    sbused(&so->so_snd);
		kif->kf_un.kf_sock.kf_sock_recvq =
		    sbused(&so->so_rcv);
		break;
	case AF_UNIX:
		if (so->so_pcb != NULL) {
			unpcb = (struct unpcb *)(so->so_pcb);
			if (unpcb->unp_conn) {
				kif->kf_un.kf_sock.kf_sock_unpconn =
				    (uintptr_t)unpcb->unp_conn;
				kif->kf_un.kf_sock.kf_sock_rcv_sb_state =
				    so->so_rcv.sb_state;
				kif->kf_un.kf_sock.kf_sock_snd_sb_state =
				    so->so_snd.sb_state;
				kif->kf_un.kf_sock.kf_sock_sendq =
				    sbused(&so->so_snd);
				kif->kf_un.kf_sock.kf_sock_recvq =
				    sbused(&so->so_rcv);
			}
		}
		break;
	}
	error = so->so_proto->pr_sockaddr(so, &sa);
	if (error == 0 &&
	    sa->sa_len <= sizeof(kif->kf_un.kf_sock.kf_sa_local)) {
		bcopy(sa, &kif->kf_un.kf_sock.kf_sa_local, sa->sa_len);
		free(sa, M_SONAME);
	}
	error = so->so_proto->pr_peeraddr(so, &sa);
	if (error == 0 &&
	    sa->sa_len <= sizeof(kif->kf_un.kf_sock.kf_sa_peer)) {
		bcopy(sa, &kif->kf_un.kf_sock.kf_sa_peer, sa->sa_len);
		free(sa, M_SONAME);
	}
	strncpy(kif->kf_path, so->so_proto->pr_domain->dom_name,
	    sizeof(kif->kf_path));
	CURVNET_RESTORE();
	return (0);
}
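
/*
 * The kinfo_file record filled in above is what fstat(1) and
 * procstat(1) display for socket descriptors; it is exported to
 * userland through the kern.proc.filedesc sysctl.
 */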

/*
 * Use the 'backend3' field in AIO jobs to store the amount of data
 * completed by the AIO job so far.
 */
#define aio_done backend3

static STAILQ_HEAD(, task) soaio_jobs;
static struct mtx soaio_jobs_lock;
static struct task soaio_kproc_task;
static int soaio_starting, soaio_idle, soaio_queued;
static struct unrhdr *soaio_kproc_unr;

static int soaio_max_procs = MAX_AIO_PROCS;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, max_procs, CTLFLAG_RW, &soaio_max_procs, 0,
    "Maximum number of kernel processes to use for async socket IO");

static int soaio_num_procs;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, num_procs, CTLFLAG_RD, &soaio_num_procs, 0,
    "Number of active kernel processes for async socket IO");

static int soaio_target_procs = TARGET_AIO_PROCS;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, target_procs, CTLFLAG_RD,
    &soaio_target_procs, 0,
    "Preferred number of ready kernel processes for async socket IO");

static int soaio_lifetime;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, lifetime, CTLFLAG_RW, &soaio_lifetime, 0,
    "Maximum lifetime for idle aiod");
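
/*
 * Socket AIO requests are serviced by a pool of "soaiod" kernel
 * processes.  Jobs wait on soaio_jobs until an idle daemon picks them
 * up; soaio_kproc_create() grows the pool on demand up to
 * kern.ipc.aio.max_procs, and a daemon that stays idle for
 * kern.ipc.aio.lifetime ticks exits again once the pool exceeds
 * kern.ipc.aio.target_procs.
 */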

static void
soaio_kproc_loop(void *arg)
{
	struct proc *p;
	struct vmspace *myvm;
	struct task *task;
	int error, id, pending;

	id = (intptr_t)arg;

	/*
	 * Grab an extra reference on the daemon's vmspace so that it
	 * doesn't get freed by jobs that switch to a different
	 * vmspace.
	 */
	p = curproc;
	myvm = vmspace_acquire_ref(p);

	mtx_lock(&soaio_jobs_lock);
	MPASS(soaio_starting > 0);
	soaio_starting--;
	for (;;) {
		while (!STAILQ_EMPTY(&soaio_jobs)) {
			task = STAILQ_FIRST(&soaio_jobs);
			STAILQ_REMOVE_HEAD(&soaio_jobs, ta_link);
			soaio_queued--;
			pending = task->ta_pending;
			task->ta_pending = 0;
			mtx_unlock(&soaio_jobs_lock);

			task->ta_func(task->ta_context, pending);

			mtx_lock(&soaio_jobs_lock);
		}
		MPASS(soaio_queued == 0);

		if (p->p_vmspace != myvm) {
			mtx_unlock(&soaio_jobs_lock);
			vmspace_switch_aio(myvm);
			mtx_lock(&soaio_jobs_lock);
			continue;
		}

		soaio_idle++;
		error = mtx_sleep(&soaio_idle, &soaio_jobs_lock, 0, "-",
		    soaio_lifetime);
		soaio_idle--;
		if (error == EWOULDBLOCK && STAILQ_EMPTY(&soaio_jobs) &&
		    soaio_num_procs > soaio_target_procs)
			break;
	}
	soaio_num_procs--;
	mtx_unlock(&soaio_jobs_lock);
	free_unr(soaio_kproc_unr, id);
	kproc_exit();
}

static void
soaio_kproc_create(void *context, int pending)
{
	struct proc *p;
	int error, id;

	mtx_lock(&soaio_jobs_lock);
	for (;;) {
		if (soaio_num_procs < soaio_target_procs) {
			/* Must create */
		} else if (soaio_num_procs >= soaio_max_procs) {
			/*
			 * Hit the limit on kernel processes, don't
			 * create another one.
			 */
			break;
		} else if (soaio_queued <= soaio_idle + soaio_starting) {
			/*
			 * No more AIO jobs waiting for a process to be
			 * created, so stop.
			 */
			break;
		}
		soaio_starting++;
		mtx_unlock(&soaio_jobs_lock);

		id = alloc_unr(soaio_kproc_unr);
		error = kproc_create(soaio_kproc_loop, (void *)(intptr_t)id,
		    &p, 0, 0, "soaiod%d", id);
		if (error != 0) {
			free_unr(soaio_kproc_unr, id);
			mtx_lock(&soaio_jobs_lock);
			soaio_starting--;
			break;
		}

		mtx_lock(&soaio_jobs_lock);
		soaio_num_procs++;
	}
	mtx_unlock(&soaio_jobs_lock);
}

static void
soaio_enqueue(struct task *task)
{

	mtx_lock(&soaio_jobs_lock);
	MPASS(task->ta_pending == 0);
	task->ta_pending++;
	STAILQ_INSERT_TAIL(&soaio_jobs, task, ta_link);
	soaio_queued++;
	if (soaio_queued <= soaio_idle)
		wakeup_one(&soaio_idle);
	else if (soaio_num_procs < soaio_max_procs)
		taskqueue_enqueue(taskqueue_thread, &soaio_kproc_task);
	mtx_unlock(&soaio_jobs_lock);
}
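
/*
 * The policy above prefers reusing an idle daemon: only when every
 * existing daemon is busy, and the pool is still below its maximum, is
 * soaio_kproc_task queued.  Deferring creation to taskqueue_thread
 * presumably keeps the sleepable kproc_create() call out of this
 * context, which may be a socket wakeup path holding a buffer lock.
 */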

static void
soaio_init(void)
{

	soaio_lifetime = AIOD_LIFETIME_DEFAULT;
	STAILQ_INIT(&soaio_jobs);
	mtx_init(&soaio_jobs_lock, "soaio jobs", NULL, MTX_DEF);
	soaio_kproc_unr = new_unrhdr(1, INT_MAX, NULL);
	TASK_INIT(&soaio_kproc_task, 0, soaio_kproc_create, NULL);
}
SYSINIT(soaio, SI_SUB_VFS, SI_ORDER_ANY, soaio_init, NULL);

static int
soaio_ready(struct socket *so, struct sockbuf *sb)
{

	return (sb == &so->so_rcv ? soreadable(so) : sowriteable(so));
}

static void
soaio_process_job(struct socket *so, sb_which which, struct kaiocb *job)
{
	struct ucred *td_savedcred;
	struct thread *td;
	struct sockbuf *sb = sobuf(so, which);
#ifdef MAC
	struct file *fp = job->fd_file;
#endif
	size_t cnt, done, job_total_nbytes __diagused;
	long ru_before;
	int error, flags;

	SOCK_BUF_UNLOCK(so, which);
	aio_switch_vmspace(job);
	td = curthread;
retry:
	td_savedcred = td->td_ucred;
	td->td_ucred = job->cred;

	job_total_nbytes = job->uiop->uio_resid + job->aio_done;
	done = job->aio_done;
	cnt = job->uiop->uio_resid;
	job->uiop->uio_offset = 0;
	job->uiop->uio_td = td;
	flags = MSG_NBIO;

	/*
	 * For resource usage accounting, only count a completed request
	 * as a single message to avoid counting multiple calls to
	 * sosend/soreceive on a blocking socket.
	 */

	if (sb == &so->so_rcv) {
		ru_before = td->td_ru.ru_msgrcv;
#ifdef MAC
		error = mac_socket_check_receive(fp->f_cred, so);
		if (error == 0)
#endif
			error = soreceive(so, NULL, job->uiop, NULL, NULL,
			    &flags);
		if (td->td_ru.ru_msgrcv != ru_before)
			job->msgrcv = 1;
	} else {
		if (!TAILQ_EMPTY(&sb->sb_aiojobq))
			flags |= MSG_MORETOCOME;
		ru_before = td->td_ru.ru_msgsnd;
#ifdef MAC
		error = mac_socket_check_send(fp->f_cred, so);
		if (error == 0)
#endif
			error = sosend(so, NULL, job->uiop, NULL, NULL, flags,
			    td);
		if (td->td_ru.ru_msgsnd != ru_before)
			job->msgsnd = 1;
		if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0) {
			PROC_LOCK(job->userproc);
			kern_psignal(job->userproc, SIGPIPE);
			PROC_UNLOCK(job->userproc);
		}
	}

	done += cnt - job->uiop->uio_resid;
	job->aio_done = done;
	td->td_ucred = td_savedcred;

	if (error == EWOULDBLOCK) {
		/*
		 * The request was either partially completed or not
		 * completed at all due to racing with a read() or
		 * write() on the socket.  If the socket is
		 * non-blocking, return with any partial completion.
		 * If the socket is blocking or if no progress has
		 * been made, requeue this request at the head of the
		 * queue to try again when the socket is ready.
		 */
		MPASS(done != job_total_nbytes);
		SOCK_BUF_LOCK(so, which);
		if (done == 0 || !(so->so_state & SS_NBIO)) {
			empty_results++;
			if (soaio_ready(so, sb)) {
				empty_retries++;
				SOCK_BUF_UNLOCK(so, which);
				goto retry;
			}

			if (!aio_set_cancel_function(job, soo_aio_cancel)) {
				SOCK_BUF_UNLOCK(so, which);
				if (done != 0)
					aio_complete(job, done, 0);
				else
					aio_cancel(job);
				SOCK_BUF_LOCK(so, which);
			} else {
				TAILQ_INSERT_HEAD(&sb->sb_aiojobq, job, list);
			}
			return;
		}
		SOCK_BUF_UNLOCK(so, which);
	}
	if (done != 0 && (error == ERESTART || error == EINTR ||
	    error == EWOULDBLOCK))
		error = 0;
	if (error)
		aio_complete(job, -1, error);
	else
		aio_complete(job, done, 0);
	SOCK_BUF_LOCK(so, which);
}
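
/*
 * To summarize the EWOULDBLOCK handling above: a job retries
 * immediately while the socket remains ready, re-queues itself at the
 * head of sb_aiojobq while it is not, and reports a short transfer
 * only once some progress has been made on a non-blocking socket.
 */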

static void
soaio_process_sb(struct socket *so, sb_which which)
{
	struct kaiocb *job;
	struct sockbuf *sb = sobuf(so, which);

	CURVNET_SET(so->so_vnet);
	SOCK_BUF_LOCK(so, which);
	while (!TAILQ_EMPTY(&sb->sb_aiojobq) && soaio_ready(so, sb)) {
		job = TAILQ_FIRST(&sb->sb_aiojobq);
		TAILQ_REMOVE(&sb->sb_aiojobq, job, list);
		if (!aio_clear_cancel_function(job))
			continue;

		soaio_process_job(so, which, job);
	}

	/*
	 * If there are still pending requests, the socket must not be
	 * ready so set SB_AIO to request a wakeup when the socket
	 * becomes ready.
	 */
	if (!TAILQ_EMPTY(&sb->sb_aiojobq))
		sb->sb_flags |= SB_AIO;
	sb->sb_flags &= ~SB_AIO_RUNNING;
	SOCK_BUF_UNLOCK(so, which);

	sorele(so);
	CURVNET_RESTORE();
}
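
/*
 * The sorele() above pairs with the soref() in sowakeup_aio() below,
 * keeping the socket alive for as long as its AIO task is queued to,
 * or running on, the daemon pool.
 */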

void
soaio_rcv(void *context, int pending)
{
	struct socket *so;

	so = context;
	soaio_process_sb(so, SO_RCV);
}

void
soaio_snd(void *context, int pending)
{
	struct socket *so;

	so = context;
	soaio_process_sb(so, SO_SND);
}

void
sowakeup_aio(struct socket *so, sb_which which)
{
	struct sockbuf *sb = sobuf(so, which);

	SOCK_BUF_LOCK_ASSERT(so, which);

	sb->sb_flags &= ~SB_AIO;
	if (sb->sb_flags & SB_AIO_RUNNING)
		return;
	sb->sb_flags |= SB_AIO_RUNNING;
	soref(so);
	soaio_enqueue(&sb->sb_aiotask);
}
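
/*
 * sowakeup_aio() is called from the socket buffer wakeup path (see
 * sowakeup() in uipc_sockbuf.c) when a buffer with SB_AIO set becomes
 * ready; SB_AIO_RUNNING guards against handing the same sb_aiotask to
 * the daemon pool twice.
 */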

static void
soo_aio_cancel(struct kaiocb *job)
{
	struct socket *so;
	struct sockbuf *sb;
	long done;
	int opcode;
	sb_which which;

	so = job->fd_file->f_data;
	opcode = job->uaiocb.aio_lio_opcode;
	if (opcode & LIO_READ) {
		sb = &so->so_rcv;
		which = SO_RCV;
	} else {
		MPASS(opcode & LIO_WRITE);
		sb = &so->so_snd;
		which = SO_SND;
	}

	SOCK_BUF_LOCK(so, which);
	if (!aio_cancel_cleared(job))
		TAILQ_REMOVE(&sb->sb_aiojobq, job, list);
	if (TAILQ_EMPTY(&sb->sb_aiojobq))
		sb->sb_flags &= ~SB_AIO;
	SOCK_BUF_UNLOCK(so, which);

	done = job->aio_done;
	if (done != 0)
		aio_complete(job, done, 0);
	else
		aio_cancel(job);
}

static int
soo_aio_queue(struct file *fp, struct kaiocb *job)
{
	struct socket *so;
	struct sockbuf *sb;
	sb_which which;
	int error;

	so = fp->f_data;
	error = so->so_proto->pr_aio_queue(so, job);
	if (error == 0)
		return (0);

	/* Lock through the socket, since this may be a listening socket. */
	switch (job->uaiocb.aio_lio_opcode & (LIO_WRITE | LIO_READ)) {
	case LIO_READ:
		SOCK_RECVBUF_LOCK(so);
		sb = &so->so_rcv;
		which = SO_RCV;
		break;
	case LIO_WRITE:
		SOCK_SENDBUF_LOCK(so);
		sb = &so->so_snd;
		which = SO_SND;
		break;
	default:
		return (EINVAL);
	}

	if (SOLISTENING(so)) {
		SOCK_BUF_UNLOCK(so, which);
		return (EINVAL);
	}

	if (!aio_set_cancel_function(job, soo_aio_cancel))
		panic("new job was cancelled");
	TAILQ_INSERT_TAIL(&sb->sb_aiojobq, job, list);
	if (!(sb->sb_flags & SB_AIO_RUNNING)) {
		if (soaio_ready(so, sb))
			sowakeup_aio(so, which);
		else
			sb->sb_flags |= SB_AIO;
	}
	SOCK_BUF_UNLOCK(so, which);
	return (0);
}
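
/*
 * A minimal userland sketch of the interface implemented here:
 * standard POSIX AIO against a connected socket 's', illustrative
 * only:
 *
 *	struct aiocb cb = { .aio_fildes = s };
 *	char buf[1024];
 *	cb.aio_buf = buf;
 *	cb.aio_nbytes = sizeof(buf);
 *	aio_read(&cb);				// queued via soo_aio_queue()
 *	while (aio_error(&cb) == EINPROGRESS)
 *		usleep(1000);			// or use aio_suspend()
 *	ssize_t n = aio_return(&cb);		// bytes received
 */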