/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 */
/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/kern_syscall.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>
#include <sys/signal2.h>
#include <sys/mutex2.h>

#include <machine/cpufunc.h>
/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);
static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_shutdown = pipe_shutdown
};
static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_pipedetach, filt_pipewrite };

MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");
/*
 * Default pipe buffer size(s), this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)
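
/*
 * Illustrative note (not part of the original source): with the usual
 * PIPE_SIZE default of 16384 bytes these evaluate to MINPIPESIZE = 5461
 * and MAXPIPESIZE = 10922, i.e. nominal hysteresis points at roughly
 * 1/3 and 2/3 of the buffer.
 */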

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	64
#define PIPEQ_MAX_CACHE 16	/* per-cpu pipe structure cache */

static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_bigcount;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
static int pipe_rblocked_count;
static int pipe_wblocked_count;
static struct mtx *pipe_gdlocks;

SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
	CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount,
	CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded");
SYSCTL_INT(_kern_pipe, OID_AUTO, rblocked,
	CTLFLAG_RW, &pipe_rblocked_count, 0, "number of times the reader blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, wblocked,
	CTLFLAG_RW, &pipe_wblocked_count, 0, "number of times the writer blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
	CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
	CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");
static int pipe_delay = 5000;	/* 5uS default */
SYSCTL_INT(_kern_pipe, OID_AUTO, delay,
	CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns");
#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
	CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
	CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif

/*
 * Auto-size pipe cache to reduce kmem allocations and frees.
 */
static void
pipeinit(void *dummy)
{
	size_t mbytes = kmem_lim_size();
	int n;

	if (pipe_maxbig == LIMITBIGPIPES) {
		if (mbytes >= 7 * 1024)
			pipe_maxbig *= 2;
		if (mbytes >= 15 * 1024)
			pipe_maxbig *= 2;
	}
	if (pipe_maxcache == PIPEQ_MAX_CACHE) {
		if (mbytes >= 7 * 1024)
			pipe_maxcache *= 2;
		if (mbytes >= 15 * 1024)
			pipe_maxcache *= 2;
	}
	pipe_gdlocks = kmalloc(sizeof(*pipe_gdlocks) * ncpus,
			       M_PIPE, M_WAITOK | M_ZERO);
	for (n = 0; n < ncpus; ++n)
		mtx_init(&pipe_gdlocks[n], "pipekm");
}
SYSINIT(kmem, SI_BOOT2_MACHDEP, SI_ORDER_ANY, pipeinit, NULL);

static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static int pipespace (struct pipe *cpipe, int size);

static void
pipewakeup(struct pipe *cpipe, int dosigio)
{
	if (dosigio && (cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) {
		lwkt_gettoken(&sigio_token);
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
		lwkt_reltoken(&sigio_token);
	}
	KNOTE(&cpipe->pipe_kq.ki_note, 0);
}

/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The read token is held on entry so *ipp does not race.
 */
static __inline int
pipe_start_uio(struct pipe *cpipe, int *ipp)
{
	int error;

	while (*ipp) {
		*ipp = -1;
		error = tsleep(ipp, PCATCH, "pipexx", 0);
		if (error)
			return (error);
	}
	*ipp = 1;
	return (0);
}

static __inline void
pipe_end_uio(struct pipe *cpipe, int *ipp)
{
	if (*ipp < 0) {
		*ipp = 0;
		wakeup(ipp);
	} else {
		KKASSERT(*ipp > 0);
		*ipp = 0;
	}
}
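
/*
 * Illustrative note (not part of the original source): *ipp is a tiny
 * in-progress state machine: 0 means no UIO is active, 1 means a UIO is
 * active, and -1 means a UIO is active with at least one other thread
 * sleeping on ipp waiting its turn, so pipe_end_uio() only needs to
 * wakeup() when it observes the -1 state.
 */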

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */
int
sys_pipe(struct pipe_args *uap)
{
	return kern_pipe(uap->sysmsg_fds, 0);
}

int
sys_pipe2(struct pipe2_args *uap)
{
	return kern_pipe(uap->sysmsg_fds, uap->flags);
}
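
/*
 * Usage sketch (illustrative, not part of the original source): the two
 * entry points above back the userland pipe(2) and pipe2(2) calls:
 *
 *	int fds[2];
 *	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) == 0)
 *		write(fds[1], "x", 1);		(fds[1] is the write side)
 *
 * kern_pipe() maps O_NONBLOCK onto f_flag and O_CLOEXEC onto the
 * UF_EXCLOSE file flag for both descriptors.
 */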

int
kern_pipe(long *fds, int flags)
{
	struct thread *td = curthread;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd1, fd2, error;

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	error = falloc(td->td_lwp, &rf, &fd1);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	fds[0] = fd1;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_type = DTYPE_PIPE;
	rf->f_flag = FREAD | FWRITE;
	rf->f_ops = &pipeops;
	rf->f_data = rpipe;
	if (flags & O_NONBLOCK)
		rf->f_flag |= O_NONBLOCK;
	if (flags & O_CLOEXEC)
		fdp->fd_files[fd1].fileflags |= UF_EXCLOSE;

	error = falloc(td->td_lwp, &wf, &fd2);
	if (error) {
		fsetfd(fdp, NULL, fd1);
		fdrop(rf);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_type = DTYPE_PIPE;
	wf->f_flag = FREAD | FWRITE;
	wf->f_ops = &pipeops;
	wf->f_data = wpipe;
	if (flags & O_NONBLOCK)
		wf->f_flag |= O_NONBLOCK;
	if (flags & O_CLOEXEC)
		fdp->fd_files[fd2].fileflags |= UF_EXCLOSE;
	fds[1] = fd2;

	rpipe->pipe_slock = kmalloc(sizeof(struct lock),
				    M_PIPE, M_WAITOK | M_ZERO);
	wpipe->pipe_slock = rpipe->pipe_slock;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	lockinit(rpipe->pipe_slock, "pipecl", 0, 0);

	/*
	 * Once activated the peer relationship remains valid until
	 * both sides are closed.
	 */
	fsetfd(fdp, rf, fd1);
	fsetfd(fdp, wf, fd2);
	fdrop(rf);
	fdrop(wf);

	return (0);
}

/*
 * Allocate kva for pipe circular buffer, the space is pageable.
 * This routine will 'realloc' the size of a pipe safely, if it fails
 * it will retain the old buffer.
 * If it fails it will return ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
	struct vm_object *object;
	caddr_t buffer;
	int npages, error;

	npages = round_page(size) / PAGE_SIZE;
	object = cpipe->pipe_buffer.object;

	/*
	 * [re]create the object if necessary and reserve space for it
	 * in the kernel_map.  The object and memory are pageable.  On
	 * success, free the old resources before assigning the new
	 * ones.
	 */
	if (object == NULL || object->size != npages) {
		object = vm_object_allocate(OBJT_DEFAULT, npages);
		buffer = (caddr_t)vm_map_min(&kernel_map);

		error = vm_map_find(&kernel_map, object, NULL,
				    0, (vm_offset_t *)&buffer, size,
				    PAGE_SIZE, TRUE,
				    VM_MAPTYPE_NORMAL, VM_SUBSYS_PIPE,
				    VM_PROT_ALL, VM_PROT_ALL, 0);

		if (error != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (ENOMEM);
		}
		pipe_free_kmem(cpipe);
		cpipe->pipe_buffer.object = object;
		cpipe->pipe_buffer.buffer = buffer;
		cpipe->pipe_buffer.size = size;
	}
	cpipe->pipe_buffer.rindex = 0;
	cpipe->pipe_buffer.windex = 0;
	return (0);
}

/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
	globaldata_t gd = mycpu;
	struct pipe *cpipe;
	int error;

	if ((cpipe = gd->gd_pipeq) != NULL) {
		gd->gd_pipeq = cpipe->pipe_peer;
		--gd->gd_pipeqcount;
		cpipe->pipe_peer = NULL;
		cpipe->pipe_wantwcnt = 0;
	} else {
		cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
	}
	*cpipep = cpipe;
	if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
		return (error);
	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	lwkt_token_init(&cpipe->pipe_rlock, "piper");
	lwkt_token_init(&cpipe->pipe_wlock, "pipew");
	return (0);
}

static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int error;
	int nbio;
	int nread = 0;
	u_int size;	/* total bytes available */
	u_int nsize;	/* total bytes to read */
	u_int rindex;	/* contiguous bytes available */
	int notify_writer;
	int bigread;
	int bigcount;
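
	/*
	 * Illustrative note (not part of the original source): rindex and
	 * windex are free-running unsigned counters.  The byte count in
	 * the FIFO is always (windex - rindex), which stays correct across
	 * 32-bit wraparound, and a buffer offset is recovered by masking
	 * with (size - 1), which requires size to be a power of 2.  For
	 * example, with size = 16384:
	 *
	 *	windex = 0x80000005, rindex = 0x7ffffffe
	 *	windex - rindex = 7 bytes pending
	 *	rindex & (size - 1) = 0x3ffe, the contiguous copy origin
	 */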

	atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);

	if (uio->uio_resid == 0)
		return(0);

	/*
	 * Setup locks, calculate nbio
	 */
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;
	lwkt_gettoken(&rpipe->pipe_rlock);

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * Reads are serialized.  Note however that pipe_buffer.buffer and
	 * pipe_buffer.size can change out from under us when the number
	 * of bytes in the buffer are zero due to the write-side doing a
	 * pipespace().
	 */
	error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
	if (error) {
		lwkt_reltoken(&rpipe->pipe_rlock);
		return (error);
	}
	notify_writer = 0;

	bigread = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
		/*
		 * Don't hog the cpu.
		 */
		if (bigread && --bigcount == 0) {
			lwkt_user_yield();
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}

		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		cpu_lfence();
		if (size) {
			rindex = rpipe->pipe_buffer.rindex &
				 (rpipe->pipe_buffer.size - 1);
			nsize = size;
			if (nsize > rpipe->pipe_buffer.size - rindex)
				nsize = rpipe->pipe_buffer.size - rindex;
			nsize = szmin(nsize, uio->uio_resid);

			error = uiomove(&rpipe->pipe_buffer.buffer[rindex],
					nsize, uio);
			if (error)
				break;
			cpu_mfence();
			rpipe->pipe_buffer.rindex += nsize;
			nread += nsize;

			/*
			 * If the FIFO is still over half full just continue
			 * and do not try to notify the writer yet.
			 */
			if (size - nsize >= (rpipe->pipe_buffer.size >> 1)) {
				notify_writer = 0;
				continue;
			}

			/*
			 * When the FIFO is less than half full notify any
			 * waiting writer.  WANTW can be checked while
			 * holding just the rlock.
			 */
			notify_writer = 1;
			if ((rpipe->pipe_state & PIPE_WANTW) == 0)
				continue;
		}

		/*
		 * If the "write-side" was blocked we wake it up.  This code
		 * is reached either when the buffer is completely emptied
		 * or if it becomes more than half-empty.
		 *
		 * Pipe_state can only be modified if both the rlock and
		 * wlock are held.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&rpipe->pipe_wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&rpipe->pipe_wlock);
			}
		}

		/*
		 * Pick up our copy loop again if the writer sent data to
		 * us while we were messing around.
		 *
		 * On a SMP box poll up to pipe_delay nanoseconds for new
		 * data.  Typically a value of 2000 to 4000 is sufficient
		 * to eradicate most IPIs/tsleeps/wakeups when a pipe
		 * is used for synchronous communications with small packets,
		 * and 8000 or so (8uS) will pipeline large buffer xfers
		 * between cpus over a pipe.
		 *
		 * For synchronous communications a hit means doing a
		 * full Awrite-Bread-Bwrite-Aread cycle in less than 2uS,
		 * whereas a miss requiring a tsleep/wakeup sequence
		 * will take 7uS or more.
		 */
		if (rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex)
			continue;

#ifdef _RDTSC_SUPPORTED_
		if (pipe_delay) {
			int64_t tsc_target;
			int good = 0;

			tsc_target = tsc_get_target(pipe_delay);
			while (tsc_test_target(tsc_target) == 0) {
				if (rpipe->pipe_buffer.windex !=
				    rpipe->pipe_buffer.rindex) {
					good = 1;
					break;
				}
				cpu_pause();
			}
			if (good)
				continue;
		}
#endif
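
		/*
		 * Illustrative note (not part of the original source): the
		 * polling window above can be tuned at runtime, e.g.
		 *
		 *	sysctl kern.pipe.delay=4000
		 *
		 * trading a short burst of cpu_pause() spinning against a
		 * full tsleep()/wakeup() round trip; setting the delay to 0
		 * disables the spin entirely.
		 */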

		/*
		 * Detect EOF condition, do not set error.
		 */
		if (rpipe->pipe_state & PIPE_REOF)
			break;

		/*
		 * Break if some data was read, or if this was a non-blocking
		 * read.
		 */
		if (nread > 0)
			break;

		if (nbio) {
			error = EAGAIN;
			break;
		}

		/*
		 * Last chance, interlock with WANTR.
		 */
		lwkt_gettoken(&rpipe->pipe_wlock);
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size) {
			lwkt_reltoken(&rpipe->pipe_wlock);
			continue;
		}

		/*
		 * Retest EOF - acquiring a new token can temporarily release
		 * tokens already held.
		 */
		if (rpipe->pipe_state & PIPE_REOF) {
			lwkt_reltoken(&rpipe->pipe_wlock);
			break;
		}

		/*
		 * If there is no more to read in the pipe, reset its
		 * pointers to the beginning.  This improves cache hit
		 * stats.
		 *
		 * We need both locks to modify both pointers, and there
		 * must also not be a write in progress or the uiomove()
		 * in the write might block and temporarily release
		 * its wlock, then reacquire and update windex.  We are
		 * only serialized against reads, not writes.
		 *
		 * XXX should we even bother resetting the indices?  It
		 *     might actually be more cache efficient not to.
		 */
		if (rpipe->pipe_buffer.rindex == rpipe->pipe_buffer.windex &&
		    rpipe->pipe_wip == 0) {
			rpipe->pipe_buffer.rindex = 0;
			rpipe->pipe_buffer.windex = 0;
		}

		/*
		 * Wait for more data.
		 *
		 * Pipe_state can only be set if both the rlock and wlock
		 * are held.
		 */
		rpipe->pipe_state |= PIPE_WANTR;
		tsleep_interlock(rpipe, PCATCH);
		lwkt_reltoken(&rpipe->pipe_wlock);
		error = tsleep(rpipe, PCATCH | PINTERLOCKED, "piperd", 0);
		++pipe_rblocked_count;
		if (error)
			break;
	}
	pipe_end_uio(rpipe, &rpipe->pipe_rip);

	/*
	 * Update last access time.
	 */
	if (error == 0 && nread)
		vfs_timestamp(&rpipe->pipe_atime);

	/*
	 * If we drained the FIFO more than half way then handle
	 * write blocking hysteresis.
	 *
	 * Note that PIPE_WANTW cannot be set by the writer without
	 * it holding both rlock and wlock, so we can test it
	 * while holding just rlock.
	 */
	if (notify_writer) {
		/*
		 * Synchronous blocking is done on the pipe involved
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&rpipe->pipe_wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&rpipe->pipe_wlock);
			}
		}

		/*
		 * But we may also have to deal with a kqueue which is
		 * stored on the same pipe as its descriptor, so a
		 * EVFILT_WRITE event waiting for our side to drain will
		 * be on the other side.
		 */
		lwkt_gettoken(&wpipe->pipe_wlock);
		pipewakeup(wpipe, 0);
		lwkt_reltoken(&wpipe->pipe_wlock);
	}
	/*size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;*/
	lwkt_reltoken(&rpipe->pipe_rlock);

	return (error);
}

static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	u_int windex;
	u_int space;
	u_int segsize;
	int error;
	int nbio;
	int orig_resid;
	int bigwrite;
	int bigcount;

	/*
	 * Writes go to the peer.  The peer will always exist.
	 */
	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;
	lwkt_gettoken(&wpipe->pipe_wlock);
	if (wpipe->pipe_state & PIPE_WEOF) {
		lwkt_reltoken(&wpipe->pipe_wlock);
		return (EPIPE);
	}

	/*
	 * Degenerate case (EPIPE takes prec)
	 */
	if (uio->uio_resid == 0) {
		lwkt_reltoken(&wpipe->pipe_wlock);
		return(0);
	}

	/*
	 * Writes are serialized (start_uio must be called with wlock)
	 */
	error = pipe_start_uio(wpipe, &wpipe->pipe_wip);
	if (error) {
		lwkt_reltoken(&wpipe->pipe_wlock);
		return (error);
	}

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.  We are write-serialized so we can block safely.
	 */
	if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (pipe_nbig < pipe_maxbig) &&
	    wpipe->pipe_wantwcnt > 4 &&
	    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
		/*
		 * Recheck after lock.
		 */
		lwkt_gettoken(&wpipe->pipe_rlock);
		if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		    (pipe_nbig < pipe_maxbig) &&
		    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
			atomic_add_int(&pipe_nbig, 1);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				atomic_add_int(&pipe_bigcount, 1);
			else
				atomic_subtract_int(&pipe_nbig, 1);
		}
		lwkt_reltoken(&wpipe->pipe_rlock);
	}
	orig_resid = uio->uio_resid;

	bigwrite = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}

		/*
		 * Don't hog the cpu.
		 */
		if (bigwrite && --bigcount == 0) {
			lwkt_user_yield();
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}

		windex = wpipe->pipe_buffer.windex &
			 (wpipe->pipe_buffer.size - 1);
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;
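
		/*
		 * Illustrative note (not part of the original source):
		 * POSIX requires that writes of PIPE_BUF bytes or less
		 * never interleave with writes from other processes.
		 * Zeroing 'space' here forces a small write that cannot
		 * yet fit in its entirety to block until the reader
		 * drains the FIFO, rather than landing a partial segment,
		 * which is what preserves that guarantee.
		 */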

		/*
		 * Write to fill, read size handles write hysteresis.  Also
		 * additional restrictions can cause select-based non-blocking
		 * writes to spin.
		 */
		if (space > 0) {
			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 *
			 * Limit each uiocopy to no more than PIPE_SIZE
			 * so we can keep the gravy train going on a
			 * SMP box.  This doubles the performance for
			 * write sizes > 16K.  Otherwise large writes
			 * wind up doing an inefficient synchronous
			 * ping-pong.
			 */
			space = szmin(space, uio->uio_resid);
			if (space > PIPE_SIZE)
				space = PIPE_SIZE;

			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size - windex;
			if (segsize > space)
				segsize = space;

			/*
			 * If this is the first loop and the reader is
			 * blocked, do a preemptive wakeup of the reader.
			 *
			 * On SMP the IPI latency plus the wlock interlock
			 * on the reader side is the fastest way to get the
			 * reader going.  (The scheduler will hard loop on
			 * lock tokens).
			 *
			 * NOTE: We can't clear WANTR here without acquiring
			 * the rlock, which we don't want to do here!
			 */
			if ((wpipe->pipe_state & PIPE_WANTR))
				wakeup(wpipe);

			/*
			 * Transfer segment, which may include a wrap-around.
			 * Update windex to account for both all in one go
			 * so the reader can read() the data atomically.
			 */
			error = uiomove(&wpipe->pipe_buffer.buffer[windex],
					segsize, uio);
			if (error == 0 && segsize < space) {
				segsize = space - segsize;
				error = uiomove(&wpipe->pipe_buffer.buffer[0],
						segsize, uio);
			}
			if (error)
				break;
			cpu_mfence();
			wpipe->pipe_buffer.windex += space;
			continue;
		}

		/*
		 * We need both the rlock and the wlock to interlock against
		 * the EOF, WANTW, and size checks, and to modify pipe_state.
		 *
		 * These are token locks so we do not have to worry about
		 * deadlocks.
		 */
		lwkt_gettoken(&wpipe->pipe_rlock);

		/*
		 * If the "read-side" has been blocked, wake it up now
		 * and yield to let it drain synchronously rather
		 * than block.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		/*
		 * don't block on non-blocking I/O
		 */
		if (nbio) {
			lwkt_reltoken(&wpipe->pipe_rlock);
			error = EAGAIN;
			break;
		}

		/*
		 * re-test whether we have to block in the writer after
		 * acquiring both locks, in case the reader opened up
		 * some space.
		 */
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * Retest EOF - acquiring a new token can temporarily release
		 * tokens already held.
		 */
		if (wpipe->pipe_state & PIPE_WEOF) {
			lwkt_reltoken(&wpipe->pipe_rlock);
			error = EPIPE;
			break;
		}

		/*
		 * We have no more space and have something to offer,
		 * wake up select/poll/kq.
		 */
		if (space == 0) {
			wpipe->pipe_state |= PIPE_WANTW;
			++wpipe->pipe_wantwcnt;
			pipewakeup(wpipe, 1);
			if (wpipe->pipe_state & PIPE_WANTW)
				error = tsleep(wpipe, PCATCH, "pipewr", 0);
			++pipe_wblocked_count;
		}
		lwkt_reltoken(&wpipe->pipe_rlock);

		/*
		 * Break out if we errored or the read side wants us to go
		 * away.
		 */
		if (error)
			break;
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}
	}
	pipe_end_uio(wpipe, &wpipe->pipe_wip);

	/*
	 * If we have put any characters in the buffer, we wake up
	 * the reader.
	 *
	 * Both rlock and wlock are required to be able to modify pipe_state.
	 */
	if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			lwkt_gettoken(&wpipe->pipe_rlock);
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				lwkt_reltoken(&wpipe->pipe_rlock);
				wakeup(wpipe);
			} else {
				lwkt_reltoken(&wpipe->pipe_rlock);
			}
		}
		lwkt_gettoken(&wpipe->pipe_rlock);
		pipewakeup(wpipe, 1);
		lwkt_reltoken(&wpipe->pipe_rlock);
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll/kq.
	 */
	/*space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;*/
	lwkt_reltoken(&wpipe->pipe_wlock);

	return (error);
}

/*
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data,
	   struct ucred *cred, struct sysmsg *msg)
{
	struct pipe *mpipe;
	int error;

	mpipe = (struct pipe *)fp->f_data;

	lwkt_gettoken(&mpipe->pipe_rlock);
	lwkt_gettoken(&mpipe->pipe_wlock);

	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		error = 0;
		break;
	case FIONREAD:
		*(int *)data = mpipe->pipe_buffer.windex -
				mpipe->pipe_buffer.rindex;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(&mpipe->pipe_sigio);
		error = 0;
		break;
	case TIOCSPGRP:
		/* This is deprecated, FIOSETOWN should be used instead. */
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		break;
	case TIOCGPGRP:
		/* This is deprecated, FIOGETOWN should be used instead. */
		*(int *)data = -fgetown(&mpipe->pipe_sigio);
		error = 0;
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&mpipe->pipe_wlock);
	lwkt_reltoken(&mpipe->pipe_rlock);

	return (error);
}

static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
	struct pipe *pipe;

	pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.windex - pipe->pipe_buffer.rindex;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}

static int
pipe_close(struct file *fp)
{
	struct pipe *cpipe;

	cpipe = (struct pipe *)fp->f_data;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(&cpipe->pipe_sigio);
	pipeclose(cpipe);
	return (0);
}

/*
 * Shutdown one or both directions of a full-duplex pipe.
 */
static int
pipe_shutdown(struct file *fp, int how)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int error = EPIPE;

	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * We modify pipe_state on both pipes, which means we need
	 * all four tokens!
	 */
	lwkt_gettoken(&rpipe->pipe_rlock);
	lwkt_gettoken(&rpipe->pipe_wlock);
	lwkt_gettoken(&wpipe->pipe_rlock);
	lwkt_gettoken(&wpipe->pipe_wlock);

	switch(how) {
	case SHUT_RDWR:
	case SHUT_RD:
		rpipe->pipe_state |= PIPE_REOF;		/* my reads */
		rpipe->pipe_state |= PIPE_WEOF;		/* peer writes */
		if (rpipe->pipe_state & PIPE_WANTR) {
			rpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(rpipe);
		}
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
		error = 0;
		if (how == SHUT_RD)
			break;
		/* fall through */
	case SHUT_WR:
		wpipe->pipe_state |= PIPE_REOF;		/* peer reads */
		wpipe->pipe_state |= PIPE_WEOF;		/* my writes */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		if (wpipe->pipe_state & PIPE_WANTW) {
			wpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(wpipe);
		}
		error = 0;
		break;
	}
	pipewakeup(rpipe, 1);
	pipewakeup(wpipe, 1);

	lwkt_reltoken(&wpipe->pipe_wlock);
	lwkt_reltoken(&wpipe->pipe_rlock);
	lwkt_reltoken(&rpipe->pipe_wlock);
	lwkt_reltoken(&rpipe->pipe_rlock);

	return (error);
}

/*
 * Destroy the pipe buffer.
 */
static void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			atomic_subtract_int(&pipe_nbig, 1);
		kmem_free(&kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
		cpipe->pipe_buffer.object = NULL;
	}
}

/*
 * Close the pipe.  The slock must be held to interlock against simultaneous
 * closes.  The rlock and wlock must be held to adjust the pipe_state.
 */
static void
pipeclose(struct pipe *cpipe)
{
	globaldata_t gd;
	struct pipe *ppipe;

	if (cpipe == NULL)
		return;

	/*
	 * The slock may not have been allocated yet (close during
	 * initialization).
	 *
	 * We need both the read and write tokens to modify pipe_state.
	 */
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE);
	lwkt_gettoken(&cpipe->pipe_rlock);
	lwkt_gettoken(&cpipe->pipe_wlock);

	/*
	 * Set our state, wakeup anyone waiting in select/poll/kq, and
	 * wakeup anyone blocked on our pipe.
	 */
	cpipe->pipe_state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF;
	pipewakeup(cpipe, 1);
	if (cpipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
		cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
		wakeup(cpipe);
	}

	/*
	 * Disconnect from peer.
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		lwkt_gettoken(&ppipe->pipe_rlock);
		lwkt_gettoken(&ppipe->pipe_wlock);
		ppipe->pipe_state |= PIPE_REOF | PIPE_WEOF;
		pipewakeup(ppipe, 1);
		if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
			ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
			wakeup(ppipe);
		}
		if (SLIST_FIRST(&ppipe->pipe_kq.ki_note))
			KNOTE(&ppipe->pipe_kq.ki_note, 0);
		lwkt_reltoken(&ppipe->pipe_wlock);
		lwkt_reltoken(&ppipe->pipe_rlock);
	}

	/*
	 * If the peer is also closed we can free resources for both
	 * sides, otherwise we leave our side intact to deal with any
	 * races (since we only have the slock).
	 */
	if (ppipe && (ppipe->pipe_state & PIPE_CLOSED)) {
		cpipe->pipe_peer = NULL;
		ppipe->pipe_peer = NULL;
		ppipe->pipe_slock = NULL;	/* we will free the slock */
		pipeclose(ppipe);
		ppipe = NULL;
	}

	lwkt_reltoken(&cpipe->pipe_wlock);
	lwkt_reltoken(&cpipe->pipe_rlock);
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_RELEASE);

	/*
	 * If we disassociated from our peer we can free resources.  We
	 * maintain a pcpu cache to improve performance, so the actual
	 * tear-down case is limited to bulk situations.
	 *
	 * However, the bulk tear-down case can cause intense contention
	 * on the kernel_map when, e.g. hundreds to hundreds of thousands
	 * of processes are killed at the same time.  To deal with this we
	 * use a pcpu mutex to maintain concurrency but also limit the
	 * number of threads banging on the map and pmap.
	 *
	 * We use the mtx mechanism instead of the lockmgr mechanism because
	 * the mtx mechanism utilizes a queued design which will not break
	 * down in the face of thousands to hundreds of thousands of
	 * processes trying to free pipes simultaneously.  The lockmgr
	 * mechanism will wind up waking them all up each time a lock
	 * cycles.
	 */
	if (ppipe == NULL) {
		gd = mycpu;
		if (cpipe->pipe_slock) {
			kfree(cpipe->pipe_slock, M_PIPE);
			cpipe->pipe_slock = NULL;
		}
		if (gd->gd_pipeqcount >= pipe_maxcache ||
		    cpipe->pipe_buffer.size != PIPE_SIZE) {
			mtx_lock(&pipe_gdlocks[gd->gd_cpuid]);
			pipe_free_kmem(cpipe);
			mtx_unlock(&pipe_gdlocks[gd->gd_cpuid]);
			kfree(cpipe, M_PIPE);
		} else {
			cpipe->pipe_state = 0;
			cpipe->pipe_peer = gd->gd_pipeq;
			gd->gd_pipeq = cpipe;
			++gd->gd_pipeqcount;
		}
	}
}

static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		if (cpipe->pipe_peer == NULL) {
			/* other end of pipe has been closed */
			return (EPIPE);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}
	kn->kn_hook = (caddr_t)cpipe;

	knote_insert(&cpipe->pipe_kq.ki_note, kn);

	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	knote_remove(&cpipe->pipe_kq.ki_note, kn);
}

static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	int ready = 0;

	lwkt_gettoken(&rpipe->pipe_rlock);
	lwkt_gettoken(&rpipe->pipe_wlock);

	kn->kn_data = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;

	if (rpipe->pipe_state & PIPE_REOF) {
		/*
		 * Only set NODATA if all data has been exhausted
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		ready = 1;
	}

	lwkt_reltoken(&rpipe->pipe_wlock);
	lwkt_reltoken(&rpipe->pipe_rlock);

	if (!ready)
		ready = kn->kn_data > 0;

	return (ready);
}

static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;
	int ready = 0;

	kn->kn_data = 0;
	if (wpipe == NULL) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}

	lwkt_gettoken(&wpipe->pipe_rlock);
	lwkt_gettoken(&wpipe->pipe_wlock);

	if (wpipe->pipe_state & PIPE_WEOF) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		ready = 1;
	}

	if (!ready) {
		kn->kn_data = wpipe->pipe_buffer.size -
			      (wpipe->pipe_buffer.windex -
			       wpipe->pipe_buffer.rindex);
	}

	lwkt_reltoken(&wpipe->pipe_wlock);
	lwkt_reltoken(&wpipe->pipe_rlock);

	if (!ready)
		ready = kn->kn_data >= PIPE_BUF;

	return (ready);
}
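
/*
 * Usage sketch (illustrative, not part of the original source): the two
 * filters above are what a userland kevent(2) registration on a pipe
 * descriptor ends up driving:
 *
 *	struct kevent ev;
 *	EV_SET(&ev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 *
 * EVFILT_READ fires when (windex - rindex) > 0 or on EOF, and
 * EVFILT_WRITE fires once at least PIPE_BUF bytes of space are
 * available, matching the atomic-write guarantee.
 */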