/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>

#include <machine/cpufunc.h>
/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_poll (struct file *fp, int events, struct ucred *cred);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data, struct ucred *cred);
static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_poll = pipe_poll,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_shutdown = pipe_shutdown
};
static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");
/*
 * Default pipe buffer size(s); this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)
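/*
 * Illustrative arithmetic (not in the original source), assuming the
 * common PIPE_SIZE of 16384 bytes:
 *
 *	MINPIPESIZE = 16384/3   = 5461
 *	MAXPIPESIZE = 2*16384/3 = 10922
 *
 * These serve as low/high hysteresis watermarks for pipe buffer
 * management.
 */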
/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	64
#define PIPEQ_MAX_CACHE 16      /* per-cpu pipe structure cache */
static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_bigcount;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
        CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount,
        CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
        CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
        CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");

static int pipe_mpsafe = 0;
SYSCTL_INT(_kern_pipe, OID_AUTO, mpsafe,
        CTLFLAG_RW, &pipe_mpsafe, 0, "");

#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
        CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
        CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif
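/*
 * Illustrative note (not in the original source): because these knobs
 * hang off the _kern_pipe sysctl node declared above, they surface to
 * userland as kern.pipe.*.  A sketch of how an administrator might
 * inspect or tune them with a stock sysctl(8):
 *
 *	sysctl kern.pipe.nbig		# read-only count of big pipes
 *	sysctl kern.pipe.maxcache=32	# enlarge the per-cpu pipe cache
 */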
static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline void pipeselwakeup (struct pipe *cpipe);
static int pipespace (struct pipe *cpipe, int size);
static __inline void
pipeselwakeup(struct pipe *cpipe)
{
	if (cpipe->pipe_state & PIPE_SEL) {
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) {
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
	}
	if (SLIST_FIRST(&cpipe->pipe_sel.si_note)) {
		KNOTE(&cpipe->pipe_sel.si_note, 0);
	}
}
/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The read token is held on entry so *ipp does not race.
 */
static __inline int
pipe_start_uio(struct pipe *cpipe, u_int *ipp)
{
	int error;

	while (*ipp) {
		*ipp = -1;
		error = tsleep(ipp, PCATCH, "pipexx", 0);
		if (error)
			return (error);
	}
	*ipp = 1;
	return (0);
}

static __inline void
pipe_end_uio(struct pipe *cpipe, u_int *ipp)
{
	if (*ipp < 0) {
		*ipp = 0;
		wakeup(ipp);
	} else {
		*ipp = 0;
	}
}
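/*
 * Illustrative sketch (not in the original source): the calling pattern
 * for the helpers above, as used by pipe_read()/pipe_write() below:
 *
 *	lwkt_gettoken(&rlock, &rpipe->pipe_rlock);
 *	error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
 *	... uiomove() loop, may block and temporarily lose the token ...
 *	pipe_end_uio(rpipe, &rpipe->pipe_rip);
 *	lwkt_reltoken(&rlock);
 */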
static __inline void
pipe_get_mplock(int *save)
{
	if (pipe_mpsafe == 0) {
		*save = 1;
		get_mplock();
	} else {
		*save = 0;
	}
}

static __inline void
pipe_rel_mplock(int *save)
{
	if (*save)
		rel_mplock();
}
/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */
int
sys_pipe(struct pipe_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd1, fd2, error;

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	error = falloc(p, &rf, &fd1);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	uap->sysmsg_fds[0] = fd1;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_type = DTYPE_PIPE;
	rf->f_flag = FREAD | FWRITE;
	rf->f_ops = &pipeops;
	rf->f_data = rpipe;
	error = falloc(p, &wf, &fd2);
	if (error) {
		fsetfd(p, NULL, fd1);
		fdrop(rf);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_type = DTYPE_PIPE;
	wf->f_flag = FREAD | FWRITE;
	wf->f_ops = &pipeops;
	wf->f_data = wpipe;
	uap->sysmsg_fds[1] = fd2;

	rpipe->pipe_slock = kmalloc(sizeof(struct lock),
				    M_PIPE, M_WAITOK|M_ZERO);
	wpipe->pipe_slock = rpipe->pipe_slock;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	lockinit(rpipe->pipe_slock, "pipecl", 0, 0);

	/*
	 * Once activated the peer relationship remains valid until
	 * both sides are closed.
	 */
	fsetfd(p, rf, fd1);
	fsetfd(p, wf, fd2);
	fdrop(rf);
	fdrop(wf);

	return (0);
}
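/*
 * Illustrative sketch (not in the original source): the topology built by
 * sys_pipe().  The two struct pipes are cross-linked and share a single
 * close-interlock:
 *
 *	fd1 -> rf -> rpipe <--pipe_peer--> wpipe <- wf <- fd2
 *	                \___ shared pipe_slock ___/
 *
 * A minimal userland consumer, assuming standard libc:
 *
 *	int fd[2];
 *	char buf[1];
 *	if (pipe(fd) == 0) {
 *		write(fd[1], "x", 1);	(fills the ring buffer)
 *		read(fd[0], buf, 1);	(drains it again)
 *	}
 */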
/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it fails
 * it will retain the old buffer.
 * If it fails it will return ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
	struct vm_object *object;
	caddr_t buffer;
	int npages;
	int error;

	npages = round_page(size) / PAGE_SIZE;
	object = cpipe->pipe_buffer.object;

	/*
	 * [re]create the object if necessary and reserve space for it
	 * in the kernel_map.  The object and memory are pageable.  On
	 * success, free the old resources before assigning the new
	 * ones.
	 */
	if (object == NULL || object->size != npages) {
		get_mplock();
		object = vm_object_allocate(OBJT_DEFAULT, npages);
		buffer = (caddr_t)vm_map_min(&kernel_map);

		error = vm_map_find(&kernel_map, object, 0,
				    (vm_offset_t *)&buffer, size,
				    1,
				    VM_MAPTYPE_NORMAL,
				    VM_PROT_ALL, VM_PROT_ALL,
				    0);

		if (error != KERN_SUCCESS) {
			vm_object_deallocate(object);
			rel_mplock();
			return (ENOMEM);
		}
		pipe_free_kmem(cpipe);
		rel_mplock();
		cpipe->pipe_buffer.object = object;
		cpipe->pipe_buffer.buffer = buffer;
		cpipe->pipe_buffer.size = size;
	}
	cpipe->pipe_buffer.rindex = 0;
	cpipe->pipe_buffer.windex = 0;
	return (0);
}
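/*
 * Illustrative note (not in the original source): rindex and windex are
 * free-running and are masked with (size - 1) at access time (see
 * pipe_read()/pipe_write()), which only works because the buffer sizes
 * used here (PIPE_SIZE, BIG_PIPE_SIZE) are powers of two.  For example,
 * assuming a 16384-byte buffer:
 *
 *	windex = 16500, rindex = 16400
 *	bytes buffered = windex - rindex      = 100
 *	read offset    = rindex & (16384 - 1) = 16  (wrapped)
 */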
/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
	globaldata_t gd = mycpu;
	struct pipe *cpipe;
	int error;

	if ((cpipe = gd->gd_pipeq) != NULL) {
		gd->gd_pipeq = cpipe->pipe_peer;
		--gd->gd_pipeqcount;
		cpipe->pipe_peer = NULL;
		cpipe->pipe_wantwcnt = 0;
	} else {
		cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
	}
	*cpipep = cpipe;
	if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
		return (error);
	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	lwkt_token_init(&cpipe->pipe_rlock);
	lwkt_token_init(&cpipe->pipe_wlock);
	return (0);
}
/*
 * MPALMOSTSAFE (acquires mplock)
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipe *rpipe;
	int error;
	int nread = 0;
	int nbio;
	u_int size;	/* total bytes available */
	u_int nsize;	/* total bytes to read */
	u_int rindex;	/* contiguous bytes available */
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int mpsave;

	if (uio->uio_resid == 0)
		return (0);

	/*
	 * Setup locks, calculate nbio
	 */
	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	lwkt_gettoken(&rlock, &rpipe->pipe_rlock);

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * Reads are serialized.  Note however that pipe_buffer.buffer and
	 * pipe_buffer.size can change out from under us when the number
	 * of bytes in the buffer is zero due to the write-side doing a
	 * pipespace().
	 */
	error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&rlock);
		return (error);
	}
	while (uio->uio_resid) {
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size) {
			rindex = rpipe->pipe_buffer.rindex &
				 (rpipe->pipe_buffer.size - 1);
			nsize = size;
			if (nsize > rpipe->pipe_buffer.size - rindex)
				nsize = rpipe->pipe_buffer.size - rindex;
			if (nsize > (u_int)uio->uio_resid)
				nsize = (u_int)uio->uio_resid;

			error = uiomove(&rpipe->pipe_buffer.buffer[rindex],
					nsize, uio);
			if (error)
				break;
			rpipe->pipe_buffer.rindex += nsize;
			nread += nsize;
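			/*
			 * Illustrative note (not in the original source):
			 * rindex/windex are free-running unsigned counters,
			 * so "windex - rindex" yields the byte count even
			 * after the counters wrap past UINT_MAX, e.g.:
			 *
			 *	windex = 0x00000010, rindex = 0xfffffff0
			 *	size   = 0x10 - 0xfffffff0 = 0x20 (32 bytes)
			 */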
			/*
			 * If the FIFO has not been drained past the half-way
			 * mark then just continue and do not try to notify
			 * the writer yet.
			 */
			if (size - nsize >= (rpipe->pipe_buffer.size >> 1)) {
				continue;
			}

			/*
			 * If the FIFO has been drained past the half-way
			 * mark we have to check the writer at some point,
			 * but for now continue if the writer is not yet
			 * sleeping.
			 */
			if ((rpipe->pipe_state & PIPE_WANTW) == 0)
				continue;
		}

		/*
		 * If the "write-side" was blocked we wake it up.  This code
		 * is reached either when the buffer is completely emptied
		 * or if it becomes more than half-empty.
		 *
		 * Pipe_state can only be modified if both the rlock and
		 * wlock are held.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&wlock);
			}
		}
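		/*
		 * Illustrative note (not in the original source): the
		 * test/retest above is a classic double-checked interlock.
		 * PIPE_WANTW is tested cheaply under the read token alone;
		 * only if it appears set do we pay for the write token and
		 * retest before clearing it and issuing the wakeup().  A
		 * writer can only set PIPE_WANTW while holding both tokens,
		 * so the retest under both tokens is authoritative.
		 */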
		/*
		 * Pick up our copy loop again if the writer sent data to
		 * us.
		 */
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size)
			continue;

		/*
		 * Detect EOF condition, do not set error.
		 */
		if (rpipe->pipe_state & PIPE_REOF)
			break;

		/*
		 * Gravy train if SMP box.  This saves a ton of IPIs and
		 * allows two cpus to operate in lockstep.
		 *
		 * XXX check pipe_wip also?
		 */
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size)
			continue;

		/*
		 * Break if some data was read, or if this was a non-blocking
		 * read.
		 */
		if (nread > 0)
			break;
		if (nbio) {
			error = EAGAIN;
			break;
		}

		/*
		 * Last chance, interlock with WANTR.
		 */
		lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size) {
			lwkt_reltoken(&wlock);
			continue;
		}

		/*
		 * If there is no more to read in the pipe, reset its
		 * pointers to the beginning.  This improves cache hit
		 * stats.
		 *
		 * We need both locks to modify both pointers, and there
		 * must also not be a write in progress or the uiomove()
		 * in the write might block and temporarily release
		 * its wlock, then reacquire and update windex.  We are
		 * only serialized against reads, not writes.
		 *
		 * XXX should we even bother resetting the indices?  It
		 * might actually be more cache efficient not to.
		 */
		if (rpipe->pipe_buffer.rindex == rpipe->pipe_buffer.windex &&
		    rpipe->pipe_wip == 0) {
			rpipe->pipe_buffer.rindex = 0;
			rpipe->pipe_buffer.windex = 0;
		}

		/*
		 * Wait for more data.
		 *
		 * Pipe_state can only be set if both the rlock and wlock
		 * are held.
		 */
		rpipe->pipe_state |= PIPE_WANTR;
		tsleep_interlock(rpipe);
		lwkt_reltoken(&wlock);
		error = tsleep(rpipe, PCATCH, "piperd", 0);
		if (error)
			break;
	}
	pipe_end_uio(rpipe, &rpipe->pipe_rip);
	/*
	 * Update last access time
	 */
	if (error == 0 && nread)
		vfs_timestamp(&rpipe->pipe_atime);

	/*
	 * If we drained the FIFO more than half way then handle
	 * write blocking hysteresis.
	 *
	 * Note that PIPE_WANTW cannot be set by the writer without
	 * it holding both rlock and wlock, so we can test it
	 * while holding just rlock.
	 */
	if (rpipe->pipe_state & PIPE_WANTW) {
		lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			lwkt_reltoken(&wlock);
			wakeup(rpipe);
		} else {
			lwkt_reltoken(&wlock);
		}
	}

	size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
	lwkt_reltoken(&rlock);

	/*
	 * If enough space is available in the buffer then wake up any
	 * waiting select/poll writers.
	 */
	if ((rpipe->pipe_buffer.size - size) >= PIPE_BUF)
		pipeselwakeup(rpipe);
	pipe_rel_mplock(&mpsave);
	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	int error;
	int orig_resid;
	int nbio;
	struct pipe *wpipe, *rpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	u_int windex;
	u_int space;
	u_int segsize;
	int mpsave;

	pipe_get_mplock(&mpsave);

	/*
	 * Writes go to the peer.  The peer will always exist.
	 */
	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;
	lwkt_gettoken(&wlock, &wpipe->pipe_wlock);
	if (wpipe->pipe_state & PIPE_WEOF) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (EPIPE);
	}

	/*
	 * Degenerate case (EPIPE takes precedence)
	 */
	if (uio->uio_resid == 0) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (0);
	}

	/*
	 * Writes are serialized (start_uio must be called with wlock)
	 */
	error = pipe_start_uio(wpipe, &wpipe->pipe_wip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (error);
	}

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.  We are write-serialized so we can block safely.
	 */
	if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (pipe_nbig < pipe_maxbig) &&
	    wpipe->pipe_wantwcnt > 4 &&
	    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
		/*
		 * Recheck after lock.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
		if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		    (pipe_nbig < pipe_maxbig) &&
		    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0) {
				++pipe_nbig;
				++pipe_bigcount;
			}
		}
		lwkt_reltoken(&rlock);
	}
	orig_resid = uio->uio_resid;

	while (uio->uio_resid) {
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}

		windex = wpipe->pipe_buffer.windex &
			 (wpipe->pipe_buffer.size - 1);
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * Write to fill, read size handles write hysteresis.  Also
		 * additional restrictions can cause select-based non-blocking
		 * writes to not work.
		 */
		if (space > 0) {
			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 *
			 * Limit each uiocopy to no more than PIPE_SIZE
			 * so we can keep the gravy train going on a
			 * SMP box.  This doubles the performance for
			 * write sizes > 16K.  Otherwise large writes
			 * wind up doing an inefficient synchronous
			 * ping-pong.
			 */
			if (space > (u_int)uio->uio_resid)
				space = (u_int)uio->uio_resid;
			if (space > PIPE_SIZE)
				space = PIPE_SIZE;

			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size - windex;
			if (segsize > space)
				segsize = space;

			/*
			 * If this is the first loop and the reader is
			 * blocked, do a preemptive wakeup of the reader.
			 *
			 * This works for both SMP and UP.  On SMP the IPI
			 * latency plus the wlock interlock on the reader
			 * side is the fastest way to get the reader going.
			 * (The scheduler will hard loop on lock tokens).
			 *
			 * NOTE: We can't clear WANTR here without acquiring
			 * the rlock, which we don't want to do here!
			 */
			if (wpipe->pipe_state & PIPE_WANTR)
				wakeup(wpipe);

			/*
			 * Transfer first segment
			 */
			error = uiomove(&wpipe->pipe_buffer.buffer[windex],
					segsize, uio);
			wpipe->pipe_buffer.windex += segsize;

			if (error == 0 && segsize < space) {
				/*
				 * Transfer remaining part now, to
				 * support atomic writes.  Wraparound
				 * happened.
				 */
				segsize = space - segsize;
				error = uiomove(&wpipe->pipe_buffer.buffer[0],
						segsize, uio);
				wpipe->pipe_buffer.windex += segsize;
			}
			if (error)
				break;
			continue;
		}
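		/*
		 * Illustrative example (not in the original source) of the
		 * two-segment wraparound case above, assuming a 16384-byte
		 * buffer:
		 *
		 *	windex (masked) = 16000, space = 1000
		 *	segsize         = 16384 - 16000 = 384 (contiguous)
		 *	second segment  = 1000 - 384    = 616 (to buffer[0])
		 *
		 * Because the "Writes of size <= PIPE_BUF must be atomic"
		 * check forced space to 0 unless the whole small write fits,
		 * a write of <= PIPE_BUF bytes is never split across a
		 * sleep, preserving POSIX atomicity.
		 */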
		/*
		 * We need both the rlock and the wlock to interlock against
		 * the EOF, WANTW, and size checks, and to modify pipe_state.
		 *
		 * These are token locks so we do not have to worry about
		 * deadlocks.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);

		/*
		 * If the "read-side" has been blocked, wake it up now
		 * and yield to let it drain synchronously rather
		 * than block.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		/*
		 * don't block on non-blocking I/O
		 */
		if (nbio) {
			lwkt_reltoken(&rlock);
			error = EAGAIN;
			break;
		}

		/*
		 * re-test whether we have to block in the writer after
		 * acquiring both locks, in case the reader opened up
		 * some space.
		 */
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * We have no more space and have something to offer,
		 * wake up select/poll.
		 */
		if (space == 0) {
			pipeselwakeup(wpipe);
			++wpipe->pipe_wantwcnt;
			wpipe->pipe_state |= PIPE_WANTW;
			error = tsleep(wpipe, PCATCH, "pipewr", 0);
		}
		lwkt_reltoken(&rlock);

		/*
		 * Break out if we errored or the read side wants us to go
		 * away.
		 */
		if (error)
			break;
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}
	}
	pipe_end_uio(wpipe, &wpipe->pipe_wip);
	/*
	 * If we have put any characters in the buffer, we wake up
	 * the reader.
	 *
	 * Both rlock and wlock are required to be able to modify pipe_state.
	 */
	if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				lwkt_reltoken(&rlock);
				wakeup(wpipe);
			} else {
				lwkt_reltoken(&rlock);
			}
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;
	lwkt_reltoken(&wlock);
	if (space)
		pipeselwakeup(wpipe);
	pipe_rel_mplock(&mpsave);
	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 *
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, struct ucred *cred)
{
	struct pipe *mpipe;
	int error;
	int mpsave;

	pipe_get_mplock(&mpsave);
	mpipe = (struct pipe *)fp->f_data;

	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		error = 0;
		break;
	case FIONREAD:
		*(int *)data = mpipe->pipe_buffer.windex -
				mpipe->pipe_buffer.rindex;
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	case TIOCSPGRP:
		/* This is deprecated, FIOSETOWN should be used instead. */
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		break;
	case TIOCGPGRP:
		/* This is deprecated, FIOGETOWN should be used instead. */
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	default:
		error = ENOTTY;
		break;
	}
	pipe_rel_mplock(&mpsave);
	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_poll(struct file *fp, int events, struct ucred *cred)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int revents = 0;
	u_int space;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;
	if (events & (POLLIN | POLLRDNORM)) {
		if ((rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex) ||
		    (rpipe->pipe_state & PIPE_REOF)) {
			revents |= events & (POLLIN | POLLRDNORM);
		}
	}

	if (events & (POLLOUT | POLLWRNORM)) {
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_WEOF)) {
			revents |= events & (POLLOUT | POLLWRNORM);
		} else {
			space = wpipe->pipe_buffer.windex -
				wpipe->pipe_buffer.rindex;
			space = wpipe->pipe_buffer.size - space;
			if (space >= PIPE_BUF)
				revents |= events & (POLLOUT | POLLWRNORM);
		}
	}

	if ((rpipe->pipe_state & PIPE_REOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_WEOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(curthread, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(curthread, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}
	pipe_rel_mplock(&mpsave);
	return (revents);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
	struct pipe *pipe;
	int mpsave;

	pipe_get_mplock(&mpsave);
	pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.windex - pipe->pipe_buffer.rindex;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
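	/*
	 * Illustrative example (not in the original source): st_blocks is
	 * a ceiling division.  With st_blksize = 16384 and 100 bytes
	 * pending, (100 + 16383) / 16384 = 1 block; with 0 bytes pending
	 * it is 0.
	 */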
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	pipe_rel_mplock(&mpsave);
	return (0);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_close(struct file *fp)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)fp->f_data;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(cpipe->pipe_sigio);
	pipeclose(cpipe);
	rel_mplock();
	return (0);
}
/*
 * Shutdown one or both directions of a full-duplex pipe.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_shutdown(struct file *fp, int how)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int error = EPIPE;
	lwkt_tokref rpipe_rlock;
	lwkt_tokref rpipe_wlock;
	lwkt_tokref wpipe_rlock;
	lwkt_tokref wpipe_wlock;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * We modify pipe_state on both pipes, which means we need
	 * all four tokens!
	 */
	lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock);
	lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock);
	lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock);
	lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock);

	switch(how) {
	case SHUT_RDWR:
	case SHUT_RD:
		rpipe->pipe_state |= PIPE_REOF;
		wpipe->pipe_state |= PIPE_WEOF;
		if (rpipe->pipe_state & PIPE_WANTR) {
			rpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(rpipe);
		}
		if (wpipe->pipe_state & PIPE_WANTW) {
			wpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(wpipe);
		}
		pipeselwakeup(rpipe);
		error = 0;
		if (how == SHUT_RD)
			break;
		/* fall through */
	case SHUT_WR:
		wpipe->pipe_state |= PIPE_WEOF;
		rpipe->pipe_state |= PIPE_REOF;
		if (wpipe->pipe_state & PIPE_WANTW) {
			wpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(wpipe);
		}
		if (rpipe->pipe_state & PIPE_WANTR) {
			rpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(rpipe);
		}
		pipeselwakeup(wpipe);
		error = 0;
		break;
	}

	lwkt_reltoken(&rpipe_rlock);
	lwkt_reltoken(&rpipe_wlock);
	lwkt_reltoken(&wpipe_rlock);
	lwkt_reltoken(&wpipe_wlock);

	pipe_rel_mplock(&mpsave);
	return (error);
}
static void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			--pipe_nbig;
		kmem_free(&kernel_map,
			  (vm_offset_t)cpipe->pipe_buffer.buffer,
			  cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
		cpipe->pipe_buffer.object = NULL;
	}
}
/*
 * Close the pipe.  The slock must be held to interlock against simultaneous
 * closes.  The rlock and wlock must be held to adjust the pipe_state.
 */
static void
pipeclose(struct pipe *cpipe)
{
	globaldata_t gd;
	struct pipe *ppipe;
	lwkt_tokref cpipe_rlock;
	lwkt_tokref cpipe_wlock;
	lwkt_tokref ppipe_rlock;
	lwkt_tokref ppipe_wlock;

	if (cpipe == NULL)
		return;

	/*
	 * The slock may not have been allocated yet (close during
	 * initialization).
	 *
	 * We need both the read and write tokens to modify pipe_state.
	 */
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE);
	lwkt_gettoken(&cpipe_rlock, &cpipe->pipe_rlock);
	lwkt_gettoken(&cpipe_wlock, &cpipe->pipe_wlock);

	/*
	 * Set our state, wakeup anyone waiting in select, and
	 * wakeup anyone blocked on our pipe.
	 */
	cpipe->pipe_state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF;
	pipeselwakeup(cpipe);
	if (cpipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
		cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
		wakeup(cpipe);
	}

	/*
	 * Disconnect from peer
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		lwkt_gettoken(&ppipe_rlock, &ppipe->pipe_rlock);
		lwkt_gettoken(&ppipe_wlock, &ppipe->pipe_wlock);
		ppipe->pipe_state |= PIPE_REOF;
		pipeselwakeup(ppipe);
		if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
			ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
			wakeup(ppipe);
		}
		if (SLIST_FIRST(&ppipe->pipe_sel.si_note)) {
			KNOTE(&ppipe->pipe_sel.si_note, 0);
		}
		lwkt_reltoken(&ppipe_rlock);
		lwkt_reltoken(&ppipe_wlock);
	}

	/*
	 * If the peer is also closed we can free resources for both
	 * sides, otherwise we leave our side intact to deal with any
	 * races (since we only have the slock).
	 */
	if (ppipe && (ppipe->pipe_state & PIPE_CLOSED)) {
		cpipe->pipe_peer = NULL;
		ppipe->pipe_peer = NULL;
		ppipe->pipe_slock = NULL;	/* we will free the slock */
		pipeclose(ppipe);
		ppipe = NULL;
	}

	lwkt_reltoken(&cpipe_rlock);
	lwkt_reltoken(&cpipe_wlock);
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_RELEASE);

	/*
	 * If we disassociated from our peer we can free resources
	 */
	if (ppipe == NULL) {
		gd = mycpu;
		if (cpipe->pipe_slock) {
			kfree(cpipe->pipe_slock, M_PIPE);
			cpipe->pipe_slock = NULL;
		}
		if (gd->gd_pipeqcount >= pipe_maxcache ||
		    cpipe->pipe_buffer.size != PIPE_SIZE
		) {
			pipe_free_kmem(cpipe);
			kfree(cpipe, M_PIPE);
		} else {
			cpipe->pipe_state = 0;
			cpipe->pipe_peer = gd->gd_pipeq;
			gd->gd_pipeq = cpipe;
			++gd->gd_pipeqcount;
		}
	}
}
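/*
 * Illustrative note (not in the original source): the per-cpu cache above
 * reuses pipe_peer as the freelist link, and pipe_create() pops from the
 * same list:
 *
 *	gd->gd_pipeq -> pipe A -> pipe B -> NULL   (linked via pipe_peer)
 *
 * Only default-sized (PIPE_SIZE) pipes are cached, so a cached pipe's
 * buffer can be reused as-is by pipe_create().
 */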
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	cpipe = (struct pipe *)kn->kn_fp->f_data;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL) {
			/* other end of pipe has been closed */
			return (EPIPE);
		}
		break;
	default:
		return (1);
	}
	kn->kn_hook = (caddr_t)cpipe;

	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	return (0);
}
static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;

	kn->kn_data = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;

	if (rpipe->pipe_state & PIPE_REOF) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;
	u_int space;

	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_WEOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	space = wpipe->pipe_buffer.windex -
		wpipe->pipe_buffer.rindex;
	space = wpipe->pipe_buffer.size - space;
	kn->kn_data = space;
	return (kn->kn_data >= PIPE_BUF);
}
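/*
 * Illustrative userland sketch (not part of this file): how the filters
 * above are reached.  Registering an EVFILT_READ knote on a pipe
 * descriptor goes through pipe_kqfilter(), and kevent() then reports
 * readability via filt_piperead() and writability via filt_pipewrite():
 *
 *	#include <sys/event.h>
 *	#include <unistd.h>
 *
 *	int fd[2], kq = kqueue();
 *	struct kevent ev, out;
 *
 *	pipe(fd);
 *	EV_SET(&ev, fd[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	(register the knote)
 *	write(fd[1], "x", 1);
 *	kevent(kq, NULL, 0, &out, 1, NULL);	(out.data == 1, the kn_data
 *						 computed in filt_piperead())
 */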