/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $
 */
/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>
#include <sys/signal2.h>

#include <machine/cpufunc.h>
/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
        struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
        struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_poll (struct file *fp, int events, struct ucred *cred);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data,
        struct ucred *cred, struct sysmsg *msg);
static struct fileops pipeops = {
    .fo_read = pipe_read,
    .fo_write = pipe_write,
    .fo_ioctl = pipe_ioctl,
    .fo_poll = pipe_poll,
    .fo_kqfilter = pipe_kqfilter,
    .fo_stat = pipe_stat,
    .fo_close = pipe_close,
    .fo_shutdown = pipe_shutdown
};
static void filt_pipedetach(struct knote *kn);
static int filt_piperead(struct knote *kn, long hint);
static int filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
    { 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
    { 1, NULL, filt_pipedetach, filt_pipewrite };
MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");

/*
 * Default pipe buffer size(s).  This can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)
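
/*
 * Illustrative numbers (not part of the original file): with the common
 * PIPE_SIZE default of 16384 bytes these work out to roughly
 *
 *	MINPIPESIZE = 16384/3   ~= 5461 bytes
 *	MAXPIPESIZE = 2*16384/3 ~= 10922 bytes
 *
 * bounding the amount of outstanding I/O the code tries to keep in the
 * buffer so small transfers stay cache-hot.
 */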

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	64
#define PIPEQ_MAX_CACHE 16      /* per-cpu pipe structure cache */
static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_bigcount;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
static int pipe_rblocked_count;
static int pipe_wblocked_count;
SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
        CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount,
        CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded");
SYSCTL_INT(_kern_pipe, OID_AUTO, rblocked,
        CTLFLAG_RW, &pipe_rblocked_count, 0, "number of times a reader blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, wblocked,
        CTLFLAG_RW, &pipe_wblocked_count, 0, "number of times a writer blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
        CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
        CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");
static int pipe_delay = 5000;	/* 5uS default */
SYSCTL_INT(_kern_pipe, OID_AUTO, delay,
        CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns");
static int pipe_mpsafe = 1;
SYSCTL_INT(_kern_pipe, OID_AUTO, mpsafe,
        CTLFLAG_RW, &pipe_mpsafe, 0, "");
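
/*
 * Illustrative usage (not part of the original file): the knobs defined
 * above appear under the kern.pipe sysctl node and can be inspected or
 * tuned at runtime with sysctl(8), e.g.
 *
 *	sysctl kern.pipe.delay		# show the current spin window (ns)
 *	sysctl kern.pipe.delay=4000	# poll up to 4uS before sleeping
 *	sysctl kern.pipe.maxcache=32	# enlarge the per-cpu pipe cache
 */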

#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
        CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
        CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif
static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline void pipeselwakeup (struct pipe *cpipe);
static int pipespace (struct pipe *cpipe, int size);
static __inline int
pipeseltest(struct pipe *cpipe)
{
    return ((cpipe->pipe_state & PIPE_SEL) ||
            ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) ||
            SLIST_FIRST(&cpipe->pipe_sel.si_note));
}
static __inline void
pipeselwakeup(struct pipe *cpipe)
{
    if (cpipe->pipe_state & PIPE_SEL) {
        get_mplock();
        cpipe->pipe_state &= ~PIPE_SEL;
        selwakeup(&cpipe->pipe_sel);
        rel_mplock();
    }
    if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) {
        get_mplock();
        pgsigio(cpipe->pipe_sigio, SIGIO, 0);
        rel_mplock();
    }
    if (SLIST_FIRST(&cpipe->pipe_sel.si_note)) {
        get_mplock();
        KNOTE(&cpipe->pipe_sel.si_note, 0);
        rel_mplock();
    }
}
/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The read token is held on entry so *ipp does not race.
 */
static __inline int
pipe_start_uio(struct pipe *cpipe, int *ipp)
{
    int error;

    while (*ipp) {
        *ipp = -1;
        error = tsleep(ipp, PCATCH, "pipexx", 0);
        if (error)
            return (error);
    }
    *ipp = 1;
    return (0);
}

static __inline void
pipe_end_uio(struct pipe *cpipe, int *ipp)
{
    if (*ipp < 0) {
        *ipp = 0;
        wakeup(ipp);
    } else {
        KKASSERT(*ipp > 0);
        *ipp = 0;
    }
}
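
/*
 * Illustrative sketch (not part of the original file): how the read path
 * uses the pair above to serialize readers.  pipe_rip is the read-side
 * in-progress counter; the error handling shown here is an assumption.
 */
#if 0
    error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
    if (error == 0) {
        /* ... uiomove() loop; held tokens may be lost while blocked ... */
        pipe_end_uio(rpipe, &rpipe->pipe_rip);
    }
#endif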
static __inline void
pipe_get_mplock(int *save)
{
    if (pipe_mpsafe == 0) {
        get_mplock();
        *save = 1;
    } else {
        *save = 0;
    }
}

static __inline void
pipe_rel_mplock(int *save)
{
    if (*save)
        rel_mplock();
}
/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */
int
sys_pipe(struct pipe_args *uap)
{
    struct thread *td = curthread;
    struct filedesc *fdp = td->td_proc->p_fd;
    struct file *rf, *wf;
    struct pipe *rpipe, *wpipe;
    int fd1, fd2, error;

    rpipe = wpipe = NULL;
    if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
        pipeclose(rpipe);
        pipeclose(wpipe);
        return (ENFILE);
    }

    error = falloc(td->td_lwp, &rf, &fd1);
    if (error) {
        pipeclose(rpipe);
        pipeclose(wpipe);
        return (error);
    }
    uap->sysmsg_fds[0] = fd1;

    /*
     * Warning: once we've gotten past allocation of the fd for the
     * read-side, we can only drop the read side via fdrop() in order
     * to avoid races against processes which manage to dup() the read
     * side while we are blocked trying to allocate the write side.
     */
    rf->f_type = DTYPE_PIPE;
    rf->f_flag = FREAD | FWRITE;
    rf->f_ops = &pipeops;
    rf->f_data = rpipe;
    error = falloc(td->td_lwp, &wf, &fd2);
    if (error) {
        fsetfd(fdp, NULL, fd1);
        fdrop(rf);
        /* rpipe has been closed by fdrop(). */
        pipeclose(wpipe);
        return (error);
    }
    wf->f_type = DTYPE_PIPE;
    wf->f_flag = FREAD | FWRITE;
    wf->f_ops = &pipeops;
    wf->f_data = wpipe;
    uap->sysmsg_fds[1] = fd2;

    rpipe->pipe_slock = kmalloc(sizeof(struct lock),
                                M_PIPE, M_WAITOK|M_ZERO);
    wpipe->pipe_slock = rpipe->pipe_slock;
    rpipe->pipe_peer = wpipe;
    wpipe->pipe_peer = rpipe;
    lockinit(rpipe->pipe_slock, "pipecl", 0, 0);

    /*
     * Once activated the peer relationship remains valid until
     * both sides are closed.
     */
    fsetfd(fdp, rf, fd1);
    fsetfd(fdp, wf, fd2);
    fdrop(rf);
    fdrop(wf);

    return (0);
}
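
/*
 * Illustrative userland view (not part of the original file): the two
 * descriptors installed above form a standard pipe(2) pair.
 */
#if 0
    int fds[2];
    char c;

    if (pipe(fds) == 0) {
        write(fds[1], "x", 1);	/* fd2: the write side */
        read(fds[0], &c, 1);	/* fd1: the read side */
    }
#endif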

/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely: if it fails
 * it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
    struct vm_object *object;
    caddr_t buffer;
    int npages, error;

    npages = round_page(size) / PAGE_SIZE;
    object = cpipe->pipe_buffer.object;

    /*
     * [re]create the object if necessary and reserve space for it
     * in the kernel_map.  The object and memory are pageable.  On
     * success, free the old resources before assigning the new
     * ones.
     */
    if (object == NULL || object->size != npages) {
        get_mplock();
        object = vm_object_allocate(OBJT_DEFAULT, npages);
        buffer = (caddr_t)vm_map_min(&kernel_map);

        error = vm_map_find(&kernel_map, object, 0,
                            (vm_offset_t *)&buffer, size,
                            1, VM_MAPTYPE_NORMAL,
                            VM_PROT_ALL, VM_PROT_ALL,
                            0);
        if (error != KERN_SUCCESS) {
            vm_object_deallocate(object);
            rel_mplock();
            return (ENOMEM);
        }
        pipe_free_kmem(cpipe);
        rel_mplock();
        cpipe->pipe_buffer.object = object;
        cpipe->pipe_buffer.buffer = buffer;
        cpipe->pipe_buffer.size = size;
    }
    cpipe->pipe_buffer.rindex = 0;
    cpipe->pipe_buffer.windex = 0;
    return (0);
}
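
/*
 * Illustrative sketch (not part of the original file): rindex and windex
 * are free-running counters.  Because the buffer size is a power of 2,
 * masking with (size - 1) converts a counter into a buffer offset, and
 * unsigned subtraction yields the FIFO depth even after the counters
 * wrap.  Variable names here are local to the sketch.
 */
#if 0
    u_int avail  = windex - rindex;		/* bytes in the FIFO */
    u_int offset = rindex & (size - 1);		/* physical read offset */
    u_int contig = size - offset;		/* contiguous readable run */
#endif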

/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
    globaldata_t gd = mycpu;
    struct pipe *cpipe;
    int error;

    if ((cpipe = gd->gd_pipeq) != NULL) {
        gd->gd_pipeq = cpipe->pipe_peer;
        --gd->gd_pipeqcount;
        cpipe->pipe_peer = NULL;
        cpipe->pipe_wantwcnt = 0;
    } else {
        cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
    }
    *cpipep = cpipe;
    if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
        return (error);
    vfs_timestamp(&cpipe->pipe_ctime);
    cpipe->pipe_atime = cpipe->pipe_ctime;
    cpipe->pipe_mtime = cpipe->pipe_ctime;
    lwkt_token_init(&cpipe->pipe_rlock);
    lwkt_token_init(&cpipe->pipe_wlock);
    return (0);
}
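
/*
 * Illustrative note (not part of the original file): the per-cpu cache is
 * a singly-linked freelist threaded through pipe_peer,
 *
 *	gd->gd_pipeq -> pipe -> pipe -> NULL	(at most pipe_maxcache deep)
 *
 * pipeclose() pushes structures back onto this list, so pipe-heavy
 * workloads mostly bypass kmalloc()/kfree().
 */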

/*
 * MPALMOSTSAFE (acquires mplock)
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
    struct pipe *rpipe;
    int error;
    size_t nread = 0;
    int nbio;
    u_int size;		/* total bytes available */
    u_int nsize;	/* total bytes to read */
    u_int rindex;	/* contiguous bytes available */
    int notify_writer;
    lwkt_tokref rlock;
    lwkt_tokref wlock;
    int mpsave;
    int bigread;
    int bigcount;

    if (uio->uio_resid == 0)
        return(0);

    /*
     * Setup locks, calculate nbio
     */
    pipe_get_mplock(&mpsave);
    rpipe = (struct pipe *)fp->f_data;
    lwkt_gettoken(&rlock, &rpipe->pipe_rlock);

    if (fflags & O_FBLOCKING)
        nbio = 0;
    else if (fflags & O_FNONBLOCKING)
        nbio = 1;
    else if (fp->f_flag & O_NONBLOCK)
        nbio = 1;
    else
        nbio = 0;

    /*
     * Reads are serialized.  Note however that pipe_buffer.buffer and
     * pipe_buffer.size can change out from under us when the number
     * of bytes in the buffer is zero due to the write-side doing a
     * pipespace().
     */
    error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
    if (error) {
        pipe_rel_mplock(&mpsave);
        lwkt_reltoken(&rlock);
        return (error);
    }
    notify_writer = 0;

    bigread = (uio->uio_resid > 10 * 1024 * 1024);
    bigcount = 10;

    while (uio->uio_resid) {
        /*
         * Don't hog the cpu on large reads.
         */
        if (bigread && --bigcount == 0) {
            bigcount = 10;
            uio_yield();
            if (CURSIG(curthread->td_lwp)) {
                error = EINTR;
                break;
            }
        }

        size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
        cpu_lfence();
        if (size) {
            rindex = rpipe->pipe_buffer.rindex &
                     (rpipe->pipe_buffer.size - 1);
            nsize = size;
            if (nsize > rpipe->pipe_buffer.size - rindex)
                nsize = rpipe->pipe_buffer.size - rindex;
            nsize = szmin(nsize, uio->uio_resid);

            error = uiomove(&rpipe->pipe_buffer.buffer[rindex],
                            nsize, uio);
            if (error)
                break;
            cpu_mfence();
            rpipe->pipe_buffer.rindex += nsize;
            nread += nsize;

            /*
             * If the FIFO is still over half full just continue
             * and do not try to notify the writer yet.
             */
            if (size - nsize >= (rpipe->pipe_buffer.size >> 1)) {
                notify_writer = 0;
                continue;
            }

            /*
             * When the FIFO is less than half full notify any
             * waiting writer.  WANTW can be checked while
             * holding just the rlock.
             */
            notify_writer = 1;
            if ((rpipe->pipe_state & PIPE_WANTW) == 0)
                continue;
        }

        /*
         * If the "write-side" was blocked we wake it up.  This code
         * is reached either when the buffer is completely emptied
         * or if it becomes more than half-empty.
         *
         * Pipe_state can only be modified if both the rlock and
         * wlock are held.
         */
        if (rpipe->pipe_state & PIPE_WANTW) {
            lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
            if (rpipe->pipe_state & PIPE_WANTW) {
                notify_writer = 0;
                rpipe->pipe_state &= ~PIPE_WANTW;
                lwkt_reltoken(&wlock);
                wakeup(rpipe);
            } else {
                lwkt_reltoken(&wlock);
            }
        }

        /*
         * Pick up our copy loop again if the writer sent data to
         * us while we were messing around.
         *
         * On an SMP box poll up to pipe_delay nanoseconds for new
         * data.  Typically a value of 2000 to 4000 is sufficient
         * to eradicate most IPIs/tsleeps/wakeups when a pipe
         * is used for synchronous communications with small packets,
         * and 8000 or so (8uS) will pipeline large buffer xfers
         * between cpus over a pipe.
         *
         * For synchronous communications a hit means doing a
         * full Awrite-Bread-Bwrite-Aread cycle in less than 2uS,
         * whereas a miss requiring a tsleep/wakeup sequence
         * will take 7uS or more.
         */
        if (rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex)
            continue;

#if defined(SMP) && defined(_RDTSC_SUPPORTED_)
        if (pipe_delay) {
            int64_t tsc_target;
            int good = 0;

            tsc_target = tsc_get_target(pipe_delay);
            while (tsc_test_target(tsc_target) == 0) {
                if (rpipe->pipe_buffer.windex !=
                    rpipe->pipe_buffer.rindex) {
                    good = 1;
                    break;
                }
            }
            if (good)
                continue;
        }
#endif

        /*
         * Detect EOF condition, do not set error.
         */
        if (rpipe->pipe_state & PIPE_REOF)
            break;

        /*
         * Break if some data was read, or if this was a non-blocking
         * read.
         */
        if (nread > 0)
            break;
        if (nbio) {
            error = EAGAIN;
            break;
        }

        /*
         * Last chance, interlock with WANTR.
         */
        lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
        size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
        if (size) {
            lwkt_reltoken(&wlock);
            continue;
        }

        /*
         * Retest EOF - acquiring a new token can temporarily release
         * tokens already held.
         */
        if (rpipe->pipe_state & PIPE_REOF) {
            lwkt_reltoken(&wlock);
            break;
        }

        /*
         * If there is no more to read in the pipe, reset its
         * pointers to the beginning.  This improves cache hit
         * stats.
         *
         * We need both locks to modify both pointers, and there
         * must also not be a write in progress or the uiomove()
         * in the write might block and temporarily release
         * its wlock, then reacquire and update windex.  We are
         * only serialized against reads, not writes.
         *
         * XXX should we even bother resetting the indices?  It
         * might actually be more cache efficient not to.
         */
        if (rpipe->pipe_buffer.rindex == rpipe->pipe_buffer.windex &&
            rpipe->pipe_wip == 0) {
            rpipe->pipe_buffer.rindex = 0;
            rpipe->pipe_buffer.windex = 0;
        }

        /*
         * Wait for more data.
         *
         * Pipe_state can only be set if both the rlock and wlock
         * are held.
         */
        rpipe->pipe_state |= PIPE_WANTR;
        tsleep_interlock(rpipe, PCATCH);
        lwkt_reltoken(&wlock);
        error = tsleep(rpipe, PCATCH | PINTERLOCKED, "piperd", 0);
        ++pipe_rblocked_count;
        if (error)
            break;
    }
    pipe_end_uio(rpipe, &rpipe->pipe_rip);

    /*
     * Update last access time.
     */
    if (error == 0 && nread)
        vfs_timestamp(&rpipe->pipe_atime);

    /*
     * If we drained the FIFO more than half way then handle
     * write blocking hysteresis.
     *
     * Note that PIPE_WANTW cannot be set by the writer without
     * it holding both rlock and wlock, so we can test it
     * while holding just rlock.
     */
    if (notify_writer) {
        if (rpipe->pipe_state & PIPE_WANTW) {
            lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
            if (rpipe->pipe_state & PIPE_WANTW) {
                rpipe->pipe_state &= ~PIPE_WANTW;
                lwkt_reltoken(&wlock);
                wakeup(rpipe);
            } else {
                lwkt_reltoken(&wlock);
            }
        }
        if (pipeseltest(rpipe)) {
            lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
            pipeselwakeup(rpipe);
            lwkt_reltoken(&wlock);
        }
    }
    /*size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;*/
    lwkt_reltoken(&rlock);
    pipe_rel_mplock(&mpsave);

    return (error);
}
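
/*
 * Illustrative sketch (not part of the original file): the interlocked
 * sleep pattern used in the read loop above.  tsleep_interlock()
 * registers the thread on the wait queue before the wlock token is
 * released, so a wakeup() arriving between lwkt_reltoken() and tsleep()
 * is not lost.
 */
#if 0
    rpipe->pipe_state |= PIPE_WANTR;
    tsleep_interlock(rpipe, PCATCH);
    lwkt_reltoken(&wlock);
    error = tsleep(rpipe, PCATCH | PINTERLOCKED, "piperd", 0);
#endif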

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
    int error;
    int orig_resid;
    int nbio;
    struct pipe *wpipe, *rpipe;
    lwkt_tokref rlock;
    lwkt_tokref wlock;
    u_int windex;
    u_int space;
    int mpsave;
    int bigwrite;
    int bigcount;

    pipe_get_mplock(&mpsave);

    /*
     * Writes go to the peer.  The peer will always exist.
     */
    rpipe = (struct pipe *) fp->f_data;
    wpipe = rpipe->pipe_peer;
    lwkt_gettoken(&wlock, &wpipe->pipe_wlock);
    if (wpipe->pipe_state & PIPE_WEOF) {
        pipe_rel_mplock(&mpsave);
        lwkt_reltoken(&wlock);
        return (EPIPE);
    }

    /*
     * Degenerate case (EPIPE takes prec)
     */
    if (uio->uio_resid == 0) {
        pipe_rel_mplock(&mpsave);
        lwkt_reltoken(&wlock);
        return(0);
    }

    /*
     * Writes are serialized (start_uio must be called with wlock)
     */
    error = pipe_start_uio(wpipe, &wpipe->pipe_wip);
    if (error) {
        pipe_rel_mplock(&mpsave);
        lwkt_reltoken(&wlock);
        return (error);
    }

    if (fflags & O_FBLOCKING)
        nbio = 0;
    else if (fflags & O_FNONBLOCKING)
        nbio = 1;
    else if (fp->f_flag & O_NONBLOCK)
        nbio = 1;
    else
        nbio = 0;

    /*
     * If it is advantageous to resize the pipe buffer, do
     * so.  We are write-serialized so we can block safely.
     */
    if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
        (pipe_nbig < pipe_maxbig) &&
        wpipe->pipe_wantwcnt > 4 &&
        (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
        /*
         * Recheck after lock.
         */
        lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
        if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
            (pipe_nbig < pipe_maxbig) &&
            (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
            atomic_add_int(&pipe_nbig, 1);
            if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
                ++pipe_bigcount;
            else
                atomic_subtract_int(&pipe_nbig, 1);
        }
        lwkt_reltoken(&rlock);
    }

    orig_resid = uio->uio_resid;

    bigwrite = (uio->uio_resid > 10 * 1024 * 1024);
    bigcount = 10;

    while (uio->uio_resid) {
        if (wpipe->pipe_state & PIPE_WEOF) {
            error = EPIPE;
            break;
        }

        /*
         * Don't hog the cpu on large writes.
         */
        if (bigwrite && --bigcount == 0) {
            bigcount = 10;
            uio_yield();
            if (CURSIG(curthread->td_lwp)) {
                error = EINTR;
                break;
            }
        }

        windex = wpipe->pipe_buffer.windex &
                 (wpipe->pipe_buffer.size - 1);
        space = wpipe->pipe_buffer.size -
                (wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
        cpu_lfence();

        /* Writes of size <= PIPE_BUF must be atomic. */
        if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
            space = 0;

        /*
         * Write to fill, read size handles write hysteresis.  Also
         * additional restrictions can cause select-based non-blocking
         * writes to spin.
         */
        if (space > 0) {
            u_int segsize;

            /*
             * Transfer size is minimum of uio transfer
             * and free space in pipe buffer.
             *
             * Limit each uiocopy to no more than PIPE_SIZE
             * so we can keep the gravy train going on a
             * SMP box.  This doubles the performance for
             * write sizes > 16K.  Otherwise large writes
             * wind up doing an inefficient synchronous
             * ping-pong.
             */
            space = szmin(space, uio->uio_resid);
            if (space > PIPE_SIZE)
                space = PIPE_SIZE;

            /*
             * First segment to transfer is minimum of
             * transfer size and contiguous space in
             * pipe buffer.  If first segment to transfer
             * is less than the transfer size, we've got
             * a wraparound in the buffer.
             */
            segsize = wpipe->pipe_buffer.size - windex;
            if (segsize > space)
                segsize = space;

            /*
             * If this is the first loop and the reader is
             * blocked, do a preemptive wakeup of the reader.
             *
             * On SMP the IPI latency plus the wlock interlock
             * on the reader side is the fastest way to get the
             * reader going.  (The scheduler will hard loop on
             * lock tokens).
             *
             * NOTE: We can't clear WANTR here without acquiring
             * the rlock, which we don't want to do here!
             */
            if ((wpipe->pipe_state & PIPE_WANTR) && pipe_mpsafe > 1)
                wakeup(wpipe);

            /*
             * Transfer segment, which may include a wrap-around.
             * Update windex to account for both all in one go
             * so the reader can read() the data atomically.
             */
            error = uiomove(&wpipe->pipe_buffer.buffer[windex],
                            segsize, uio);
            if (error == 0 && segsize < space) {
                segsize = space - segsize;
                error = uiomove(&wpipe->pipe_buffer.buffer[0],
                                segsize, uio);
            }
            if (error)
                break;
            cpu_mfence();
            wpipe->pipe_buffer.windex += space;
            continue;
        }

        /*
         * We need both the rlock and the wlock to interlock against
         * the EOF, WANTW, and size checks, and to modify pipe_state.
         *
         * These are token locks so we do not have to worry about
         * deadlocks.
         */
        lwkt_gettoken(&rlock, &wpipe->pipe_rlock);

        /*
         * If the "read-side" has been blocked, wake it up now
         * and yield to let it drain synchronously rather
         * than block.
         */
        if (wpipe->pipe_state & PIPE_WANTR) {
            wpipe->pipe_state &= ~PIPE_WANTR;
            wakeup(wpipe);
        }

        /*
         * don't block on non-blocking I/O
         */
        if (nbio) {
            lwkt_reltoken(&rlock);
            error = EAGAIN;
            break;
        }

        /*
         * re-test whether we have to block in the writer after
         * acquiring both locks, in case the reader opened up
         * some space.
         */
        space = wpipe->pipe_buffer.size -
                (wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
        cpu_lfence();
        if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
            space = 0;

        /*
         * Retest EOF - acquiring a new token can temporarily release
         * tokens already held.
         */
        if (wpipe->pipe_state & PIPE_WEOF) {
            lwkt_reltoken(&rlock);
            error = EPIPE;
            break;
        }

        /*
         * We have no more space and have something to offer,
         * wake up select/poll.
         */
        if (space == 0) {
            wpipe->pipe_state |= PIPE_WANTW;
            ++wpipe->pipe_wantwcnt;
            pipeselwakeup(wpipe);
            if (wpipe->pipe_state & PIPE_WANTW)
                error = tsleep(wpipe, PCATCH, "pipewr", 0);
            ++pipe_wblocked_count;
        }
        lwkt_reltoken(&rlock);

        /*
         * Break out if we errored or the read side wants us to go
         * away.
         */
        if (error)
            break;
        if (wpipe->pipe_state & PIPE_WEOF) {
            error = EPIPE;
            break;
        }
    }
    pipe_end_uio(wpipe, &wpipe->pipe_wip);

    /*
     * If we have put any characters in the buffer, we wake up
     * the reader.
     *
     * Both rlock and wlock are required to be able to modify pipe_state.
     */
    if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) {
        if (wpipe->pipe_state & PIPE_WANTR) {
            lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
            if (wpipe->pipe_state & PIPE_WANTR) {
                wpipe->pipe_state &= ~PIPE_WANTR;
                lwkt_reltoken(&rlock);
                wakeup(wpipe);
            } else {
                lwkt_reltoken(&rlock);
            }
        }
        if (pipeseltest(wpipe)) {
            lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
            pipeselwakeup(wpipe);
            lwkt_reltoken(&rlock);
        }
    }

    /*
     * Don't return EPIPE if I/O was successful
     */
    if ((wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex) &&
        (uio->uio_resid == 0) &&
        (error == EPIPE)) {
        error = 0;
    }

    if (error == 0)
        vfs_timestamp(&wpipe->pipe_mtime);

    /*
     * We have something to offer,
     * wake up select/poll.
     */
    /*space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;*/
    lwkt_reltoken(&wlock);
    pipe_rel_mplock(&mpsave);
    return (error);
}
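
/*
 * Illustrative sketch (not part of the original file): the two-segment
 * copy used in the write loop above.  A transfer that crosses the end of
 * the circular buffer is split into two uiomove() calls, and windex is
 * published only after both segments land so the reader observes the
 * write atomically.  Names mirror the locals in pipe_write(); "buffer"
 * and "size" abbreviate the wpipe->pipe_buffer fields.
 */
#if 0
    segsize = size - windex;			/* contiguous space at the end */
    if (segsize > space)
        segsize = space;
    error = uiomove(&buffer[windex], segsize, uio);
    if (error == 0 && segsize < space)		/* wrapped: copy the remainder */
        error = uiomove(&buffer[0], space - segsize, uio);
    if (error == 0)
        wpipe->pipe_buffer.windex += space;	/* publish both at once */
#endif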

/*
 * MPALMOSTSAFE - acquires mplock
 *
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data,
           struct ucred *cred, struct sysmsg *msg)
{
    struct pipe *mpipe;
    lwkt_tokref rlock;
    lwkt_tokref wlock;
    int error;
    int mpsave;

    pipe_get_mplock(&mpsave);
    mpipe = (struct pipe *)fp->f_data;

    lwkt_gettoken(&rlock, &mpipe->pipe_rlock);
    lwkt_gettoken(&wlock, &mpipe->pipe_wlock);

    switch (cmd) {
    case FIOASYNC:
        if (*(int *)data) {
            mpipe->pipe_state |= PIPE_ASYNC;
        } else {
            mpipe->pipe_state &= ~PIPE_ASYNC;
        }
        error = 0;
        break;
    case FIONREAD:
        *(int *)data = mpipe->pipe_buffer.windex -
                       mpipe->pipe_buffer.rindex;
        error = 0;
        break;
    case FIOSETOWN:
        get_mplock();
        error = fsetown(*(int *)data, &mpipe->pipe_sigio);
        rel_mplock();
        break;
    case FIOGETOWN:
        *(int *)data = fgetown(mpipe->pipe_sigio);
        error = 0;
        break;
    case TIOCSPGRP:
        /* This is deprecated, FIOSETOWN should be used instead. */
        get_mplock();
        error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
        rel_mplock();
        break;
    case TIOCGPGRP:
        /* This is deprecated, FIOGETOWN should be used instead. */
        *(int *)data = -fgetown(mpipe->pipe_sigio);
        error = 0;
        break;
    default:
        error = ENOTTY;
        break;
    }
    lwkt_reltoken(&rlock);
    lwkt_reltoken(&wlock);
    pipe_rel_mplock(&mpsave);

    return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 *
 * poll for events (helper)
 */
static int
pipe_poll_events(struct pipe *rpipe, struct pipe *wpipe, int events)
{
    int revents = 0;
    u_int space;

    if (events & (POLLIN | POLLRDNORM)) {
        if ((rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex) ||
            (rpipe->pipe_state & PIPE_REOF)) {
            revents |= events & (POLLIN | POLLRDNORM);
        }
    }

    if (events & (POLLOUT | POLLWRNORM)) {
        if (wpipe == NULL || (wpipe->pipe_state & PIPE_WEOF)) {
            revents |= events & (POLLOUT | POLLWRNORM);
        } else {
            space = wpipe->pipe_buffer.windex -
                    wpipe->pipe_buffer.rindex;
            space = wpipe->pipe_buffer.size - space;
            if (space >= PIPE_BUF)
                revents |= events & (POLLOUT | POLLWRNORM);
        }
    }

    if ((rpipe->pipe_state & PIPE_REOF) ||
        (wpipe == NULL) ||
        (wpipe->pipe_state & PIPE_WEOF)) {
        revents |= POLLHUP;
    }
    return (revents);
}

/*
 * Poll for events from file pointer.
 */
static int
pipe_poll(struct file *fp, int events, struct ucred *cred)
{
    lwkt_tokref rpipe_rlock;
    lwkt_tokref rpipe_wlock;
    lwkt_tokref wpipe_rlock;
    lwkt_tokref wpipe_wlock;
    struct pipe *rpipe;
    struct pipe *wpipe;
    int revents = 0;
    int mpsave;

    pipe_get_mplock(&mpsave);
    rpipe = (struct pipe *)fp->f_data;
    wpipe = rpipe->pipe_peer;

    revents = pipe_poll_events(rpipe, wpipe, events);
    if (revents == 0) {
        if (events & (POLLIN | POLLRDNORM)) {
            lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock);
            lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock);
        }
        if (events & (POLLOUT | POLLWRNORM)) {
            lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock);
            lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock);
        }
        revents = pipe_poll_events(rpipe, wpipe, events);
        if (revents == 0) {
            if (events & (POLLIN | POLLRDNORM)) {
                selrecord(curthread, &rpipe->pipe_sel);
                rpipe->pipe_state |= PIPE_SEL;
            }

            if (events & (POLLOUT | POLLWRNORM)) {
                selrecord(curthread, &wpipe->pipe_sel);
                wpipe->pipe_state |= PIPE_SEL;
            }
        }
        if (events & (POLLIN | POLLRDNORM)) {
            lwkt_reltoken(&rpipe_rlock);
            lwkt_reltoken(&rpipe_wlock);
        }
        if (events & (POLLOUT | POLLWRNORM)) {
            lwkt_reltoken(&wpipe_rlock);
            lwkt_reltoken(&wpipe_wlock);
        }
    }
    pipe_rel_mplock(&mpsave);
    return (revents);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
    struct pipe *pipe;
    int mpsave;

    pipe_get_mplock(&mpsave);
    pipe = (struct pipe *)fp->f_data;

    bzero((caddr_t)ub, sizeof(*ub));
    ub->st_mode = S_IFIFO;
    ub->st_blksize = pipe->pipe_buffer.size;
    ub->st_size = pipe->pipe_buffer.windex - pipe->pipe_buffer.rindex;
    ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
    ub->st_atimespec = pipe->pipe_atime;
    ub->st_mtimespec = pipe->pipe_mtime;
    ub->st_ctimespec = pipe->pipe_ctime;
    /*
     * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
     * st_flags, st_gen.
     * XXX (st_dev, st_ino) should be unique.
     */
    pipe_rel_mplock(&mpsave);
    return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_close(struct file *fp)
{
    struct pipe *cpipe;

    get_mplock();
    cpipe = (struct pipe *)fp->f_data;
    fp->f_ops = &badfileops;
    fp->f_data = NULL;
    funsetown(cpipe->pipe_sigio);
    pipeclose(cpipe);
    rel_mplock();
    return (0);
}

/*
 * Shutdown one or both directions of a full-duplex pipe.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_shutdown(struct file *fp, int how)
{
    struct pipe *rpipe;
    struct pipe *wpipe;
    int error = EPIPE;
    lwkt_tokref rpipe_rlock;
    lwkt_tokref rpipe_wlock;
    lwkt_tokref wpipe_rlock;
    lwkt_tokref wpipe_wlock;
    int mpsave;

    pipe_get_mplock(&mpsave);
    rpipe = (struct pipe *)fp->f_data;
    wpipe = rpipe->pipe_peer;

    /*
     * We modify pipe_state on both pipes, which means we need
     * all four tokens!
     */
    lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock);
    lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock);
    lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock);
    lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock);

    switch(how) {
    case SHUT_RDWR:
    case SHUT_RD:
        rpipe->pipe_state |= PIPE_REOF;		/* my reads */
        rpipe->pipe_state |= PIPE_WEOF;		/* peer writes */
        if (rpipe->pipe_state & PIPE_WANTR) {
            rpipe->pipe_state &= ~PIPE_WANTR;
            wakeup(rpipe);
        }
        if (rpipe->pipe_state & PIPE_WANTW) {
            rpipe->pipe_state &= ~PIPE_WANTW;
            wakeup(rpipe);
        }
        error = 0;
        if (how == SHUT_RD)
            break;
        /* fall through */
    case SHUT_WR:
        wpipe->pipe_state |= PIPE_REOF;		/* peer reads */
        wpipe->pipe_state |= PIPE_WEOF;		/* my writes */
        if (wpipe->pipe_state & PIPE_WANTR) {
            wpipe->pipe_state &= ~PIPE_WANTR;
            wakeup(wpipe);
        }
        if (wpipe->pipe_state & PIPE_WANTW) {
            wpipe->pipe_state &= ~PIPE_WANTW;
            wakeup(wpipe);
        }
        error = 0;
        break;
    }
    pipeselwakeup(rpipe);
    pipeselwakeup(wpipe);

    lwkt_reltoken(&rpipe_rlock);
    lwkt_reltoken(&rpipe_wlock);
    lwkt_reltoken(&wpipe_rlock);
    lwkt_reltoken(&wpipe_wlock);

    pipe_rel_mplock(&mpsave);
    return (error);
}

static void
pipe_free_kmem(struct pipe *cpipe)
{
    if (cpipe->pipe_buffer.buffer != NULL) {
        if (cpipe->pipe_buffer.size > PIPE_SIZE)
            atomic_subtract_int(&pipe_nbig, 1);
        kmem_free(&kernel_map,
                  (vm_offset_t)cpipe->pipe_buffer.buffer,
                  cpipe->pipe_buffer.size);
        cpipe->pipe_buffer.buffer = NULL;
        cpipe->pipe_buffer.object = NULL;
    }
}

/*
 * Close the pipe.  The slock must be held to interlock against simultaneous
 * closes.  The rlock and wlock must be held to adjust the pipe_state.
 */
static void
pipeclose(struct pipe *cpipe)
{
    globaldata_t gd;
    struct pipe *ppipe;
    lwkt_tokref cpipe_rlock;
    lwkt_tokref cpipe_wlock;
    lwkt_tokref ppipe_rlock;
    lwkt_tokref ppipe_wlock;

    if (cpipe == NULL)
        return;

    /*
     * The slock may not have been allocated yet (close during
     * initialization)
     *
     * We need both the read and write tokens to modify pipe_state.
     */
    if (cpipe->pipe_slock)
        lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE);
    lwkt_gettoken(&cpipe_rlock, &cpipe->pipe_rlock);
    lwkt_gettoken(&cpipe_wlock, &cpipe->pipe_wlock);

    /*
     * Set our state, wakeup anyone waiting in select, and
     * wakeup anyone blocked on our pipe.
     */
    cpipe->pipe_state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF;
    pipeselwakeup(cpipe);
    if (cpipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
        cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
        wakeup(cpipe);
    }

    /*
     * Disconnect from peer.
     */
    if ((ppipe = cpipe->pipe_peer) != NULL) {
        lwkt_gettoken(&ppipe_rlock, &ppipe->pipe_rlock);
        lwkt_gettoken(&ppipe_wlock, &ppipe->pipe_wlock);
        ppipe->pipe_state |= PIPE_REOF | PIPE_WEOF;
        pipeselwakeup(ppipe);
        if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
            ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
            wakeup(ppipe);
        }
        if (SLIST_FIRST(&ppipe->pipe_sel.si_note)) {
            get_mplock();
            KNOTE(&ppipe->pipe_sel.si_note, 0);
            rel_mplock();
        }
        lwkt_reltoken(&ppipe_rlock);
        lwkt_reltoken(&ppipe_wlock);
    }

    /*
     * If the peer is also closed we can free resources for both
     * sides, otherwise we leave our side intact to deal with any
     * races (since we only have the slock).
     */
    if (ppipe && (ppipe->pipe_state & PIPE_CLOSED)) {
        cpipe->pipe_peer = NULL;
        ppipe->pipe_peer = NULL;
        ppipe->pipe_slock = NULL;	/* we will free the slock */
        pipeclose(ppipe);
        ppipe = NULL;
    }

    lwkt_reltoken(&cpipe_rlock);
    lwkt_reltoken(&cpipe_wlock);
    if (cpipe->pipe_slock)
        lockmgr(cpipe->pipe_slock, LK_RELEASE);

    /*
     * If we disassociated from our peer we can free resources
     */
    if (ppipe == NULL) {
        gd = mycpu;
        if (cpipe->pipe_slock) {
            kfree(cpipe->pipe_slock, M_PIPE);
            cpipe->pipe_slock = NULL;
        }
        if (gd->gd_pipeqcount >= pipe_maxcache ||
            cpipe->pipe_buffer.size != PIPE_SIZE) {
            pipe_free_kmem(cpipe);
            kfree(cpipe, M_PIPE);
        } else {
            cpipe->pipe_state = 0;
            cpipe->pipe_peer = gd->gd_pipeq;
            gd->gd_pipeq = cpipe;
            ++gd->gd_pipeqcount;
        }
    }
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
    struct pipe *cpipe;

    get_mplock();
    cpipe = (struct pipe *)kn->kn_fp->f_data;

    switch (kn->kn_filter) {
    case EVFILT_READ:
        kn->kn_fop = &pipe_rfiltops;
        break;
    case EVFILT_WRITE:
        kn->kn_fop = &pipe_wfiltops;
        cpipe = cpipe->pipe_peer;
        if (cpipe == NULL) {
            /* other end of pipe has been closed */
            rel_mplock();
            return (EPIPE);
        }
        break;
    default:
        rel_mplock();
        return (1);
    }
    kn->kn_hook = (caddr_t)cpipe;

    SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
    rel_mplock();
    return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
    struct pipe *cpipe = (struct pipe *)kn->kn_hook;

    SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
    struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;

    kn->kn_data = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;

    /* XXX RACE */
    if (rpipe->pipe_state & PIPE_REOF) {
        kn->kn_flags |= EV_EOF;
        return (1);
    }
    return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
    struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
    struct pipe *wpipe = rpipe->pipe_peer;
    u_int space;

    /* XXX RACE */
    if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_WEOF)) {
        kn->kn_data = 0;
        kn->kn_flags |= EV_EOF;
        return (1);
    }
    space = wpipe->pipe_buffer.windex -
            wpipe->pipe_buffer.rindex;
    space = wpipe->pipe_buffer.size - space;
    kn->kn_data = space;
    return (kn->kn_data >= PIPE_BUF);
}
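
/*
 * Illustrative userland sketch (not part of the original file): how the
 * filters above are typically exercised through kqueue(2)/kevent(2).
 * After the wait returns, kev.data carries kn_data as computed by
 * filt_piperead().  fds[0] is assumed to be the read side of a pipe.
 */
#if 0
    struct kevent kev;
    int kq = kqueue();

    EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
    kevent(kq, &kev, 1, NULL, 0, NULL);		/* register the filter */
    kevent(kq, NULL, 0, &kev, 1, NULL);		/* wait for readable data */
#endif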