/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>
#include <sys/signal2.h>
#include <sys/mplock2.h>

#include <machine/cpufunc.h>
/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
                struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
                struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data,
                struct ucred *cred, struct sysmsg *msg);
static struct fileops pipeops = {
        .fo_read = pipe_read,
        .fo_write = pipe_write,
        .fo_ioctl = pipe_ioctl,
        .fo_kqfilter = pipe_kqfilter,
        .fo_stat = pipe_stat,
        .fo_close = pipe_close,
        .fo_shutdown = pipe_shutdown
};
static void filt_pipedetach(struct knote *kn);
static int filt_piperead(struct knote *kn, long hint);
static int filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
        { 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
        { 1, NULL, filt_pipedetach, filt_pipewrite };

MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");
/*
 * Default pipe buffer size(s), this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)
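
/*
 * Worked numbers for the hysteresis above (illustrative, assuming the
 * usual PIPE_SIZE of 16384): MINPIPESIZE is ~5461 bytes and MAXPIPESIZE
 * is ~10922 bytes, so a steady producer/consumer pair settles into a
 * rhythm instead of trading a wakeup on every byte.
 */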
/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES   64
#define PIPEQ_MAX_CACHE 16      /* per-cpu pipe structure cache */
static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_bigcount;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
static int pipe_rblocked_count;
static int pipe_wblocked_count;
SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
        CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount,
        CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded");
SYSCTL_INT(_kern_pipe, OID_AUTO, rblocked,
        CTLFLAG_RW, &pipe_rblocked_count, 0, "number of times pipe blocked on read");
SYSCTL_INT(_kern_pipe, OID_AUTO, wblocked,
        CTLFLAG_RW, &pipe_wblocked_count, 0, "number of times pipe blocked on write");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
        CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
        CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");
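
/*
 * These knobs are visible from userland via sysctl(8), e.g. (illustrative):
 *
 *      sysctl kern.pipe                 - dump all pipe counters/limits
 *      sysctl kern.pipe.maxcache=32     - cache more pipe structures per cpu
 */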
static int pipe_delay = 5000;   /* 5uS default */
SYSCTL_INT(_kern_pipe, OID_AUTO, delay,
        CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns");
static int pipe_mpsafe = 1;
SYSCTL_INT(_kern_pipe, OID_AUTO, mpsafe,
        CTLFLAG_RW, &pipe_mpsafe, 0, "");
#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
        CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
        CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif
static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline void pipewakeup (struct pipe *cpipe);
static int pipespace (struct pipe *cpipe, int size);
static __inline void
pipewakeup(struct pipe *cpipe)
{
        if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) {
                get_mplock();
                pgsigio(cpipe->pipe_sigio, SIGIO, 0);
                rel_mplock();
        }
        if (SLIST_FIRST(&cpipe->pipe_kq.ki_note))
                KNOTE(&cpipe->pipe_kq.ki_note, 0);
}
/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The read token is held on entry so *ipp does not race.
 */
static __inline int
pipe_start_uio(struct pipe *cpipe, int *ipp)
{
        int error;

        while (*ipp) {
                *ipp = -1;
                error = tsleep(ipp, PCATCH, "pipexx", 0);
                if (error)
                        return (error);
        }
        *ipp = 1;
        return (0);
}

static __inline void
pipe_end_uio(struct pipe *cpipe, int *ipp)
{
        if (*ipp < 0) {
                *ipp = 0;
                wakeup(ipp);
        } else {
                *ipp = 0;
        }
}
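
/*
 * Typical caller pattern for the serialization above (sketch drawn from
 * pipe_read()/pipe_write() below, not an additional code path):
 *
 *      lwkt_gettoken(&pipe->pipe_rlock);
 *      error = pipe_start_uio(pipe, &pipe->pipe_rip);
 *      if (error == 0) {
 *              ... uiomove() loop, may block and release tokens ...
 *              pipe_end_uio(pipe, &pipe->pipe_rip);
 *      }
 *      lwkt_reltoken(&pipe->pipe_rlock);
 */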
static __inline void
pipe_get_mplock(int *save)
{
#ifdef SMP
        if (pipe_mpsafe == 0) {
                get_mplock();
                *save = 1;
                return;
        }
#endif
        *save = 0;
}

static __inline void
pipe_rel_mplock(int *save)
{
#ifdef SMP
        if (*save)
                rel_mplock();
#endif
}
/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */
int
sys_pipe(struct pipe_args *uap)
{
        struct thread *td = curthread;
        struct filedesc *fdp = td->td_proc->p_fd;
        struct file *rf, *wf;
        struct pipe *rpipe, *wpipe;
        int fd1, fd2, error;

        rpipe = wpipe = NULL;
        if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
                pipeclose(rpipe);
                pipeclose(wpipe);
                return (ENFILE);
        }

        error = falloc(td->td_lwp, &rf, &fd1);
        if (error) {
                pipeclose(rpipe);
                pipeclose(wpipe);
                return (error);
        }
        uap->sysmsg_fds[0] = fd1;
        /*
         * Warning: once we've gotten past allocation of the fd for the
         * read-side, we can only drop the read side via fdrop() in order
         * to avoid races against processes which manage to dup() the read
         * side while we are blocked trying to allocate the write side.
         */
        rf->f_type = DTYPE_PIPE;
        rf->f_flag = FREAD | FWRITE;
        rf->f_ops = &pipeops;
        rf->f_data = rpipe;
        error = falloc(td->td_lwp, &wf, &fd2);
        if (error) {
                fsetfd(fdp, NULL, fd1);
                fdrop(rf);
                /* rpipe has been closed by fdrop(). */
                pipeclose(wpipe);
                return (error);
        }
        wf->f_type = DTYPE_PIPE;
        wf->f_flag = FREAD | FWRITE;
        wf->f_ops = &pipeops;
        wf->f_data = wpipe;
        uap->sysmsg_fds[1] = fd2;

        rpipe->pipe_slock = kmalloc(sizeof(struct lock),
                                    M_PIPE, M_WAITOK|M_ZERO);
        wpipe->pipe_slock = rpipe->pipe_slock;
        rpipe->pipe_peer = wpipe;
        wpipe->pipe_peer = rpipe;
        lockinit(rpipe->pipe_slock, "pipecl", 0, 0);

        /*
         * Once activated the peer relationship remains valid until
         * both sides are closed.
         */
        fsetfd(fdp, rf, fd1);
        fsetfd(fdp, wf, fd2);
        fdrop(rf);
        fdrop(wf);

        return (0);
}
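
/*
 * Userland view of the above (illustrative sketch of standard pipe(2)
 * usage, not kernel code):
 *
 *      int fds[2];
 *      char buf[16];
 *
 *      if (pipe(fds) == 0) {
 *              write(fds[1], "hi", 2);                 (fds[1]: write side)
 *              read(fds[0], buf, sizeof(buf));         (fds[0]: read side)
 *      }
 */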
/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely: if it fails
 * it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
        struct vm_object *object;
        caddr_t buffer;
        int npages;
        int error;

        npages = round_page(size) / PAGE_SIZE;
        object = cpipe->pipe_buffer.object;

        /*
         * [re]create the object if necessary and reserve space for it
         * in the kernel_map.  The object and memory are pageable.  On
         * success, free the old resources before assigning the new
         * ones.
         */
        if (object == NULL || object->size != npages) {
                object = vm_object_allocate(OBJT_DEFAULT, npages);
                buffer = (caddr_t)vm_map_min(&kernel_map);

                error = vm_map_find(&kernel_map, object, 0,
                                    (vm_offset_t *)&buffer,
                                    size,
                                    1, VM_MAPTYPE_NORMAL,
                                    VM_PROT_ALL, VM_PROT_ALL,
                                    0);

                if (error != KERN_SUCCESS) {
                        vm_object_deallocate(object);
                        return (ENOMEM);
                }
                pipe_free_kmem(cpipe);
                cpipe->pipe_buffer.object = object;
                cpipe->pipe_buffer.buffer = buffer;
                cpipe->pipe_buffer.size = size;
        }
        cpipe->pipe_buffer.rindex = 0;
        cpipe->pipe_buffer.windex = 0;
        return (0);
}
/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
        globaldata_t gd = mycpu;
        struct pipe *cpipe;
        int error;

        /*
         * The per-cpu cache is a simple LIFO; pipe_peer doubles as the
         * free-list link while a pipe sits in the cache.
         */
        if ((cpipe = gd->gd_pipeq) != NULL) {
                gd->gd_pipeq = cpipe->pipe_peer;
                --gd->gd_pipeqcount;
                cpipe->pipe_peer = NULL;
                cpipe->pipe_wantwcnt = 0;
        } else {
                cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
        }
        *cpipep = cpipe;
        if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
                return (error);
        vfs_timestamp(&cpipe->pipe_ctime);
        cpipe->pipe_atime = cpipe->pipe_ctime;
        cpipe->pipe_mtime = cpipe->pipe_ctime;
        lwkt_token_init(&cpipe->pipe_rlock, 1);
        lwkt_token_init(&cpipe->pipe_wlock, 1);
        return (0);
}
/*
 * MPALMOSTSAFE (acquires mplock)
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
        struct pipe *rpipe;
        size_t nread = 0;
        int error;
        int nbio;
        int bigread;
        int bigcount;
        int notify_writer;
        int mpsave;
        u_int size;     /* total bytes available */
        u_int nsize;    /* total bytes to read */
        u_int rindex;   /* contiguous bytes available */

        if (uio->uio_resid == 0)
                return (0);

        /*
         * Setup locks, calculate nbio
         */
        pipe_get_mplock(&mpsave);
        rpipe = (struct pipe *)fp->f_data;
        lwkt_gettoken(&rpipe->pipe_rlock);

        if (fflags & O_FBLOCKING)
                nbio = 0;
        else if (fflags & O_FNONBLOCKING)
                nbio = 1;
        else if (fp->f_flag & O_NONBLOCK)
                nbio = 1;
        else
                nbio = 0;
        /*
         * Reads are serialized.  Note however that pipe_buffer.buffer and
         * pipe_buffer.size can change out from under us when the number
         * of bytes in the buffer is zero due to the write-side doing a
         * pipespace().
         */
        error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
        if (error) {
                pipe_rel_mplock(&mpsave);
                lwkt_reltoken(&rpipe->pipe_rlock);
                return (error);
        }
        notify_writer = 0;
        bigread = (uio->uio_resid > 10 * 1024 * 1024);
        bigcount = 10;

        while (uio->uio_resid) {
                /*
                 * Don't hog the cpu.
                 */
                if (bigread && --bigcount == 0) {
                        bigcount = 10;
                        if (CURSIG(curthread->td_lwp)) {
                                error = EINTR;
                                break;
                        }
                }
                size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
                cpu_lfence();
                if (size) {
                        rindex = rpipe->pipe_buffer.rindex &
                                 (rpipe->pipe_buffer.size - 1);
                        nsize = size;
                        if (nsize > rpipe->pipe_buffer.size - rindex)
                                nsize = rpipe->pipe_buffer.size - rindex;
                        nsize = szmin(nsize, uio->uio_resid);

                        error = uiomove(&rpipe->pipe_buffer.buffer[rindex],
                                        nsize, uio);
                        if (error)
                                break;
                        rpipe->pipe_buffer.rindex += nsize;
                        nread += nsize;
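
                        /*
                         * Worked example of the index math above
                         * (illustrative): with pipe_buffer.size = 16384,
                         * windex = 20000 and rindex = 16500, size =
                         * 20000 - 16500 = 3500 bytes available; the masked
                         * rindex is 16500 & 16383 = 116, leaving
                         * 16384 - 116 = 16268 contiguous bytes, so nsize =
                         * min(3500, 16268, uio_resid).  The indices run free
                         * and only wrap modulo the power-of-2 buffer size.
                         */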
                        /*
                         * If the FIFO is still over half full just continue
                         * and do not try to notify the writer yet.
                         */
                        if (size - nsize >= (rpipe->pipe_buffer.size >> 1)) {
                                notify_writer = 0;
                                continue;
                        }

                        /*
                         * When the FIFO is less than half full notify any
                         * waiting writer.  WANTW can be checked while
                         * holding just the rlock.
                         */
                        notify_writer = 1;
                        if ((rpipe->pipe_state & PIPE_WANTW) == 0)
                                continue;
                }
                /*
                 * If the "write-side" was blocked we wake it up.  This code
                 * is reached either when the buffer is completely emptied
                 * or if it becomes more than half-empty.
                 *
                 * Pipe_state can only be modified if both the rlock and
                 * wlock are held.
                 */
                if (rpipe->pipe_state & PIPE_WANTW) {
                        lwkt_gettoken(&rpipe->pipe_wlock);
                        if (rpipe->pipe_state & PIPE_WANTW) {
                                notify_writer = 0;
                                rpipe->pipe_state &= ~PIPE_WANTW;
                                lwkt_reltoken(&rpipe->pipe_wlock);
                                wakeup(rpipe);
                        } else {
                                lwkt_reltoken(&rpipe->pipe_wlock);
                        }
                }
                /*
                 * Pick up our copy loop again if the writer sent data to
                 * us while we were messing around.
                 *
                 * On a SMP box poll up to pipe_delay nanoseconds for new
                 * data.  Typically a value of 2000 to 4000 is sufficient
                 * to eradicate most IPIs/tsleeps/wakeups when a pipe
                 * is used for synchronous communications with small packets,
                 * and 8000 or so (8uS) will pipeline large buffer xfers
                 * between cpus over a pipe.
                 *
                 * For synchronous communications a hit means doing a
                 * full Awrite-Bread-Bwrite-Aread cycle in less than 2uS,
                 * whereas a miss requiring a tsleep/wakeup sequence
                 * will take 7uS or more.
                 */
                if (rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex)
                        continue;
#if defined(SMP) && defined(_RDTSC_SUPPORTED_)
                if (pipe_delay) {
                        int64_t tsc_target;
                        int good = 0;

                        tsc_target = tsc_get_target(pipe_delay);
                        while (tsc_test_target(tsc_target) == 0) {
                                if (rpipe->pipe_buffer.windex !=
                                    rpipe->pipe_buffer.rindex) {
                                        good = 1;
                                        break;
                                }
                        }
                        if (good)
                                continue;
                }
#endif
                /*
                 * Detect EOF condition, do not set error.
                 */
                if (rpipe->pipe_state & PIPE_REOF)
                        break;

                /*
                 * Break if some data was read, or if this was a non-blocking
                 * read.
                 */
                if (nread > 0)
                        break;

                if (nbio) {
                        error = EAGAIN;
                        break;
                }

                /*
                 * Last chance, interlock with WANTR.
                 */
                lwkt_gettoken(&rpipe->pipe_wlock);
                size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
                if (size) {
                        lwkt_reltoken(&rpipe->pipe_wlock);
                        continue;
                }

                /*
                 * Retest EOF - acquiring a new token can temporarily release
                 * tokens already held.
                 */
                if (rpipe->pipe_state & PIPE_REOF) {
                        lwkt_reltoken(&rpipe->pipe_wlock);
                        break;
                }
                /*
                 * If there is no more to read in the pipe, reset its
                 * pointers to the beginning.  This improves cache hit
                 * stats.
                 *
                 * We need both locks to modify both pointers, and there
                 * must also not be a write in progress or the uiomove()
                 * in the write might block and temporarily release
                 * its wlock, then reacquire and update windex.  We are
                 * only serialized against reads, not writes.
                 *
                 * XXX should we even bother resetting the indices?  It
                 * might actually be more cache efficient not to.
                 */
                if (rpipe->pipe_buffer.rindex == rpipe->pipe_buffer.windex &&
                    rpipe->pipe_wip == 0) {
                        rpipe->pipe_buffer.rindex = 0;
                        rpipe->pipe_buffer.windex = 0;
                }
                /*
                 * Wait for more data.
                 *
                 * Pipe_state can only be set if both the rlock and wlock
                 * are held.
                 */
                rpipe->pipe_state |= PIPE_WANTR;
                tsleep_interlock(rpipe, PCATCH);
                lwkt_reltoken(&rpipe->pipe_wlock);
                error = tsleep(rpipe, PCATCH | PINTERLOCKED, "piperd", 0);
                ++pipe_rblocked_count;
                if (error)
                        break;
        }
        pipe_end_uio(rpipe, &rpipe->pipe_rip);
        /*
         * Update last access time.
         */
        if (error == 0 && nread)
                vfs_timestamp(&rpipe->pipe_atime);
        /*
         * If we drained the FIFO more than half way then handle
         * write blocking hysteresis.
         *
         * Note that PIPE_WANTW cannot be set by the writer without
         * it holding both rlock and wlock, so we can test it
         * while holding just rlock.
         */
        if (notify_writer) {
                if (rpipe->pipe_state & PIPE_WANTW) {
                        lwkt_gettoken(&rpipe->pipe_wlock);
                        if (rpipe->pipe_state & PIPE_WANTW) {
                                rpipe->pipe_state &= ~PIPE_WANTW;
                                lwkt_reltoken(&rpipe->pipe_wlock);
                                wakeup(rpipe);
                        } else {
                                lwkt_reltoken(&rpipe->pipe_wlock);
                        }
                }
        }
        if (SLIST_FIRST(&rpipe->pipe_kq.ki_note)) {
                lwkt_gettoken(&rpipe->pipe_wlock);
                pipewakeup(rpipe);
                lwkt_reltoken(&rpipe->pipe_wlock);
        }
        /*size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;*/
        lwkt_reltoken(&rpipe->pipe_rlock);

        pipe_rel_mplock(&mpsave);
        return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
        struct pipe *wpipe, *rpipe;
        u_int windex;
        u_int space;
        u_int segsize;
        int error;
        int orig_resid;
        int nbio;
        int bigwrite;
        int bigcount;
        int mpsave;
        pipe_get_mplock(&mpsave);

        /*
         * Writes go to the peer.  The peer will always exist.
         */
        rpipe = (struct pipe *) fp->f_data;
        wpipe = rpipe->pipe_peer;
        lwkt_gettoken(&wpipe->pipe_wlock);
        if (wpipe->pipe_state & PIPE_WEOF) {
                pipe_rel_mplock(&mpsave);
                lwkt_reltoken(&wpipe->pipe_wlock);
                return (EPIPE);
        }

        /*
         * Degenerate case (EPIPE takes prec)
         */
        if (uio->uio_resid == 0) {
                pipe_rel_mplock(&mpsave);
                lwkt_reltoken(&wpipe->pipe_wlock);
                return (0);
        }
        /*
         * Writes are serialized (start_uio must be called with wlock)
         */
        error = pipe_start_uio(wpipe, &wpipe->pipe_wip);
        if (error) {
                pipe_rel_mplock(&mpsave);
                lwkt_reltoken(&wpipe->pipe_wlock);
                return (error);
        }

        if (fflags & O_FBLOCKING)
                nbio = 0;
        else if (fflags & O_FNONBLOCKING)
                nbio = 1;
        else if (fp->f_flag & O_NONBLOCK)
                nbio = 1;
        else
                nbio = 0;
        /*
         * If it is advantageous to resize the pipe buffer, do so.
         * We are write-serialized so we can block safely.
         */
        if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
            (pipe_nbig < pipe_maxbig) &&
            wpipe->pipe_wantwcnt > 4 &&
            (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
                /*
                 * Recheck after lock.
                 */
                lwkt_gettoken(&wpipe->pipe_rlock);
                if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
                    (pipe_nbig < pipe_maxbig) &&
                    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
                        atomic_add_int(&pipe_nbig, 1);
                        if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
                                atomic_add_int(&pipe_bigcount, 1);
                        else
                                atomic_subtract_int(&pipe_nbig, 1);
                }
                lwkt_reltoken(&wpipe->pipe_rlock);
        }
        orig_resid = uio->uio_resid;
        error = 0;

        bigwrite = (uio->uio_resid > 10 * 1024 * 1024);
        bigcount = 10;

        while (uio->uio_resid) {
                if (wpipe->pipe_state & PIPE_WEOF) {
                        error = EPIPE;
                        break;
                }

                /*
                 * Don't hog the cpu.
                 */
                if (bigwrite && --bigcount == 0) {
                        bigcount = 10;
                        if (CURSIG(curthread->td_lwp)) {
                                error = EINTR;
                                break;
                        }
                }
                windex = wpipe->pipe_buffer.windex &
                         (wpipe->pipe_buffer.size - 1);
                space = wpipe->pipe_buffer.size -
                        (wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
                cpu_lfence();

                /* Writes of size <= PIPE_BUF must be atomic. */
                if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
                        space = 0;
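
                /*
                 * Illustration of the POSIX rule being enforced above (not
                 * extra logic): PIPE_BUF is at least 512 bytes, so when two
                 * writers each write() <= PIPE_BUF bytes concurrently the
                 * kernel never interleaves their payloads.  Forcing space
                 * to 0 here makes a small write wait until it fits whole
                 * rather than be split across reader wakeups.
                 */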
                /*
                 * Write to fill, read size handles write hysteresis.  Also
                 * additional restrictions can cause select-based non-blocking
                 * writes to spin.
                 */
                if (space > 0) {
                        /*
                         * Transfer size is minimum of uio transfer
                         * and free space in pipe buffer.
                         *
                         * Limit each uiocopy to no more than PIPE_SIZE
                         * so we can keep the gravy train going on a
                         * SMP box.  This doubles the performance for
                         * write sizes > 16K.  Otherwise large writes
                         * wind up doing an inefficient synchronous
                         * ping-pong.
                         */
                        space = szmin(space, uio->uio_resid);
                        if (space > PIPE_SIZE)
                                space = PIPE_SIZE;

                        /*
                         * First segment to transfer is minimum of
                         * transfer size and contiguous space in
                         * pipe buffer.  If first segment to transfer
                         * is less than the transfer size, we've got
                         * a wraparound in the buffer.
                         */
                        segsize = wpipe->pipe_buffer.size - windex;
                        if (segsize > space)
                                segsize = space;
                        /*
                         * If this is the first loop and the reader is
                         * blocked, do a preemptive wakeup of the reader.
                         *
                         * On SMP the IPI latency plus the wlock interlock
                         * on the reader side is the fastest way to get the
                         * reader going.  (The scheduler will hard loop on
                         * lock tokens).
                         *
                         * NOTE: We can't clear WANTR here without acquiring
                         * the rlock, which we don't want to do here!
                         */
                        if ((wpipe->pipe_state & PIPE_WANTR) && pipe_mpsafe > 1)
                                wakeup(wpipe);
                        /*
                         * Transfer segment, which may include a wrap-around.
                         * Update windex to account for both all in one go
                         * so the reader can read() the data atomically.
                         */
                        error = uiomove(&wpipe->pipe_buffer.buffer[windex],
                                        segsize, uio);
                        if (error == 0 && segsize < space) {
                                segsize = space - segsize;
                                error = uiomove(&wpipe->pipe_buffer.buffer[0],
                                                segsize, uio);
                        }
                        if (error)
                                break;
                        wpipe->pipe_buffer.windex += space;
                        continue;
                }
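
                /*
                 * Wrap-around example for the two uiomove() calls above
                 * (illustrative): with size = 16384, masked windex = 16000
                 * and space = 1000, the first copy moves segsize =
                 * 16384 - 16000 = 384 bytes to the buffer tail and the
                 * second moves the remaining 616 bytes to offset 0; windex
                 * then advances by the full 1000 in one update so a reader
                 * never observes a half-written wrap.
                 */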
                /*
                 * We need both the rlock and the wlock to interlock against
                 * the EOF, WANTW, and size checks, and to modify pipe_state.
                 *
                 * These are token locks so we do not have to worry about
                 * deadlocks.
                 */
                lwkt_gettoken(&wpipe->pipe_rlock);

                /*
                 * If the "read-side" has been blocked, wake it up now
                 * and yield to let it drain synchronously rather
                 * than block.
                 */
                if (wpipe->pipe_state & PIPE_WANTR) {
                        wpipe->pipe_state &= ~PIPE_WANTR;
                        wakeup(wpipe);
                }

                /*
                 * don't block on non-blocking I/O
                 */
                if (nbio) {
                        lwkt_reltoken(&wpipe->pipe_rlock);
                        error = EAGAIN;
                        break;
                }
                /*
                 * re-test whether we have to block in the writer after
                 * acquiring both locks, in case the reader opened up
                 * some space.
                 */
                space = wpipe->pipe_buffer.size -
                        (wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
                cpu_lfence();
                if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
                        space = 0;

                /*
                 * Retest EOF - acquiring a new token can temporarily release
                 * tokens already held.
                 */
                if (wpipe->pipe_state & PIPE_WEOF) {
                        lwkt_reltoken(&wpipe->pipe_rlock);
                        error = EPIPE;
                        break;
                }
                /*
                 * We have no more space and have something to offer,
                 * wake up select/poll/kq.
                 */
                if (space == 0) {
                        wpipe->pipe_state |= PIPE_WANTW;
                        ++wpipe->pipe_wantwcnt;
                        pipewakeup(wpipe);
                        if (wpipe->pipe_state & PIPE_WANTW)
                                error = tsleep(wpipe, PCATCH, "pipewr", 0);
                        ++pipe_wblocked_count;
                }
                lwkt_reltoken(&wpipe->pipe_rlock);
                /*
                 * Break out if we errored or the read side wants us to go
                 * away.
                 */
                if (error)
                        break;
                if (wpipe->pipe_state & PIPE_WEOF) {
                        error = EPIPE;
                        break;
                }
        }
        pipe_end_uio(wpipe, &wpipe->pipe_wip);
        /*
         * If we have put any characters in the buffer, we wake up
         * the reader.
         *
         * Both rlock and wlock are required to be able to modify pipe_state.
         */
        if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) {
                if (wpipe->pipe_state & PIPE_WANTR) {
                        lwkt_gettoken(&wpipe->pipe_rlock);
                        if (wpipe->pipe_state & PIPE_WANTR) {
                                wpipe->pipe_state &= ~PIPE_WANTR;
                                lwkt_reltoken(&wpipe->pipe_rlock);
                                wakeup(wpipe);
                        } else {
                                lwkt_reltoken(&wpipe->pipe_rlock);
                        }
                }
                if (SLIST_FIRST(&wpipe->pipe_kq.ki_note)) {
                        lwkt_gettoken(&wpipe->pipe_rlock);
                        pipewakeup(wpipe);
                        lwkt_reltoken(&wpipe->pipe_rlock);
                }
        }
        /*
         * Don't return EPIPE if I/O was successful
         */
        if ((wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex) &&
            (uio->uio_resid == 0) &&
            (error == EPIPE)) {
                error = 0;
        }

        if (error == 0)
                vfs_timestamp(&wpipe->pipe_mtime);

        /*
         * We have something to offer,
         * wake up select/poll/kq.
         */
        /*space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;*/
        lwkt_reltoken(&wpipe->pipe_wlock);
        pipe_rel_mplock(&mpsave);
        return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock
 *
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data,
           struct ucred *cred, struct sysmsg *msg)
{
        struct pipe *mpipe;
        int error;
        int mpsave;

        pipe_get_mplock(&mpsave);
        mpipe = (struct pipe *)fp->f_data;

        lwkt_gettoken(&mpipe->pipe_rlock);
        lwkt_gettoken(&mpipe->pipe_wlock);

        switch (cmd) {
        case FIOASYNC:
                if (*(int *)data) {
                        mpipe->pipe_state |= PIPE_ASYNC;
                } else {
                        mpipe->pipe_state &= ~PIPE_ASYNC;
                }
                error = 0;
                break;
        case FIONREAD:
                *(int *)data = mpipe->pipe_buffer.windex -
                                mpipe->pipe_buffer.rindex;
                error = 0;
                break;
        case FIOSETOWN:
                error = fsetown(*(int *)data, &mpipe->pipe_sigio);
                break;
        case FIOGETOWN:
                *(int *)data = fgetown(mpipe->pipe_sigio);
                error = 0;
                break;
        case TIOCSPGRP:
                /* This is deprecated, FIOSETOWN should be used instead. */
                error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
                break;
        case TIOCGPGRP:
                /* This is deprecated, FIOGETOWN should be used instead. */
                *(int *)data = -fgetown(mpipe->pipe_sigio);
                error = 0;
                break;
        default:
                error = ENOTTY;
                break;
        }
        lwkt_reltoken(&mpipe->pipe_wlock);
        lwkt_reltoken(&mpipe->pipe_rlock);
        pipe_rel_mplock(&mpsave);

        return (error);
}
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
        struct pipe *pipe;
        int mpsave;

        pipe_get_mplock(&mpsave);
        pipe = (struct pipe *)fp->f_data;

        bzero((caddr_t)ub, sizeof(*ub));
        ub->st_mode = S_IFIFO;
        ub->st_blksize = pipe->pipe_buffer.size;
        ub->st_size = pipe->pipe_buffer.windex - pipe->pipe_buffer.rindex;
        ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
        ub->st_atimespec = pipe->pipe_atime;
        ub->st_mtimespec = pipe->pipe_mtime;
        ub->st_ctimespec = pipe->pipe_ctime;
        /*
         * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
         * st_flags, st_gen.
         * XXX (st_dev, st_ino) should be unique.
         */
        pipe_rel_mplock(&mpsave);
        return (0);
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_close(struct file *fp)
{
        struct pipe *cpipe;

        get_mplock();
        cpipe = (struct pipe *)fp->f_data;
        fp->f_ops = &badfileops;
        fp->f_data = NULL;
        funsetown(cpipe->pipe_sigio);
        pipeclose(cpipe);
        rel_mplock();
        return (0);
}
/*
 * Shutdown one or both directions of a full-duplex pipe.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_shutdown(struct file *fp, int how)
{
        struct pipe *rpipe;
        struct pipe *wpipe;
        int error = EPIPE;
        int mpsave;

        pipe_get_mplock(&mpsave);
        rpipe = (struct pipe *)fp->f_data;
        wpipe = rpipe->pipe_peer;

        /*
         * We modify pipe_state on both pipes, which means we need
         * all four tokens!
         */
        lwkt_gettoken(&rpipe->pipe_rlock);
        lwkt_gettoken(&rpipe->pipe_wlock);
        lwkt_gettoken(&wpipe->pipe_rlock);
        lwkt_gettoken(&wpipe->pipe_wlock);

        switch (how) {
        case SHUT_RDWR:
        case SHUT_RD:
                rpipe->pipe_state |= PIPE_REOF;         /* my reads */
                rpipe->pipe_state |= PIPE_WEOF;         /* peer writes */
                if (rpipe->pipe_state & PIPE_WANTR) {
                        rpipe->pipe_state &= ~PIPE_WANTR;
                        wakeup(rpipe);
                }
                if (rpipe->pipe_state & PIPE_WANTW) {
                        rpipe->pipe_state &= ~PIPE_WANTW;
                        wakeup(rpipe);
                }
                error = 0;
                if (how == SHUT_RD)
                        break;
                /* fall through */
        case SHUT_WR:
                wpipe->pipe_state |= PIPE_REOF;         /* peer reads */
                wpipe->pipe_state |= PIPE_WEOF;         /* my writes */
                if (wpipe->pipe_state & PIPE_WANTR) {
                        wpipe->pipe_state &= ~PIPE_WANTR;
                        wakeup(wpipe);
                }
                if (wpipe->pipe_state & PIPE_WANTW) {
                        wpipe->pipe_state &= ~PIPE_WANTW;
                        wakeup(wpipe);
                }
                error = 0;
                break;
        }
        pipewakeup(rpipe);
        pipewakeup(wpipe);

        lwkt_reltoken(&wpipe->pipe_wlock);
        lwkt_reltoken(&wpipe->pipe_rlock);
        lwkt_reltoken(&rpipe->pipe_wlock);
        lwkt_reltoken(&rpipe->pipe_rlock);

        pipe_rel_mplock(&mpsave);
        return (error);
}
static void
pipe_free_kmem(struct pipe *cpipe)
{
        if (cpipe->pipe_buffer.buffer != NULL) {
                if (cpipe->pipe_buffer.size > PIPE_SIZE)
                        atomic_subtract_int(&pipe_nbig, 1);
                kmem_free(&kernel_map,
                        (vm_offset_t)cpipe->pipe_buffer.buffer,
                        cpipe->pipe_buffer.size);
                cpipe->pipe_buffer.buffer = NULL;
                cpipe->pipe_buffer.object = NULL;
        }
}
/*
 * Close the pipe.  The slock must be held to interlock against simultaneous
 * closes.  The rlock and wlock must be held to adjust the pipe_state.
 */
static void
pipeclose(struct pipe *cpipe)
{
        globaldata_t gd;
        struct pipe *ppipe;

        if (cpipe == NULL)
                return;

        /*
         * The slock may not have been allocated yet (close during
         * initialization).
         *
         * We need both the read and write tokens to modify pipe_state.
         */
        if (cpipe->pipe_slock)
                lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE);
        lwkt_gettoken(&cpipe->pipe_rlock);
        lwkt_gettoken(&cpipe->pipe_wlock);

        /*
         * Set our state, wakeup anyone waiting in select/poll/kq, and
         * wakeup anyone blocked on our pipe.
         */
        cpipe->pipe_state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF;
        pipewakeup(cpipe);
        if (cpipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
                cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
                wakeup(cpipe);
        }
        /*
         * Disconnect from peer.
         */
        if ((ppipe = cpipe->pipe_peer) != NULL) {
                lwkt_gettoken(&ppipe->pipe_rlock);
                lwkt_gettoken(&ppipe->pipe_wlock);
                ppipe->pipe_state |= PIPE_REOF | PIPE_WEOF;
                pipewakeup(ppipe);
                if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
                        ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
                        wakeup(ppipe);
                }
                if (SLIST_FIRST(&ppipe->pipe_kq.ki_note))
                        KNOTE(&ppipe->pipe_kq.ki_note, 0);
                lwkt_reltoken(&ppipe->pipe_wlock);
                lwkt_reltoken(&ppipe->pipe_rlock);
        }
        /*
         * If the peer is also closed we can free resources for both
         * sides, otherwise we leave our side intact to deal with any
         * races (since we only have the slock).
         */
        if (ppipe && (ppipe->pipe_state & PIPE_CLOSED)) {
                cpipe->pipe_peer = NULL;
                ppipe->pipe_peer = NULL;
                ppipe->pipe_slock = NULL;       /* we will free the slock */
                pipeclose(ppipe);
                ppipe = NULL;
        }

        lwkt_reltoken(&cpipe->pipe_wlock);
        lwkt_reltoken(&cpipe->pipe_rlock);
        if (cpipe->pipe_slock)
                lockmgr(cpipe->pipe_slock, LK_RELEASE);
        /*
         * If we disassociated from our peer we can free resources.
         */
        if (ppipe == NULL) {
                gd = mycpu;
                if (cpipe->pipe_slock) {
                        kfree(cpipe->pipe_slock, M_PIPE);
                        cpipe->pipe_slock = NULL;
                }
                if (gd->gd_pipeqcount >= pipe_maxcache ||
                    cpipe->pipe_buffer.size != PIPE_SIZE) {
                        pipe_free_kmem(cpipe);
                        kfree(cpipe, M_PIPE);
                } else {
                        cpipe->pipe_state = 0;
                        cpipe->pipe_peer = gd->gd_pipeq;
                        gd->gd_pipeq = cpipe;
                        ++gd->gd_pipeqcount;
                }
        }
}
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
        struct pipe *cpipe;

        cpipe = (struct pipe *)kn->kn_fp->f_data;

        switch (kn->kn_filter) {
        case EVFILT_READ:
                kn->kn_fop = &pipe_rfiltops;
                break;
        case EVFILT_WRITE:
                kn->kn_fop = &pipe_wfiltops;
                if (cpipe->pipe_peer == NULL) {
                        /* other end of pipe has been closed */
                        return (EPIPE);
                }
                break;
        default:
                return (EOPNOTSUPP);
        }
        kn->kn_hook = (caddr_t)cpipe;

        knote_insert(&cpipe->pipe_kq.ki_note, kn);

        return (0);
}
static void
filt_pipedetach(struct knote *kn)
{
        struct pipe *cpipe = (struct pipe *)kn->kn_hook;

        knote_remove(&cpipe->pipe_kq.ki_note, kn);
}
/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
        struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;

        kn->kn_data = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
        if (rpipe->pipe_state & PIPE_REOF) {
                kn->kn_flags |= EV_EOF;
                return (1);
        }
        return (kn->kn_data > 0);
}
/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
        struct pipe *wpipe = (struct pipe *)kn->kn_fp->f_data;
        u_int32_t space;

        if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_WEOF)) {
                kn->kn_data = 0;
                kn->kn_flags |= EV_EOF;
                return (1);
        }
        space = wpipe->pipe_buffer.windex -
                wpipe->pipe_buffer.rindex;
        space = wpipe->pipe_buffer.size - space;

        kn->kn_data = space;
        return (kn->kn_data >= PIPE_BUF);
}
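
/*
 * Userland sketch of driving the filters above via the standard kqueue
 * API (illustrative only; error handling omitted):
 *
 *      int kq = kqueue();
 *      struct kevent kev;
 *
 *      EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *      kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * kevent() then reports the read fd once data is buffered
 * (filt_piperead() above), and an EVFILT_WRITE event registered on
 * fds[1] fires while at least PIPE_BUF bytes of buffer space remain,
 * matching filt_pipewrite() above.
 */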