/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 * Copyright (c) 2003-2017 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
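
/*
 * Editorial usage sketch (not part of the original file, hedged): the
 * descriptors produced by this code behave like ordinary pipe(2) and
 * pipe2(2) descriptors, e.g. from userland:
 *
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *
 *	int fds[2];
 *	if (pipe2(fds, O_CLOEXEC) == 0) {
 *		char c;
 *		write(fds[1], "x", 1);	enters pipe_write() below
 *		read(fds[0], &c, 1);	enters pipe_read() below
 *	}
 */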

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/kern_syscall.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>
#include <sys/signal2.h>
#include <sys/mutex2.h>

#include <machine/cpufunc.h>

/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);

static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_shutdown = pipe_shutdown
};

static void filt_pipedetach(struct knote *kn);
static int filt_piperead(struct knote *kn, long hint);
static int filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_pipedetach, filt_pipewrite };

MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");

#define PIPEQ_MAX_CACHE 16	/* per-cpu pipe structure cache */

static int pipe_maxcache = PIPEQ_MAX_CACHE;
static struct pipegdlock *pipe_gdlocks;

SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
	CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
static int pipe_size = 32768;
SYSCTL_INT(_kern_pipe, OID_AUTO, size,
	CTLFLAG_RW, &pipe_size, 0, "Pipe buffer size (16384 minimum)");
static int pipe_delay = 5000;	/* 5uS default */
SYSCTL_INT(_kern_pipe, OID_AUTO, delay,
	CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns");
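
/*
 * Editorial note (hedged): these are ordinary sysctl knobs, so on a
 * system built from this file they would be tuned the usual way, e.g.
 *
 *	sysctl kern.pipe.delay=4000	spin up to 4uS before sleeping
 *	sysctl kern.pipe.size=65536	larger per-direction buffer
 *
 * The node names follow from the SYSCTL_NODE/SYSCTL_INT declarations
 * above.
 */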

/*
 * Auto-size pipe cache to reduce kmem allocations and frees.
 */
static void
pipeinit(void *dummy)
{
	size_t mbytes = kmem_lim_size();
	int n;

	if (pipe_maxcache == PIPEQ_MAX_CACHE) {
		if (mbytes >= 7 * 1024)
			pipe_maxcache *= 2;
		if (mbytes >= 15 * 1024)
			pipe_maxcache *= 2;
	}
	pipe_gdlocks = kmalloc(sizeof(*pipe_gdlocks) * ncpus,
			       M_PIPE, M_WAITOK | M_ZERO);
	for (n = 0; n < ncpus; ++n)
		mtx_init(&pipe_gdlocks[n].mtx, "pipekm");
}
SYSINIT(kmem, SI_BOOT2_MACHDEP, SI_ORDER_ANY, pipeinit, NULL);

static void pipeclose (struct pipe *pipe,
		struct pipebuf *pbr, struct pipebuf *pbw);
static void pipe_free_kmem (struct pipebuf *buf);
static int pipe_create (struct pipe **pipep);

static __inline void
pipewakeup(struct pipebuf *pb, int dosigio)
{
	if (dosigio && (pb->state & PIPE_ASYNC) && pb->sigio) {
		lwkt_gettoken(&sigio_token);
		pgsigio(pb->sigio, SIGIO, 0);
		lwkt_reltoken(&sigio_token);
	}
	KNOTE(&pb->kq.ki_note, 0);
}

/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The read token is held on entry so *ipp does not race.
 */
static __inline int
pipe_start_uio(int *ipp)
{
	int error;

	while (*ipp) {
		*ipp = -1;
		error = tsleep(ipp, PCATCH, "pipexx", 0);
		if (error)
			return (error);
	}
	*ipp = 1;
	return (0);
}

static __inline void
pipe_end_uio(int *ipp)
{
	if (*ipp < 0) {
		*ipp = 0;
		wakeup(ipp);
	} else {
		KKASSERT(*ipp > 0);
		*ipp = 0;
	}
}
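
/*
 * Editorial note on the protocol above (reconstructed, hedged): *ipp is
 * a tiny hand-rolled semaphore protected by the caller's token.  0 means
 * no uio is in progress, a positive value means one is running, and -1
 * records that a waiter slept on the counter's address and must be woken
 * by pipe_end_uio().  tsleep()/wakeup() on the counter's address do the
 * blocking.
 */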

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */
int
sys_pipe(struct pipe_args *uap)
{
	return kern_pipe(uap->sysmsg_fds, 0);
}

int
sys_pipe2(struct pipe2_args *uap)
{
	return kern_pipe(uap->sysmsg_fds, uap->flags);
}
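
/*
 * Editorial note (hedged): sys_pipe2() backs pipe2(2), so flags such as
 * O_NONBLOCK and O_CLOEXEC requested there flow directly into
 * kern_pipe() below and are applied to both descriptors.
 */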

int
kern_pipe(long *fds, int flags)
{
	struct thread *td = curthread;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *rf, *wf;
	struct pipe *pipe;
	int fd1, fd2, error;

	if (pipe_create(&pipe)) {
		pipeclose(pipe, &pipe->bufferA, &pipe->bufferB);
		pipeclose(pipe, &pipe->bufferB, &pipe->bufferA);
		return (ENFILE);
	}

	error = falloc(td->td_lwp, &rf, &fd1);
	if (error) {
		pipeclose(pipe, &pipe->bufferA, &pipe->bufferB);
		pipeclose(pipe, &pipe->bufferB, &pipe->bufferA);
		return (error);
	}

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_type = DTYPE_PIPE;
	rf->f_flag = FREAD | FWRITE;
	rf->f_ops = &pipeops;
	rf->f_data = (void *)((intptr_t)pipe | 0);
	if (flags & O_NONBLOCK)
		rf->f_flag |= O_NONBLOCK;
	if (flags & O_CLOEXEC)
		fdp->fd_files[fd1].fileflags |= UF_EXCLOSE;

	error = falloc(td->td_lwp, &wf, &fd2);
	if (error) {
		fsetfd(fdp, NULL, fd1);
		fdrop(rf);
		/* pipeA has been closed by fdrop() */
		/* close pipeB here */
		pipeclose(pipe, &pipe->bufferB, &pipe->bufferA);
		return (error);
	}
	wf->f_type = DTYPE_PIPE;
	wf->f_flag = FREAD | FWRITE;
	wf->f_ops = &pipeops;
	wf->f_data = (void *)((intptr_t)pipe | 1);
	if (flags & O_NONBLOCK)
		wf->f_flag |= O_NONBLOCK;
	if (flags & O_CLOEXEC)
		fdp->fd_files[fd2].fileflags |= UF_EXCLOSE;

	fds[0] = fd1;
	fds[1] = fd2;

	/*
	 * Once activated the peer relationship remains valid until
	 * both sides are closed.
	 */
	fsetfd(fdp, rf, fd1);
	fsetfd(fdp, wf, fd2);
	fdrop(rf);
	fdrop(wf);

	return (0);
}
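
/*
 * Editorial note (reconstruction, hedged): both descriptors above share
 * a single struct pipe.  The low bit of f_data encodes which end a file
 * refers to: bit clear selects bufferA as the read side, bit set selects
 * bufferB.  Every consumer below recovers the pipe pointer with
 * (intptr_t)fp->f_data & ~(intptr_t)1 and the side with & 1, which is
 * safe because kmalloc'd structures are always at least 2-byte aligned.
 */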

/*
 * [re]allocates KVA for the pipe's circular buffer.  The space is
 * pageable.  Called twice to setup full-duplex communications.
 *
 * NOTE: Independent vm_object's are used to improve performance.
 *
 * Returns 0 on success, ENOMEM on failure.
 */
static int
pipespace(struct pipe *pipe, struct pipebuf *pb, size_t size)
{
	struct vm_object *object;
	caddr_t buffer;
	vm_pindex_t npages;
	int error;

	size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
	if (size < 16384)
		size = 16384;
	if (size > 1024*1024)
		size = 1024*1024;

	npages = round_page(size) / PAGE_SIZE;
	object = pb->object;

	/*
	 * [re]create the object if necessary and reserve space for it
	 * in the kernel_map.  The object and memory are pageable.  On
	 * success, free the old resources before assigning the new
	 * ones.
	 */
	if (object == NULL || object->size != npages) {
		object = vm_object_allocate(OBJT_DEFAULT, npages);
		buffer = (caddr_t)vm_map_min(&kernel_map);

		error = vm_map_find(&kernel_map, object, NULL,
				    0, (vm_offset_t *)&buffer, size,
				    PAGE_SIZE, TRUE,
				    VM_MAPTYPE_NORMAL, VM_SUBSYS_PIPE,
				    VM_PROT_ALL, VM_PROT_ALL, 0);

		if (error != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (ENOMEM);
		}
		pipe_free_kmem(pb);
		pb->object = object;
		pb->buffer = buffer;
		pb->size = size;
	}
	pb->rindex = 0;
	pb->windex = 0;

	return (0);
}
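
/*
 * Editorial note (hedged): rindex/windex are free-running unsigned
 * counters reduced with "& (size - 1)" when indexing the buffer, so the
 * effective buffer size must be a power of 2.  The page-rounded defaults
 * clamped above (16KB..1MB) satisfy this as long as kern.pipe.size is
 * itself kept a power of 2.
 */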

/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.
 *
 * Returns 0 on success, else an error code (typically ENOMEM).  Caller
 * must still deallocate the pipe on failure.
 */
static int
pipe_create(struct pipe **pipep)
{
	globaldata_t gd = mycpu;
	struct pipe *pipe;
	int error;

	if ((pipe = gd->gd_pipeq) != NULL) {
		gd->gd_pipeq = pipe->next;
		--gd->gd_pipeqcount;
		pipe->next = NULL;
	} else {
		pipe = kmalloc(sizeof(*pipe), M_PIPE, M_WAITOK | M_ZERO);
		lwkt_token_init(&pipe->bufferA.rlock, "piper");
		lwkt_token_init(&pipe->bufferA.wlock, "pipew");
		lwkt_token_init(&pipe->bufferB.rlock, "piper");
		lwkt_token_init(&pipe->bufferB.wlock, "pipew");
	}
	*pipep = pipe;
	if ((error = pipespace(pipe, &pipe->bufferA, pipe_size)) != 0) {
		return (error);
	}
	if ((error = pipespace(pipe, &pipe->bufferB, pipe_size)) != 0) {
		return (error);
	}
	vfs_timestamp(&pipe->ctime);
	pipe->bufferA.atime = pipe->ctime;
	pipe->bufferA.mtime = pipe->ctime;
	pipe->bufferB.atime = pipe->ctime;
	pipe->bufferB.mtime = pipe->ctime;
	pipe->open_count = 2;

	return (0);
}

/*
 * Read data from a pipe
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipebuf *rpb;
	struct pipebuf *wpb;
	struct pipe *pipe;
	size_t nread = 0;
	size_t size;	/* total bytes available */
	size_t nsize;	/* total bytes to read */
	size_t rindex;	/* contiguous bytes available */
	int notify_writer;
	int bigread;
	int bigcount;
	int error;
	int nbio;

	pipe = (struct pipe *)((intptr_t)fp->f_data & ~(intptr_t)1);
	if ((intptr_t)fp->f_data & 1) {
		rpb = &pipe->bufferB;
		wpb = &pipe->bufferA;
	} else {
		rpb = &pipe->bufferA;
		wpb = &pipe->bufferB;
	}
	atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);

	if (uio->uio_resid == 0)
		return(0);

	/*
	 * Calculate nbio
	 */
	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * 'quick' NBIO test before things get expensive.
	 */
	if (nbio && rpb->rindex == rpb->windex)
		return (EAGAIN);

	/*
	 * Reads are serialized.  Note however that buffer.buffer and
	 * buffer.size can change out from under us when the number
	 * of bytes in the buffer are zero due to the write-side doing a
	 * pipespace().
	 */
	lwkt_gettoken(&rpb->rlock);
	error = pipe_start_uio(&rpb->rip);
	if (error) {
		lwkt_reltoken(&rpb->rlock);
		return (error);
	}
	notify_writer = 0;

	bigread = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
		/*
		 * Don't hog the cpu.
		 */
		if (bigread && --bigcount == 0) {
			lwkt_user_yield();
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}

		size = rpb->windex - rpb->rindex;
		cpu_lfence();
		if (size) {
			rindex = rpb->rindex & (rpb->size - 1);
			nsize = size;
			if (nsize > rpb->size - rindex)
				nsize = rpb->size - rindex;
			nsize = szmin(nsize, uio->uio_resid);

			error = uiomove(&rpb->buffer[rindex], nsize, uio);
			if (error)
				break;
			cpu_mfence();
			rpb->rindex += nsize;
			nread += nsize;

			/*
			 * If the FIFO is still over half full just continue
			 * and do not try to notify the writer yet.
			 */
			if (size - nsize >= (rpb->size >> 1)) {
				notify_writer = 0;
				continue;
			}

			/*
			 * When the FIFO is less than half full notify any
			 * waiting writer.  WANTW can be checked while
			 * holding just the rlock.
			 */
			notify_writer = 1;
			if ((rpb->state & PIPE_WANTW) == 0)
				continue;
		}

		/*
		 * If the "write-side" was blocked we wake it up.  This code
		 * is reached either when the buffer is completely emptied
		 * or if it becomes more than half-empty.
		 *
		 * Pipe_state can only be modified if both the rlock and
		 * wlock are held.
		 */
		if (rpb->state & PIPE_WANTW) {
			lwkt_gettoken(&rpb->wlock);
			if (rpb->state & PIPE_WANTW) {
				rpb->state &= ~PIPE_WANTW;
				lwkt_reltoken(&rpb->wlock);
				wakeup(rpb);
			} else {
				lwkt_reltoken(&rpb->wlock);
			}
		}

		/*
		 * Pick up our copy loop again if the writer sent data to
		 * us while we were messing around.
		 *
		 * On a SMP box poll up to pipe_delay nanoseconds for new
		 * data.  Typically a value of 2000 to 4000 is sufficient
		 * to eradicate most IPIs/tsleeps/wakeups when a pipe
		 * is used for synchronous communications with small packets,
		 * and 8000 or so (8uS) will pipeline large buffer xfers
		 * between cpus over a pipe.
		 *
		 * For synchronous communications a hit means doing a
		 * full Awrite-Bread-Bwrite-Aread cycle in less than 2uS,
		 * whereas a miss requiring a tsleep/wakeup sequence
		 * will take 7uS or more.
		 */
		if (rpb->windex != rpb->rindex)
			continue;

#ifdef _RDTSC_SUPPORTED_
		if (pipe_delay) {
			int64_t tsc_target;
			int good = 0;

			tsc_target = tsc_get_target(pipe_delay);
			while (tsc_test_target(tsc_target) == 0) {
				if (rpb->windex != rpb->rindex) {
					good = 1;
					break;
				}
				cpu_pause();
			}
			if (good)
				continue;
		}
#endif

		/*
		 * Detect EOF condition, do not set error.
		 */
		if (rpb->state & PIPE_REOF)
			break;

		/*
		 * Break if some data was read, or if this was a non-blocking
		 * read.
		 */
		if (nread > 0)
			break;

		if (nbio) {
			error = EAGAIN;
			break;
		}

		/*
		 * Last chance, interlock with WANTR.
		 */
		lwkt_gettoken(&rpb->wlock);
		size = rpb->windex - rpb->rindex;
		if (size) {
			lwkt_reltoken(&rpb->wlock);
			continue;
		}

		/*
		 * Retest EOF - acquiring a new token can temporarily release
		 * tokens already held.
		 */
		if (rpb->state & PIPE_REOF) {
			lwkt_reltoken(&rpb->wlock);
			break;
		}

		/*
		 * If there is no more to read in the pipe, reset its
		 * pointers to the beginning.  This improves cache hit
		 * stats.
		 *
		 * We need both locks to modify both pointers, and there
		 * must also not be a write in progress or the uiomove()
		 * in the write might block and temporarily release
		 * its wlock, then reacquire and update windex.  We are
		 * only serialized against reads, not writes.
		 *
		 * XXX should we even bother resetting the indices?  It
		 *     might actually be more cache efficient not to.
		 */
		if (rpb->rindex == rpb->windex && rpb->wip == 0) {
			rpb->rindex = 0;
			rpb->windex = 0;
		}

		/*
		 * Wait for more data.
		 *
		 * Pipe_state can only be set if both the rlock and wlock
		 * are held.
		 */
		rpb->state |= PIPE_WANTR;
		tsleep_interlock(rpb, PCATCH);
		lwkt_reltoken(&rpb->wlock);
		error = tsleep(rpb, PCATCH | PINTERLOCKED, "piperd", 0);
		if (error)
			break;
	}
	pipe_end_uio(&rpb->rip);

	/*
	 * Update last access time
	 */
	if (error == 0 && nread)
		vfs_timestamp(&rpb->atime);

	/*
	 * If we drained the FIFO more than half way then handle
	 * write blocking hysteresis.
	 *
	 * Note that PIPE_WANTW cannot be set by the writer without
	 * it holding both rlock and wlock, so we can test it
	 * while holding just rlock.
	 */
	if (notify_writer) {
		/*
		 * Synchronous blocking is done on the pipe involved
		 */
		if (rpb->state & PIPE_WANTW) {
			lwkt_gettoken(&rpb->wlock);
			if (rpb->state & PIPE_WANTW) {
				rpb->state &= ~PIPE_WANTW;
				lwkt_reltoken(&rpb->wlock);
				wakeup(rpb);
			} else {
				lwkt_reltoken(&rpb->wlock);
			}
		}

		/*
		 * But we may also have to deal with a kqueue which is
		 * stored on the same pipe as its descriptor, so an
		 * EVFILT_WRITE event waiting for our side to drain will
		 * be on the other side.
		 */
		lwkt_gettoken(&wpb->wlock);
		pipewakeup(wpb, 0);
		lwkt_reltoken(&wpb->wlock);
	}
	/*size = rpb->windex - rpb->rindex;*/
	lwkt_reltoken(&rpb->rlock);

	return (error);
}
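
/*
 * Editorial note (hedged): because the indices are free-running,
 * "windex - rindex" yields the byte count in the FIFO even across
 * wraparound, courtesy of modulo-2^N unsigned arithmetic.  For example,
 * with a 32KB buffer, windex = 0x8002 and rindex = 0x7ffe means 4 bytes
 * are pending: 2 contiguous at offset 0x7ffe and 2 wrapped to offset 0.
 */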

static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipebuf *rpb;
	struct pipebuf *wpb;
	struct pipe *pipe;
	size_t windex;
	size_t space;
	size_t orig_resid;
	int bigwrite;
	int bigcount;
	int error;
	int nbio;

	pipe = (struct pipe *)((intptr_t)fp->f_data & ~(intptr_t)1);
	if ((intptr_t)fp->f_data & 1) {
		rpb = &pipe->bufferB;
		wpb = &pipe->bufferA;
	} else {
		rpb = &pipe->bufferA;
		wpb = &pipe->bufferB;
	}

	/*
	 * Calculate nbio
	 */
	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * 'quick' NBIO test before things get expensive.
	 */
	if (nbio && wpb->size == (wpb->windex - wpb->rindex) &&
	    uio->uio_resid && (wpb->state & PIPE_WEOF) == 0) {
		return (EAGAIN);
	}

	/*
	 * Writes go to the peer.  The peer will always exist.
	 */
	lwkt_gettoken(&wpb->wlock);
	if (wpb->state & PIPE_WEOF) {
		lwkt_reltoken(&wpb->wlock);
		return (EPIPE);
	}

	/*
	 * Degenerate case (EPIPE takes prec)
	 */
	if (uio->uio_resid == 0) {
		lwkt_reltoken(&wpb->wlock);
		return(0);
	}

	/*
	 * Writes are serialized (start_uio must be called with wlock)
	 */
	error = pipe_start_uio(&wpb->wip);
	if (error) {
		lwkt_reltoken(&wpb->wlock);
		return (error);
	}

	orig_resid = uio->uio_resid;

	bigwrite = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
		if (wpb->state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}

		/*
		 * Don't hog the cpu.
		 */
		if (bigwrite && --bigcount == 0) {
			lwkt_user_yield();
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}

		windex = wpb->windex & (wpb->size - 1);
		space = wpb->size - (wpb->windex - wpb->rindex);

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * Write to fill, read size handles write hysteresis.  Also
		 * additional restrictions can cause select-based non-blocking
		 * writes to spin.
		 */
		if (space > 0) {
			size_t segsize;

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 *
			 * Limit each uiocopy to no more than wpb->size
			 * so we can keep the gravy train going on a
			 * SMP box.  This significantly increases write
			 * performance.  Otherwise large writes wind up doing
			 * an inefficient synchronous ping-pong.
			 */
			space = szmin(space, uio->uio_resid);
			if (space > (wpb->size >> 1))
				space = (wpb->size >> 1);

			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpb->size - windex;
			if (segsize > space)
				segsize = space;

			/*
			 * If this is the first loop and the reader is
			 * blocked, do a preemptive wakeup of the reader.
			 *
			 * On SMP the IPI latency plus the wlock interlock
			 * on the reader side is the fastest way to get the
			 * reader going.  (The scheduler will hard loop on
			 * lock tokens).
			 *
			 * NOTE: We can't clear WANTR here without acquiring
			 * the rlock, which we don't want to do here!
			 */
			if ((wpb->state & PIPE_WANTR))
				wakeup(wpb);

			/*
			 * Transfer segment, which may include a wrap-around.
			 * Update windex to account for both all in one go
			 * so the reader can read() the data atomically.
			 */
			error = uiomove(&wpb->buffer[windex], segsize, uio);
			if (error == 0 && segsize < space) {
				segsize = space - segsize;
				error = uiomove(&wpb->buffer[0], segsize, uio);
			}
			if (error)
				break;
			cpu_mfence();
			wpb->windex += space;
			continue;
		}

		/*
		 * We need both the rlock and the wlock to interlock against
		 * the EOF, WANTW, and size checks, and to modify pipe_state.
		 *
		 * These are token locks so we do not have to worry about
		 * deadlocks.
		 */
		lwkt_gettoken(&wpb->rlock);

		/*
		 * If the "read-side" has been blocked, wake it up now
		 * and yield to let it drain synchronously rather
		 * than block.
		 */
		if (wpb->state & PIPE_WANTR) {
			wpb->state &= ~PIPE_WANTR;
			wakeup(wpb);
		}

		/*
		 * don't block on non-blocking I/O
		 */
		if (nbio) {
			lwkt_reltoken(&wpb->rlock);
			error = EAGAIN;
			break;
		}

		/*
		 * re-test whether we have to block in the writer after
		 * acquiring both locks, in case the reader opened up
		 * some space.
		 */
		space = wpb->size - (wpb->windex - wpb->rindex);
		cpu_lfence();
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * Retest EOF - acquiring a new token can temporarily release
		 * tokens already held.
		 */
		if (wpb->state & PIPE_WEOF) {
			lwkt_reltoken(&wpb->rlock);
			error = EPIPE;
			break;
		}

		/*
		 * We have no more space and have something to offer,
		 * wake up select/poll/kq.
		 */
		if (space == 0) {
			wpb->state |= PIPE_WANTW;
			pipewakeup(wpb, 1);
			if (wpb->state & PIPE_WANTW)
				error = tsleep(wpb, PCATCH, "pipewr", 0);
		}
		lwkt_reltoken(&wpb->rlock);

		/*
		 * Break out if we errored or the read side wants us to go
		 * away.
		 */
		if (error)
			break;
		if (wpb->state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}
	}
	pipe_end_uio(&wpb->wip);

	/*
	 * If we have put any characters in the buffer, we wake up
	 * the reader.
	 *
	 * Both rlock and wlock are required to be able to modify pipe_state.
	 */
	if (wpb->windex != wpb->rindex) {
		if (wpb->state & PIPE_WANTR) {
			lwkt_gettoken(&wpb->rlock);
			if (wpb->state & PIPE_WANTR) {
				wpb->state &= ~PIPE_WANTR;
				lwkt_reltoken(&wpb->rlock);
				wakeup(wpb);
			} else {
				lwkt_reltoken(&wpb->rlock);
			}
		}
		lwkt_gettoken(&wpb->rlock);
		pipewakeup(wpb, 1);
		lwkt_reltoken(&wpb->rlock);
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpb->rindex == wpb->windex) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpb->mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll/kq.
	 */
	/*space = wpb->windex - wpb->rindex;*/
	lwkt_reltoken(&wpb->wlock);

	return (error);
}
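
/*
 * Editorial note (hedged): the "space = 0" clamp above implements the
 * POSIX guarantee that writes of PIPE_BUF bytes or less are atomic.
 * Rather than partially filling the FIFO, the writer pretends there is
 * no room and blocks until the whole request fits, so concurrent small
 * writers (e.g. several processes appending log records to one pipe)
 * never interleave bytes within a single write().
 */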

/*
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data,
	   struct ucred *cred, struct sysmsg *msg)
{
	struct pipebuf *rpb;
	struct pipe *pipe;
	int error;

	pipe = (struct pipe *)((intptr_t)fp->f_data & ~(intptr_t)1);
	if ((intptr_t)fp->f_data & 1) {
		rpb = &pipe->bufferB;
	} else {
		rpb = &pipe->bufferA;
	}

	lwkt_gettoken(&rpb->rlock);
	lwkt_gettoken(&rpb->wlock);

	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			rpb->state |= PIPE_ASYNC;
		} else {
			rpb->state &= ~PIPE_ASYNC;
		}
		error = 0;
		break;
	case FIONREAD:
		*(int *)data = (int)(rpb->windex - rpb->rindex);
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &rpb->sigio);
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(&rpb->sigio);
		error = 0;
		break;
	case TIOCSPGRP:
		/* This is deprecated, FIOSETOWN should be used instead. */
		error = fsetown(-(*(int *)data), &rpb->sigio);
		break;
	case TIOCGPGRP:
		/* This is deprecated, FIOGETOWN should be used instead. */
		*(int *)data = -fgetown(&rpb->sigio);
		error = 0;
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&rpb->wlock);
	lwkt_reltoken(&rpb->rlock);

	return (error);
}
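
/*
 * Editorial usage sketch (hedged): FIONREAD is the ioctl userland most
 * often issues against pipes, e.g.:
 *
 *	int avail;
 *	if (ioctl(fds[0], FIONREAD, &avail) == 0)
 *		printf("%d bytes readable\n", avail);
 *
 * which returns the windex - rindex byte count computed above.
 */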

static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
	struct pipebuf *rpb;
	struct pipe *pipe;

	pipe = (struct pipe *)((intptr_t)fp->f_data & ~(intptr_t)1);
	if ((intptr_t)fp->f_data & 1) {
		rpb = &pipe->bufferB;
	} else {
		rpb = &pipe->bufferA;
	}

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = rpb->size;
	ub->st_size = rpb->windex - rpb->rindex;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = rpb->atime;
	ub->st_mtimespec = rpb->mtime;
	ub->st_ctimespec = pipe->ctime;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}

static int
pipe_close(struct file *fp)
{
	struct pipebuf *rpb;
	struct pipebuf *wpb;
	struct pipe *pipe;

	pipe = (struct pipe *)((intptr_t)fp->f_data & ~(intptr_t)1);
	if ((intptr_t)fp->f_data & 1) {
		rpb = &pipe->bufferB;
		wpb = &pipe->bufferA;
	} else {
		rpb = &pipe->bufferA;
		wpb = &pipe->bufferB;
	}

	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(&rpb->sigio);
	pipeclose(pipe, rpb, wpb);

	return (0);
}

/*
 * Shutdown one or both directions of a full-duplex pipe.
 */
static int
pipe_shutdown(struct file *fp, int how)
{
	struct pipebuf *rpb;
	struct pipebuf *wpb;
	struct pipe *pipe;
	int error = EPIPE;

	pipe = (struct pipe *)((intptr_t)fp->f_data & ~(intptr_t)1);
	if ((intptr_t)fp->f_data & 1) {
		rpb = &pipe->bufferB;
		wpb = &pipe->bufferA;
	} else {
		rpb = &pipe->bufferA;
		wpb = &pipe->bufferB;
	}

	/*
	 * We modify pipe_state on both pipes, which means we need
	 * all four tokens!
	 */
	lwkt_gettoken(&rpb->rlock);
	lwkt_gettoken(&rpb->wlock);
	lwkt_gettoken(&wpb->rlock);
	lwkt_gettoken(&wpb->wlock);

	switch(how) {
	case SHUT_RDWR:
	case SHUT_RD:
		rpb->state |= PIPE_REOF;		/* my reads */
		rpb->state |= PIPE_WEOF;		/* peer writes */
		if (rpb->state & PIPE_WANTR) {
			rpb->state &= ~PIPE_WANTR;
			wakeup(rpb);
		}
		if (rpb->state & PIPE_WANTW) {
			rpb->state &= ~PIPE_WANTW;
			wakeup(rpb);
		}
		error = 0;
		if (how == SHUT_RD)
			break;
		/* fall through */
	case SHUT_WR:
		wpb->state |= PIPE_REOF;		/* peer reads */
		wpb->state |= PIPE_WEOF;		/* my writes */
		if (wpb->state & PIPE_WANTR) {
			wpb->state &= ~PIPE_WANTR;
			wakeup(wpb);
		}
		if (wpb->state & PIPE_WANTW) {
			wpb->state &= ~PIPE_WANTW;
			wakeup(wpb);
		}
		error = 0;
		break;
	}
	pipewakeup(rpb, 1);
	pipewakeup(wpb, 1);

	lwkt_reltoken(&wpb->wlock);
	lwkt_reltoken(&wpb->rlock);
	lwkt_reltoken(&rpb->wlock);
	lwkt_reltoken(&rpb->rlock);

	return (error);
}
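
/*
 * Editorial note (hedged): because this implementation is full-duplex,
 * shutdown(2) is meaningful on a pipe descriptor: SHUT_RD EOFs this
 * end's reads and the peer's writes, SHUT_WR EOFs this end's writes and
 * the peer's reads, mirroring the socket semantics this file emulates.
 */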

/*
 * Destroy the pipe buffer.
 */
static void
pipe_free_kmem(struct pipebuf *pb)
{
	if (pb->buffer != NULL) {
		kmem_free(&kernel_map, (vm_offset_t)pb->buffer, pb->size);
		pb->buffer = NULL;
		pb->object = NULL;
	}
}

/*
 * Close one half of the pipe.  We are closing the pipe for reading on rpb
 * and writing on wpb.  This routine must be called twice with the pipebufs
 * reversed to close both directions.
 */
static void
pipeclose(struct pipe *pipe, struct pipebuf *rpb, struct pipebuf *wpb)
{
	globaldata_t gd;

	if (pipe == NULL)
		return;

	/*
	 * We need both the read and write tokens to modify pipe_state.
	 */
	lwkt_gettoken(&rpb->rlock);
	lwkt_gettoken(&rpb->wlock);

	/*
	 * Set our state, wakeup anyone waiting in select/poll/kq, and
	 * wakeup anyone blocked on our pipe.  No action if our side
	 * is already closed.
	 */
	if (rpb->state & PIPE_CLOSED) {
		lwkt_reltoken(&rpb->wlock);
		lwkt_reltoken(&rpb->rlock);
		return;
	}

	rpb->state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF;
	pipewakeup(rpb, 1);
	if (rpb->state & (PIPE_WANTR | PIPE_WANTW)) {
		rpb->state &= ~(PIPE_WANTR | PIPE_WANTW);
		wakeup(rpb);
	}
	lwkt_reltoken(&rpb->wlock);
	lwkt_reltoken(&rpb->rlock);

	/*
	 * Disconnect from peer.
	 */
	lwkt_gettoken(&wpb->rlock);
	lwkt_gettoken(&wpb->wlock);

	wpb->state |= PIPE_REOF | PIPE_WEOF;
	pipewakeup(wpb, 1);
	if (wpb->state & (PIPE_WANTR | PIPE_WANTW)) {
		wpb->state &= ~(PIPE_WANTR | PIPE_WANTW);
		wakeup(wpb);
	}
	if (SLIST_FIRST(&wpb->kq.ki_note))
		KNOTE(&wpb->kq.ki_note, 0);
	lwkt_reltoken(&wpb->wlock);
	lwkt_reltoken(&wpb->rlock);

	/*
	 * Free resources once both sides are closed.  We maintain a pcpu
	 * cache to improve performance, so the actual tear-down case is
	 * limited to bulk situations.
	 *
	 * However, the bulk tear-down case can cause intense contention
	 * on the kernel_map when, e.g. hundreds to hundreds of thousands
	 * of processes are killed at the same time.  To deal with this we
	 * use a pcpu mutex to maintain concurrency but also limit the
	 * number of threads banging on the map and pmap.
	 *
	 * We use the mtx mechanism instead of the lockmgr mechanism because
	 * the mtx mechanism utilizes a queued design which will not break
	 * down in the face of thousands to hundreds of thousands of
	 * processes trying to free pipes simultaneously.  The lockmgr
	 * mechanism will wind up waking them all up each time a lock
	 * is released.
	 */
	if (atomic_fetchadd_int(&pipe->open_count, -1) == 1) {
		gd = mycpu;
		if (gd->gd_pipeqcount >= pipe_maxcache) {
			mtx_lock(&pipe_gdlocks[gd->gd_cpuid].mtx);
			pipe_free_kmem(rpb);
			pipe_free_kmem(wpb);
			mtx_unlock(&pipe_gdlocks[gd->gd_cpuid].mtx);
			kfree(pipe, M_PIPE);
		} else {
			rpb->state = 0;
			wpb->state = 0;
			pipe->next = gd->gd_pipeq;
			gd->gd_pipeq = pipe;
			++gd->gd_pipeqcount;
		}
	}
}

static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipebuf *rpb;
	struct pipebuf *wpb;
	struct pipe *pipe;

	pipe = (struct pipe *)((intptr_t)fp->f_data & ~(intptr_t)1);
	if ((intptr_t)fp->f_data & 1) {
		rpb = &pipe->bufferB;
		wpb = &pipe->bufferA;
	} else {
		rpb = &pipe->bufferA;
		wpb = &pipe->bufferB;
	}

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		if (wpb->state & PIPE_CLOSED) {
			/* other end of pipe has been closed */
			return (EPIPE);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}

	if (rpb == &pipe->bufferA)
		kn->kn_hook = (caddr_t)(void *)((intptr_t)pipe | 0);
	else
		kn->kn_hook = (caddr_t)(void *)((intptr_t)pipe | 1);

	knote_insert(&rpb->kq.ki_note, kn);

	return (0);
}

static void
filt_pipedetach(struct knote *kn)
{
	struct pipebuf *rpb;
	struct pipebuf *wpb;
	struct pipe *pipe;

	pipe = (struct pipe *)((intptr_t)kn->kn_hook & ~(intptr_t)1);
	if ((intptr_t)kn->kn_hook & 1) {
		rpb = &pipe->bufferB;
		wpb = &pipe->bufferA;
	} else {
		rpb = &pipe->bufferA;
		wpb = &pipe->bufferB;
	}
	knote_remove(&rpb->kq.ki_note, kn);
}

static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipebuf *rpb;
	struct pipebuf *wpb;
	struct pipe *pipe;
	int ready = 0;

	pipe = (struct pipe *)((intptr_t)kn->kn_fp->f_data & ~(intptr_t)1);
	if ((intptr_t)kn->kn_fp->f_data & 1) {
		rpb = &pipe->bufferB;
		wpb = &pipe->bufferA;
	} else {
		rpb = &pipe->bufferA;
		wpb = &pipe->bufferB;
	}

	lwkt_gettoken(&rpb->rlock);
	lwkt_gettoken(&rpb->wlock);

	kn->kn_data = rpb->windex - rpb->rindex;

	if (rpb->state & PIPE_REOF) {
		/*
		 * Only set NODATA if all data has been exhausted
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		ready = 1;
	}

	lwkt_reltoken(&rpb->wlock);
	lwkt_reltoken(&rpb->rlock);

	if (!ready)
		ready = kn->kn_data > 0;

	return (ready);
}

static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipebuf *rpb;
	struct pipebuf *wpb;
	struct pipe *pipe;
	int ready = 0;

	pipe = (struct pipe *)((intptr_t)kn->kn_fp->f_data & ~(intptr_t)1);
	if ((intptr_t)kn->kn_fp->f_data & 1) {
		rpb = &pipe->bufferB;
		wpb = &pipe->bufferA;
	} else {
		rpb = &pipe->bufferA;
		wpb = &pipe->bufferB;
	}

	kn->kn_data = 0;
	if (wpb->state & PIPE_CLOSED) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}

	lwkt_gettoken(&wpb->rlock);
	lwkt_gettoken(&wpb->wlock);

	if (wpb->state & PIPE_WEOF) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		ready = 1;
	}

	if (!ready)
		kn->kn_data = wpb->size - (wpb->windex - wpb->rindex);

	lwkt_reltoken(&wpb->wlock);
	lwkt_reltoken(&wpb->rlock);

	if (!ready)
		ready = kn->kn_data >= PIPE_BUF;

	return (ready);
}
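
/*
 * Editorial usage sketch (hedged): the filterops above back kqueue(2)
 * on pipes.  From userland, read-readiness would be watched as:
 *
 *	struct kevent kev, out;
 *	int kq = kqueue();
 *	EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	if (kevent(kq, &kev, 1, &out, 1, NULL) == 1)
 *		;	out.data is the byte count from filt_piperead()
 */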