 * Copyright (c) 1996 John S. Dyson
 * Copyright (c) 2003-2017 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/kern_syscall.h>
#include <sys/mutex.h>

#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>
#include <sys/signal2.h>
#include <sys/mutex2.h>

#include <machine/cpufunc.h>
/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);
static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_shutdown = pipe_shutdown
};
static void filt_pipedetach(struct knote *kn);
static int filt_piperead(struct knote *kn, long hint);
static int filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, filt_pipedetach, filt_pipewrite };
MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");

#define PIPEQ_MAX_CACHE 16	/* per-cpu pipe structure cache */

static int pipe_maxcache = PIPEQ_MAX_CACHE;
static struct pipegdlock *pipe_gdlocks;

SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
	CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
/*
 * The pipe buffer size can be changed at any time.  Only new pipe()s
 * are affected.  Note that due to cpu cache effects, you do not want
 * to make this value too large.
 */
static int pipe_size = 32768;
SYSCTL_INT(_kern_pipe, OID_AUTO, size,
	CTLFLAG_RW, &pipe_size, 0, "Pipe buffer size (16384 minimum)");
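/*
 * Example (not part of the original file): the knobs above are plain
 * sysctls and can be inspected or tuned from userland.  A minimal sketch
 * using sysctlbyname(3); the 65536 written here is purely illustrative.
 *
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int size, newsize = 65536;
 *		size_t len = sizeof(size);
 *
 *		if (sysctlbyname("kern.pipe.size", &size, &len,
 *				 &newsize, sizeof(newsize)) == 0)
 *			printf("kern.pipe.size was %d\n", size);
 *		return 0;
 *	}
 */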
/*
 * Reader/writer delay loop.  When the reader exhausts the pipe buffer
 * or the writer completely fills the pipe buffer and would otherwise sleep,
 * it first busy-loops for a few microseconds waiting for data or buffer
 * space.  This eliminates IPIs for most high-bandwidth writer/reader pipes
 * and also helps when the user program uses a large data buffer in its
 * UIOs.
 *
 * This defaults to 4uS.
 */
#ifdef _RDTSC_SUPPORTED_
static int pipe_delay = 4000;	/* 4uS default */
SYSCTL_INT(_kern_pipe, OID_AUTO, delay,
	CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns");
#endif
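/*
 * Illustrative sketch (not part of the original file) of the polling
 * idiom built on pipe_delay that the read and write paths below use.
 * The predicate progress_made() is hypothetical; the real loops test
 * the ring indices directly.
 */
#if 0
	if (pipe_delay) {
		int64_t tsc_target = tsc_get_target(pipe_delay);

		while (tsc_test_target(tsc_target) == 0) {
			if (progress_made(pb))		/* hypothetical */
				break;
			cpu_pause();
		}
	}
#endif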
/*
 * Auto-size pipe cache to reduce kmem allocations and frees.
 */
static void
pipeinit(void *dummy)
{
	size_t mbytes = kmem_lim_size();
	int n;

	if (pipe_maxcache == PIPEQ_MAX_CACHE) {
		if (mbytes >= 7 * 1024)
			pipe_maxcache *= 2;
		if (mbytes >= 15 * 1024)
			pipe_maxcache *= 2;
	}
	pipe_gdlocks = kmalloc(sizeof(*pipe_gdlocks) * ncpus,
			       M_PIPE, M_WAITOK | M_ZERO);
	for (n = 0; n < ncpus; ++n)
		mtx_init(&pipe_gdlocks[n].mtx, "pipekm");
}
SYSINIT(kmem, SI_BOOT2_MACHDEP, SI_ORDER_ANY, pipeinit, NULL);
static void pipeclose (struct pipe *pipe,
		struct pipebuf *pbr, struct pipebuf *pbw);
static void pipe_free_kmem (struct pipebuf *buf);
static int pipe_create (struct pipe **pipep);
/*
 * Test and clear the specified flag, wakeup(pb) if it was set.
 * This function must also act as a memory barrier.
 */
static __inline void
pipesignal(struct pipebuf *pb, uint32_t flags)
{
	uint32_t oflags;
	uint32_t nflags;

	for (;;) {
		oflags = pb->state;
		nflags = oflags & ~flags;
		if (atomic_cmpset_int(&pb->state, oflags, nflags)) {
			if (oflags & flags)
				wakeup(pb);
			break;
		}
	}
}
static void
pipewakeup(struct pipebuf *pb, int dosigio)
{
	if (dosigio && (pb->state & PIPE_ASYNC) && pb->sigio) {
		lwkt_gettoken(&sigio_token);
		pgsigio(pb->sigio, SIGIO, 0);
		lwkt_reltoken(&sigio_token);
	}
	KNOTE(&pb->kq.ki_note, 0);
}
/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The appropriate token is held on entry so *ipp does not race.
 */
static __inline int
pipe_start_uio(int *ipp)
{
	int error;

	while (*ipp) {
		*ipp = -1;
		error = tsleep(ipp, PCATCH, "pipexx", 0);
		if (error)
			return (error);
	}
	*ipp = 1;
	return (0);
}

static __inline void
pipe_end_uio(int *ipp)
{
	if (*ipp < 0) {
		*ipp = 0;
		wakeup(ipp);
	} else {
		*ipp = 0;
	}
}
/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */
int
sys_pipe(struct pipe_args *uap)
{
	return kern_pipe(uap->sysmsg_fds, 0);
}

int
sys_pipe2(struct pipe2_args *uap)
{
	return kern_pipe(uap->sysmsg_fds, uap->flags);
}
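/*
 * Example (not part of the original file): these syscalls back the usual
 * userland API.  A minimal sketch, error handling elided; fds[0] is the
 * read side and fds[1] the write side, and note that in this
 * implementation O_NONBLOCK marks both descriptors non-blocking:
 *
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *
 *	int fds[2];
 *	char buf[1];
 *
 *	pipe2(fds, O_NONBLOCK | O_CLOEXEC);
 *	write(fds[1], "x", 1);
 *	read(fds[0], buf, sizeof(buf));
 */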
int
kern_pipe(long *fds, int flags)
{
	struct thread *td = curthread;
	struct filedesc *fdp = td->td_proc->p_fd;
	struct file *rf, *wf;
	struct pipe *pipe;
	int fd1, fd2, error;
	if (pipe_create(&pipe)) {
		pipeclose(pipe, &pipe->bufferA, &pipe->bufferB);
		pipeclose(pipe, &pipe->bufferB, &pipe->bufferA);
		return (ENFILE);
	}
	error = falloc(td->td_lwp, &rf, &fd1);
	if (error) {
		pipeclose(pipe, &pipe->bufferA, &pipe->bufferB);
		pipeclose(pipe, &pipe->bufferB, &pipe->bufferA);
		return (error);
	}
	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
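	/*
	 * The low bit of f_data tags which end of the full-duplex pipe a
	 * descriptor refers to: side 0 reads bufferA and writes bufferB,
	 * side 1 is the mirror image.  Consumers mask the bit off to
	 * recover the struct pipe pointer, whose alignment guarantees the
	 * bit is otherwise unused.
	 */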
	rf->f_type = DTYPE_PIPE;
	rf->f_flag = FREAD | FWRITE;
	rf->f_ops = &pipeops;
	rf->f_data = (void *)((intptr_t)pipe | 0);
	if (flags & O_NONBLOCK)
		rf->f_flag |= O_NONBLOCK;
	if (flags & O_CLOEXEC)
		fdp->fd_files[fd1].fileflags |= UF_EXCLOSE;
	error = falloc(td->td_lwp, &wf, &fd2);
	if (error) {
		fsetfd(fdp, NULL, fd1);
		fdrop(rf);
		/* pipeA has been closed by fdrop() */
		/* close pipeB here */
		pipeclose(pipe, &pipe->bufferB, &pipe->bufferA);
		return (error);
	}
	wf->f_type = DTYPE_PIPE;
	wf->f_flag = FREAD | FWRITE;
	wf->f_ops = &pipeops;
	wf->f_data = (void *)((intptr_t)pipe | 1);
	if (flags & O_NONBLOCK)
		wf->f_flag |= O_NONBLOCK;
	if (flags & O_CLOEXEC)
		fdp->fd_files[fd2].fileflags |= UF_EXCLOSE;
	/*
	 * Once activated the peer relationship remains valid until
	 * both sides are closed.
	 */
	fsetfd(fdp, rf, fd1);
	fsetfd(fdp, wf, fd2);
	fds[0] = fd1;
	fds[1] = fd2;
	fdrop(rf);
	fdrop(wf);

	return (0);
}
/*
 * [re]allocates KVA for the pipe's circular buffer.  The space is
 * pageable.  Called twice to setup full-duplex communications.
 *
 * NOTE: Independent vm_objects are used to improve performance.
 *
 * Returns 0 on success, ENOMEM on failure.
 */
static int
pipespace(struct pipe *pipe, struct pipebuf *pb, size_t size)
{
	struct vm_object *object;
	caddr_t buffer;
	vm_pindex_t npages;
	int error;

	size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
	if (size < 16384)
		size = 16384;
	if (size > 1024*1024)
		size = 1024*1024;

	npages = round_page(size) / PAGE_SIZE;
	object = pb->object;
	/*
	 * [re]create the object if necessary and reserve space for it
	 * in the kernel_map.  The object and memory are pageable.  On
	 * success, free the old resources before assigning the new
	 * ones.
	 */
	if (object == NULL || object->size != npages) {
		object = vm_object_allocate(OBJT_DEFAULT, npages);
		buffer = (caddr_t)vm_map_min(&kernel_map);

		error = vm_map_find(&kernel_map, object, NULL,
				    0, (vm_offset_t *)&buffer, size,
				    VM_MAPTYPE_NORMAL, VM_SUBSYS_PIPE,
				    VM_PROT_ALL, VM_PROT_ALL, 0);
		if (error != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (ENOMEM);
		}
/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.
 *
 * Returns 0 on success, else an error code (typically ENOMEM).  Caller
 * must still deallocate the pipe on failure.
 */
static int
pipe_create(struct pipe **pipep)
{
	globaldata_t gd = mycpu;
	struct pipe *pipe;
	int error;

	if ((pipe = gd->gd_pipeq) != NULL) {
		gd->gd_pipeq = pipe->next;
		--gd->gd_pipeqcount;
		pipe->next = NULL;
	} else {
		pipe = kmalloc(sizeof(*pipe), M_PIPE, M_WAITOK | M_ZERO);
		pipe->inum = gd->gd_anoninum++ * ncpus + gd->gd_cpuid + 2;
		lwkt_token_init(&pipe->bufferA.rlock, "piper");
		lwkt_token_init(&pipe->bufferA.wlock, "pipew");
		lwkt_token_init(&pipe->bufferB.rlock, "piper");
		lwkt_token_init(&pipe->bufferB.wlock, "pipew");
	}
	*pipep = pipe;
	if ((error = pipespace(pipe, &pipe->bufferA, pipe_size)) != 0) {
		return (error);
	}
	if ((error = pipespace(pipe, &pipe->bufferB, pipe_size)) != 0) {
		return (error);
	}
	vfs_timestamp(&pipe->ctime);
	pipe->bufferA.atime = pipe->ctime;
	pipe->bufferA.mtime = pipe->ctime;
	pipe->bufferB.atime = pipe->ctime;
	pipe->bufferB.mtime = pipe->ctime;
	pipe->open_count = 2;

	return (0);
}
/*
 * Read data from a pipe
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipebuf *rpb;
	struct pipebuf *wpb;
	struct pipe *pipe;
	size_t size;	/* total bytes available */
	size_t nsize;	/* total bytes to read */
	size_t rindex;	/* contiguous bytes available */
	int nbio;
	int error;
	int nread = 0;
	int bigread;
	int bigcount;
	pipe = (struct pipe *)((intptr_t)fp->f_data & ~(intptr_t)1);
	if ((intptr_t)fp->f_data & 1) {
		rpb = &pipe->bufferB;
		wpb = &pipe->bufferA;
	} else {
		rpb = &pipe->bufferA;
		wpb = &pipe->bufferB;
	}
	atomic_set_int(&curthread->td_mpflags, TDF_MP_BATCH_DEMARC);
	if (uio->uio_resid == 0)
		return (0);
	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;
	/*
	 * 'quick' NBIO test before things get expensive.
	 */
	if (nbio && rpb->rindex == rpb->windex &&
	    (rpb->state & PIPE_REOF) == 0) {
		return (EAGAIN);
	}
	/*
	 * Reads are serialized.  Note however that buffer.buffer and
	 * buffer.size can change out from under us when the number
	 * of bytes in the buffer are zero due to the write-side doing a
	 * pipespace().
	 */
	lwkt_gettoken(&rpb->rlock);
	error = pipe_start_uio(&rpb->rip);
	if (error) {
		lwkt_reltoken(&rpb->rlock);
		return (error);
	}
	bigread = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
		if (bigread && --bigcount == 0) {
			bigcount = 10;
			lwkt_user_yield();
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}
		/*
		 * lfence required to avoid read-reordering of buffer
		 * contents prior to validation of size.
		 */
		size = rpb->windex - rpb->rindex;
		cpu_lfence();
		if (size) {
			rindex = rpb->rindex & (rpb->size - 1);
			nsize = size;
			if (nsize > rpb->size - rindex)
				nsize = rpb->size - rindex;
			nsize = szmin(nsize, uio->uio_resid);
			/*
			 * Limit how much we move in one go so we have a
			 * chance to kick the writer while data is still
			 * available in the pipe.  This avoids getting into
			 * a ping-pong with the writer.
			 */
			if (nsize > (rpb->size >> 1))
				nsize = rpb->size >> 1;
			error = uiomove(&rpb->buffer[rindex], nsize, uio);
			if (error)
				break;
			rpb->rindex += nsize;
			nread += nsize;
			/*
			 * If the FIFO is still over half full just continue
			 * and do not try to notify the writer yet.  If
			 * less than half full notify any waiting writer.
			 */
			if (size - nsize > (rpb->size >> 1)) {
				continue;
			}
			pipesignal(rpb, PIPE_WANTW);
			continue;
		}
		/*
		 * If the "write-side" was blocked we wake it up.  This code
		 * is reached when the buffer is completely emptied.
		 */
		pipesignal(rpb, PIPE_WANTW);
		/*
		 * Pick up our copy loop again if the writer sent data to
		 * us while we were messing around.
		 *
		 * On an SMP box poll up to pipe_delay nanoseconds for new
		 * data.  Typically a value of 2000 to 4000 is sufficient
		 * to eradicate most IPIs/tsleeps/wakeups when a pipe
		 * is used for synchronous communications with small packets,
		 * and 8000 or so (8uS) will pipeline large buffer xfers
		 * between cpus over a pipe.
		 *
		 * For synchronous communications a hit means doing a
		 * full Awrite-Bread-Bwrite-Aread cycle in less than 2uS,
		 * whereas a miss requiring a tsleep/wakeup sequence
		 * will take 7uS or more.
		 */
		if (rpb->windex != rpb->rindex)
			continue;
#ifdef _RDTSC_SUPPORTED_
		if (pipe_delay) {
			int64_t tsc_target;
			int good = 0;

			tsc_target = tsc_get_target(pipe_delay);
			while (tsc_test_target(tsc_target) == 0) {
				cpu_lfence();
				if (rpb->windex != rpb->rindex) {
					good = 1;
					break;
				}
				cpu_pause();
			}
			if (good)
				continue;
		}
#endif
		/*
		 * Detect EOF condition, do not set error.
		 */
		if (rpb->state & PIPE_REOF)
			break;
		/*
		 * Break if some data was read, or if this was a non-blocking
		 * read.
		 */
		if (nread > 0)
			break;
		if (nbio) {
			error = EAGAIN;
			break;
		}
		/*
		 * Last chance, interlock with WANTR
		 */
		tsleep_interlock(rpb, PCATCH);
		atomic_set_int(&rpb->state, PIPE_WANTR);
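		/*
		 * (tsleep_interlock() queues this thread on the sleep
		 * channel before the re-tests below, so a wakeup issued
		 * after a re-test but before the PINTERLOCKED tsleep()
		 * is not lost.)
		 */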
		/*
		 * Retest bytes available after memory barrier above.
		 */
		size = rpb->windex - rpb->rindex;
		if (size)
			continue;
		/*
		 * Retest EOF after memory barrier above.
		 */
		if (rpb->state & PIPE_REOF)
			break;
		/*
		 * Wait for more data or state change
		 */
		error = tsleep(rpb, PCATCH | PINTERLOCKED, "piperd", 0);
		if (error)
			break;
	}
	pipe_end_uio(&rpb->rip);
	/*
	 * Update last access time
	 */
	if (error == 0 && nread)
		vfs_timestamp(&rpb->atime);
	/*
	 * If we drained the FIFO more than half way then handle
	 * write blocking hysteresis.
	 *
	 * Note that PIPE_WANTW cannot be set by the writer without
	 * it holding both rlock and wlock, so we can test it
	 * while holding just rlock.
	 */
	if (rpb->state & PIPE_WANTW) {
		/*
		 * Synchronous blocking is done on the pipe involved
		 */
		pipesignal(rpb, PIPE_WANTW);
		/*
		 * But we may also have to deal with a kqueue which is
		 * stored on the same pipe as its descriptor, so an
		 * EVFILT_WRITE event waiting for our side to drain will
		 * be on the other side.
		 */
		pipewakeup(wpb, 0);
	}
	/*size = rpb->windex - rpb->rindex;*/
	lwkt_reltoken(&rpb->rlock);

	return (error);
}
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipebuf *rpb;
	struct pipebuf *wpb;
	struct pipe *pipe;
	size_t windex;
	size_t space;
	size_t segsize;
	size_t orig_resid;
	int nbio;
	int error;
	int bigwrite;
	int bigcount;

	pipe = (struct pipe *)((intptr_t)fp->f_data & ~(intptr_t)1);
	if ((intptr_t)fp->f_data & 1) {
		rpb = &pipe->bufferB;
		wpb = &pipe->bufferA;
	} else {
		rpb = &pipe->bufferA;
		wpb = &pipe->bufferB;
	}
	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;
	/*
	 * 'quick' NBIO test before things get expensive.
	 */
	if (nbio && wpb->size == (wpb->windex - wpb->rindex) &&
	    uio->uio_resid && (wpb->state & PIPE_WEOF) == 0) {
		return (EAGAIN);
	}
	/*
	 * Writes go to the peer.  The peer will always exist.
	 */
	lwkt_gettoken(&wpb->wlock);
	if (wpb->state & PIPE_WEOF) {
		lwkt_reltoken(&wpb->wlock);
		return (EPIPE);
	}
	/*
	 * Degenerate case (EPIPE takes prec)
	 */
	if (uio->uio_resid == 0) {
		lwkt_reltoken(&wpb->wlock);
		return (0);
	}
	/*
	 * Writes are serialized (start_uio must be called with wlock)
	 */
	error = pipe_start_uio(&wpb->wip);
	if (error) {
		lwkt_reltoken(&wpb->wlock);
		return (error);
	}
	orig_resid = uio->uio_resid;

	bigwrite = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
		if (wpb->state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}
		if (bigwrite && --bigcount == 0) {
			bigcount = 10;
			lwkt_user_yield();
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}
		windex = wpb->windex & (wpb->size - 1);
		space = wpb->size - (wpb->windex - wpb->rindex);
		/*
		 * Writes of size <= PIPE_BUF must be atomic.
		 */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;
		/*
		 * Write to fill, read size handles write hysteresis.  Also
		 * additional restrictions can cause select-based non-blocking
		 * writes to spin.
		 */
		if (space > 0) {
			/*
			 * We want to notify a potentially waiting reader
			 * before we exhaust the write buffer for SMP
			 * pipelining.  Otherwise the write/read will begin
			 * to ping-pong.
			 */
			space = szmin(space, uio->uio_resid);
			if (space > (wpb->size >> 1))
				space = (wpb->size >> 1);
			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
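			/*
			 * Worked example (not in the original comment):
			 * with size = 32768, windex = 65000 and
			 * rindex = 50000, the masked write offset is
			 * 65000 & 32767 = 32232, leaving 536 contiguous
			 * bytes; a 1000 byte transfer is thus split into
			 * a 536 byte copy at the tail and a 464 byte copy
			 * at offset 0.  The indices are free-running
			 * counters, so the unsigned difference
			 * windex - rindex stays correct across wrap.
			 */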
			segsize = wpb->size - windex;
			if (segsize > space)
				segsize = space;
			/*
			 * If this is the first loop and the reader is
			 * blocked, do a preemptive wakeup of the reader.
			 *
			 * On SMP the IPI latency plus the wlock interlock
			 * on the reader side is the fastest way to get the
			 * reader going.  (The scheduler will hard loop on
			 * lock tokens).
			 */
			pipesignal(wpb, PIPE_WANTR);
			/*
			 * Transfer segment, which may include a wrap-around.
			 * Update windex to account for both all in one go
			 * so the reader can read() the data atomically.
			 */
			error = uiomove(&wpb->buffer[windex], segsize, uio);
			if (error == 0 && segsize < space) {
				segsize = space - segsize;
				error = uiomove(&wpb->buffer[0], segsize, uio);
			}
			if (error)
				break;
			/*
			 * Memory fence prior to windex updating (note: not
			 * needed so this is a NOP on Intel).
			 */
			cpu_sfence();
			wpb->windex += space;
			pipesignal(wpb, PIPE_WANTR);
			continue;
		}
		/*
		 * Wakeup any pending reader
		 */
		pipesignal(wpb, PIPE_WANTR);
		/*
		 * don't block on non-blocking I/O
		 */
		if (nbio) {
			error = EAGAIN;
			break;
		}
#ifdef _RDTSC_SUPPORTED_
		if (pipe_delay) {
			int64_t tsc_target;
			int good = 0;

			tsc_target = tsc_get_target(pipe_delay);
			while (tsc_test_target(tsc_target) == 0) {
				cpu_lfence();
				space = wpb->size - (wpb->windex - wpb->rindex);
				if ((space < uio->uio_resid) &&
				    (orig_resid <= PIPE_BUF)) {
					space = 0;
				}
				if (space) {
					good = 1;
					break;
				}
				cpu_pause();
			}
			if (good)
				continue;
		}
#endif
		/*
		 * Interlocked test.  Atomic op enforces the memory barrier.
		 */
		tsleep_interlock(wpb, PCATCH);
		atomic_set_int(&wpb->state, PIPE_WANTW);
		/*
		 * Retest space available after memory barrier above.
		 * Writes of size <= PIPE_BUF must be atomic.
		 */
		space = wpb->size - (wpb->windex - wpb->rindex);
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;
		/*
		 * Retest EOF after memory barrier above.
		 */
		if (wpb->state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}
		/*
		 * We have no more space and have something to offer,
		 * wake up select/poll/kq.
		 */
		if (space == 0) {
			pipewakeup(wpb, 1);
			error = tsleep(wpb, PCATCH | PINTERLOCKED, "pipewr", 0);
		}
		/*
		 * Break out if we errored or the read side wants us to go
		 * away.
		 */
		if (error)
			break;
		if (wpb->state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}
	}
	pipe_end_uio(&wpb->wip);
	/*
	 * If we have put any characters in the buffer, we wake up
	 * the reader.
	 *
	 * Both rlock and wlock are required to be able to modify pipe_state.
	 */
	if (wpb->windex != wpb->rindex) {
		pipesignal(wpb, PIPE_WANTR);
	}
	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpb->rindex == wpb->windex) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpb->mtime);
	/*
	 * We have something to offer,
	 * wake up select/poll/kq.
	 */
	/*space = wpb->windex - wpb->rindex;*/
	pipewakeup(wpb, 1);

	lwkt_reltoken(&wpb->wlock);

	return (error);
}
/*
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
static int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data,
	   struct ucred *cred, struct sysmsg *msg)
{
	struct pipebuf *rpb;
	struct pipe *pipe;
	int error;

	pipe = (struct pipe *)((intptr_t)fp->f_data & ~(intptr_t)1);
	if ((intptr_t)fp->f_data & 1) {
		rpb = &pipe->bufferB;
	} else {
		rpb = &pipe->bufferA;
	}

	lwkt_gettoken(&rpb->rlock);
	lwkt_gettoken(&rpb->wlock);
	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			atomic_set_int(&rpb->state, PIPE_ASYNC);
		} else {
			atomic_clear_int(&rpb->state, PIPE_ASYNC);
		}
		error = 0;
		break;
	case FIONREAD:
		*(int *)data = (int)(rpb->windex - rpb->rindex);
		error = 0;
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &rpb->sigio);
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(&rpb->sigio);
		error = 0;
		break;
	case TIOCSPGRP:
		/* This is deprecated, FIOSETOWN should be used instead. */
		error = fsetown(-(*(int *)data), &rpb->sigio);
		break;
	case TIOCGPGRP:
		/* This is deprecated, FIOGETOWN should be used instead. */
		*(int *)data = -fgetown(&rpb->sigio);
		error = 0;
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&rpb->wlock);
	lwkt_reltoken(&rpb->rlock);

	return (error);
}
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
	struct pipebuf *rpb;
	struct pipe *pipe;

	pipe = (struct pipe *)((intptr_t)fp->f_data & ~(intptr_t)1);
	if ((intptr_t)fp->f_data & 1) {
		rpb = &pipe->bufferB;
	} else {
		rpb = &pipe->bufferA;
	}
	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = rpb->size;
	ub->st_size = rpb->windex - rpb->rindex;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = rpb->atime;
	ub->st_mtimespec = rpb->mtime;
	ub->st_ctimespec = pipe->ctime;
	ub->st_uid = fp->f_cred->cr_uid;
	ub->st_gid = fp->f_cred->cr_gid;
	ub->st_ino = pipe->inum;
	/*
	 * Left as 0: st_dev, st_nlink, st_rdev, st_flags, st_gen.
	 *
	 * XXX (st_dev, st_ino) should be unique.
	 */
	return (0);
}
static int
pipe_close(struct file *fp)
{
	struct pipebuf *rpb;
	struct pipebuf *wpb;
	struct pipe *pipe;

	pipe = (struct pipe *)((intptr_t)fp->f_data & ~(intptr_t)1);
	if ((intptr_t)fp->f_data & 1) {
		rpb = &pipe->bufferB;
		wpb = &pipe->bufferA;
	} else {
		rpb = &pipe->bufferA;
		wpb = &pipe->bufferB;
	}
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(&rpb->sigio);
	pipeclose(pipe, rpb, wpb);

	return (0);
}
/*
 * Shutdown one or both directions of a full-duplex pipe.
 */
static int
pipe_shutdown(struct file *fp, int how)
{
	struct pipebuf *rpb;
	struct pipebuf *wpb;
	struct pipe *pipe;
	int error;

	pipe = (struct pipe *)((intptr_t)fp->f_data & ~(intptr_t)1);
	if ((intptr_t)fp->f_data & 1) {
		rpb = &pipe->bufferB;
		wpb = &pipe->bufferA;
	} else {
		rpb = &pipe->bufferA;
		wpb = &pipe->bufferB;
	}
	/*
	 * We modify pipe_state on both pipes, which means we need
	 * all four tokens!
	 */
	lwkt_gettoken(&rpb->rlock);
	lwkt_gettoken(&rpb->wlock);
	lwkt_gettoken(&wpb->rlock);
	lwkt_gettoken(&wpb->wlock);
	error = 0;
	switch (how) {
	case SHUT_RDWR:
	case SHUT_RD:
		/*
		 * EOF on my reads and peer writes
		 */
		atomic_set_int(&rpb->state, PIPE_REOF | PIPE_WEOF);
		if (rpb->state & PIPE_WANTR) {
			rpb->state &= ~PIPE_WANTR;
			wakeup(rpb);
		}
		if (rpb->state & PIPE_WANTW) {
			rpb->state &= ~PIPE_WANTW;
			wakeup(rpb);
		}
		if (how == SHUT_RD)
			break;
		/* fall through to also shut down the write side */
	case SHUT_WR:
		/*
		 * EOF on peer reads and my writes
		 */
		atomic_set_int(&wpb->state, PIPE_REOF | PIPE_WEOF);
		if (wpb->state & PIPE_WANTR) {
			wpb->state &= ~PIPE_WANTR;
			wakeup(wpb);
		}
		if (wpb->state & PIPE_WANTW) {
			wpb->state &= ~PIPE_WANTW;
			wakeup(wpb);
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	lwkt_reltoken(&wpb->wlock);
	lwkt_reltoken(&wpb->rlock);
	lwkt_reltoken(&rpb->wlock);
	lwkt_reltoken(&rpb->rlock);

	return (error);
}
/*
 * Destroy the pipe buffer.
 */
static void
pipe_free_kmem(struct pipebuf *pb)
{
	if (pb->buffer != NULL) {
		kmem_free(&kernel_map, (vm_offset_t)pb->buffer, pb->size);
		pb->buffer = NULL;
		pb->object = NULL;
	}
}
/*
 * Close one half of the pipe.  We are closing the pipe for reading on rpb
 * and writing on wpb.  This routine must be called twice with the pipebufs
 * reversed to close both directions.
 */
static void
pipeclose(struct pipe *pipe, struct pipebuf *rpb, struct pipebuf *wpb)
{
	globaldata_t gd;

	/*
	 * We need both the read and write tokens to modify pipe_state.
	 */
	lwkt_gettoken(&rpb->rlock);
	lwkt_gettoken(&rpb->wlock);
	/*
	 * Set our state, wakeup anyone waiting in select/poll/kq, and
	 * wakeup anyone blocked on our pipe.  No action if our side
	 * is already closed.
	 */
	if (rpb->state & PIPE_CLOSED) {
		lwkt_reltoken(&rpb->wlock);
		lwkt_reltoken(&rpb->rlock);
		return;
	}
	atomic_set_int(&rpb->state, PIPE_CLOSED | PIPE_REOF | PIPE_WEOF);
	pipewakeup(rpb, 1);
	if (rpb->state & (PIPE_WANTR | PIPE_WANTW)) {
		rpb->state &= ~(PIPE_WANTR | PIPE_WANTW);
		wakeup(rpb);
	}
	lwkt_reltoken(&rpb->wlock);
	lwkt_reltoken(&rpb->rlock);
	/*
	 * Disconnect from peer.
	 */
	lwkt_gettoken(&wpb->rlock);
	lwkt_gettoken(&wpb->wlock);

	atomic_set_int(&wpb->state, PIPE_REOF | PIPE_WEOF);
	if (wpb->state & (PIPE_WANTR | PIPE_WANTW)) {
		wpb->state &= ~(PIPE_WANTR | PIPE_WANTW);
		wakeup(wpb);
	}
	if (SLIST_FIRST(&wpb->kq.ki_note))
		KNOTE(&wpb->kq.ki_note, 0);
	lwkt_reltoken(&wpb->wlock);
	lwkt_reltoken(&wpb->rlock);
	/*
	 * Free resources once both sides are closed.  We maintain a pcpu
	 * cache to improve performance, so the actual tear-down case is
	 * limited to bulk situations.
	 *
	 * However, the bulk tear-down case can cause intense contention
	 * on the kernel_map when, e.g. hundreds to hundreds of thousands
	 * of processes are killed at the same time.  To deal with this we
	 * use a pcpu mutex to maintain concurrency but also limit the
	 * number of threads banging on the map and pmap.
	 *
	 * We use the mtx mechanism instead of the lockmgr mechanism because
	 * the mtx mechanism utilizes a queued design which will not break
	 * down in the face of thousands to hundreds of thousands of
	 * processes trying to free pipes simultaneously.  The lockmgr
	 * mechanism will wind up waking them all up each time a lock
	 * cycles.
	 */
	if (atomic_fetchadd_int(&pipe->open_count, -1) == 1) {
		gd = mycpu;
		if (gd->gd_pipeqcount >= pipe_maxcache) {
			mtx_lock(&pipe_gdlocks[gd->gd_cpuid].mtx);
			pipe_free_kmem(rpb);
			pipe_free_kmem(wpb);
			mtx_unlock(&pipe_gdlocks[gd->gd_cpuid].mtx);
			kfree(pipe, M_PIPE);
		} else {
			pipe->next = gd->gd_pipeq;
			gd->gd_pipeq = pipe;
			++gd->gd_pipeqcount;
		}
	}
}
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipebuf *rpb;
	struct pipebuf *wpb;
	struct pipe *pipe;

	pipe = (struct pipe *)((intptr_t)fp->f_data & ~(intptr_t)1);
	if ((intptr_t)fp->f_data & 1) {
		rpb = &pipe->bufferB;
		wpb = &pipe->bufferA;
	} else {
		rpb = &pipe->bufferA;
		wpb = &pipe->bufferB;
	}
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		if (wpb->state & PIPE_CLOSED) {
			/* other end of pipe has been closed */
			return (EPIPE);
		}
		break;
	default:
		return (EOPNOTSUPP);
	}
	if (rpb == &pipe->bufferA)
		kn->kn_hook = (caddr_t)(void *)((intptr_t)pipe | 0);
	else
		kn->kn_hook = (caddr_t)(void *)((intptr_t)pipe | 1);

	knote_insert(&rpb->kq.ki_note, kn);

	return (0);
}
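/*
 * Example (not part of the original file): how these filters are
 * typically exercised from userland.  A minimal sketch with error
 * handling elided; fds[] comes from pipe(2) as above:
 *
 *	#include <sys/event.h>
 *
 *	struct kevent kev, res;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	kevent(kq, NULL, 0, &res, 1, NULL);
 *
 * after which res.data carries the byte count computed by
 * filt_piperead() below.
 */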
static void
filt_pipedetach(struct knote *kn)
{
	struct pipebuf *rpb;
	struct pipebuf *wpb;
	struct pipe *pipe;

	pipe = (struct pipe *)((intptr_t)kn->kn_hook & ~(intptr_t)1);
	if ((intptr_t)kn->kn_hook & 1) {
		rpb = &pipe->bufferB;
		wpb = &pipe->bufferA;
	} else {
		rpb = &pipe->bufferA;
		wpb = &pipe->bufferB;
	}
	knote_remove(&rpb->kq.ki_note, kn);
}
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipebuf *rpb;
	struct pipebuf *wpb;
	struct pipe *pipe;
	int ready = 0;

	pipe = (struct pipe *)((intptr_t)kn->kn_fp->f_data & ~(intptr_t)1);
	if ((intptr_t)kn->kn_fp->f_data & 1) {
		rpb = &pipe->bufferB;
		wpb = &pipe->bufferA;
	} else {
		rpb = &pipe->bufferA;
		wpb = &pipe->bufferB;
	}
	/*
	 * We shouldn't need the pipe locks because the knote itself is
	 * locked via KN_PROCESSING.  If we lose a race against the writer,
	 * the writer will just issue a KNOTE() after us.
	 */
#if 0
	lwkt_gettoken(&rpb->rlock);
	lwkt_gettoken(&rpb->wlock);
#endif
	kn->kn_data = rpb->windex - rpb->rindex;
	if (kn->kn_data < 0)
		kn->kn_data = 0;
	if (rpb->state & PIPE_REOF) {
		/*
		 * Only set NODATA if all data has been exhausted
		 */
		if (kn->kn_data == 0)
			kn->kn_flags |= EV_NODATA;
		kn->kn_flags |= EV_EOF;
		ready = 1;
	}
#if 0
	lwkt_reltoken(&rpb->wlock);
	lwkt_reltoken(&rpb->rlock);
#endif

	if (!ready)
		ready = kn->kn_data > 0;

	return (ready);
}
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipebuf *rpb;
	struct pipebuf *wpb;
	struct pipe *pipe;
	int ready = 0;

	pipe = (struct pipe *)((intptr_t)kn->kn_fp->f_data & ~(intptr_t)1);
	if ((intptr_t)kn->kn_fp->f_data & 1) {
		rpb = &pipe->bufferB;
		wpb = &pipe->bufferA;
	} else {
		rpb = &pipe->bufferA;
		wpb = &pipe->bufferB;
	}
	if (wpb->state & PIPE_CLOSED) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		return (1);
	}
	/*
	 * We shouldn't need the pipe locks because the knote itself is
	 * locked via KN_PROCESSING.  If we lose a race against the reader,
	 * the reader will just issue a KNOTE() after us.
	 */
#if 0
	lwkt_gettoken(&wpb->rlock);
	lwkt_gettoken(&wpb->wlock);
#endif
	if (wpb->state & PIPE_WEOF) {
		kn->kn_flags |= (EV_EOF | EV_NODATA);
		ready = 1;
	}
	kn->kn_data = wpb->size - (wpb->windex - wpb->rindex);
	if (kn->kn_data < 0)
		kn->kn_data = 0;
#if 0
	lwkt_reltoken(&wpb->wlock);
	lwkt_reltoken(&wpb->rlock);
#endif

	if (!ready)
		ready = kn->kn_data >= PIPE_BUF;

	return (ready);
}