pipe - fix UP build error.

/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>

#include <machine/cpufunc.h>

/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_poll (struct file *fp, int events, struct ucred *cred);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data, struct ucred *cred);

static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_poll = pipe_poll,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_shutdown = pipe_shutdown
};

static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");

/*
 * Default pipe buffer size(s), this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	64
#define PIPEQ_MAX_CACHE 16	/* per-cpu pipe structure cache */

static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_bigcount;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
static int pipe_rblocked_count;
static int pipe_wblocked_count;

SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
        CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount,
        CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded");
SYSCTL_INT(_kern_pipe, OID_AUTO, rblocked,
        CTLFLAG_RW, &pipe_rblocked_count, 0, "number of times read blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, wblocked,
        CTLFLAG_RW, &pipe_wblocked_count, 0, "number of times write blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
        CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
        CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");
#ifdef SMP
static int pipe_delay = 5000;	/* 5uS default */
SYSCTL_INT(_kern_pipe, OID_AUTO, delay,
        CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns");
static int pipe_mpsafe = 0;
SYSCTL_INT(_kern_pipe, OID_AUTO, mpsafe,
        CTLFLAG_RW, &pipe_mpsafe, 0, "");
#endif
#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
        CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
        CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif

static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline void pipeselwakeup (struct pipe *cpipe);
static int pipespace (struct pipe *cpipe, int size);

static __inline void
pipeselwakeup(struct pipe *cpipe)
{
	if (cpipe->pipe_state & PIPE_SEL) {
		get_mplock();
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
		rel_mplock();
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) {
		get_mplock();
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
		rel_mplock();
	}
	if (SLIST_FIRST(&cpipe->pipe_sel.si_note)) {
		get_mplock();
		KNOTE(&cpipe->pipe_sel.si_note, 0);
		rel_mplock();
	}
}

/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The read token is held on entry so *ipp does not race.
 */
static __inline int
pipe_start_uio(struct pipe *cpipe, u_int *ipp)
{
	int error;

	while (*ipp) {
		*ipp = -1;
		error = tsleep(ipp, PCATCH, "pipexx", 0);
		if (error)
			return (error);
	}
	*ipp = 1;
	return (0);
}

static __inline void
pipe_end_uio(struct pipe *cpipe, u_int *ipp)
{
	/*
	 * *ipp is unsigned; cast so the -1 "waiters present" marker set
	 * in pipe_start_uio() tests negative as intended.
	 */
	if ((int)*ipp < 0) {
		*ipp = 0;
		wakeup(ipp);
	} else {
		*ipp = 0;
	}
}

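/*
 * Editorial note: *ipp is effectively tri-state: 0 (no uio in progress),
 * 1 (uio in progress), and (u_int)-1 (in progress with waiters).
 * pipe_end_uio() only issues a wakeup() when a waiter actually marked
 * the flag, avoiding spurious wakeups.
 */
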
static __inline void
pipe_get_mplock(int *save)
{
#ifdef SMP
	if (pipe_mpsafe == 0) {
		get_mplock();
		*save = 1;
	} else
#endif
	{
		*save = 0;
	}
}

static __inline void
pipe_rel_mplock(int *save)
{
#ifdef SMP
	if (*save)
		rel_mplock();
#endif
}

/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */

/* ARGSUSED */
int
sys_pipe(struct pipe_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd1, fd2, error;

	KKASSERT(p);

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	error = falloc(p, &rf, &fd1);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	uap->sysmsg_fds[0] = fd1;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_type = DTYPE_PIPE;
	rf->f_flag = FREAD | FWRITE;
	rf->f_ops = &pipeops;
	rf->f_data = rpipe;
	error = falloc(p, &wf, &fd2);
	if (error) {
		fsetfd(p, NULL, fd1);
		fdrop(rf);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_type = DTYPE_PIPE;
	wf->f_flag = FREAD | FWRITE;
	wf->f_ops = &pipeops;
	wf->f_data = wpipe;
	uap->sysmsg_fds[1] = fd2;

	rpipe->pipe_slock = kmalloc(sizeof(struct lock),
				    M_PIPE, M_WAITOK|M_ZERO);
	wpipe->pipe_slock = rpipe->pipe_slock;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	lockinit(rpipe->pipe_slock, "pipecl", 0, 0);

	/*
	 * Once activated the peer relationship remains valid until
	 * both sides are closed.
	 */
	fsetfd(p, rf, fd1);
	fsetfd(p, wf, fd2);
	fdrop(rf);
	fdrop(wf);

	return (0);
}
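
/*
 * Illustrative userland view (editorial, not part of the kernel build):
 * the two descriptors returned via sysmsg_fds[] surface as the familiar
 * pipe(2) interface.
 *
 *	int fds[2];
 *	char c;
 *
 *	pipe(fds);		fds[0] = read side, fds[1] = write side
 *	write(fds[1], "x", 1);
 *	read(fds[0], &c, 1);
 */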

/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine will 'realloc' the size of a pipe safely: if it fails
 * it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
	struct vm_object *object;
	caddr_t buffer;
	int npages, error;

	npages = round_page(size) / PAGE_SIZE;
	object = cpipe->pipe_buffer.object;

	/*
	 * [re]create the object if necessary and reserve space for it
	 * in the kernel_map.  The object and memory are pageable.  On
	 * success, free the old resources before assigning the new
	 * ones.
	 */
	if (object == NULL || object->size != npages) {
		get_mplock();
		object = vm_object_allocate(OBJT_DEFAULT, npages);
		buffer = (caddr_t)vm_map_min(&kernel_map);

		error = vm_map_find(&kernel_map, object, 0,
				    (vm_offset_t *)&buffer, size,
				    1,
				    VM_MAPTYPE_NORMAL,
				    VM_PROT_ALL, VM_PROT_ALL,
				    0);

		if (error != KERN_SUCCESS) {
			vm_object_deallocate(object);
			rel_mplock();
			return (ENOMEM);
		}
		pipe_free_kmem(cpipe);
		rel_mplock();
		cpipe->pipe_buffer.object = object;
		cpipe->pipe_buffer.buffer = buffer;
		cpipe->pipe_buffer.size = size;
		++pipe_bkmem_alloc;
	} else {
		++pipe_bcache_alloc;
	}
	cpipe->pipe_buffer.rindex = 0;
	cpipe->pipe_buffer.windex = 0;
	return (0);
}
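
/*
 * Editorial note: rindex and windex increase monotonically and are
 * masked with (size - 1) on access, so pipe buffer sizes must be
 * powers of 2 (PIPE_SIZE and BIG_PIPE_SIZE are).  The byte count is
 * always (windex - rindex); unsigned arithmetic keeps that difference
 * correct even after the u_int indices themselves wrap, e.g.
 * windex = 0x00000010, rindex = 0xfffffff0 still yields 0x20 bytes.
 */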

/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
	globaldata_t gd = mycpu;
	struct pipe *cpipe;
	int error;

	if ((cpipe = gd->gd_pipeq) != NULL) {
		gd->gd_pipeq = cpipe->pipe_peer;
		--gd->gd_pipeqcount;
		cpipe->pipe_peer = NULL;
		cpipe->pipe_wantwcnt = 0;
	} else {
		cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
	}
	*cpipep = cpipe;
	if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
		return (error);
	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	lwkt_token_init(&cpipe->pipe_rlock);
	lwkt_token_init(&cpipe->pipe_wlock);
	return (0);
}

/*
 * MPALMOSTSAFE (acquires mplock)
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipe *rpipe;
	int error;
	int orig_resid;
	int nread = 0;
	int nbio;
	u_int size;	/* total bytes available */
	u_int nsize;	/* total bytes to read */
	u_int rindex;	/* contiguous bytes available */
	int notify_writer;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int mpsave;

	/*
	 * Degenerate case
	 */
	orig_resid = uio->uio_resid;
	if (orig_resid == 0)
		return(0);

	/*
	 * Setup locks, calculate nbio
	 */
	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	lwkt_gettoken(&rlock, &rpipe->pipe_rlock);

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

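	/*
	 * Editorial note: nbio is the effective blocking mode for this
	 * call only.  The O_FBLOCKING/O_FNONBLOCKING uio flags act as
	 * per-call overrides and take precedence over the descriptor's
	 * persistent O_NONBLOCK flag in fp->f_flag.
	 */
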
	/*
	 * Reads are serialized.  Note however that pipe_buffer.buffer and
	 * pipe_buffer.size can change out from under us when the number
	 * of bytes in the buffer are zero due to the write-side doing a
	 * pipespace().
	 */
	error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&rlock);
		return (error);
	}
	notify_writer = 0;
	while (uio->uio_resid) {
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		cpu_lfence();
		if (size) {
			rindex = rpipe->pipe_buffer.rindex &
				 (rpipe->pipe_buffer.size - 1);
			nsize = size;
			if (nsize > rpipe->pipe_buffer.size - rindex)
				nsize = rpipe->pipe_buffer.size - rindex;
			if (nsize > (u_int)uio->uio_resid)
				nsize = (u_int)uio->uio_resid;

			error = uiomove(&rpipe->pipe_buffer.buffer[rindex],
					nsize, uio);
			if (error)
				break;
			cpu_mfence();
			rpipe->pipe_buffer.rindex += nsize;
			nread += nsize;

			/*
			 * If the FIFO is still over half full just continue
			 * and do not try to notify the writer yet.
			 */
			if (size - nsize >= (rpipe->pipe_buffer.size >> 1)) {
				notify_writer = 0;
				continue;
			}

			/*
			 * When the FIFO is less than half full notify any
			 * waiting writer.  WANTW can be checked while
			 * holding just the rlock.
			 */
			notify_writer = 1;
			if ((rpipe->pipe_state & PIPE_WANTW) == 0)
				continue;
		}

		/*
		 * If the "write-side" was blocked we wake it up.  This code
		 * is reached either when the buffer is completely emptied
		 * or if it becomes more than half-empty.
		 *
		 * Pipe_state can only be modified if both the rlock and
		 * wlock are held.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				notify_writer = 0;
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&wlock);
			}
		}

		/*
		 * Pick up our copy loop again if the writer sent data to
		 * us while we were messing around.
		 *
		 * On a SMP box poll up to pipe_delay nanoseconds for new
		 * data.  Typically a value of 2000 to 4000 is sufficient
		 * to eradicate most IPIs/tsleeps/wakeups when a pipe
		 * is used for synchronous communications with small packets,
		 * and 8000 or so (8uS) will pipeline large buffer xfers
		 * between cpus over a pipe.
		 *
		 * For synchronous communications a hit means doing a
		 * full Awrite-Bread-Bwrite-Aread cycle in less than 2uS,
		 * whereas a miss requiring a tsleep/wakeup sequence
		 * will take 7uS or more.
		 */
		if (rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex)
			continue;

#if defined(SMP) && defined(_RDTSC_SUPPORTED_)
		if (pipe_delay) {
			int64_t tsc_target;
			int good = 0;

			tsc_target = tsc_get_target(pipe_delay);
			while (tsc_test_target(tsc_target) == 0) {
				if (rpipe->pipe_buffer.windex !=
				    rpipe->pipe_buffer.rindex) {
					good = 1;
					break;
				}
			}
			if (good)
				continue;
		}
#endif

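		/*
		 * Editorial note: the pipe_delay spin window above is a
		 * runtime tunable, e.g. "sysctl kern.pipe.delay=4000"
		 * trades a short busy-wait for fewer IPI/tsleep/wakeup
		 * round trips, and 0 disables the polling entirely.
		 */
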
		/*
		 * Detect EOF condition, do not set error.
		 */
		if (rpipe->pipe_state & PIPE_REOF)
			break;

		/*
		 * Break if some data was read, or if this was a non-blocking
		 * read.
		 */
		if (nread > 0)
			break;

		if (nbio) {
			error = EAGAIN;
			break;
		}

		/*
		 * Last chance, interlock with WANTR.
		 */
		lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size) {
			lwkt_reltoken(&wlock);
			continue;
		}

		/*
		 * If there is no more to read in the pipe, reset its
		 * pointers to the beginning.  This improves cache hit
		 * stats.
		 *
		 * We need both locks to modify both pointers, and there
		 * must also not be a write in progress or the uiomove()
		 * in the write might block and temporarily release
		 * its wlock, then reacquire and update windex.  We are
		 * only serialized against reads, not writes.
		 *
		 * XXX should we even bother resetting the indices?  It
		 * might actually be more cache efficient not to.
		 */
		if (rpipe->pipe_buffer.rindex == rpipe->pipe_buffer.windex &&
		    rpipe->pipe_wip == 0) {
			rpipe->pipe_buffer.rindex = 0;
			rpipe->pipe_buffer.windex = 0;
		}

		/*
		 * Wait for more data.
		 *
		 * Pipe_state can only be set if both the rlock and wlock
		 * are held.
		 */
		rpipe->pipe_state |= PIPE_WANTR;
		tsleep_interlock(rpipe);
		lwkt_reltoken(&wlock);
		error = tsleep(rpipe, PCATCH, "piperd", 0);
		++pipe_rblocked_count;
		if (error)
			break;
	}
	pipe_end_uio(rpipe, &rpipe->pipe_rip);

	/*
	 * Update last access time.
	 */
	if (error == 0 && nread)
		vfs_timestamp(&rpipe->pipe_atime);

	/*
	 * If we drained the FIFO more than half way then handle
	 * write blocking hysteresis.
	 *
	 * Note that PIPE_WANTW cannot be set by the writer without
	 * it holding both rlock and wlock, so we can test it
	 * while holding just rlock.
	 */
	if (notify_writer) {
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&wlock);
			}
		}
	}
	size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
	lwkt_reltoken(&rlock);

	/*
	 * If enough space is now available in the buffer, wake up
	 * any select/poll writers.
	 */
	if ((rpipe->pipe_buffer.size - size) >= PIPE_BUF)
		pipeselwakeup(rpipe);
	pipe_rel_mplock(&mpsave);
	return (error);
}

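/*
 * Editorial example of the read-side hysteresis above: with a 16K
 * buffer holding 12K, a 6K read leaves 6K (less than the 8K half-way
 * mark), so notify_writer is set and a writer sleeping on PIPE_WANTW
 * is woken; a 2K read leaving 10K would leave the writer asleep.
 */
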
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	int error;
	int orig_resid;
	int nbio;
	struct pipe *wpipe, *rpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	u_int windex;
	u_int space;
	u_int wcount;
	int mpsave;

	pipe_get_mplock(&mpsave);

	/*
	 * Writes go to the peer.  The peer will always exist.
	 */
	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;
	lwkt_gettoken(&wlock, &wpipe->pipe_wlock);
	if (wpipe->pipe_state & PIPE_WEOF) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (EPIPE);
	}

	/*
	 * Degenerate case (EPIPE takes prec)
	 */
	if (uio->uio_resid == 0) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return(0);
	}

	/*
	 * Writes are serialized (start_uio must be called with wlock)
	 */
	error = pipe_start_uio(wpipe, &wpipe->pipe_wip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (error);
	}

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.  We are write-serialized so we can block safely.
	 */
	if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (pipe_nbig < pipe_maxbig) &&
	    wpipe->pipe_wantwcnt > 4 &&
	    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
		/*
		 * Recheck after lock.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
		if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		    (pipe_nbig < pipe_maxbig) &&
		    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
			atomic_add_int(&pipe_nbig, 1);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				++pipe_bigcount;
			else
				atomic_subtract_int(&pipe_nbig, 1);
		}
		lwkt_reltoken(&rlock);
	}

	orig_resid = uio->uio_resid;
	wcount = 0;

	while (uio->uio_resid) {
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}

		windex = wpipe->pipe_buffer.windex &
			 (wpipe->pipe_buffer.size - 1);
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		cpu_lfence();

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

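		/*
		 * Editorial note: this enforces the POSIX guarantee that
		 * writes of at most PIPE_BUF bytes are atomic with
		 * respect to other writers; rather than landing a
		 * partial segment, a small write blocks (or returns
		 * EAGAIN) until it fits in its entirety.
		 */
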
		/*
		 * Write to fill, read size handles write hysteresis.  Also
		 * additional restrictions can cause select-based non-blocking
		 * writes to spin.
		 */
		if (space > 0) {
			u_int segsize;

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 *
			 * Limit each uiocopy to no more than PIPE_SIZE
			 * so we can keep the gravy train going on a
			 * SMP box.  This doubles the performance for
			 * write sizes > 16K.  Otherwise large writes
			 * wind up doing an inefficient synchronous
			 * ping-pong.
			 */
			if (space > (u_int)uio->uio_resid)
				space = (u_int)uio->uio_resid;
			if (space > PIPE_SIZE)
				space = PIPE_SIZE;

			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size - windex;
			if (segsize > space)
				segsize = space;

#ifdef SMP
			/*
			 * If this is the first loop and the reader is
			 * blocked, do a preemptive wakeup of the reader.
			 *
			 * On SMP the IPI latency plus the wlock interlock
			 * on the reader side is the fastest way to get the
			 * reader going.  (The scheduler will hard loop on
			 * lock tokens).
			 *
			 * NOTE: We can't clear WANTR here without acquiring
			 * the rlock, which we don't want to do here!
			 */
			if ((wpipe->pipe_state & PIPE_WANTR) && pipe_mpsafe > 1)
				wakeup(wpipe);
#endif

			/*
			 * Transfer segment, which may include a wrap-around.
			 * Update windex to account for both all in one go
			 * so the reader can read() the data atomically.
			 */
			error = uiomove(&wpipe->pipe_buffer.buffer[windex],
					segsize, uio);
			if (error == 0 && segsize < space) {
				segsize = space - segsize;
				error = uiomove(&wpipe->pipe_buffer.buffer[0],
						segsize, uio);
			}
			if (error)
				break;
			cpu_mfence();
			wpipe->pipe_buffer.windex += space;
			wcount += space;
			continue;
		}
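
		/*
		 * Editorial example of the wrap-around above: with a 16K
		 * buffer, windex at 15K and 4K to write, the first
		 * uiomove() copies 1K up to the buffer end and the second
		 * copies the remaining 3K to buffer[0]; windex is then
		 * advanced by the full 4K in one step.
		 */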

		/*
		 * We need both the rlock and the wlock to interlock against
		 * the EOF, WANTW, and size checks, and to modify pipe_state.
		 *
		 * These are token locks so we do not have to worry about
		 * deadlocks.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);

		/*
		 * If the "read-side" has been blocked, wake it up now
		 * and yield to let it drain synchronously rather
		 * than block.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		/*
		 * don't block on non-blocking I/O
		 */
		if (nbio) {
			lwkt_reltoken(&rlock);
			error = EAGAIN;
			break;
		}

		/*
		 * re-test whether we have to block in the writer after
		 * acquiring both locks, in case the reader opened up
		 * some space.
		 */
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		cpu_lfence();
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * We have no more space and have something to offer,
		 * wake up select/poll.
		 */
		if (space == 0) {
			pipeselwakeup(wpipe);
			++wpipe->pipe_wantwcnt;
			wpipe->pipe_state |= PIPE_WANTW;
			error = tsleep(wpipe, PCATCH, "pipewr", 0);
			++pipe_wblocked_count;
		}
		lwkt_reltoken(&rlock);

		/*
		 * Break out if we errored or the read side wants us to go
		 * away.
		 */
		if (error)
			break;
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}
	}
	pipe_end_uio(wpipe, &wpipe->pipe_wip);

	/*
	 * If we have put any characters in the buffer, we wake up
	 * the reader.
	 *
	 * Both rlock and wlock are required to be able to modify pipe_state.
	 */
	if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				lwkt_reltoken(&rlock);
				wakeup(wpipe);
			} else {
				lwkt_reltoken(&rlock);
			}
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;
	lwkt_reltoken(&wlock);
	if (space)
		pipeselwakeup(wpipe);
	pipe_rel_mplock(&mpsave);
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 *
 * we implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data, struct ucred *cred)
{
	struct pipe *mpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int error;
	int mpsave;

	pipe_get_mplock(&mpsave);
	mpipe = (struct pipe *)fp->f_data;

	lwkt_gettoken(&rlock, &mpipe->pipe_rlock);
	lwkt_gettoken(&wlock, &mpipe->pipe_wlock);

	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		error = 0;
		break;
	case FIONREAD:
		*(int *)data = mpipe->pipe_buffer.windex -
				mpipe->pipe_buffer.rindex;
		error = 0;
		break;
	case FIOSETOWN:
		get_mplock();
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		rel_mplock();
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	case TIOCSPGRP:
		/* This is deprecated, FIOSETOWN should be used instead. */
		get_mplock();
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		rel_mplock();
		break;
	case TIOCGPGRP:
		/* This is deprecated, FIOGETOWN should be used instead. */
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&rlock);
	lwkt_reltoken(&wlock);
	pipe_rel_mplock(&mpsave);

	return (error);
}

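/*
 * Illustrative userland use (editorial): FIONREAD reports the bytes
 * currently buffered, i.e. windex - rindex.
 *
 *	int n;
 *	ioctl(fds[0], FIONREAD, &n);
 */
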
/*
 * MPALMOSTSAFE - acquires mplock
 */
int
pipe_poll(struct file *fp, int events, struct ucred *cred)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int revents = 0;
	u_int space;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;
	if (events & (POLLIN | POLLRDNORM)) {
		if ((rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex) ||
		    (rpipe->pipe_state & PIPE_REOF)) {
			revents |= events & (POLLIN | POLLRDNORM);
		}
	}

	if (events & (POLLOUT | POLLWRNORM)) {
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_WEOF)) {
			revents |= events & (POLLOUT | POLLWRNORM);
		} else {
			space = wpipe->pipe_buffer.windex -
				wpipe->pipe_buffer.rindex;
			space = wpipe->pipe_buffer.size - space;
			if (space >= PIPE_BUF)
				revents |= events & (POLLOUT | POLLWRNORM);
		}
	}

	if ((rpipe->pipe_state & PIPE_REOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_WEOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(curthread, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(curthread, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}
	pipe_rel_mplock(&mpsave);
	return (revents);
}

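/*
 * Editorial note: a pipe only polls writable when at least PIPE_BUF
 * bytes are free, so a descriptor reported ready by poll(2) can
 * always accept a small atomic write without blocking.
 */
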
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
	struct pipe *pipe;
	int mpsave;

	pipe_get_mplock(&mpsave);
	pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.windex - pipe->pipe_buffer.rindex;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	pipe_rel_mplock(&mpsave);
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_close(struct file *fp)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)fp->f_data;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(cpipe->pipe_sigio);
	pipeclose(cpipe);
	rel_mplock();
	return (0);
}

/*
 * Shutdown one or both directions of a full-duplex pipe.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_shutdown(struct file *fp, int how)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int error = EPIPE;
	lwkt_tokref rpipe_rlock;
	lwkt_tokref rpipe_wlock;
	lwkt_tokref wpipe_rlock;
	lwkt_tokref wpipe_wlock;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * We modify pipe_state on both pipes, which means we need
	 * all four tokens!
	 */
	lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock);
	lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock);
	lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock);
	lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock);

	switch(how) {
	case SHUT_RDWR:
	case SHUT_RD:
		rpipe->pipe_state |= PIPE_REOF;
		wpipe->pipe_state |= PIPE_WEOF;
		if (rpipe->pipe_state & PIPE_WANTR) {
			rpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(rpipe);
		}
		if (wpipe->pipe_state & PIPE_WANTW) {
			wpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(wpipe);
		}
		pipeselwakeup(rpipe);
		error = 0;
		if (how == SHUT_RD)
			break;
		/* fall through */
	case SHUT_WR:
		wpipe->pipe_state |= PIPE_WEOF;
		rpipe->pipe_state |= PIPE_REOF;
		if (wpipe->pipe_state & PIPE_WANTW) {
			wpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(wpipe);
		}
		if (rpipe->pipe_state & PIPE_WANTR) {
			rpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(rpipe);
		}
		pipeselwakeup(wpipe);
		error = 0;
		break;
	}

	lwkt_reltoken(&rpipe_rlock);
	lwkt_reltoken(&rpipe_wlock);
	lwkt_reltoken(&wpipe_rlock);
	lwkt_reltoken(&wpipe_wlock);

	pipe_rel_mplock(&mpsave);
	return (error);
}

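/*
 * Editorial note: both SHUT_RD and SHUT_WR end up flagging REOF on
 * this descriptor's pipe and WEOF on its peer, so e.g.
 * shutdown(fds[0], SHUT_RD) makes further reads on fds[0] return EOF
 * and further writes on fds[0] fail with EPIPE.
 */
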
static void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			atomic_subtract_int(&pipe_nbig, 1);
		kmem_free(&kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
		cpipe->pipe_buffer.object = NULL;
	}
}

/*
 * Close the pipe.  The slock must be held to interlock against simultaneous
 * closes.  The rlock and wlock must be held to adjust the pipe_state.
 */
static void
pipeclose(struct pipe *cpipe)
{
	globaldata_t gd;
	struct pipe *ppipe;
	lwkt_tokref cpipe_rlock;
	lwkt_tokref cpipe_wlock;
	lwkt_tokref ppipe_rlock;
	lwkt_tokref ppipe_wlock;

	if (cpipe == NULL)
		return;

	/*
	 * The slock may not have been allocated yet (close during
	 * initialization)
	 *
	 * We need both the read and write tokens to modify pipe_state.
	 */
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE);
	lwkt_gettoken(&cpipe_rlock, &cpipe->pipe_rlock);
	lwkt_gettoken(&cpipe_wlock, &cpipe->pipe_wlock);

	/*
	 * Set our state, wakeup anyone waiting in select, and
	 * wakeup anyone blocked on our pipe.
	 */
	cpipe->pipe_state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF;
	pipeselwakeup(cpipe);
	if (cpipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
		cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
		wakeup(cpipe);
	}

	/*
	 * Disconnect from peer
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		lwkt_gettoken(&ppipe_rlock, &ppipe->pipe_rlock);
		lwkt_gettoken(&ppipe_wlock, &ppipe->pipe_wlock);
		ppipe->pipe_state |= PIPE_REOF;
		pipeselwakeup(ppipe);
		if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
			ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
			wakeup(ppipe);
		}
		if (SLIST_FIRST(&ppipe->pipe_sel.si_note)) {
			get_mplock();
			KNOTE(&ppipe->pipe_sel.si_note, 0);
			rel_mplock();
		}
		lwkt_reltoken(&ppipe_rlock);
		lwkt_reltoken(&ppipe_wlock);
	}

	/*
	 * If the peer is also closed we can free resources for both
	 * sides, otherwise we leave our side intact to deal with any
	 * races (since we only have the slock).
	 */
	if (ppipe && (ppipe->pipe_state & PIPE_CLOSED)) {
		cpipe->pipe_peer = NULL;
		ppipe->pipe_peer = NULL;
		ppipe->pipe_slock = NULL;	/* we will free the slock */
		pipeclose(ppipe);
		ppipe = NULL;
	}

	lwkt_reltoken(&cpipe_rlock);
	lwkt_reltoken(&cpipe_wlock);
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_RELEASE);

	/*
	 * If we disassociated from our peer we can free resources
	 */
	if (ppipe == NULL) {
		gd = mycpu;
		if (cpipe->pipe_slock) {
			kfree(cpipe->pipe_slock, M_PIPE);
			cpipe->pipe_slock = NULL;
		}
		if (gd->gd_pipeqcount >= pipe_maxcache ||
		    cpipe->pipe_buffer.size != PIPE_SIZE
		) {
			pipe_free_kmem(cpipe);
			kfree(cpipe, M_PIPE);
		} else {
			cpipe->pipe_state = 0;
			cpipe->pipe_peer = gd->gd_pipeq;
			gd->gd_pipeq = cpipe;
			++gd->gd_pipeqcount;
		}
	}
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL) {
			/* other end of pipe has been closed */
			rel_mplock();
			return (EPIPE);
		}
		break;
	default:
		rel_mplock();
		return (1);
	}
	kn->kn_hook = (caddr_t)cpipe;

	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	rel_mplock();
	return (0);
}

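/*
 * Illustrative userland use (editorial): pipe ends can be monitored
 * with kqueue(2), e.g.
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */
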
static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;

	kn->kn_data = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;

	/* XXX RACE */
	if (rpipe->pipe_state & PIPE_REOF) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;
	u_int32_t space;

	/* XXX RACE */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_WEOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	space = wpipe->pipe_buffer.windex -
		wpipe->pipe_buffer.rindex;
	space = wpipe->pipe_buffer.size - space;
	kn->kn_data = space;
	return (kn->kn_data >= PIPE_BUF);
}