/*
 * Copyright (c) 1996 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
 * $DragonFly: src/sys/kern/sys_pipe.c,v 1.50 2008/09/09 04:06:13 dillon Exp $
 */

/*
 * This file contains a high-performance replacement for the socket-based
 * pipes scheme originally used in FreeBSD/4.4Lite.  It does not support
 * all features of sockets, but does do everything that pipes normally
 * do.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/stat.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/sysproto.h>
#include <sys/pipe.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/event.h>
#include <sys/globaldata.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/socket.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_zone.h>

#include <sys/file2.h>
#include <sys/signal2.h>

#include <machine/cpufunc.h>

/*
 * interfaces to the outside world
 */
static int pipe_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int pipe_close (struct file *fp);
static int pipe_shutdown (struct file *fp, int how);
static int pipe_poll (struct file *fp, int events, struct ucred *cred);
static int pipe_kqfilter (struct file *fp, struct knote *kn);
static int pipe_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int pipe_ioctl (struct file *fp, u_long cmd, caddr_t data,
		struct ucred *cred, struct sysmsg *msg);

static struct fileops pipeops = {
	.fo_read = pipe_read,
	.fo_write = pipe_write,
	.fo_ioctl = pipe_ioctl,
	.fo_poll = pipe_poll,
	.fo_kqfilter = pipe_kqfilter,
	.fo_stat = pipe_stat,
	.fo_close = pipe_close,
	.fo_shutdown = pipe_shutdown
};

static void	filt_pipedetach(struct knote *kn);
static int	filt_piperead(struct knote *kn, long hint);
static int	filt_pipewrite(struct knote *kn, long hint);

static struct filterops pipe_rfiltops =
	{ 1, NULL, filt_pipedetach, filt_piperead };
static struct filterops pipe_wfiltops =
	{ 1, NULL, filt_pipedetach, filt_pipewrite };

MALLOC_DEFINE(M_PIPE, "pipe", "pipe structures");

/*
 * Default pipe buffer size(s), this can be kind-of large now because pipe
 * space is pageable.  The pipe code will try to maintain locality of
 * reference for performance reasons, so small amounts of outstanding I/O
 * will not wipe the cache.
 */
#define MINPIPESIZE (PIPE_SIZE/3)
#define MAXPIPESIZE (2*PIPE_SIZE/3)
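
/*
 * Worked example of the thresholds above (a sketch; it assumes the
 * common PIPE_SIZE of 16384 from <sys/pipe.h>, which can vary by
 * platform):
 *
 *	MINPIPESIZE = 16384/3   = 5461 bytes
 *	MAXPIPESIZE = 2*16384/3 = 10922 bytes
 */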

/*
 * Limit the number of "big" pipes
 */
#define LIMITBIGPIPES	64
#define PIPEQ_MAX_CACHE 16	/* per-cpu pipe structure cache */

static int pipe_maxbig = LIMITBIGPIPES;
static int pipe_maxcache = PIPEQ_MAX_CACHE;
static int pipe_bigcount;
static int pipe_nbig;
static int pipe_bcache_alloc;
static int pipe_bkmem_alloc;
static int pipe_rblocked_count;
static int pipe_wblocked_count;

SYSCTL_NODE(_kern, OID_AUTO, pipe, CTLFLAG_RW, 0, "Pipe operation");
SYSCTL_INT(_kern_pipe, OID_AUTO, nbig,
        CTLFLAG_RD, &pipe_nbig, 0, "number of big pipes allocated");
SYSCTL_INT(_kern_pipe, OID_AUTO, bigcount,
        CTLFLAG_RW, &pipe_bigcount, 0, "number of times pipe expanded");
SYSCTL_INT(_kern_pipe, OID_AUTO, rblocked,
        CTLFLAG_RW, &pipe_rblocked_count, 0, "number of times read blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, wblocked,
        CTLFLAG_RW, &pipe_wblocked_count, 0, "number of times write blocked");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxcache,
        CTLFLAG_RW, &pipe_maxcache, 0, "max pipes cached per-cpu");
SYSCTL_INT(_kern_pipe, OID_AUTO, maxbig,
        CTLFLAG_RW, &pipe_maxbig, 0, "max number of big pipes");
#ifdef SMP
static int pipe_delay = 5000;	/* 5uS default */
SYSCTL_INT(_kern_pipe, OID_AUTO, delay,
        CTLFLAG_RW, &pipe_delay, 0, "SMP delay optimization in ns");
static int pipe_mpsafe = 1;
SYSCTL_INT(_kern_pipe, OID_AUTO, mpsafe,
        CTLFLAG_RW, &pipe_mpsafe, 0, "");
#endif
#if !defined(NO_PIPE_SYSCTL_STATS)
SYSCTL_INT(_kern_pipe, OID_AUTO, bcache_alloc,
        CTLFLAG_RW, &pipe_bcache_alloc, 0, "pipe buffer from pcpu cache");
SYSCTL_INT(_kern_pipe, OID_AUTO, bkmem_alloc,
        CTLFLAG_RW, &pipe_bkmem_alloc, 0, "pipe buffer from kmem");
#endif

static void pipeclose (struct pipe *cpipe);
static void pipe_free_kmem (struct pipe *cpipe);
static int pipe_create (struct pipe **cpipep);
static __inline void pipeselwakeup (struct pipe *cpipe);
static int pipespace (struct pipe *cpipe, int size);

static __inline void
pipeselwakeup(struct pipe *cpipe)
{
	if (cpipe->pipe_state & PIPE_SEL) {
		get_mplock();
		cpipe->pipe_state &= ~PIPE_SEL;
		selwakeup(&cpipe->pipe_sel);
		rel_mplock();
	}
	if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio) {
		get_mplock();
		pgsigio(cpipe->pipe_sigio, SIGIO, 0);
		rel_mplock();
	}
	if (SLIST_FIRST(&cpipe->pipe_sel.si_note)) {
		get_mplock();
		KNOTE(&cpipe->pipe_sel.si_note, 0);
		rel_mplock();
	}
}

/*
 * These routines are called before and after a UIO.  The UIO
 * may block, causing our held tokens to be lost temporarily.
 *
 * We use these routines to serialize reads against other reads
 * and writes against other writes.
 *
 * The read token is held on entry so *ipp does not race.
 */
static __inline int
pipe_start_uio(struct pipe *cpipe, int *ipp)
{
	int error;

	while (*ipp) {
		*ipp = -1;
		error = tsleep(ipp, PCATCH, "pipexx", 0);
		if (error)
			return (error);
	}
	*ipp = 1;
	return (0);
}

static __inline void
pipe_end_uio(struct pipe *cpipe, int *ipp)
{
	if (*ipp < 0) {
		*ipp = 0;
		wakeup(ipp);
	} else {
		KKASSERT(*ipp > 0);
		*ipp = 0;
	}
}
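
/*
 * State sketch for the *ipp serialization counter above (derived from
 * the code, stated here for clarity): 0 = no uio in progress, 1 = a
 * uio in progress, -1 = a uio in progress with at least one thread
 * sleeping in pipe_start_uio() waiting its turn.  pipe_end_uio() only
 * needs to wakeup() in the -1 case.
 */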

static __inline void
pipe_get_mplock(int *save)
{
#ifdef SMP
	if (pipe_mpsafe == 0) {
		get_mplock();
		*save = 1;
	} else
#endif
	{
		*save = 0;
	}
}

static __inline void
pipe_rel_mplock(int *save)
{
#ifdef SMP
	if (*save)
		rel_mplock();
#endif
}
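
/*
 * The kern.pipe.mpsafe sysctl selects between these two modes at
 * run-time: when it is 0 every pipe operation runs under the MP lock
 * and *save remembers that this call acquired it, so pipe_rel_mplock()
 * releases it exactly once.  On non-SMP builds both helpers reduce to
 * *save = 0 / no-op.
 */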


/*
 * The pipe system call for the DTYPE_PIPE type of pipes
 *
 * pipe_args(int dummy)
 */

/* ARGSUSED */
int
sys_pipe(struct pipe_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *rf, *wf;
	struct pipe *rpipe, *wpipe;
	int fd1, fd2, error;

	KKASSERT(p);

	rpipe = wpipe = NULL;
	if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (ENFILE);
	}

	error = falloc(p, &rf, &fd1);
	if (error) {
		pipeclose(rpipe);
		pipeclose(wpipe);
		return (error);
	}
	uap->sysmsg_fds[0] = fd1;

	/*
	 * Warning: once we've gotten past allocation of the fd for the
	 * read-side, we can only drop the read side via fdrop() in order
	 * to avoid races against processes which manage to dup() the read
	 * side while we are blocked trying to allocate the write side.
	 */
	rf->f_type = DTYPE_PIPE;
	rf->f_flag = FREAD | FWRITE;
	rf->f_ops = &pipeops;
	rf->f_data = rpipe;
	error = falloc(p, &wf, &fd2);
	if (error) {
		fsetfd(p, NULL, fd1);
		fdrop(rf);
		/* rpipe has been closed by fdrop(). */
		pipeclose(wpipe);
		return (error);
	}
	wf->f_type = DTYPE_PIPE;
	wf->f_flag = FREAD | FWRITE;
	wf->f_ops = &pipeops;
	wf->f_data = wpipe;
	uap->sysmsg_fds[1] = fd2;

	rpipe->pipe_slock = kmalloc(sizeof(struct lock),
				    M_PIPE, M_WAITOK|M_ZERO);
	wpipe->pipe_slock = rpipe->pipe_slock;
	rpipe->pipe_peer = wpipe;
	wpipe->pipe_peer = rpipe;
	lockinit(rpipe->pipe_slock, "pipecl", 0, 0);

	/*
	 * Once activated the peer relationship remains valid until
	 * both sides are closed.
	 */
	fsetfd(p, rf, fd1);
	fsetfd(p, wf, fd2);
	fdrop(rf);
	fdrop(wf);

	return (0);
}
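
/*
 * Illustrative userland view of sys_pipe() (a sketch, not part of the
 * kernel build): sysmsg_fds[0]/sysmsg_fds[1] become the descriptor
 * pair returned by pipe(2).  Note that both struct files are set up
 * FREAD | FWRITE above, so the pipe is full-duplex; conventional usage
 * still reads from fds[0] and writes to fds[1]:
 *
 *	int fds[2];
 *	char buf[2];
 *
 *	if (pipe(fds) == 0) {
 *		write(fds[1], "hi", 2);
 *		read(fds[0], buf, 2);
 *	}
 */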

/*
 * Allocate kva for pipe circular buffer, the space is pageable.
 * This routine will 'realloc' the size of a pipe safely; if it
 * fails it will retain the old buffer and return ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
	struct vm_object *object;
	caddr_t buffer;
	int npages, error;

	npages = round_page(size) / PAGE_SIZE;
	object = cpipe->pipe_buffer.object;

	/*
	 * [re]create the object if necessary and reserve space for it
	 * in the kernel_map.  The object and memory are pageable.  On
	 * success, free the old resources before assigning the new
	 * ones.
	 */
	if (object == NULL || object->size != npages) {
		get_mplock();
		object = vm_object_allocate(OBJT_DEFAULT, npages);
		buffer = (caddr_t)vm_map_min(&kernel_map);

		error = vm_map_find(&kernel_map, object, 0,
				    (vm_offset_t *)&buffer, size,
				    1,
				    VM_MAPTYPE_NORMAL,
				    VM_PROT_ALL, VM_PROT_ALL,
				    0);

		if (error != KERN_SUCCESS) {
			vm_object_deallocate(object);
			rel_mplock();
			return (ENOMEM);
		}
		pipe_free_kmem(cpipe);
		rel_mplock();
		cpipe->pipe_buffer.object = object;
		cpipe->pipe_buffer.buffer = buffer;
		cpipe->pipe_buffer.size = size;
		++pipe_bkmem_alloc;
	} else {
		++pipe_bcache_alloc;
	}
	cpipe->pipe_buffer.rindex = 0;
	cpipe->pipe_buffer.windex = 0;
	return (0);
}
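
/*
 * The rindex/windex fields initialized above are free-running unsigned
 * counters: they only ever increase, and (windex - rindex) yields the
 * byte count in the FIFO even across 32-bit wraparound, while masking
 * with (size - 1) converts a counter into a buffer offset.  The
 * masking assumes size is a power of 2.  A small worked example,
 * assuming a 16384-byte buffer:
 *
 *	windex = 0x00004100, rindex = 0x00003f00
 *	windex - rindex      = 0x200  (512 bytes in the FIFO)
 *	rindex & (16384 - 1) = 0x3f00 (read offset in the buffer)
 */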

/*
 * Initialize and allocate VM and memory for pipe, pulling the pipe from
 * our per-cpu cache if possible.  For now make sure it is sized for the
 * smaller PIPE_SIZE default.
 */
static int
pipe_create(struct pipe **cpipep)
{
	globaldata_t gd = mycpu;
	struct pipe *cpipe;
	int error;

	if ((cpipe = gd->gd_pipeq) != NULL) {
		gd->gd_pipeq = cpipe->pipe_peer;
		--gd->gd_pipeqcount;
		cpipe->pipe_peer = NULL;
		cpipe->pipe_wantwcnt = 0;
	} else {
		cpipe = kmalloc(sizeof(struct pipe), M_PIPE, M_WAITOK|M_ZERO);
	}
	*cpipep = cpipe;
	if ((error = pipespace(cpipe, PIPE_SIZE)) != 0)
		return (error);
	vfs_timestamp(&cpipe->pipe_ctime);
	cpipe->pipe_atime = cpipe->pipe_ctime;
	cpipe->pipe_mtime = cpipe->pipe_ctime;
	lwkt_token_init(&cpipe->pipe_rlock);
	lwkt_token_init(&cpipe->pipe_wlock);
	return (0);
}

/*
 * MPALMOSTSAFE (acquires mplock)
 */
static int
pipe_read(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	struct pipe *rpipe;
	int error;
	size_t nread = 0;
	int nbio;
	u_int size;	/* total bytes available */
	u_int nsize;	/* total bytes to read */
	u_int rindex;	/* contiguous bytes available */
	int notify_writer;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int mpsave;
	int bigread;
	int bigcount;

	if (uio->uio_resid == 0)
		return(0);

	/*
	 * Setup locks, calculate nbio
	 */
	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	lwkt_gettoken(&rlock, &rpipe->pipe_rlock);

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * Reads are serialized.  Note however that pipe_buffer.buffer and
	 * pipe_buffer.size can change out from under us when the number
	 * of bytes in the buffer is zero due to the write-side doing a
	 * pipespace().
	 */
	error = pipe_start_uio(rpipe, &rpipe->pipe_rip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&rlock);
		return (error);
	}
	notify_writer = 0;

	bigread = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
		/*
		 * Don't hog the cpu.
		 */
		if (bigread && --bigcount == 0) {
			lwkt_user_yield();
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}

		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		cpu_lfence();
		if (size) {
			rindex = rpipe->pipe_buffer.rindex &
				 (rpipe->pipe_buffer.size - 1);
			nsize = size;
			if (nsize > rpipe->pipe_buffer.size - rindex)
				nsize = rpipe->pipe_buffer.size - rindex;
			nsize = szmin(nsize, uio->uio_resid);

			error = uiomove(&rpipe->pipe_buffer.buffer[rindex],
					nsize, uio);
			if (error)
				break;
			cpu_mfence();
			rpipe->pipe_buffer.rindex += nsize;
			nread += nsize;

			/*
			 * If the FIFO is still over half full just continue
			 * and do not try to notify the writer yet.
			 */
			if (size - nsize >= (rpipe->pipe_buffer.size >> 1)) {
				notify_writer = 0;
				continue;
			}

			/*
			 * When the FIFO is less than half full notify any
			 * waiting writer.  WANTW can be checked while
			 * holding just the rlock.
			 */
			notify_writer = 1;
			if ((rpipe->pipe_state & PIPE_WANTW) == 0)
				continue;
		}

		/*
		 * If the "write-side" was blocked we wake it up.  This code
		 * is reached either when the buffer is completely emptied
		 * or if it becomes more than half-empty.
		 *
		 * Pipe_state can only be modified if both the rlock and
		 * wlock are held.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				notify_writer = 0;
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&wlock);
			}
		}

		/*
		 * Pick up our copy loop again if the writer sent data to
		 * us while we were messing around.
		 *
		 * On an SMP box poll up to pipe_delay nanoseconds for new
		 * data.  Typically a value of 2000 to 4000 is sufficient
		 * to eradicate most IPIs/tsleeps/wakeups when a pipe
		 * is used for synchronous communications with small packets,
		 * and 8000 or so (8uS) will pipeline large buffer xfers
		 * between cpus over a pipe.
		 *
		 * For synchronous communications a hit means doing a
		 * full Awrite-Bread-Bwrite-Aread cycle in less than 2uS,
		 * whereas a miss requiring a tsleep/wakeup sequence
		 * will take 7uS or more.
		 */
		if (rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex)
			continue;

#if defined(SMP) && defined(_RDTSC_SUPPORTED_)
		if (pipe_delay) {
			int64_t tsc_target;
			int good = 0;

			tsc_target = tsc_get_target(pipe_delay);
			while (tsc_test_target(tsc_target) == 0) {
				if (rpipe->pipe_buffer.windex !=
				    rpipe->pipe_buffer.rindex) {
					good = 1;
					break;
				}
			}
			if (good)
				continue;
		}
#endif

		/*
		 * Detect EOF condition, do not set error.
		 */
		if (rpipe->pipe_state & PIPE_REOF)
			break;

		/*
		 * Break if some data was read, or if this was a non-blocking
		 * read.
		 */
		if (nread > 0)
			break;

		if (nbio) {
			error = EAGAIN;
			break;
		}

		/*
		 * Last chance, interlock with WANTR.
		 */
		lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
		size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
		if (size) {
			lwkt_reltoken(&wlock);
			continue;
		}

		/*
		 * If there is no more to read in the pipe, reset its
		 * pointers to the beginning.  This improves cache hit
		 * stats.
		 *
		 * We need both locks to modify both pointers, and there
		 * must also not be a write in progress or the uiomove()
		 * in the write might block and temporarily release
		 * its wlock, then reacquire and update windex.  We are
		 * only serialized against reads, not writes.
		 *
		 * XXX should we even bother resetting the indices?  It
		 * might actually be more cache efficient not to.
		 */
		if (rpipe->pipe_buffer.rindex == rpipe->pipe_buffer.windex &&
		    rpipe->pipe_wip == 0) {
			rpipe->pipe_buffer.rindex = 0;
			rpipe->pipe_buffer.windex = 0;
		}

		/*
		 * Wait for more data.
		 *
		 * Pipe_state can only be set if both the rlock and wlock
		 * are held.
		 */
		rpipe->pipe_state |= PIPE_WANTR;
		tsleep_interlock(rpipe, PCATCH);
		lwkt_reltoken(&wlock);
		error = tsleep(rpipe, PCATCH | PINTERLOCKED, "piperd", 0);
		++pipe_rblocked_count;
		if (error)
			break;
	}
	pipe_end_uio(rpipe, &rpipe->pipe_rip);

	/*
	 * Update last access time
	 */
	if (error == 0 && nread)
		vfs_timestamp(&rpipe->pipe_atime);

	/*
	 * If we drained the FIFO more than half way then handle
	 * write blocking hysteresis.
	 *
	 * Note that PIPE_WANTW cannot be set by the writer without
	 * it holding both rlock and wlock, so we can test it
	 * while holding just rlock.
	 */
	if (notify_writer) {
		if (rpipe->pipe_state & PIPE_WANTW) {
			lwkt_gettoken(&wlock, &rpipe->pipe_wlock);
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				lwkt_reltoken(&wlock);
				wakeup(rpipe);
			} else {
				lwkt_reltoken(&wlock);
			}
		}
	}
	size = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;
	lwkt_reltoken(&rlock);

	/*
	 * If enough space is available in the buffer, wake up any
	 * select/poll writers.
	 */
	if ((rpipe->pipe_buffer.size - size) >= PIPE_BUF)
		pipeselwakeup(rpipe);
	pipe_rel_mplock(&mpsave);
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_write(struct file *fp, struct uio *uio, struct ucred *cred, int fflags)
{
	int error;
	int orig_resid;
	int nbio;
	struct pipe *wpipe, *rpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	u_int windex;
	u_int space;
	u_int wcount;
	int mpsave;
	int bigwrite;
	int bigcount;

	pipe_get_mplock(&mpsave);

	/*
	 * Writes go to the peer.  The peer will always exist.
	 */
	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;
	lwkt_gettoken(&wlock, &wpipe->pipe_wlock);
	if (wpipe->pipe_state & PIPE_WEOF) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (EPIPE);
	}

	/*
	 * Degenerate case (EPIPE takes precedence)
	 */
	if (uio->uio_resid == 0) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return(0);
	}

	/*
	 * Writes are serialized (start_uio must be called with wlock)
	 */
	error = pipe_start_uio(wpipe, &wpipe->pipe_wip);
	if (error) {
		pipe_rel_mplock(&mpsave);
		lwkt_reltoken(&wlock);
		return (error);
	}

	if (fflags & O_FBLOCKING)
		nbio = 0;
	else if (fflags & O_FNONBLOCKING)
		nbio = 1;
	else if (fp->f_flag & O_NONBLOCK)
		nbio = 1;
	else
		nbio = 0;

	/*
	 * If it is advantageous to resize the pipe buffer, do
	 * so.  We are write-serialized so we can block safely.
	 */
	if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (pipe_nbig < pipe_maxbig) &&
	    wpipe->pipe_wantwcnt > 4 &&
	    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
		/*
		 * Recheck after lock.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
		if ((wpipe->pipe_buffer.size <= PIPE_SIZE) &&
		    (pipe_nbig < pipe_maxbig) &&
		    (wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex)) {
			atomic_add_int(&pipe_nbig, 1);
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				++pipe_bigcount;
			else
				atomic_subtract_int(&pipe_nbig, 1);
		}
		lwkt_reltoken(&rlock);
	}

	orig_resid = uio->uio_resid;
	wcount = 0;

	bigwrite = (uio->uio_resid > 10 * 1024 * 1024);
	bigcount = 10;

	while (uio->uio_resid) {
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}

		/*
		 * Don't hog the cpu.
		 */
		if (bigwrite && --bigcount == 0) {
			lwkt_user_yield();
			bigcount = 10;
			if (CURSIG(curthread->td_lwp)) {
				error = EINTR;
				break;
			}
		}

		windex = wpipe->pipe_buffer.windex &
			 (wpipe->pipe_buffer.size - 1);
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		cpu_lfence();

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;
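
		/*
		 * Note: zeroing "space" above is what implements the POSIX
		 * guarantee that writes of <= PIPE_BUF bytes are atomic:
		 * rather than transferring a partial chunk, a small write
		 * blocks (or fails with EAGAIN) until it can complete in
		 * one piece.
		 */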

		/*
		 * Write to fill, read size handles write hysteresis.  Also
		 * additional restrictions can cause select-based non-blocking
		 * writes to spin.
		 */
		if (space > 0) {
			u_int segsize;

			/*
			 * Transfer size is minimum of uio transfer
			 * and free space in pipe buffer.
			 *
			 * Limit each uiocopy to no more than PIPE_SIZE
			 * so we can keep the gravy train going on a
			 * SMP box.  This doubles the performance for
			 * write sizes > 16K.  Otherwise large writes
			 * wind up doing an inefficient synchronous
			 * ping-pong.
			 */
			space = szmin(space, uio->uio_resid);
			if (space > PIPE_SIZE)
				space = PIPE_SIZE;

			/*
			 * First segment to transfer is minimum of
			 * transfer size and contiguous space in
			 * pipe buffer.  If first segment to transfer
			 * is less than the transfer size, we've got
			 * a wraparound in the buffer.
			 */
			segsize = wpipe->pipe_buffer.size - windex;
			if (segsize > space)
				segsize = space;

#ifdef SMP
			/*
			 * If this is the first loop and the reader is
			 * blocked, do a preemptive wakeup of the reader.
			 *
			 * On SMP the IPI latency plus the wlock interlock
			 * on the reader side is the fastest way to get the
			 * reader going.  (The scheduler will hard loop on
			 * lock tokens).
			 *
			 * NOTE: We can't clear WANTR here without acquiring
			 * the rlock, which we don't want to do here!
			 */
			if ((wpipe->pipe_state & PIPE_WANTR) && pipe_mpsafe > 1)
				wakeup(wpipe);
#endif

			/*
			 * Transfer segment, which may include a wrap-around.
			 * Update windex to account for both all in one go
			 * so the reader can read() the data atomically.
			 */
			error = uiomove(&wpipe->pipe_buffer.buffer[windex],
					segsize, uio);
			if (error == 0 && segsize < space) {
				segsize = space - segsize;
				error = uiomove(&wpipe->pipe_buffer.buffer[0],
						segsize, uio);
			}
			if (error)
				break;
			cpu_mfence();
			wpipe->pipe_buffer.windex += space;
			wcount += space;
			continue;
		}

		/*
		 * We need both the rlock and the wlock to interlock against
		 * the EOF, WANTW, and size checks, and to modify pipe_state.
		 *
		 * These are token locks so we do not have to worry about
		 * deadlocks.
		 */
		lwkt_gettoken(&rlock, &wpipe->pipe_rlock);

		/*
		 * If the "read-side" has been blocked, wake it up now
		 * and yield to let it drain synchronously rather
		 * than block.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}

		/*
		 * don't block on non-blocking I/O
		 */
		if (nbio) {
			lwkt_reltoken(&rlock);
			error = EAGAIN;
			break;
		}

		/*
		 * re-test whether we have to block in the writer after
		 * acquiring both locks, in case the reader opened up
		 * some space.
		 */
		space = wpipe->pipe_buffer.size -
			(wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex);
		cpu_lfence();
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		/*
		 * We have no more space and have something to offer,
		 * wake up select/poll.
		 */
		if (space == 0) {
			wpipe->pipe_state |= PIPE_WANTW;
			++wpipe->pipe_wantwcnt;
			pipeselwakeup(wpipe);
			if (wpipe->pipe_state & PIPE_WANTW)
				error = tsleep(wpipe, PCATCH, "pipewr", 0);
			++pipe_wblocked_count;
		}
		lwkt_reltoken(&rlock);

		/*
		 * Break out if we errored or the read side wants us to go
		 * away.
		 */
		if (error)
			break;
		if (wpipe->pipe_state & PIPE_WEOF) {
			error = EPIPE;
			break;
		}
	}
	pipe_end_uio(wpipe, &wpipe->pipe_wip);

	/*
	 * If we have put any characters in the buffer, we wake up
	 * the reader.
	 *
	 * Both rlock and wlock are required to be able to modify pipe_state.
	 */
	if (wpipe->pipe_buffer.windex != wpipe->pipe_buffer.rindex) {
		if (wpipe->pipe_state & PIPE_WANTR) {
			lwkt_gettoken(&rlock, &wpipe->pipe_rlock);
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				lwkt_reltoken(&rlock);
				wakeup(wpipe);
			} else {
				lwkt_reltoken(&rlock);
			}
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.rindex == wpipe->pipe_buffer.windex) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

	/*
	 * We have something to offer,
	 * wake up select/poll.
	 */
	space = wpipe->pipe_buffer.windex - wpipe->pipe_buffer.rindex;
	lwkt_reltoken(&wlock);
	if (space)
		pipeselwakeup(wpipe);
	pipe_rel_mplock(&mpsave);
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 *
 * We implement a very minimal set of ioctls for compatibility with sockets.
 */
int
pipe_ioctl(struct file *fp, u_long cmd, caddr_t data,
	   struct ucred *cred, struct sysmsg *msg)
{
	struct pipe *mpipe;
	lwkt_tokref rlock;
	lwkt_tokref wlock;
	int error;
	int mpsave;

	pipe_get_mplock(&mpsave);
	mpipe = (struct pipe *)fp->f_data;

	lwkt_gettoken(&rlock, &mpipe->pipe_rlock);
	lwkt_gettoken(&wlock, &mpipe->pipe_wlock);

	switch (cmd) {
	case FIOASYNC:
		if (*(int *)data) {
			mpipe->pipe_state |= PIPE_ASYNC;
		} else {
			mpipe->pipe_state &= ~PIPE_ASYNC;
		}
		error = 0;
		break;
	case FIONREAD:
		*(int *)data = mpipe->pipe_buffer.windex -
				mpipe->pipe_buffer.rindex;
		error = 0;
		break;
	case FIOSETOWN:
		get_mplock();
		error = fsetown(*(int *)data, &mpipe->pipe_sigio);
		rel_mplock();
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	case TIOCSPGRP:
		/* This is deprecated, FIOSETOWN should be used instead. */
		get_mplock();
		error = fsetown(-(*(int *)data), &mpipe->pipe_sigio);
		rel_mplock();
		break;

	case TIOCGPGRP:
		/* This is deprecated, FIOGETOWN should be used instead. */
		*(int *)data = -fgetown(mpipe->pipe_sigio);
		error = 0;
		break;
	default:
		error = ENOTTY;
		break;
	}
	lwkt_reltoken(&rlock);
	lwkt_reltoken(&wlock);
	pipe_rel_mplock(&mpsave);

	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
int
pipe_poll(struct file *fp, int events, struct ucred *cred)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int revents = 0;
	u_int space;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;
	if (events & (POLLIN | POLLRDNORM)) {
		if ((rpipe->pipe_buffer.windex != rpipe->pipe_buffer.rindex) ||
		    (rpipe->pipe_state & PIPE_REOF)) {
			revents |= events & (POLLIN | POLLRDNORM);
		}
	}

	if (events & (POLLOUT | POLLWRNORM)) {
		if (wpipe == NULL || (wpipe->pipe_state & PIPE_WEOF)) {
			revents |= events & (POLLOUT | POLLWRNORM);
		} else {
			space = wpipe->pipe_buffer.windex -
				wpipe->pipe_buffer.rindex;
			space = wpipe->pipe_buffer.size - space;
			if (space >= PIPE_BUF)
				revents |= events & (POLLOUT | POLLWRNORM);
		}
	}

	if ((rpipe->pipe_state & PIPE_REOF) ||
	    (wpipe == NULL) ||
	    (wpipe->pipe_state & PIPE_WEOF))
		revents |= POLLHUP;

	if (revents == 0) {
		if (events & (POLLIN | POLLRDNORM)) {
			selrecord(curthread, &rpipe->pipe_sel);
			rpipe->pipe_state |= PIPE_SEL;
		}

		if (events & (POLLOUT | POLLWRNORM)) {
			selrecord(curthread, &wpipe->pipe_sel);
			wpipe->pipe_state |= PIPE_SEL;
		}
	}
	pipe_rel_mplock(&mpsave);
	return (revents);
}

/*
 * MPSAFE
 */
static int
pipe_stat(struct file *fp, struct stat *ub, struct ucred *cred)
{
	struct pipe *pipe;
	int mpsave;

	pipe_get_mplock(&mpsave);
	pipe = (struct pipe *)fp->f_data;

	bzero((caddr_t)ub, sizeof(*ub));
	ub->st_mode = S_IFIFO;
	ub->st_blksize = pipe->pipe_buffer.size;
	ub->st_size = pipe->pipe_buffer.windex - pipe->pipe_buffer.rindex;
	ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
	ub->st_atimespec = pipe->pipe_atime;
	ub->st_mtimespec = pipe->pipe_mtime;
	ub->st_ctimespec = pipe->pipe_ctime;
	/*
	 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
	 * st_flags, st_gen.
	 * XXX (st_dev, st_ino) should be unique.
	 */
	pipe_rel_mplock(&mpsave);
	return (0);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_close(struct file *fp)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)fp->f_data;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	funsetown(cpipe->pipe_sigio);
	pipeclose(cpipe);
	rel_mplock();
	return (0);
}

/*
 * Shutdown one or both directions of a full-duplex pipe.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_shutdown(struct file *fp, int how)
{
	struct pipe *rpipe;
	struct pipe *wpipe;
	int error = EPIPE;
	lwkt_tokref rpipe_rlock;
	lwkt_tokref rpipe_wlock;
	lwkt_tokref wpipe_rlock;
	lwkt_tokref wpipe_wlock;
	int mpsave;

	pipe_get_mplock(&mpsave);
	rpipe = (struct pipe *)fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * We modify pipe_state on both pipes, which means we need
	 * all four tokens!
	 */
	lwkt_gettoken(&rpipe_rlock, &rpipe->pipe_rlock);
	lwkt_gettoken(&rpipe_wlock, &rpipe->pipe_wlock);
	lwkt_gettoken(&wpipe_rlock, &wpipe->pipe_rlock);
	lwkt_gettoken(&wpipe_wlock, &wpipe->pipe_wlock);

	switch(how) {
	case SHUT_RDWR:
	case SHUT_RD:
		rpipe->pipe_state |= PIPE_REOF;		/* my reads */
		rpipe->pipe_state |= PIPE_WEOF;		/* peer writes */
		if (rpipe->pipe_state & PIPE_WANTR) {
			rpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(rpipe);
		}
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
		error = 0;
		if (how == SHUT_RD)
			break;
		/* fall through */
	case SHUT_WR:
		wpipe->pipe_state |= PIPE_REOF;		/* peer reads */
		wpipe->pipe_state |= PIPE_WEOF;		/* my writes */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
		if (wpipe->pipe_state & PIPE_WANTW) {
			wpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(wpipe);
		}
		error = 0;
		break;
	}
	pipeselwakeup(rpipe);
	pipeselwakeup(wpipe);

	lwkt_reltoken(&rpipe_rlock);
	lwkt_reltoken(&rpipe_wlock);
	lwkt_reltoken(&wpipe_rlock);
	lwkt_reltoken(&wpipe_wlock);

	pipe_rel_mplock(&mpsave);
	return (error);
}

static void
pipe_free_kmem(struct pipe *cpipe)
{
	if (cpipe->pipe_buffer.buffer != NULL) {
		if (cpipe->pipe_buffer.size > PIPE_SIZE)
			atomic_subtract_int(&pipe_nbig, 1);
		kmem_free(&kernel_map,
			(vm_offset_t)cpipe->pipe_buffer.buffer,
			cpipe->pipe_buffer.size);
		cpipe->pipe_buffer.buffer = NULL;
		cpipe->pipe_buffer.object = NULL;
	}
}

/*
 * Close the pipe.  The slock must be held to interlock against simultaneous
 * closes.  The rlock and wlock must be held to adjust the pipe_state.
 */
static void
pipeclose(struct pipe *cpipe)
{
	globaldata_t gd;
	struct pipe *ppipe;
	lwkt_tokref cpipe_rlock;
	lwkt_tokref cpipe_wlock;
	lwkt_tokref ppipe_rlock;
	lwkt_tokref ppipe_wlock;

	if (cpipe == NULL)
		return;

	/*
	 * The slock may not have been allocated yet (close during
	 * initialization)
	 *
	 * We need both the read and write tokens to modify pipe_state.
	 */
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_EXCLUSIVE);
	lwkt_gettoken(&cpipe_rlock, &cpipe->pipe_rlock);
	lwkt_gettoken(&cpipe_wlock, &cpipe->pipe_wlock);

	/*
	 * Set our state, wakeup anyone waiting in select, and
	 * wakeup anyone blocked on our pipe.
	 */
	cpipe->pipe_state |= PIPE_CLOSED | PIPE_REOF | PIPE_WEOF;
	pipeselwakeup(cpipe);
	if (cpipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
		cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
		wakeup(cpipe);
	}

	/*
	 * Disconnect from peer.
	 */
	if ((ppipe = cpipe->pipe_peer) != NULL) {
		lwkt_gettoken(&ppipe_rlock, &ppipe->pipe_rlock);
		lwkt_gettoken(&ppipe_wlock, &ppipe->pipe_wlock);
		ppipe->pipe_state |= PIPE_REOF | PIPE_WEOF;
		pipeselwakeup(ppipe);
		if (ppipe->pipe_state & (PIPE_WANTR | PIPE_WANTW)) {
			ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW);
			wakeup(ppipe);
		}
		if (SLIST_FIRST(&ppipe->pipe_sel.si_note)) {
			get_mplock();
			KNOTE(&ppipe->pipe_sel.si_note, 0);
			rel_mplock();
		}
		lwkt_reltoken(&ppipe_rlock);
		lwkt_reltoken(&ppipe_wlock);
	}

	/*
	 * If the peer is also closed we can free resources for both
	 * sides, otherwise we leave our side intact to deal with any
	 * races (since we only have the slock).
	 */
	if (ppipe && (ppipe->pipe_state & PIPE_CLOSED)) {
		cpipe->pipe_peer = NULL;
		ppipe->pipe_peer = NULL;
		ppipe->pipe_slock = NULL;	/* we will free the slock */
		pipeclose(ppipe);
		ppipe = NULL;
	}

	lwkt_reltoken(&cpipe_rlock);
	lwkt_reltoken(&cpipe_wlock);
	if (cpipe->pipe_slock)
		lockmgr(cpipe->pipe_slock, LK_RELEASE);

	/*
	 * If we disassociated from our peer we can free resources
	 */
	if (ppipe == NULL) {
		gd = mycpu;
		if (cpipe->pipe_slock) {
			kfree(cpipe->pipe_slock, M_PIPE);
			cpipe->pipe_slock = NULL;
		}
		if (gd->gd_pipeqcount >= pipe_maxcache ||
		    cpipe->pipe_buffer.size != PIPE_SIZE
		) {
			pipe_free_kmem(cpipe);
			kfree(cpipe, M_PIPE);
		} else {
			cpipe->pipe_state = 0;
			cpipe->pipe_peer = gd->gd_pipeq;
			gd->gd_pipeq = cpipe;
			++gd->gd_pipeqcount;
		}
	}
}
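
/*
 * A note on the per-cpu cache used above (derived from the code): the
 * gd_pipeq free list is singly-linked through pipe_peer, which is why
 * pipe_create() clears pipe_peer after popping an entry and pipeclose()
 * overwrites it when pushing one back.  Only default-sized (PIPE_SIZE)
 * pipes are cached; "big" pipes always return their buffers.
 */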

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
pipe_kqfilter(struct file *fp, struct knote *kn)
{
	struct pipe *cpipe;

	get_mplock();
	cpipe = (struct pipe *)kn->kn_fp->f_data;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &pipe_rfiltops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &pipe_wfiltops;
		cpipe = cpipe->pipe_peer;
		if (cpipe == NULL) {
			/* other end of pipe has been closed */
			rel_mplock();
			return (EPIPE);
		}
		break;
	default:
		/* unsupported filter; drop the mplock acquired above */
		rel_mplock();
		return (1);
	}
	kn->kn_hook = (caddr_t)cpipe;

	SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
	rel_mplock();
	return (0);
}
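
/*
 * Illustrative userland registration against the filters above (a
 * sketch, not part of the kernel build); fds[] is assumed to come
 * from pipe(2):
 *
 *	int kq = kqueue();
 *	struct kevent kev;
 *
 *	EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * The EV_ADD registration arrives here via pipe_kqfilter(), and the
 * filter functions below then decide readiness on each event check.
 */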

static void
filt_pipedetach(struct knote *kn)
{
	struct pipe *cpipe = (struct pipe *)kn->kn_hook;

	SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_piperead(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;

	kn->kn_data = rpipe->pipe_buffer.windex - rpipe->pipe_buffer.rindex;

	/* XXX RACE */
	if (rpipe->pipe_state & PIPE_REOF) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_data > 0);
}

/*ARGSUSED*/
static int
filt_pipewrite(struct knote *kn, long hint)
{
	struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
	struct pipe *wpipe = rpipe->pipe_peer;
	u_int32_t space;

	/* XXX RACE */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_WEOF)) {
		kn->kn_data = 0;
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	space = wpipe->pipe_buffer.windex -
		wpipe->pipe_buffer.rindex;
	space = wpipe->pipe_buffer.size - space;
	kn->kn_data = space;
	return (kn->kn_data >= PIPE_BUF);
}