[dragonfly.git] / sys / kern / sys_pipe.c
commit 984263bc: Initial import from FreeBSD RELENG_4
1/*
2 * Copyright (c) 1996 John S. Dyson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice immediately at the beginning of the file, without modification,
10 * this list of conditions, and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Absolutely no warranty of function or purpose is made by the author
15 * John S. Dyson.
16 * 4. Modifications may be freely made to this file if the above conditions
17 * are met.
18 *
19 * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $
20 */
21
22/*
23 * This file contains a high-performance replacement for the socket-based
24 * pipes scheme originally used in FreeBSD/4.4Lite. It does not support
25 * all features of sockets, but does do everything that pipes normally
26 * do.
27 */
28
29/*
30 * This code has two modes of operation, a small write mode and a large
31 * write mode. The small write mode acts like conventional pipes with
32 * a kernel buffer. If the buffer is less than PIPE_MINDIRECT, then the
33 * "normal" pipe buffering is done. If the buffer is between PIPE_MINDIRECT
34 * and PIPE_SIZE in size, it is fully mapped and wired into the kernel, and
35 * the receiving process can copy it directly from the pages in the sending
36 * process.
37 *
38 * If the sending process receives a signal, it is possible that it will
39 * go away, and certainly its address space can change, because control
40 * is returned back to the user-mode side. In that case, the pipe code
41 * arranges to copy the buffer supplied by the user process, to a pageable
42 * kernel buffer, and the receiving process will grab the data from the
43 * pageable kernel buffer. Since signals don't happen all that often,
44 * the copy operation is normally eliminated.
45 *
46 * The constant PIPE_MINDIRECT is chosen to make sure that buffering will
47 * happen for small transfers so that the system will not spend all of
48 * its time context switching. PIPE_SIZE is constrained by the
49 * amount of kernel virtual memory.
50 */
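/*
 * Illustrative example (not from the original source; the constants assume
 * the <sys/pipe.h> values at the time of this import, PIPE_MINDIRECT = 8192
 * and PIPE_SIZE = 16384).  A blocking write such as
 *
 *	char big[65536];
 *	write(pfd[1], big, sizeof(big));	(iov_len >= PIPE_MINDIRECT)
 *
 * is a candidate for the direct, page-wiring path described above, while
 *
 *	write(pfd[1], "x", 1);			(iov_len < PIPE_MINDIRECT)
 *
 * always goes through the conventional kernel buffer.
 */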
51
52#include <sys/param.h>
53#include <sys/systm.h>
54#include <sys/proc.h>
55#include <sys/fcntl.h>
56#include <sys/file.h>
57#include <sys/filedesc.h>
58#include <sys/filio.h>
59#include <sys/ttycom.h>
60#include <sys/stat.h>
61#include <sys/poll.h>
62#include <sys/select.h>
63#include <sys/signalvar.h>
64#include <sys/sysproto.h>
65#include <sys/pipe.h>
66#include <sys/vnode.h>
67#include <sys/uio.h>
68#include <sys/event.h>
69
70#include <vm/vm.h>
71#include <vm/vm_param.h>
72#include <sys/lock.h>
73#include <vm/vm_object.h>
74#include <vm/vm_kern.h>
75#include <vm/vm_extern.h>
76#include <vm/pmap.h>
77#include <vm/vm_map.h>
78#include <vm/vm_page.h>
79#include <vm/vm_zone.h>
80
81/*
82 * Use this define if you want to disable *fancy* VM things. Expect an
83 * approx 30% decrease in transfer rate. This could be useful for
84 * NetBSD or OpenBSD.
85 */
86/* #define PIPE_NODIRECT */
87
88/*
89 * interfaces to the outside world
90 */
91static int pipe_read __P((struct file *fp, struct uio *uio,
92 struct ucred *cred, int flags, struct proc *p));
93static int pipe_write __P((struct file *fp, struct uio *uio,
94 struct ucred *cred, int flags, struct proc *p));
95static int pipe_close __P((struct file *fp, struct proc *p));
96static int pipe_poll __P((struct file *fp, int events, struct ucred *cred,
97 struct proc *p));
98static int pipe_kqfilter __P((struct file *fp, struct knote *kn));
99static int pipe_stat __P((struct file *fp, struct stat *sb, struct proc *p));
100static int pipe_ioctl __P((struct file *fp, u_long cmd, caddr_t data, struct proc *p));
101
102static struct fileops pipeops = {
103 pipe_read, pipe_write, pipe_ioctl, pipe_poll, pipe_kqfilter,
104 pipe_stat, pipe_close
105};
106
107static void filt_pipedetach(struct knote *kn);
108static int filt_piperead(struct knote *kn, long hint);
109static int filt_pipewrite(struct knote *kn, long hint);
110
111static struct filterops pipe_rfiltops =
112 { 1, NULL, filt_pipedetach, filt_piperead };
113static struct filterops pipe_wfiltops =
114 { 1, NULL, filt_pipedetach, filt_pipewrite };
115
116
117/*
118 * Default pipe buffer size(s); this can be kind-of large now because pipe
119 * space is pageable. The pipe code will try to maintain locality of
120 * reference for performance reasons, so small amounts of outstanding I/O
121 * will not wipe the cache.
122 */
123#define MINPIPESIZE (PIPE_SIZE/3)
124#define MAXPIPESIZE (2*PIPE_SIZE/3)
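/*
 * Worked example (assuming PIPE_SIZE is 16384, its value in <sys/pipe.h>
 * at the time of this import): MINPIPESIZE = 16384 / 3 = 5461 and
 * MAXPIPESIZE = 2 * 16384 / 3 = 10922.  pipe_read() below only wakes a
 * blocked writer once the buffer has drained below MINPIPESIZE, which is
 * the write-blocking hysteresis referred to in its comments.
 */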
125
126/*
127 * Maximum amount of kva for pipes -- this is kind-of a soft limit, but
128 * is there so that on large systems, we don't exhaust it.
129 */
130#define MAXPIPEKVA (8*1024*1024)
131
132/*
133 * Limit for direct transfers; we cannot, of course, limit
134 * the amount of kva used for pipes in general.
135 */
136#define LIMITPIPEKVA (16*1024*1024)
137
138/*
139 * Limit the number of "big" pipes
140 */
141#define LIMITBIGPIPES 32
142static int nbigpipe;
143
144static int amountpipekva;
145
146static void pipeclose __P((struct pipe *cpipe));
147static void pipe_free_kmem __P((struct pipe *cpipe));
148static int pipe_create __P((struct pipe **cpipep));
149static __inline int pipelock __P((struct pipe *cpipe, int catch));
150static __inline void pipeunlock __P((struct pipe *cpipe));
151static __inline void pipeselwakeup __P((struct pipe *cpipe));
152#ifndef PIPE_NODIRECT
153static int pipe_build_write_buffer __P((struct pipe *wpipe, struct uio *uio));
154static void pipe_destroy_write_buffer __P((struct pipe *wpipe));
155static int pipe_direct_write __P((struct pipe *wpipe, struct uio *uio));
156static void pipe_clone_write_buffer __P((struct pipe *wpipe));
157#endif
158static int pipespace __P((struct pipe *cpipe, int size));
159
160static vm_zone_t pipe_zone;
161
162/*
163 * The pipe system call for the DTYPE_PIPE type of pipes
164 */
165
166/* ARGSUSED */
167int
168pipe(p, uap)
169 struct proc *p;
170 struct pipe_args /* {
171 int dummy;
172 } */ *uap;
173{
174 struct filedesc *fdp = p->p_fd;
175 struct file *rf, *wf;
176 struct pipe *rpipe, *wpipe;
177 int fd, error;
178
179 if (pipe_zone == NULL)
180 pipe_zone = zinit("PIPE", sizeof(struct pipe), 0, 0, 4);
181
182 rpipe = wpipe = NULL;
183 if (pipe_create(&rpipe) || pipe_create(&wpipe)) {
184 pipeclose(rpipe);
185 pipeclose(wpipe);
186 return (ENFILE);
187 }
188
189 rpipe->pipe_state |= PIPE_DIRECTOK;
190 wpipe->pipe_state |= PIPE_DIRECTOK;
191
192 error = falloc(p, &rf, &fd);
193 if (error) {
194 pipeclose(rpipe);
195 pipeclose(wpipe);
196 return (error);
197 }
198 fhold(rf);
199 p->p_retval[0] = fd;
200
201 /*
202 * Warning: once we've gotten past allocation of the fd for the
203 * read-side, we can only drop the read side via fdrop() in order
204 * to avoid races against processes which manage to dup() the read
205 * side while we are blocked trying to allocate the write side.
206 */
207 rf->f_flag = FREAD | FWRITE;
208 rf->f_type = DTYPE_PIPE;
209 rf->f_data = (caddr_t)rpipe;
210 rf->f_ops = &pipeops;
211 error = falloc(p, &wf, &fd);
212 if (error) {
213 if (fdp->fd_ofiles[p->p_retval[0]] == rf) {
214 fdp->fd_ofiles[p->p_retval[0]] = NULL;
215 fdrop(rf, p);
216 }
217 fdrop(rf, p);
218 /* rpipe has been closed by fdrop(). */
219 pipeclose(wpipe);
220 return (error);
221 }
222 wf->f_flag = FREAD | FWRITE;
223 wf->f_type = DTYPE_PIPE;
224 wf->f_data = (caddr_t)wpipe;
225 wf->f_ops = &pipeops;
226 p->p_retval[1] = fd;
227
228 rpipe->pipe_peer = wpipe;
229 wpipe->pipe_peer = rpipe;
230 fdrop(rf, p);
231
232 return (0);
233}
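/*
 * Userland view of the syscall above (illustrative): p_retval[0] and
 * p_retval[1] become fd[0] (the read end) and fd[1] (the write end).
 *
 *	#include <unistd.h>
 *	#include <err.h>
 *
 *	int fd[2];
 *	char buf[6];
 *
 *	if (pipe(fd) == -1)
 *		err(1, "pipe");
 *	(void)write(fd[1], "hello", 5);
 *	(void)read(fd[0], buf, 5);
 *	close(fd[1]);
 *	close(fd[0]);
 */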
234
235/*
236 * Allocate kva for the pipe circular buffer; the space is pageable.
237 * This routine will 'realloc' the size of a pipe safely: if it fails,
238 * it retains the old buffer.
239 * On failure it returns ENOMEM.
240 */
241static int
242pipespace(cpipe, size)
243 struct pipe *cpipe;
244 int size;
245{
246 struct vm_object *object;
247 caddr_t buffer;
248 int npages, error;
249
250 npages = round_page(size)/PAGE_SIZE;
251 /*
252 * Create an object, I don't like the idea of paging to/from
253 * kernel_object.
254 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
255 */
256 object = vm_object_allocate(OBJT_DEFAULT, npages);
257 buffer = (caddr_t) vm_map_min(kernel_map);
258
259 /*
260 * Insert the object into the kernel map, and allocate kva for it.
261 * The map entry is, by default, pageable.
262 * XXX -- minor change needed here for NetBSD/OpenBSD VM systems.
263 */
264 error = vm_map_find(kernel_map, object, 0,
265 (vm_offset_t *) &buffer, size, 1,
266 VM_PROT_ALL, VM_PROT_ALL, 0);
267
268 if (error != KERN_SUCCESS) {
269 vm_object_deallocate(object);
270 return (ENOMEM);
271 }
272
273 /* free old resources if we're resizing */
274 pipe_free_kmem(cpipe);
275 cpipe->pipe_buffer.object = object;
276 cpipe->pipe_buffer.buffer = buffer;
277 cpipe->pipe_buffer.size = size;
278 cpipe->pipe_buffer.in = 0;
279 cpipe->pipe_buffer.out = 0;
280 cpipe->pipe_buffer.cnt = 0;
281 amountpipekva += cpipe->pipe_buffer.size;
282 return (0);
283}
284
285/*
286 * initialize and allocate VM and memory for pipe
287 */
288static int
289pipe_create(cpipep)
290 struct pipe **cpipep;
291{
292 struct pipe *cpipe;
293 int error;
294
295 *cpipep = zalloc(pipe_zone);
296 if (*cpipep == NULL)
297 return (ENOMEM);
298
299 cpipe = *cpipep;
300
301 /* so pipespace()->pipe_free_kmem() doesn't follow junk pointer */
302 cpipe->pipe_buffer.object = NULL;
303#ifndef PIPE_NODIRECT
304 cpipe->pipe_map.kva = 0;
305#endif
306 /*
307 * protect so pipeclose() doesn't follow a junk pointer
308 * if pipespace() fails.
309 */
310 bzero(&cpipe->pipe_sel, sizeof(cpipe->pipe_sel));
311 cpipe->pipe_state = 0;
312 cpipe->pipe_peer = NULL;
313 cpipe->pipe_busy = 0;
314
315#ifndef PIPE_NODIRECT
316 /*
317 * pipe data structure initializations to support direct pipe I/O
318 */
319 cpipe->pipe_map.cnt = 0;
320 cpipe->pipe_map.kva = 0;
321 cpipe->pipe_map.pos = 0;
322 cpipe->pipe_map.npages = 0;
323 /* cpipe->pipe_map.ms[] = invalid */
324#endif
325
326 error = pipespace(cpipe, PIPE_SIZE);
327 if (error)
328 return (error);
329
330 vfs_timestamp(&cpipe->pipe_ctime);
331 cpipe->pipe_atime = cpipe->pipe_ctime;
332 cpipe->pipe_mtime = cpipe->pipe_ctime;
333
334 return (0);
335}
336
337
338/*
339 * lock a pipe for I/O, blocking other access
340 */
341static __inline int
342pipelock(cpipe, catch)
343 struct pipe *cpipe;
344 int catch;
345{
346 int error;
347
348 while (cpipe->pipe_state & PIPE_LOCK) {
349 cpipe->pipe_state |= PIPE_LWANT;
350 error = tsleep(cpipe, catch ? (PRIBIO | PCATCH) : PRIBIO,
351 "pipelk", 0);
352 if (error != 0)
353 return (error);
354 }
355 cpipe->pipe_state |= PIPE_LOCK;
356 return (0);
357}
358
359/*
360 * unlock a pipe I/O lock
361 */
362static __inline void
363pipeunlock(cpipe)
364 struct pipe *cpipe;
365{
366
367 cpipe->pipe_state &= ~PIPE_LOCK;
368 if (cpipe->pipe_state & PIPE_LWANT) {
369 cpipe->pipe_state &= ~PIPE_LWANT;
370 wakeup(cpipe);
371 }
372}
373
374static __inline void
375pipeselwakeup(cpipe)
376 struct pipe *cpipe;
377{
378
379 if (cpipe->pipe_state & PIPE_SEL) {
380 cpipe->pipe_state &= ~PIPE_SEL;
381 selwakeup(&cpipe->pipe_sel);
382 }
383 if ((cpipe->pipe_state & PIPE_ASYNC) && cpipe->pipe_sigio)
384 pgsigio(cpipe->pipe_sigio, SIGIO, 0);
385 KNOTE(&cpipe->pipe_sel.si_note, 0);
386}
387
388/* ARGSUSED */
389static int
390pipe_read(fp, uio, cred, flags, p)
391 struct file *fp;
392 struct uio *uio;
393 struct ucred *cred;
394 struct proc *p;
395 int flags;
396{
397 struct pipe *rpipe = (struct pipe *) fp->f_data;
398 int error;
399 int nread = 0;
400 u_int size;
401
402 ++rpipe->pipe_busy;
403 error = pipelock(rpipe, 1);
404 if (error)
405 goto unlocked_error;
406
407 while (uio->uio_resid) {
408 /*
409 * normal pipe buffer receive
410 */
411 if (rpipe->pipe_buffer.cnt > 0) {
412 size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out;
413 if (size > rpipe->pipe_buffer.cnt)
414 size = rpipe->pipe_buffer.cnt;
415 if (size > (u_int) uio->uio_resid)
416 size = (u_int) uio->uio_resid;
417
418 error = uiomove(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
419 size, uio);
420 if (error)
421 break;
422
423 rpipe->pipe_buffer.out += size;
424 if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
425 rpipe->pipe_buffer.out = 0;
426
427 rpipe->pipe_buffer.cnt -= size;
428
429 /*
430 * If there is no more to read in the pipe, reset
431 * its pointers to the beginning. This improves
432 * cache hit stats.
433 */
434 if (rpipe->pipe_buffer.cnt == 0) {
435 rpipe->pipe_buffer.in = 0;
436 rpipe->pipe_buffer.out = 0;
437 }
438 nread += size;
439#ifndef PIPE_NODIRECT
440 /*
441 * Direct copy, bypassing a kernel buffer.
442 */
443 } else if ((size = rpipe->pipe_map.cnt) &&
444 (rpipe->pipe_state & PIPE_DIRECTW)) {
445 caddr_t va;
446 if (size > (u_int) uio->uio_resid)
447 size = (u_int) uio->uio_resid;
448
449 va = (caddr_t) rpipe->pipe_map.kva +
450 rpipe->pipe_map.pos;
451 error = uiomove(va, size, uio);
452 if (error)
453 break;
454 nread += size;
455 rpipe->pipe_map.pos += size;
456 rpipe->pipe_map.cnt -= size;
457 if (rpipe->pipe_map.cnt == 0) {
458 rpipe->pipe_state &= ~PIPE_DIRECTW;
459 wakeup(rpipe);
460 }
461#endif
462 } else {
463 /*
464 * detect EOF condition
465 * read returns 0 on EOF, no need to set error
466 */
467 if (rpipe->pipe_state & PIPE_EOF)
468 break;
469
470 /*
471 * If the "write-side" has been blocked, wake it up now.
472 */
473 if (rpipe->pipe_state & PIPE_WANTW) {
474 rpipe->pipe_state &= ~PIPE_WANTW;
475 wakeup(rpipe);
476 }
477
478 /*
479 * Break if some data was read.
480 */
481 if (nread > 0)
482 break;
483
484 /*
485 * Unlock the pipe buffer for our remaining processing. We
486 * will either break out with an error or we will sleep and
487 * relock to loop.
488 */
489 pipeunlock(rpipe);
490
491 /*
492 * Handle non-blocking mode operation or
493 * wait for more data.
494 */
495 if (fp->f_flag & FNONBLOCK) {
496 error = EAGAIN;
497 } else {
498 rpipe->pipe_state |= PIPE_WANTR;
499 if ((error = tsleep(rpipe, PRIBIO | PCATCH,
500 "piperd", 0)) == 0)
501 error = pipelock(rpipe, 1);
502 }
503 if (error)
504 goto unlocked_error;
505 }
506 }
507 pipeunlock(rpipe);
508
509 if (error == 0)
510 vfs_timestamp(&rpipe->pipe_atime);
511unlocked_error:
512 --rpipe->pipe_busy;
513
514 /*
515 * PIPE_WANT processing only makes sense if pipe_busy is 0.
516 */
517 if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
518 rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
519 wakeup(rpipe);
520 } else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
521 /*
522 * Handle write blocking hysteresis.
523 */
524 if (rpipe->pipe_state & PIPE_WANTW) {
525 rpipe->pipe_state &= ~PIPE_WANTW;
526 wakeup(rpipe);
527 }
528 }
529
530 if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
531 pipeselwakeup(rpipe);
532
533 return (error);
534}
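/*
 * Worked example of the circular-buffer arithmetic in pipe_read() above
 * (illustrative, assuming a 16384-byte buffer): with out = 16000 and
 * cnt = 1000, the first copy is clipped to size = 16384 - 16000 = 384
 * bytes (the contiguous tail), out then wraps to 0, and the remaining
 * 616 bytes are picked up on the next loop iteration from the start of
 * the buffer.
 */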
535
536#ifndef PIPE_NODIRECT
537/*
538 * Map the sending process's buffer into kernel space and wire it.
539 * This is similar to a physical write operation.
540 */
541static int
542pipe_build_write_buffer(wpipe, uio)
543 struct pipe *wpipe;
544 struct uio *uio;
545{
546 u_int size;
547 int i;
548 vm_offset_t addr, endaddr, paddr;
549
550 size = (u_int) uio->uio_iov->iov_len;
551 if (size > wpipe->pipe_buffer.size)
552 size = wpipe->pipe_buffer.size;
553
554 endaddr = round_page((vm_offset_t)uio->uio_iov->iov_base + size);
555 addr = trunc_page((vm_offset_t)uio->uio_iov->iov_base);
556 for (i = 0; addr < endaddr; addr += PAGE_SIZE, i++) {
557 vm_page_t m;
558
559 if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0 ||
560 (paddr = pmap_kextract(addr)) == 0) {
561 int j;
562
563 for (j = 0; j < i; j++)
564 vm_page_unwire(wpipe->pipe_map.ms[j], 1);
565 return (EFAULT);
566 }
567
568 m = PHYS_TO_VM_PAGE(paddr);
569 vm_page_wire(m);
570 wpipe->pipe_map.ms[i] = m;
571 }
572
573/*
574 * set up the control block
575 */
576 wpipe->pipe_map.npages = i;
577 wpipe->pipe_map.pos =
578 ((vm_offset_t) uio->uio_iov->iov_base) & PAGE_MASK;
579 wpipe->pipe_map.cnt = size;
580
581/*
582 * and map the buffer
583 */
584 if (wpipe->pipe_map.kva == 0) {
585 /*
586 * We need to allocate space for an extra page because the
587 * address range might (will) span pages at times.
588 */
589 wpipe->pipe_map.kva = kmem_alloc_pageable(kernel_map,
590 wpipe->pipe_buffer.size + PAGE_SIZE);
591 amountpipekva += wpipe->pipe_buffer.size + PAGE_SIZE;
592 }
593 pmap_qenter(wpipe->pipe_map.kva, wpipe->pipe_map.ms,
594 wpipe->pipe_map.npages);
595
596/*
597 * and update the uio data
598 */
599
600 uio->uio_iov->iov_len -= size;
601 uio->uio_iov->iov_base += size;
602 if (uio->uio_iov->iov_len == 0)
603 uio->uio_iov++;
604 uio->uio_resid -= size;
605 uio->uio_offset += size;
606 return (0);
607}
608
609/*
610 * unmap and unwire the process buffer
611 */
612static void
613pipe_destroy_write_buffer(wpipe)
614 struct pipe *wpipe;
615{
616 int i;
617
618 if (wpipe->pipe_map.kva) {
619 pmap_qremove(wpipe->pipe_map.kva, wpipe->pipe_map.npages);
620
621 if (amountpipekva > MAXPIPEKVA) {
622 vm_offset_t kva = wpipe->pipe_map.kva;
623 wpipe->pipe_map.kva = 0;
624 kmem_free(kernel_map, kva,
625 wpipe->pipe_buffer.size + PAGE_SIZE);
626 amountpipekva -= wpipe->pipe_buffer.size + PAGE_SIZE;
627 }
628 }
629 for (i = 0; i < wpipe->pipe_map.npages; i++)
630 vm_page_unwire(wpipe->pipe_map.ms[i], 1);
631 wpipe->pipe_map.npages = 0;
632}
633
634/*
635 * In the case of a signal, the writing process might go away. This
636 * code copies the data into the circular buffer so that the source
637 * pages can be freed without loss of data.
638 */
639static void
640pipe_clone_write_buffer(wpipe)
641 struct pipe *wpipe;
642{
643 int size;
644 int pos;
645
646 size = wpipe->pipe_map.cnt;
647 pos = wpipe->pipe_map.pos;
648 bcopy((caddr_t) wpipe->pipe_map.kva + pos,
649 (caddr_t) wpipe->pipe_buffer.buffer, size);
650
651 wpipe->pipe_buffer.in = size;
652 wpipe->pipe_buffer.out = 0;
653 wpipe->pipe_buffer.cnt = size;
654 wpipe->pipe_state &= ~PIPE_DIRECTW;
655
656 pipe_destroy_write_buffer(wpipe);
657}
658
659/*
660 * This implements the pipe buffer write mechanism. Note that only
661 * a direct write OR a normal pipe write can be pending at any given time.
662 * If there are any characters in the pipe buffer, the direct write will
663 * be deferred until the receiving process grabs all of the bytes from
664 * the pipe buffer. Then the direct mapping write is set-up.
665 */
666static int
667pipe_direct_write(wpipe, uio)
668 struct pipe *wpipe;
669 struct uio *uio;
670{
671 int error;
672
673retry:
674 while (wpipe->pipe_state & PIPE_DIRECTW) {
675 if (wpipe->pipe_state & PIPE_WANTR) {
676 wpipe->pipe_state &= ~PIPE_WANTR;
677 wakeup(wpipe);
678 }
679 wpipe->pipe_state |= PIPE_WANTW;
680 error = tsleep(wpipe, PRIBIO | PCATCH, "pipdww", 0);
681 if (error)
682 goto error1;
683 if (wpipe->pipe_state & PIPE_EOF) {
684 error = EPIPE;
685 goto error1;
686 }
687 }
688 wpipe->pipe_map.cnt = 0; /* transfer not ready yet */
689 if (wpipe->pipe_buffer.cnt > 0) {
690 if (wpipe->pipe_state & PIPE_WANTR) {
691 wpipe->pipe_state &= ~PIPE_WANTR;
692 wakeup(wpipe);
693 }
694
695 wpipe->pipe_state |= PIPE_WANTW;
696 error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwc", 0);
697 if (error)
698 goto error1;
699 if (wpipe->pipe_state & PIPE_EOF) {
700 error = EPIPE;
701 goto error1;
702 }
703 goto retry;
704 }
705
706 wpipe->pipe_state |= PIPE_DIRECTW;
707
708 error = pipe_build_write_buffer(wpipe, uio);
709 if (error) {
710 wpipe->pipe_state &= ~PIPE_DIRECTW;
711 goto error1;
712 }
713
714 error = 0;
715 while (!error && (wpipe->pipe_state & PIPE_DIRECTW)) {
716 if (wpipe->pipe_state & PIPE_EOF) {
717 pipelock(wpipe, 0);
718 pipe_destroy_write_buffer(wpipe);
719 pipeunlock(wpipe);
720 pipeselwakeup(wpipe);
721 error = EPIPE;
722 goto error1;
723 }
724 if (wpipe->pipe_state & PIPE_WANTR) {
725 wpipe->pipe_state &= ~PIPE_WANTR;
726 wakeup(wpipe);
727 }
728 pipeselwakeup(wpipe);
729 error = tsleep(wpipe, PRIBIO | PCATCH, "pipdwt", 0);
730 }
731
732 pipelock(wpipe,0);
733 if (wpipe->pipe_state & PIPE_DIRECTW) {
734 /*
735 * this bit of trickery substitutes a kernel buffer for
736 * the process that might be going away.
737 */
738 pipe_clone_write_buffer(wpipe);
739 } else {
740 pipe_destroy_write_buffer(wpipe);
741 }
742 pipeunlock(wpipe);
743 return (error);
744
745error1:
746 wakeup(wpipe);
747 return (error);
748}
749#endif
750
751static int
752pipe_write(fp, uio, cred, flags, p)
753 struct file *fp;
754 struct uio *uio;
755 struct ucred *cred;
756 struct proc *p;
757 int flags;
758{
759 int error = 0;
760 int orig_resid;
761 struct pipe *wpipe, *rpipe;
762
763 rpipe = (struct pipe *) fp->f_data;
764 wpipe = rpipe->pipe_peer;
765
766 /*
767 * detect loss of pipe read side, issue SIGPIPE if lost.
768 */
769 if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
770 return (EPIPE);
771 }
772 ++wpipe->pipe_busy;
773
774 /*
775 * If it is advantageous to resize the pipe buffer, do
776 * so.
777 */
778 if ((uio->uio_resid > PIPE_SIZE) &&
779 (nbigpipe < LIMITBIGPIPES) &&
780 (wpipe->pipe_state & PIPE_DIRECTW) == 0 &&
781 (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
782 (wpipe->pipe_buffer.cnt == 0)) {
783
784 if ((error = pipelock(wpipe,1)) == 0) {
785 if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
786 nbigpipe++;
787 pipeunlock(wpipe);
788 }
789 }
790
791 /*
792 * If an early error occurred, unbusy and return, waking up any pending
793 * readers.
794 */
795 if (error) {
796 --wpipe->pipe_busy;
797 if ((wpipe->pipe_busy == 0) &&
798 (wpipe->pipe_state & PIPE_WANT)) {
799 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
800 wakeup(wpipe);
801 }
802 return(error);
803 }
804
805 KASSERT(wpipe->pipe_buffer.buffer != NULL, ("pipe buffer gone"));
806
807 orig_resid = uio->uio_resid;
808
809 while (uio->uio_resid) {
810 int space;
811
812#ifndef PIPE_NODIRECT
813 /*
814 * If the transfer is large, we can gain performance if
815 * we do process-to-process copies directly.
816 * If the write is non-blocking, we don't use the
817 * direct write mechanism.
818 *
819 * The direct write mechanism will detect the reader going
820 * away on us.
821 */
822 if ((uio->uio_iov->iov_len >= PIPE_MINDIRECT) &&
823 (fp->f_flag & FNONBLOCK) == 0 &&
824 (wpipe->pipe_map.kva || (amountpipekva < LIMITPIPEKVA))) {
826 error = pipe_direct_write(wpipe, uio);
827 if (error)
828 break;
829 continue;
830 }
831#endif
832
833 /*
834 * Pipe buffered writes cannot be coincident with
835 * direct writes. We wait until the currently executing
836 * direct write is completed before we start filling the
837 * pipe buffer. We break out if a signal occurs or the
838 * reader goes away.
839 */
840 retrywrite:
841 while (wpipe->pipe_state & PIPE_DIRECTW) {
842 if (wpipe->pipe_state & PIPE_WANTR) {
843 wpipe->pipe_state &= ~PIPE_WANTR;
844 wakeup(wpipe);
845 }
846 error = tsleep(wpipe, PRIBIO | PCATCH, "pipbww", 0);
847 if (wpipe->pipe_state & PIPE_EOF)
848 break;
849 if (error)
850 break;
851 }
852 if (wpipe->pipe_state & PIPE_EOF) {
853 error = EPIPE;
854 break;
855 }
856
857 space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
858
859 /* Writes of size <= PIPE_BUF must be atomic. */
860 if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
861 space = 0;
862
863 if (space > 0 && (wpipe->pipe_buffer.cnt < PIPE_SIZE)) {
864 if ((error = pipelock(wpipe,1)) == 0) {
865 int size; /* Transfer size */
866 int segsize; /* first segment to transfer */
867
868 /*
869 * It is possible for a direct write to
870 * slip in on us... handle it here...
871 */
872 if (wpipe->pipe_state & PIPE_DIRECTW) {
873 pipeunlock(wpipe);
874 goto retrywrite;
875 }
876 /*
877 * If a process blocked in uiomove, our
878 * value for space might be bad.
879 *
880 * XXX will we be ok if the reader has gone
881 * away here?
882 */
883 if (space > wpipe->pipe_buffer.size -
884 wpipe->pipe_buffer.cnt) {
885 pipeunlock(wpipe);
886 goto retrywrite;
887 }
888
889 /*
890 * Transfer size is minimum of uio transfer
891 * and free space in pipe buffer.
892 */
893 if (space > uio->uio_resid)
894 size = uio->uio_resid;
895 else
896 size = space;
897 /*
898 * First segment to transfer is minimum of
899 * transfer size and contiguous space in
900 * pipe buffer. If first segment to transfer
901 * is less than the transfer size, we've got
902 * a wraparound in the buffer.
903 */
904 segsize = wpipe->pipe_buffer.size -
905 wpipe->pipe_buffer.in;
906 if (segsize > size)
907 segsize = size;
908
909 /* Transfer first segment */
910
911 error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
912 segsize, uio);
913
914 if (error == 0 && segsize < size) {
915 /*
916 * Transfer remaining part now, to
917 * support atomic writes. Wraparound
918 * happened.
919 */
920 if (wpipe->pipe_buffer.in + segsize !=
921 wpipe->pipe_buffer.size)
922 panic("Expected pipe buffer wraparound disappeared");
923
924 error = uiomove(&wpipe->pipe_buffer.buffer[0],
925 size - segsize, uio);
926 }
927 if (error == 0) {
928 wpipe->pipe_buffer.in += size;
929 if (wpipe->pipe_buffer.in >=
930 wpipe->pipe_buffer.size) {
931 if (wpipe->pipe_buffer.in != size - segsize + wpipe->pipe_buffer.size)
932 panic("Expected wraparound bad");
933 wpipe->pipe_buffer.in = size - segsize;
934 }
935
936 wpipe->pipe_buffer.cnt += size;
937 if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
938 panic("Pipe buffer overflow");
939
940 }
941 pipeunlock(wpipe);
942 }
943 if (error)
944 break;
945
946 } else {
947 /*
948 * If the "read-side" has been blocked, wake it up now.
949 */
950 if (wpipe->pipe_state & PIPE_WANTR) {
951 wpipe->pipe_state &= ~PIPE_WANTR;
952 wakeup(wpipe);
953 }
954
955 /*
956 * don't block on non-blocking I/O
957 */
958 if (fp->f_flag & FNONBLOCK) {
959 error = EAGAIN;
960 break;
961 }
962
963 /*
964 * We have no more space and have something to offer,
965 * wake up select/poll.
966 */
967 pipeselwakeup(wpipe);
968
969 wpipe->pipe_state |= PIPE_WANTW;
970 error = tsleep(wpipe, PRIBIO | PCATCH, "pipewr", 0);
971 if (error != 0)
972 break;
973 /*
974 * If read side wants to go away, we just issue a signal
975 * to ourselves.
976 */
977 if (wpipe->pipe_state & PIPE_EOF) {
978 error = EPIPE;
979 break;
980 }
981 }
982 }
983
984 --wpipe->pipe_busy;
985
986 if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
987 wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
988 wakeup(wpipe);
989 } else if (wpipe->pipe_buffer.cnt > 0) {
990 /*
991 * If we have put any characters in the buffer, we wake up
992 * the reader.
993 */
994 if (wpipe->pipe_state & PIPE_WANTR) {
995 wpipe->pipe_state &= ~PIPE_WANTR;
996 wakeup(wpipe);
997 }
998 }
999
1000 /*
1001 * Don't return EPIPE if I/O was successful
1002 */
1003 if ((wpipe->pipe_buffer.cnt == 0) &&
1004 (uio->uio_resid == 0) &&
1005 (error == EPIPE)) {
1006 error = 0;
1007 }
1008
1009 if (error == 0)
1010 vfs_timestamp(&wpipe->pipe_mtime);
1011
1012 /*
1013 * We have something to offer,
1014 * wake up select/poll.
1015 */
1016 if (wpipe->pipe_buffer.cnt)
1017 pipeselwakeup(wpipe);
1018
1019 return (error);
1020}
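/*
 * Worked example of the buffered-write wraparound in pipe_write() above
 * (illustrative, assuming a 16384-byte buffer): with in = 16000, cnt = 0
 * and a 1000-byte transfer, segsize = 16384 - 16000 = 384, so the first
 * uiomove() fills the tail of the buffer, the second uiomove() copies the
 * remaining 616 bytes to offset 0, and in is rewound to
 * size - segsize = 616.
 */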
1021
1022/*
1023 * we implement a very minimal set of ioctls for compatibility with sockets.
1024 */
1025int
1026pipe_ioctl(fp, cmd, data, p)
1027 struct file *fp;
1028 u_long cmd;
1029 caddr_t data;
1030 struct proc *p;
1031{
1032 struct pipe *mpipe = (struct pipe *)fp->f_data;
1033
1034 switch (cmd) {
1035
1036 case FIONBIO:
1037 return (0);
1038
1039 case FIOASYNC:
1040 if (*(int *)data) {
1041 mpipe->pipe_state |= PIPE_ASYNC;
1042 } else {
1043 mpipe->pipe_state &= ~PIPE_ASYNC;
1044 }
1045 return (0);
1046
1047 case FIONREAD:
1048 if (mpipe->pipe_state & PIPE_DIRECTW)
1049 *(int *)data = mpipe->pipe_map.cnt;
1050 else
1051 *(int *)data = mpipe->pipe_buffer.cnt;
1052 return (0);
1053
1054 case FIOSETOWN:
1055 return (fsetown(*(int *)data, &mpipe->pipe_sigio));
1056
1057 case FIOGETOWN:
1058 *(int *)data = fgetown(mpipe->pipe_sigio);
1059 return (0);
1060
1061 /* This is deprecated, FIOSETOWN should be used instead. */
1062 case TIOCSPGRP:
1063 return (fsetown(-(*(int *)data), &mpipe->pipe_sigio));
1064
1065 /* This is deprecated, FIOGETOWN should be used instead. */
1066 case TIOCGPGRP:
1067 *(int *)data = -fgetown(mpipe->pipe_sigio);
1068 return (0);
1069
1070 }
1071 return (ENOTTY);
1072}
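/*
 * Illustrative userland use of the FIONREAD ioctl handled above: ask how
 * many bytes are currently buffered on the read end of a pipe.
 *
 *	#include <sys/ioctl.h>
 *	#include <stdio.h>
 *
 *	int nbytes;
 *
 *	if (ioctl(fd[0], FIONREAD, &nbytes) == 0)
 *		printf("%d bytes buffered\n", nbytes);
 */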
1073
1074int
1075pipe_poll(fp, events, cred, p)
1076 struct file *fp;
1077 int events;
1078 struct ucred *cred;
1079 struct proc *p;
1080{
1081 struct pipe *rpipe = (struct pipe *)fp->f_data;
1082 struct pipe *wpipe;
1083 int revents = 0;
1084
1085 wpipe = rpipe->pipe_peer;
1086 if (events & (POLLIN | POLLRDNORM))
1087 if ((rpipe->pipe_state & PIPE_DIRECTW) ||
1088 (rpipe->pipe_buffer.cnt > 0) ||
1089 (rpipe->pipe_state & PIPE_EOF))
1090 revents |= events & (POLLIN | POLLRDNORM);
1091
1092 if (events & (POLLOUT | POLLWRNORM))
1093 if (wpipe == NULL || (wpipe->pipe_state & PIPE_EOF) ||
1094 (((wpipe->pipe_state & PIPE_DIRECTW) == 0) &&
1095 (wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt) >= PIPE_BUF))
1096 revents |= events & (POLLOUT | POLLWRNORM);
1097
1098 if ((rpipe->pipe_state & PIPE_EOF) ||
1099 (wpipe == NULL) ||
1100 (wpipe->pipe_state & PIPE_EOF))
1101 revents |= POLLHUP;
1102
1103 if (revents == 0) {
1104 if (events & (POLLIN | POLLRDNORM)) {
1105 selrecord(p, &rpipe->pipe_sel);
1106 rpipe->pipe_state |= PIPE_SEL;
1107 }
1108
1109 if (events & (POLLOUT | POLLWRNORM)) {
1110 selrecord(p, &wpipe->pipe_sel);
1111 wpipe->pipe_state |= PIPE_SEL;
1112 }
1113 }
1114
1115 return (revents);
1116}
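/*
 * Illustrative userland counterpart of pipe_poll() above: wait for the
 * read end of a pipe to become readable; POLLHUP reports that the write
 * side has been closed.
 *
 *	#include <poll.h>
 *	#include <stdio.h>
 *
 *	struct pollfd pfd;
 *
 *	pfd.fd = fd[0];
 *	pfd.events = POLLIN;
 *	if (poll(&pfd, 1, INFTIM) > 0 && (pfd.revents & POLLHUP))
 *		printf("write side closed\n");
 */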
1117
1118static int
1119pipe_stat(fp, ub, p)
1120 struct file *fp;
1121 struct stat *ub;
1122 struct proc *p;
1123{
1124 struct pipe *pipe = (struct pipe *)fp->f_data;
1125
1126 bzero((caddr_t)ub, sizeof(*ub));
1127 ub->st_mode = S_IFIFO;
1128 ub->st_blksize = pipe->pipe_buffer.size;
1129 ub->st_size = pipe->pipe_buffer.cnt;
1130 ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
1131 ub->st_atimespec = pipe->pipe_atime;
1132 ub->st_mtimespec = pipe->pipe_mtime;
1133 ub->st_ctimespec = pipe->pipe_ctime;
1134 /*
1135 * Left as 0: st_dev, st_ino, st_nlink, st_uid, st_gid, st_rdev,
1136 * st_flags, st_gen.
1137 * XXX (st_dev, st_ino) should be unique.
1138 */
1139 return (0);
1140}
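/*
 * Illustrative userland counterpart of pipe_stat() above: fstat(2) on a
 * pipe descriptor reports S_IFIFO, and st_size is the number of bytes
 * currently buffered.
 *
 *	#include <sys/stat.h>
 *	#include <stdio.h>
 *
 *	struct stat sb;
 *
 *	if (fstat(fd[0], &sb) == 0 && S_ISFIFO(sb.st_mode))
 *		printf("%lld bytes pending\n", (long long)sb.st_size);
 */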
1141
1142/* ARGSUSED */
1143static int
1144pipe_close(fp, p)
1145 struct file *fp;
1146 struct proc *p;
1147{
1148 struct pipe *cpipe = (struct pipe *)fp->f_data;
1149
1150 fp->f_ops = &badfileops;
1151 fp->f_data = NULL;
1152 funsetown(cpipe->pipe_sigio);
1153 pipeclose(cpipe);
1154 return (0);
1155}
1156
1157static void
1158pipe_free_kmem(cpipe)
1159 struct pipe *cpipe;
1160{
1161
1162 if (cpipe->pipe_buffer.buffer != NULL) {
1163 if (cpipe->pipe_buffer.size > PIPE_SIZE)
1164 --nbigpipe;
1165 amountpipekva -= cpipe->pipe_buffer.size;
1166 kmem_free(kernel_map,
1167 (vm_offset_t)cpipe->pipe_buffer.buffer,
1168 cpipe->pipe_buffer.size);
1169 cpipe->pipe_buffer.buffer = NULL;
1170 }
1171#ifndef PIPE_NODIRECT
1172 if (cpipe->pipe_map.kva != 0) {
1173 amountpipekva -= cpipe->pipe_buffer.size + PAGE_SIZE;
1174 kmem_free(kernel_map,
1175 cpipe->pipe_map.kva,
1176 cpipe->pipe_buffer.size + PAGE_SIZE);
1177 cpipe->pipe_map.cnt = 0;
1178 cpipe->pipe_map.kva = 0;
1179 cpipe->pipe_map.pos = 0;
1180 cpipe->pipe_map.npages = 0;
1181 }
1182#endif
1183}
1184
1185/*
1186 * shutdown the pipe
1187 */
1188static void
1189pipeclose(cpipe)
1190 struct pipe *cpipe;
1191{
1192 struct pipe *ppipe;
1193
1194 if (cpipe) {
1195
1196 pipeselwakeup(cpipe);
1197
1198 /*
1199 * If the other side is blocked, wake it up saying that
1200 * we want to close it down.
1201 */
1202 while (cpipe->pipe_busy) {
1203 wakeup(cpipe);
1204 cpipe->pipe_state |= PIPE_WANT | PIPE_EOF;
1205 tsleep(cpipe, PRIBIO, "pipecl", 0);
1206 }
1207
1208 /*
1209 * Disconnect from peer
1210 */
1211 if ((ppipe = cpipe->pipe_peer) != NULL) {
1212 pipeselwakeup(ppipe);
1213
1214 ppipe->pipe_state |= PIPE_EOF;
1215 wakeup(ppipe);
1216 KNOTE(&ppipe->pipe_sel.si_note, 0);
1217 ppipe->pipe_peer = NULL;
1218 }
1219 /*
1220 * free resources
1221 */
1222 pipe_free_kmem(cpipe);
1223 zfree(pipe_zone, cpipe);
1224 }
1225}
1226
1227/*ARGSUSED*/
1228static int
1229pipe_kqfilter(struct file *fp, struct knote *kn)
1230{
1231 struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data;
1232
1233 switch (kn->kn_filter) {
1234 case EVFILT_READ:
1235 kn->kn_fop = &pipe_rfiltops;
1236 break;
1237 case EVFILT_WRITE:
1238 kn->kn_fop = &pipe_wfiltops;
1239 cpipe = cpipe->pipe_peer;
1240 if (cpipe == NULL)
1241 /* other end of pipe has been closed */
1242 return (EBADF);
1243 break;
1244 default:
1245 return (1);
1246 }
1247 kn->kn_hook = (caddr_t)cpipe;
1248
1249 SLIST_INSERT_HEAD(&cpipe->pipe_sel.si_note, kn, kn_selnext);
1250 return (0);
1251}
1252
1253static void
1254filt_pipedetach(struct knote *kn)
1255{
1256 struct pipe *cpipe = (struct pipe *)kn->kn_hook;
1257
1258 SLIST_REMOVE(&cpipe->pipe_sel.si_note, kn, knote, kn_selnext);
1259}
1260
1261/*ARGSUSED*/
1262static int
1263filt_piperead(struct knote *kn, long hint)
1264{
1265 struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
1266 struct pipe *wpipe = rpipe->pipe_peer;
1267
1268 kn->kn_data = rpipe->pipe_buffer.cnt;
1269 if ((kn->kn_data == 0) && (rpipe->pipe_state & PIPE_DIRECTW))
1270 kn->kn_data = rpipe->pipe_map.cnt;
1271
1272 if ((rpipe->pipe_state & PIPE_EOF) ||
1273 (wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1274 kn->kn_flags |= EV_EOF;
1275 return (1);
1276 }
1277 return (kn->kn_data > 0);
1278}
1279
1280/*ARGSUSED*/
1281static int
1282filt_pipewrite(struct knote *kn, long hint)
1283{
1284 struct pipe *rpipe = (struct pipe *)kn->kn_fp->f_data;
1285 struct pipe *wpipe = rpipe->pipe_peer;
1286
1287 if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
1288 kn->kn_data = 0;
1289 kn->kn_flags |= EV_EOF;
1290 return (1);
1291 }
1292 kn->kn_data = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;
1293 if (wpipe->pipe_state & PIPE_DIRECTW)
1294 kn->kn_data = 0;
1295
1296 return (kn->kn_data >= PIPE_BUF);
1297}
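/*
 * Illustrative userland use of the knote filters above: register an
 * EVFILT_READ filter on the read end of a pipe; kn_data surfaces as
 * kev.data (bytes readable) and EV_EOF reports a closed write side.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <sys/time.h>
 *	#include <stdio.h>
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, fd[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	if (kevent(kq, NULL, 0, &kev, 1, NULL) > 0)
 *		printf("%ld readable, EOF=%d\n",
 *		    (long)kev.data, (kev.flags & EV_EOF) != 0);
 */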