/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Direct file pointer API functions for in-kernel operations on files.  These
 * functions provide an open/read/write/close-like interface within the kernel
 * for operating on files that are not necessarily associated with processes
 * and which do not (typically) have descriptors.
 *
 * FUTURE: file handle conversion routines to support checkpointing,
 * and additional file operations (ioctl, fcntl).
 */

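/*
 * Example (illustrative sketch, not part of this file): a typical in-kernel
 * consumer reads a file without ever installing a descriptor.  The path is
 * hypothetical and fp_close() is assumed to be the usual counterpart that
 * disposes of the file pointer; error handling is abbreviated.
 *
 *	struct file *fp;
 *	char buf[128];
 *	ssize_t res;
 *	int error;
 *
 *	error = fp_open("/etc/motd", O_RDONLY, 0, &fp);
 *	if (error == 0) {
 *		error = fp_read(fp, buf, sizeof(buf), &res, 0, UIO_SYSSPACE);
 *		fp_close(fp);
 *	}
 */
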
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/sysmsg.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/nlookup.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/event.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <sys/file2.h>
#include <machine/limits.h>

typedef struct file *file_t;

/*
 * Open a file as specified.  Use O_* flags for flags.
 *
 * vn_open() asserts that the cred must match the process's cred.
 *
 * NOTE! When fp_open() is called from a pure thread, root creds are
 * used.
 */
int
fp_open(const char *path, int flags, int mode, file_t *fpp)
{
	struct nlookupdata nd;
	struct thread *td = curthread;
	int error;

	if ((error = falloc(NULL, fpp, NULL)) != 0)
		return(error);
	if (td->td_proc)
		fsetcred(*fpp, td->td_proc->p_ucred);
	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_LOCKVP);
	flags = FFLAGS(flags);
	if (error == 0)
		error = vn_open(&nd, fpp, flags, mode);
	nlookup_done(&nd);
	if (error) {
		fdrop(*fpp);		/* release the file allocated above */
		*fpp = NULL;
	}
	return(error);
}

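/*
 * Example (sketch): creating and appending to a file from kernel context.
 * The path and mode are illustrative and fp_close() is an assumed cleanup
 * counterpart.  Note that ordinary O_* flags are passed; the conversion to
 * F* flags via FFLAGS() happens inside fp_open().
 *
 *	struct file *fp;
 *	ssize_t res;
 *	int error;
 *
 *	error = fp_open("/var/log/example.log",
 *			O_WRONLY | O_CREAT | O_APPEND, 0644, &fp);
 *	if (error == 0) {
 *		error = fp_write(fp, "hello\n", 6, &res, UIO_SYSSPACE);
 *		fp_close(fp);
 *	}
 */
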
/*
 * fp_vpopen(): convert a vnode to a file pointer, call VOP_OPEN() on the
 * vnode.  The vnode must be refd and locked.
 *
 * On success the vnode's ref is inherited by the file pointer and the caller
 * should not vrele() it, and the vnode is unlocked.
 *
 * On failure the vnode remains locked and refd and the caller is responsible
 * for it.
 */
int
fp_vpopen(struct vnode *vp, int flags, file_t *fpp)
{
	struct thread *td = curthread;
	struct file *fp;
	int vmode;
	int error;

	/*
	 * Vnode checks (from vn_open())
	 */
	if (vp->v_type == VLNK)
		return(EMLINK);
	if (vp->v_type == VSOCK)
		return(EOPNOTSUPP);
	flags = FFLAGS(flags);
	vmode = 0;
	if (flags & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR)
			return(EISDIR);
		error = vn_writechk(vp);
		if (error)
			return(error);
		vmode |= VWRITE;
	}
	if (flags & FREAD)
		vmode |= VREAD;
	if (vmode) {
		error = VOP_ACCESS(vp, vmode, td->td_proc->p_ucred);
		if (error)
			return(error);
	}

	/*
	 * File pointer setup
	 */
	if ((error = falloc(NULL, fpp, NULL)) != 0)
		return(error);
	fp = *fpp;
	fsetcred(fp, td->td_proc->p_ucred);

	error = VOP_OPEN(vp, flags, td->td_proc->p_ucred, fpp);
	if (error) {
		fp->f_ops = &badfileops;	/* open failed, don't close */
		fdrop(fp);
		*fpp = NULL;
		/* leave the vnode intact, but fall through and unlock it anyway */
	}
	vn_unlock(vp);
	return(error);
}

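/*
 * Example (sketch): wrapping an already-held vnode in a file pointer.
 * Assumes 'vp' was obtained elsewhere (e.g. via nlookup or VFS_ROOT) and is
 * referenced and locked, as fp_vpopen() requires; fp_close() is an assumed
 * counterpart that ultimately drops the inherited vnode reference.
 *
 *	struct file *fp;
 *	int error;
 *
 *	error = fp_vpopen(vp, O_RDONLY, &fp);
 *	if (error == 0) {
 *		(the vnode's ref now belongs to fp; do not vrele() it)
 *		fp_close(fp);
 *	}
 */
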
/*
 * fp_*read() is meant to operate like the normal descriptor based syscalls
 * would.  Note that if 'buf' points to user memory a UIO_USERSPACE
 * transfer will be used.
 */
int
fp_pread(file_t fp, void *buf, size_t nbytes, off_t offset, ssize_t *res,
	 enum uio_seg seg)
{
	struct uio auio;
	struct iovec aiov;
	size_t count;
	int error;

	if (res)
		*res = 0;
	if (nbytes > LONG_MAX)
		return(EINVAL);
	bzero(&auio, sizeof(auio));
	aiov.iov_base = (caddr_t)buf;
	aiov.iov_len = nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = offset;
	auio.uio_resid = nbytes;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = seg;
	auio.uio_td = curthread;

	count = nbytes;
	error = fo_read(fp, &auio, fp->f_cred, O_FOFFSET);
	if (auio.uio_resid != nbytes && (error == ERESTART || error == EINTR ||
	    error == EWOULDBLOCK)
	) {
		error = 0;
	}
	count -= auio.uio_resid;
	if (res)
		*res = count;
	return(error);
}

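/*
 * Example (sketch): positional read into a kernel buffer, given an already
 * open file pointer 'fp'.  The offset is arbitrary; UIO_SYSSPACE tells the
 * routine that 'buf' is kernel memory.  On success 'res' holds the number
 * of bytes actually transferred.
 *
 *	char buf[512];
 *	ssize_t res;
 *	int error;
 *
 *	error = fp_pread(fp, buf, sizeof(buf), 4096, &res, UIO_SYSSPACE);
 */
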
int
fp_read(file_t fp, void *buf, size_t nbytes, ssize_t *res, int all,
	enum uio_seg seg)
{
	struct uio auio;
	struct iovec aiov;
	size_t lastresid;
	int error;

	if (res)
		*res = 0;
	if (nbytes > LONG_MAX)
		return(EINVAL);
	bzero(&auio, sizeof(auio));
	aiov.iov_base = (caddr_t)buf;
	aiov.iov_len = nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbytes;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = seg;
	auio.uio_td = curthread;

	/*
	 * If 'all' is false call fo_read() once.
	 * If 'all' is true we attempt to read the entire request.  We have to
	 * break out of the loop if an unrecoverable error or EOF occurs.
	 */
	do {
		lastresid = auio.uio_resid;
		error = fo_read(fp, &auio, fp->f_cred, 0);
	} while (all && auio.uio_resid &&
		 ((error == 0 && auio.uio_resid != lastresid) ||
		  error == ERESTART || error == EINTR));
	if (all && error == 0 && auio.uio_resid)
		error = ESPIPE;

	/*
	 * If an error occurred but some data was read, silently forget the
	 * error.  However, if this is a non-blocking descriptor and 'all'
	 * was specified, return an error even if some data was read (this
	 * is considered a bug in the caller for using an illegal combination
	 * of 'all' and a non-blocking descriptor).
	 */
	if (auio.uio_resid != nbytes) {
		if (error == ERESTART || error == EINTR)
			error = 0;
		if (error == EWOULDBLOCK && all == 0)
			error = 0;
	}
	if (res)
		*res = nbytes - auio.uio_resid;
	return(error);
}

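/*
 * Example (sketch): using a non-zero 'all' to demand a full-sized read from
 * an open file pointer 'fp', e.g. for a fixed-size on-disk header.  The
 * structure is hypothetical; with 'all' set, a short read is reported as an
 * error instead of being silently returned as a partial result.
 *
 *	struct example_hdr hdr;
 *	ssize_t res;
 *	int error;
 *
 *	error = fp_read(fp, &hdr, sizeof(hdr), &res, 1, UIO_SYSSPACE);
 *	if (error == 0)
 *		KKASSERT(res == sizeof(hdr));
 */
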
int
fp_pwrite(file_t fp, void *buf, size_t nbytes, off_t offset, ssize_t *res,
	  enum uio_seg seg)
{
	struct uio auio;
	struct iovec aiov;
	size_t count;
	int error;

	if (res)
		*res = 0;
	if (nbytes > LONG_MAX)
		return(EINVAL);
	bzero(&auio, sizeof(auio));
	aiov.iov_base = (caddr_t)buf;
	aiov.iov_len = nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = offset;
	auio.uio_resid = nbytes;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = seg;
	auio.uio_td = curthread;

	count = nbytes;
	error = fo_write(fp, &auio, fp->f_cred, O_FOFFSET);
	if (auio.uio_resid != nbytes && (error == ERESTART || error == EINTR ||
	    error == EWOULDBLOCK)
	) {
		error = 0;
	}
	count -= auio.uio_resid;
	if (res)
		*res = count;
	return(error);
}

int
fp_write(file_t fp, void *buf, size_t nbytes, ssize_t *res, enum uio_seg seg)
{
	struct uio auio;
	struct iovec aiov;
	size_t count;
	int error;

	if (res)
		*res = 0;
	if (nbytes > LONG_MAX)
		return(EINVAL);
	bzero(&auio, sizeof(auio));
	aiov.iov_base = (caddr_t)buf;
	aiov.iov_len = nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = nbytes;
	auio.uio_rw = UIO_WRITE;
	auio.uio_segflg = seg;
	auio.uio_td = curthread;

	count = nbytes;
	error = fo_write(fp, &auio, fp->f_cred, 0);
	if (auio.uio_resid != nbytes && (error == ERESTART || error == EINTR ||
	    error == EWOULDBLOCK)
	) {
		error = 0;
	}
	count -= auio.uio_resid;
	if (res)
		*res = count;
	return(error);
}

int
fp_stat(file_t fp, struct stat *ub)
{
	int error;

	error = fo_stat(fp, ub, fp->f_cred);
	return(error);
}

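/*
 * Example (sketch): retrieving the size of a file behind an open file
 * pointer 'fp'.
 *
 *	struct stat st;
 *	off_t size;
 *	int error;
 *
 *	error = fp_stat(fp, &st);
 *	if (error == 0)
 *		size = st.st_size;
 */
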
/*
 * non-anonymous, non-stack descriptor mappings only!
 *
 * This routine mostly snarfed from vm/vm_mmap.c
 */
int
fp_mmap(void *addr_arg, size_t size, int prot, int flags, struct file *fp,
	off_t pos, void **resp)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	vm_size_t pageoff;
	vm_prot_t maxprot;
	vm_offset_t addr;
	void *handle;
	int error;
	vm_object_t obj;
	struct vmspace *vms = p->p_vmspace;
	struct vnode *vp;
	struct vattr va;

	if ((ssize_t)size < 0 || (flags & MAP_ANON))
		return(EINVAL);

	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t)round_page(size);	/* hi end */
	addr = (vm_offset_t)addr_arg;

	/*
	 * Check for illegal addresses.  Watch out for address wrap... Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return(EINVAL);
		/* Address range must be all in user VM space. */
		if (VM_MAX_USER_ADDRESS > 0 && addr + size > VM_MAX_USER_ADDRESS)
			return(EINVAL);
		if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS)
			return(EINVAL);
		if (addr + size < addr)
			return(EINVAL);
	} else if (addr == 0 ||
		   (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz))
	) {
		/*
		 * XXX for non-fixed mappings where no hint is provided or
		 * the hint would fall in the potential heap space,
		 * place it after the end of the largest possible heap.
		 *
		 * There should really be a pmap call to determine a reasonable
		 * location.
		 */
		addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);
	}

	/*
	 * Mapping file, get fp for validation.  Obtain vnode and make
	 * sure it is of appropriate type.
	 */
	if (fp->f_type != DTYPE_VNODE)
		return(EINVAL);

	/*
	 * POSIX shared-memory objects are defined to have
	 * kernel persistence, and are not defined to support
	 * read(2)/write(2) -- or even open(2).  Thus, we can
	 * use MAP_ASYNC to trade on-disk coherence for speed.
	 * The shm_open(3) library routine turns on the FPOSIXSHM
	 * flag to request this behavior.
	 */
	if (fp->f_flag & FPOSIXSHM)
		flags |= MAP_NOSYNC;
	vp = (struct vnode *)fp->f_data;
	if (vp->v_type != VREG && vp->v_type != VCHR)
		return(EINVAL);

	/*
	 * Get the proper underlying object
	 */
	if (vp->v_type == VREG) {
		if ((obj = vp->v_object) == NULL)
			return(EINVAL);
		KKASSERT(vp == (struct vnode *)obj->handle);
	}

	/*
	 * XXX hack to handle use of /dev/zero to map anon memory (ala
	 * SunOS).
	 */
	if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
		handle = NULL;
		maxprot = VM_PROT_ALL;
		flags |= MAP_ANON;
		pos = 0;
	} else {
		/*
		 * cdevs do not provide private mappings of any kind.
		 */
		if (vp->v_type == VCHR &&
		    (flags & (MAP_PRIVATE|MAP_COPY))) {
			return(EINVAL);
		}
		/*
		 * Ensure that file and memory protections are
		 * compatible.  Note that we only worry about
		 * writability if mapping is shared; in this case,
		 * current and max prot are dictated by the open file.
		 * XXX use the vnode instead?  Problem is: what
		 * credentials do we use for determination?  What if
		 * proc does a setuid?
		 */
		maxprot = VM_PROT_EXECUTE;	/* ??? */
		if (fp->f_flag & FREAD) {
			maxprot |= VM_PROT_READ;
		} else if (prot & PROT_READ) {
			return(EACCES);
		}

		/*
		 * If we are sharing potential changes (either via
		 * MAP_SHARED or via the implicit sharing of character
		 * device mappings), and we are trying to get write
		 * permission although we opened it without asking
		 * for it, bail out.
		 */
		if ((flags & MAP_SHARED) != 0 ||
		    (vp->v_type == VCHR)
		) {
			if ((fp->f_flag & FWRITE) != 0) {
				if ((error = VOP_GETATTR_FP(vp, &va, fp))) {
					return(error);
				}
				if ((va.va_flags & (IMMUTABLE|APPEND)) == 0) {
					maxprot |= VM_PROT_WRITE;
				} else if (prot & PROT_WRITE) {
					return(EPERM);
				}
			} else if ((prot & PROT_WRITE) != 0) {
				return(EACCES);
			}
		} else {
			maxprot |= VM_PROT_WRITE;
		}
		handle = (void *)vp;
	}
	error = vm_mmap(&vms->vm_map, &addr, size, prot,
			maxprot, flags, handle, pos, fp);
	if (error == 0 && addr_arg)
		*resp = (void *)addr;
	return(error);
}

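/*
 * Example (sketch): mapping the first page of an open regular file 'fp'
 * read-only into the current process's address space.  The flags are
 * illustrative; MAP_ANON requests are rejected by fp_mmap().  Note that, as
 * written above, *resp is only filled in when a non-NULL address hint is
 * supplied in 'addr_arg'; with a NULL hint the kernel still chooses and
 * establishes the mapping, but the result pointer is left untouched.
 *
 *	void *base;
 *	int error;
 *
 *	error = fp_mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fp, 0, &base);
 */
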
int
fp_shutdown(file_t fp, int how)
{
	return(fo_shutdown(fp, how));
}