/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_fp.c,v 1.13 2006/03/29 18:44:50 dillon Exp $
 */

/*
 * Direct file pointer API functions for in-kernel operations on files.  These
 * functions provide an open/read/write/close-like interface within the kernel
 * for operating on files that are not necessarily associated with processes
 * and which do not (typically) have descriptors.
 *
 * FUTURE: file handle conversion routines to support checkpointing,
 * and additional file operations (ioctl, fcntl).
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/conf.h>
#include <sys/filedesc.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/nlookup.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/event.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>

#include <sys/file2.h>
#include <machine/limits.h>

typedef struct file *file_t;

/*
 * fp_open:
 *
 *	Open a file as specified.  Use O_* flags for flags.
 *
 *	NOTE! O_ROOTCRED not quite working yet, vn_open() asserts that the
 *	cred must match the process's cred.  XXX
 *
 *	NOTE! when fp_open() is called from a pure thread, root creds are
 *	used.
 */
int
fp_open(const char *path, int flags, int mode, file_t *fpp)
{
	struct nlookupdata nd;
	struct thread *td;
	struct file *fp;
	int error;

	if ((error = falloc(NULL, fpp, NULL)) != 0)
		return (error);
	fp = *fpp;
	td = curthread;
	if (td->td_proc) {
		if ((flags & O_ROOTCRED) == 0)
			fsetcred(fp, td->td_proc->p_ucred);
	}
	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_LOCKVP);
	flags = FFLAGS(flags);
	if (error == 0)
		error = vn_open(&nd, fp, flags, mode);
	nlookup_done(&nd);
	if (error) {
		fdrop(fp, td);
		*fpp = NULL;
	}
	return (error);
}
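
/*
 * Example (a minimal sketch, not part of the original file): how a
 * kernel consumer might use the fp_open()/fp_write()/fp_close() API to
 * append a message to a log file.  The path and message are hypothetical,
 * and this assumes the fp_*() prototypes are visible from the system
 * file headers; fp_write() and fp_close() are defined later in this file.
 */
#if 0
static void
fp_open_example(void)
{
	char msg[] = "hello from the kernel\n";
	file_t fp;
	ssize_t res;
	int error;

	error = fp_open("/var/log/kexample.log",
			O_WRONLY | O_APPEND | O_CREAT, 0644, &fp);
	if (error == 0) {
		fp_write(fp, msg, sizeof(msg) - 1, &res);
		fp_close(fp);
	}
}
#endif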

/*
 * fp_vpopen(): convert a vnode to a file pointer, call VOP_OPEN() on the
 * vnode.  The vnode must be refd and locked.
 *
 * On success the vnode's ref is inherited by the file pointer and the caller
 * should not vrele() it, and the vnode is unlocked.
 *
 * On failure the vnode remains locked and refd and the caller is responsible
 * for vput()ing it.
 */
int
fp_vpopen(struct vnode *vp, int flags, file_t *fpp)
{
	struct thread *td;
	struct file *fp;
	int vmode;
	int error;

	td = curthread;

	/*
	 * Vnode checks (from vn_open())
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad2;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad2;
	}
	flags = FFLAGS(flags);
	vmode = 0;
	if (flags & (FWRITE | O_TRUNC)) {
		if (vp->v_type == VDIR) {
			error = EISDIR;
			goto bad2;
		}
		error = vn_writechk(vp);
		if (error)
			goto bad2;
		vmode |= VWRITE;
	}
	if (flags & FREAD)
		vmode |= VREAD;
	if (vmode) {
		error = VOP_ACCESS(vp, vmode, td->td_proc->p_ucred, td);
		if (error)
			goto bad2;
	}

	/*
	 * File pointer setup
	 */
	if ((error = falloc(NULL, fpp, NULL)) != 0)
		goto bad2;
	fp = *fpp;
	if ((flags & O_ROOTCRED) == 0 && td->td_proc)
		fsetcred(fp, td->td_proc->p_ucred);
	fp->f_type = DTYPE_VNODE;
	fp->f_flag = flags;
	fp->f_ops = &vnode_fileops;
	fp->f_data = vp;

	error = VOP_OPEN(vp, flags, td->td_proc->p_ucred, fp, td);
	if (error)
		goto bad1;

	/*
	 * All done, update v_writecount now that no more errors can occur.
	 */
	if (flags & FWRITE)
		vp->v_writecount++;
	VOP_UNLOCK(vp, 0, td);
	return (0);
bad1:
	fp->f_ops = &badfileops;	/* open failed, don't close */
	fp->f_data = NULL;
	fdrop(fp, td);
	/* leave the vnode intact, but fall through and unlock it anyway */
bad2:
	*fpp = NULL;
	return (error);
}
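
/*
 * Example (a minimal sketch, not part of the original file): the
 * cleanup contract of fp_vpopen().  On success the file pointer owns
 * the vnode ref and the vnode comes back unlocked; on failure the
 * caller still owns the ref and lock and must vput().  The vnode is
 * assumed to come from some earlier lookup that returned it refd and
 * locked.
 */
#if 0
static int
fp_vpopen_example(struct vnode *vp)	/* vp: refd and locked */
{
	file_t fp;
	int error;

	error = fp_vpopen(vp, O_RDONLY, &fp);
	if (error) {
		vput(vp);	/* failure: ref and lock are still ours */
		return (error);
	}
	/* success: vp now belongs to fp and is unlocked; don't vrele() it */
	fp_close(fp);		/* drops the file and its vnode ref */
	return (0);
}
#endif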

/*
 * fp_*read() is meant to operate like the normal descriptor-based syscalls
 * would.  Note that if 'buf' points to user memory a UIO_USERSPACE
 * transfer will be used.
 */
int
fp_pread(file_t fp, void *buf, size_t nbytes, off_t offset, ssize_t *res)
{
	struct uio auio;
	struct iovec aiov;
	size_t count;
	int error;

	if (res)
		*res = 0;
	if (nbytes > INT_MAX)
		return (EINVAL);
	bzero(&auio, sizeof(auio));
	aiov.iov_base = (caddr_t)buf;
	aiov.iov_len = nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = offset;
	auio.uio_resid = nbytes;
	auio.uio_rw = UIO_READ;
	if ((vm_offset_t)buf < VM_MAXUSER_ADDRESS)
		auio.uio_segflg = UIO_USERSPACE;
	else
		auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = curthread;

	count = nbytes;
	error = fo_read(fp, &auio, fp->f_cred, FOF_OFFSET, auio.uio_td);
	if (error) {
		if (auio.uio_resid != nbytes &&
		    (error == ERESTART || error == EINTR ||
		     error == EWOULDBLOCK)) {
			error = 0;
		}
	}
	count -= auio.uio_resid;
	if (res)
		*res = count;
	return (error);
}
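
/*
 * Example (a minimal sketch, not part of the original file): because
 * fp_pread() selects UIO_SYSSPACE vs UIO_USERSPACE from the buffer
 * address, a kernel buffer can be passed directly.  The 512-byte
 * header at offset 1024 is purely illustrative.
 */
#if 0
static int
fp_pread_example(file_t fp)
{
	char header[512];	/* kernel address -> UIO_SYSSPACE */
	ssize_t res;
	int error;

	error = fp_pread(fp, header, sizeof(header), 1024, &res);
	if (error == 0 && res != (ssize_t)sizeof(header))
		error = EINVAL;	/* short read, e.g. hit EOF */
	return (error);
}
#endif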

int
fp_read(file_t fp, void *buf, size_t nbytes, ssize_t *res, int all)
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int lastresid;

	if (res)
		*res = 0;
	if (nbytes > INT_MAX)
		return (EINVAL);
	bzero(&auio, sizeof(auio));
	aiov.iov_base = (caddr_t)buf;
	aiov.iov_len = nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = nbytes;
	auio.uio_rw = UIO_READ;
	if ((vm_offset_t)buf < VM_MAXUSER_ADDRESS)
		auio.uio_segflg = UIO_USERSPACE;
	else
		auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = curthread;

	/*
	 * If all is false call fo_read() once.
	 * If all is true we attempt to read the entire request.  We have to
	 * break out of the loop if an unrecoverable error or EOF occurs.
	 */
	do {
		lastresid = auio.uio_resid;
		error = fo_read(fp, &auio, fp->f_cred, 0, auio.uio_td);
	} while (all && auio.uio_resid &&
		 ((error == 0 && auio.uio_resid != lastresid) ||
		  error == ERESTART || error == EINTR));
	if (all && error == 0 && auio.uio_resid)
		error = ESPIPE;

	/*
	 * If an error occurred but some data was read, silently forget the
	 * error.  However, if this is a non-blocking descriptor and 'all'
	 * was specified, return an error even if some data was read (this
	 * is considered a bug in the caller for using an illegal combination
	 * of 'all' and a non-blocking descriptor).
	 */
	if (error) {
		if (auio.uio_resid != nbytes) {
			if (error == ERESTART || error == EINTR)
				error = 0;
			if (error == EWOULDBLOCK && all == 0)
				error = 0;
		}
	}
	if (res)
		*res = nbytes - auio.uio_resid;
	return (error);
}
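
/*
 * Example (a minimal sketch, not part of the original file): with
 * 'all' non-zero, fp_read() loops until the full request is satisfied
 * and converts a premature EOF into ESPIPE, which suits fixed-size
 * record headers.  The record layout is hypothetical.
 */
#if 0
struct example_record {
	int32_t		type;
	int32_t		length;
};

static int
fp_read_example(file_t fp, struct example_record *rec)
{
	ssize_t res;

	/* all != 0: returns 0 only if sizeof(*rec) bytes were read */
	return (fp_read(fp, rec, sizeof(*rec), &res, 1));
}
#endif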

int
fp_pwrite(file_t fp, void *buf, size_t nbytes, off_t offset, ssize_t *res)
{
	struct uio auio;
	struct iovec aiov;
	size_t count;
	int error;

	if (res)
		*res = 0;
	if (nbytes > INT_MAX)
		return (EINVAL);
	bzero(&auio, sizeof(auio));
	aiov.iov_base = (caddr_t)buf;
	aiov.iov_len = nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = offset;
	auio.uio_resid = nbytes;
	auio.uio_rw = UIO_WRITE;
	if ((vm_offset_t)buf < VM_MAXUSER_ADDRESS)
		auio.uio_segflg = UIO_USERSPACE;
	else
		auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = curthread;

	count = nbytes;
	error = fo_write(fp, &auio, fp->f_cred, FOF_OFFSET, auio.uio_td);
	if (error) {
		if (auio.uio_resid != nbytes &&
		    (error == ERESTART || error == EINTR ||
		     error == EWOULDBLOCK)) {
			error = 0;
		}
	}
	count -= auio.uio_resid;
	if (res)
		*res = count;
	return (error);
}

int
fp_write(file_t fp, void *buf, size_t nbytes, ssize_t *res)
{
	struct uio auio;
	struct iovec aiov;
	size_t count;
	int error;

	if (res)
		*res = 0;
	if (nbytes > INT_MAX)
		return (EINVAL);
	bzero(&auio, sizeof(auio));
	aiov.iov_base = (caddr_t)buf;
	aiov.iov_len = nbytes;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_resid = nbytes;
	auio.uio_rw = UIO_WRITE;
	if ((vm_offset_t)buf < VM_MAXUSER_ADDRESS)
		auio.uio_segflg = UIO_USERSPACE;
	else
		auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_td = curthread;

	count = nbytes;
	error = fo_write(fp, &auio, fp->f_cred, 0, auio.uio_td);
	if (error) {
		if (auio.uio_resid != nbytes &&
		    (error == ERESTART || error == EINTR ||
		     error == EWOULDBLOCK)) {
			error = 0;
		}
	}
	count -= auio.uio_resid;
	if (res)
		*res = count;
	return (error);
}

int
fp_stat(file_t fp, struct stat *ub)
{
	int error;

	error = fo_stat(fp, ub, curthread);
	return (error);
}

/*
 * non-anonymous, non-stack descriptor mappings only!
 *
 * This routine mostly snarfed from vm/vm_mmap.c
 */
int
fp_mmap(void *addr_arg, size_t size, int prot, int flags, struct file *fp,
    off_t pos, void **resp)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	vm_size_t pageoff;
	vm_prot_t maxprot;
	vm_offset_t addr;
	void *handle;
	int error;
	vm_object_t obj;
	struct vmspace *vms = p->p_vmspace;
	struct vnode *vp;
	int disablexworkaround;

	prot &= VM_PROT_ALL;

	if ((ssize_t)size < 0 || (flags & MAP_ANON))
		return (EINVAL);

	pageoff = (pos & PAGE_MASK);
	pos -= pageoff;

	/* Adjust size for rounding (on both ends). */
	size += pageoff;			/* low end... */
	size = (vm_size_t)round_page(size);	/* hi end */
	addr = (vm_offset_t)addr_arg;

	/*
	 * Check for illegal addresses.  Watch out for address wrap...  Note
	 * that VM_*_ADDRESS are not constants due to casts (argh).
	 */
	if (flags & MAP_FIXED) {
		/*
		 * The specified address must have the same remainder
		 * as the file offset taken modulo PAGE_SIZE, so it
		 * should be aligned after adjustment by pageoff.
		 */
		addr -= pageoff;
		if (addr & PAGE_MASK)
			return (EINVAL);
		/* Address range must be all in user VM space. */
		if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
			return (EINVAL);
#ifndef i386
		if (VM_MIN_ADDRESS > 0 && addr < VM_MIN_ADDRESS)
			return (EINVAL);
#endif
		if (addr + size < addr)
			return (EINVAL);
	} else if (addr == 0 ||
	    (addr >= round_page((vm_offset_t)vms->vm_taddr) &&
	     addr < round_page((vm_offset_t)vms->vm_daddr + maxdsiz))
	) {
		/*
		 * XXX for non-fixed mappings where no hint is provided or
		 * the hint would fall in the potential heap space,
		 * place it after the end of the largest possible heap.
		 *
		 * There should really be a pmap call to determine a reasonable
		 * location.
		 */
		addr = round_page((vm_offset_t)vms->vm_daddr + maxdsiz);
	}

	/*
	 * Mapping file, get fp for validation.  Obtain vnode and make
	 * sure it is of appropriate type.
	 */
	if (fp->f_type != DTYPE_VNODE)
		return (EINVAL);

	/*
	 * POSIX shared-memory objects are defined to have
	 * kernel persistence, and are not defined to support
	 * read(2)/write(2) -- or even open(2).  Thus, we can
	 * use MAP_ASYNC to trade on-disk coherence for speed.
	 * The shm_open(3) library routine turns on the FPOSIXSHM
	 * flag to request this behavior.
	 */
	if (fp->f_flag & FPOSIXSHM)
		flags |= MAP_NOSYNC;
	vp = (struct vnode *)fp->f_data;
	if (vp->v_type != VREG && vp->v_type != VCHR)
		return (EINVAL);

	/*
	 * Get the proper underlying object
	 */
	if (vp->v_type == VREG) {
		if ((obj = vp->v_object) == NULL)
			return (EINVAL);
		KKASSERT(vp == (struct vnode *)obj->handle);
	}

	/*
	 * XXX hack to handle use of /dev/zero to map anon memory (ala
	 * SunOS).
	 */
	if (vp->v_type == VCHR && iszerodev(vp->v_rdev)) {
		handle = NULL;
		maxprot = VM_PROT_ALL;
		flags |= MAP_ANON;
		pos = 0;
	} else {
		/*
		 * cdevs do not provide private mappings of any kind.
		 */
		/*
		 * However, for the XIG X server to continue to work,
		 * we should allow the superuser to do it anyway.
		 * We only allow it at securelevel < 1.
		 * (Because the XIG X server writes directly to video
		 * memory via /dev/mem, it should never work at any
		 * other securelevel.)
		 * XXX this will have to go
		 */
		if (securelevel >= 1)
			disablexworkaround = 1;
		else
			disablexworkaround = suser(td);
		if (vp->v_type == VCHR && disablexworkaround &&
		    (flags & (MAP_PRIVATE|MAP_COPY))) {
			error = EINVAL;
			goto done;
		}
		/*
		 * Ensure that file and memory protections are
		 * compatible.  Note that we only worry about
		 * writability if mapping is shared; in this case,
		 * current and max prot are dictated by the open file.
		 * XXX use the vnode instead?  Problem is: what
		 * credentials do we use for determination?  What if
		 * proc does a setuid?
		 */
		maxprot = VM_PROT_EXECUTE;	/* ??? */
		if (fp->f_flag & FREAD) {
			maxprot |= VM_PROT_READ;
		} else if (prot & PROT_READ) {
			error = EACCES;
			goto done;
		}
		/*
		 * If we are sharing potential changes (either via
		 * MAP_SHARED or via the implicit sharing of character
		 * device mappings), and we are trying to get write
		 * permission although we opened it without asking
		 * for it, bail out.  Check for superuser, only if
		 * we're at securelevel < 1, to allow the XIG X server
		 * to continue to work.
		 */
		if ((flags & MAP_SHARED) != 0 ||
		    (vp->v_type == VCHR && disablexworkaround)) {
			if ((fp->f_flag & FWRITE) != 0) {
				struct vattr va;

				if ((error = VOP_GETATTR(vp, &va, td)))
					goto done;
				if ((va.va_flags & (IMMUTABLE|APPEND)) == 0) {
					maxprot |= VM_PROT_WRITE;
				} else if (prot & PROT_WRITE) {
					error = EPERM;
					goto done;
				}
			} else if ((prot & PROT_WRITE) != 0) {
				error = EACCES;
				goto done;
			}
		} else {
			maxprot |= VM_PROT_WRITE;
		}
		handle = (void *)vp;
	}
	error = vm_mmap(&vms->vm_map, &addr, size, prot,
	    maxprot, flags, handle, pos);
	if (error == 0 && addr_arg)
		*resp = (void *)addr;
done:
	return (error);
}
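
/*
 * Example (a minimal sketch, not part of the original file): fp_mmap()
 * maps a previously fp_open()ed vnode into the current process's
 * address space, so it must be called from a process context.  Note
 * that *resp is only written when a non-NULL address hint is passed;
 * the hint and length below are illustrative only.
 */
#if 0
static int
fp_mmap_example(struct file *fp, void **basep)
{
	void *hint = (void *)(uintptr_t)PAGE_SIZE; /* non-NULL so *basep is set */

	/* map the first 64KB of the file read-only, shared */
	return (fp_mmap(hint, 65536, PROT_READ, MAP_SHARED, fp, 0, basep));
}
#endif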

int
fp_close(file_t fp)
{
	return (fdrop(fp, curthread));
}

int
fp_shutdown(file_t fp, int how)
{
	return (fo_shutdown(fp, how, curthread));
}