Register keyword removal
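This commit strips the archaic "register" storage-class qualifier from local
variable declarations; compilers have long ignored the hint when allocating
registers. A minimal sketch of the pattern applied throughout (illustrative,
not a literal hunk from this diff):

	/* before */
	register struct vnode *vp;
	register int error;

	/* after */
	struct vnode *vp;
	int error;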
sys/kern/vfs_vnops.c
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.10 2003/07/26 19:42:11 rob Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>
static int vn_closefile __P((struct file *fp, struct thread *td));
static int vn_ioctl __P((struct file *fp, u_long com, caddr_t data,
		struct thread *td));
static int vn_read __P((struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td));
static int vn_poll __P((struct file *fp, int events, struct ucred *cred,
		struct thread *td));
static int vn_kqfilter __P((struct file *fp, struct knote *kn));
static int vn_statfile __P((struct file *fp, struct stat *sb,
		struct thread *td));
static int vn_write __P((struct file *fp, struct uio *uio,
		struct ucred *cred, int flags, struct thread *td));

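/*
 * Operations vector for vnode-backed file descriptors.  Each entry
 * implements the corresponding file-level operation (read, write,
 * ioctl, poll, kqueue attach, stat, close) in terms of VOP_* calls.
 */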
struct fileops vnops = {
	vn_read, vn_write, vn_ioctl, vn_poll, vn_kqfilter,
	vn_statfile, vn_closefile
};

/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * Note that this does NOT free nameidata for the successful case,
 * due to the NDINIT being done elsewhere.
 */
int
vn_open(ndp, fmode, cmode)
	struct nameidata *ndp;
	int fmode, cmode;
{
	struct vnode *vp;
	struct thread *td = ndp->ni_cnd.cn_td;
	struct ucred *cred = ndp->ni_cnd.cn_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int mode, error;

	KKASSERT(cred == td->td_proc->p_ucred);

	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		bwillwrite();
		error = namei(ndp);
		if (error)
			return (error);
		if (ndp->ni_vp == NULL) {
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			VOP_LEASE(ndp->ni_dvp, td, cred, LEASE_WRITE);
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
					   &ndp->ni_cnd, vap);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				vput(ndp->ni_dvp);
				return (error);
			}
			vput(ndp->ni_dvp);
			ASSERT_VOP_UNLOCKED(ndp->ni_dvp, "create");
			ASSERT_VOP_LOCKED(ndp->ni_vp, "create");
			fmode &= ~O_TRUNC;
			vp = ndp->ni_vp;
		} else {
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
		error = namei(ndp);
		if (error)
			return (error);
		vp = ndp->ni_vp;
	}
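	/*
	 * A symlink normally only reaches this point via O_NOFOLLOW
	 * (the lookup returned the link itself); opening it is refused
	 * with the historic BSD errno EMLINK.
	 */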
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		mode = 0;
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp);
			if (error)
				goto bad;
			mode |= VWRITE;
		}
		if (fmode & FREAD)
			mode |= VREAD;
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred, td);
			if (error)
				goto bad;
		}
	}
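	/*
	 * Truncate-on-open: the vnode lock is cycled around VOP_LEASE
	 * (see the XXX markers), then the file is cut to zero length
	 * via VOP_SETATTR.
	 */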
	if (fmode & O_TRUNC) {
		VOP_UNLOCK(vp, 0, td);				/* XXX */
		VOP_LEASE(vp, td, cred, LEASE_WRITE);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred, td);
		if (error)
			goto bad;
	}
	error = VOP_OPEN(vp, fmode, cred, td);
	if (error)
		goto bad;
	/*
	 * Make sure that a VM object is created for VMIO support.
	 */
	if (vn_canvmio(vp) == TRUE) {
		if ((error = vfs_object_create(vp, td)) != 0)
			goto bad;
	}

	if (fmode & FWRITE)
		vp->v_writecount++;
	return (0);
bad:
	NDFREE(ndp, NDF_ONLY_PNBUF);
	vput(vp);
	return (error);
}

/*
 * Check for write permissions on the specified vnode.
 * A vnode that is the backing store for an executing program's
 * text cannot be written; the attempt fails with ETXTBSY.
 */
int
vn_writechk(vp)
	struct vnode *vp;
{
	/*
	 * If there is shared text associated with the vnode,
	 * disallow the write.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);
	return (0);
}

/*
 * Vnode close call.  Drops the write count if the file was open
 * for writing, then releases the caller's reference after VOP_CLOSE.
 */
int
vn_close(struct vnode *vp, int flags, struct thread *td)
{
	int error;

	if (flags & FWRITE)
		vp->v_writecount--;
	error = VOP_CLOSE(vp, flags, td);
	vrele(vp);
	return (error);
}

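/*
 * Estimate how sequential a file's accesses are and return a
 * read-ahead hint: the per-file sequential count shifted into the
 * IO_SEQ* bits of an ioflag word, or 0 when access looks random.
 */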
static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return(fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return(0);
}

/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *cred;
	int *aresid;
	struct thread *td;
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = td;
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	if (aresid)
		*aresid = auio.uio_resid;
	else if (auio.uio_resid && error == 0)
		error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp, 0, td);
	return (error);
}

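/*
 * Typical in-kernel usage (a sketch; the caller holds a reference on
 * 'vp' and supplies its own thread pointer and credentials):
 *
 *	char buf[512];
 *	int resid;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), (off_t)0,
 *	    UIO_SYSSPACE, 0, cred, &resid, td);
 */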
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * call bwillwrite() before each chunk is written.  We also call
 * uio_yield() to give other processes a chance to lock the vnode
 * (either other processes core'ing the same binary, or unrelated
 * processes scanning the directory).
 */
int
vn_rdwr_inchunks(rw, vp, base, len, offset, segflg, ioflg, cred, aresid, td)
	enum uio_rw rw;
	struct vnode *vp;
	caddr_t base;
	int len;
	off_t offset;
	enum uio_seg segflg;
	int ioflg;
	struct ucred *cred;
	int *aresid;
	struct thread *td;
{
	int error = 0;

	do {
		int chunk = (len > MAXBSIZE) ? MAXBSIZE : len;

		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid, td);
		len -= chunk;	/* vn_rdwr() already counted this chunk in *aresid */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

/*
 * File table vnode read routine.
 */
static int
vn_read(fp, uio, cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	int flags;
	struct thread *td;
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td));
	vp = (struct vnode *)fp->f_data;
	ioflag = 0;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	VOP_LEASE(vp, td, cred, LEASE_READ);
	vn_lock(vp, LK_SHARED | LK_NOPAUSE | LK_RETRY, td);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag |= sequential_heuristic(uio, fp);

	error = VOP_READ(vp, uio, ioflag, cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	return (error);
}

/*
 * File table vnode write routine.
 */
static int
vn_write(fp, uio, cred, flags, td)
	struct file *fp;
	struct uio *uio;
	struct ucred *cred;
	int flags;
	struct thread *td;
{
	struct vnode *vp;
	int error, ioflag;

	KASSERT(uio->uio_td == td, ("uio_td %p is not td %p",
	    uio->uio_td, td));
	vp = (struct vnode *)fp->f_data;
	if (vp->v_type == VREG)
		bwillwrite();
	ioflag = IO_UNIT;
	if (vp->v_type == VREG && (fp->f_flag & O_APPEND))
		ioflag |= IO_APPEND;
	if (fp->f_flag & FNONBLOCK)
		ioflag |= IO_NDELAY;
	if (fp->f_flag & O_DIRECT)
		ioflag |= IO_DIRECT;
	if ((fp->f_flag & O_FSYNC) ||
	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
		ioflag |= IO_SYNC;
	VOP_LEASE(vp, td, cred, LEASE_WRITE);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	if ((flags & FOF_OFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	if ((flags & FOF_OFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	VOP_UNLOCK(vp, 0, td);
	return (error);
}

/*
 * File table vnode stat routine.
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct thread *td)
{
	struct vnode *vp = (struct vnode *)fp->f_data;

	return vn_stat(vp, sb, td);
}

int
vn_stat(struct vnode *vp, struct stat *sb, struct thread *td)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap, td);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare[0] = 0;
	sb->st_qspare[1] = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/*
		 * Cosmetic only: symlinks do not really have a mode.
		 * Adjust the local 'mode', not sb->st_mode, since
		 * st_mode is not assigned until after this switch.
		 */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file".
	 * Default to PAGE_SIZE after much discussion.
	 */
	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		sb->st_blksize = vp->v_rdev->si_bsize_best;
		if (sb->st_blksize < vp->v_rdev->si_bsize_phys)
			sb->st_blksize = vp->v_rdev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;
	if (suser(td))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	return (0);
}

/*
 * File table vnode ioctl routine.
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct thread *td)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct ucred *ucred;
	struct vattr vattr;
	int error;

	KKASSERT(td->td_proc != NULL);
	ucred = td->td_proc->p_ucred;

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			error = VOP_GETATTR(vp, &vattr, td);
			if (error)
				return (error);
			*(int *)data = vattr.va_size - fp->f_offset;
			return (0);
		}
		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
			return (0);			/* XXX */
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK)
				return (ENOTTY);
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			return (0);
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, td);
		if (error == 0 && com == TIOCSCTTY) {
			struct session *sess = td->td_proc->p_session;

			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp)
				return (0);

			/* Get rid of reference to old control tty */
			if (sess->s_ttyvp)
				vrele(sess->s_ttyvp);

			sess->s_ttyvp = vp;
			VREF(vp);
		}
		return (error);
	}
}

/*
 * File table vnode poll routine.
 */
static int
vn_poll(struct file *fp, int events, struct ucred *cred, struct thread *td)
{
	return (VOP_POLL(((struct vnode *)fp->f_data), events, cred, td));
}

/*
 * Check that the vnode is still valid, and if so
 * acquire the requested lock.  Fails with ENOENT if the
 * vnode is in the process of being recycled.
 */
int
#ifndef DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags, struct thread *td)
#else
debug_vn_lock(struct vnode *vp, int flags, struct thread *td,
	      const char *filename, int line)
#endif
{
	int error;

	do {
		if ((flags & LK_INTERLOCK) == 0)
			lwkt_gettoken(&vp->v_interlock);
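		/*
		 * If another thread is recycling the vnode (VXLOCK set),
		 * wait for the recycle to complete and fail with ENOENT
		 * rather than handing back a dead vnode.
		 */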
		if ((vp->v_flag & VXLOCK) && vp->v_vxproc != curproc) {
			vp->v_flag |= VXWANT;
			lwkt_reltoken(&vp->v_interlock);
			tsleep((caddr_t)vp, 0, "vn_lock", 0);
			error = ENOENT;
		} else {
#if 0
			/* this can now occur in normal operation */
			if (vp->v_vxproc != NULL)
				log(LOG_INFO, "VXLOCK interlock avoided in vn_lock\n");
#endif
#ifdef DEBUG_LOCKS
			vp->filename = filename;
			vp->line = line;
#endif
			error = VOP_LOCK(vp,
				    flags | LK_NOPAUSE | LK_INTERLOCK, td);
			if (error == 0)
				return (error);
		}
		flags &= ~LK_INTERLOCK;
	} while (flags & LK_RETRY);
	return (error);
}

/*
 * File table vnode close routine.
 */
static int
vn_closefile(struct file *fp, struct thread *td)
{
	int err;

	fp->f_ops = &badfileops;
	err = vn_close(((struct vnode *)fp->f_data), fp->f_flag, td);
	return(err);
}

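/*
 * File table vnode kqueue filter routine.
 */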
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	return (VOP_KQFILTER(((struct vnode *)fp->f_data), kn));
}