[dragonfly.git] / sys / kern / vfs_vnops.c
Commit: Disallow writes to filesystems mounted read-only via NULLFS. In this case ...
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_vnops.c	8.2 (Berkeley) 1/21/94
 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_vnops.c,v 1.47 2006/09/18 17:42:27 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/conf.h>
#include <sys/syslog.h>

static int ncp_writechk(struct namecache *ncp);

static int vn_closefile (struct file *fp);
static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
		struct ucred *cred);
static int vn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_read (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int vn_poll (struct file *fp, int events, struct ucred *cred);
static int vn_kqfilter (struct file *fp, struct knote *kn);
static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
static int vn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);
static int svn_write (struct file *fp, struct uio *uio,
		struct ucred *cred, int flags);

struct fileops vnode_fileops = {
	.fo_read = vn_read,
	.fo_write = vn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

struct fileops specvnode_fileops = {
	.fo_read = svn_read,
	.fo_write = svn_write,
	.fo_ioctl = vn_ioctl,
	.fo_poll = vn_poll,
	.fo_kqfilter = vn_kqfilter,
	.fo_stat = vn_statfile,
	.fo_close = vn_closefile,
	.fo_shutdown = nofo_shutdown
};

/*
 * Shortcut the device read/write.  This avoids a lot of vnode junk.
 * Basically the specfs vnops for read and write take the locked vnode,
 * unlock it (because we can't hold the vnode locked while reading or writing
 * a device which may block indefinitely), issue the device operation, then
 * relock the vnode before returning, plus other junk.  This bypasses all
 * of that and just does the device operation.
 */
void
vn_setspecops(struct file *fp)
{
	if (vfs_fastdev && fp->f_ops == &vnode_fileops) {
		fp->f_ops = &specvnode_fileops;
	}
}

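/*
 * Usage sketch (assumed, not shown in this file): the open path can call
 * vn_setspecops(fp) after a device vnode has been installed in a file
 * pointer, so that later read(2)/write(2) calls dispatch through
 * specvnode_fileops straight to dev_dread()/dev_dwrite() instead of the
 * VOP table.  The vfs_fastdev global (assumed to be a tunable) acts as a
 * kill switch: when it is 0, f_ops is left pointing at the fully generic
 * vnode_fileops path.
 */
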
/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_OPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	struct namecache *ncp;
	int mode, error;

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_ncp will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			nd->nl_flags |= NLC_FOLLOW;
		nd->nl_flags |= NLC_CREATE;
		bwillwrite();
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);
	ncp = nd->nl_ncp;

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (ncp->nc_vp == NULL) {
			if ((error = ncp_writechk(ncp)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(ncp, &vp, nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(ncp, cred,
						   LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		error = cache_vget(ncp, cred, LK_EXCLUSIVE, &vp);
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_ncp is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		mode = 0;
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, ncp);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(ncp);
					error = cache_resolve(ncp, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
			mode |= VWRITE;
		}
		if (fmode & FREAD)
			mode |= VREAD;
		if (mode) {
			error = VOP_ACCESS(vp, mode, cred);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(ncp);
					error = cache_resolve(ncp, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.  f_ncp inherits
	 * nl_ncp.
	 */
	if (fp) {
		if (vp->v_type == VDIR) {
			fp->f_ncp = nd->nl_ncp;
			nd->nl_ncp = NULL;
			cache_unlock(fp->f_ncp);
		}
	}

	/*
	 * Get rid of nl_ncp.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_ncp locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0
	 */
	if (nd->nl_ncp) {
		cache_put(nd->nl_ncp);
		nd->nl_ncp = NULL;
	}

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode, since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}

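/*
 * Usage sketch (assumed caller, based on the contract described above;
 * not taken verbatim from the tree): open a path for reading from kernel
 * code, taking the vnode rather than a file pointer.
 *
 *	struct nlookupdata nd;
 *	struct vnode *vp = NULL;
 *	int error;
 *
 *	error = nlookup_init(&nd, "/some/path", UIO_SYSSPACE, NLC_FOLLOW);
 *	if (error == 0)
 *		error = vn_open(&nd, NULL, FREAD, 0);
 *	if (error == 0) {
 *		vp = nd.nl_open_vp;	(referenced; locked only w/NLC_LOCKVP)
 *		nd.nl_open_vp = NULL;	(detach it from nd before cleanup)
 *	}
 *	nlookup_done(&nd);		(required on success and failure)
 *	if (vp)
 *		vn_close(vp, FREAD);	(eventually releases the reference)
 */
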
/*
 * Check for write permissions on the specified vnode.
 */
int
vn_writechk(struct vnode *vp, struct namecache *ncp)
{
	/*
	 * If there's shared text associated with the vnode, we can't
	 * allow writing.
	 */
	if (vp->v_flag & VTEXT)
		return (ETXTBSY);

	/*
	 * If the vnode represents a regular file, check the mount
	 * point via the ncp.  This may be a different mount point
	 * than the one embedded in the vnode (e.g. nullfs).
	 *
	 * We can still write to non-regular files (e.g. devices)
	 * via read-only mounts.
	 */
	if (ncp && vp->v_type == VREG)
		return (ncp_writechk(ncp));
	return (0);
}

/*
 * Check whether the underlying mount is read-only.  The mount point
 * referenced by the namecache may be different from the mount point
 * used by the underlying vnode in the case of NULLFS, so a separate
 * check is needed.
 */
static
int
ncp_writechk(struct namecache *ncp)
{
	if (ncp->nc_mount && (ncp->nc_mount->mnt_flag & MNT_RDONLY))
		return (EROFS);
	return (0);
}

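/*
 * Example of the case ncp_writechk() exists for (assumed setup, not from
 * this file): a read-write filesystem re-exported read-only via a nullfs
 * mount, e.g.
 *
 *	mount_null -o ro /usr/src /build/src
 *
 * A vnode reached through /build/src has v_mount pointing at the
 * underlying read-write mount, so a v_mount-only check would let the
 * write through.  The namecache entry, however, carries the nullfs
 * mount in nc_mount, where ncp_writechk() sees MNT_RDONLY and returns
 * EROFS.
 */
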
/*
 * Vnode close call
 */
int
vn_close(struct vnode *vp, int flags)
{
	int error;

	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) == 0) {
		error = VOP_CLOSE(vp, flags);
		vn_unlock(vp);
	}
	vrele(vp);
	return (error);
}

static __inline
int
sequential_heuristic(struct uio *uio, struct file *fp)
{
	/*
	 * Sequential heuristic - detect sequential operation
	 */
	if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
	    uio->uio_offset == fp->f_nextoff) {
		int tmpseq = fp->f_seqcount;
		/*
		 * XXX we assume that the filesystem block size is
		 * the default.  Not true, but still gives us a pretty
		 * good indicator of how sequential the read operations
		 * are.
		 */
		tmpseq += (uio->uio_resid + BKVASIZE - 1) / BKVASIZE;
		if (tmpseq > IO_SEQMAX)
			tmpseq = IO_SEQMAX;
		fp->f_seqcount = tmpseq;
		return (fp->f_seqcount << IO_SEQSHIFT);
	}

	/*
	 * Not sequential, quick draw-down of seqcount
	 */
	if (fp->f_seqcount > 1)
		fp->f_seqcount = 1;
	else
		fp->f_seqcount = 0;
	return (0);
}

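/*
 * Worked example (assuming the usual 16KB BKVASIZE): a process issuing
 * back-to-back 64KB reads hits the uio_offset == f_nextoff case each
 * time, so every read adds 64K/16K = 4 to f_seqcount until it saturates
 * at IO_SEQMAX.  The returned value, f_seqcount << IO_SEQSHIFT, rides in
 * the ioflag word passed to VOP_READ/VOP_WRITE, where the filesystem can
 * use it to scale read-ahead and clustering.  A single non-contiguous
 * access draws f_seqcount back down toward zero rather than resetting
 * it outright.
 */
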
/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
	off_t offset, enum uio_seg segflg, int ioflg,
	struct ucred *cred, int *aresid)
{
	struct uio auio;
	struct iovec aiov;
	struct ccms_lock ccms_lock;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_segflg = segflg;
	auio.uio_rw = rw;
	auio.uio_td = curthread;
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, &auio);
	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if (aresid)
		*aresid = auio.uio_resid;
	else
		if (auio.uio_resid && error == 0)
			error = EIO;
	if ((ioflg & IO_NODELOCKED) == 0)
		vn_unlock(vp);
	return (error);
}

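/*
 * Usage sketch (assumed, not a caller from this file): read the first
 * 512 bytes of a vnode into a kernel buffer.  With aresid == NULL a
 * short transfer is converted to EIO; with a pointer the caller can see
 * how much was left undone.
 *
 *	char buf[512];
 *	int resid, error, nread;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), (off_t)0,
 *			UIO_SYSSPACE, 0, cred, &resid);
 *	if (error == 0)
 *		nread = sizeof(buf) - resid;
 */
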
/*
 * Package up an I/O request on a vnode into a uio and do it.  The I/O
 * request is split up into smaller chunks and we try to avoid saturating
 * the buffer cache while potentially holding a vnode locked, so we
 * check bwillwrite() before calling vn_rdwr().  We also call uio_yield()
 * to give other processes a chance to lock the vnode (either other processes
 * core'ing the same binary, or unrelated processes scanning the directory).
 */
int
vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
		 off_t offset, enum uio_seg segflg, int ioflg,
		 struct ucred *cred, int *aresid)
{
	int error = 0;

	do {
		int chunk;

		/*
		 * Force `offset' to a multiple of MAXBSIZE except possibly
		 * for the first chunk, so that filesystems only need to
		 * write full blocks except possibly for the first and last
		 * chunks.
		 */
		chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;

		if (chunk > len)
			chunk = len;
		if (rw != UIO_READ && vp->v_type == VREG)
			bwillwrite();
		error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
				ioflg, cred, aresid);
		len -= chunk;	/* aresid calc already includes length */
		if (error)
			break;
		offset += chunk;
		base += chunk;
		uio_yield();
	} while (len);
	if (aresid)
		*aresid += len;
	return (error);
}

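/*
 * Worked example of the alignment math (assuming MAXBSIZE is 64KB): for
 * a 200000 byte write starting at offset 100000, the first chunk is
 * 65536 - (100000 % 65536) = 31072 bytes, which advances the offset to
 * 131072, an exact multiple of MAXBSIZE.  The remaining chunks are then
 * 65536, 65536, and a 37856 byte tail, so only the first and last
 * transfers can be partial blocks.
 */
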
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	vn_lock(vp, LK_SHARED | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);

	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_READ(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	rel_mplock();
	return (error);
}

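/*
 * Note on the flag handling above (a summary of the convention as it
 * reads from this file): the O_F* values in `flags' are per-call
 * overrides supplied by the caller of fo_read/fo_write, and each
 * three-way cascade gives them priority over the sticky fp->f_flag
 * state set at open time.  O_FOFFSET means the uio supplies its own
 * offset (pread/pwrite style), in which case fp->f_offset is neither
 * used nor updated; only the read-ahead hint fp->f_nextoff is
 * maintained.
 */
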
/*
 * Device-optimized file table vnode read routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if (uio->uio_resid == 0) {
		/* don't leak the device reference on the 0-byte path */
		release_dev(dev);
		error = 0;
		goto done;
	}
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = 0;
	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dread(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct ccms_lock ccms_lock;
	struct vnode *vp;
	int error, ioflag;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));
	vp = (struct vnode *)fp->f_data;
	if (vp->v_type == VREG)
		bwillwrite();
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;
	ioflag |= sequential_heuristic(uio, fp);
	ccms_lock_get_uio(&vp->v_ccms, &ccms_lock, uio);
	error = VOP_WRITE(vp, uio, ioflag, cred);
	ccms_lock_put(&vp->v_ccms, &ccms_lock);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
	vn_unlock(vp);
	rel_mplock();
	return (error);
}

/*
 * Device-optimized file table vnode write routine.
 *
 * This bypasses the VOP table and talks directly to the device.  Most
 * filesystems just route to specfs and can make this optimization.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static int
svn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
{
	struct vnode *vp;
	int ioflag;
	int error;
	cdev_t dev;

	get_mplock();
	KASSERT(uio->uio_td == curthread,
		("uio_td %p is not td %p", uio->uio_td, curthread));

	vp = (struct vnode *)fp->f_data;
	if (vp == NULL || vp->v_type == VBAD) {
		error = EBADF;
		goto done;
	}
	if (vp->v_type == VREG)
		bwillwrite();
	vp = (struct vnode *)fp->f_data;	/* XXX needed? */

	if ((dev = vp->v_rdev) == NULL) {
		error = EBADF;
		goto done;
	}
	reference_dev(dev);

	if ((flags & O_FOFFSET) == 0)
		uio->uio_offset = fp->f_offset;

	ioflag = IO_UNIT;
	if (vp->v_type == VREG &&
	    ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
		ioflag |= IO_APPEND;
	}

	if (flags & O_FBLOCKING) {
		/* ioflag &= ~IO_NDELAY; */
	} else if (flags & O_FNONBLOCKING) {
		ioflag |= IO_NDELAY;
	} else if (fp->f_flag & FNONBLOCK) {
		ioflag |= IO_NDELAY;
	}
	if (flags & O_FBUFFERED) {
		/* ioflag &= ~IO_DIRECT; */
	} else if (flags & O_FUNBUFFERED) {
		ioflag |= IO_DIRECT;
	} else if (fp->f_flag & O_DIRECT) {
		ioflag |= IO_DIRECT;
	}
	if (flags & O_FASYNCWRITE) {
		/* ioflag &= ~IO_SYNC; */
	} else if (flags & O_FSYNCWRITE) {
		ioflag |= IO_SYNC;
	} else if (fp->f_flag & O_FSYNC) {
		ioflag |= IO_SYNC;
	}

	if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
		ioflag |= IO_SYNC;
	ioflag |= sequential_heuristic(uio, fp);

	error = dev_dwrite(dev, uio, ioflag);

	release_dev(dev);
	if ((flags & O_FOFFSET) == 0)
		fp->f_offset = uio->uio_offset;
	fp->f_nextoff = uio->uio_offset;
done:
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	get_mplock();
	vp = (struct vnode *)fp->f_data;
	error = vn_stat(vp, sb, cred);
	rel_mplock();
	return (error);
}

int
vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
{
	struct vattr vattr;
	struct vattr *vap;
	int error;
	u_short mode;
	cdev_t dev;

	vap = &vattr;
	error = VOP_GETATTR(vp, vap);
	if (error)
		return (error);

	/*
	 * Zero the spare stat fields
	 */
	sb->st_lspare = 0;
	sb->st_qspare = 0;

	/*
	 * Copy from vattr table
	 */
	if (vap->va_fsid != VNOVAL)
		sb->st_dev = vap->va_fsid;
	else
		sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
	sb->st_ino = vap->va_fileid;
	mode = vap->va_mode;
	switch (vap->va_type) {
	case VREG:
		mode |= S_IFREG;
		break;
	case VDIR:
		mode |= S_IFDIR;
		break;
	case VBLK:
		mode |= S_IFBLK;
		break;
	case VCHR:
		mode |= S_IFCHR;
		break;
	case VLNK:
		mode |= S_IFLNK;
		/* This is a cosmetic change, symlinks do not have a mode. */
		if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
			mode &= ~ACCESSPERMS;	/* 0000 */
		else
			mode |= ACCESSPERMS;	/* 0777 */
		break;
	case VSOCK:
		mode |= S_IFSOCK;
		break;
	case VFIFO:
		mode |= S_IFIFO;
		break;
	default:
		return (EBADF);
	}
	sb->st_mode = mode;
	sb->st_nlink = vap->va_nlink;
	sb->st_uid = vap->va_uid;
	sb->st_gid = vap->va_gid;
	sb->st_rdev = vap->va_rdev;
	sb->st_size = vap->va_size;
	sb->st_atimespec = vap->va_atime;
	sb->st_mtimespec = vap->va_mtime;
	sb->st_ctimespec = vap->va_ctime;

	/*
	 * A VCHR and VBLK device may track the last access and last modified
	 * time independently of the filesystem.  This is particularly true
	 * because device read and write calls may bypass the filesystem.
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK) {
		if ((dev = vp->v_rdev) != NULL) {
			if (dev->si_lastread) {
				sb->st_atimespec.tv_sec = dev->si_lastread;
				sb->st_atimespec.tv_nsec = 0;
			}
			if (dev->si_lastwrite) {
				sb->st_mtimespec.tv_sec = dev->si_lastwrite;
				sb->st_mtimespec.tv_nsec = 0;
			}
		}
	}

	/*
	 * According to www.opengroup.org, the meaning of st_blksize is
	 * "a filesystem-specific preferred I/O block size for this
	 * object.  In some filesystem types, this may vary from file
	 * to file"
	 * Default to PAGE_SIZE after much discussion.
	 */

	if (vap->va_type == VREG) {
		sb->st_blksize = vap->va_blocksize;
	} else if (vn_isdisk(vp, NULL)) {
		/*
		 * XXX this is broken.  If the device is not yet open (aka
		 * stat() call, aka v_rdev == NULL), how are we supposed
		 * to get a valid block size out of it?
		 */
		cdev_t dev;

		if ((dev = vp->v_rdev) == NULL)
			dev = udev2dev(vp->v_udev, vp->v_type == VBLK);
		sb->st_blksize = dev->si_bsize_best;
		if (sb->st_blksize < dev->si_bsize_phys)
			sb->st_blksize = dev->si_bsize_phys;
		if (sb->st_blksize < BLKDEV_IOSIZE)
			sb->st_blksize = BLKDEV_IOSIZE;
	} else {
		sb->st_blksize = PAGE_SIZE;
	}

	sb->st_flags = vap->va_flags;
	if (suser_cred(cred, 0))
		sb->st_gen = 0;
	else
		sb->st_gen = vap->va_gen;

#if (S_BLKSIZE == 512)
	/* Optimize this case */
	sb->st_blocks = vap->va_bytes >> 9;
#else
	sb->st_blocks = vap->va_bytes / S_BLKSIZE;
#endif
	sb->st_fsmid = vap->va_fsmid;
	return (0);
}

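/*
 * Note on the st_blocks computation above: st_blocks counts
 * S_BLKSIZE-byte units regardless of st_blksize, and since 2^9 == 512
 * the `>> 9' branch is just a strength-reduced division for the common
 * S_BLKSIZE == 512 case.  For example, a file with va_bytes == 12288
 * reports st_blocks == 24.
 */
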
/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred)
{
	struct vnode *vp = ((struct vnode *)fp->f_data);
	struct vnode *ovp;
	struct vattr vattr;
	int error;

	get_mplock();

	switch (vp->v_type) {
	case VREG:
	case VDIR:
		if (com == FIONREAD) {
			if ((error = VOP_GETATTR(vp, &vattr)) != 0)
				break;
			*(int *)data = vattr.va_size - fp->f_offset;
			error = 0;
			break;
		}
		if (com == FIOASYNC) {			/* XXX */
			error = 0;			/* XXX */
			break;
		}
		/* fall into ... */
	default:
#if 0
		return (ENOTTY);
#endif
	case VFIFO:
	case VCHR:
	case VBLK:
		if (com == FIODTYPE) {
			if (vp->v_type != VCHR && vp->v_type != VBLK) {
				error = ENOTTY;
				break;
			}
			*(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
			error = 0;
			break;
		}
		error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred);
		if (error == 0 && com == TIOCSCTTY) {
			struct proc *p = curthread->td_proc;
			struct session *sess;

			if (p == NULL) {
				error = ENOTTY;
				break;
			}

			sess = p->p_session;
			/* Do nothing if reassigning same control tty */
			if (sess->s_ttyvp == vp) {
				error = 0;
				break;
			}

			/* Get rid of reference to old control tty */
			ovp = sess->s_ttyvp;
			vref(vp);
			sess->s_ttyvp = vp;
			if (ovp)
				vrele(ovp);
		}
		break;
	}
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_poll(struct file *fp, int events, struct ucred *cred)
{
	int error;

	get_mplock();
	error = VOP_POLL(((struct vnode *)fp->f_data), events, cred);
	rel_mplock();
	return (error);
}

/*
 * Check that the vnode is still valid, and if so
 * acquire requested lock.
 */
int
#ifndef	DEBUG_LOCKS
vn_lock(struct vnode *vp, int flags)
#else
debug_vn_lock(struct vnode *vp, int flags, const char *filename, int line)
#endif
{
	int error;

	do {
#ifdef	DEBUG_LOCKS
		vp->filename = filename;
		vp->line = line;
		error = debuglockmgr(&vp->v_lock, flags,
				     "vn_lock", filename, line);
#else
		error = lockmgr(&vp->v_lock, flags);
#endif
		if (error == 0)
			break;
	} while (flags & LK_RETRY);

	/*
	 * Because we (had better!) have a ref on the vnode, once it
	 * goes to VRECLAIMED state it will not be recycled until all
	 * refs go away.  So we can just check the flag.
	 */
	if (error == 0 && (vp->v_flag & VRECLAIMED)) {
		lockmgr(&vp->v_lock, LK_RELEASE);
		error = ENOENT;
	}
	return (error);
}

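/*
 * Usage sketch (assumed, matching the semantics above): with LK_RETRY
 * the lockmgr call is retried until it succeeds, so the only error a
 * caller has to handle is ENOENT for a vnode that was reclaimed while
 * we slept on the lock.
 *
 *	if (vn_lock(vp, LK_EXCLUSIVE | LK_RETRY) == 0) {
 *		... operate on the locked, still-valid vnode ...
 *		vn_unlock(vp);
 *	}
 *
 * A caller that cannot tolerate blocking would pass LK_NOWAIT instead
 * and also handle the lockmgr failure return.
 */
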
void
vn_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

int
vn_islocked(struct vnode *vp)
{
	return (lockstatus(&vp->v_lock, curthread));
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_closefile(struct file *fp)
{
	int error;

	get_mplock();
	fp->f_ops = &badfileops;
	error = vn_close(((struct vnode *)fp->f_data), fp->f_flag);
	rel_mplock();
	return (error);
}

/*
 * MPALMOSTSAFE - acquires mplock
 */
static int
vn_kqfilter(struct file *fp, struct knote *kn)
{
	int error;

	get_mplock();
	error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
	rel_mplock();
	return (error);
}