kernel - Add trigger_syncer(), VFS_MODIFYING()
[dragonfly.git] / sys / kern / vfs_vnops.c
1/*
2 * Copyright (c) 1982, 1986, 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
dc71b7ab 18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)vfs_vnops.c 8.2 (Berkeley) 1/21/94
35 * $FreeBSD: src/sys/kern/vfs_vnops.c,v 1.87.2.13 2002/12/29 18:19:53 dillon Exp $
36 */
37
38#include <sys/param.h>
39#include <sys/systm.h>
40#include <sys/fcntl.h>
41#include <sys/file.h>
42#include <sys/stat.h>
43#include <sys/proc.h>
895c1f85 44#include <sys/priv.h>
984263bc 45#include <sys/mount.h>
fad57d0e 46#include <sys/nlookup.h>
47#include <sys/vnode.h>
48#include <sys/buf.h>
49#include <sys/filio.h>
50#include <sys/ttycom.h>
51#include <sys/conf.h>
c0885fab 52#include <sys/sysctl.h>
53#include <sys/syslog.h>
54
c0885fab 55#include <sys/thread2.h>
684a93c4 56#include <sys/mplock2.h>
c0885fab 57
58static int vn_closefile (struct file *fp);
59static int vn_ioctl (struct file *fp, u_long com, caddr_t data,
87baaf0c 60 struct ucred *cred, struct sysmsg *msg);
0a80a445 61static int vn_read (struct file *fp, struct uio *uio,
87de5057 62 struct ucred *cred, int flags);
402ed7e1 63static int vn_kqfilter (struct file *fp, struct knote *kn);
87de5057 64static int vn_statfile (struct file *fp, struct stat *sb, struct ucred *cred);
0a80a445 65static int vn_write (struct file *fp, struct uio *uio,
87de5057 66 struct ucred *cred, int flags);
984263bc 67
fad57d0e 68struct fileops vnode_fileops = {
69 .fo_read = vn_read,
70 .fo_write = vn_write,
71 .fo_ioctl = vn_ioctl,
72 .fo_kqfilter = vn_kqfilter,
73 .fo_stat = vn_statfile,
74 .fo_close = vn_closefile,
75 .fo_shutdown = nofo_shutdown
76};
77
78/*
79 * Common code for vnode open operations. Check permissions, and call
80 * the VOP_NOPEN or VOP_NCREATE routine.
81 *
82 * The caller is responsible for setting up nd with nlookup_init() and
83 * for cleaning it up with nlookup_done(), whether we return an error
84 * or not.
85 *
86 * On success nd->nl_open_vp will hold a referenced and, if requested,
87 * locked vnode. A locked vnode is requested via NLC_LOCKVP. If fp
88 * is non-NULL the vnode will be installed in the file pointer.
89 *
90 * NOTE: If the caller wishes the namecache entry to be operated with
91 * a shared lock it must use NLC_SHAREDLOCK. If NLC_LOCKVP is set
92 * then the vnode lock will also be shared.
93 *
fad57d0e 94 * NOTE: The vnode is referenced just once on return whether or not it
12cdc371 95 * is also installed in the file pointer.
96 */
97int
fad57d0e 98vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
984263bc 99{
1fd87d54 100 struct vnode *vp;
fad57d0e 101 struct ucred *cred = nd->nl_cred;
102 struct vattr vat;
103 struct vattr *vap = &vat;
3a907475 104 int error;
e9b56058 105 u_int flags;
106 uint64_t osize;
107 struct mount *mp;
984263bc 108
109 /*
110 * Certain combinations are illegal
111 */
112 if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
113 return(EACCES);
114
115 /*
116 * Lookup the path and create or obtain the vnode. After a
28623bf9 117 * successful lookup a locked nd->nl_nch will be returned.
118 *
119 * The result of this section should be a locked vnode.
120 *
121 * XXX with only a little work we should be able to avoid locking
122 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
123 */
124 nd->nl_flags |= NLC_OPEN;
125 if (fmode & O_APPEND)
126 nd->nl_flags |= NLC_APPEND;
127 if (fmode & O_TRUNC)
128 nd->nl_flags |= NLC_TRUNCATE;
129 if (fmode & FREAD)
130 nd->nl_flags |= NLC_READ;
131 if (fmode & FWRITE)
132 nd->nl_flags |= NLC_WRITE;
133 if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
134 nd->nl_flags |= NLC_FOLLOW;
3a907475 135
984263bc 136 if (fmode & O_CREAT) {
137 /*
138 * CONDITIONAL CREATE FILE CASE
139 *
140 * Setting NLC_CREATE causes a negative hit to store
141 * the negative hit ncp and not return an error. Then
0a80a445 142 * nc_error or nc_vp may be checked to see if the ncp
143 * represents a negative hit. NLC_CREATE also requires
144 * write permission on the governing directory or EPERM
145 * is returned.
146 */
fad57d0e 147 nd->nl_flags |= NLC_CREATE;
5312fa43 148 nd->nl_flags |= NLC_REFDVP;
c4df9635 149 bwillinode(1);
fad57d0e 150 error = nlookup(nd);
151 } else {
152 /*
153 * NORMAL OPEN FILE CASE
154 */
155 error = nlookup(nd);
156 }
fad57d0e 157
158 if (error)
159 return (error);
fad57d0e 160
161 /*
162 * split case to allow us to re-resolve and retry the ncp in case
163 * we get ESTALE.
164 */
165again:
166 if (fmode & O_CREAT) {
167 if (nd->nl_nch.ncp->nc_vp == NULL) {
168 if ((error = ncp_writechk(&nd->nl_nch)) != 0)
468bb1f9 169 return (error);
170 VATTR_NULL(vap);
171 vap->va_type = VREG;
172 vap->va_mode = cmode;
173 if (fmode & O_EXCL)
174 vap->va_vaflags |= VA_EXCLUSIVE;
5312fa43 175 error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
dff430ab 176 nd->nl_cred, vap);
fad57d0e 177 if (error)
984263bc 178 return (error);
984263bc 179 fmode &= ~O_TRUNC;
fad57d0e 180 /* locked vnode is returned */
984263bc 181 } else {
182 if (fmode & O_EXCL) {
183 error = EEXIST;
fad57d0e 184 } else {
0a80a445 185 error = cache_vget(&nd->nl_nch, cred,
fad57d0e 186 LK_EXCLUSIVE, &vp);
984263bc 187 }
188 if (error)
189 return (error);
190 fmode &= ~O_CREAT;
191 }
192 } else {
193 if (nd->nl_flags & NLC_SHAREDLOCK) {
194 error = cache_vget(&nd->nl_nch, cred, LK_SHARED, &vp);
195 } else {
196 error = cache_vget(&nd->nl_nch, cred,
197 LK_EXCLUSIVE, &vp);
198 }
199 if (error)
200 return (error);
984263bc 201 }
202
203 /*
806dcf9a 204 * We have a locked vnode and ncp now. Note that the ncp will
28623bf9 205 * be cleaned up by the caller if nd->nl_nch is left intact.
fad57d0e 206 */
207 if (vp->v_type == VLNK) {
208 error = EMLINK;
209 goto bad;
210 }
211 if (vp->v_type == VSOCK) {
212 error = EOPNOTSUPP;
213 goto bad;
214 }
215 if (vp->v_type != VDIR && (fmode & O_DIRECTORY)) {
216 error = ENOTDIR;
217 goto bad;
218 }
984263bc 219 if ((fmode & O_CREAT) == 0) {
220 if (fmode & (FWRITE | O_TRUNC)) {
221 if (vp->v_type == VDIR) {
222 error = EISDIR;
223 goto bad;
224 }
28623bf9 225 error = vn_writechk(vp, &nd->nl_nch);
226 if (error) {
227 /*
228 * Special stale handling, re-resolve the
229 * vnode.
230 */
231 if (error == ESTALE) {
232 vput(vp);
233 vp = NULL;
234 if (nd->nl_flags & NLC_SHAREDLOCK) {
235 cache_unlock(&nd->nl_nch);
236 cache_lock(&nd->nl_nch);
237 }
28623bf9 238 cache_setunresolved(&nd->nl_nch);
239 error = cache_resolve(&nd->nl_nch,
240 cred);
241 if (error == 0)
242 goto again;
243 }
984263bc 244 goto bad;
806dcf9a 245 }
246 }
247 }
248 if (fmode & O_TRUNC) {
a11aaa81 249 vn_unlock(vp); /* XXX */
ca466bae 250 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX */
18cd8808 251 osize = vp->v_filesize;
252 VATTR_NULL(vap);
253 vap->va_size = 0;
87de5057 254 error = VOP_SETATTR(vp, vap, cred);
255 if (error)
256 goto bad;
257 error = VOP_GETATTR(vp, vap);
258 if (error)
259 goto bad;
260 mp = vq_vptomp(vp);
261 VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize);
984263bc 262 }
fad57d0e 263
264 /*
265 * Set or clear VNSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
 266 * These particular bits are tracked all the way from the root.
267 *
268 * NOTE: Might not work properly on NFS servers due to the
269 * disconnected namecache.
270 */
271 flags = nd->nl_nch.ncp->nc_flag;
272 if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
273 (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
274 vsetflags(vp, VSWAPCACHE);
275 } else {
276 vclrflags(vp, VSWAPCACHE);
277 }
278
279 /*
280 * Setup the fp so VOP_OPEN can override it. No descriptor has been
0a80a445 281 * associated with the fp yet so we own it clean.
72310cfb 282 *
28623bf9 283 * f_nchandle inherits nl_nch. This used to be necessary only for
284 * directories but now we do it unconditionally so f*() ops
285 * such as fchmod() can access the actual namespace that was
286 * used to open the file.
287 */
288 if (fp) {
289 if (nd->nl_flags & NLC_APPENDONLY)
290 fmode |= FAPPENDONLY;
291 fp->f_nchandle = nd->nl_nch;
292 cache_zero(&nd->nl_nch);
293 cache_unlock(&fp->f_nchandle);
294 }
295
296 /*
297 * Get rid of nl_nch. vn_open does not return it (it returns the
298 * vnode or the file pointer). Note: we can't leave nl_nch locked
299 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
300 * on /dev/ttyd0
301 */
302 if (nd->nl_nch.ncp)
303 cache_put(&nd->nl_nch);
fad57d0e 304
87de5057 305 error = VOP_OPEN(vp, fmode, cred, fp);
306 if (error) {
307 /*
308 * setting f_ops to &badfileops will prevent the descriptor
309 * code from trying to close and release the vnode, since
310 * the open failed we do not want to call close.
311 */
312 if (fp) {
313 fp->f_data = NULL;
314 fp->f_ops = &badfileops;
315 }
984263bc 316 goto bad;
fad57d0e 317 }
fad57d0e 318
7540ab49 319#if 0
984263bc 320 /*
7540ab49 321 * Assert that VREG files have been setup for vmio.
984263bc 322 */
323 KASSERT(vp->v_type != VREG || vp->v_object != NULL,
324 ("vn_open: regular file was not VMIO enabled!"));
325#endif
984263bc 326
327 /*
328 * Return the vnode. XXX needs some cleaning up. The vnode is
8ddc6004 329 * only returned in the fp == NULL case.
330 */
331 if (fp == NULL) {
332 nd->nl_open_vp = vp;
333 nd->nl_vp_fmode = fmode;
334 if ((nd->nl_flags & NLC_LOCKVP) == 0)
a11aaa81 335 vn_unlock(vp);
fad57d0e 336 } else {
8ddc6004 337 vput(vp);
fad57d0e 338 }
339 return (0);
340bad:
341 if (vp)
342 vput(vp);
343 return (error);
344}
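/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a typical kernel caller drives vn_open() through an nlookupdata as the
 * comment above describes.  The function and path names below are
 * hypothetical and error handling is abbreviated.
 */
#if 0
static int
example_open_for_read(const char *path, struct vnode **vpp)
{
	struct nlookupdata nd;
	int error;

	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = vn_open(&nd, NULL, FREAD, 0);
	if (error == 0) {
		/* fp == NULL, so the referenced vnode comes back in nl_open_vp */
		*vpp = nd.nl_open_vp;
		nd.nl_open_vp = NULL;
	}
	nlookup_done(&nd);	/* caller always cleans up the nlookupdata */
	return (error);
}
#endif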
345
346int
347vn_opendisk(const char *devname, int fmode, struct vnode **vpp)
348{
349 struct vnode *vp;
350 int error;
351
352 if (strncmp(devname, "/dev/", 5) == 0)
353 devname += 5;
354 if ((vp = getsynthvnode(devname)) == NULL) {
355 error = ENODEV;
356 } else {
357 error = VOP_OPEN(vp, fmode, proc0.p_ucred, NULL);
358 vn_unlock(vp);
359 if (error) {
360 vrele(vp);
361 vp = NULL;
362 }
363 }
364 *vpp = vp;
365 return (error);
366}
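/*
 * Illustrative sketch (editor's addition): vn_opendisk() accepts the device
 * name with or without the "/dev/" prefix and hands back an unlocked,
 * referenced vnode.  The device name "da0" is hypothetical; one plausible
 * cleanup path is vn_close(), defined earlier in this file.
 */
#if 0
	struct vnode *devvp;
	int error;

	error = vn_opendisk("da0", FREAD, &devvp);
	if (error == 0) {
		/* ... issue I/O against devvp ... */
		vn_close(devvp, FREAD, NULL);
	}
#endif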
367
984263bc 368/*
28623bf9 369 * Check for write permissions on the specified vnode. nch may be NULL.
370 */
371int
28623bf9 372vn_writechk(struct vnode *vp, struct nchandle *nch)
984263bc 373{
374 /*
375 * If there's shared text associated with
376 * the vnode, try to free it up once. If
377 * we fail, we can't allow writing.
378 */
379 if (vp->v_flag & VTEXT)
380 return (ETXTBSY);
381
382 /*
383 * If the vnode represents a regular file, check the mount
28623bf9 384 * point via the nch. This may be a different mount point
 385 * than the one embedded in the vnode (e.g. nullfs).
386 *
387 * We can still write to non-regular files (e.g. devices)
388 * via read-only mounts.
389 */
390 if (nch && nch->ncp && vp->v_type == VREG)
391 return (ncp_writechk(nch));
392 return (0);
393}
394
468bb1f9 395/*
0a80a445 396 * Check whether the underlying mount is read-only. The mount point
397 * referenced by the namecache may be different from the mount point
398 * used by the underlying vnode in the case of NULLFS, so a separate
399 * check is needed.
400 */
468bb1f9 401int
28623bf9 402ncp_writechk(struct nchandle *nch)
468bb1f9 403{
404 struct mount *mp;
405
406 if ((mp = nch->mount) != NULL) {
407 if (mp->mnt_flag & MNT_RDONLY)
408 return (EROFS);
409 if (mp->mnt_op->vfs_modifying != vfs_stdmodifying)
410 VFS_MODIFYING(mp);
411 }
412 return(0);
413}
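/*
 * Illustrative sketch (editor's addition): a filesystem that wants early
 * notification of impending modifications overrides the vfs_stdmodifying
 * default in its vfsops; ncp_writechk() above then invokes it through
 * VFS_MODIFYING() before the write path proceeds.  The "examplefs" names
 * are hypothetical, and the exact hook prototype is whatever sys/mount.h
 * declares for the vfs_modifying entry (assumed void here).
 */
#if 0
static void
examplefs_modifying(struct mount *mp)
{
	/* e.g. wake a flusher or reserve log/undo space before the write */
}

static struct vfsops examplefs_vfsops = {
	/* ... other entry points ... */
	.vfs_modifying = examplefs_modifying,
};
#endif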
414
415/*
416 * Vnode close call
417 *
418 * MPSAFE
419 */
420int
3596743e 421vn_close(struct vnode *vp, int flags, struct file *fp)
422{
423 int error;
424
b458d1ab 425 error = vn_lock(vp, LK_SHARED | LK_RETRY | LK_FAILRECLAIM);
4698dfb3 426 if (error == 0) {
3596743e 427 error = VOP_CLOSE(vp, flags, fp);
a11aaa81 428 vn_unlock(vp);
5fd012e0 429 }
430 vrele(vp);
431 return (error);
432}
433
434/*
435 * Sequential heuristic.
436 *
437 * MPSAFE (f_seqcount and f_nextoff are allowed to race)
438 */
439static __inline
440int
441sequential_heuristic(struct uio *uio, struct file *fp)
442{
443 /*
444 * Sequential heuristic - detect sequential operation
445 *
446 * NOTE: SMP: We allow f_seqcount updates to race.
447 */
448 if ((uio->uio_offset == 0 && fp->f_seqcount > 0) ||
449 uio->uio_offset == fp->f_nextoff) {
450 int tmpseq = fp->f_seqcount;
2247fe02 451
dc6a6bd2 452 tmpseq += (uio->uio_resid + MAXBSIZE - 1) / MAXBSIZE;
453 if (tmpseq > IO_SEQMAX)
454 tmpseq = IO_SEQMAX;
455 fp->f_seqcount = tmpseq;
456 return(fp->f_seqcount << IO_SEQSHIFT);
457 }
458
459 /*
460 * Not sequential, quick draw-down of seqcount
461 *
462 * NOTE: SMP: We allow f_seqcount updates to race.
463 */
464 if (fp->f_seqcount > 1)
465 fp->f_seqcount = 1;
466 else
467 fp->f_seqcount = 0;
468 return(0);
469}
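/*
 * Illustrative sketch (editor's addition): the hint computed above is ORed
 * into ioflag by vn_read()/vn_write(), and a filesystem's VOP_READ can
 * recover the sequentiality estimate to size its read-ahead.  Variable
 * names are hypothetical.
 */
#if 0
	int seqcount = ioflag >> IO_SEQSHIFT;	/* 0 .. IO_SEQMAX */

	if (seqcount > 1)
		;	/* access looks sequential; cluster/read ahead more aggressively */
#endif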
470
471/*
472 * get - lock and return the f_offset field.
473 * set - set and unlock the f_offset field.
474 *
475 * These routines serve the dual purpose of serializing access to the
0a80a445 476 * f_offset field (at least on x86) and guaranteeing operational integrity
c0885fab 477 * when multiple read()ers and write()ers are present on the same fp.
478 *
479 * MPSAFE
480 */
481static __inline off_t
482vn_get_fpf_offset(struct file *fp)
483{
484 u_int flags;
485 u_int nflags;
486
487 /*
488 * Shortcut critical path.
489 */
490 flags = fp->f_flag & ~FOFFSETLOCK;
491 if (atomic_cmpset_int(&fp->f_flag, flags, flags | FOFFSETLOCK))
492 return(fp->f_offset);
493
494 /*
495 * The hard way
496 */
497 for (;;) {
498 flags = fp->f_flag;
499 if (flags & FOFFSETLOCK) {
500 nflags = flags | FOFFSETWAKE;
ae8e83e6 501 tsleep_interlock(&fp->f_flag, 0);
c0885fab 502 if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
d9345d3a 503 tsleep(&fp->f_flag, PINTERLOCKED, "fpoff", 0);
504 } else {
505 nflags = flags | FOFFSETLOCK;
506 if (atomic_cmpset_int(&fp->f_flag, flags, nflags))
507 break;
508 }
509 }
510 return(fp->f_offset);
511}
512
513/*
514 * MPSAFE
515 */
516static __inline void
517vn_set_fpf_offset(struct file *fp, off_t offset)
518{
519 u_int flags;
520 u_int nflags;
521
522 /*
523 * We hold the lock so we can set the offset without interference.
524 */
525 fp->f_offset = offset;
526
527 /*
528 * Normal release is already a reasonably critical path.
529 */
530 for (;;) {
531 flags = fp->f_flag;
532 nflags = flags & ~(FOFFSETLOCK | FOFFSETWAKE);
533 if (atomic_cmpset_int(&fp->f_flag, flags, nflags)) {
534 if (flags & FOFFSETWAKE)
535 wakeup(&fp->f_flag);
536 break;
537 }
538 }
539}
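/*
 * Illustrative sketch (editor's addition): the two functions above bracket
 * an I/O that consumes and then advances the shared seek position, exactly
 * as vn_read()/vn_write() below do when O_FOFFSET is not supplied.
 */
#if 0
	uio->uio_offset = vn_get_fpf_offset(fp);	/* acquires FOFFSETLOCK */
	/* ... perform the VOP_READ or VOP_WRITE ... */
	vn_set_fpf_offset(fp, uio->uio_offset);		/* stores result, releases lock */
#endif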
540
541/*
542 * MPSAFE
543 */
544static __inline off_t
545vn_poll_fpf_offset(struct file *fp)
546{
1918fc5c 547#if defined(__x86_64__)
548 return(fp->f_offset);
549#else
550 off_t off = vn_get_fpf_offset(fp);
551 vn_set_fpf_offset(fp, off);
552 return(off);
553#endif
554}
555
556/*
557 * Package up an I/O request on a vnode into a uio and do it.
558 *
559 * MPSAFE
560 */
561int
87de5057 562vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
0a80a445 563 off_t offset, enum uio_seg segflg, int ioflg,
87de5057 564 struct ucred *cred, int *aresid)
565{
566 struct uio auio;
567 struct iovec aiov;
568 int error;
569
570 if ((ioflg & IO_NODELOCKED) == 0)
ca466bae 571 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
572 auio.uio_iov = &aiov;
573 auio.uio_iovcnt = 1;
574 aiov.iov_base = base;
575 aiov.iov_len = len;
576 auio.uio_resid = len;
577 auio.uio_offset = offset;
578 auio.uio_segflg = segflg;
579 auio.uio_rw = rw;
87de5057 580 auio.uio_td = curthread;
581 if (rw == UIO_READ) {
582 error = VOP_READ(vp, &auio, ioflg, cred);
583 } else {
584 error = VOP_WRITE(vp, &auio, ioflg, cred);
585 }
586 if (aresid)
587 *aresid = auio.uio_resid;
588 else
589 if (auio.uio_resid && error == 0)
590 error = EIO;
591 if ((ioflg & IO_NODELOCKED) == 0)
a11aaa81 592 vn_unlock(vp);
593 return (error);
594}
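/*
 * Illustrative sketch (editor's addition): reading the first 512 bytes of a
 * referenced vnode into a kernel buffer with vn_rdwr().  The buffer size is
 * arbitrary; IO_NODELOCKED would be passed instead if the caller already
 * held the vnode lock.
 */
#if 0
	char buf[512];
	int resid;
	int error;

	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), (off_t)0,
			UIO_SYSSPACE, 0, proc0.p_ucred, &resid);
	if (error == 0 && resid != 0)
		;	/* short read: only sizeof(buf) - resid bytes were filled */
#endif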
595
596/*
597 * Package up an I/O request on a vnode into a uio and do it. The I/O
598 * request is split up into smaller chunks and we try to avoid saturating
0a80a445 599 * the buffer cache while potentially holding a vnode locked, so we
f9235b6d 600 * check bwillwrite() before calling vn_rdwr(). We also call lwkt_user_yield()
984263bc
MD
601 * to give other processes a chance to lock the vnode (either other processes
602 * core'ing the same binary, or unrelated processes scanning the directory).
603 *
604 * MPSAFE
605 */
606int
607vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, caddr_t base, int len,
608 off_t offset, enum uio_seg segflg, int ioflg,
609 struct ucred *cred, int *aresid)
610{
611 int error = 0;
612
613 do {
9a0222ac 614 int chunk;
984263bc 615
616 /*
617 * Force `offset' to a multiple of MAXBSIZE except possibly
618 * for the first chunk, so that filesystems only need to
619 * write full blocks except possibly for the first and last
620 * chunks.
621 */
622 chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE;
623
624 if (chunk > len)
625 chunk = len;
d84f6fa1 626 if (vp->v_type == VREG && (ioflg & IO_RECURSE) == 0) {
627 switch(rw) {
628 case UIO_READ:
629 bwillread(chunk);
630 break;
631 case UIO_WRITE:
632 bwillwrite(chunk);
633 break;
634 }
635 }
984263bc 636 error = vn_rdwr(rw, vp, base, chunk, offset, segflg,
2247fe02 637 ioflg, cred, aresid);
638 len -= chunk; /* aresid calc already includes length */
639 if (error)
640 break;
641 offset += chunk;
642 base += chunk;
f9235b6d 643 lwkt_user_yield();
644 } while (len);
645 if (aresid)
646 *aresid += len;
647 return (error);
648}
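/*
 * Worked example (editor's addition): the chunking above aligns everything
 * after the first chunk to MAXBSIZE.  Assuming a hypothetical MAXBSIZE of
 * 64KB, a 200KB write starting at offset 10KB is issued as 54KB, 64KB, 64KB
 * and finally 18KB, so only the first and last chunks are partial blocks.
 */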
649
650/*
651 * File pointers can no longer get ripped up by revoke so
652 * we don't need to lock access to the vp.
653 *
654 * f_offset updates are not guaranteed against multiple readers
655 */
656static int
87de5057 657vn_read(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
658{
659 struct vnode *vp;
660 int error, ioflag;
661
662 KASSERT(uio->uio_td == curthread,
663 ("uio_td %p is not td %p", uio->uio_td, curthread));
984263bc 664 vp = (struct vnode *)fp->f_data;
9ba76b73 665
984263bc 666 ioflag = 0;
05dd1c0b 667 if (flags & O_FBLOCKING) {
668 /* ioflag &= ~IO_NDELAY; */
669 } else if (flags & O_FNONBLOCKING) {
670 ioflag |= IO_NDELAY;
671 } else if (fp->f_flag & FNONBLOCK) {
984263bc 672 ioflag |= IO_NDELAY;
9ba76b73 673 }
c72df65d 674 if (fp->f_flag & O_DIRECT) {
984263bc 675 ioflag |= IO_DIRECT;
9ba76b73 676 }
677 if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
678 uio->uio_offset = vn_get_fpf_offset(fp);
ab6f251b 679 vn_lock(vp, LK_SHARED | LK_RETRY);
680 ioflag |= sequential_heuristic(uio, fp);
681
aac0aabd 682 error = VOP_READ(vp, uio, ioflag, cred);
984263bc 683 fp->f_nextoff = uio->uio_offset;
a11aaa81 684 vn_unlock(vp);
685 if ((flags & O_FOFFSET) == 0 && (vp->v_flag & VNOTSEEKABLE) == 0)
686 vn_set_fpf_offset(fp, uio->uio_offset);
687 return (error);
688}
689
690/*
2247fe02 691 * MPSAFE
692 */
693static int
87de5057 694vn_write(struct file *fp, struct uio *uio, struct ucred *cred, int flags)
695{
696 struct vnode *vp;
697 int error, ioflag;
698
87de5057 699 KASSERT(uio->uio_td == curthread,
f4d08668 700 ("uio_td %p is not p %p", uio->uio_td, curthread));
984263bc 701 vp = (struct vnode *)fp->f_data;
9ba76b73 702
984263bc 703 ioflag = IO_UNIT;
704 if (vp->v_type == VREG &&
705 ((fp->f_flag & O_APPEND) || (flags & O_FAPPEND))) {
984263bc 706 ioflag |= IO_APPEND;
707 }
708
709 if (flags & O_FBLOCKING) {
710 /* ioflag &= ~IO_NDELAY; */
711 } else if (flags & O_FNONBLOCKING) {
984263bc 712 ioflag |= IO_NDELAY;
713 } else if (fp->f_flag & FNONBLOCK) {
714 ioflag |= IO_NDELAY;
715 }
c72df65d 716 if (fp->f_flag & O_DIRECT) {
984263bc 717 ioflag |= IO_DIRECT;
718 }
719 if (flags & O_FASYNCWRITE) {
720 /* ioflag &= ~IO_SYNC; */
721 } else if (flags & O_FSYNCWRITE) {
722 ioflag |= IO_SYNC;
723 } else if (fp->f_flag & O_FSYNC) {
724 ioflag |= IO_SYNC;
725 }
726
727 if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))
984263bc 728 ioflag |= IO_SYNC;
9ba76b73 729 if ((flags & O_FOFFSET) == 0)
c0885fab 730 uio->uio_offset = vn_get_fpf_offset(fp);
731 if (vp->v_mount)
732 VFS_MODIFYING(vp->v_mount);
c0885fab 733 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
984263bc 734 ioflag |= sequential_heuristic(uio, fp);
aac0aabd 735 error = VOP_WRITE(vp, uio, ioflag, cred);
984263bc 736 fp->f_nextoff = uio->uio_offset;
a11aaa81 737 vn_unlock(vp);
738 if ((flags & O_FOFFSET) == 0)
739 vn_set_fpf_offset(fp, uio->uio_offset);
740 return (error);
741}
742
743/*
2ad080fe 744 * MPSAFE
745 */
746static int
87de5057 747vn_statfile(struct file *fp, struct stat *sb, struct ucred *cred)
984263bc 748{
749 struct vnode *vp;
750 int error;
984263bc 751
752 vp = (struct vnode *)fp->f_data;
753 error = vn_stat(vp, sb, cred);
d9b2033e 754 return (error);
755}
756
2ad080fe 757/*
aac0aabd 758 * MPSAFE
2ad080fe 759 */
984263bc 760int
87de5057 761vn_stat(struct vnode *vp, struct stat *sb, struct ucred *cred)
762{
763 struct vattr vattr;
dadab5e9 764 struct vattr *vap;
765 int error;
766 u_short mode;
b13267a5 767 cdev_t dev;
768
769 vap = &vattr;
aac0aabd 770 error = VOP_GETATTR(vp, vap);
771 if (error)
772 return (error);
773
774 /*
775 * Zero the spare stat fields
776 */
777 sb->st_lspare = 0;
778 sb->st_qspare1 = 0;
779 sb->st_qspare2 = 0;
780
781 /*
782 * Copy from vattr table
783 */
784 if (vap->va_fsid != VNOVAL)
785 sb->st_dev = vap->va_fsid;
786 else
787 sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
788 sb->st_ino = vap->va_fileid;
789 mode = vap->va_mode;
790 switch (vap->va_type) {
791 case VREG:
792 mode |= S_IFREG;
793 break;
794 case VDATABASE:
795 mode |= S_IFDB;
796 break;
797 case VDIR:
798 mode |= S_IFDIR;
799 break;
800 case VBLK:
801 mode |= S_IFBLK;
802 break;
803 case VCHR:
804 mode |= S_IFCHR;
805 break;
806 case VLNK:
807 mode |= S_IFLNK;
808 /* This is a cosmetic change, symlinks do not have a mode. */
809 if (vp->v_mount->mnt_flag & MNT_NOSYMFOLLOW)
810 sb->st_mode &= ~ACCESSPERMS; /* 0000 */
811 else
812 sb->st_mode |= ACCESSPERMS; /* 0777 */
813 break;
814 case VSOCK:
815 mode |= S_IFSOCK;
816 break;
817 case VFIFO:
818 mode |= S_IFIFO;
819 break;
820 default:
821 return (EBADF);
4698dfb3 822 }
984263bc 823 sb->st_mode = mode;
824 if (vap->va_nlink > (nlink_t)-1)
825 sb->st_nlink = (nlink_t)-1;
826 else
827 sb->st_nlink = vap->va_nlink;
828 sb->st_uid = vap->va_uid;
829 sb->st_gid = vap->va_gid;
cd29885a 830 sb->st_rdev = dev2udev(vp->v_rdev);
831 sb->st_size = vap->va_size;
832 sb->st_atimespec = vap->va_atime;
833 sb->st_mtimespec = vap->va_mtime;
834 sb->st_ctimespec = vap->va_ctime;
835
836 /*
837 * A VCHR and VBLK device may track the last access and last modified
 838 * time independently of the filesystem. This is particularly true
839 * because device read and write calls may bypass the filesystem.
840 */
841 if (vp->v_type == VCHR || vp->v_type == VBLK) {
842 dev = vp->v_rdev;
843 if (dev != NULL) {
d8869c1b 844 if (dev->si_lastread) {
845 sb->st_atimespec.tv_sec = time_second +
846 (time_uptime -
847 dev->si_lastread);
848 sb->st_atimespec.tv_nsec = 0;
849 }
850 if (dev->si_lastwrite) {
 851 sb->st_mtimespec.tv_sec = time_second +
852 (time_uptime -
853 dev->si_lastwrite);
 854 sb->st_mtimespec.tv_nsec = 0;
855 }
856 }
857 }
858
984263bc 859 /*
0a80a445 860 * According to www.opengroup.org, the meaning of st_blksize is
861 * "a filesystem-specific preferred I/O block size for this
862 * object. In some filesystem types, this may vary from file
863 * to file"
864 * Default to PAGE_SIZE after much discussion.
865 */
866
867 if (vap->va_type == VREG) {
868 sb->st_blksize = vap->va_blocksize;
869 } else if (vn_isdisk(vp, NULL)) {
870 /*
871 * XXX this is broken. If the device is not yet open (aka
872 * stat() call, aka v_rdev == NULL), how are we supposed
873 * to get a valid block size out of it?
874 */
4698dfb3 875 dev = vp->v_rdev;
cd29885a 876
877 sb->st_blksize = dev->si_bsize_best;
878 if (sb->st_blksize < dev->si_bsize_phys)
879 sb->st_blksize = dev->si_bsize_phys;
880 if (sb->st_blksize < BLKDEV_IOSIZE)
881 sb->st_blksize = BLKDEV_IOSIZE;
882 } else {
883 sb->st_blksize = PAGE_SIZE;
884 }
0a80a445 885
984263bc 886 sb->st_flags = vap->va_flags;
887
888 error = priv_check_cred(cred, PRIV_VFS_GENERATION, 0);
889 if (error)
890 sb->st_gen = 0;
891 else
50626622 892 sb->st_gen = (u_int32_t)vap->va_gen;
984263bc 893
984263bc 894 sb->st_blocks = vap->va_bytes / S_BLKSIZE;
895 return (0);
896}
897
898/*
d9b2033e 899 * MPALMOSTSAFE - acquires mplock
900 */
901static int
902vn_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *ucred,
903 struct sysmsg *msg)
984263bc 904{
dadab5e9 905 struct vnode *vp = ((struct vnode *)fp->f_data);
1fbb5fc0 906 struct vnode *ovp;
907 struct vattr vattr;
908 int error;
c0885fab 909 off_t size;
984263bc 910
dadab5e9 911 switch (vp->v_type) {
912 case VREG:
913 case VDIR:
914 if (com == FIONREAD) {
915 error = VOP_GETATTR(vp, &vattr);
916 if (error)
d9b2033e 917 break;
918 size = vattr.va_size;
919 if ((vp->v_flag & VNOTSEEKABLE) == 0)
920 size -= vn_poll_fpf_offset(fp);
921 if (size > 0x7FFFFFFF)
922 size = 0x7FFFFFFF;
923 *(int *)data = size;
924 error = 0;
925 break;
926 }
9ba76b73 927 if (com == FIOASYNC) { /* XXX */
928 error = 0; /* XXX */
929 break;
984263bc 930 }
984263bc 931 /* fall into ... */
932 default:
933#if 0
934 return (ENOTTY);
935#endif
936 case VFIFO:
937 case VCHR:
938 case VBLK:
939 if (com == FIODTYPE) {
940 if (vp->v_type != VCHR && vp->v_type != VBLK) {
941 error = ENOTTY;
942 break;
943 }
335dda38 944 *(int *)data = dev_dflags(vp->v_rdev) & D_TYPEMASK;
945 error = 0;
946 break;
984263bc 947 }
87baaf0c 948 error = VOP_IOCTL(vp, com, data, fp->f_flag, ucred, msg);
984263bc 949 if (error == 0 && com == TIOCSCTTY) {
950 struct proc *p = curthread->td_proc;
951 struct session *sess;
952
953 if (p == NULL) {
954 error = ENOTTY;
955 break;
956 }
984263bc 957
2247fe02 958 get_mplock();
87de5057 959 sess = p->p_session;
984263bc 960 /* Do nothing if reassigning same control tty */
961 if (sess->s_ttyvp == vp) {
962 error = 0;
2247fe02 963 rel_mplock();
964 break;
965 }
966
967 /* Get rid of reference to old control tty */
1fbb5fc0 968 ovp = sess->s_ttyvp;
597aea93 969 vref(vp);
970 sess->s_ttyvp = vp;
971 if (ovp)
972 vrele(ovp);
2247fe02 973 rel_mplock();
984263bc 974 }
d9b2033e 975 break;
984263bc 976 }
d9b2033e 977 return (error);
978}
979
984263bc 980/*
981 * Obtain the requested vnode lock
982 *
983 * LK_RETRY Automatically retry on timeout
984 * LK_FAILRECLAIM Fail if the vnode is being reclaimed
985 *
 986 * Failures will occur if the vnode is undergoing recycling, but not
 987 * all callers expect the function to fail, so the caller must pass
 988 * LK_FAILRECLAIM if it wants to receive an error code.
989 *
990 * Errors can occur for other reasons if you pass in other LK_ flags,
991 * regardless of whether you pass in LK_FAILRECLAIM
992 */
993int
ca466bae 994vn_lock(struct vnode *vp, int flags)
995{
996 int error;
0a80a445 997
984263bc 998 do {
a11aaa81 999 error = lockmgr(&vp->v_lock, flags);
1000 if (error == 0)
1001 break;
984263bc 1002 } while (flags & LK_RETRY);
1003
1004 /*
1005 * Because we (had better!) have a ref on the vnode, once it
1006 * goes to VRECLAIMED state it will not be recycled until all
1007 * refs go away. So we can just check the flag.
1008 */
1009 if (error == 0 && (vp->v_flag & VRECLAIMED)) {
1010 if (flags & LK_FAILRECLAIM) {
1011 lockmgr(&vp->v_lock, LK_RELEASE);
1012 error = ENOENT;
1013 }
5fd012e0 1014 }
1015 return (error);
1016}
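/*
 * Illustrative sketch (editor's addition): a caller that needs to detect
 * reclaimed vnodes passes LK_FAILRECLAIM and handles the resulting ENOENT,
 * as vn_close() above does; without the flag the lock simply succeeds on a
 * VRECLAIMED vnode.
 */
#if 0
	error = vn_lock(vp, LK_SHARED | LK_RETRY | LK_FAILRECLAIM);
	if (error) {
		/* vnode was reclaimed out from under us; skip the operation */
	} else {
		/* ... operate on vp ... */
		vn_unlock(vp);
	}
#endif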
1017
1018#ifdef DEBUG_VN_UNLOCK
1019
1020void
1021debug_vn_unlock(struct vnode *vp, const char *filename, int line)
1022{
1023 kprintf("vn_unlock from %s:%d\n", filename, line);
1024 lockmgr(&vp->v_lock, LK_RELEASE);
1025}
1026
1027#else
1028
1029void
1030vn_unlock(struct vnode *vp)
1031{
1032 lockmgr(&vp->v_lock, LK_RELEASE);
1033}
1034
1035#endif
1036
1037/*
1038 * MPSAFE
1039 */
1040int
1041vn_islocked(struct vnode *vp)
1042{
1043 return (lockstatus(&vp->v_lock, curthread));
1044}
1045
1046/*
1047 * Return the lock status of a vnode and unlock the vnode
 1048 * if we owned the lock. This is not a boolean; if the
 1049 * caller cares what the lock status is, the caller must
1050 * check the various possible values.
1051 *
1052 * This only unlocks exclusive locks held by the caller,
1053 * it will NOT unlock shared locks (there is no way to
1054 * tell who the shared lock belongs to).
1055 *
1056 * MPSAFE
1057 */
1058int
1059vn_islocked_unlock(struct vnode *vp)
1060{
1061 int vpls;
1062
1063 vpls = lockstatus(&vp->v_lock, curthread);
1064 if (vpls == LK_EXCLUSIVE)
1065 lockmgr(&vp->v_lock, LK_RELEASE);
1066 return(vpls);
1067}
1068
1069/*
1070 * Restore a vnode lock that we previously released via
1071 * vn_islocked_unlock(). This is a NOP if we did not
1072 * own the original lock.
1073 *
1074 * MPSAFE
1075 */
1076void
1077vn_islocked_relock(struct vnode *vp, int vpls)
1078{
1079 int error;
1080
1081 if (vpls == LK_EXCLUSIVE)
1082 error = lockmgr(&vp->v_lock, vpls);
1083}
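/*
 * Illustrative sketch (editor's addition): vn_islocked_unlock() and
 * vn_islocked_relock() are used as a pair when code that may or may not own
 * the vnode's exclusive lock must drop it across a blocking call and then
 * restore the original state.
 */
#if 0
	int vpls;

	vpls = vn_islocked_unlock(vp);	/* drops only our exclusive lock */
	/* ... blocking operation that must not hold the vnode lock ... */
	vn_islocked_relock(vp, vpls);	/* relocks only if we had owned it */
#endif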
1084
984263bc 1085/*
2247fe02 1086 * MPSAFE
1087 */
1088static int
87de5057 1089vn_closefile(struct file *fp)
984263bc 1090{
d9b2033e 1091 int error;
1092
1093 fp->f_ops = &badfileops;
3596743e 1094 error = vn_close(((struct vnode *)fp->f_data), fp->f_flag, fp);
4698dfb3 1095 return (error);
1096}
1097
d9b2033e 1098/*
2247fe02 1099 * MPSAFE
d9b2033e 1100 */
1101static int
1102vn_kqfilter(struct file *fp, struct knote *kn)
1103{
d9b2033e 1104 int error;
984263bc 1105
d9b2033e 1106 error = VOP_KQFILTER(((struct vnode *)fp->f_data), kn);
d9b2033e 1107 return (error);
984263bc 1108}