2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94
39 * $FreeBSD: src/sys/kern/vfs_syscalls.c,v 1.151.2.18 2003/04/04 20:35:58 tegge Exp $
40 * $DragonFly: src/sys/kern/vfs_syscalls.c,v 1.58 2005/02/02 21:34:18 joerg Exp $
43 #include <sys/param.h>
44 #include <sys/systm.h>
47 #include <sys/sysent.h>
48 #include <sys/malloc.h>
49 #include <sys/mount.h>
50 #include <sys/sysproto.h>
51 #include <sys/filedesc.h>
52 #include <sys/kernel.h>
53 #include <sys/fcntl.h>
55 #include <sys/linker.h>
57 #include <sys/unistd.h>
58 #include <sys/vnode.h>
60 #include <sys/namei.h>
61 #include <sys/nlookup.h>
62 #include <sys/dirent.h>
63 #include <sys/extattr.h>
64 #include <sys/kern_syscall.h>
66 #include <machine/limits.h>
67 #include <vfs/union/union.h>
68 #include <sys/sysctl.h>
70 #include <vm/vm_object.h>
71 #include <vm/vm_zone.h>
72 #include <vm/vm_page.h>
74 #include <sys/file2.h>
76 static int checkvp_chdir (struct vnode *vn, struct thread *td);
77 static void checkdirs (struct vnode *olddp, struct namecache *ncp);
78 static int chroot_refuse_vdir_fds (struct filedesc *fdp);
79 static int chroot_visible_mnt(struct mount *mp, struct proc *p);
80 static int getutimes (const struct timeval *, struct timespec *);
81 static int setfown (struct vnode *, uid_t, gid_t);
82 static int setfmode (struct vnode *, int);
83 static int setfflags (struct vnode *, int);
84 static int setutimes (struct vnode *, const struct timespec *, int);
85 static int usermount = 0; /* if 1, non-root can mount fs. */
87 int (*union_dircheckp) (struct thread *, struct vnode **, struct file *);
89 SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, "");
92 * Virtual File System System Calls
96 * Mount a file system.
99 * mount_args(char *type, char *path, int flags, caddr_t data)
/*
 * mount(2): mount or update a filesystem on the directory named by
 * uap->path.  Refuses prisons, enforces MNT_NOSUID|MNT_NODEV for
 * non-root, resolves the path via nlookup, then either updates an
 * existing mount (MNT_UPDATE) or allocates/initializes a new struct
 * mount, calls VFS_MOUNT, links it onto mountlist and fixes up
 * cwd/root directories via checkdirs().
 *
 * NOTE(review): this is a partial listing -- the embedded line numbers
 * skip values, so statements/braces are missing.  Do not reason about
 * exact control flow without the full source.
 */
103 mount(struct mount_args *uap)
105 struct thread *td = curthread;
106 struct proc *p = td->td_proc;
108 struct namecache *ncp;
110 struct vfsconf *vfsp;
111 int error, flag = 0, flag2 = 0;
113 struct nlookupdata nd;
114 char fstypename[MFSNAMELEN];
116 struct nlcomponent nlc;
/* Permission gates: no mounting inside a prison; usermount sysctl for non-root. */
119 if (p->p_ucred->cr_prison != NULL)
121 if (usermount == 0 && (error = suser(td)))
124 * Do not allow NFS export by non-root users.
126 if (SCARG(uap, flags) & MNT_EXPORTED) {
132 * Silently enforce MNT_NOSUID and MNT_NODEV for non-root users
135 SCARG(uap, flags) |= MNT_NOSUID | MNT_NODEV;
138 * Lookup the requested path and extract the ncp and vnode.
140 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
142 if ((error = nlookup(&nd)) == 0) {
143 if (nd.nl_ncp->nc_vp == NULL)
153 * Extract the locked+refd ncp and cleanup the nd structure
160 * now we have the locked ref'd ncp and unreferenced vnode.
163 if ((error = vget(vp, LK_EXCLUSIVE, td)) != 0) {
170 * Now we have an unlocked ref'd ncp and a locked ref'd vp
/* MNT_UPDATE path: mutate the existing mount in place (must be fs root). */
172 if (SCARG(uap, flags) & MNT_UPDATE) {
173 if ((vp->v_flag & VROOT) == 0) {
180 flag2 = mp->mnt_kern_flag;
182 * We only allow the filesystem to be reloaded if it
183 * is currently mounted read-only.
185 if ((SCARG(uap, flags) & MNT_RELOAD) &&
186 ((mp->mnt_flag & MNT_RDONLY) == 0)) {
189 return (EOPNOTSUPP); /* Needs translation */
192 * Only root, or the user that did the original mount is
193 * permitted to update it.
195 if (mp->mnt_stat.f_owner != p->p_ucred->cr_uid &&
196 (error = suser(td))) {
201 if (vfs_busy(mp, LK_NOWAIT, NULL, td)) {
206 if ((vp->v_flag & VMOUNT) != 0 ||
207 vp->v_mountedhere != NULL) {
/* VMOUNT marks the covered vnode as mount-in-progress (cleared on exit paths). */
213 vp->v_flag |= VMOUNT;
215 SCARG(uap, flags) & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE);
216 VOP_UNLOCK(vp, 0, td);
220 * If the user is not root, ensure that they own the directory
221 * onto which we are attempting to mount.
223 if ((error = VOP_GETATTR(vp, &va, td)) ||
224 (va.va_uid != p->p_ucred->cr_uid &&
225 (error = suser(td)))) {
230 if ((error = vinvalbuf(vp, V_SAVE, td, 0, 0)) != 0) {
235 if (vp->v_type != VDIR) {
240 if ((error = copyinstr(SCARG(uap, type), fstypename, MFSNAMELEN, NULL)) != 0) {
/* Find the requested VFS by name; if absent, root may load it as a module. */
245 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
246 if (!strcmp(vfsp->vfc_name, fstypename))
252 /* Only load modules for root (very important!) */
253 if ((error = suser(td)) != 0) {
258 error = linker_load_file(fstypename, &lf);
259 if (error || lf == NULL) {
267 /* lookup again, see if the VFS was loaded */
268 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
269 if (!strcmp(vfsp->vfc_name, fstypename))
274 linker_file_unload(lf);
280 if ((vp->v_flag & VMOUNT) != 0 ||
281 vp->v_mountedhere != NULL) {
286 vp->v_flag |= VMOUNT;
289 * Allocate and initialize the filesystem.
291 mp = malloc(sizeof(struct mount), M_MOUNT, M_ZERO|M_WAITOK);
292 TAILQ_INIT(&mp->mnt_nvnodelist);
293 TAILQ_INIT(&mp->mnt_reservedvnlist);
294 TAILQ_INIT(&mp->mnt_jlist);
295 mp->mnt_nvnodelistsize = 0;
296 lockinit(&mp->mnt_lock, 0, "vfslock", 0, LK_NOPAUSE);
297 vfs_busy(mp, LK_NOWAIT, NULL, td);
298 mp->mnt_op = vfsp->vfc_vfsops;
300 vfsp->vfc_refcount++;
301 mp->mnt_stat.f_type = vfsp->vfc_typenum;
302 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
303 strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
304 mp->mnt_vnodecovered = vp;
305 mp->mnt_stat.f_owner = p->p_ucred->cr_uid;
306 mp->mnt_iosize_max = DFLTPHYS;
307 VOP_UNLOCK(vp, 0, td);
310 * Set the mount level flags.
312 if (SCARG(uap, flags) & MNT_RDONLY)
313 mp->mnt_flag |= MNT_RDONLY;
314 else if (mp->mnt_flag & MNT_RDONLY)
315 mp->mnt_kern_flag |= MNTK_WANTRDWR;
316 mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
317 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOATIME |
318 MNT_NOSYMFOLLOW | MNT_IGNORE |
319 MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
320 mp->mnt_flag |= SCARG(uap, flags) & (MNT_NOSUID | MNT_NOEXEC |
321 MNT_NODEV | MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_FORCE |
322 MNT_NOSYMFOLLOW | MNT_IGNORE |
323 MNT_NOATIME | MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
325 * Mount the filesystem.
326 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
329 error = VFS_MOUNT(mp, SCARG(uap, path), SCARG(uap, data), td);
330 if (mp->mnt_flag & MNT_UPDATE) {
331 if (mp->mnt_kern_flag & MNTK_WANTRDWR)
332 mp->mnt_flag &= ~MNT_RDONLY;
333 mp->mnt_flag &=~ (MNT_UPDATE | MNT_RELOAD | MNT_FORCE);
334 mp->mnt_kern_flag &=~ MNTK_WANTRDWR;
337 mp->mnt_kern_flag = flag2;
340 vp->v_flag &= ~VMOUNT;
345 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
347 * Put the new filesystem on the mount list after root. The mount
348 * point gets its own mnt_ncp which is a special ncp linking the
349 * vnode-under to the root of the new mount. The lookup code
350 * detects the mount point going forward and detects the special
351 * mnt_ncp via NCP_MOUNTPT going backwards.
353 * It is not necessary to invalidate or purge the vnode underneath
354 * because elements under the mount will be given their own glue
358 nlc.nlc_nameptr = "";
360 mp->mnt_ncp = cache_nlookup(ncp, &nlc);
361 cache_setunresolved(mp->mnt_ncp);
362 mp->mnt_ncp->nc_flag |= NCF_MOUNTPT;
363 mp->mnt_ncp->nc_mount = mp;
365 /* XXX get the root of the fs and cache_setvp(mnt_ncp...) */
366 vp->v_flag &= ~VMOUNT;
367 vp->v_mountedhere = mp;
/* mountlist insertion is serialized by mountlist_token. */
368 lwkt_gettoken(&ilock, &mountlist_token);
369 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
370 lwkt_reltoken(&ilock);
371 checkdirs(vp, mp->mnt_ncp);
372 cache_unlock(mp->mnt_ncp); /* leave ref intact */
373 VOP_UNLOCK(vp, 0, td);
374 error = vfs_allocate_syncvnode(mp);
376 if ((error = VFS_START(mp, 0, td)) != 0)
/* Failure path: tear down the per-mount vnode op vectors added by VFS_MOUNT. */
379 vfs_rm_vnodeops(&mp->mnt_vn_coherency_ops);
380 vfs_rm_vnodeops(&mp->mnt_vn_journal_ops);
381 vfs_rm_vnodeops(&mp->mnt_vn_norm_ops);
382 vfs_rm_vnodeops(&mp->mnt_vn_spec_ops);
383 vfs_rm_vnodeops(&mp->mnt_vn_fifo_ops);
384 vp->v_flag &= ~VMOUNT;
385 mp->mnt_vfc->vfc_refcount--;
395 * Scan all active processes to see if any of them have a current
396 * or root directory onto which the new filesystem has just been
397 * mounted. If so, replace them with the new mount point.
399 * The passed ncp is ref'd and locked (from the mount code) and
400 * must be associated with the vnode representing the root of the
/*
 * checkdirs: after a mount, walk every process and retarget any cwd/root
 * that pointed at the covered vnode (olddp) to the new mount's root,
 * swapping both the vnode and namecache references.  Also updates the
 * system root if olddp was rootvnode.
 *
 * NOTE(review): partial listing -- embedded numbering skips lines, so
 * locking and ref-drop statements are missing from this view.
 */
404 checkdirs(struct vnode *olddp, struct namecache *ncp)
406 struct filedesc *fdp;
/* usecount == 1 means nobody holds the old dir as cwd/root; nothing to do. */
411 if (olddp->v_usecount == 1)
413 mp = olddp->v_mountedhere;
414 if (VFS_ROOT(mp, &newdp))
415 panic("mount: lost mount");
416 cache_setvp(ncp, newdp);
418 if (rootvnode == olddp) {
420 vfs_cache_setroot(newdp, cache_hold(ncp));
423 FOREACH_PROC_IN_SYSTEM(p) {
425 if (fdp->fd_cdir == olddp) {
428 fdp->fd_cdir = newdp;
429 cache_drop(fdp->fd_ncdir);
430 fdp->fd_ncdir = cache_hold(ncp);
432 if (fdp->fd_rdir == olddp) {
435 fdp->fd_rdir = newdp;
436 cache_drop(fdp->fd_nrdir);
437 fdp->fd_nrdir = cache_hold(ncp);
444 * Unmount a file system.
446 * Note: unmount takes a path to the vnode mounted on as argument,
447 * not special file (as before).
450 * umount_args(char *path, int flags)
/*
 * unmount(2): resolve uap->path, verify the caller may unmount (owner or
 * root, not in a prison), refuse the root filesystem, require the vnode
 * to be the filesystem root, then hand off to dounmount().
 *
 * NOTE(review): partial listing -- error/cleanup lines are missing.
 */
454 unmount(struct unmount_args *uap)
456 struct thread *td = curthread;
457 struct proc *p = td->td_proc;
461 struct nlookupdata nd;
464 if (p->p_ucred->cr_prison != NULL)
466 if (usermount == 0 && (error = suser(td)))
470 error = nlookup_init(&nd, SCARG(uap, path), UIO_USERSPACE, NLC_FOLLOW);
472 error = nlookup(&nd);
474 error = cache_vget(nd.nl_ncp, nd.nl_cred, LK_EXCLUSIVE, &vp);
482 * Only root, or the user that did the original mount is
483 * permitted to unmount this filesystem.
485 if ((mp->mnt_stat.f_owner != p->p_ucred->cr_uid) &&
486 (error = suser(td))) {
492 * Don't allow unmounting the root file system.
494 if (mp->mnt_flag & MNT_ROOTFS) {
500 * Must be the root of the filesystem
502 if ((vp->v_flag & VROOT) == 0) {
507 return (dounmount(mp, SCARG(uap, flags), td));
511 * Do the actual file system unmount.
/*
 * dounmount: core unmount engine.  Marks the mount MNTK_UNMOUNT (with
 * MNTK_UNMOUNTF for forced unmounts), drains the mount lock, syncs and
 * purges caches, calls VFS_UNMOUNT, and on success unlinks the mount
 * from mountlist, removes its vnode-op vectors, and detaches it from
 * the covered vnode.  On VFS_UNMOUNT failure the mount is restored.
 *
 * NOTE(review): partial listing -- embedded numbering skips lines;
 * several error branches and the final frees are not visible here.
 */
514 dounmount(struct mount *mp, int flags, struct thread *td)
516 struct vnode *coveredvp;
521 lwkt_gettoken(&ilock, &mountlist_token);
/* Only one unmount of a given mount may be in progress. */
522 if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
523 lwkt_reltoken(&ilock);
526 mp->mnt_kern_flag |= MNTK_UNMOUNT;
527 /* Allow filesystems to detect that a forced unmount is in progress. */
528 if (flags & MNT_FORCE)
529 mp->mnt_kern_flag |= MNTK_UNMOUNTF;
530 error = lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK |
531 ((flags & MNT_FORCE) ? 0 : LK_NOWAIT), &ilock, td);
533 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
534 if (mp->mnt_kern_flag & MNTK_MWAIT)
539 if (mp->mnt_flag & MNT_EXPUBLIC)
540 vfs_setpublicfs(NULL, NULL, NULL);
542 vfs_msync(mp, MNT_WAIT);
543 async_flag = mp->mnt_flag & MNT_ASYNC;
544 mp->mnt_flag &=~ MNT_ASYNC;
545 cache_purgevfs(mp); /* remove cache entries for this file sys */
546 if (mp->mnt_syncer != NULL)
547 vrele(mp->mnt_syncer);
/* Read-only mounts skip the pre-unmount VFS_SYNC. */
548 if (((mp->mnt_flag & MNT_RDONLY) ||
549 (error = VFS_SYNC(mp, MNT_WAIT, td)) == 0) ||
551 error = VFS_UNMOUNT(mp, flags, td);
552 lwkt_gettokref(&ilock);
/* Failure path: re-create the syncer and restore flags. */
554 if (mp->mnt_syncer == NULL)
555 vfs_allocate_syncvnode(mp);
556 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
557 mp->mnt_flag |= async_flag;
558 lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK | LK_REENABLE,
560 if (mp->mnt_kern_flag & MNTK_MWAIT)
564 TAILQ_REMOVE(&mountlist, mp, mnt_list);
567 * Remove any installed vnode ops here so the individual VFSs don't
570 vfs_rm_vnodeops(&mp->mnt_vn_coherency_ops);
571 vfs_rm_vnodeops(&mp->mnt_vn_journal_ops);
572 vfs_rm_vnodeops(&mp->mnt_vn_norm_ops);
573 vfs_rm_vnodeops(&mp->mnt_vn_spec_ops);
574 vfs_rm_vnodeops(&mp->mnt_vn_fifo_ops);
576 if ((coveredvp = mp->mnt_vnodecovered) != NULLVP) {
577 coveredvp->v_mountedhere = NULL;
579 cache_drop(mp->mnt_ncp);
582 mp->mnt_vfc->vfc_refcount--;
583 if (!TAILQ_EMPTY(&mp->mnt_nvnodelist))
584 panic("unmount: dangling vnode");
585 lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &ilock, td);
586 if (mp->mnt_kern_flag & MNTK_MWAIT)
593 * Sync each mounted filesystem.
/* Debug knob: when set, sync(2) prints buffer-pool stats (debug.syncprt). */
597 static int syncprt = 0;
598 SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, "");
/*
 * sync(2): walk the mountlist and VFS_SYNC each writable mount with
 * MNT_NOWAIT, temporarily clearing MNT_ASYNC around the sync.
 * Busy mounts are skipped.
 *
 * NOTE(review): partial listing -- vfs_unbusy / return lines not visible.
 */
603 sync(struct sync_args *uap)
605 struct thread *td = curthread;
606 struct mount *mp, *nmp;
610 lwkt_gettoken(&ilock, &mountlist_token);
611 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
612 if (vfs_busy(mp, LK_NOWAIT, &ilock, td)) {
613 nmp = TAILQ_NEXT(mp, mnt_list);
616 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
617 asyncflag = mp->mnt_flag & MNT_ASYNC;
618 mp->mnt_flag &= ~MNT_ASYNC;
619 vfs_msync(mp, MNT_NOWAIT);
620 VFS_SYNC(mp, MNT_NOWAIT, td);
621 mp->mnt_flag |= asyncflag;
623 lwkt_gettokref(&ilock);
624 nmp = TAILQ_NEXT(mp, mnt_list);
627 lwkt_reltoken(&ilock);
629 * print out buffer pool stat information on each sync() call.
638 /* XXX PRISON: could be per prison flag */
/* Per-system toggle allowing quota operations from inside a prison. */
639 static int prison_quotas;
641 SYSCTL_INT(_kern_prison, OID_AUTO, quotas, CTLFLAG_RW, &prison_quotas, 0, "");
645 * quotactl_args(char *path, int fcmd, int uid, caddr_t arg)
647 * Change filesystem quotas.
/*
 * quotactl(2): look up the mount containing uap->path and forward the
 * quota command to VFS_QUOTACTL.  Refused in prisons unless the
 * kern.prison.quotas sysctl is set.
 *
 * NOTE(review): partial listing -- declarations/cleanup lines missing.
 */
651 quotactl(struct quotactl_args *uap)
653 struct nlookupdata nd;
661 if (p->p_ucred->cr_prison && !prison_quotas)
664 error = nlookup_init(&nd, SCARG(uap, path), UIO_USERSPACE, NLC_FOLLOW);
666 error = nlookup(&nd);
668 mp = nd.nl_ncp->nc_mount;
669 error = VFS_QUOTACTL(mp, SCARG(uap, cmd), SCARG(uap, uid),
670 SCARG(uap, arg), nd.nl_td);
677 * mountctl(char *path, int op, int fd, const void *ctl, int ctllen,
678 * void *buf, int buflen)
680 * This function operates on a mount point and executes the specified
681 * operation using the specified control data, and possibly returns data.
683 * The actual number of bytes stored in the result buffer is returned, 0
684 * if none, otherwise an error is returned.
/*
 * mountctl(2): root-only mount-control operation.  Copies in the path
 * and control buffer, validates the optional descriptor, runs
 * kern_mountctl(), and copies any result back to userland.
 * ctl is capped at 1024 bytes and buf at 16KB.
 *
 * NOTE(review): partial listing -- error-return and free() lines are
 * missing from this view.
 */
688 mountctl(struct mountctl_args *uap)
690 struct thread *td = curthread;
691 struct proc *p = td->td_proc;
692 struct filedesc *fdp = p->p_fd;
700 * Sanity and permissions checks. We must be root.
703 if (p->p_ucred->cr_prison != NULL)
705 if ((error = suser(td)) != 0)
709 * Argument length checks
711 if (uap->ctllen < 0 || uap->ctllen > 1024)
713 if (uap->buflen < 0 || uap->buflen > 16 * 1024)
715 if (uap->path == NULL)
719 * Allocate the necessary buffers and copyin data
721 path = zalloc(namei_zone);
722 error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
727 ctl = malloc(uap->ctllen + 1, M_TEMP, M_WAITOK|M_ZERO);
728 error = copyin(uap->ctl, ctl, uap->ctllen);
733 buf = malloc(uap->buflen + 1, M_TEMP, M_WAITOK|M_ZERO);
736 * Validate the descriptor
740 } else if ((u_int)uap->fd >= fdp->fd_nfiles ||
741 (fp = fdp->fd_ofiles[uap->fd]) == NULL) {
749 * Execute the internal kernel function and clean up.
751 error = kern_mountctl(path, uap->op, fp, ctl, uap->ctllen, buf, uap->buflen, &uap->sysmsg_result);
754 if (error == 0 && uap->sysmsg_result > 0)
755 error = copyout(buf, uap->buf, uap->sysmsg_result);
758 zfree(namei_zone, path);
767 * Execute a mount control operation by resolving the path to a mount point
768 * and calling vop_mountctl().
/*
 * kern_mountctl: kernel-internal half of mountctl(2).  Resolves path
 * (kernel space), requires the resulting vnode to be a filesystem root,
 * and dispatches to vop_mountctl() on the mount's vnode ops.
 *
 * NOTE(review): partial listing -- vput/cleanup and the trailing
 * arguments of the vop_mountctl call are not visible.
 */
771 kern_mountctl(const char *path, int op, struct file *fp,
772 const void *ctl, int ctllen,
773 void *buf, int buflen, int *res)
777 struct nlookupdata nd;
782 error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
784 error = nlookup(&nd);
786 error = cache_vget(nd.nl_ncp, nd.nl_cred, LK_EXCLUSIVE, &vp);
794 * Must be the root of the filesystem
796 if ((vp->v_flag & VROOT) == 0) {
800 error = vop_mountctl(mp->mnt_vn_use_ops, op, fp, ctl, ctllen,
/*
 * kern_statfs: shared backend for statfs(2).  Resolves the lookup,
 * refreshes mount statistics via VFS_STATFS, rewrites f_mntonname
 * relative to the caller's root (chroot-aware via cache_fullpath),
 * and copies the result into *buf, zeroing the fsid for non-root.
 *
 * NOTE(review): partial listing -- the suser() gate guarding the fsid
 * zeroing and the returns are not visible here.
 */
807 kern_statfs(struct nlookupdata *nd, struct statfs *buf)
809 struct thread *td = curthread;
810 struct proc *p = td->td_proc;
813 char *fullpath, *freepath;
816 if ((error = nlookup(nd)) != 0)
818 mp = nd->nl_ncp->nc_mount;
820 if ((error = VFS_STATFS(mp, sp, td)) != 0)
823 error = cache_fullpath(p, mp->mnt_ncp, &fullpath, &freepath);
826 bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
827 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
828 free(freepath, M_TEMP);
830 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
831 bcopy(sp, buf, sizeof(*buf));
832 /* Only root should have access to the fsid's. */
834 buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
839 * statfs_args(char *path, struct statfs *buf)
841 * Get filesystem statistics.
/*
 * statfs(2): thin wrapper -- nlookup the user path, call kern_statfs,
 * copy the statfs struct out to uap->buf.
 * NOTE(review): partial listing; nlookup_done/return lines missing.
 */
844 statfs(struct statfs_args *uap)
846 struct nlookupdata nd;
850 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
852 error = kern_statfs(&nd, &buf);
855 error = copyout(&buf, uap->buf, sizeof(*uap->buf));
/*
 * kern_fstatfs: like kern_statfs but starts from an open descriptor.
 * Gets the vnode behind fd, finds its mount, refreshes stats, rewrites
 * f_mntonname chroot-aware, and zeroes the fsid for non-root callers.
 *
 * NOTE(review): partial listing -- the suser() gate before the fsid
 * zeroing and the fdrop/return lines are not visible.
 */
860 kern_fstatfs(int fd, struct statfs *buf)
862 struct thread *td = curthread;
863 struct proc *p = td->td_proc;
867 char *fullpath, *freepath;
871 error = getvnode(p->p_fd, fd, &fp);
874 mp = ((struct vnode *)fp->f_data)->v_mount;
878 error = VFS_STATFS(mp, sp, td);
882 error = cache_fullpath(p, mp->mnt_ncp, &fullpath, &freepath);
885 bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
886 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
887 free(freepath, M_TEMP);
889 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
890 bcopy(sp, buf, sizeof(*buf));
892 /* Only root should have access to the fsid's. */
894 buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
899 * fstatfs_args(int fd, struct statfs *buf)
901 * Get filesystem statistics.
/*
 * fstatfs(2): wrapper around kern_fstatfs + copyout.
 * NOTE(review): partial listing; declarations and returns missing.
 */
904 fstatfs(struct fstatfs_args *uap)
909 error = kern_fstatfs(uap->fd, &buf);
912 error = copyout(&buf, uap->buf, sizeof(*uap->buf));
917 * getfsstat_args(struct statfs *buf, long bufsize, int flags)
919 * Get statistics on all filesystems.
/*
 * getfsstat(2): copy statistics for all (visible) mounted filesystems
 * into the user buffer.  Chrooted callers only see mounts that pass
 * chroot_visible_mnt(), with mount-on names rewritten relative to their
 * root.  Returns the number of entries (capped at what fits).
 *
 * NOTE(review): partial listing -- count++/sfsp advance, vfs_unbusy and
 * several closing braces are missing from this view.
 */
923 getfsstat(struct getfsstat_args *uap)
925 struct thread *td = curthread;
926 struct proc *p = td->td_proc;
927 struct mount *mp, *nmp;
928 struct statfs *sp, *sfsp;
930 long count, maxcount, error;
932 char *freepath, *fullpath;
/* NCF_ROOT clear on the process root ncp => the caller is chrooted. */
934 if (p != NULL && (p->p_fd->fd_nrdir->nc_flag & NCF_ROOT) == 0)
939 maxcount = uap->bufsize / sizeof(struct statfs);
942 lwkt_gettoken(&ilock, &mountlist_token);
943 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
944 if (vfs_busy(mp, LK_NOWAIT, &ilock, td)) {
945 nmp = TAILQ_NEXT(mp, mnt_list);
948 if (sfsp && count < maxcount) {
949 if (is_chrooted && !chroot_visible_mnt(mp, p)) {
950 lwkt_gettokref(&ilock);
951 nmp = TAILQ_NEXT(mp, mnt_list);
957 * If MNT_NOWAIT or MNT_LAZY is specified, do not
958 * refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY
959 * overrides MNT_WAIT.
961 if (((uap->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
962 (uap->flags & MNT_WAIT)) &&
963 (error = VFS_STATFS(mp, sp, td))) {
964 lwkt_gettokref(&ilock);
965 nmp = TAILQ_NEXT(mp, mnt_list);
969 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
971 error = cache_fullpath(p, mp->mnt_ncp, &fullpath, &freepath);
974 bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
975 strlcpy(sp->f_mntonname, fullpath,
976 sizeof(sp->f_mntonname));
977 free(freepath, M_TEMP);
979 error = copyout(sp, sfsp, sizeof(*sp));
987 lwkt_gettokref(&ilock);
988 nmp = TAILQ_NEXT(mp, mnt_list);
991 lwkt_reltoken(&ilock);
/* Report how many entries were actually stored (or total count if none fit). */
992 if (sfsp && count > maxcount)
993 uap->sysmsg_result = maxcount;
995 uap->sysmsg_result = count;
1000 * fchdir_args(int fd)
1002 * Change current working directory to a given file descriptor.
/*
 * fchdir(2): change the cwd to the directory open on descriptor fd.
 * Requires a VDIR vnode with an associated namecache entry, VEXEC
 * access, and traverses any mounts stacked on top of the directory
 * before installing the new fd_cdir/fd_ncdir.
 *
 * NOTE(review): partial listing -- unlock/vrele/cache_drop of the old
 * cwd and several loop braces are missing from this view.
 */
1006 fchdir(struct fchdir_args *uap)
1008 struct thread *td = curthread;
1009 struct proc *p = td->td_proc;
1010 struct filedesc *fdp = p->p_fd;
1011 struct vnode *vp, *ovp;
1014 struct namecache *ncp, *oncp;
1015 struct namecache *nct;
1018 if ((error = getvnode(fdp, SCARG(uap, fd), &fp)) != 0)
1020 vp = (struct vnode *)fp->f_data;
1022 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1023 if (vp->v_type != VDIR || fp->f_ncp == NULL)
1026 error = VOP_ACCESS(vp, VEXEC, p->p_ucred, td);
1031 ncp = cache_hold(fp->f_ncp);
/* Hop across any filesystems mounted on this directory. */
1032 while (!error && (mp = vp->v_mountedhere) != NULL) {
1033 error = nlookup_mp(mp, &nct);
1035 cache_unlock(nct); /* leave ref intact */
1038 error = vget(vp, LK_SHARED, td);
1039 KKASSERT(error == 0);
1046 oncp = fdp->fd_ncdir;
1047 VOP_UNLOCK(vp, 0, td); /* leave ref intact */
1049 fdp->fd_ncdir = ncp;
/*
 * kern_chdir: shared backend for chdir(2).  Runs the lookup, verifies
 * the target via checkvp_chdir(), then swaps the process cwd vnode and
 * namecache entry to the looked-up ones.
 *
 * NOTE(review): partial listing -- fd_cdir assignment, drop of the old
 * references, and returns are not visible here.
 */
1060 kern_chdir(struct nlookupdata *nd)
1062 struct thread *td = curthread;
1063 struct proc *p = td->td_proc;
1064 struct filedesc *fdp = p->p_fd;
1065 struct vnode *vp, *ovp;
1066 struct namecache *oncp;
1069 if ((error = nlookup(nd)) != 0)
1071 if ((vp = nd->nl_ncp->nc_vp) == NULL)
1073 if ((error = vget(vp, LK_SHARED, td)) != 0)
1076 error = checkvp_chdir(vp, td);
1077 VOP_UNLOCK(vp, 0, td);
1080 oncp = fdp->fd_ncdir;
1081 cache_unlock(nd->nl_ncp); /* leave reference intact */
1082 fdp->fd_ncdir = nd->nl_ncp;
1094 * chdir_args(char *path)
1096 * Change current working directory (``.'').
/*
 * chdir(2): wrapper -- nlookup_init on the user path, then kern_chdir.
 * NOTE(review): partial listing; nlookup_done/return lines missing.
 */
1099 chdir(struct chdir_args *uap)
1101 struct nlookupdata nd;
1104 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1106 error = kern_chdir(&nd);
1112 * Helper function for raised chroot(2) security function: Refuse if
1113 * any filedescriptors are open directories.
/*
 * chroot_refuse_vdir_fds: scan the descriptor table; used to refuse
 * chroot(2) when any open descriptor refers to a directory (blocks
 * fchdir()-based chroot escapes).  K&R-style definition.
 *
 * NOTE(review): partial listing -- the error return inside the loop
 * and the final return are not visible.
 */
1116 chroot_refuse_vdir_fds(fdp)
1117 struct filedesc *fdp;
1124 for (fd = 0; fd < fdp->fd_nfiles ; fd++) {
1125 error = getvnode(fdp, fd, &fp);
1128 vp = (struct vnode *)fp->f_data;
1129 if (vp->v_type != VDIR)
1137 * This sysctl determines if we will allow a process to chroot(2) if it
1138 * has a directory open:
1139 * 0: disallowed for all processes.
1140 * 1: allowed for processes that were not already chroot(2)'ed.
1141 * 2: allowed for all processes.
/* Policy knob for chroot with open directory fds: 0=never, 1=only if not
 * already chrooted, 2=always (see comment above in the file). */
1144 static int chroot_allow_open_directories = 1;
1146 SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW,
1147 &chroot_allow_open_directories, 0, "");
1150 * chroot to the specified namecache entry. We obtain the vp from the
1151 * namecache data. The passed ncp must be locked and referenced and will
1152 * remain locked and referenced on return.
/*
 * kern_chroot: root-only.  Optionally refuses open directory fds per the
 * sysctl above, validates the target with checkvp_chdir(), then installs
 * vp/ncp as the process root (fd_rdir/fd_nrdir) and, if no jail dir is
 * set yet, as fd_jdir/fd_njdir as well.
 *
 * NOTE(review): partial listing -- vref/fd_jdir assignment and error
 * unwinding are not visible here.
 */
1155 kern_chroot(struct namecache *ncp)
1157 struct thread *td = curthread;
1158 struct proc *p = td->td_proc;
1159 struct filedesc *fdp = p->p_fd;
1164 * Only root can chroot
1166 if ((error = suser_cred(p->p_ucred, PRISON_ROOT)) != 0)
1170 * Disallow open directory descriptors (fchdir() breakouts).
1172 if (chroot_allow_open_directories == 0 ||
1173 (chroot_allow_open_directories == 1 && fdp->fd_rdir != rootvnode)) {
1174 if ((error = chroot_refuse_vdir_fds(fdp)) != 0)
1177 if ((vp = ncp->nc_vp) == NULL)
1180 if ((error = vget(vp, LK_SHARED, td)) != 0)
1184 * Check the validity of vp as a directory to change to and
1185 * associate it with rdir/jdir.
1187 error = checkvp_chdir(vp, td);
1188 VOP_UNLOCK(vp, 0, td); /* leave reference intact */
1190 vrele(fdp->fd_rdir);
1191 fdp->fd_rdir = vp; /* reference inherited by fd_rdir */
1192 cache_drop(fdp->fd_nrdir);
1193 fdp->fd_nrdir = cache_hold(ncp);
1194 if (fdp->fd_jdir == NULL) {
1197 fdp->fd_njdir = cache_hold(ncp);
1206 * chroot_args(char *path)
1208 * Change notion of root (``/'') directory.
/*
 * chroot(2): wrapper -- resolve the user path and hand the locked ncp
 * to kern_chroot().
 * NOTE(review): partial listing; nlookup_done/return lines missing.
 */
1212 chroot(struct chroot_args *uap)
1214 struct thread *td = curthread;
1215 struct nlookupdata nd;
1218 KKASSERT(td->td_proc);
1219 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1224 error = nlookup(&nd);
1226 error = kern_chroot(nd.nl_ncp);
1232 * Common routine for chroot and chdir. Given a locked, referenced vnode,
1233 * determine whether it is legal to chdir to the vnode. The vnode's state
1234 * is not changed by this call.
/*
 * checkvp_chdir: shared validity check for chdir/chroot targets --
 * must be a directory and pass a VEXEC access check.  Does not alter
 * the vnode's lock/ref state.
 * NOTE(review): partial listing; declarations and return missing.
 */
1237 checkvp_chdir(struct vnode *vp, struct thread *td)
1241 if (vp->v_type != VDIR)
1244 error = VOP_ACCESS(vp, VEXEC, td->td_proc->p_ucred, td);
/*
 * kern_open: shared backend for open(2).  Allocates a struct file,
 * performs the vn_open() lookup/open, installs the file in the
 * descriptor table, applies O_EXLOCK/O_SHLOCK advisory locks, and
 * handles the p_dupfd fdopen() special case and concurrent-close races.
 * *res receives the new descriptor index on success.
 *
 * NOTE(review): partial listing -- the embedded numbering skips lines;
 * several braces, vrele/vput calls and returns are absent.  Refcount
 * reasoning below reflects the surviving comments, not verified code.
 */
1249 kern_open(struct nlookupdata *nd, int oflags, int mode, int *res)
1251 struct thread *td = curthread;
1252 struct proc *p = td->td_proc;
1253 struct filedesc *fdp = p->p_fd;
1258 int type, indx, error;
/* O_ACCMODE fully set (RDONLY|WRONLY|RDWR together) is invalid. */
1261 if ((oflags & O_ACCMODE) == O_ACCMODE)
1263 flags = FFLAGS(oflags);
1264 error = falloc(p, &nfp, NULL);
/* Creation mode: requested bits minus umask, never setuid-on-text. */
1268 cmode = ((mode &~ fdp->fd_cmask) & ALLPERMS) &~ S_ISTXT;
1271 * XXX p_dupfd is a real mess. It allows a device to return a
1272 * file descriptor to be duplicated rather then doing the open
1278 * Call vn_open() to do the lookup and assign the vnode to the
1279 * file pointer. vn_open() does not change the ref count on fp
1280 * and the vnode, on success, will be inherited by the file pointer
1283 nd->nl_flags |= NLC_LOCKVP;
1284 error = vn_open(nd, fp, flags, cmode);
1288 * handle special fdopen() case. bleh. dupfdopen() is
1289 * responsible for dropping the old contents of ofiles[indx]
1292 * Note that if fsetfd() succeeds it will add a ref to fp
1293 * which represents the fd_ofiles[] assignment. We must still
1294 * drop our reference.
1296 if ((error == ENODEV || error == ENXIO) && p->p_dupfd >= 0) {
1297 if (fsetfd(p, fp, &indx) == 0) {
1298 error = dupfdopen(fdp, indx, p->p_dupfd, flags, error);
1301 fdrop(fp, td); /* our ref */
1304 if (fdp->fd_ofiles[indx] == fp) {
1305 fdp->fd_ofiles[indx] = NULL;
1306 fdrop(fp, td); /* fd_ofiles[] ref */
1310 fdrop(fp, td); /* our ref */
1311 if (error == ERESTART)
1317 * ref the vnode for ourselves so it can't be ripped out from under
1318 * is. XXX need an ND flag to request that the vnode be returned
1321 vp = (struct vnode *)fp->f_data;
1323 if ((error = fsetfd(p, fp, &indx)) != 0) {
1330 * If no error occurs the vp will have been assigned to the file
1336 * There should be 2 references on the file, one from the descriptor
1337 * table, and one for us.
1339 * Handle the case where someone closed the file (via its file
1340 * descriptor) while we were blocked. The end result should look
1341 * like opening the file succeeded but it was immediately closed.
1343 if (fp->f_count == 1) {
1344 KASSERT(fdp->fd_ofiles[indx] != fp,
1345 ("Open file descriptor lost all refs"));
/* Optional advisory lock requested atomically with the open. */
1353 if (flags & (O_EXLOCK | O_SHLOCK)) {
1354 lf.l_whence = SEEK_SET;
1357 if (flags & O_EXLOCK)
1358 lf.l_type = F_WRLCK;
1360 lf.l_type = F_RDLCK;
1362 if ((flags & FNONBLOCK) == 0)
1365 if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
1367 * lock request failed. Normally close the descriptor
1368 * but handle the case where someone might have dup()d
1369 * it when we weren't looking. One reference is
1370 * owned by the descriptor array, the other by us.
1373 if (fdp->fd_ofiles[indx] == fp) {
1374 fdp->fd_ofiles[indx] = NULL;
1380 fp->f_flag |= FHASLOCK;
1382 /* assert that vn_open created a backing object if one is needed */
1383 KASSERT(!vn_canvmio(vp) || VOP_GETVOBJECT(vp, NULL) == 0,
1384 ("open: vmio vnode has no backing object after vn_open"));
1389 * release our private reference, leaving the one associated with the
1390 * descriptor table intact.
1398 * open_args(char *path, int flags, int mode)
1400 * Check permissions, allocate an open file structure,
1401 * and call the device open routine if any.
/*
 * open(2): wrapper -- resolve the user path and delegate to kern_open,
 * returning the new fd via sysmsg_result.
 * NOTE(review): partial listing; nlookup_done/return lines missing.
 */
1404 open(struct open_args *uap)
1406 struct nlookupdata nd;
1409 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1411 error = kern_open(&nd, uap->flags,
1412 uap->mode, &uap->sysmsg_result);
/*
 * kern_mknod: shared backend for mknod(2).  Device/bad-sector nodes
 * require PRISON_ROOT privilege; the target must not exist (NLC_CREATE
 * lookup), then va_type is derived from the S_IFMT bits and the node is
 * created with VOP_NMKNOD (or VOP_NWHITEOUT for whiteouts).
 *
 * NOTE(review): partial listing -- switch case labels/breaks and the
 * whiteout branch condition are missing from this view.
 */
1419 kern_mknod(struct nlookupdata *nd, int mode, int dev)
1421 struct namecache *ncp;
1422 struct thread *td = curthread;
1423 struct proc *p = td->td_proc;
1431 switch (mode & S_IFMT) {
1437 error = suser_cred(p->p_ucred, PRISON_ROOT);
1444 nd->nl_flags |= NLC_CREATE;
1445 if ((error = nlookup(nd)) != 0)
1452 vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask;
1453 vattr.va_rdev = dev;
1456 switch (mode & S_IFMT) {
1457 case S_IFMT: /* used by badsect to flag bad sectors */
1458 vattr.va_type = VBAD;
1461 vattr.va_type = VCHR;
1464 vattr.va_type = VBLK;
1475 error = VOP_NWHITEOUT(ncp, nd->nl_cred, NAMEI_CREATE);
1478 error = VOP_NMKNOD(ncp, &vp, nd->nl_cred, &vattr);
1487 * mknod_args(char *path, int mode, int dev)
1489 * Create a special file.
/*
 * mknod(2): wrapper -- lookup (no symlink follow) then kern_mknod.
 * NOTE(review): partial listing; nlookup_done/return lines missing.
 */
1492 mknod(struct mknod_args *uap)
1494 struct nlookupdata nd;
1497 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
1499 error = kern_mknod(&nd, uap->mode, uap->dev);
/*
 * kern_mkfifo: shared backend for mkfifo(2).  NLC_CREATE lookup, then
 * create a VFIFO node with mode masked by the process umask.
 * NOTE(review): partial listing -- VATTR_NULL init, the existence check
 * and returns are not visible here.
 */
1505 kern_mkfifo(struct nlookupdata *nd, int mode)
1507 struct namecache *ncp;
1508 struct thread *td = curthread;
1509 struct proc *p = td->td_proc;
1516 nd->nl_flags |= NLC_CREATE;
1517 if ((error = nlookup(nd)) != 0)
1524 vattr.va_type = VFIFO;
1525 vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask;
1527 error = VOP_NMKNOD(ncp, &vp, nd->nl_cred, &vattr);
1534 * mkfifo_args(char *path, int mode)
1536 * Create a named pipe.
/*
 * mkfifo(2): wrapper -- lookup then kern_mkfifo.
 * NOTE(review): partial listing; nlookup_done/return lines missing.
 */
1539 mkfifo(struct mkfifo_args *uap)
1541 struct nlookupdata nd;
1544 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
1546 error = kern_mkfifo(&nd, uap->mode);
/*
 * kern_link: shared backend for link(2).  Resolves the source (must not
 * be a directory, per POSIX), locks its vnode, unlocks the source ncp to
 * avoid deadlocking against the target lookup, requires the target not
 * to exist, then issues VOP_NLINK.
 *
 * NOTE(review): partial listing -- vput/return lines and the
 * target-exists error value are not visible here.
 */
1552 kern_link(struct nlookupdata *nd, struct nlookupdata *linknd)
1554 struct thread *td = curthread;
1559 * Lookup the source and obtained a locked vnode.
1561 * XXX relookup on vget failure / race ?
1564 if ((error = nlookup(nd)) != 0)
1566 vp = nd->nl_ncp->nc_vp;
1567 KKASSERT(vp != NULL);
1568 if (vp->v_type == VDIR)
1569 return (EPERM); /* POSIX */
1570 if ((error = vget(vp, LK_EXCLUSIVE, td)) != 0)
1574 * Unlock the source so we can lookup the target without deadlocking
1575 * (XXX vp is locked already, possible other deadlock?). The target
1578 KKASSERT(nd->nl_flags & NLC_NCPISLOCKED);
1579 nd->nl_flags &= ~NLC_NCPISLOCKED;
1580 cache_unlock(nd->nl_ncp);
1582 linknd->nl_flags |= NLC_CREATE;
1583 if ((error = nlookup(linknd)) != 0) {
1587 if (linknd->nl_ncp->nc_vp) {
1593 * Finally run the new API VOP.
1595 error = VOP_NLINK(linknd->nl_ncp, vp, linknd->nl_cred);
1601 * link_args(char *path, char *link)
1603 * Make a hard file link.
/*
 * link(2): wrapper -- init lookups for source (follow symlinks) and
 * target (no follow), then kern_link.
 * NOTE(review): partial listing; the source nlookup_done/return missing.
 */
1606 link(struct link_args *uap)
1608 struct nlookupdata nd, linknd;
1611 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1613 error = nlookup_init(&linknd, uap->link, UIO_USERSPACE, 0);
1615 error = kern_link(&nd, &linknd);
1616 nlookup_done(&linknd);
/*
 * kern_symlink: shared backend for symlink(2).  NLC_CREATE lookup of the
 * link name, then VOP_NSYMLINK with the given mode and target path.
 * NOTE(review): partial listing -- VATTR_NULL, existence check and
 * returns are not visible here.
 */
1623 kern_symlink(struct nlookupdata *nd, char *path, int mode)
1625 struct namecache *ncp;
1631 nd->nl_flags |= NLC_CREATE;
1632 if ((error = nlookup(nd)) != 0)
1639 vattr.va_mode = mode;
1640 error = VOP_NSYMLINK(ncp, &vp, nd->nl_cred, &vattr, path);
1647 * symlink(char *path, char *link)
1649 * Make a symbolic link.
/*
 * symlink(2): copy in the target path into a namei_zone buffer, look up
 * the link name, and create the symlink with mode = ACCESSPERMS minus
 * the process umask.  The path buffer is returned to the zone at exit.
 * NOTE(review): partial listing; nlookup_done/return lines missing.
 */
1652 symlink(struct symlink_args *uap)
1654 struct thread *td = curthread;
1655 struct nlookupdata nd;
1660 path = zalloc(namei_zone);
1661 error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
1663 error = nlookup_init(&nd, uap->link, UIO_USERSPACE, 0);
1665 mode = ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask;
1666 error = kern_symlink(&nd, path, mode);
1670 zfree(namei_zone, path);
/* NOTE(review): elided listing -- numbers are original line numbers. */
/*
 * undelete(): remove a whiteout entry (union/overlay filesystems) by
 * looking the path up with NLC_DELETE and calling VOP_NWHITEOUT()
 * with NAMEI_DELETE.
 */
1675 * undelete_args(char *path)
1677 * Delete a whiteout from the filesystem.
1681 undelete(struct undelete_args *uap)
1683 struct nlookupdata nd;
1686 error = nlookup_init(&nd, SCARG(uap, path), UIO_USERSPACE, 0);
1688 nd.nl_flags |= NLC_DELETE;
1690 error = nlookup(&nd);
1692 error = VOP_NWHITEOUT(nd.nl_ncp, nd.nl_cred, NAMEI_DELETE);
/*
 * kern_unlink(): worker for unlink().  NLC_DELETE lookup, then
 * VOP_NREMOVE() on the resolved namecache entry.
 */
1698 kern_unlink(struct nlookupdata *nd)
1700 struct namecache *ncp;
1704 nd->nl_flags |= NLC_DELETE;
1705 if ((error = nlookup(nd)) != 0)
1708 error = VOP_NREMOVE(ncp, nd->nl_cred);
/* unlink(): syscall wrapper around kern_unlink() (no symlink follow). */
1713 * unlink_args(char *path)
1715 * Delete a name from the filesystem.
1718 unlink(struct unlink_args *uap)
1720 struct nlookupdata nd;
1723 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
1725 error = kern_unlink(&nd);
/* NOTE(review): elided listing -- numbers are original line numbers. */
/*
 * kern_lseek(): reposition a vnode-backed descriptor.  Validates fd,
 * requires DTYPE_VNODE; visible cases: SEEK_CUR (offset added),
 * SEEK_END (offset + va_size from VOP_GETATTR), SEEK_SET (absolute).
 * Result returned through *res.
 * NOTE(review): f_offset is updated without obvious locking here --
 * elided code may serialize this; cannot confirm from the listing.
 */
1731 kern_lseek(int fd, off_t offset, int whence, off_t *res)
1733 struct thread *td = curthread;
1734 struct proc *p = td->td_proc;
1735 struct filedesc *fdp = p->p_fd;
1740 if ((u_int)fd >= fdp->fd_nfiles ||
1741 (fp = fdp->fd_ofiles[fd]) == NULL)
1743 if (fp->f_type != DTYPE_VNODE)
1747 fp->f_offset += offset;
1750 error=VOP_GETATTR((struct vnode *)fp->f_data, &vattr, td);
1753 fp->f_offset = offset + vattr.va_size;
1756 fp->f_offset = offset;
1761 *res = fp->f_offset;
/* lseek(): syscall wrapper; result delivered via sysmsg_offset. */
1766 * lseek_args(int fd, int pad, off_t offset, int whence)
1768 * Reposition read/write file offset.
1771 lseek(struct lseek_args *uap)
1775 error = kern_lseek(uap->fd, uap->offset, uap->whence,
1776 &uap->sysmsg_offset);
/* NOTE(review): elided listing -- numbers are original line numbers. */
/*
 * kern_access(): worker for access().  Resolves the path, vgets the
 * vnode via cache_vget(), and when flags are non-zero checks write
 * permission with vn_writechk() and VOP_ACCESS().  aflags==0 means
 * existence-check only.  ESTALE triggers a one-shot namecache
 * re-resolve (self-described as a hack).
 */
1782 kern_access(struct nlookupdata *nd, int aflags)
1784 struct thread *td = curthread;
1788 if ((error = nlookup(nd)) != 0)
1791 error = cache_vget(nd->nl_ncp, nd->nl_cred, LK_EXCLUSIVE, &vp);
1795 /* Flags == 0 means only check for existence. */
1804 if ((flags & VWRITE) == 0 || (error = vn_writechk(vp)) == 0)
1805 error = VOP_ACCESS(vp, flags, nd->nl_cred, td);
1808 * If the file handle is stale we have to re-resolve the
1809 * entry. This is a hack at the moment.
1811 if (error == ESTALE) {
1812 cache_setunresolved(nd->nl_ncp);
1813 error = cache_resolve(nd->nl_ncp, nd->nl_cred);
/* access(): syscall wrapper, follows symlinks. */
1826 * access_args(char *path, int flags)
1828 * Check access permissions.
1831 access(struct access_args *uap)
1833 struct nlookupdata nd;
1836 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1838 error = kern_access(&nd, uap->flags);
/* NOTE(review): elided listing -- numbers are original line numbers. */
/*
 * kern_stat(): shared worker for stat()/lstat().  Resolves the path,
 * vgets the vnode LK_SHARED and fills *st via vn_stat().  Same ESTALE
 * re-resolve hack as kern_access().
 */
1844 kern_stat(struct nlookupdata *nd, struct stat *st)
1850 if ((error = nlookup(nd)) != 0)
1853 if ((vp = nd->nl_ncp->nc_vp) == NULL)
1857 if ((error = vget(vp, LK_SHARED, td)) != 0)
1859 error = vn_stat(vp, st, td);
1862 * If the file handle is stale we have to re-resolve the entry. This
1863 * is a hack at the moment.
1865 if (error == ESTALE) {
1866 cache_setunresolved(nd->nl_ncp);
1867 error = cache_resolve(nd->nl_ncp, nd->nl_cred);
/* stat(): follows symlinks (NLC_FOLLOW); copies result to userland. */
1878 * stat_args(char *path, struct stat *ub)
1880 * Get file status; this version follows links.
1883 stat(struct stat_args *uap)
1885 struct nlookupdata nd;
1889 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1891 error = kern_stat(&nd, &st);
1893 error = copyout(&st, uap->ub, sizeof(*uap->ub));
/* lstat(): identical except it does not follow the final symlink. */
1900 * lstat_args(char *path, struct stat *ub)
1902 * Get file status; this version does not follow links.
1905 lstat(struct lstat_args *uap)
1907 struct nlookupdata nd;
1911 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
1913 error = kern_stat(&nd, &st);
1915 error = copyout(&st, uap->ub, sizeof(*uap->ub));
/*
 * NOTE(review): elided listing -- the enclosing function header
 * (cvtnstat, presumably) is not visible here; these lines are a
 * field-by-field copy from a struct stat *sb to a struct nstat *nsb.
 */
1926 nsb->st_dev = sb->st_dev;
1927 nsb->st_ino = sb->st_ino;
1928 nsb->st_mode = sb->st_mode;
1929 nsb->st_nlink = sb->st_nlink;
1930 nsb->st_uid = sb->st_uid;
1931 nsb->st_gid = sb->st_gid;
1932 nsb->st_rdev = sb->st_rdev;
1933 nsb->st_atimespec = sb->st_atimespec;
1934 nsb->st_mtimespec = sb->st_mtimespec;
1935 nsb->st_ctimespec = sb->st_ctimespec;
1936 nsb->st_size = sb->st_size;
1937 nsb->st_blocks = sb->st_blocks;
1938 nsb->st_blksize = sb->st_blksize;
1939 nsb->st_flags = sb->st_flags;
1940 nsb->st_gen = sb->st_gen;
1941 nsb->st_qspare[0] = sb->st_qspare[0];
1942 nsb->st_qspare[1] = sb->st_qspare[1];
/*
 * nstat(): stat() variant returning struct nstat.  Follows symlinks,
 * uses cache_vget()+vn_stat(), converts via cvtnstat(), copies out.
 */
1946 * nstat_args(char *path, struct nstat *ub)
1950 nstat(struct nstat_args *uap)
1952 struct thread *td = curthread;
1956 struct nlookupdata nd;
1960 error = nlookup_init(&nd, SCARG(uap, path), UIO_USERSPACE, NLC_FOLLOW);
1962 error = nlookup(&nd);
1964 error = cache_vget(nd.nl_ncp, nd.nl_cred, LK_EXCLUSIVE, &vp);
1967 error = vn_stat(vp, &sb, td);
1970 cvtnstat(&sb, &nsb);
1971 error = copyout(&nsb, SCARG(uap, ub), sizeof(nsb));
/* nlstat(): same as nstat() but does not follow the final symlink.
 * (The comment block below was copied from lstat -- args actually
 * reference struct nlstat_args.) */
1978 * lstat_args(char *path, struct stat *ub)
1980 * Get file status; this version does not follow links.
1984 nlstat(struct nlstat_args *uap)
1986 struct thread *td = curthread;
1990 struct nlookupdata nd;
1994 error = nlookup_init(&nd, SCARG(uap, path), UIO_USERSPACE, 0);
1996 error = nlookup(&nd);
1998 error = cache_vget(nd.nl_ncp, nd.nl_cred, LK_EXCLUSIVE, &vp);
2001 error = vn_stat(vp, &sb, td);
2004 cvtnstat(&sb, &nsb);
2005 error = copyout(&nsb, SCARG(uap, ub), sizeof(nsb));
/* NOTE(review): elided listing -- numbers are original line numbers. */
/* pathconf(): resolve path (following links) and ask the fs via
 * VOP_PATHCONF(); result goes out through sysmsg_fds. */
2012 * pathconf_Args(char *path, int name)
2014 * Get configurable pathname variables.
2018 pathconf(struct pathconf_args *uap)
2020 struct nlookupdata nd;
2025 error = nlookup_init(&nd, SCARG(uap, path), UIO_USERSPACE, NLC_FOLLOW);
2027 error = nlookup(&nd);
2029 error = cache_vget(nd.nl_ncp, nd.nl_cred, LK_EXCLUSIVE, &vp);
2032 error = VOP_PATHCONF(vp, SCARG(uap, name), uap->sysmsg_fds);
/*
 * kern_readlink(): read a symlink's target into a USERSPACE buffer
 * via VOP_READLINK() (the copyin/copyout is buried in the VOP, as the
 * original comment notes).  Rejects non-VLNK vnodes.  *res receives
 * the number of bytes produced (count - residual).
 */
2040 * kern_readlink isn't properly split yet. There is a copyin burried
2041 * in VOP_READLINK().
2044 kern_readlink(struct nlookupdata *nd, char *buf, int count, int *res)
2046 struct thread *td = curthread;
2047 struct proc *p = td->td_proc;
2053 if ((error = nlookup(nd)) != 0)
2055 error = cache_vget(nd->nl_ncp, nd->nl_cred, LK_EXCLUSIVE, &vp);
2058 if (vp->v_type != VLNK) {
2061 aiov.iov_base = buf;
2062 aiov.iov_len = count;
2063 auio.uio_iov = &aiov;
2064 auio.uio_iovcnt = 1;
2065 auio.uio_offset = 0;
2066 auio.uio_rw = UIO_READ;
2067 auio.uio_segflg = UIO_USERSPACE;
2069 auio.uio_resid = count;
2070 error = VOP_READLINK(vp, &auio, p->p_ucred);
2073 *res = count - auio.uio_resid;
/* readlink(): syscall wrapper (no symlink follow on the final name). */
2078 * readlink_args(char *path, char *buf, int count)
2080 * Return target name of a symbolic link.
2083 readlink(struct readlink_args *uap)
2085 struct nlookupdata nd;
2088 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2090 error = kern_readlink(&nd, uap->buf, uap->count,
2091 &uap->sysmsg_result);
/* NOTE(review): elided listing -- numbers are original line numbers. */
/*
 * setfflags(): set BSD file flags on a vnode.  Non-root users may not
 * set flags on char/block devices (suser_cred w/ PRISON_ROOT), because
 * retained device ownership + root chown assumptions would be unsafe.
 * vget() is required so VINACTIVE is cleared before VOP_SETATTR().
 */
2098 setfflags(struct vnode *vp, int flags)
2100 struct thread *td = curthread;
2101 struct proc *p = td->td_proc;
2106 * Prevent non-root users from setting flags on devices. When
2107 * a device is reused, users can retain ownership of the device
2108 * if they are allowed to set flags and programs assume that
2109 * chown can't fail when done as root.
2111 if ((vp->v_type == VCHR || vp->v_type == VBLK) &&
2112 ((error = suser_cred(p->p_ucred, PRISON_ROOT)) != 0))
2116 * note: vget is required for any operation that might mod the vnode
2117 * so VINACTIVE is properly cleared.
2119 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2120 if ((error = vget(vp, LK_EXCLUSIVE, td)) == 0) {
2122 vattr.va_flags = flags;
2123 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
/* chflags(): path-based flag change; vref's the vnode after lookup. */
2130 * chflags(char *path, int flags)
2132 * Change flags of a file given a path name.
2136 chflags(struct chflags_args *uap)
2138 struct nlookupdata nd;
2143 error = nlookup_init(&nd, SCARG(uap, path), UIO_USERSPACE, NLC_FOLLOW);
2144 /* XXX Add NLC flag indicating modifying operation? */
2146 error = nlookup(&nd);
2148 error = cache_vref(nd.nl_ncp, nd.nl_cred, &vp);
2151 error = setfflags(vp, SCARG(uap, flags));
/* fchflags(): descriptor-based flag change via getvnode(). */
2158 * fchflags_args(int fd, int flags)
2160 * Change flags of a file given a file descriptor.
2164 fchflags(struct fchflags_args *uap)
2166 struct thread *td = curthread;
2167 struct proc *p = td->td_proc;
2171 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2173 return setfflags((struct vnode *) fp->f_data, SCARG(uap, flags));
/*
 * setfmode(): set file mode bits (masked to ALLPERMS) on a vnode;
 * same vget()/VINACTIVE note as setfflags().
 */
2177 setfmode(struct vnode *vp, int mode)
2179 struct thread *td = curthread;
2180 struct proc *p = td->td_proc;
2185 * note: vget is required for any operation that might mod the vnode
2186 * so VINACTIVE is properly cleared.
2188 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2189 if ((error = vget(vp, LK_EXCLUSIVE, td)) == 0) {
2191 vattr.va_mode = mode & ALLPERMS;
2192 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
/* kern_chmod(): lookup + cache_vref + setfmode(). */
2199 kern_chmod(struct nlookupdata *nd, int mode)
2204 /* XXX Add NLC flag indicating modifying operation? */
2205 if ((error = nlookup(nd)) != 0)
2207 if ((error = cache_vref(nd->nl_ncp, nd->nl_cred, &vp)) != 0)
2209 error = setfmode(vp, mode);
/* chmod(): follows symlinks. */
2215 * chmod_args(char *path, int mode)
2217 * Change mode of a file given path name.
2221 chmod(struct chmod_args *uap)
2223 struct nlookupdata nd;
2226 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2228 error = kern_chmod(&nd, uap->mode);
/* lchmod(): same as chmod() but does not follow the final symlink. */
2234 * lchmod_args(char *path, int mode)
2236 * Change mode of a file given path name (don't follow links.)
2240 lchmod(struct lchmod_args *uap)
2242 struct nlookupdata nd;
2245 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2247 error = kern_chmod(&nd, uap->mode);
/* fchmod(): descriptor-based mode change. */
2253 * fchmod_args(int fd, int mode)
2255 * Change mode of a file given a file descriptor.
2259 fchmod(struct fchmod_args *uap)
2261 struct thread *td = curthread;
2262 struct proc *p = td->td_proc;
2266 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2268 return setfmode((struct vnode *)fp->f_data, SCARG(uap, mode));
/* NOTE(review): elided listing -- numbers are original line numbers. */
/*
 * setfown(): set owner/group on a vnode via VOP_SETATTR(); vget() is
 * required so VINACTIVE is properly cleared before modification.
 */
2272 setfown(struct vnode *vp, uid_t uid, gid_t gid)
2274 struct thread *td = curthread;
2275 struct proc *p = td->td_proc;
2280 * note: vget is required for any operation that might mod the vnode
2281 * so VINACTIVE is properly cleared.
2283 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2284 if ((error = vget(vp, LK_EXCLUSIVE, td)) == 0) {
2288 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
/* kern_chown(): lookup + cache_vref + setfown(). */
2295 kern_chown(struct nlookupdata *nd, int uid, int gid)
2300 /* XXX Add NLC flag indicating modifying operation? */
2301 if ((error = nlookup(nd)) != 0)
2303 if ((error = cache_vref(nd->nl_ncp, nd->nl_cred, &vp)) != 0)
2305 error = setfown(vp, uid, gid);
/* chown(): follows symlinks. */
2311 * chown(char *path, int uid, int gid)
2313 * Set ownership given a path name.
2316 chown(struct chown_args *uap)
2318 struct nlookupdata nd;
2321 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2323 error = kern_chown(&nd, uap->uid, uap->gid);
/* lchown(): does not follow the final symlink. */
2329 * lchown_args(char *path, int uid, int gid)
2331 * Set ownership given a path name, do not cross symlinks.
2334 lchown(struct lchown_args *uap)
2336 struct nlookupdata nd;
2339 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2341 error = kern_chown(&nd, uap->uid, uap->gid);
/* fchown(): descriptor-based ownership change. */
2347 * fchown_args(int fd, int uid, int gid)
2349 * Set ownership given a file descriptor.
2353 fchown(struct fchown_args *uap)
2355 struct thread *td = curthread;
2356 struct proc *p = td->td_proc;
2360 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2362 return setfown((struct vnode *)fp->f_data,
2363 SCARG(uap, uid), SCARG(uap, gid));
/* NOTE(review): elided listing -- numbers are original line numbers. */
/*
 * getutimes(): convert an optional timeval pair to a timespec pair.
 * When tvp is NULL the current time is used (the tv[2] local and the
 * first TIMEVAL_TO_TIMESPEC suggest this; the microtime/getmicrotime
 * call itself is elided).
 */
2367 getutimes(const struct timeval *tvp, struct timespec *tsp)
2369 struct timeval tv[2];
2373 TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]);
2376 TIMEVAL_TO_TIMESPEC(&tvp[0], &tsp[0]);
2377 TIMEVAL_TO_TIMESPEC(&tvp[1], &tsp[1]);
/*
 * setutimes(): apply atime/mtime to a vnode.  nullflag set (caller
 * passed a NULL timeval) adds VA_UTIMES_NULL so the fs applies the
 * "owner or write permission" rule.  vget() clears VINACTIVE.
 */
2383 setutimes(struct vnode *vp, const struct timespec *ts, int nullflag)
2385 struct thread *td = curthread;
2386 struct proc *p = td->td_proc;
2391 * note: vget is required for any operation that might mod the vnode
2392 * so VINACTIVE is properly cleared.
2394 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2395 if ((error = vget(vp, LK_EXCLUSIVE, td)) == 0) {
2397 vattr.va_atime = ts[0];
2398 vattr.va_mtime = ts[1];
2400 vattr.va_vaflags |= VA_UTIMES_NULL;
2401 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
/* kern_utimes(): convert times, lookup, vref, setutimes(). */
2408 kern_utimes(struct nlookupdata *nd, struct timeval *tptr)
2410 struct timespec ts[2];
2414 if ((error = getutimes(tptr, ts)) != 0)
2416 /* XXX Add NLC flag indicating modifying operation? */
2417 if ((error = nlookup(nd)) != 0)
2419 if ((error = cache_vref(nd->nl_ncp, nd->nl_cred, &vp)) != 0)
2421 error = setutimes(vp, ts, tptr == NULL);
/* utimes(): copies in the optional timeval pair; follows symlinks. */
2427 * utimes_args(char *path, struct timeval *tptr)
2429 * Set the access and modification times of a file.
2432 utimes(struct utimes_args *uap)
2434 struct timeval tv[2];
2435 struct nlookupdata nd;
2439 error = copyin(uap->tptr, tv, sizeof(tv));
2443 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2445 error = kern_utimes(&nd, uap->tptr ? tv : NULL);
/* lutimes(): identical but does not follow the final symlink. */
2451 * lutimes_args(char *path, struct timeval *tptr)
2453 * Set the access and modification times of a file.
2456 lutimes(struct lutimes_args *uap)
2458 struct timeval tv[2];
2459 struct nlookupdata nd;
2463 error = copyin(uap->tptr, tv, sizeof(tv));
2467 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2469 error = kern_utimes(&nd, uap->tptr ? tv : NULL);
/* kern_futimes()/futimes(): descriptor-based variant of the above. */
2475 kern_futimes(int fd, struct timeval *tptr)
2477 struct thread *td = curthread;
2478 struct proc *p = td->td_proc;
2479 struct timespec ts[2];
2483 error = getutimes(tptr, ts);
2486 error = getvnode(p->p_fd, fd, &fp);
2489 error = setutimes((struct vnode *)fp->f_data, ts, tptr == NULL);
2494 * futimes_args(int fd, struct timeval *tptr)
2496 * Set the access and modification times of a file.
2499 futimes(struct futimes_args *uap)
2501 struct timeval tv[2];
2505 error = copyin(uap->tptr, tv, sizeof(tv));
2510 error = kern_futimes(uap->fd, uap->tptr ? tv : NULL);
/* NOTE(review): elided listing -- numbers are original line numbers. */
/*
 * kern_truncate(): path-based truncate.  Lookup + vref, lock the
 * vnode, reject directories, require write permission (vn_writechk +
 * VOP_ACCESS), then set va_size via VOP_SETATTR().
 */
2516 kern_truncate(struct nlookupdata *nd, off_t length)
2524 /* XXX Add NLC flag indicating modifying operation? */
2525 if ((error = nlookup(nd)) != 0)
2527 if ((error = cache_vref(nd->nl_ncp, nd->nl_cred, &vp)) != 0)
2529 VOP_LEASE(vp, nd->nl_td, nd->nl_cred, LEASE_WRITE);
2530 if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, nd->nl_td)) != 0) {
2534 if (vp->v_type == VDIR) {
2536 } else if ((error = vn_writechk(vp)) == 0 &&
2537 (error = VOP_ACCESS(vp, VWRITE, nd->nl_cred, nd->nl_td)) == 0) {
2539 vattr.va_size = length;
2540 error = VOP_SETATTR(vp, &vattr, nd->nl_cred, nd->nl_td);
/* truncate(): syscall wrapper, follows symlinks. */
2547 * truncate(char *path, int pad, off_t length)
2549 * Truncate a file given its path name.
2552 truncate(struct truncate_args *uap)
2554 struct nlookupdata nd;
2557 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2559 error = kern_truncate(&nd, uap->length);
/*
 * kern_ftruncate(): descriptor-based truncate.  Requires FWRITE on
 * the file pointer; permission check here uses fp->f_cred (the
 * credential at open time) rather than the caller's.
 */
2565 kern_ftruncate(int fd, off_t length)
2567 struct thread *td = curthread;
2568 struct proc *p = td->td_proc;
2576 if ((error = getvnode(p->p_fd, fd, &fp)) != 0)
2578 if ((fp->f_flag & FWRITE) == 0)
2580 vp = (struct vnode *)fp->f_data;
2581 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2582 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2583 if (vp->v_type == VDIR)
2585 else if ((error = vn_writechk(vp)) == 0) {
2587 vattr.va_size = length;
2588 error = VOP_SETATTR(vp, &vattr, fp->f_cred, td);
2590 VOP_UNLOCK(vp, 0, td);
/* ftruncate(): syscall wrapper. */
2595 * ftruncate_args(int fd, int pad, off_t length)
2597 * Truncate a file given a file descriptor.
2600 ftruncate(struct ftruncate_args *uap)
2604 error = kern_ftruncate(uap->fd, uap->length);
/* NOTE(review): elided listing -- numbers are original line numbers. */
/*
 * fsync(): flush an open file's dirty state.  Cleans the VM object's
 * pages first, then VOP_FSYNC(MNT_WAIT); on a softdep-mounted fs it
 * additionally calls the bioops io_fsync hook.
 */
2612 * Sync an open file.
2616 fsync(struct fsync_args *uap)
2618 struct thread *td = curthread;
2619 struct proc *p = td->td_proc;
2625 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2627 vp = (struct vnode *)fp->f_data;
2628 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2629 if (VOP_GETVOBJECT(vp, &obj) == 0)
2630 vm_object_page_clean(obj, 0, 0, 0);
2631 if ((error = VOP_FSYNC(vp, MNT_WAIT, td)) == 0 &&
2632 vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP) &&
2634 error = (*bioops.io_fsync)(vp);
2635 VOP_UNLOCK(vp, 0, td);
/* NOTE(review): elided listing -- numbers are original line numbers. */
/*
 * kern_rename(): worker for rename().  Sequence visible here:
 *  1. resolve source; require a parent ncp
 *  2. unlock source ncp, resolve target with NLC_CREATE
 *  3. no-op if source == target
 *  4. relock the source; on lock-order conflict (pointer comparison
 *     fromnd->nl_ncp > tond->nl_ncp) drop and retake both locks in a
 *     consistent order, re-resolving after each lock
 *  5. verify neither parent linkage changed during the unlock window
 *  6. both names and both parents must be on the same mount
 *  7. if the target exists, dir<->non-dir mixes are rejected
 *  8. walk target's parents to forbid renaming a dir into itself or a
 *     descendant, then VOP_NRENAME().
 */
2640 kern_rename(struct nlookupdata *fromnd, struct nlookupdata *tond)
2642 struct namecache *fncpd;
2643 struct namecache *tncpd;
2644 struct namecache *ncp;
2649 if ((error = nlookup(fromnd)) != 0)
2651 if ((fncpd = fromnd->nl_ncp->nc_parent) == NULL)
2656 * unlock the source ncp so we can lookup the target ncp without
2657 * deadlocking. The target may or may not exist so we do not check
2658 * for a target vp like kern_mkdir() and other creation functions do.
2660 * The source and target directories are ref'd and rechecked after
2661 * everything is relocked to determine if the source or target file
2664 KKASSERT(fromnd->nl_flags & NLC_NCPISLOCKED);
2665 fromnd->nl_flags &= ~NLC_NCPISLOCKED;
2666 cache_unlock(fromnd->nl_ncp);
2668 tond->nl_flags |= NLC_CREATE;
2669 if ((error = nlookup(tond)) != 0) {
2673 if ((tncpd = tond->nl_ncp->nc_parent) == NULL) {
2680 * If the source and target are the same there is nothing to do
2682 if (fromnd->nl_ncp == tond->nl_ncp) {
2689 * relock the source ncp
2691 if (cache_lock_nonblock(fromnd->nl_ncp) == 0) {
2692 cache_resolve(fromnd->nl_ncp, fromnd->nl_cred);
2693 } else if (fromnd->nl_ncp > tond->nl_ncp) {
2694 cache_lock(fromnd->nl_ncp);
2695 cache_resolve(fromnd->nl_ncp, fromnd->nl_cred);
2697 cache_unlock(tond->nl_ncp);
2698 cache_lock(fromnd->nl_ncp);
2699 cache_resolve(fromnd->nl_ncp, fromnd->nl_cred);
2700 cache_lock(tond->nl_ncp);
2701 cache_resolve(tond->nl_ncp, tond->nl_cred);
2703 fromnd->nl_flags |= NLC_NCPISLOCKED;
2706 * make sure the parent directories linkages are the same
2708 if (fncpd != fromnd->nl_ncp->nc_parent ||
2709 tncpd != tond->nl_ncp->nc_parent) {
2716 * Both the source and target must be within the same filesystem and
2717 * in the same filesystem as their parent directories within the
2718 * namecache topology.
2720 mp = fncpd->nc_mount;
2721 if (mp != tncpd->nc_mount || mp != fromnd->nl_ncp->nc_mount ||
2722 mp != tond->nl_ncp->nc_mount) {
2729 * If the target exists and either the source or target is a directory,
2730 * then both must be directories.
2732 if (tond->nl_ncp->nc_vp) {
2733 if (fromnd->nl_ncp->nc_vp->v_type == VDIR) {
2734 if (tond->nl_ncp->nc_vp->v_type != VDIR)
2736 } else if (tond->nl_ncp->nc_vp->v_type == VDIR) {
2742 * You cannot rename a source into itself or a subdirectory of itself.
2743 * We check this by travsersing the target directory upwards looking
2744 * for a match against the source.
2747 for (ncp = tncpd; ncp; ncp = ncp->nc_parent) {
2748 if (fromnd->nl_ncp == ncp) {
2759 error = VOP_NRENAME(fromnd->nl_ncp, tond->nl_ncp, tond->nl_cred);
/* rename(): syscall wrapper; neither path follows the final symlink. */
2764 * rename_args(char *from, char *to)
2766 * Rename files. Source and destination must either both be directories,
2767 * or both not be directories. If target is a directory, it must be empty.
2770 rename(struct rename_args *uap)
2772 struct nlookupdata fromnd, tond;
2775 error = nlookup_init(&fromnd, uap->from, UIO_USERSPACE, 0);
2777 error = nlookup_init(&tond, uap->to, UIO_USERSPACE, 0);
2779 error = kern_rename(&fromnd, &tond);
2780 nlookup_done(&tond);
2782 nlookup_done(&fromnd);
/* NOTE(review): elided listing -- numbers are original line numbers. */
/*
 * kern_mkdir(): worker for mkdir().  Lookup with NLC_WILLBEDIR |
 * NLC_CREATE, then VOP_NMKDIR() with mode masked by the process
 * cmask.
 */
2787 kern_mkdir(struct nlookupdata *nd, int mode)
2789 struct thread *td = curthread;
2790 struct proc *p = td->td_proc;
2791 struct namecache *ncp;
2797 nd->nl_flags |= NLC_WILLBEDIR | NLC_CREATE;
2798 if ((error = nlookup(nd)) != 0)
2806 vattr.va_type = VDIR;
2807 vattr.va_mode = (mode & ACCESSPERMS) &~ p->p_fd->fd_cmask;
2810 error = VOP_NMKDIR(ncp, &vp, p->p_ucred, &vattr);
/* mkdir(): syscall wrapper (no symlink follow). */
2817 * mkdir_args(char *path, int mode)
2819 * Make a directory file.
2823 mkdir(struct mkdir_args *uap)
2825 struct nlookupdata nd;
2828 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2830 error = kern_mkdir(&nd, uap->mode);
/* kern_rmdir(): NLC_DELETE lookup, then VOP_NRMDIR(). */
2836 kern_rmdir(struct nlookupdata *nd)
2838 struct namecache *ncp;
2842 nd->nl_flags |= NLC_DELETE;
2843 if ((error = nlookup(nd)) != 0)
2847 error = VOP_NRMDIR(ncp, nd->nl_cred);
/* rmdir(): syscall wrapper. */
2852 * rmdir_args(char *path)
2854 * Remove a directory file.
2858 rmdir(struct rmdir_args *uap)
2860 struct nlookupdata nd;
2863 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2865 error = kern_rmdir(&nd);
/* NOTE(review): elided listing -- numbers are original line numbers. */
/*
 * kern_getdirentries(): read directory entries into a userspace
 * buffer via VOP_READDIR() at the file's current offset.  Requires
 * FREAD and a VDIR vnode.  If nothing was read and the vnode is the
 * covered root of a union mount, transparently switch fp to the
 * covered vnode (union_dircheckp hook / MNT_UNION fallthrough) --
 * the retry logic itself is elided here.  *basep (if used) gets the
 * starting offset, *res the bytes produced.
 */
2871 kern_getdirentries(int fd, char *buf, u_int count, long *basep, int *res)
2873 struct thread *td = curthread;
2874 struct proc *p = td->td_proc;
2882 if ((error = getvnode(p->p_fd, fd, &fp)) != 0)
2884 if ((fp->f_flag & FREAD) == 0)
2886 vp = (struct vnode *)fp->f_data;
2888 if (vp->v_type != VDIR)
2890 aiov.iov_base = buf;
2891 aiov.iov_len = count;
2892 auio.uio_iov = &aiov;
2893 auio.uio_iovcnt = 1;
2894 auio.uio_rw = UIO_READ;
2895 auio.uio_segflg = UIO_USERSPACE;
2897 auio.uio_resid = count;
2898 /* vn_lock(vp, LK_SHARED | LK_RETRY, td); */
2899 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2900 loff = auio.uio_offset = fp->f_offset;
2901 error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL);
2902 fp->f_offset = auio.uio_offset;
2903 VOP_UNLOCK(vp, 0, td);
2906 if (count == auio.uio_resid) {
2907 if (union_dircheckp) {
2908 error = union_dircheckp(td, &vp, fp);
2914 if ((vp->v_flag & VROOT) &&
2915 (vp->v_mount->mnt_flag & MNT_UNION)) {
2916 struct vnode *tvp = vp;
2917 vp = vp->v_mount->mnt_vnodecovered;
2919 fp->f_data = (caddr_t)vp;
2928 *res = count - auio.uio_resid;
/* getdirentries(): wrapper that also copies the base offset out. */
2933 * getdirentries_args(int fd, char *buf, u_int conut, long *basep)
2935 * Read a block of directory entries in a file system independent format.
2938 getdirentries(struct getdirentries_args *uap)
2943 error = kern_getdirentries(uap->fd, uap->buf, uap->count, &base,
2944 &uap->sysmsg_result);
2947 error = copyout(&base, uap->basep, sizeof(*uap->basep));
/* getdents(): same worker, no base-offset out-parameter. */
2952 * getdents_args(int fd, char *buf, size_t count)
2955 getdents(struct getdents_args *uap)
2959 error = kern_getdirentries(uap->fd, uap->buf, uap->count, NULL,
2960 &uap->sysmsg_result);
/* NOTE(review): elided listing -- numbers are original line numbers. */
/*
 * umask(): return the old creation mask and install the new one
 * (masked to ALLPERMS).  fdp assignment from p->p_fd is elided.
 */
2966 * umask(int newmask)
2968 * Set the mode mask for creation of filesystem nodes.
2973 umask(struct umask_args *uap)
2975 struct thread *td = curthread;
2976 struct proc *p = td->td_proc;
2977 struct filedesc *fdp;
2980 uap->sysmsg_result = fdp->fd_cmask;
2981 fdp->fd_cmask = SCARG(uap, newmask) & ALLPERMS;
/*
 * revoke(): revoke access to a char/block device.  Caller must own
 * the vnode or be superuser (PRISON_ROOT); if the device has users
 * (count_udev > 0) the vnode is vx-locked and VOP_REVOKE(REVOKEALL)
 * rips it away from the filesystem.
 */
2986 * revoke(char *path)
2988 * Void all references to file by ripping underlying filesystem
2993 revoke(struct revoke_args *uap)
2995 struct thread *td = curthread;
2996 struct nlookupdata nd;
3003 error = nlookup_init(&nd, SCARG(uap, path), UIO_USERSPACE, NLC_FOLLOW);
3005 error = nlookup(&nd);
3007 error = cache_vref(nd.nl_ncp, nd.nl_cred, &vp);
3008 cred = crhold(nd.nl_cred);
3011 if (vp->v_type != VCHR && vp->v_type != VBLK)
3014 error = VOP_GETATTR(vp, &vattr, td);
3015 if (error == 0 && cred->cr_uid != vattr.va_uid)
3016 error = suser_cred(cred, PRISON_ROOT);
3017 if (error == 0 && count_udev(vp->v_udev) > 0) {
3018 if ((error = vx_lock(vp)) == 0) {
3019 VOP_REVOKE(vp, REVOKEALL);
/*
 * getvnode(): convert an fd to its struct file, accepting only
 * vnode- and fifo-backed descriptors.
 */
3030 * Convert a user file descriptor to a kernel file entry.
3033 getvnode(struct filedesc *fdp, int fd, struct file **fpp)
3037 if ((u_int)fd >= fdp->fd_nfiles ||
3038 (fp = fdp->fd_ofiles[fd]) == NULL)
3040 if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO)
/* NOTE(review): elided listing -- numbers are original line numbers. */
/*
 * getfh(): superuser-only.  Resolve the path, build an NFS file
 * handle (fsid from the mount + fid from VFS_VPTOFH) and copy it out.
 * The handle is bzero()'d first so no kernel stack bytes leak.
 */
3046 * getfh_args(char *fname, fhandle_t *fhp)
3048 * Get (NFS) file handle
3051 getfh(struct getfh_args *uap)
3053 struct thread *td = curthread;
3054 struct nlookupdata nd;
3060 * Must be super user
3062 if ((error = suser(td)) != 0)
3066 error = nlookup_init(&nd, uap->fname, UIO_USERSPACE, NLC_FOLLOW);
3068 error = nlookup(&nd);
3070 error = cache_vget(nd.nl_ncp, nd.nl_cred, LK_EXCLUSIVE, &vp);
3073 bzero(&fh, sizeof(fh));
3074 fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid;
3075 error = VFS_VPTOFH(vp, &fh.fh_fid);
3078 error = copyout(&fh, uap->fhp, sizeof(fh));
/* NOTE(review): elided listing -- numbers are original line numbers. */
/*
 * fhopen(): open a file by NFS file handle (for rpc.lockd).
 * Superuser-only -- removing the suser() check would bypass all path
 * permission checks, as the original warning says.  Outline of the
 * visible logic:
 *  - reject opens that are neither FREAD nor FWRITE, and O_CREAT
 *  - copy in the handle, map fsid -> mount, handle -> locked vnode
 *  - reject VLNK and VSOCK vnodes
 *  - for write opens: no directories, vn_writechk(), VOP_ACCESS()
 *  - O_TRUNC: relock dance then VOP_SETATTR (va_size elided)
 *  - falloc() a file, wire it to the vnode, VOP_OPEN(); on failure
 *    f_ops is set to badfileops so fdrop() won't VOP_CLOSE
 *  - create a VM object for VMIO, fsetfd() the descriptor
 *  - O_EXLOCK/O_SHLOCK: take an advisory flock via VOP_ADVLOCK,
 *    honoring FNONBLOCK; on failure undo the descriptor install,
 *    being careful about a concurrent dup()/close() of the slot
 */
3084 * fhopen_args(const struct fhandle *u_fhp, int flags)
3086 * syscall for the rpc.lockd to use to translate a NFS file handle into
3087 * an open descriptor.
3089 * warning: do not remove the suser() call or this becomes one giant
3093 fhopen(struct fhopen_args *uap)
3095 struct thread *td = curthread;
3096 struct proc *p = td->td_proc;
3101 struct vattr *vap = &vat;
3103 struct filedesc *fdp = p->p_fd;
3104 int fmode, mode, error, type;
3110 * Must be super user
3116 fmode = FFLAGS(SCARG(uap, flags));
3117 /* why not allow a non-read/write open for our lockd? */
3118 if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT))
3120 error = copyin(SCARG(uap,u_fhp), &fhp, sizeof(fhp));
3123 /* find the mount point */
3124 mp = vfs_getvfs(&fhp.fh_fsid);
3127 /* now give me my vnode, it gets returned to me locked */
3128 error = VFS_FHTOVP(mp, &fhp.fh_fid, &vp);
3132 * from now on we have to make sure not
3133 * to forget about the vnode
3134 * any error that causes an abort must vput(vp)
3135 * just set error = err and 'goto bad;'.
3141 if (vp->v_type == VLNK) {
3145 if (vp->v_type == VSOCK) {
3150 if (fmode & (FWRITE | O_TRUNC)) {
3151 if (vp->v_type == VDIR) {
3155 error = vn_writechk(vp);
3163 error = VOP_ACCESS(vp, mode, p->p_ucred, td);
3167 if (fmode & O_TRUNC) {
3168 VOP_UNLOCK(vp, 0, td); /* XXX */
3169 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
3170 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); /* XXX */
3173 error = VOP_SETATTR(vp, vap, p->p_ucred, td);
3179 * VOP_OPEN needs the file pointer so it can potentially override
3182 * WARNING! no f_ncp will be associated when fhopen()ing a directory.
3185 if ((error = falloc(p, &nfp, NULL)) != 0)
3189 fp->f_data = (caddr_t)vp;
3190 fp->f_flag = fmode & FMASK;
3191 fp->f_ops = &vnode_fileops;
3192 fp->f_type = DTYPE_VNODE;
3194 error = VOP_OPEN(vp, fmode, p->p_ucred, fp, td);
3197 * setting f_ops this way prevents VOP_CLOSE from being
3198 * called or fdrop() releasing the vp from v_data. Since
3199 * the VOP_OPEN failed we don't want to VOP_CLOSE.
3201 fp->f_ops = &badfileops;
3210 * The fp now owns a reference on the vnode. We still have our own
3216 * Make sure that a VM object is created for VMIO support. If this
3217 * fails just fdrop() normally to clean up.
3219 if (vn_canvmio(vp) == TRUE) {
3220 if ((error = vfs_object_create(vp, td)) != 0) {
3227 * The open was successful, associate it with a file descriptor.
3229 if ((error = fsetfd(p, fp, &indx)) != 0) {
3236 if (fmode & (O_EXLOCK | O_SHLOCK)) {
3237 lf.l_whence = SEEK_SET;
3240 if (fmode & O_EXLOCK)
3241 lf.l_type = F_WRLCK;
3243 lf.l_type = F_RDLCK;
3245 if ((fmode & FNONBLOCK) == 0)
3247 VOP_UNLOCK(vp, 0, td);
3248 if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
3250 * lock request failed. Normally close the descriptor
3251 * but handle the case where someone might have dup()d
3252 * or close()d it when we weren't looking.
3254 if (fdp->fd_ofiles[indx] == fp) {
3255 fdp->fd_ofiles[indx] = NULL;
3260 * release our private reference.
3266 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
3267 fp->f_flag |= FHASLOCK;
3269 if ((vp->v_type == VREG) && (VOP_GETVOBJECT(vp, NULL) != 0))
3270 vfs_object_create(vp, td);
3274 uap->sysmsg_result = indx;
/* NOTE(review): elided listing -- numbers are original line numbers. */
/*
 * fhstat(): stat() by NFS file handle; superuser-only.  Handle ->
 * mount -> vnode, then vn_stat() and copyout.
 */
3283 * fhstat_args(struct fhandle *u_fhp, struct stat *sb)
3286 fhstat(struct fhstat_args *uap)
3288 struct thread *td = curthread;
3296 * Must be super user
3302 error = copyin(SCARG(uap, u_fhp), &fh, sizeof(fhandle_t));
3306 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
3308 if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
3310 error = vn_stat(vp, &sb, td);
3314 error = copyout(&sb, SCARG(uap, sb), sizeof(sb));
/*
 * fhstatfs(): statfs() by NFS file handle; superuser-only.  Chrooted
 * processes may only see mounts visible from their root
 * (chroot_visible_mnt); for them f_mntonname is rewritten via
 * cache_fullpath() and the fsid is zeroed before copyout so mount
 * identity does not leak outside the chroot.
 */
3319 * fhstatfs_args(struct fhandle *u_fhp, struct statfs *buf)
3322 fhstatfs(struct fhstatfs_args *uap)
3324 struct thread *td = curthread;
3325 struct proc *p = td->td_proc;
3330 char *fullpath, *freepath;
3335 * Must be super user
3337 if ((error = suser(td)))
3340 if ((error = copyin(SCARG(uap, u_fhp), &fh, sizeof(fhandle_t))) != 0)
3343 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
3346 if (p != NULL && (p->p_fd->fd_nrdir->nc_flag & NCF_ROOT) == 0 &&
3347 !chroot_visible_mnt(mp, p))
3350 if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
3355 if ((error = VFS_STATFS(mp, sp, td)) != 0)
3358 error = cache_fullpath(p, mp->mnt_ncp, &fullpath, &freepath);
3361 bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
3362 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
3363 free(freepath, M_TEMP);
3365 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3367 bcopy(sp, &sb, sizeof(sb));
3368 sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
3371 return (copyout(sp, SCARG(uap, buf), sizeof(*sp)));
/* NOTE(review): elided listing -- numbers are original line numbers. */
/*
 * extattrctl(): push extended-attribute configuration into the VFS.
 * The attribute name is intentionally left in userspace for the
 * VFS_EXTATTRCTL op to consume (see original comment).
 */
3375 * Syscall to push extended attribute configuration information into the
3376 * VFS. Accepts a path, which it converts to a mountpoint, as well as
3377 * a command (int cmd), and attribute name and misc data. For now, the
3378 * attribute name is left in userspace for consumption by the VFS_op.
3379 * It will probably be changed to be copied into sysspace by the
3380 * syscall in the future, once issues with various consumers of the
3381 * attribute code have raised their hands.
3383 * Currently this is used only by UFS Extended Attributes.
3386 extattrctl(struct extattrctl_args *uap)
3388 struct nlookupdata nd;
3394 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3396 error = nlookup(&nd);
3398 mp = nd.nl_ncp->nc_mount;
3399 error = VFS_EXTATTRCTL(mp, SCARG(uap, cmd),
3400 SCARG(uap, attrname), SCARG(uap, arg),
/*
 * extattr_set_file(): writev()-style setter.  Copies in the attribute
 * name, resolves the path, builds a uio from the user iovec array
 * (MALLOCing when iovcnt > UIO_SMALLIOV, capped at UIO_MAXIOV, with
 * per-iov INT_MAX overflow checks on uio_resid) and calls
 * VOP_SETEXTATTR(); sysmsg_result gets the byte count consumed.
 */
3408 * Syscall to set a named extended attribute on a file or directory.
3409 * Accepts attribute name, and a uio structure pointing to the data to set.
3410 * The uio is consumed in the style of writev(). The real work happens
3411 * in VOP_SETEXTATTR().
3414 extattr_set_file(struct extattr_set_file_args *uap)
3416 char attrname[EXTATTR_MAXNAMELEN];
3417 struct iovec aiov[UIO_SMALLIOV];
3418 struct iovec *needfree;
3419 struct nlookupdata nd;
3428 error = copyin(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN);
3433 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3435 error = nlookup(&nd);
3437 error = cache_vget(nd.nl_ncp, nd.nl_cred, LK_EXCLUSIVE, &vp);
3444 iovlen = uap->iovcnt * sizeof(struct iovec);
3445 if (uap->iovcnt > UIO_SMALLIOV) {
3446 if (uap->iovcnt > UIO_MAXIOV) {
3450 MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
3456 auio.uio_iovcnt = uap->iovcnt;
3457 auio.uio_rw = UIO_WRITE;
3458 auio.uio_segflg = UIO_USERSPACE;
3459 auio.uio_td = nd.nl_td;
3460 auio.uio_offset = 0;
3461 if ((error = copyin(uap->iovp, iov, iovlen)))
3464 for (i = 0; i < uap->iovcnt; i++) {
3465 if (iov->iov_len > INT_MAX - auio.uio_resid) {
3469 auio.uio_resid += iov->iov_len;
3472 cnt = auio.uio_resid;
3473 error = VOP_SETEXTATTR(vp, attrname, &auio, nd.nl_cred, nd.nl_td);
3474 cnt -= auio.uio_resid;
3475 uap->sysmsg_result = cnt;
3480 FREE(needfree, M_IOV);
/*
 * extattr_get_file(): readv()-style getter; mirrors the setter but
 * with UIO_READ and VOP_GETEXTATTR().
 */
3485 * Syscall to get a named extended attribute on a file or directory.
3486 * Accepts attribute name, and a uio structure pointing to a buffer for the
3487 * data. The uio is consumed in the style of readv(). The real work
3488 * happens in VOP_GETEXTATTR();
3491 extattr_get_file(struct extattr_get_file_args *uap)
3493 char attrname[EXTATTR_MAXNAMELEN];
3494 struct iovec aiov[UIO_SMALLIOV];
3495 struct iovec *needfree;
3496 struct nlookupdata nd;
3505 error = copyin(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN);
3510 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3512 error = nlookup(&nd);
3514 error = cache_vget(nd.nl_ncp, nd.nl_cred, LK_EXCLUSIVE, &vp);
3520 iovlen = uap->iovcnt * sizeof (struct iovec);
3522 if (uap->iovcnt > UIO_SMALLIOV) {
3523 if (uap->iovcnt > UIO_MAXIOV) {
3527 MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
3533 auio.uio_iovcnt = uap->iovcnt;
3534 auio.uio_rw = UIO_READ;
3535 auio.uio_segflg = UIO_USERSPACE;
3536 auio.uio_td = nd.nl_td;
3537 auio.uio_offset = 0;
3538 if ((error = copyin(uap->iovp, iov, iovlen)))
3541 for (i = 0; i < uap->iovcnt; i++) {
3542 if (iov->iov_len > INT_MAX - auio.uio_resid) {
3546 auio.uio_resid += iov->iov_len;
3549 cnt = auio.uio_resid;
3550 error = VOP_GETEXTATTR(vp, attrname, &auio, nd.nl_cred, nd.nl_td);
3551 cnt -= auio.uio_resid;
3552 uap->sysmsg_result = cnt;
3557 FREE(needfree, M_IOV);
/*
 * extattr_delete_file(): deletion is a VOP_SETEXTATTR() with a NULL
 * uio, by convention.
 */
3562 * Syscall to delete a named extended attribute from a file or directory.
3563 * Accepts attribute name. The real work happens in VOP_SETEXTATTR().
3566 extattr_delete_file(struct extattr_delete_file_args *uap)
3568 char attrname[EXTATTR_MAXNAMELEN];
3569 struct nlookupdata nd;
3573 error = copyin(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN);
3578 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3580 error = nlookup(&nd);
3582 error = cache_vget(nd.nl_ncp, nd.nl_cred, LK_EXCLUSIVE, &vp);
3588 error = VOP_SETEXTATTR(vp, attrname, NULL, nd.nl_cred, nd.nl_td);
/* NOTE(review): elided listing -- numbers are original line numbers. */
/*
 * Buffer-pool statistics dump (function header elided; comment says
 * it is toggled by debug.syncprt).  Walks the three buffer free
 * queues and prints a histogram of buffer sizes in PAGE_SIZE buckets.
 */
3595 * print out statistics from the current status of the buffer pool
3596 * this can be toggeled by the system control option debug.syncprt
3605 int counts[(MAXBSIZE / PAGE_SIZE) + 1];
3606 static char *bname[3] = { "LOCKED", "LRU", "AGE" };
3608 for (dp = bufqueues, i = 0; dp < &bufqueues[3]; dp++, i++) {
3610 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
3613 TAILQ_FOREACH(bp, dp, b_freelist) {
3614 counts[bp->b_bufsize/PAGE_SIZE]++;
3618 printf("%s: total-%d", bname[i], count);
3619 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
3621 printf(", %d-%d", j * PAGE_SIZE, counts[j]);
/*
 * chroot_visible_mnt(): is mount mp visible to (possibly chrooted)
 * process p?  First walk up from (elided start, presumably
 * mp->mnt_ncp) to see if the mount lies below the process root;
 * otherwise walk from the process root toward mp->mnt_ncp -- crossing
 * any NCF_MOUNTPT entry on the way means a different filesystem, so
 * not visible.  Returns non-zero when the walk reaches mp->mnt_ncp.
 */
3628 chroot_visible_mnt(struct mount *mp, struct proc *p)
3630 struct namecache *ncp;
3632 * First check if this file system is below
3636 while (ncp != NULL && ncp != p->p_fd->fd_nrdir)
3637 ncp = ncp->nc_parent;
3640 * This is not below the chroot path.
3642 * Check if the chroot path is on the same filesystem,
3643 * by determing if we have to cross a mount point
3644 * before reaching mp->mnt_ncp.
3646 ncp = p->p_fd->fd_nrdir;
3647 while (ncp != NULL && ncp != mp->mnt_ncp) {
3648 if (ncp->nc_flag & NCF_MOUNTPT) {
3652 ncp = ncp->nc_parent;
3655 return(ncp != NULL);