2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94
39 * $FreeBSD: src/sys/kern/vfs_syscalls.c,v 1.151.2.18 2003/04/04 20:35:58 tegge Exp $
40 * $DragonFly: src/sys/kern/vfs_syscalls.c,v 1.4 2003/06/25 03:55:57 dillon Exp $
43 /* For 4.3 integer FS ID compatibility */
44 #include "opt_compat.h"
46 #include <sys/param.h>
47 #include <sys/systm.h>
49 #include <sys/sysent.h>
50 #include <sys/malloc.h>
51 #include <sys/mount.h>
52 #include <sys/sysproto.h>
53 #include <sys/filedesc.h>
54 #include <sys/kernel.h>
55 #include <sys/fcntl.h>
57 #include <sys/linker.h>
59 #include <sys/unistd.h>
60 #include <sys/vnode.h>
62 #include <sys/namei.h>
63 #include <sys/dirent.h>
64 #include <sys/extattr.h>
66 #include <machine/limits.h>
67 #include <miscfs/union/union.h>
68 #include <sys/sysctl.h>
70 #include <vm/vm_object.h>
71 #include <vm/vm_zone.h>
72 #include <vm/vm_page.h>
74 #include <sys/file2.h>
/*
 * Forward declarations for file-local helpers and mount-policy tunables.
 * NOTE(review): this chunk is an incomplete extraction (original line
 * numbers are fused into the text and many lines are missing); comments
 * describe only what the visible code shows.
 */
76 static int change_dir __P((struct nameidata *ndp, struct thread *td));
77 static void checkdirs __P((struct vnode *olddp));
78 static int chroot_refuse_vdir_fds __P((struct filedesc *fdp));
79 static int getutimes __P((const struct timeval *, struct timespec *));
80 static int setfown __P((struct vnode *, uid_t, gid_t));
81 static int setfmode __P((struct vnode *, int));
82 static int setfflags __P((struct vnode *, int));
83 static int setutimes __P((struct vnode *, const struct timespec *, int));
/* Policy knob: when non-zero, non-root users may mount filesystems. */
84 static int usermount = 0; /* if 1, non-root can mount fs. */
/* Hook filled in by the union filesystem for directory-read checks. */
86 int (*union_dircheckp) __P((struct thread *, struct vnode **, struct file *));
/* Export the knob above as the "vfs.usermount" sysctl. */
88 SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, "");
91 * Virtual File System System Calls
95 * Mount a file system.
97 #ifndef _SYS_SYSPROTO_H_
106 * mount_args(char *type, char *path, int flags, caddr_t data)
/*
 * mount(2): mount a filesystem of type uap->type on the directory named
 * by uap->path, or update an existing mount when MNT_UPDATE is set.
 * NOTE(review): extraction gaps — several original lines (error exits,
 * braces, some declarations) are missing from this view.
 */
110 mount(struct mount_args *uap)
112 struct thread *td = curthread;
113 struct proc *p = td->td_proc;
116 struct vfsconf *vfsp;
117 int error, flag = 0, flag2 = 0;
123 char fstypename[MFSNAMELEN];
/* Reject non-root mounts unless vfs.usermount is enabled. */
125 if (usermount == 0 && (error = suser(td)))
128 * Do not allow NFS export by non-root users.
130 if (SCARG(uap, flags) & MNT_EXPORTED) {
136 * Silently enforce MNT_NOSUID and MNT_NODEV for non-root users
139 SCARG(uap, flags) |= MNT_NOSUID | MNT_NODEV;
141 * Get vnode to be covered
143 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
144 SCARG(uap, path), td);
145 if ((error = namei(&nd)) != 0)
147 NDFREE(&nd, NDF_ONLY_PNBUF);
/* --- MNT_UPDATE path: modify an already-mounted filesystem. --- */
149 if (SCARG(uap, flags) & MNT_UPDATE) {
150 if ((vp->v_flag & VROOT) == 0) {
156 flag2 = mp->mnt_kern_flag;
158 * We only allow the filesystem to be reloaded if it
159 * is currently mounted read-only.
161 if ((SCARG(uap, flags) & MNT_RELOAD) &&
162 ((mp->mnt_flag & MNT_RDONLY) == 0)) {
164 return (EOPNOTSUPP); /* Needs translation */
167 * Only root, or the user that did the original mount is
168 * permitted to update it.
170 if (mp->mnt_stat.f_owner != p->p_ucred->cr_uid &&
171 (error = suser(td))) {
175 if (vfs_busy(mp, LK_NOWAIT, 0, td)) {
/* Mark the covered vnode busy-for-mount under its interlock. */
179 simple_lock(&vp->v_interlock);
180 if ((vp->v_flag & VMOUNT) != 0 ||
181 vp->v_mountedhere != NULL) {
182 simple_unlock(&vp->v_interlock);
187 vp->v_flag |= VMOUNT;
188 simple_unlock(&vp->v_interlock);
190 SCARG(uap, flags) & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE);
191 VOP_UNLOCK(vp, 0, td);
195 * If the user is not root, ensure that they own the directory
196 * onto which we are attempting to mount.
198 if ((error = VOP_GETATTR(vp, &va, p->p_ucred, td)) ||
199 (va.va_uid != p->p_ucred->cr_uid &&
200 (error = suser(td)))) {
204 if ((error = vinvalbuf(vp, V_SAVE, p->p_ucred, td, 0, 0)) != 0) {
208 if (vp->v_type != VDIR) {
214 * Historically filesystem types were identified by number. If we
215 * get an integer for the filesystem type instead of a string, we
216 * check to see if it matches one of the historic filesystem types.
218 fstypenum = (uintptr_t)SCARG(uap, type);
219 if (fstypenum < maxvfsconf) {
220 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
221 if (vfsp->vfc_typenum == fstypenum)
227 strncpy(fstypename, vfsp->vfc_name, MFSNAMELEN);
229 #endif /* COMPAT_43 */
230 if ((error = copyinstr(SCARG(uap, type), fstypename, MFSNAMELEN, NULL)) != 0) {
/* Look up the named VFS; if absent, try loading its module (root only). */
234 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
235 if (!strcmp(vfsp->vfc_name, fstypename))
240 /* Only load modules for root (very important!) */
241 if ((error = suser(td)) != 0) {
245 error = linker_load_file(fstypename, &lf);
246 if (error || lf == NULL) {
253 /* lookup again, see if the VFS was loaded */
254 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
255 if (!strcmp(vfsp->vfc_name, fstypename))
259 linker_file_unload(lf);
264 simple_lock(&vp->v_interlock);
265 if ((vp->v_flag & VMOUNT) != 0 ||
266 vp->v_mountedhere != NULL) {
267 simple_unlock(&vp->v_interlock);
271 vp->v_flag |= VMOUNT;
272 simple_unlock(&vp->v_interlock);
275 * Allocate and initialize the filesystem.
277 mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK);
278 bzero((char *)mp, (u_long)sizeof(struct mount));
279 TAILQ_INIT(&mp->mnt_nvnodelist);
280 TAILQ_INIT(&mp->mnt_reservedvnlist);
281 mp->mnt_nvnodelistsize = 0;
282 lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
283 (void)vfs_busy(mp, LK_NOWAIT, 0, td);
284 mp->mnt_op = vfsp->vfc_vfsops;
286 vfsp->vfc_refcount++;
287 mp->mnt_stat.f_type = vfsp->vfc_typenum;
288 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
289 strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
290 mp->mnt_vnodecovered = vp;
291 mp->mnt_stat.f_owner = p->p_ucred->cr_uid;
292 mp->mnt_iosize_max = DFLTPHYS;
293 VOP_UNLOCK(vp, 0, td);
296 * Set the mount level flags.
298 if (SCARG(uap, flags) & MNT_RDONLY)
299 mp->mnt_flag |= MNT_RDONLY;
300 else if (mp->mnt_flag & MNT_RDONLY)
301 mp->mnt_kern_flag |= MNTK_WANTRDWR;
/* Clear then re-set the user-controllable flags from the syscall args. */
302 mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
303 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOATIME |
304 MNT_NOSYMFOLLOW | MNT_IGNORE |
305 MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
306 mp->mnt_flag |= SCARG(uap, flags) & (MNT_NOSUID | MNT_NOEXEC |
307 MNT_NODEV | MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_FORCE |
308 MNT_NOSYMFOLLOW | MNT_IGNORE |
309 MNT_NOATIME | MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
311 * Mount the filesystem.
312 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
313 * get. No freeing of cn_pnbuf.
315 error = VFS_MOUNT(mp, SCARG(uap, path), SCARG(uap, data), &nd, td);
/* Update path: fold the VFS_MOUNT result back into the mount flags. */
316 if (mp->mnt_flag & MNT_UPDATE) {
317 if (mp->mnt_kern_flag & MNTK_WANTRDWR)
318 mp->mnt_flag &= ~MNT_RDONLY;
319 mp->mnt_flag &=~ (MNT_UPDATE | MNT_RELOAD | MNT_FORCE);
320 mp->mnt_kern_flag &=~ MNTK_WANTRDWR;
323 mp->mnt_kern_flag = flag2;
324 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
325 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
326 if (mp->mnt_syncer == NULL)
327 error = vfs_allocate_syncvnode(mp);
329 if (mp->mnt_syncer != NULL)
330 vrele(mp->mnt_syncer);
331 mp->mnt_syncer = NULL;
334 simple_lock(&vp->v_interlock);
335 vp->v_flag &= ~VMOUNT;
336 simple_unlock(&vp->v_interlock);
340 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
342 * Put the new filesystem on the mount list after root.
346 simple_lock(&vp->v_interlock);
347 vp->v_flag &= ~VMOUNT;
348 vp->v_mountedhere = mp;
349 simple_unlock(&vp->v_interlock);
350 simple_lock(&mountlist_slock);
351 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
352 simple_unlock(&mountlist_slock);
354 VOP_UNLOCK(vp, 0, td);
355 if ((mp->mnt_flag & MNT_RDONLY) == 0)
356 error = vfs_allocate_syncvnode(mp);
358 if ((error = VFS_START(mp, 0, td)) != 0)
/* Failure path for a fresh mount: undo VMOUNT, drop refs, free mp. */
361 simple_lock(&vp->v_interlock);
362 vp->v_flag &= ~VMOUNT;
363 simple_unlock(&vp->v_interlock);
364 mp->mnt_vfc->vfc_refcount--;
366 free((caddr_t)mp, M_MOUNT);
373 * Scan all active processes to see if any of them have a current
374 * or root directory onto which the new filesystem has just been
375 * mounted. If so, replace them with the new mount point.
/*
 * NOTE(review): extraction gaps — reference-count adjustments around the
 * cdir/rdir swaps are not visible here.
 */
378 checkdirs(struct vnode *olddp)
380 struct filedesc *fdp;
/* Only the mount's own reference remains: nothing else can point here. */
384 if (olddp->v_usecount == 1)
386 if (VFS_ROOT(olddp->v_mountedhere, &newdp))
387 panic("mount: lost mount");
388 LIST_FOREACH(p, &allproc, p_list) {
390 if (fdp->fd_cdir == olddp) {
393 fdp->fd_cdir = newdp;
395 if (fdp->fd_rdir == olddp) {
398 fdp->fd_rdir = newdp;
/* The system root vnode itself may also need to be redirected. */
401 if (rootvnode == olddp) {
410 * Unmount a file system.
412 * Note: unmount takes a path to the vnode mounted on as argument,
413 * not special file (as before).
415 #ifndef _SYS_SYSPROTO_H_
416 struct unmount_args {
422 * umount_args(char *path, int flags)
/*
 * unmount(2): look up the path, validate permissions and that the vnode
 * is the root of its mount, then hand off to dounmount().
 * NOTE(review): extraction gaps — error-exit statements are missing here.
 */
426 unmount(struct unmount_args *uap)
428 struct thread *td = curthread;
429 struct proc *p = td->td_proc;
436 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
437 SCARG(uap, path), td);
438 if ((error = namei(&nd)) != 0)
441 NDFREE(&nd, NDF_ONLY_PNBUF);
445 * Only root, or the user that did the original mount is
446 * permitted to unmount this filesystem.
448 if ((mp->mnt_stat.f_owner != p->p_ucred->cr_uid) &&
449 (error = suser(td))) {
455 * Don't allow unmounting the root file system.
457 if (mp->mnt_flag & MNT_ROOTFS) {
463 * Must be the root of the filesystem
465 if ((vp->v_flag & VROOT) == 0) {
470 return (dounmount(mp, SCARG(uap, flags), td));
474 * Do the actual file system unmount.
/*
 * Drains the mount lock, syncs the filesystem, calls VFS_UNMOUNT, and on
 * success removes the mount from the global list and frees it; on failure
 * restores the syncer and flags.  NOTE(review): extraction gaps — several
 * statements (wakeups, error returns) are missing from this view.
 */
477 dounmount(struct mount *mp, int flags, struct thread *td)
479 struct vnode *coveredvp;
482 struct proc *p = td->td_proc;
/* Refuse if an unmount of this mount is already in progress. */
486 simple_lock(&mountlist_slock);
487 if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
488 simple_unlock(&mountlist_slock);
491 mp->mnt_kern_flag |= MNTK_UNMOUNT;
492 /* Allow filesystems to detect that a forced unmount is in progress. */
493 if (flags & MNT_FORCE)
494 mp->mnt_kern_flag |= MNTK_UNMOUNTF;
495 error = lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK |
496 ((flags & MNT_FORCE) ? 0 : LK_NOWAIT), &mountlist_slock, td);
498 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
499 if (mp->mnt_kern_flag & MNTK_MWAIT)
504 if (mp->mnt_flag & MNT_EXPUBLIC)
505 vfs_setpublicfs(NULL, NULL, NULL);
/* Flush dirty pages/caches before asking the VFS to unmount. */
507 vfs_msync(mp, MNT_WAIT);
508 async_flag = mp->mnt_flag & MNT_ASYNC;
509 mp->mnt_flag &=~ MNT_ASYNC;
510 cache_purgevfs(mp); /* remove cache entries for this file sys */
511 if (mp->mnt_syncer != NULL)
512 vrele(mp->mnt_syncer);
513 if (((mp->mnt_flag & MNT_RDONLY) ||
514 (error = VFS_SYNC(mp, MNT_WAIT, p->p_ucred, td)) == 0) ||
516 error = VFS_UNMOUNT(mp, flags, td);
517 simple_lock(&mountlist_slock);
/* Unmount failed: restore the syncer vnode and async flag. */
519 if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL)
520 (void) vfs_allocate_syncvnode(mp);
521 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
522 mp->mnt_flag |= async_flag;
523 lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK | LK_REENABLE,
524 &mountlist_slock, td);
525 if (mp->mnt_kern_flag & MNTK_MWAIT)
/* Unmount succeeded: detach from mountlist and the covered vnode. */
529 TAILQ_REMOVE(&mountlist, mp, mnt_list);
530 if ((coveredvp = mp->mnt_vnodecovered) != NULLVP) {
531 coveredvp->v_mountedhere = (struct mount *)0;
534 mp->mnt_vfc->vfc_refcount--;
535 if (!TAILQ_EMPTY(&mp->mnt_nvnodelist))
536 panic("unmount: dangling vnode");
537 lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_slock, td);
538 if (mp->mnt_kern_flag & MNTK_MWAIT)
540 free((caddr_t)mp, M_MOUNT);
545 * Sync each mounted filesystem.
547 #ifndef _SYS_SYSPROTO_H_
/* Debug knob exported as "debug.syncprt"; visible use not shown here. */
554 static int syncprt = 0;
555 SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, "");
/*
 * sync(2): walk the mount list and push dirty data on every writable
 * filesystem with MNT_NOWAIT (best effort, non-blocking per fs).
 * NOTE(review): extraction gaps — loop close/vfs_unbusy not visible.
 */
560 sync(struct sync_args *uap)
562 struct thread *td = curthread;
563 struct proc *p = td->td_proc;
564 struct mount *mp, *nmp;
567 simple_lock(&mountlist_slock);
568 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
/* Skip mounts we cannot busy without sleeping. */
569 if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, td)) {
570 nmp = TAILQ_NEXT(mp, mnt_list);
573 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
/* Temporarily force synchronous behavior for the flush. */
574 asyncflag = mp->mnt_flag & MNT_ASYNC;
575 mp->mnt_flag &= ~MNT_ASYNC;
576 vfs_msync(mp, MNT_NOWAIT);
577 VFS_SYNC(mp, MNT_NOWAIT,
578 ((p != NULL) ? p->p_ucred : NOCRED), td);
579 mp->mnt_flag |= asyncflag;
581 simple_lock(&mountlist_slock);
582 nmp = TAILQ_NEXT(mp, mnt_list);
585 simple_unlock(&mountlist_slock);
588 * XXX don't call vfs_bufstats() yet because that routine
589 * was not imported in the Lite2 merge.
594 #endif /* DIAGNOSTIC */
599 /* XXX PRISON: could be per prison flag */
/* When zero, jailed (prison) processes may not manipulate quotas. */
600 static int prison_quotas;
602 SYSCTL_INT(_kern_prison, OID_AUTO, quotas, CTLFLAG_RW, &prison_quotas, 0, "");
606 * quotactl_args(char *path, int fcmd, int uid, caddr_t arg)
608 * Change filesystem quotas.
/*
 * quotactl(2): resolve the path to its mount and forward the request to
 * the filesystem via VFS_QUOTACTL.  NOTE(review): extraction gaps —
 * error-return statements are missing from this view.
 */
612 quotactl(struct quotactl_args *uap)
614 struct thread *td = curthread;
615 struct proc *p = td->td_proc;
621 if (p->p_ucred->cr_prison && !prison_quotas)
623 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
624 if ((error = namei(&nd)) != 0)
626 mp = nd.ni_vp->v_mount;
627 NDFREE(&nd, NDF_ONLY_PNBUF);
629 return (VFS_QUOTACTL(mp, SCARG(uap, cmd), SCARG(uap, uid),
630 SCARG(uap, arg), td));
634 * statfs_args(char *path, struct statfs *buf)
636 * Get filesystem statistics.
/*
 * statfs(2): resolve path, query the filesystem via VFS_STATFS, and copy
 * the result to userland.  Filesystem IDs are zeroed in the copy made
 * for unprivileged callers (see the f_fsid lines below).
 * NOTE(review): extraction gaps — the suser() branch selecting sb vs sp
 * for the copyout is not fully visible.
 */
640 statfs(struct statfs_args *uap)
642 struct thread *td = curthread;
643 struct proc *p = td->td_proc;
651 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
652 if ((error = namei(&nd)) != 0)
654 mp = nd.ni_vp->v_mount;
656 NDFREE(&nd, NDF_ONLY_PNBUF);
658 error = VFS_STATFS(mp, sp, td);
661 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
/* Hide the filesystem ID from unprivileged callers. */
663 bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
664 sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
667 return (copyout((caddr_t)sp, (caddr_t)SCARG(uap, buf), sizeof(*sp)));
671 * fstatfs_args(int fd, struct statfs *buf)
673 * Get filesystem statistics.
/*
 * fstatfs(2): like statfs(2) but starts from an open file descriptor.
 * NOTE(review): extraction gaps — error-exit statements are missing.
 */
677 fstatfs(struct fstatfs_args *uap)
679 struct thread *td = curthread;
680 struct proc *p = td->td_proc;
683 register struct statfs *sp;
688 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
690 mp = ((struct vnode *)fp->f_data)->v_mount;
694 error = VFS_STATFS(mp, sp, td);
697 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
/* Hide the filesystem ID from unprivileged callers. */
699 bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
700 sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
703 return (copyout((caddr_t)sp, (caddr_t)SCARG(uap, buf), sizeof(*sp)));
707 * getfsstat_args(struct statfs *buf, long bufsize, int flags)
709 * Get statistics on all filesystems.
/*
 * getfsstat(2): walk the mount list copying per-fs statistics into the
 * user buffer (if non-NULL), up to maxcount entries; returns the number
 * of mounts via p->p_retval[0].  NOTE(review): extraction gaps — the
 * count increment and sfsp advance are not visible in this view.
 */
713 getfsstat(struct getfsstat_args *uap)
715 struct thread *td = curthread;
716 struct proc *p = td->td_proc;
717 struct mount *mp, *nmp;
720 long count, maxcount, error;
722 maxcount = SCARG(uap, bufsize) / sizeof(struct statfs);
723 sfsp = (caddr_t)SCARG(uap, buf);
725 simple_lock(&mountlist_slock);
726 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
/* Skip mounts we cannot busy without sleeping. */
727 if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, td)) {
728 nmp = TAILQ_NEXT(mp, mnt_list);
731 if (sfsp && count < maxcount) {
734 * If MNT_NOWAIT or MNT_LAZY is specified, do not
735 * refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY
736 * overrides MNT_WAIT.
738 if (((SCARG(uap, flags) & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
739 (SCARG(uap, flags) & MNT_WAIT)) &&
740 (error = VFS_STATFS(mp, sp, td))) {
741 simple_lock(&mountlist_slock);
742 nmp = TAILQ_NEXT(mp, mnt_list);
746 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
747 error = copyout((caddr_t)sp, sfsp, sizeof(*sp));
755 simple_lock(&mountlist_slock);
756 nmp = TAILQ_NEXT(mp, mnt_list);
759 simple_unlock(&mountlist_slock);
/* Report how many entries exist, clamped to the user's buffer size. */
760 if (sfsp && count > maxcount)
761 p->p_retval[0] = maxcount;
763 p->p_retval[0] = count;
768 * fchdir_args(int fd)
770 * Change current working directory to a given file descriptor.
/*
 * fchdir(2): validate the descriptor's vnode is a searchable directory,
 * then walk down through any mounts stacked on it before installing it
 * as fd_cdir.  NOTE(review): extraction gaps — the final fd_cdir
 * assignment and old-vnode release are not visible here.
 */
774 fchdir(struct fchdir_args *uap)
776 struct thread *td = curthread;
777 struct proc *p = td->td_proc;
778 struct filedesc *fdp = p->p_fd;
779 struct vnode *vp, *tdp;
784 if ((error = getvnode(fdp, SCARG(uap, fd), &fp)) != 0)
786 vp = (struct vnode *)fp->f_data;
788 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
789 if (vp->v_type != VDIR)
792 error = VOP_ACCESS(vp, VEXEC, p->p_ucred, td);
/* If a filesystem is mounted on this directory, descend to its root. */
793 while (!error && (mp = vp->v_mountedhere) != NULL) {
794 if (vfs_busy(mp, 0, 0, td))
796 error = VFS_ROOT(mp, &tdp);
807 VOP_UNLOCK(vp, 0, td);
814 * chdir_args(char *path)
816 * Change current working directory (``.'').
/*
 * chdir(2): uses the shared change_dir() helper for lookup and
 * permission checks, then installs the new vnode as fd_cdir.
 * NOTE(review): extraction gaps — release of the previous cdir vnode
 * is not visible here.
 */
820 chdir(struct chdir_args *uap)
822 struct thread *td = curthread;
823 struct proc *p = td->td_proc;
824 struct filedesc *fdp = p->p_fd;
828 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
829 SCARG(uap, path), td);
830 if ((error = change_dir(&nd, td)) != 0)
832 NDFREE(&nd, NDF_ONLY_PNBUF);
834 fdp->fd_cdir = nd.ni_vp;
839 * Helper function for raised chroot(2) security function: Refuse if
840 * any filedescriptors are open directories.
/*
 * Scans every slot of the descriptor table; intended to return an error
 * if any open descriptor is a directory (used to harden chroot(2)).
 * NOTE(review): extraction gaps — the error return inside the VDIR case
 * is not visible in this view.
 */
843 chroot_refuse_vdir_fds(fdp)
844 struct filedesc *fdp;
851 for (fd = 0; fd < fdp->fd_nfiles ; fd++) {
852 error = getvnode(fdp, fd, &fp);
855 vp = (struct vnode *)fp->f_data;
856 if (vp->v_type != VDIR)
864 * This sysctl determines if we will allow a process to chroot(2) if it
865 * has a directory open:
866 * 0: disallowed for all processes.
867 * 1: allowed for processes that were not already chroot(2)'ed.
868 * 2: allowed for all processes.
871 static int chroot_allow_open_directories = 1;
873 SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW,
874 &chroot_allow_open_directories, 0, "");
877 * chroot_args(char *path)
879 * Change notion of root (``/'') directory.
/*
 * chroot(2): requires (prison-aware) superuser credentials, optionally
 * refuses when open directory descriptors exist per the sysctl above,
 * then installs the looked-up vnode as fd_rdir (and fd_jdir if unset).
 * NOTE(review): extraction gaps — error exits and the fd_jdir guard
 * condition are not visible here.
 */
883 chroot(struct chroot_args *uap)
885 struct thread *td = curthread;
886 struct proc *p = td->td_proc;
887 struct filedesc *fdp = p->p_fd;
892 error = suser_cred(p->p_ucred, PRISON_ROOT);
895 if (chroot_allow_open_directories == 0 ||
896 (chroot_allow_open_directories == 1 && fdp->fd_rdir != rootvnode))
897 error = chroot_refuse_vdir_fds(fdp);
900 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
901 SCARG(uap, path), td);
902 if ((error = change_dir(&nd, td)) != 0)
904 NDFREE(&nd, NDF_ONLY_PNBUF);
906 fdp->fd_rdir = nd.ni_vp;
908 fdp->fd_jdir = nd.ni_vp;
915 * Common routine for chroot and chdir.
/*
 * Performs the namei lookup (caller set up ndp), verifies the result is
 * a directory, and checks search (VEXEC) permission before unlocking.
 * NOTE(review): extraction gaps — the namei call and error returns are
 * not visible in this view.
 */
918 change_dir(struct nameidata *ndp, struct thread *td)
927 if (vp->v_type != VDIR)
930 error = VOP_ACCESS(vp, VEXEC, ndp->ni_cnd.cn_cred, td);
934 VOP_UNLOCK(vp, 0, td);
939 * open_args(char *path, int flags, int mode)
941 * Check permissions, allocate an open file structure,
942 * and call the device open routine if any.
/*
 * open(2): allocates a descriptor and file struct, calls vn_open(), and
 * handles the fdopen() ENODEV/ENXIO special case, races with concurrent
 * close, and O_EXLOCK/O_SHLOCK advisory locking.
 * NOTE(review): extraction gaps — several error exits, the fp/vp
 * assignments after vn_open, and fdrop calls are not visible here.
 */
945 open(struct open_args *uap)
947 struct thread *td = curthread;
948 struct proc *p = td->td_proc;
949 struct filedesc *fdp = p->p_fd;
952 int cmode, flags, oflags;
954 int type, indx, error;
958 oflags = SCARG(uap, flags);
/* O_ACCMODE with both bits set is invalid. */
959 if ((oflags & O_ACCMODE) == O_ACCMODE)
961 flags = FFLAGS(oflags);
962 error = falloc(p, &nfp, &indx);
/* Creation mode: apply the umask, never allow the sticky bit here. */
966 cmode = ((SCARG(uap, mode) &~ fdp->fd_cmask) & ALLPERMS) &~ S_ISTXT;
967 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
968 p->p_dupfd = -indx - 1; /* XXX check for fdopen */
970 * Bump the ref count to prevent another process from closing
971 * the descriptor while we are blocked in vn_open()
974 error = vn_open(&nd, flags, cmode);
977 * release our own reference
982 * handle special fdopen() case. bleh. dupfdopen() is
983 * responsible for dropping the old contents of ofiles[indx]
986 if ((error == ENODEV || error == ENXIO) &&
987 p->p_dupfd >= 0 && /* XXX from fdopen */
989 dupfdopen(fdp, indx, p->p_dupfd, flags, error)) == 0) {
990 p->p_retval[0] = indx;
994 * Clean up the descriptor, but only if another thread hadn't
995 * replaced or closed it.
997 if (fdp->fd_ofiles[indx] == fp) {
998 fdp->fd_ofiles[indx] = NULL;
1002 if (error == ERESTART)
1007 NDFREE(&nd, NDF_ONLY_PNBUF);
1011 * There should be 2 references on the file, one from the descriptor
1012 * table, and one for us.
1014 * Handle the case where someone closed the file (via its file
1015 * descriptor) while we were blocked. The end result should look
1016 * like opening the file succeeded but it was immediately closed.
1018 if (fp->f_count == 1) {
1019 KASSERT(fdp->fd_ofiles[indx] != fp,
1020 ("Open file descriptor lost all refs"));
1021 VOP_UNLOCK(vp, 0, td);
1022 vn_close(vp, flags & FMASK, fp->f_cred, td);
1024 p->p_retval[0] = indx;
1028 fp->f_data = (caddr_t)vp;
1029 fp->f_flag = flags & FMASK;
1031 fp->f_type = (vp->v_type == VFIFO ? DTYPE_FIFO : DTYPE_VNODE);
/* Apply an advisory whole-file lock if the caller asked for one. */
1032 if (flags & (O_EXLOCK | O_SHLOCK)) {
1033 lf.l_whence = SEEK_SET;
1036 if (flags & O_EXLOCK)
1037 lf.l_type = F_WRLCK;
1039 lf.l_type = F_RDLCK;
1041 if ((flags & FNONBLOCK) == 0)
1043 VOP_UNLOCK(vp, 0, td);
1044 if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
1046 * lock request failed. Normally close the descriptor
1047 * but handle the case where someone might have dup()d
1048 * it when we weren't looking. One reference is
1049 * owned by the descriptor array, the other by us.
1051 if (fdp->fd_ofiles[indx] == fp) {
1052 fdp->fd_ofiles[indx] = NULL;
1058 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1059 fp->f_flag |= FHASLOCK;
1061 /* assert that vn_open created a backing object if one is needed */
1062 KASSERT(!vn_canvmio(vp) || VOP_GETVOBJECT(vp, NULL) == 0,
1063 ("open: vmio vnode has no backing object after vn_open"));
1064 VOP_UNLOCK(vp, 0, td);
1067 * release our private reference, leaving the one associated with the
1068 * descriptor table intact.
1071 p->p_retval[0] = indx;
1077 * ocreat(char *path, int mode)
/*
 * Old creat(2) compatibility shim: rewrites the arguments into an
 * open(2) call with O_WRONLY|O_CREAT|O_TRUNC.
 */
1082 ocreat(struct ocreat_args *uap)
1084 struct open_args /* {
1085 syscallarg(char *) path;
1086 syscallarg(int) flags;
1087 syscallarg(int) mode;
1090 SCARG(&nuap, path) = SCARG(uap, path);
1091 SCARG(&nuap, mode) = SCARG(uap, mode);
1092 SCARG(&nuap, flags) = O_WRONLY | O_CREAT | O_TRUNC;
1093 return (open(&nuap));
1095 #endif /* COMPAT_43 */
1098 * mknod_args(char *path, int mode, int dev)
1100 * Create a special file.
/*
 * mknod(2): root-only (prison-aware) creation of device/special nodes;
 * maps S_IFMT bits to a vnode type and calls VOP_MKNOD (or VOP_WHITEOUT
 * for whiteouts).  NOTE(review): extraction gaps — the EEXIST path and
 * several case labels are not visible here.
 */
1104 mknod(struct mknod_args *uap)
1106 struct thread *td = curthread;
1107 struct proc *p = td->td_proc;
1112 struct nameidata nd;
1116 switch (SCARG(uap, mode) & S_IFMT) {
1122 error = suser_cred(p->p_ucred, PRISON_ROOT);
1128 NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), td);
1129 if ((error = namei(&nd)) != 0)
1136 vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ p->p_fd->fd_cmask;
1137 vattr.va_rdev = SCARG(uap, dev);
1140 switch (SCARG(uap, mode) & S_IFMT) {
1141 case S_IFMT: /* used by badsect to flag bad sectors */
1142 vattr.va_type = VBAD;
1145 vattr.va_type = VCHR;
1148 vattr.va_type = VBLK;
1159 VOP_LEASE(nd.ni_dvp, td, p->p_ucred, LEASE_WRITE);
1161 error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, CREATE);
1163 error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp,
1164 &nd.ni_cnd, &vattr);
1168 NDFREE(&nd, NDF_ONLY_PNBUF);
1171 NDFREE(&nd, NDF_ONLY_PNBUF);
/* Avoid double-unlocking when parent and target are the same vnode. */
1172 if (nd.ni_dvp == vp)
1179 ASSERT_VOP_UNLOCKED(nd.ni_dvp, "mknod");
1180 ASSERT_VOP_UNLOCKED(nd.ni_vp, "mknod");
1185 * mkfifo_args(char *path, int mode)
1187 * Create a named pipe.
/*
 * mkfifo(2): creates a VFIFO node via VOP_MKNOD; fails if the path
 * already exists.  NOTE(review): extraction gaps — the EEXIST return
 * and vput/vrele cleanup lines are not visible here.
 */
1191 mkfifo(struct mkfifo_args *uap)
1193 struct thread *td = curthread;
1194 struct proc *p = td->td_proc;
1197 struct nameidata nd;
1200 NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), td);
1201 if ((error = namei(&nd)) != 0)
/* Target already exists: release buffers and bail out. */
1203 if (nd.ni_vp != NULL) {
1204 NDFREE(&nd, NDF_ONLY_PNBUF);
1205 if (nd.ni_dvp == nd.ni_vp)
1213 vattr.va_type = VFIFO;
/* Apply the process umask to the requested mode. */
1214 vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ p->p_fd->fd_cmask;
1215 VOP_LEASE(nd.ni_dvp, td, p->p_ucred, LEASE_WRITE);
1216 error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
1219 NDFREE(&nd, NDF_ONLY_PNBUF);
1225 * link_args(char *path, char *link)
1227 * Make a hard file link.
/*
 * link(2): looks up the source, refuses directories (POSIX), then looks
 * up the link target's parent and calls VOP_LINK.
 * NOTE(review): extraction gaps — the EEXIST handling when the link
 * name exists, and vrele cleanup, are not visible here.
 */
1231 link(struct link_args *uap)
1233 struct thread *td = curthread;
1234 struct proc *p = td->td_proc;
1236 struct nameidata nd;
1240 NDINIT(&nd, LOOKUP, FOLLOW|NOOBJ, UIO_USERSPACE, SCARG(uap, path), td);
1241 if ((error = namei(&nd)) != 0)
1243 NDFREE(&nd, NDF_ONLY_PNBUF);
/* Hard links to directories are forbidden. */
1245 if (vp->v_type == VDIR)
1246 error = EPERM; /* POSIX */
1248 NDINIT(&nd, CREATE, LOCKPARENT|NOOBJ, UIO_USERSPACE, SCARG(uap, link), td);
1251 if (nd.ni_vp != NULL) {
1256 VOP_LEASE(nd.ni_dvp, td, p->p_ucred,
1258 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1259 error = VOP_LINK(nd.ni_dvp, vp, &nd.ni_cnd);
1261 NDFREE(&nd, NDF_ONLY_PNBUF);
1262 if (nd.ni_dvp == nd.ni_vp)
1269 ASSERT_VOP_UNLOCKED(nd.ni_dvp, "link");
1270 ASSERT_VOP_UNLOCKED(nd.ni_vp, "link");
1275 * symlink(char *path, char *link)
1277 * Make a symbolic link.
/*
 * symlink(2): copies the target path from userland into a namei-zone
 * buffer, looks up the link name's parent, and calls VOP_SYMLINK.
 * NOTE(review): extraction gaps — EEXIST handling and some cleanup
 * lines are not visible here.
 */
1281 symlink(struct symlink_args *uap)
1283 struct thread *td = curthread;
1284 struct proc *p = td->td_proc;
1288 struct nameidata nd;
/* Temporary kernel buffer for the user-supplied target string. */
1290 path = zalloc(namei_zone);
1291 if ((error = copyinstr(SCARG(uap, path), path, MAXPATHLEN, NULL)) != 0)
1294 NDINIT(&nd, CREATE, LOCKPARENT|NOOBJ, UIO_USERSPACE, SCARG(uap, link), td);
1295 if ((error = namei(&nd)) != 0)
1298 NDFREE(&nd, NDF_ONLY_PNBUF);
1299 if (nd.ni_dvp == nd.ni_vp)
/* New link inherits ACCESSPERMS filtered by the process umask. */
1308 vattr.va_mode = ACCESSPERMS &~ p->p_fd->fd_cmask;
1309 VOP_LEASE(nd.ni_dvp, td, p->p_ucred, LEASE_WRITE);
1310 error = VOP_SYMLINK(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr, path);
1311 NDFREE(&nd, NDF_ONLY_PNBUF);
1315 ASSERT_VOP_UNLOCKED(nd.ni_dvp, "symlink");
1316 ASSERT_VOP_UNLOCKED(nd.ni_vp, "symlink");
/* Return the path buffer to the namei zone on all paths reaching here. */
1318 zfree(namei_zone, path);
1323 * undelete_args(char *path)
1325 * Delete a whiteout from the filesystem.
/*
 * undelete(2): removes a union-fs whiteout entry via VOP_WHITEOUT with
 * the DELETE op.  Fails unless the lookup found a whiteout and no real
 * vnode.  NOTE(review): extraction gaps — the EEXIST return and some
 * cleanup lines are not visible here.
 */
1329 undelete(struct undelete_args *uap)
1331 struct thread *td = curthread;
1332 struct proc *p = td->td_proc;
1334 struct nameidata nd;
1337 NDINIT(&nd, DELETE, LOCKPARENT|DOWHITEOUT, UIO_USERSPACE,
1338 SCARG(uap, path), td);
/* Must have found a whiteout and nothing else at the path. */
1343 if (nd.ni_vp != NULLVP || !(nd.ni_cnd.cn_flags & ISWHITEOUT)) {
1344 NDFREE(&nd, NDF_ONLY_PNBUF);
1345 if (nd.ni_dvp == nd.ni_vp)
1354 VOP_LEASE(nd.ni_dvp, td, p->p_ucred, LEASE_WRITE);
1355 error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, DELETE);
1356 NDFREE(&nd, NDF_ONLY_PNBUF);
1358 ASSERT_VOP_UNLOCKED(nd.ni_dvp, "undelete");
1359 ASSERT_VOP_UNLOCKED(nd.ni_vp, "undelete");
1364 * unlink_args(char *path)
1366 * Delete a name from the filesystem.
/*
 * unlink(2): refuses directories and mount roots, otherwise removes the
 * name via VOP_REMOVE.  NOTE(review): extraction gaps — error exits and
 * vput/vrele cleanup lines are not visible here.
 */
1369 unlink(struct unlink_args *uap)
1371 struct thread *td = curthread;
1372 struct proc *p = td->td_proc;
1375 struct nameidata nd;
1378 NDINIT(&nd, DELETE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), td);
1379 if ((error = namei(&nd)) != 0)
1382 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1383 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
/* rmdir(2), not unlink(2), is the call for directories. */
1385 if (vp->v_type == VDIR)
1386 error = EPERM; /* POSIX */
1389 * The root of a mounted filesystem cannot be deleted.
1391 * XXX: can this only be a VDIR case?
1393 if (vp->v_flag & VROOT)
1398 VOP_LEASE(nd.ni_dvp, td, p->p_ucred, LEASE_WRITE);
1399 error = VOP_REMOVE(nd.ni_dvp, vp, &nd.ni_cnd);
1401 NDFREE(&nd, NDF_ONLY_PNBUF);
1402 if (nd.ni_dvp == vp)
1408 ASSERT_VOP_UNLOCKED(nd.ni_dvp, "unlink");
1409 ASSERT_VOP_UNLOCKED(nd.ni_vp, "unlink");
1414 * lseek_args(int fd, int pad, off_t offset, int whence)
1416 * Reposition read/write file offset.
/*
 * lseek(2): adjusts fp->f_offset per whence (SEEK_CUR adds, SEEK_END
 * adds to the file size from VOP_GETATTR, SEEK_SET assigns) and returns
 * the new offset through p->p_retval.  NOTE(review): extraction gaps —
 * the case labels and EBADF/ESPIPE returns are not visible here.
 */
1419 lseek(struct lseek_args *uap)
1421 struct thread *td = curthread;
1422 struct proc *p = td->td_proc;
1423 struct ucred *cred = p->p_ucred;
1424 struct filedesc *fdp = p->p_fd;
/* Validate the descriptor index and fetch the open file. */
1429 if ((u_int)SCARG(uap, fd) >= fdp->fd_nfiles ||
1430 (fp = fdp->fd_ofiles[SCARG(uap, fd)]) == NULL)
1432 if (fp->f_type != DTYPE_VNODE)
1434 switch (SCARG(uap, whence)) {
1436 fp->f_offset += SCARG(uap, offset);
1439 error=VOP_GETATTR((struct vnode *)fp->f_data, &vattr, cred, td);
1442 fp->f_offset = SCARG(uap, offset) + vattr.va_size;
1445 fp->f_offset = SCARG(uap, offset);
/* off_t result is returned through the 64-bit retval slot. */
1450 *(off_t *)(p->p_retval) = fp->f_offset;
1454 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
1456 * Reposition read/write file offset.
1458 * olseek_args(int fd, long offset, int whence)
/*
 * Old lseek(2) compatibility shim: widens the long offset into the
 * modern off_t-based lseek() argument structure and forwards the call.
 */
1461 olseek(struct olseek_args *uap)
1463 struct lseek_args /* {
1465 syscallarg(int) pad;
1466 syscallarg(off_t) offset;
1467 syscallarg(int) whence;
1471 SCARG(&nuap, fd) = SCARG(uap, fd);
1472 SCARG(&nuap, offset) = SCARG(uap, offset);
1473 SCARG(&nuap, whence) = SCARG(uap, whence);
1474 error = lseek(&nuap);
1477 #endif /* COMPAT_43 */
1480 * access_args(char *path, int flags)
1482 * Check access permissions.
/*
 * access(2): checks permissions against the REAL uid/gid by building a
 * temporary credential, then translates R_OK/W_OK/X_OK into VREAD/
 * VWRITE/VEXEC for VOP_ACCESS.  NOTE(review): extraction gaps — the
 * flag-OR statements and credential restore/free are not visible here.
 */
1485 access(struct access_args *uap)
1487 struct thread *td = curthread;
1488 struct proc *p = td->td_proc;
1489 struct ucred *cred, *tmpcred;
1492 struct nameidata nd;
1496 * Create and modify a temporary credential instead of one that
1497 * is potentially shared. This could also mess up socket
1498 * buffer accounting which can run in an interrupt context.
/* Substitute real uid/gid so the check reflects the real user. */
1500 tmpcred = crdup(cred);
1501 tmpcred->cr_uid = p->p_ucred->cr_ruid;
1502 tmpcred->cr_groups[0] = p->p_ucred->cr_rgid;
1503 p->p_ucred = tmpcred;
1504 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
1505 SCARG(uap, path), td);
1506 if ((error = namei(&nd)) != 0)
1510 /* Flags == 0 means only check for existence. */
1511 if (SCARG(uap, flags)) {
1513 if (SCARG(uap, flags) & R_OK)
1515 if (SCARG(uap, flags) & W_OK)
1517 if (SCARG(uap, flags) & X_OK)
/* Writes additionally require the vnode to be writable (not RO fs). */
1519 if ((flags & VWRITE) == 0 || (error = vn_writechk(vp)) == 0)
1520 error = VOP_ACCESS(vp, flags, tmpcred, td);
1522 NDFREE(&nd, NDF_ONLY_PNBUF);
1530 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
1532 * ostat_args(char *path, struct ostat *ub)
1534 * Get file status; this version follows links.
/*
 * Old stat(2) compatibility: stats the path (following symlinks) and
 * copies the result out in the old `struct ostat` layout (converted by
 * cvtstat, not visible on this path's missing lines).
 */
1538 ostat(struct ostat_args *uap)
1540 struct thread *td = curthread;
1544 struct nameidata nd;
1546 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
1547 SCARG(uap, path), td);
1548 if ((error = namei(&nd)) != 0)
1550 NDFREE(&nd, NDF_ONLY_PNBUF);
1551 error = vn_stat(nd.ni_vp, &sb, td);
1556 error = copyout((caddr_t)&osb, (caddr_t)SCARG(uap, ub), sizeof (osb));
1561 * olstat_args(char *path, struct ostat *ub)
1563 * Get file status; this version does not follow links.
/*
 * Old lstat(2) compatibility: like ostat() but uses NOFOLLOW so a
 * trailing symlink is statted itself rather than its target.
 */
1567 olstat(struct olstat_args *uap)
1569 struct thread *td = curthread;
1574 struct nameidata nd;
1576 NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
1577 SCARG(uap, path), td);
1578 if ((error = namei(&nd)) != 0)
1581 error = vn_stat(vp, &sb, td);
1582 NDFREE(&nd, NDF_ONLY_PNBUF);
1587 error = copyout((caddr_t)&osb, (caddr_t)SCARG(uap, ub), sizeof (osb));
1592 * Convert from an old to a new stat structure.
/*
 * Field-by-field copy from `struct stat` (st) into the old-layout
 * `struct ostat` (ost).  st_size is copied only when it fits in the old
 * 32-bit field (the overflow branch is on a line missing from this
 * view).
 */
1599 ost->st_dev = st->st_dev;
1600 ost->st_ino = st->st_ino;
1601 ost->st_mode = st->st_mode;
1602 ost->st_nlink = st->st_nlink;
1603 ost->st_uid = st->st_uid;
1604 ost->st_gid = st->st_gid;
1605 ost->st_rdev = st->st_rdev;
/* Old struct has a narrower size field: guard against truncation. */
1606 if (st->st_size < (quad_t)1 << 32)
1607 ost->st_size = st->st_size;
1610 ost->st_atime = st->st_atime;
1611 ost->st_mtime = st->st_mtime;
1612 ost->st_ctime = st->st_ctime;
1613 ost->st_blksize = st->st_blksize;
1614 ost->st_blocks = st->st_blocks;
1615 ost->st_flags = st->st_flags;
1616 ost->st_gen = st->st_gen;
1618 #endif /* COMPAT_43 || COMPAT_SUNOS */
1621 * stat_args(char *path, struct stat *ub)
1623 * Get file status; this version follows links.
/*
 * stat() syscall — get file status, following symlinks, and copy the
 * result to the user buffer `ub`.
 * NOTE(review): error-return lines after namei()/vn_stat() are elided.
 */
1627 stat(struct stat_args *uap)
1629 struct thread *td = curthread;
1632 struct nameidata nd;
1634 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
1635 SCARG(uap, path), td);
1636 if ((error = namei(&nd)) != 0)
1638 error = vn_stat(nd.ni_vp, &sb, td);
1639 NDFREE(&nd, NDF_ONLY_PNBUF);
1643 error = copyout((caddr_t)&sb, (caddr_t)SCARG(uap, ub), sizeof (sb));
1648 * lstat_args(char *path, struct stat *ub)
1650 * Get file status; this version does not follow links.
/*
 * lstat() syscall — like stat() but does not follow a trailing
 * symlink (NOFOLLOW), so the link itself is stat'ed.
 * NOTE(review): lines elided in this excerpt.
 */
1654 lstat(struct lstat_args *uap)
1656 struct thread *td = curthread;
1660 struct nameidata nd;
1662 NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
1663 SCARG(uap, path), td);
1664 if ((error = namei(&nd)) != 0)
1667 error = vn_stat(vp, &sb, td);
1668 NDFREE(&nd, NDF_ONLY_PNBUF);
1672 error = copyout((caddr_t)&sb, (caddr_t)SCARG(uap, ub), sizeof (sb));
/*
 * Straight field copy from `struct stat` (sb) into `struct nstat`
 * (nsb). The function header is elided in this excerpt — presumably
 * `cvtnstat(struct stat *sb, struct nstat *nsb)`; TODO confirm.
 */
1681 nsb->st_dev = sb->st_dev;
1682 nsb->st_ino = sb->st_ino;
1683 nsb->st_mode = sb->st_mode;
1684 nsb->st_nlink = sb->st_nlink;
1685 nsb->st_uid = sb->st_uid;
1686 nsb->st_gid = sb->st_gid;
1687 nsb->st_rdev = sb->st_rdev;
1688 nsb->st_atimespec = sb->st_atimespec;
1689 nsb->st_mtimespec = sb->st_mtimespec;
1690 nsb->st_ctimespec = sb->st_ctimespec;
1691 nsb->st_size = sb->st_size;
1692 nsb->st_blocks = sb->st_blocks;
1693 nsb->st_blksize = sb->st_blksize;
1694 nsb->st_flags = sb->st_flags;
1695 nsb->st_gen = sb->st_gen;
1696 nsb->st_qspare[0] = sb->st_qspare[0];
1697 nsb->st_qspare[1] = sb->st_qspare[1];
1701 * nstat_args(char *path, struct nstat *ub)
/*
 * nstat() syscall — stat() variant returning the newer `struct nstat`
 * layout; follows symlinks, converts via cvtnstat(), copies out.
 * NOTE(review): lines elided in this excerpt.
 */
1705 nstat(struct nstat_args *uap)
1707 struct thread *td = curthread;
1711 struct nameidata nd;
1713 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
1714 SCARG(uap, path), td);
1715 if ((error = namei(&nd)) != 0)
1717 NDFREE(&nd, NDF_ONLY_PNBUF);
1718 error = vn_stat(nd.ni_vp, &sb, td);
1722 cvtnstat(&sb, &nsb);
1723 error = copyout((caddr_t)&nsb, (caddr_t)SCARG(uap, ub), sizeof (nsb));
1728 * lstat_args(char *path, struct stat *ub)
1730 * Get file status; this version does not follow links.
/*
 * nlstat() syscall — nstat() that does not follow a trailing symlink.
 * NOTE(review): the comment above this function in the original says
 * "lstat_args" — stale copy/paste from lstat(); lines elided here.
 */
1734 nlstat(struct nlstat_args *uap)
1736 struct thread *td = curthread;
1741 struct nameidata nd;
1743 NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
1744 SCARG(uap, path), td);
1745 if ((error = namei(&nd)) != 0)
1748 NDFREE(&nd, NDF_ONLY_PNBUF);
1749 error = vn_stat(vp, &sb, td);
1753 cvtnstat(&sb, &nsb);
1754 error = copyout((caddr_t)&nsb, (caddr_t)SCARG(uap, ub), sizeof (nsb));
1759 * pathconf_Args(char *path, int name)
1761 * Get configurable pathname variables.
/*
 * pathconf() syscall — query a configurable pathname variable `name`
 * for `path` via VOP_PATHCONF; result is returned through p->p_retval.
 * NOTE(review): the vput/return lines are elided in this excerpt.
 */
1765 pathconf(struct pathconf_args *uap)
1767 struct thread *td = curthread;
1768 struct proc *p = td->td_proc;
1770 struct nameidata nd;
1772 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
1773 SCARG(uap, path), td);
1774 if ((error = namei(&nd)) != 0)
1776 NDFREE(&nd, NDF_ONLY_PNBUF);
1777 error = VOP_PATHCONF(nd.ni_vp, SCARG(uap, name), p->p_retval);
1783 * readlink_args(char *path, char *buf, int count)
1785 * Return target name of a symbolic link.
/*
 * readlink() syscall — copy the target of symlink `path` into the
 * user buffer `buf` (up to `count` bytes). The number of bytes
 * produced is returned via p->p_retval[0].
 * NOTE(review): declarations and error paths are elided here.
 */
1789 readlink(struct readlink_args *uap)
1791 struct thread *td = curthread;
1792 struct proc *p = td->td_proc;
1797 struct nameidata nd;
/* NOFOLLOW: we must land on the link itself, not its target. */
1799 NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
1800 SCARG(uap, path), td);
1801 if ((error = namei(&nd)) != 0)
1803 NDFREE(&nd, NDF_ONLY_PNBUF);
/* Only symlinks may be read; the EINVAL return is elided. */
1805 if (vp->v_type != VLNK)
/* Build a single-segment userspace uio describing the caller's buffer. */
1808 aiov.iov_base = SCARG(uap, buf);
1809 aiov.iov_len = SCARG(uap, count);
1810 auio.uio_iov = &aiov;
1811 auio.uio_iovcnt = 1;
1812 auio.uio_offset = 0;
1813 auio.uio_rw = UIO_READ;
1814 auio.uio_segflg = UIO_USERSPACE;
1816 auio.uio_resid = SCARG(uap, count);
1817 error = VOP_READLINK(vp, &auio, p->p_ucred);
/* Bytes transferred = requested - residual. */
1820 p->p_retval[0] = SCARG(uap, count) - auio.uio_resid;
/*
 * setfflags() — common helper for chflags()/fchflags(): set the file
 * flags on a locked-for-the-duration vnode via VOP_SETATTR.
 * NOTE(review): VATTR_NULL and return lines are elided in this excerpt.
 */
1825 setfflags(struct vnode *vp, int flags)
1827 struct thread *td = curthread;
1828 struct proc *p = td->td_proc;
1833 * Prevent non-root users from setting flags on devices. When
1834 * a device is reused, users can retain ownership of the device
1835 * if they are allowed to set flags and programs assume that
1836 * chown can't fail when done as root.
1838 if ((vp->v_type == VCHR || vp->v_type == VBLK) &&
1839 ((error = suser_cred(p->p_ucred, PRISON_ROOT)) != 0))
1842 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1843 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1845 vattr.va_flags = flags;
1846 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
1847 VOP_UNLOCK(vp, 0, td);
1852 * chflags(char *path, int flags)
1854 * Change flags of a file given a path name.
/*
 * chflags() syscall — change file flags by path (follows symlinks);
 * the real work is done by setfflags().
 */
1858 chflags(struct chflags_args *uap)
1860 struct thread *td = curthread;
1862 struct nameidata nd;
1864 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
1865 if ((error = namei(&nd)) != 0)
1867 NDFREE(&nd, NDF_ONLY_PNBUF);
1868 error = setfflags(nd.ni_vp, SCARG(uap, flags));
1874 * fchflags_args(int fd, int flags)
1876 * Change flags of a file given a file descriptor.
/*
 * fchflags() syscall — change file flags via an open descriptor;
 * resolves fd to a vnode-backed file then delegates to setfflags().
 */
1880 fchflags(struct fchflags_args *uap)
1882 struct thread *td = curthread;
1883 struct proc *p = td->td_proc;
1887 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
1889 return setfflags((struct vnode *) fp->f_data, SCARG(uap, flags));
/*
 * setfmode() — common helper for chmod()/lchmod()/fchmod(): set the
 * file mode (masked to ALLPERMS) via VOP_SETATTR on a locked vnode.
 * NOTE(review): VATTR_NULL and the return are elided in this excerpt.
 */
1893 setfmode(struct vnode *vp, int mode)
1895 struct thread *td = curthread;
1896 struct proc *p = td->td_proc;
1900 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1901 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1903 vattr.va_mode = mode & ALLPERMS;
1904 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
1905 VOP_UNLOCK(vp, 0, td);
1910 * chmod_args(char *path, int mode)
1912 * Change mode of a file given path name.
/*
 * chmod() syscall — change mode by path, following symlinks;
 * delegates to setfmode().
 */
1916 chmod(struct chmod_args *uap)
1918 struct thread *td = curthread;
1920 struct nameidata nd;
1922 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
1923 if ((error = namei(&nd)) != 0)
1925 NDFREE(&nd, NDF_ONLY_PNBUF);
1926 error = setfmode(nd.ni_vp, SCARG(uap, mode));
1932 * lchmod_args(char *path, int mode)
1934 * Change mode of a file given path name (don't follow links.)
/*
 * lchmod() syscall — change mode by path WITHOUT following a trailing
 * symlink (NOFOLLOW); delegates to setfmode().
 */
1938 lchmod(struct lchmod_args *uap)
1940 struct thread *td = curthread;
1942 struct nameidata nd;
1944 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
1945 if ((error = namei(&nd)) != 0)
1947 NDFREE(&nd, NDF_ONLY_PNBUF);
1948 error = setfmode(nd.ni_vp, SCARG(uap, mode));
1954 * fchmod_args(int fd, int mode)
1956 * Change mode of a file given a file descriptor.
/*
 * fchmod() syscall — change mode via an open descriptor; resolves fd
 * to its backing vnode then delegates to setfmode().
 */
1960 fchmod(struct fchmod_args *uap)
1962 struct thread *td = curthread;
1963 struct proc *p = td->td_proc;
1967 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
1969 return setfmode((struct vnode *)fp->f_data, SCARG(uap, mode));
/*
 * setfown() — common helper for chown()/lchown()/fchown(): set owner
 * uid/gid via VOP_SETATTR on a locked vnode.
 * NOTE(review): VATTR_NULL and the va_uid/va_gid assignments are
 * elided in this excerpt (gap between lines 1981 and 1985).
 */
1973 setfown(struct vnode *vp, uid_t uid, gid_t gid)
1975 struct thread *td = curthread;
1976 struct proc *p = td->td_proc;
1980 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1981 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1985 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
1986 VOP_UNLOCK(vp, 0, td);
1991 * chown(char *path, int uid, int gid)
1993 * Set ownership given a path name.
/*
 * chown() syscall — set ownership by path, following symlinks;
 * delegates to setfown().
 */
1997 chown(struct chown_args *uap)
1999 struct thread *td = curthread;
2001 struct nameidata nd;
2003 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
2004 if ((error = namei(&nd)) != 0)
2006 NDFREE(&nd, NDF_ONLY_PNBUF);
2007 error = setfown(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid));
2013 * lchown_args(char *path, int uid, int gid)
2015 * Set ownership given a path name, do not cross symlinks.
/*
 * lchown() syscall — set ownership by path without following a
 * trailing symlink; delegates to setfown().
 */
2019 lchown(struct lchown_args *uap)
2021 struct thread *td = curthread;
2023 struct nameidata nd;
2025 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
2026 if ((error = namei(&nd)) != 0)
2028 NDFREE(&nd, NDF_ONLY_PNBUF);
2029 error = setfown(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid));
2035 * fchown_args(int fd, int uid, int gid)
2037 * Set ownership given a file descriptor.
/*
 * fchown() syscall — set ownership via an open descriptor; resolves
 * fd to its backing vnode then delegates to setfown().
 */
2041 fchown(struct fchown_args *uap)
2043 struct thread *td = curthread;
2044 struct proc *p = td->td_proc;
2048 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2050 return setfown((struct vnode *)fp->f_data,
2051 SCARG(uap, uid), SCARG(uap, gid));
/*
 * getutimes() — fetch the utimes() timestamp pair: when usrtvp is
 * NULL use the current time (the microtime/nanotime call is elided
 * here), otherwise copy the two timevals in from userspace. Converts
 * both into timespecs in tsp[0] (atime) and tsp[1] (mtime).
 */
2055 getutimes(const struct timeval *usrtvp, struct timespec *tsp)
2057 struct timeval tv[2];
2060 if (usrtvp == NULL) {
2062 TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]);
2065 if ((error = copyin(usrtvp, tv, sizeof (tv))) != 0)
2067 TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]);
2068 TIMEVAL_TO_TIMESPEC(&tv[1], &tsp[1]);
/*
 * setutimes() — common helper for utimes()/lutimes()/futimes(): apply
 * access (ts[0]) and modification (ts[1]) times via VOP_SETATTR.
 * `nullflag` set means the caller passed a NULL timeval pointer, so
 * VA_UTIMES_NULL is set to let the fs apply "touch" permission rules.
 * NOTE(review): VATTR_NULL and the if() guarding line 2087 are elided.
 */
2074 setutimes(struct vnode *vp, const struct timespec *ts, int nullflag)
2076 struct thread *td = curthread;
2077 struct proc *p = td->td_proc;
2081 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2082 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2084 vattr.va_atime = ts[0];
2085 vattr.va_mtime = ts[1];
2087 vattr.va_vaflags |= VA_UTIMES_NULL;
2088 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
2089 VOP_UNLOCK(vp, 0, td);
2094 * utimes_args(char *path, struct timeval *tptr)
2096 * Set the access and modification times of a file.
/*
 * utimes() syscall — set access/modification times by path, following
 * symlinks; times parsed by getutimes(), applied by setutimes().
 */
2100 utimes(struct utimes_args *uap)
2102 struct thread *td = curthread;
2103 struct timespec ts[2];
2104 struct timeval *usrtvp;
2106 struct nameidata nd;
2108 usrtvp = SCARG(uap, tptr);
2109 if ((error = getutimes(usrtvp, ts)) != 0)
2111 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
2112 if ((error = namei(&nd)) != 0)
2114 NDFREE(&nd, NDF_ONLY_PNBUF);
2115 error = setutimes(nd.ni_vp, ts, usrtvp == NULL);
2121 * lutimes_args(char *path, struct timeval *tptr)
2123 * Set the access and modification times of a file.
/*
 * lutimes() syscall — like utimes() but does not follow a trailing
 * symlink (NOFOLLOW), so the link's own times are set.
 */
2127 lutimes(struct lutimes_args *uap)
2129 struct thread *td = curthread;
2130 struct timespec ts[2];
2131 struct timeval *usrtvp;
2133 struct nameidata nd;
2135 usrtvp = SCARG(uap, tptr);
2136 if ((error = getutimes(usrtvp, ts)) != 0)
2138 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
2139 if ((error = namei(&nd)) != 0)
2141 NDFREE(&nd, NDF_ONLY_PNBUF);
2142 error = setutimes(nd.ni_vp, ts, usrtvp == NULL);
2148 * futimes_args(int fd, struct timeval *tptr)
2150 * Set the access and modification times of a file.
/*
 * futimes() syscall — set access/modification times via an open
 * descriptor; resolves fd to its vnode then uses setutimes().
 */
2154 futimes(struct futimes_args *uap)
2156 struct thread *td = curthread;
2157 struct proc *p = td->td_proc;
2158 struct timespec ts[2];
2160 struct timeval *usrtvp;
2163 usrtvp = SCARG(uap, tptr);
2164 if ((error = getutimes(usrtvp, ts)) != 0)
2166 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2168 return setutimes((struct vnode *)fp->f_data, ts, usrtvp == NULL);
2172 * truncate(char *path, int pad, off_t length)
2174 * Truncate a file given its path name.
/*
 * truncate() syscall — set the size of the file named by `path`.
 * Rejects negative lengths and directories; requires write access and
 * a passing vn_writechk() before issuing VOP_SETATTR with va_size.
 * NOTE(review): EINVAL/EISDIR returns, VATTR_NULL, unlock and vrele
 * lines are elided in this excerpt.
 */
2178 truncate(struct truncate_args *uap)
2180 struct thread *td = curthread;
2181 struct proc *p = td->td_proc;
2185 struct nameidata nd;
2187 if (uap->length < 0)
2189 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
2190 if ((error = namei(&nd)) != 0)
2193 NDFREE(&nd, NDF_ONLY_PNBUF);
2194 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2195 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2196 if (vp->v_type == VDIR)
2198 else if ((error = vn_writechk(vp)) == 0 &&
2199 (error = VOP_ACCESS(vp, VWRITE, p->p_ucred, td)) == 0) {
2201 vattr.va_size = SCARG(uap, length);
2202 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
2209 * ftruncate_args(int fd, int pad, off_t length)
2211 * Truncate a file given a file descriptor.
/*
 * ftruncate() syscall — set file size via an open descriptor. The fd
 * must have been opened for writing (FWRITE); unlike truncate() no
 * VOP_ACCESS check is needed since the open already granted access,
 * and fp->f_cred (open-time credentials) are used for the setattr.
 * NOTE(review): EINVAL returns and VATTR_NULL are elided here.
 */
2215 ftruncate(struct ftruncate_args *uap)
2217 struct thread *td = curthread;
2218 struct proc *p = td->td_proc;
2224 if (uap->length < 0)
2226 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2228 if ((fp->f_flag & FWRITE) == 0)
2230 vp = (struct vnode *)fp->f_data;
2231 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2232 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2233 if (vp->v_type == VDIR)
2235 else if ((error = vn_writechk(vp)) == 0) {
2237 vattr.va_size = SCARG(uap, length);
2238 error = VOP_SETATTR(vp, &vattr, fp->f_cred, td);
2240 VOP_UNLOCK(vp, 0, td);
2244 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
2246 * otruncate_args(char *path, long length)
2248 * Truncate a file given its path name.
/*
 * otruncate() — COMPAT_43/SUNOS wrapper: widen the old long `length`
 * argument into the modern off_t truncate() arguments and forward.
 */
2252 otruncate(struct otruncate_args *uap)
2254 struct truncate_args /* {
2255 syscallarg(char *) path;
2256 syscallarg(int) pad;
2257 syscallarg(off_t) length;
2260 SCARG(&nuap, path) = SCARG(uap, path);
2261 SCARG(&nuap, length) = SCARG(uap, length);
2262 return (truncate(&nuap));
2266 * oftruncate_args(int fd, long length)
2268 * Truncate a file given a file descriptor.
/*
 * oftruncate() — COMPAT_43/SUNOS wrapper: widen the old long `length`
 * into modern ftruncate() arguments and forward.
 */
2272 oftruncate(struct oftruncate_args *uap)
2274 struct ftruncate_args /* {
2276 syscallarg(int) pad;
2277 syscallarg(off_t) length;
2280 SCARG(&nuap, fd) = SCARG(uap, fd);
2281 SCARG(&nuap, length) = SCARG(uap, length);
2282 return (ftruncate(&nuap));
2284 #endif /* COMPAT_43 || COMPAT_SUNOS */
2289 * Sync an open file.
/*
 * fsync() syscall — flush an open file to stable storage: clean any
 * backing VM object's pages, call VOP_FSYNC with MNT_WAIT, and, on
 * softdep-mounted filesystems, also invoke the bioops fsync hook.
 * NOTE(review): declarations, returns, and part of the softdep
 * condition (line 2310) are elided in this excerpt.
 */
2293 fsync(struct fsync_args *uap)
2295 struct thread *td = curthread;
2296 struct proc *p = td->td_proc;
2302 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2304 vp = (struct vnode *)fp->f_data;
2305 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
/* Write back dirty VM pages before the fs-level sync. */
2306 if (VOP_GETVOBJECT(vp, &obj) == 0)
2307 vm_object_page_clean(obj, 0, 0, 0);
2308 if ((error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, td)) == 0 &&
2309 vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP) &&
2311 error = (*bioops.io_fsync)(vp);
2312 VOP_UNLOCK(vp, 0, td);
2317 * rename_args(char *from, char *to)
2319 * Rename files. Source and destination must either both be directories,
2320 * or both not be directories. If target is a directory, it must be empty.
/*
 * rename() syscall — atomically rename `from` to `to`. Looks up the
 * source with DELETE/WANTPARENT/SAVESTART and the target with
 * RENAME/LOCKPARENT/LOCKLEAF, checks dir-vs-nondir compatibility,
 * takes write leases on all involved vnodes, then calls VOP_RENAME.
 * NOTE(review): this excerpt elides many lines (fvp/tvp/tdvp setup,
 * error labels, the "same vnode" short-circuit, cleanup vput/vrele
 * sequences); the visible code is not the complete control flow.
 */
2324 rename(struct rename_args *uap)
2326 struct thread *td = curthread;
2327 struct proc *p = td->td_proc;
2328 struct vnode *tvp, *fvp, *tdvp;
2329 struct nameidata fromnd, tond;
2333 NDINIT(&fromnd, DELETE, WANTPARENT | SAVESTART, UIO_USERSPACE,
2334 SCARG(uap, from), td);
2335 if ((error = namei(&fromnd)) != 0)
2338 NDINIT(&tond, RENAME, LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART | NOOBJ,
2339 UIO_USERSPACE, SCARG(uap, to), td);
/* Tell the lookup the target will be a directory if the source is. */
2340 if (fromnd.ni_vp->v_type == VDIR)
2341 tond.ni_cnd.cn_flags |= WILLBEDIR;
2342 if ((error = namei(&tond)) != 0) {
2343 /* Translate error code for rename("dir1", "dir2/."). */
2344 if (error == EISDIR && fvp->v_type == VDIR)
2346 NDFREE(&fromnd, NDF_ONLY_PNBUF);
2347 vrele(fromnd.ni_dvp);
/* Directories may only replace directories, and vice versa. */
2354 if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
2357 } else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
2365 * If the source is the same as the destination (that is, if they
2366 * are links to the same vnode), then there is nothing to do.
2372 VOP_LEASE(tdvp, td, p->p_ucred, LEASE_WRITE);
2373 if (fromnd.ni_dvp != tdvp) {
2374 VOP_LEASE(fromnd.ni_dvp, td, p->p_ucred, LEASE_WRITE);
2377 VOP_LEASE(tvp, td, p->p_ucred, LEASE_WRITE);
2379 error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd,
2380 tond.ni_dvp, tond.ni_vp, &tond.ni_cnd);
2381 NDFREE(&fromnd, NDF_ONLY_PNBUF);
2382 NDFREE(&tond, NDF_ONLY_PNBUF);
2384 NDFREE(&fromnd, NDF_ONLY_PNBUF);
2385 NDFREE(&tond, NDF_ONLY_PNBUF);
2392 vrele(fromnd.ni_dvp);
2395 vrele(tond.ni_startdir);
2396 ASSERT_VOP_UNLOCKED(fromnd.ni_dvp, "rename");
2397 ASSERT_VOP_UNLOCKED(fromnd.ni_vp, "rename");
2398 ASSERT_VOP_UNLOCKED(tond.ni_dvp, "rename");
2399 ASSERT_VOP_UNLOCKED(tond.ni_vp, "rename");
2401 if (fromnd.ni_startdir)
2402 vrele(fromnd.ni_startdir);
2409 * mkdir_args(char *path, int mode)
2411 * Make a directory file.
/*
 * mkdir() syscall — create a directory at `path` with `mode` (masked
 * by the process umask, fd_cmask) via VOP_MKDIR.
 * NOTE(review): the EEXIST handling after the ni_dvp == vp check,
 * VATTR_NULL, and the vput/vrele cleanup are elided in this excerpt.
 */
2415 mkdir(struct mkdir_args *uap)
2417 struct thread *td = curthread;
2418 struct proc *p = td->td_proc;
2422 struct nameidata nd;
2425 NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), td);
2426 nd.ni_cnd.cn_flags |= WILLBEDIR;
2427 if ((error = namei(&nd)) != 0)
2431 NDFREE(&nd, NDF_ONLY_PNBUF);
/* Target already exists (lookup returned a vnode) — handled in
 * elided lines, presumably EEXIST. */
2432 if (nd.ni_dvp == vp)
2440 vattr.va_type = VDIR;
2441 vattr.va_mode = (SCARG(uap, mode) & ACCESSPERMS) &~ p->p_fd->fd_cmask;
2442 VOP_LEASE(nd.ni_dvp, td, p->p_ucred, LEASE_WRITE);
2443 error = VOP_MKDIR(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
2444 NDFREE(&nd, NDF_ONLY_PNBUF);
2448 ASSERT_VOP_UNLOCKED(nd.ni_dvp, "mkdir");
2449 ASSERT_VOP_UNLOCKED(nd.ni_vp, "mkdir");
2454 * rmdir_args(char *path)
2456 * Remove a directory file.
/*
 * rmdir() syscall — remove the directory named by `path` via
 * VOP_RMDIR. Refuses non-directories, "." (parent == leaf), and the
 * root of a mounted filesystem.
 * NOTE(review): error assignments (ENOTDIR/EINVAL/EBUSY), the out:
 * cleanup label, and vput/vrele lines are elided in this excerpt.
 */
2460 rmdir(struct rmdir_args *uap)
2462 struct thread *td = curthread;
2463 struct proc *p = td->td_proc;
2466 struct nameidata nd;
2469 NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF, UIO_USERSPACE,
2470 SCARG(uap, path), td);
2471 if ((error = namei(&nd)) != 0)
2474 if (vp->v_type != VDIR) {
2479 * No rmdir "." please.
2481 if (nd.ni_dvp == vp) {
2486 * The root of a mounted filesystem cannot be deleted.
2488 if (vp->v_flag & VROOT)
2491 VOP_LEASE(nd.ni_dvp, td, p->p_ucred, LEASE_WRITE);
2492 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2493 error = VOP_RMDIR(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd);
2496 NDFREE(&nd, NDF_ONLY_PNBUF);
2497 if (nd.ni_dvp == vp)
2503 ASSERT_VOP_UNLOCKED(nd.ni_dvp, "rmdir");
2504 ASSERT_VOP_UNLOCKED(nd.ni_vp, "rmdir");
2510 * ogetdirentries_args(int fd, char *buf, u_int count, long *basep)
2512 * Read a block of directory entries in a file system independent format.
/*
 * ogetdirentries() — COMPAT_43 directory read. Reads dirents and, for
 * filesystems predating the d_type field (mnt_maxsymlinklen <= 0 path
 * on big-endian, and the kernel-bounce path below), rewrites each
 * entry's d_type/d_namlen bytes to the old layout before copying to
 * userspace. Also handles union-mount fallthrough to the covered
 * vnode when a read at EOF returns nothing.
 * NOTE(review): this excerpt elides many lines (error returns, the
 * else of the byte-order split, dp->d_type = 0 store, vref/vrele in
 * the union fallthrough, the final copyout size); the visible code is
 * not the complete control flow.
 */
2515 ogetdirentries(struct ogetdirentries_args *uap)
2517 struct thread *td = curthread;
2518 struct proc *p = td->td_proc;
2521 struct uio auio, kuio;
2522 struct iovec aiov, kiov;
2523 struct dirent *dp, *edp;
2525 int error, eofflag, readcnt;
2528 /* XXX arbitrary sanity limit on `count'. */
2529 if (SCARG(uap, count) > 64 * 1024)
2531 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2533 if ((fp->f_flag & FREAD) == 0)
2535 vp = (struct vnode *)fp->f_data;
2537 if (vp->v_type != VDIR)
2539 aiov.iov_base = SCARG(uap, buf);
2540 aiov.iov_len = SCARG(uap, count);
2541 auio.uio_iov = &aiov;
2542 auio.uio_iovcnt = 1;
2543 auio.uio_rw = UIO_READ;
2544 auio.uio_segflg = UIO_USERSPACE;
2546 auio.uio_resid = SCARG(uap, count);
2547 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2548 loff = auio.uio_offset = fp->f_offset;
2549 # if (BYTE_ORDER != LITTLE_ENDIAN)
2550 if (vp->v_mount->mnt_maxsymlinklen <= 0) {
2551 error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag,
2553 fp->f_offset = auio.uio_offset;
/* Bounce through a kernel buffer so each dirent can be rewritten
 * into the old format before the final uiomove to userspace. */
2558 kuio.uio_iov = &kiov;
2559 kuio.uio_segflg = UIO_SYSSPACE;
2560 kiov.iov_len = SCARG(uap, count);
2561 MALLOC(dirbuf, caddr_t, SCARG(uap, count), M_TEMP, M_WAITOK);
2562 kiov.iov_base = dirbuf;
2563 error = VOP_READDIR(vp, &kuio, fp->f_cred, &eofflag,
2565 fp->f_offset = kuio.uio_offset;
2567 readcnt = SCARG(uap, count) - kuio.uio_resid;
2568 edp = (struct dirent *)&dirbuf[readcnt];
2569 for (dp = (struct dirent *)dirbuf; dp < edp; ) {
2570 # if (BYTE_ORDER == LITTLE_ENDIAN)
2572 * The expected low byte of
2573 * dp->d_namlen is our dp->d_type.
2574 * The high MBZ byte of dp->d_namlen
2575 * is our dp->d_namlen.
2577 dp->d_type = dp->d_namlen;
2581 * The dp->d_type is the high byte
2582 * of the expected dp->d_namlen,
2583 * so must be zero'ed.
2587 if (dp->d_reclen > 0) {
2588 dp = (struct dirent *)
2589 ((char *)dp + dp->d_reclen);
2596 error = uiomove(dirbuf, readcnt, &auio);
2598 FREE(dirbuf, M_TEMP);
2600 VOP_UNLOCK(vp, 0, td);
/* Nothing read: give union mounts a chance to fall through to the
 * covered (lower) vnode and retry. */
2603 if (SCARG(uap, count) == auio.uio_resid) {
2604 if (union_dircheckp) {
2605 error = union_dircheckp(td, &vp, fp);
2611 if ((vp->v_flag & VROOT) &&
2612 (vp->v_mount->mnt_flag & MNT_UNION)) {
2613 struct vnode *tvp = vp;
2614 vp = vp->v_mount->mnt_vnodecovered;
2616 fp->f_data = (caddr_t) vp;
2622 error = copyout((caddr_t)&loff, (caddr_t)SCARG(uap, basep),
2624 p->p_retval[0] = SCARG(uap, count) - auio.uio_resid;
2627 #endif /* COMPAT_43 */
2630 * getdirentries_args(int fd, char *buf, u_int conut, long *basep)
2632 * Read a block of directory entries in a file system independent format.
/*
 * getdirentries() syscall — read directory entries into the user
 * buffer via VOP_READDIR; records the starting offset in *basep (if
 * non-NULL) and returns bytes read via p->p_retval[0]. Retries
 * through the covered vnode on union mounts when a read returns
 * nothing at EOF.
 * NOTE(review): error returns, the union-fallthrough vref/vrele, and
 * the copyout size argument are elided in this excerpt.
 */
2635 getdirentries(struct getdirentries_args *uap)
2637 struct thread *td = curthread;
2638 struct proc *p = td->td_proc;
2646 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2648 if ((fp->f_flag & FREAD) == 0)
2650 vp = (struct vnode *)fp->f_data;
2652 if (vp->v_type != VDIR)
2654 aiov.iov_base = SCARG(uap, buf);
2655 aiov.iov_len = SCARG(uap, count);
2656 auio.uio_iov = &aiov;
2657 auio.uio_iovcnt = 1;
2658 auio.uio_rw = UIO_READ;
2659 auio.uio_segflg = UIO_USERSPACE;
2661 auio.uio_resid = SCARG(uap, count);
2662 /* vn_lock(vp, LK_SHARED | LK_RETRY, td); */
2663 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2664 loff = auio.uio_offset = fp->f_offset;
2665 error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL);
2666 fp->f_offset = auio.uio_offset;
2667 VOP_UNLOCK(vp, 0, td);
/* Nothing read: union-mount fallthrough to the covered vnode. */
2670 if (SCARG(uap, count) == auio.uio_resid) {
2671 if (union_dircheckp) {
2672 error = union_dircheckp(td, &vp, fp);
2678 if ((vp->v_flag & VROOT) &&
2679 (vp->v_mount->mnt_flag & MNT_UNION)) {
2680 struct vnode *tvp = vp;
2681 vp = vp->v_mount->mnt_vnodecovered;
2683 fp->f_data = (caddr_t) vp;
2689 if (SCARG(uap, basep) != NULL) {
2690 error = copyout((caddr_t)&loff, (caddr_t)SCARG(uap, basep),
2693 p->p_retval[0] = SCARG(uap, count) - auio.uio_resid;
2698 * getdents_args(int fd, char *buf, size_t count)
/*
 * getdents() syscall — thin wrapper that forwards to getdirentries()
 * with basep unused (the fd/buf/basep assignments are elided in this
 * excerpt).
 */
2701 getdents(struct getdents_args *uap)
2703 struct getdirentries_args ap;
2707 ap.count = uap->count;
2709 return getdirentries(&ap);
2713 * umask(int newmask)
2715 * Set the mode mask for creation of filesystem nodes.
/*
 * umask() syscall — set the process file-creation mask, returning the
 * previous mask via p->p_retval[0]. (fdp = p->p_fd assignment is
 * elided in this excerpt.)
 */
2720 umask(struct umask_args *uap)
2722 struct thread *td = curthread;
2723 struct proc *p = td->td_proc;
2724 struct filedesc *fdp;
2727 p->p_retval[0] = fdp->fd_cmask;
2728 fdp->fd_cmask = SCARG(uap, newmask) & ALLPERMS;
2733 * revoke(char *path)
2735 * Void all references to file by ripping underlying filesystem
/*
 * revoke() syscall — revoke all access to the device special file at
 * `path` (VOP_REVOKE with REVOKEALL). Only the file's owner or a
 * privileged user may do this, and only on VCHR/VBLK vnodes.
 * NOTE(review): EINVAL return, the out: label, and vrele are elided.
 */
2740 revoke(struct revoke_args *uap)
2742 struct thread *td = curthread;
2743 struct proc *p = td->td_proc;
2747 struct nameidata nd;
2749 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
2750 if ((error = namei(&nd)) != 0)
2753 NDFREE(&nd, NDF_ONLY_PNBUF);
2754 if (vp->v_type != VCHR && vp->v_type != VBLK) {
2758 if ((error = VOP_GETATTR(vp, &vattr, p->p_ucred, td)) != 0)
/* Non-owners need (prison-aware) superuser privilege. */
2760 if (p->p_ucred->cr_uid != vattr.va_uid &&
2761 (error = suser_cred(p->p_ucred, PRISON_ROOT)))
2764 VOP_REVOKE(vp, REVOKEALL);
2771 * Convert a user file descriptor to a kernel file entry.
/*
 * getvnode() — translate a user fd into its struct file, verifying it
 * is vnode-backed (DTYPE_VNODE or DTYPE_FIFO). The EBADF/EINVAL
 * returns and *fpp store are elided in this excerpt.
 */
2774 getvnode(struct filedesc *fdp, int fd, struct file **fpp)
2778 if ((u_int)fd >= fdp->fd_nfiles ||
2779 (fp = fdp->fd_ofiles[fd]) == NULL)
2781 if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO)
2787 * getfh_args(char *fname, fhandle_t *fhp)
2789 * Get (NFS) file handle
/*
 * getfh() syscall — build an NFS file handle for `fname` (fsid from
 * the mount plus VFS_VPTOFH fid) and copy it out to `fhp`.
 * Restricted to the superuser (suser check is on an elided line).
 */
2792 getfh(struct getfh_args *uap)
2794 struct thread *td = curthread;
2795 struct nameidata nd;
2801 * Must be super user
2806 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, uap->fname, td);
2810 NDFREE(&nd, NDF_ONLY_PNBUF);
2812 bzero(&fh, sizeof(fh));
2813 fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid;
2814 error = VFS_VPTOFH(vp, &fh.fh_fid);
2818 error = copyout(&fh, uap->fhp, sizeof (fh));
2823 * fhopen_args(const struct fhandle *u_fhp, int flags)
2825 * syscall for the rpc.lockd to use to translate a NFS file handle into
2826 * an open descriptor.
2828 * warning: do not remove the suser() call or this becomes one giant
/*
 * fhopen() syscall — open a file by NFS file handle (for rpc.lockd).
 * Superuser-only (the suser call is elided). Resolves the handle to a
 * mount + locked vnode, replays the relevant checks from vn_open()
 * (no symlinks/sockets, write/trunc permission, VOP_OPEN, VM object
 * creation), then allocates a descriptor and optionally applies
 * O_EXLOCK/O_SHLOCK advisory locks.
 * NOTE(review): this excerpt elides many lines (error returns, the
 * `bad:` label, mode computation from fmode, fhold/fdrop reference
 * management); the visible code is not the complete control flow.
 */
2832 fhopen(struct fhopen_args *uap)
2834 struct thread *td = curthread;
2835 struct proc *p = td->td_proc;
2840 struct vattr *vap = &vat;
2843 struct filedesc *fdp = p->p_fd;
2844 int fmode, mode, error, type;
2849 * Must be super user
2855 fmode = FFLAGS(SCARG(uap, flags));
2856 /* why not allow a non-read/write open for our lockd? */
2857 if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT))
2859 error = copyin(SCARG(uap,u_fhp), &fhp, sizeof(fhp));
2862 /* find the mount point */
2863 mp = vfs_getvfs(&fhp.fh_fsid);
2866 /* now give me my vnode, it gets returned to me locked */
2867 error = VFS_FHTOVP(mp, &fhp.fh_fid, &vp);
2871 * from now on we have to make sure not
2872 * to forget about the vnode
2873 * any error that causes an abort must vput(vp)
2874 * just set error = err and 'goto bad;'.
2880 if (vp->v_type == VLNK) {
2884 if (vp->v_type == VSOCK) {
2889 if (fmode & (FWRITE | O_TRUNC)) {
2890 if (vp->v_type == VDIR) {
2894 error = vn_writechk(vp);
2902 error = VOP_ACCESS(vp, mode, p->p_ucred, td);
2906 if (fmode & O_TRUNC) {
2907 VOP_UNLOCK(vp, 0, td); /* XXX */
2908 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2909 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); /* XXX */
2912 error = VOP_SETATTR(vp, vap, p->p_ucred, td);
2916 error = VOP_OPEN(vp, fmode, p->p_ucred, td);
2920 * Make sure that a VM object is created for VMIO support.
2922 if (vn_canvmio(vp) == TRUE) {
2923 if ((error = vfs_object_create(vp, td, p->p_ucred)) != 0)
2930 * end of vn_open code
2933 if ((error = falloc(p, &nfp, &indx)) != 0) {
2941 * hold an extra reference to avoid having fp ripped out
2942 * from under us while we block in the lock op.
2945 nfp->f_data = (caddr_t)vp;
2946 nfp->f_flag = fmode & FMASK;
2947 nfp->f_ops = &vnops;
2948 nfp->f_type = DTYPE_VNODE;
2949 if (fmode & (O_EXLOCK | O_SHLOCK)) {
2950 lf.l_whence = SEEK_SET;
2953 if (fmode & O_EXLOCK)
2954 lf.l_type = F_WRLCK;
2956 lf.l_type = F_RDLCK;
2958 if ((fmode & FNONBLOCK) == 0)
2960 VOP_UNLOCK(vp, 0, td);
2961 if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
2963 * lock request failed. Normally close the descriptor
2964 * but handle the case where someone might have dup()d
2965 * or close()d it when we weren't looking.
2967 if (fdp->fd_ofiles[indx] == fp) {
2968 fdp->fd_ofiles[indx] = NULL;
2973 * release our private reference.
2978 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2979 fp->f_flag |= FHASLOCK;
2981 if ((vp->v_type == VREG) && (VOP_GETVOBJECT(vp, NULL) != 0))
2982 vfs_object_create(vp, td, p->p_ucred);
2984 VOP_UNLOCK(vp, 0, td);
2986 p->p_retval[0] = indx;
2995 * fhstat_args(struct fhandle *u_fhp, struct stat *sb)
/*
 * fhstat() syscall — stat a file identified by an NFS file handle.
 * Superuser-only (suser call elided); resolves the handle via
 * vfs_getvfs/VFS_FHTOVP, stats, and copies the result out.
 */
2998 fhstat(struct fhstat_args *uap)
3000 struct thread *td = curthread;
3008 * Must be super user
3014 error = copyin(SCARG(uap, u_fhp), &fh, sizeof(fhandle_t));
3018 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
3020 if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
3022 error = vn_stat(vp, &sb, td);
3026 error = copyout(&sb, SCARG(uap, sb), sizeof(sb));
3031 * fhstatfs_args(struct fhandle *u_fhp, struct statfs *buf)
/*
 * fhstatfs() syscall — statfs the filesystem containing a file handle
 * target. Superuser-only. Non-root callers get the fsid zeroed in a
 * local copy (sb) before copyout (the branch choosing sb vs sp is
 * partially elided in this excerpt).
 */
3034 fhstatfs(struct fhstatfs_args *uap)
3036 struct thread *td = curthread;
3045 * Must be super user
3047 if ((error = suser(td)))
3050 if ((error = copyin(SCARG(uap, u_fhp), &fh, sizeof(fhandle_t))) != 0)
3053 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
3055 if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
3060 if ((error = VFS_STATFS(mp, sp, td)) != 0)
3062 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
/* Hide the real fsid from unprivileged callers. */
3064 bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
3065 sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
3068 return (copyout(sp, SCARG(uap, buf), sizeof(*sp)));
3072 * Syscall to push extended attribute configuration information into the
3073 * VFS. Accepts a path, which it converts to a mountpoint, as well as
3074 * a command (int cmd), and attribute name and misc data. For now, the
3075 * attribute name is left in userspace for consumption by the VFS_op.
3076 * It will probably be changed to be copied into sysspace by the
3077 * syscall in the future, once issues with various consumers of the
3078 * attribute code have raised their hands.
3080 * Currently this is used only by UFS Extended Attributes.
/*
 * extattrctl() syscall — push extended-attribute configuration to the
 * filesystem containing `path` via VFS_EXTATTRCTL. The attribute name
 * is passed through still in userspace (see the block comment above).
 */
3083 extattrctl(struct extattrctl_args *uap)
3085 struct thread *td = curthread;
3086 struct nameidata nd;
3090 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
3091 if ((error = namei(&nd)) != 0)
3093 mp = nd.ni_vp->v_mount;
3095 return (VFS_EXTATTRCTL(mp, SCARG(uap, cmd), SCARG(uap, attrname),
3096 SCARG(uap, arg), td));
3100 * Syscall to set a named extended attribute on a file or directory.
3101 * Accepts attribute name, and a uio structure pointing to the data to set.
3102 * The uio is consumed in the style of writev(). The real work happens
3103 * in VOP_SETEXTATTR().
/*
 * extattr_set_file() syscall — set a named extended attribute on the
 * file at `path`. Copies the attribute name in, builds a write uio
 * from the caller's iovec array (heap-allocated above UIO_SMALLIOV),
 * guards against resid overflow, and calls VOP_SETEXTATTR. Bytes
 * consumed are returned via p->p_retval[0].
 * NOTE(review): error returns, the `done:` cleanup label, iov
 * advance in the resid loop, and auio.uio_iov setup are elided.
 */
3106 extattr_set_file(struct extattr_set_file_args *uap)
3108 struct thread *td = curthread;
3109 struct proc *p = td->td_proc;
3110 struct nameidata nd;
3112 struct iovec *iov, *needfree = NULL, aiov[UIO_SMALLIOV];
3113 char attrname[EXTATTR_MAXNAMELEN];
3117 error = copyin(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN);
3120 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
3121 SCARG(uap, path), td);
3122 if ((error = namei(&nd)) != 0)
3124 iovlen = uap->iovcnt * sizeof(struct iovec);
3125 if (uap->iovcnt > UIO_SMALLIOV) {
3126 if (uap->iovcnt > UIO_MAXIOV) {
3130 MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
3135 auio.uio_iovcnt = uap->iovcnt;
3136 auio.uio_rw = UIO_WRITE;
3137 auio.uio_segflg = UIO_USERSPACE;
3139 auio.uio_offset = 0;
3140 if ((error = copyin((caddr_t)uap->iovp, (caddr_t)iov, iovlen)))
/* Accumulate total length, rejecting int overflow of uio_resid. */
3143 for (i = 0; i < uap->iovcnt; i++) {
3144 if (iov->iov_len > INT_MAX - auio.uio_resid) {
3148 auio.uio_resid += iov->iov_len;
3151 cnt = auio.uio_resid;
3152 error = VOP_SETEXTATTR(nd.ni_vp, attrname, &auio, p->p_ucred, td);
3153 cnt -= auio.uio_resid;
3154 p->p_retval[0] = cnt;
3157 FREE(needfree, M_IOV);
3163 * Syscall to get a named extended attribute on a file or directory.
3164 * Accepts attribute name, and a uio structure pointing to a buffer for the
3165 * data. The uio is consumed in the style of readv(). The real work
3166 * happens in VOP_GETEXTATTR();
/*
 * extattr_get_file() syscall — read a named extended attribute from
 * the file at `path` into the caller's iovec array (readv-style).
 * Mirrors extattr_set_file() with UIO_READ and VOP_GETEXTATTR; bytes
 * produced are returned via p->p_retval[0].
 * NOTE(review): error returns, the cleanup label, needfree
 * initialization, iov advance, and auio.uio_iov setup are elided.
 */
3169 extattr_get_file(struct extattr_get_file_args *uap)
3171 struct thread *td = curthread;
3172 struct proc *p = td->td_proc;
3173 struct nameidata nd;
3175 struct iovec *iov, *needfree, aiov[UIO_SMALLIOV];
3176 char attrname[EXTATTR_MAXNAMELEN];
3180 error = copyin(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN);
3183 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
3184 SCARG(uap, path), td);
3185 if ((error = namei(&nd)) != 0)
3187 iovlen = uap->iovcnt * sizeof (struct iovec);
3188 if (uap->iovcnt > UIO_SMALLIOV) {
3189 if (uap->iovcnt > UIO_MAXIOV) {
3193 MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
3200 auio.uio_iovcnt = uap->iovcnt;
3201 auio.uio_rw = UIO_READ;
3202 auio.uio_segflg = UIO_USERSPACE;
3204 auio.uio_offset = 0;
3205 if ((error = copyin((caddr_t)uap->iovp, (caddr_t)iov, iovlen)))
/* Accumulate total length, rejecting int overflow of uio_resid. */
3208 for (i = 0; i < uap->iovcnt; i++) {
3209 if (iov->iov_len > INT_MAX - auio.uio_resid) {
3213 auio.uio_resid += iov->iov_len;
3216 cnt = auio.uio_resid;
3217 error = VOP_GETEXTATTR(nd.ni_vp, attrname, &auio, p->p_ucred, td);
3218 cnt -= auio.uio_resid;
3219 p->p_retval[0] = cnt;
3222 FREE(needfree, M_IOV);
3228 * Syscall to delete a named extended attribute from a file or directory.
3229 * Accepts attribute name. The real work happens in VOP_SETEXTATTR().
3232 extattr_delete_file(struct extattr_delete_file_args *uap)
3234 struct thread *td = curthread;
3235 struct proc *p = td->td_proc;
3236 struct nameidata nd;
3237 char attrname[EXTATTR_MAXNAMELEN];
3240 error = copyin(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN);
3243 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
3244 SCARG(uap, path), td);
3245 if ((error = namei(&nd)) != 0)
3247 error = VOP_SETEXTATTR(nd.ni_vp, attrname, NULL, p->p_ucred, td);