2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94
39 * $FreeBSD: src/sys/kern/vfs_syscalls.c,v 1.151.2.18 2003/04/04 20:35:58 tegge Exp $
40 * $DragonFly: src/sys/kern/vfs_syscalls.c,v 1.5 2003/06/25 05:22:32 dillon Exp $
43 /* For 4.3 integer FS ID compatibility */
44 #include "opt_compat.h"
46 #include <sys/param.h>
47 #include <sys/systm.h>
49 #include <sys/sysent.h>
50 #include <sys/malloc.h>
51 #include <sys/mount.h>
52 #include <sys/sysproto.h>
53 #include <sys/filedesc.h>
54 #include <sys/kernel.h>
55 #include <sys/fcntl.h>
57 #include <sys/linker.h>
59 #include <sys/unistd.h>
60 #include <sys/vnode.h>
62 #include <sys/namei.h>
63 #include <sys/dirent.h>
64 #include <sys/extattr.h>
66 #include <machine/limits.h>
67 #include <miscfs/union/union.h>
68 #include <sys/sysctl.h>
70 #include <vm/vm_object.h>
71 #include <vm/vm_zone.h>
72 #include <vm/vm_page.h>
74 #include <sys/file2.h>
/*
 * File-local helper prototypes and tunables.
 * NOTE(review): this listing embeds original line numbers and omits
 * intermediate lines (numbering gaps); it is a partial dump, not
 * compilable C as shown.
 */
76 static int change_dir __P((struct nameidata *ndp, struct thread *td));
77 static void checkdirs __P((struct vnode *olddp));
78 static int chroot_refuse_vdir_fds __P((struct filedesc *fdp));
79 static int getutimes __P((const struct timeval *, struct timespec *));
80 static int setfown __P((struct vnode *, uid_t, gid_t));
81 static int setfmode __P((struct vnode *, int));
82 static int setfflags __P((struct vnode *, int));
83 static int setutimes __P((struct vnode *, const struct timespec *, int));
84 static int usermount = 0;	/* if 1, non-root can mount fs. */
/* Hook pointer used by the union filesystem to intercept dirchecks. */
86 int (*union_dircheckp) __P((struct thread *, struct vnode **, struct file *));
/* Expose the usermount tunable as the read/write sysctl vfs.usermount. */
88 SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, "");
91 * Virtual File System System Calls
95 * Mount a file system.
97 #ifndef _SYS_SYSPROTO_H_
106 * mount_args(char *type, char *path, int flags, caddr_t data)
/*
 * mount(2): mount a file system on the directory named by uap->path.
 * Handles both fresh mounts and MNT_UPDATE of an existing mount, plus
 * historic numeric-fstype compatibility and on-demand VFS module load.
 * NOTE(review): the embedded source numbering shows many omitted lines
 * (error labels, closing braces, declarations); treat this fragment as
 * an outline of the logic, not the complete function.
 */
110 mount(struct mount_args *uap)
112 	struct thread *td = curthread;
113 	struct proc *p = td->td_proc;
116 	struct vfsconf *vfsp;
117 	int error, flag = 0, flag2 = 0;
123 	char fstypename[MFSNAMELEN];
/* Non-root callers are rejected unless vfs.usermount is enabled. */
125 	if (usermount == 0 && (error = suser(td)))
128 	 * Do not allow NFS export by non-root users.
130 	if (SCARG(uap, flags) & MNT_EXPORTED) {
136 	 * Silently enforce MNT_NOSUID and MNT_NODEV for non-root users
139 	SCARG(uap, flags) |= MNT_NOSUID | MNT_NODEV;
141 	 * Get vnode to be covered
143 	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
144 	    SCARG(uap, path), td);
145 	if ((error = namei(&nd)) != 0)
147 	NDFREE(&nd, NDF_ONLY_PNBUF);
/* MNT_UPDATE path: alter an existing mount in place. */
149 	if (SCARG(uap, flags) & MNT_UPDATE) {
150 		if ((vp->v_flag & VROOT) == 0) {
156 		flag2 = mp->mnt_kern_flag;
158 		 * We only allow the filesystem to be reloaded if it
159 		 * is currently mounted read-only.
161 		if ((SCARG(uap, flags) & MNT_RELOAD) &&
162 		    ((mp->mnt_flag & MNT_RDONLY) == 0)) {
164 			return (EOPNOTSUPP);	/* Needs translation */
167 		 * Only root, or the user that did the original mount is
168 		 * permitted to update it.
170 		if (mp->mnt_stat.f_owner != p->p_ucred->cr_uid &&
171 		    (error = suser(td))) {
175 		if (vfs_busy(mp, LK_NOWAIT, 0, td)) {
/* VMOUNT on the covered vnode guards against a concurrent mount race. */
179 		simple_lock(&vp->v_interlock);
180 		if ((vp->v_flag & VMOUNT) != 0 ||
181 		    vp->v_mountedhere != NULL) {
182 			simple_unlock(&vp->v_interlock);
187 		vp->v_flag |= VMOUNT;
188 		simple_unlock(&vp->v_interlock);
190 		    SCARG(uap, flags) & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE);
191 		VOP_UNLOCK(vp, 0, td);
195 	 * If the user is not root, ensure that they own the directory
196 	 * onto which we are attempting to mount.
198 	if ((error = VOP_GETATTR(vp, &va, p->p_ucred, td)) ||
199 	    (va.va_uid != p->p_ucred->cr_uid &&
200 	    (error = suser(td)))) {
204 	if ((error = vinvalbuf(vp, V_SAVE, p->p_ucred, td, 0, 0)) != 0) {
208 	if (vp->v_type != VDIR) {
214 	 * Historically filesystem types were identified by number. If we
215 	 * get an integer for the filesystem type instead of a string, we
216 	 * check to see if it matches one of the historic filesystem types.
218 	fstypenum = (uintptr_t)SCARG(uap, type);
219 	if (fstypenum < maxvfsconf) {
220 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
221 			if (vfsp->vfc_typenum == fstypenum)
227 		strncpy(fstypename, vfsp->vfc_name, MFSNAMELEN);
229 #endif /* COMPAT_43 */
230 	if ((error = copyinstr(SCARG(uap, type), fstypename, MFSNAMELEN, NULL)) != 0) {
234 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
235 		if (!strcmp(vfsp->vfc_name, fstypename))
240 		/* Only load modules for root (very important!) */
241 		if ((error = suser(td)) != 0) {
/* Attempt to load the named VFS as a kernel module, then re-scan. */
245 		error = linker_load_file(fstypename, &lf);
246 		if (error || lf == NULL) {
253 		/* lookup again, see if the VFS was loaded */
254 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
255 			if (!strcmp(vfsp->vfc_name, fstypename))
259 			linker_file_unload(lf);
264 	simple_lock(&vp->v_interlock);
265 	if ((vp->v_flag & VMOUNT) != 0 ||
266 	    vp->v_mountedhere != NULL) {
267 		simple_unlock(&vp->v_interlock);
271 	vp->v_flag |= VMOUNT;
272 	simple_unlock(&vp->v_interlock);
275 	 * Allocate and initialize the filesystem.
277 	mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK);
278 	bzero((char *)mp, (u_long)sizeof(struct mount));
279 	TAILQ_INIT(&mp->mnt_nvnodelist);
280 	TAILQ_INIT(&mp->mnt_reservedvnlist);
281 	mp->mnt_nvnodelistsize = 0;
282 	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
283 	(void)vfs_busy(mp, LK_NOWAIT, 0, td);
284 	mp->mnt_op = vfsp->vfc_vfsops;
286 	vfsp->vfc_refcount++;
287 	mp->mnt_stat.f_type = vfsp->vfc_typenum;
288 	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
289 	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
290 	mp->mnt_vnodecovered = vp;
291 	mp->mnt_stat.f_owner = p->p_ucred->cr_uid;
292 	mp->mnt_iosize_max = DFLTPHYS;
293 	VOP_UNLOCK(vp, 0, td);
296 	 * Set the mount level flags.
298 	if (SCARG(uap, flags) & MNT_RDONLY)
299 		mp->mnt_flag |= MNT_RDONLY;
300 	else if (mp->mnt_flag & MNT_RDONLY)
301 		mp->mnt_kern_flag |= MNTK_WANTRDWR;
/* Clear then re-apply the caller-controllable per-mount flag set. */
302 	mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
303 	    MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOATIME |
304 	    MNT_NOSYMFOLLOW | MNT_IGNORE |
305 	    MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
306 	mp->mnt_flag |= SCARG(uap, flags) & (MNT_NOSUID | MNT_NOEXEC |
307 	    MNT_NODEV | MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_FORCE |
308 	    MNT_NOSYMFOLLOW | MNT_IGNORE |
309 	    MNT_NOATIME | MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
311 	 * Mount the filesystem.
312 	 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
313 	 * get.  No freeing of cn_pnbuf.
315 	error = VFS_MOUNT(mp, SCARG(uap, path), SCARG(uap, data), &nd, td);
316 	if (mp->mnt_flag & MNT_UPDATE) {
317 		if (mp->mnt_kern_flag & MNTK_WANTRDWR)
318 			mp->mnt_flag &= ~MNT_RDONLY;
319 		mp->mnt_flag &=~ (MNT_UPDATE | MNT_RELOAD | MNT_FORCE);
320 		mp->mnt_kern_flag &=~ MNTK_WANTRDWR;
323 			mp->mnt_kern_flag = flag2;
/* A now-writable mount needs a syncer vnode; an RO one drops it. */
325 		if ((mp->mnt_flag & MNT_RDONLY) == 0) {
326 			if (mp->mnt_syncer == NULL)
327 				error = vfs_allocate_syncvnode(mp);
329 			if (mp->mnt_syncer != NULL)
330 				vrele(mp->mnt_syncer);
331 			mp->mnt_syncer = NULL;
334 		simple_lock(&vp->v_interlock);
335 		vp->v_flag &= ~VMOUNT;
336 		simple_unlock(&vp->v_interlock);
340 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
342 	 * Put the new filesystem on the mount list after root.
346 		simple_lock(&vp->v_interlock);
347 		vp->v_flag &= ~VMOUNT;
348 		vp->v_mountedhere = mp;
349 		simple_unlock(&vp->v_interlock);
350 		simple_lock(&mountlist_slock);
351 		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
352 		simple_unlock(&mountlist_slock);
354 		VOP_UNLOCK(vp, 0, td);
355 		if ((mp->mnt_flag & MNT_RDONLY) == 0)
356 			error = vfs_allocate_syncvnode(mp);
358 		if ((error = VFS_START(mp, 0, td)) != 0)
/* Failure path: undo VMOUNT, drop the vfsconf ref, free the mount. */
361 		simple_lock(&vp->v_interlock);
362 		vp->v_flag &= ~VMOUNT;
363 		simple_unlock(&vp->v_interlock);
364 		mp->mnt_vfc->vfc_refcount--;
366 		free((caddr_t)mp, M_MOUNT);
373 * Scan all active processes to see if any of them have a current
374 * or root directory onto which the new filesystem has just been
375 * mounted. If so, replace them with the new mount point.
/*
 * After a mount covers olddp, walk every process and retarget any
 * cwd/root directory references from the covered vnode to the new
 * filesystem's root (and update the global rootvnode if needed).
 * NOTE(review): intermediate lines (locks, vref/vrele pairing, early
 * return) are missing from this dump.
 */
378 checkdirs(struct vnode *olddp)
380 	struct filedesc *fdp;
/* Usecount 1 means nobody else references olddp; nothing to fix up. */
384 	if (olddp->v_usecount == 1)
386 	if (VFS_ROOT(olddp->v_mountedhere, &newdp))
387 		panic("mount: lost mount");
388 	LIST_FOREACH(p, &allproc, p_list) {
390 		if (fdp->fd_cdir == olddp) {
393 			fdp->fd_cdir = newdp;
395 		if (fdp->fd_rdir == olddp) {
398 			fdp->fd_rdir = newdp;
401 	if (rootvnode == olddp) {
410 * Unmount a file system.
412 * Note: unmount takes a path to the vnode mounted on as argument,
413 * not special file (as before).
415 #ifndef _SYS_SYSPROTO_H_
416 struct unmount_args {
422 * umount_args(char *path, int flags)
/*
 * unmount(2): look up the path, validate ownership and that the vnode
 * is the root of a non-root filesystem, then hand off to dounmount().
 * NOTE(review): error-path cleanup lines are missing from this dump.
 */
426 unmount(struct unmount_args *uap)
428 	struct thread *td = curthread;
429 	struct proc *p = td->td_proc;
436 	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
437 	    SCARG(uap, path), td);
438 	if ((error = namei(&nd)) != 0)
441 	NDFREE(&nd, NDF_ONLY_PNBUF);
445 	 * Only root, or the user that did the original mount is
446 	 * permitted to unmount this filesystem.
448 	if ((mp->mnt_stat.f_owner != p->p_ucred->cr_uid) &&
449 	    (error = suser(td))) {
455 	 * Don't allow unmounting the root file system.
457 	if (mp->mnt_flag & MNT_ROOTFS) {
463 	 * Must be the root of the filesystem
465 	if ((vp->v_flag & VROOT) == 0) {
470 	return (dounmount(mp, SCARG(uap, flags), td));
474 * Do the actual file system unmount.
/*
 * Do the actual unmount work: mark MNTK_UNMOUNT, drain the mount lock,
 * sync/flush, call VFS_UNMOUNT, and either tear the mount down or roll
 * the flags back on failure.
 * NOTE(review): this dump omits several lines (return statements,
 * wakeups, brace closures); the visible sequence is an outline only.
 */
477 dounmount(struct mount *mp, int flags, struct thread *td)
479 	struct vnode *coveredvp;
482 	struct proc *p = td->td_proc;
/* Refuse a second concurrent unmount of the same mount point. */
486 	simple_lock(&mountlist_slock);
487 	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
488 		simple_unlock(&mountlist_slock);
491 	mp->mnt_kern_flag |= MNTK_UNMOUNT;
492 	/* Allow filesystems to detect that a forced unmount is in progress. */
493 	if (flags & MNT_FORCE)
494 		mp->mnt_kern_flag |= MNTK_UNMOUNTF;
495 	error = lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK |
496 	    ((flags & MNT_FORCE) ? 0 : LK_NOWAIT), &mountlist_slock, td);
498 		mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
499 		if (mp->mnt_kern_flag & MNTK_MWAIT)
504 	if (mp->mnt_flag & MNT_EXPUBLIC)
505 		vfs_setpublicfs(NULL, NULL, NULL);
507 	vfs_msync(mp, MNT_WAIT);
/* Temporarily force synchronous writes during the unmount. */
508 	async_flag = mp->mnt_flag & MNT_ASYNC;
509 	mp->mnt_flag &=~ MNT_ASYNC;
510 	cache_purgevfs(mp);	/* remove cache entries for this file sys */
511 	if (mp->mnt_syncer != NULL)
512 		vrele(mp->mnt_syncer);
513 	if (((mp->mnt_flag & MNT_RDONLY) ||
514 	    (error = VFS_SYNC(mp, MNT_WAIT, p->p_ucred, td)) == 0) ||
516 		error = VFS_UNMOUNT(mp, flags, td);
517 	simple_lock(&mountlist_slock);
/* Unmount failed: restore syncer/flags and release the drained lock. */
519 		if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL)
520 			(void) vfs_allocate_syncvnode(mp);
521 		mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
522 		mp->mnt_flag |= async_flag;
523 		lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK | LK_REENABLE,
524 		    &mountlist_slock, td);
525 		if (mp->mnt_kern_flag & MNTK_MWAIT)
/* Success: unlink from mountlist, uncover the vnode, free the mount. */
529 	TAILQ_REMOVE(&mountlist, mp, mnt_list);
530 	if ((coveredvp = mp->mnt_vnodecovered) != NULLVP) {
531 		coveredvp->v_mountedhere = (struct mount *)0;
534 	mp->mnt_vfc->vfc_refcount--;
535 	if (!TAILQ_EMPTY(&mp->mnt_nvnodelist))
536 		panic("unmount: dangling vnode");
537 	lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &mountlist_slock, td);
538 	if (mp->mnt_kern_flag & MNTK_MWAIT)
540 	free((caddr_t)mp, M_MOUNT);
545 * Sync each mounted filesystem.
547 #ifndef _SYS_SYSPROTO_H_
/* debug.syncprt sysctl tunable (printing hook; use not visible here). */
554 static int syncprt = 0;
555 SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, "");
/*
 * sync(2): walk the mount list and push dirty data on every writable
 * filesystem with a non-blocking VFS_SYNC.
 * NOTE(review): vfs_unbusy and loop-closing lines are missing from
 * this dump.
 */
560 sync(struct sync_args *uap)
562 	struct thread *td = curthread;
563 	struct proc *p = td->td_proc;
564 	struct mount *mp, *nmp;
567 	simple_lock(&mountlist_slock);
568 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
/* Skip mounts that are busy rather than blocking on them. */
569 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, td)) {
570 			nmp = TAILQ_NEXT(mp, mnt_list);
573 		if ((mp->mnt_flag & MNT_RDONLY) == 0) {
/* Disable async while syncing, then restore the caller's setting. */
574 			asyncflag = mp->mnt_flag & MNT_ASYNC;
575 			mp->mnt_flag &= ~MNT_ASYNC;
576 			vfs_msync(mp, MNT_NOWAIT);
577 			VFS_SYNC(mp, MNT_NOWAIT,
578 			    ((p != NULL) ? p->p_ucred : NOCRED), td);
579 			mp->mnt_flag |= asyncflag;
581 		simple_lock(&mountlist_slock);
582 		nmp = TAILQ_NEXT(mp, mnt_list);
585 	simple_unlock(&mountlist_slock);
588 	 * XXX don't call vfs_bufstats() yet because that routine
589 	 * was not imported in the Lite2 merge.
594 #endif /* DIAGNOSTIC */
599 	/* XXX PRISON: could be per prison flag */
600 static int prison_quotas;
/* kern.prison.quotas: allow jailed processes to manipulate quotas. */
602 SYSCTL_INT(_kern_prison, OID_AUTO, quotas, CTLFLAG_RW, &prison_quotas, 0, "");
/*
 * quotactl(2): resolve the path to its mount point and forward the
 * quota command to the filesystem via VFS_QUOTACTL.
 */
612 quotactl(struct quotactl_args *uap)
614 	struct thread *td = curthread;
615 	struct proc *p = td->td_proc;
/* Jailed processes are refused unless the prison_quotas knob is set. */
621 	if (p->p_ucred->cr_prison && !prison_quotas)
623 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
624 	if ((error = namei(&nd)) != 0)
626 	mp = nd.ni_vp->v_mount;
627 	NDFREE(&nd, NDF_ONLY_PNBUF);
629 	return (VFS_QUOTACTL(mp, SCARG(uap, cmd), SCARG(uap, uid),
630 	    SCARG(uap, arg), td));
634 * statfs_args(char *path, struct statfs *buf)
636 * Get filesystem statistics.
/*
 * statfs(2): resolve a path, call VFS_STATFS on its mount, and copy
 * the statistics out to userspace.  For non-root callers the fsid is
 * zeroed in a local copy (the sb/copyout switch lines are omitted in
 * this dump).
 */
640 statfs(struct statfs_args *uap)
642 	struct thread *td = curthread;
649 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
650 	if ((error = namei(&nd)) != 0)
652 	mp = nd.ni_vp->v_mount;
654 	NDFREE(&nd, NDF_ONLY_PNBUF);
656 	error = VFS_STATFS(mp, sp, td);
659 	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
/* Hide the filesystem id from unprivileged callers. */
661 		bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
662 		sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
665 	return (copyout((caddr_t)sp, (caddr_t)SCARG(uap, buf), sizeof(*sp)));
669 * fstatfs_args(int fd, struct statfs *buf)
671 * Get filesystem statistics.
/*
 * fstatfs(2): like statfs(2) but starts from an open file descriptor;
 * same non-root fsid hiding applies.
 */
675 fstatfs(struct fstatfs_args *uap)
677 	struct thread *td = curthread;
678 	struct proc *p = td->td_proc;
681 	register struct statfs *sp;
686 	if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
/* The descriptor's backing vnode identifies the mount to query. */
688 	mp = ((struct vnode *)fp->f_data)->v_mount;
692 	error = VFS_STATFS(mp, sp, td);
695 	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
697 		bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
698 		sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
701 	return (copyout((caddr_t)sp, (caddr_t)SCARG(uap, buf), sizeof(*sp)));
705 * getfsstat_args(struct statfs *buf, long bufsize, int flags)
707 * Get statistics on all filesystems.
/*
 * getfsstat(2): copy statfs records for every mount into the user
 * buffer (up to bufsize worth); returns the number copied, or the
 * total number of mounts when the buffer is too small / NULL.
 * NOTE(review): vfs_unbusy calls and sfsp/count advancement lines are
 * missing from this dump.
 */
711 getfsstat(struct getfsstat_args *uap)
713 	struct thread *td = curthread;
714 	struct proc *p = td->td_proc;
715 	struct mount *mp, *nmp;
718 	long count, maxcount, error;
720 	maxcount = SCARG(uap, bufsize) / sizeof(struct statfs);
721 	sfsp = (caddr_t)SCARG(uap, buf);
723 	simple_lock(&mountlist_slock);
724 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
725 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, td)) {
726 			nmp = TAILQ_NEXT(mp, mnt_list);
729 		if (sfsp && count < maxcount) {
732 			 * If MNT_NOWAIT or MNT_LAZY is specified, do not
733 			 * refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY
734 			 * overrides MNT_WAIT.
736 			if (((SCARG(uap, flags) & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
737 			    (SCARG(uap, flags) & MNT_WAIT)) &&
738 			    (error = VFS_STATFS(mp, sp, td))) {
739 				simple_lock(&mountlist_slock);
740 				nmp = TAILQ_NEXT(mp, mnt_list);
744 			sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
745 			error = copyout((caddr_t)sp, sfsp, sizeof(*sp));
753 		simple_lock(&mountlist_slock);
754 		nmp = TAILQ_NEXT(mp, mnt_list);
757 	simple_unlock(&mountlist_slock);
/* Report the number of entries actually written (or total mounts). */
758 	if (sfsp && count > maxcount)
759 		p->p_retval[0] = maxcount;
761 		p->p_retval[0] = count;
766 * fchdir_args(int fd)
768 * Change current working directory to a given file descriptor.
/*
 * fchdir(2): make the directory open on fd the current working
 * directory, following any mounts stacked on top of it.
 * NOTE(review): error labels, vref/vrele pairing, and the fd_cdir
 * assignment are among the lines missing from this dump.
 */
772 fchdir(struct fchdir_args *uap)
774 	struct thread *td = curthread;
775 	struct proc *p = td->td_proc;
776 	struct filedesc *fdp = p->p_fd;
777 	struct vnode *vp, *tdp;
782 	if ((error = getvnode(fdp, SCARG(uap, fd), &fp)) != 0)
784 	vp = (struct vnode *)fp->f_data;
786 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
787 	if (vp->v_type != VDIR)
/* Require search permission on the target directory. */
790 		error = VOP_ACCESS(vp, VEXEC, p->p_ucred, td);
/* If a filesystem is mounted here, descend to its root vnode. */
791 	while (!error && (mp = vp->v_mountedhere) != NULL) {
792 		if (vfs_busy(mp, 0, 0, td))
794 		error = VFS_ROOT(mp, &tdp);
805 	VOP_UNLOCK(vp, 0, td);
812 * chdir_args(char *path)
814 * Change current working directory (``.'').
/*
 * chdir(2): resolve the path via the shared change_dir() helper and
 * install the resulting vnode as the process's working directory.
 */
818 chdir(struct chdir_args *uap)
820 	struct thread *td = curthread;
821 	struct proc *p = td->td_proc;
822 	struct filedesc *fdp = p->p_fd;
826 	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
827 	    SCARG(uap, path), td);
/* change_dir() validates VDIR type and VEXEC permission. */
828 	if ((error = change_dir(&nd, td)) != 0)
830 	NDFREE(&nd, NDF_ONLY_PNBUF);
832 	fdp->fd_cdir = nd.ni_vp;
837 * Helper function for raised chroot(2) security function: Refuse if
838 * any filedescriptors are open directories.
/*
 * Scan the file table and fail if any descriptor is an open directory
 * (used to harden chroot(2) against fd-based escapes).
 * NOTE(review): the loop's error return and continue lines are missing
 * from this dump.  Old-style (K&R) parameter declaration preserved.
 */
841 chroot_refuse_vdir_fds(fdp)
842 	struct filedesc *fdp;
849 	for (fd = 0; fd < fdp->fd_nfiles ; fd++) {
850 		error = getvnode(fdp, fd, &fp);
853 		vp = (struct vnode *)fp->f_data;
854 		if (vp->v_type != VDIR)
862 * This sysctl determines if we will allow a process to chroot(2) if it
863 * has a directory open:
864 * 0: disallowed for all processes.
865 * 1: allowed for processes that were not already chroot(2)'ed.
866 * 2: allowed for all processes.
/* Policy knob consulted by chroot(2); see the sysctl comment above:
 * 0 = never, 1 = only if not already chrooted, 2 = always. */
869 static int chroot_allow_open_directories = 1;
871 SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW,
872 	&chroot_allow_open_directories, 0, "");
875 * chroot_args(char *path)
877 * Change notion of root (``/'') directory.
/*
 * chroot(2): change the process root directory after a privilege check
 * and (optionally) refusing when directories are held open.
 * NOTE(review): vrele of the old root and the fd_jdir condition are
 * among the lines missing from this dump.
 */
881 chroot(struct chroot_args *uap)
883 	struct thread *td = curthread;
884 	struct proc *p = td->td_proc;
885 	struct filedesc *fdp = p->p_fd;
890 	error = suser_cred(p->p_ucred, PRISON_ROOT);
/* Enforce the chroot_allow_open_directories policy. */
893 	if (chroot_allow_open_directories == 0 ||
894 	    (chroot_allow_open_directories == 1 && fdp->fd_rdir != rootvnode))
895 		error = chroot_refuse_vdir_fds(fdp);
898 	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
899 	    SCARG(uap, path), td);
900 	if ((error = change_dir(&nd, td)) != 0)
902 	NDFREE(&nd, NDF_ONLY_PNBUF);
904 	fdp->fd_rdir = nd.ni_vp;
/* fd_jdir records the jail root (assignment condition not visible). */
906 		fdp->fd_jdir = nd.ni_vp;
913 * Common routine for chroot and chdir.
/*
 * Shared helper for chdir(2)/chroot(2): run namei on the caller's
 * nameidata, insist the result is a directory, and check VEXEC with
 * the lookup credentials.  Returns the vnode unlocked.
 * NOTE(review): the namei call and error returns are missing lines.
 */
916 change_dir(struct nameidata *ndp, struct thread *td)
925 	if (vp->v_type != VDIR)
928 		error = VOP_ACCESS(vp, VEXEC, ndp->ni_cnd.cn_cred, td);
932 	VOP_UNLOCK(vp, 0, td);
937 * open_args(char *path, int flags, int mode)
939 * Check permissions, allocate an open file structure,
940 * and call the device open routine if any.
/*
 * open(2): allocate a descriptor, vn_open() the path, handle the
 * fdopen()/dupfdopen() special case, apply O_EXLOCK/O_SHLOCK advisory
 * locks, and return the new fd in p_retval[0].
 * NOTE(review): this dump omits many lines (fhold/fdrop pairing, error
 * labels, closing braces); treat as an outline of the control flow.
 */
943 open(struct open_args *uap)
945 	struct thread *td = curthread;
946 	struct proc *p = td->td_proc;
947 	struct filedesc *fdp = p->p_fd;
950 	int cmode, flags, oflags;
952 	int type, indx, error;
956 	oflags = SCARG(uap, flags);
/* O_ACCMODE with both bits set is invalid. */
957 	if ((oflags & O_ACCMODE) == O_ACCMODE)
959 	flags = FFLAGS(oflags);
960 	error = falloc(p, &nfp, &indx);
/* Creation mode: requested bits masked by umask, sticky bit stripped. */
964 	cmode = ((SCARG(uap, mode) &~ fdp->fd_cmask) & ALLPERMS) &~ S_ISTXT;
965 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
966 	p->p_dupfd = -indx - 1;		/* XXX check for fdopen */
968 	 * Bump the ref count to prevent another process from closing
969 	 * the descriptor while we are blocked in vn_open()
972 	error = vn_open(&nd, flags, cmode);
975 	 * release our own reference
980 	 * handle special fdopen() case. bleh. dupfdopen() is
981 	 * responsible for dropping the old contents of ofiles[indx]
984 	if ((error == ENODEV || error == ENXIO) &&
985 	    p->p_dupfd >= 0 &&		/* XXX from fdopen */
987 	    dupfdopen(fdp, indx, p->p_dupfd, flags, error)) == 0) {
988 		p->p_retval[0] = indx;
992 	 * Clean up the descriptor, but only if another thread hadn't
993 	 * replaced or closed it.
995 	if (fdp->fd_ofiles[indx] == fp) {
996 		fdp->fd_ofiles[indx] = NULL;
1000 	if (error == ERESTART)
1005 	NDFREE(&nd, NDF_ONLY_PNBUF);
1009 	 * There should be 2 references on the file, one from the descriptor
1010 	 * table, and one for us.
1012 	 * Handle the case where someone closed the file (via its file
1013 	 * descriptor) while we were blocked.  The end result should look
1014 	 * like opening the file succeeded but it was immediately closed.
1016 	if (fp->f_count == 1) {
1017 		KASSERT(fdp->fd_ofiles[indx] != fp,
1018 		    ("Open file descriptor lost all refs"));
1019 		VOP_UNLOCK(vp, 0, td);
1020 		vn_close(vp, flags & FMASK, fp->f_cred, td);
1022 		p->p_retval[0] = indx;
/* Wire the vnode into the file structure. */
1026 	fp->f_data = (caddr_t)vp;
1027 	fp->f_flag = flags & FMASK;
1029 	fp->f_type = (vp->v_type == VFIFO ? DTYPE_FIFO : DTYPE_VNODE);
1030 	if (flags & (O_EXLOCK | O_SHLOCK)) {
1031 		lf.l_whence = SEEK_SET;
1034 		if (flags & O_EXLOCK)
1035 			lf.l_type = F_WRLCK;
1037 			lf.l_type = F_RDLCK;
1039 		if ((flags & FNONBLOCK) == 0)
1041 		VOP_UNLOCK(vp, 0, td);
1042 		if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
1044 			 * lock request failed.  Normally close the descriptor
1045 			 * but handle the case where someone might have dup()d
1046 			 * it when we weren't looking.  One reference is
1047 			 * owned by the descriptor array, the other by us.
1049 			if (fdp->fd_ofiles[indx] == fp) {
1050 				fdp->fd_ofiles[indx] = NULL;
1056 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1057 		fp->f_flag |= FHASLOCK;
1059 	/* assert that vn_open created a backing object if one is needed */
1060 	KASSERT(!vn_canvmio(vp) || VOP_GETVOBJECT(vp, NULL) == 0,
1061 	    ("open: vmio vnode has no backing object after vn_open"));
1062 	VOP_UNLOCK(vp, 0, td);
1065 	 * release our private reference, leaving the one associated with the
1066 	 * descriptor table intact.
1069 	p->p_retval[0] = indx;
1075 * ocreat(char *path, int mode)
/*
 * Old creat(2) compatibility: build an open_args with
 * O_WRONLY|O_CREAT|O_TRUNC and forward to open().
 */
1080 ocreat(struct ocreat_args *uap)
1082 	struct open_args /* {
1083 		syscallarg(char *) path;
1084 		syscallarg(int) flags;
1085 		syscallarg(int) mode;
1088 	SCARG(&nuap, path) = SCARG(uap, path);
1089 	SCARG(&nuap, mode) = SCARG(uap, mode);
1090 	SCARG(&nuap, flags) = O_WRONLY | O_CREAT | O_TRUNC;
1091 	return (open(&nuap));
1093 #endif /* COMPAT_43 */
1096 * mknod_args(char *path, int mode, int dev)
1098 * Create a special file.
/*
 * mknod(2): create a special file (device node, whiteout, or VBAD
 * marker) after a privilege check keyed on the requested file type.
 * NOTE(review): several case labels, error paths, and the vput/vrele
 * cleanup lines are missing from this dump.
 */
1102 mknod(struct mknod_args *uap)
1104 	struct thread *td = curthread;
1105 	struct proc *p = td->td_proc;
1110 	struct nameidata nd;
1114 	switch (SCARG(uap, mode) & S_IFMT) {
1120 		error = suser_cred(p->p_ucred, PRISON_ROOT);
1126 	NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), td);
1127 	if ((error = namei(&nd)) != 0)
/* New node's mode: requested permissions masked by the umask. */
1134 		vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ p->p_fd->fd_cmask;
1135 		vattr.va_rdev = SCARG(uap, dev);
1138 		switch (SCARG(uap, mode) & S_IFMT) {
1139 		case S_IFMT:	/* used by badsect to flag bad sectors */
1140 			vattr.va_type = VBAD;
1143 			vattr.va_type = VCHR;
1146 			vattr.va_type = VBLK;
1157 		VOP_LEASE(nd.ni_dvp, td, p->p_ucred, LEASE_WRITE);
/* Whiteout creation is a directory op; device nodes go via VOP_MKNOD. */
1159 			error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, CREATE);
1161 			error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp,
1162 			    &nd.ni_cnd, &vattr);
1166 			NDFREE(&nd, NDF_ONLY_PNBUF);
1169 		NDFREE(&nd, NDF_ONLY_PNBUF);
1170 		if (nd.ni_dvp == vp)
1177 	ASSERT_VOP_UNLOCKED(nd.ni_dvp, "mknod");
1178 	ASSERT_VOP_UNLOCKED(nd.ni_vp, "mknod");
1183 * mkfifo_args(char *path, int mode)
1185 * Create a named pipe.
/*
 * mkfifo(2): create a named pipe via VOP_MKNOD with va_type = VFIFO.
 * NOTE(review): the EEXIST return and vput/vrele cleanup lines are
 * missing from this dump.
 */
1189 mkfifo(struct mkfifo_args *uap)
1191 	struct thread *td = curthread;
1192 	struct proc *p = td->td_proc;
1195 	struct nameidata nd;
1198 	NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), td);
1199 	if ((error = namei(&nd)) != 0)
/* Target already exists: release references and fail. */
1201 	if (nd.ni_vp != NULL) {
1202 		NDFREE(&nd, NDF_ONLY_PNBUF);
1203 		if (nd.ni_dvp == nd.ni_vp)
1211 	vattr.va_type = VFIFO;
1212 	vattr.va_mode = (SCARG(uap, mode) & ALLPERMS) &~ p->p_fd->fd_cmask;
1213 	VOP_LEASE(nd.ni_dvp, td, p->p_ucred, LEASE_WRITE);
1214 	error = VOP_MKNOD(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
1217 	NDFREE(&nd, NDF_ONLY_PNBUF);
1223 * link_args(char *path, char *link)
1225 * Make a hard file link.
/*
 * link(2): hard-link an existing file (never a directory, per POSIX)
 * to a new name via VOP_LINK.
 * NOTE(review): the second namei call, EEXIST handling body, and
 * vrele/vput cleanup lines are missing from this dump.
 */
1229 link(struct link_args *uap)
1231 	struct thread *td = curthread;
1232 	struct proc *p = td->td_proc;
1234 	struct nameidata nd;
/* First lookup: the existing file to be linked. */
1238 	NDINIT(&nd, LOOKUP, FOLLOW|NOOBJ, UIO_USERSPACE, SCARG(uap, path), td);
1239 	if ((error = namei(&nd)) != 0)
1241 	NDFREE(&nd, NDF_ONLY_PNBUF);
1243 	if (vp->v_type == VDIR)
1244 		error = EPERM;		/* POSIX */
/* Second lookup: the new name to create. */
1246 		NDINIT(&nd, CREATE, LOCKPARENT|NOOBJ, UIO_USERSPACE, SCARG(uap, link), td);
1249 			if (nd.ni_vp != NULL) {
1254 				VOP_LEASE(nd.ni_dvp, td, p->p_ucred,
1256 				VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1257 				error = VOP_LINK(nd.ni_dvp, vp, &nd.ni_cnd);
1259 				NDFREE(&nd, NDF_ONLY_PNBUF);
1260 				if (nd.ni_dvp == nd.ni_vp)
1267 	ASSERT_VOP_UNLOCKED(nd.ni_dvp, "link");
1268 	ASSERT_VOP_UNLOCKED(nd.ni_vp, "link");
1273 * symlink(char *path, char *link)
1275 * Make a symbolic link.
/*
 * symlink(2): copy the target string in from userspace, then create
 * the link via VOP_SYMLINK.  The path buffer comes from namei_zone and
 * is returned on all paths (zfree at the end).
 * NOTE(review): the EEXIST branch body and error-label lines are
 * missing from this dump.
 */
1279 symlink(struct symlink_args *uap)
1281 	struct thread *td = curthread;
1282 	struct proc *p = td->td_proc;
1286 	struct nameidata nd;
1288 	path = zalloc(namei_zone);
1289 	if ((error = copyinstr(SCARG(uap, path), path, MAXPATHLEN, NULL)) != 0)
1292 	NDINIT(&nd, CREATE, LOCKPARENT|NOOBJ, UIO_USERSPACE, SCARG(uap, link), td);
1293 	if ((error = namei(&nd)) != 0)
/* Link name already exists: release and fail. */
1296 		NDFREE(&nd, NDF_ONLY_PNBUF);
1297 		if (nd.ni_dvp == nd.ni_vp)
/* New symlink gets ACCESSPERMS filtered through the umask. */
1306 	vattr.va_mode = ACCESSPERMS &~ p->p_fd->fd_cmask;
1307 	VOP_LEASE(nd.ni_dvp, td, p->p_ucred, LEASE_WRITE);
1308 	error = VOP_SYMLINK(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr, path);
1309 	NDFREE(&nd, NDF_ONLY_PNBUF);
1313 	ASSERT_VOP_UNLOCKED(nd.ni_dvp, "symlink");
1314 	ASSERT_VOP_UNLOCKED(nd.ni_vp, "symlink");
1316 	zfree(namei_zone, path);
1321 * undelete_args(char *path)
1323 * Delete a whiteout from the filesystem.
/*
 * undelete(2): remove a whiteout entry (union filesystem) named by the
 * path.  The lookup must find a whiteout, not a real vnode.
 * NOTE(review): the namei call and EEXIST return are among the lines
 * missing from this dump.
 */
1327 undelete(struct undelete_args *uap)
1329 	struct thread *td = curthread;
1330 	struct proc *p = td->td_proc;
1332 	struct nameidata nd;
1335 	NDINIT(&nd, DELETE, LOCKPARENT|DOWHITEOUT, UIO_USERSPACE,
1336 	    SCARG(uap, path), td);
/* A real vnode (or no whiteout flag) means there is nothing to undelete. */
1341 	if (nd.ni_vp != NULLVP || !(nd.ni_cnd.cn_flags & ISWHITEOUT)) {
1342 		NDFREE(&nd, NDF_ONLY_PNBUF);
1343 		if (nd.ni_dvp == nd.ni_vp)
1352 	VOP_LEASE(nd.ni_dvp, td, p->p_ucred, LEASE_WRITE);
1353 	error = VOP_WHITEOUT(nd.ni_dvp, &nd.ni_cnd, DELETE);
1354 	NDFREE(&nd, NDF_ONLY_PNBUF);
1356 	ASSERT_VOP_UNLOCKED(nd.ni_dvp, "undelete");
1357 	ASSERT_VOP_UNLOCKED(nd.ni_vp, "undelete");
1362 * unlink_args(char *path)
1364 * Delete a name from the filesystem.
/*
 * unlink(2): remove a name from the filesystem via VOP_REMOVE.
 * Directories are refused (EPERM per POSIX), as is the root of a
 * mounted filesystem.
 * NOTE(review): error assignments (e.g. for the VROOT case) and the
 * vput/vrele cleanup lines are missing from this dump.
 */
1367 unlink(struct unlink_args *uap)
1369 	struct thread *td = curthread;
1370 	struct proc *p = td->td_proc;
1373 	struct nameidata nd;
1376 	NDINIT(&nd, DELETE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), td);
1377 	if ((error = namei(&nd)) != 0)
1380 	VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1381 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1383 	if (vp->v_type == VDIR)
1384 		error = EPERM;		/* POSIX */
1387 		 * The root of a mounted filesystem cannot be deleted.
1389 		 * XXX: can this only be a VDIR case?
1391 		if (vp->v_flag & VROOT)
1396 		VOP_LEASE(nd.ni_dvp, td, p->p_ucred, LEASE_WRITE);
1397 		error = VOP_REMOVE(nd.ni_dvp, vp, &nd.ni_cnd);
1399 	NDFREE(&nd, NDF_ONLY_PNBUF);
1400 	if (nd.ni_dvp == vp)
1406 	ASSERT_VOP_UNLOCKED(nd.ni_dvp, "unlink");
1407 	ASSERT_VOP_UNLOCKED(nd.ni_vp, "unlink");
1412 * lseek_args(int fd, int pad, off_t offset, int whence)
1414 * Reposition read/write file offset.
/*
 * lseek(2): reposition the file offset of a vnode-backed descriptor.
 * SEEK_CUR adds to the current offset, SEEK_END adds to the file size
 * (via VOP_GETATTR), SEEK_SET stores the offset directly; the 64-bit
 * result is written into p_retval.
 * NOTE(review): the case labels/breaks and error returns are missing
 * lines in this dump.
 */
1417 lseek(struct lseek_args *uap)
1419 	struct thread *td = curthread;
1420 	struct proc *p = td->td_proc;
1421 	struct ucred *cred = p->p_ucred;
1422 	struct filedesc *fdp = p->p_fd;
/* Validate the descriptor index and fetch the file pointer. */
1427 	if ((u_int)SCARG(uap, fd) >= fdp->fd_nfiles ||
1428 	    (fp = fdp->fd_ofiles[SCARG(uap, fd)]) == NULL)
1430 	if (fp->f_type != DTYPE_VNODE)
1432 	switch (SCARG(uap, whence)) {
1434 		fp->f_offset += SCARG(uap, offset);
1437 		error=VOP_GETATTR((struct vnode *)fp->f_data, &vattr, cred, td);
1440 		fp->f_offset = SCARG(uap, offset) + vattr.va_size;
1443 		fp->f_offset = SCARG(uap, offset);
1448 	*(off_t *)(p->p_retval) = fp->f_offset;
1452 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
1454 * Reposition read/write file offset.
1456 * olseek_args(int fd, long offset, int whence)
/*
 * Old lseek(2) compatibility (COMPAT_43/COMPAT_SUNOS): widen the long
 * offset into a 64-bit lseek_args and forward to lseek().
 */
1459 olseek(struct olseek_args *uap)
1461 	struct lseek_args /* {
1463 		syscallarg(int) pad;
1464 		syscallarg(off_t) offset;
1465 		syscallarg(int) whence;
1469 	SCARG(&nuap, fd) = SCARG(uap, fd);
1470 	SCARG(&nuap, offset) = SCARG(uap, offset);
1471 	SCARG(&nuap, whence) = SCARG(uap, whence);
1472 	error = lseek(&nuap);
1475 #endif /* COMPAT_43 */
1478 * access_args(char *path, int flags)
1480 * Check access permissions.
/*
 * access(2): check permissions using the REAL uid/gid.  A duplicated
 * temporary credential carries the real ids so the shared cred is not
 * mutated; the original credential is restored afterwards (restore and
 * crfree lines are missing from this dump).
 */
1483 access(struct access_args *uap)
1485 	struct thread *td = curthread;
1486 	struct proc *p = td->td_proc;
1487 	struct ucred *cred, *tmpcred;
1490 	struct nameidata nd;
1494 	 * Create and modify a temporary credential instead of one that
1495 	 * is potentially shared.  This could also mess up socket
1496 	 * buffer accounting which can run in an interrupt context.
1498 	tmpcred = crdup(cred);
/* Swap effective ids for the real ids on the temporary credential. */
1499 	tmpcred->cr_uid = p->p_ucred->cr_ruid;
1500 	tmpcred->cr_groups[0] = p->p_ucred->cr_rgid;
1501 	p->p_ucred = tmpcred;
1502 	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
1503 	    SCARG(uap, path), td);
1504 	if ((error = namei(&nd)) != 0)
1508 	/* Flags == 0 means only check for existence. */
1509 	if (SCARG(uap, flags)) {
1511 		if (SCARG(uap, flags) & R_OK)
1513 		if (SCARG(uap, flags) & W_OK)
1515 		if (SCARG(uap, flags) & X_OK)
/* Write access additionally requires the filesystem be writable. */
1517 		if ((flags & VWRITE) == 0 || (error = vn_writechk(vp)) == 0)
1518 			error = VOP_ACCESS(vp, flags, tmpcred, td);
1520 	NDFREE(&nd, NDF_ONLY_PNBUF);
1528 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
1530 * ostat_args(char *path, struct ostat *ub)
1532 * Get file status; this version follows links.
/*
 * Old stat(2) compatibility (follows links): vn_stat into a modern
 * struct stat, convert to the old layout (cvtstat call is an omitted
 * line), and copy out.
 */
1536 ostat(struct ostat_args *uap)
1538 	struct thread *td = curthread;
1542 	struct nameidata nd;
1544 	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
1545 	    SCARG(uap, path), td);
1546 	if ((error = namei(&nd)) != 0)
1548 	NDFREE(&nd, NDF_ONLY_PNBUF);
1549 	error = vn_stat(nd.ni_vp, &sb, td);
1554 	error = copyout((caddr_t)&osb, (caddr_t)SCARG(uap, ub), sizeof (osb));
1559 * olstat_args(char *path, struct ostat *ub)
1561 * Get file status; this version does not follow links.
/*
 * Old lstat(2) compatibility: same as ostat() but with NOFOLLOW so a
 * trailing symlink is stat'ed rather than followed.
 */
1565 olstat(struct olstat_args *uap)
1567 	struct thread *td = curthread;
1572 	struct nameidata nd;
1574 	NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
1575 	    SCARG(uap, path), td);
1576 	if ((error = namei(&nd)) != 0)
1579 	error = vn_stat(vp, &sb, td);
1580 	NDFREE(&nd, NDF_ONLY_PNBUF);
1585 	error = copyout((caddr_t)&osb, (caddr_t)SCARG(uap, ub), sizeof (osb));
1590 * Convert from an old to a new stat structure.
/*
 * Field-by-field conversion from struct stat to the old (COMPAT_43)
 * ostat layout.  Sizes >= 2^32 are handled by the omitted else branch
 * (dump is missing that line and the function signature).
 * NOTE(review): presumably this is cvtstat(st, ost) — signature not
 * visible in this dump; confirm against full source.
 */
1597 	ost->st_dev = st->st_dev;
1598 	ost->st_ino = st->st_ino;
1599 	ost->st_mode = st->st_mode;
1600 	ost->st_nlink = st->st_nlink;
1601 	ost->st_uid = st->st_uid;
1602 	ost->st_gid = st->st_gid;
1603 	ost->st_rdev = st->st_rdev;
/* Old st_size is 32-bit; only copy when the size fits. */
1604 	if (st->st_size < (quad_t)1 << 32)
1605 		ost->st_size = st->st_size;
1608 	ost->st_atime = st->st_atime;
1609 	ost->st_mtime = st->st_mtime;
1610 	ost->st_ctime = st->st_ctime;
1611 	ost->st_blksize = st->st_blksize;
1612 	ost->st_blocks = st->st_blocks;
1613 	ost->st_flags = st->st_flags;
1614 	ost->st_gen = st->st_gen;
1616 #endif /* COMPAT_43 || COMPAT_SUNOS */
/*
 * stat: stat(2) syscall.  Resolves the path (FOLLOW: symlinks are
 * traversed), stats the resulting vnode, then copies the struct stat
 * out to the user buffer.  Error-return lines between the calls are
 * elided from this listing.
 */
1619 * stat_args(char *path, struct stat *ub)
1621 * Get file status; this version follows links.
1625 stat(struct stat_args *uap)
1627 struct thread *td = curthread;
1630 struct nameidata nd;
1632 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
1633 SCARG(uap, path), td);
1634 if ((error = namei(&nd)) != 0)
1636 error = vn_stat(nd.ni_vp, &sb, td);
1637 NDFREE(&nd, NDF_ONLY_PNBUF);
1641 error = copyout((caddr_t)&sb, (caddr_t)SCARG(uap, ub), sizeof (sb));
/*
 * lstat: lstat(2) syscall -- identical to stat() except NOFOLLOW, so a
 * trailing symlink is stat'ed rather than its target.
 */
1646 * lstat_args(char *path, struct stat *ub)
1648 * Get file status; this version does not follow links.
1652 lstat(struct lstat_args *uap)
1654 struct thread *td = curthread;
1658 struct nameidata nd;
1660 NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
1661 SCARG(uap, path), td);
1662 if ((error = namei(&nd)) != 0)
1665 error = vn_stat(vp, &sb, td);
1666 NDFREE(&nd, NDF_ONLY_PNBUF);
1670 error = copyout((caddr_t)&sb, (caddr_t)SCARG(uap, ub), sizeof (sb));
/*
 * cvtnstat: field-by-field conversion from struct stat to the "nstat"
 * layout used by nstat()/nlstat() below; copies timestamps as full
 * timespecs and preserves the qspare padding words.
 */
1679 nsb->st_dev = sb->st_dev;
1680 nsb->st_ino = sb->st_ino;
1681 nsb->st_mode = sb->st_mode;
1682 nsb->st_nlink = sb->st_nlink;
1683 nsb->st_uid = sb->st_uid;
1684 nsb->st_gid = sb->st_gid;
1685 nsb->st_rdev = sb->st_rdev;
1686 nsb->st_atimespec = sb->st_atimespec;
1687 nsb->st_mtimespec = sb->st_mtimespec;
1688 nsb->st_ctimespec = sb->st_ctimespec;
1689 nsb->st_size = sb->st_size;
1690 nsb->st_blocks = sb->st_blocks;
1691 nsb->st_blksize = sb->st_blksize;
1692 nsb->st_flags = sb->st_flags;
1693 nsb->st_gen = sb->st_gen;
1694 nsb->st_qspare[0] = sb->st_qspare[0];
1695 nsb->st_qspare[1] = sb->st_qspare[1];
/*
 * nstat: like stat() but returns the extended "nstat" structure,
 * produced by cvtnstat(); follows symlinks.
 */
1699 * nstat_args(char *path, struct nstat *ub)
1703 nstat(struct nstat_args *uap)
1705 struct thread *td = curthread;
1709 struct nameidata nd;
1711 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
1712 SCARG(uap, path), td);
1713 if ((error = namei(&nd)) != 0)
1715 NDFREE(&nd, NDF_ONLY_PNBUF);
1716 error = vn_stat(nd.ni_vp, &sb, td);
1720 cvtnstat(&sb, &nsb);
1721 error = copyout((caddr_t)&nsb, (caddr_t)SCARG(uap, ub), sizeof (nsb));
/*
 * nlstat: nstat() variant that does not follow a trailing symlink.
 */
1726 * nlstat_args(char *path, struct nstat *ub)
1728 * Get file status; this version does not follow links.
1732 nlstat(struct nlstat_args *uap)
1734 struct thread *td = curthread;
1739 struct nameidata nd;
1741 NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
1742 SCARG(uap, path), td);
1743 if ((error = namei(&nd)) != 0)
1746 NDFREE(&nd, NDF_ONLY_PNBUF);
1747 error = vn_stat(vp, &sb, td);
1751 cvtnstat(&sb, &nsb);
1752 error = copyout((caddr_t)&nsb, (caddr_t)SCARG(uap, ub), sizeof (nsb));
/*
 * pathconf: pathconf(2) syscall.  Resolves the path and asks the
 * filesystem for the named variable via VOP_PATHCONF; the answer is
 * delivered through the process return-value slot (p->p_retval).
 */
1757 * pathconf_args(char *path, int name)
1759 * Get configurable pathname variables.
1763 pathconf(struct pathconf_args *uap)
1765 struct thread *td = curthread;
1766 struct proc *p = td->td_proc;
1768 struct nameidata nd;
1770 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
1771 SCARG(uap, path), td);
1772 if ((error = namei(&nd)) != 0)
1774 NDFREE(&nd, NDF_ONLY_PNBUF);
1775 error = VOP_PATHCONF(nd.ni_vp, SCARG(uap, name), p->p_retval);
/*
 * readlink: readlink(2) syscall.  Looks up the path with NOFOLLOW,
 * rejects non-symlink vnodes (the EINVAL return is elided from this
 * listing), builds a single-iovec userspace uio over the caller's
 * buffer, and reads the link target via VOP_READLINK.  The number of
 * bytes produced is returned through p->p_retval[0].
 */
1781 * readlink_args(char *path, char *buf, int count)
1783 * Return target name of a symbolic link.
1787 readlink(struct readlink_args *uap)
1789 struct thread *td = curthread;
1790 struct proc *p = td->td_proc;
1795 struct nameidata nd;
1797 NDINIT(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | NOOBJ, UIO_USERSPACE,
1798 SCARG(uap, path), td);
1799 if ((error = namei(&nd)) != 0)
1801 NDFREE(&nd, NDF_ONLY_PNBUF);
1803 if (vp->v_type != VLNK)
1806 aiov.iov_base = SCARG(uap, buf);
1807 aiov.iov_len = SCARG(uap, count);
1808 auio.uio_iov = &aiov;
1809 auio.uio_iovcnt = 1;
1810 auio.uio_offset = 0;
1811 auio.uio_rw = UIO_READ;
1812 auio.uio_segflg = UIO_USERSPACE;
1814 auio.uio_resid = SCARG(uap, count);
1815 error = VOP_READLINK(vp, &auio, p->p_ucred);
1818 p->p_retval[0] = SCARG(uap, count) - auio.uio_resid;
/*
 * setfflags: common helper for chflags()/fchflags().  Requires
 * superuser (within the prison) for device vnodes -- see the rationale
 * comment below -- then sets va_flags on the vnode under an exclusive
 * lock with a write lease.
 */
1823 setfflags(struct vnode *vp, int flags)
1825 struct thread *td = curthread;
1826 struct proc *p = td->td_proc;
1831 * Prevent non-root users from setting flags on devices. When
1832 * a device is reused, users can retain ownership of the device
1833 * if they are allowed to set flags and programs assume that
1834 * chown can't fail when done as root.
1836 if ((vp->v_type == VCHR || vp->v_type == VBLK) &&
1837 ((error = suser_cred(p->p_ucred, PRISON_ROOT)) != 0))
1840 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1841 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1843 vattr.va_flags = flags;
1844 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
1845 VOP_UNLOCK(vp, 0, td);
/*
 * chflags: chflags(2).  Path lookup (following links), then delegate
 * the actual attribute change to setfflags().
 */
1850 * chflags(char *path, int flags)
1852 * Change flags of a file given a path name.
1856 chflags(struct chflags_args *uap)
1858 struct thread *td = curthread;
1860 struct nameidata nd;
1862 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
1863 if ((error = namei(&nd)) != 0)
1865 NDFREE(&nd, NDF_ONLY_PNBUF);
1866 error = setfflags(nd.ni_vp, SCARG(uap, flags));
/*
 * fchflags: fchflags(2).  Translate the descriptor to a vnode via
 * getvnode() and delegate to setfflags().
 */
1872 * fchflags_args(int fd, int flags)
1874 * Change flags of a file given a file descriptor.
1878 fchflags(struct fchflags_args *uap)
1880 struct thread *td = curthread;
1881 struct proc *p = td->td_proc;
1885 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
1887 return setfflags((struct vnode *) fp->f_data, SCARG(uap, flags));
/*
 * setfmode: common helper for chmod()/lchmod()/fchmod().  Sets
 * va_mode (masked to ALLPERMS) on the vnode under an exclusive lock
 * with a write lease.
 */
1891 setfmode(struct vnode *vp, int mode)
1893 struct thread *td = curthread;
1894 struct proc *p = td->td_proc;
1898 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1899 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1901 vattr.va_mode = mode & ALLPERMS;
1902 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
1903 VOP_UNLOCK(vp, 0, td);
/*
 * chmod: chmod(2).  Path lookup following links, then setfmode().
 */
1908 * chmod_args(char *path, int mode)
1910 * Change mode of a file given path name.
1914 chmod(struct chmod_args *uap)
1916 struct thread *td = curthread;
1918 struct nameidata nd;
1920 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
1921 if ((error = namei(&nd)) != 0)
1923 NDFREE(&nd, NDF_ONLY_PNBUF);
1924 error = setfmode(nd.ni_vp, SCARG(uap, mode));
/*
 * lchmod: lchmod(2) -- chmod() variant that does not follow a
 * trailing symlink.
 */
1930 * lchmod_args(char *path, int mode)
1932 * Change mode of a file given path name (don't follow links.)
1936 lchmod(struct lchmod_args *uap)
1938 struct thread *td = curthread;
1940 struct nameidata nd;
1942 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
1943 if ((error = namei(&nd)) != 0)
1945 NDFREE(&nd, NDF_ONLY_PNBUF);
1946 error = setfmode(nd.ni_vp, SCARG(uap, mode));
/*
 * fchmod: fchmod(2).  Descriptor-to-vnode translation, then setfmode().
 */
1952 * fchmod_args(int fd, int mode)
1954 * Change mode of a file given a file descriptor.
1958 fchmod(struct fchmod_args *uap)
1960 struct thread *td = curthread;
1961 struct proc *p = td->td_proc;
1965 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
1967 return setfmode((struct vnode *)fp->f_data, SCARG(uap, mode));
/*
 * setfown: common helper for chown()/lchown()/fchown().  Sets the
 * owner/group attributes (the va_uid/va_gid assignments are elided
 * from this listing) on the vnode under an exclusive lock with a
 * write lease.
 */
1971 setfown(struct vnode *vp, uid_t uid, gid_t gid)
1973 struct thread *td = curthread;
1974 struct proc *p = td->td_proc;
1978 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1979 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1983 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
1984 VOP_UNLOCK(vp, 0, td);
/*
 * chown: chown(2).  Path lookup following links, then setfown().
 */
1989 * chown(char *path, int uid, int gid)
1991 * Set ownership given a path name.
1995 chown(struct chown_args *uap)
1997 struct thread *td = curthread;
1999 struct nameidata nd;
2001 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
2002 if ((error = namei(&nd)) != 0)
2004 NDFREE(&nd, NDF_ONLY_PNBUF);
2005 error = setfown(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid));
/*
 * lchown: lchown(2) -- chown() variant that does not follow a
 * trailing symlink.
 */
2011 * lchown_args(char *path, int uid, int gid)
2013 * Set ownership given a path name, do not cross symlinks.
2017 lchown(struct lchown_args *uap)
2019 struct thread *td = curthread;
2021 struct nameidata nd;
2023 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
2024 if ((error = namei(&nd)) != 0)
2026 NDFREE(&nd, NDF_ONLY_PNBUF);
2027 error = setfown(nd.ni_vp, SCARG(uap, uid), SCARG(uap, gid));
/*
 * fchown: fchown(2).  Descriptor-to-vnode translation, then setfown().
 */
2033 * fchown_args(int fd, int uid, int gid)
2035 * Set ownership given a file descriptor.
2039 fchown(struct fchown_args *uap)
2041 struct thread *td = curthread;
2042 struct proc *p = td->td_proc;
2046 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2048 return setfown((struct vnode *)fp->f_data,
2049 SCARG(uap, uid), SCARG(uap, gid));
/*
 * getutimes: common helper for the *utimes() syscalls.  A NULL user
 * pointer means "use the current time" (the time fetch before the
 * TIMEVAL_TO_TIMESPEC on the NULL path is elided from this listing);
 * otherwise copy in two timevals and convert both to timespecs.
 */
2053 getutimes(const struct timeval *usrtvp, struct timespec *tsp)
2055 struct timeval tv[2];
2058 if (usrtvp == NULL) {
2060 TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]);
2063 if ((error = copyin(usrtvp, tv, sizeof (tv))) != 0)
2065 TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]);
2066 TIMEVAL_TO_TIMESPEC(&tv[1], &tsp[1]);
/*
 * setutimes: common helper that applies atime/mtime to a vnode under
 * an exclusive lock.  nullflag records that the caller passed a NULL
 * times pointer, which sets VA_UTIMES_NULL so the filesystem applies
 * its "utimes with NULL" permission rules.
 */
2072 setutimes(struct vnode *vp, const struct timespec *ts, int nullflag)
2074 struct thread *td = curthread;
2075 struct proc *p = td->td_proc;
2079 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2080 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2082 vattr.va_atime = ts[0];
2083 vattr.va_mtime = ts[1];
2085 vattr.va_vaflags |= VA_UTIMES_NULL;
2086 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
2087 VOP_UNLOCK(vp, 0, td);
/*
 * utimes: utimes(2).  Fetch/convert the times first, then path lookup
 * following links, then setutimes().
 */
2092 * utimes_args(char *path, struct timeval *tptr)
2094 * Set the access and modification times of a file.
2098 utimes(struct utimes_args *uap)
2100 struct thread *td = curthread;
2101 struct timespec ts[2];
2102 struct timeval *usrtvp;
2104 struct nameidata nd;
2106 usrtvp = SCARG(uap, tptr);
2107 if ((error = getutimes(usrtvp, ts)) != 0)
2109 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
2110 if ((error = namei(&nd)) != 0)
2112 NDFREE(&nd, NDF_ONLY_PNBUF);
2113 error = setutimes(nd.ni_vp, ts, usrtvp == NULL);
/*
 * lutimes: lutimes(2) -- utimes() variant that does not follow a
 * trailing symlink.
 */
2119 * lutimes_args(char *path, struct timeval *tptr)
2121 * Set the access and modification times of a file.
2125 lutimes(struct lutimes_args *uap)
2127 struct thread *td = curthread;
2128 struct timespec ts[2];
2129 struct timeval *usrtvp;
2131 struct nameidata nd;
2133 usrtvp = SCARG(uap, tptr);
2134 if ((error = getutimes(usrtvp, ts)) != 0)
2136 NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
2137 if ((error = namei(&nd)) != 0)
2139 NDFREE(&nd, NDF_ONLY_PNBUF);
2140 error = setutimes(nd.ni_vp, ts, usrtvp == NULL);
/*
 * futimes: futimes(2).  Fetch/convert the times, translate the
 * descriptor to a vnode, then setutimes().
 */
2146 * futimes_args(int fd, struct timeval *tptr)
2148 * Set the access and modification times of a file.
2152 futimes(struct futimes_args *uap)
2154 struct thread *td = curthread;
2155 struct proc *p = td->td_proc;
2156 struct timespec ts[2];
2158 struct timeval *usrtvp;
2161 usrtvp = SCARG(uap, tptr);
2162 if ((error = getutimes(usrtvp, ts)) != 0)
2164 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2166 return setutimes((struct vnode *)fp->f_data, ts, usrtvp == NULL);
/*
 * truncate: truncate(2).  Rejects negative lengths (EINVAL return
 * elided from this listing), resolves the path, and under an
 * exclusive vnode lock refuses directories, then checks writability
 * (vn_writechk + VOP_ACCESS for VWRITE) before setting va_size.
 */
2170 * truncate(char *path, int pad, off_t length)
2172 * Truncate a file given its path name.
2176 truncate(struct truncate_args *uap)
2178 struct thread *td = curthread;
2179 struct proc *p = td->td_proc;
2183 struct nameidata nd;
2185 if (uap->length < 0)
2187 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
2188 if ((error = namei(&nd)) != 0)
2191 NDFREE(&nd, NDF_ONLY_PNBUF);
2192 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2193 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2194 if (vp->v_type == VDIR)
2196 else if ((error = vn_writechk(vp)) == 0 &&
2197 (error = VOP_ACCESS(vp, VWRITE, p->p_ucred, td)) == 0) {
2199 vattr.va_size = SCARG(uap, length);
2200 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
/*
 * ftruncate: ftruncate(2).  Like truncate() but starts from an open
 * descriptor: the file must have been opened FWRITE, and the setattr
 * uses the file's own credential (fp->f_cred) rather than the
 * process credential -- no VOP_ACCESS recheck is needed.
 */
2207 * ftruncate_args(int fd, int pad, off_t length)
2209 * Truncate a file given a file descriptor.
2213 ftruncate(struct ftruncate_args *uap)
2215 struct thread *td = curthread;
2216 struct proc *p = td->td_proc;
2222 if (uap->length < 0)
2224 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2226 if ((fp->f_flag & FWRITE) == 0)
2228 vp = (struct vnode *)fp->f_data;
2229 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2230 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2231 if (vp->v_type == VDIR)
2233 else if ((error = vn_writechk(vp)) == 0) {
2235 vattr.va_size = SCARG(uap, length);
2236 error = VOP_SETATTR(vp, &vattr, fp->f_cred, td);
2238 VOP_UNLOCK(vp, 0, td);
/*
 * otruncate/oftruncate: 4.3BSD compatibility shims that widen the old
 * long length argument and forward to truncate()/ftruncate().
 */
2242 #if defined(COMPAT_43) || defined(COMPAT_SUNOS)
2244 * otruncate_args(char *path, long length)
2246 * Truncate a file given its path name.
2250 otruncate(struct otruncate_args *uap)
2252 struct truncate_args /* {
2253 syscallarg(char *) path;
2254 syscallarg(int) pad;
2255 syscallarg(off_t) length;
2258 SCARG(&nuap, path) = SCARG(uap, path);
2259 SCARG(&nuap, length) = SCARG(uap, length);
2260 return (truncate(&nuap));
2264 * oftruncate_args(int fd, long length)
2266 * Truncate a file given a file descriptor.
2270 oftruncate(struct oftruncate_args *uap)
2272 struct ftruncate_args /* {
2274 syscallarg(int) pad;
2275 syscallarg(off_t) length;
2278 SCARG(&nuap, fd) = SCARG(uap, fd);
2279 SCARG(&nuap, length) = SCARG(uap, length);
2280 return (ftruncate(&nuap));
2282 #endif /* COMPAT_43 || COMPAT_SUNOS */
/*
 * fsync: fsync(2).  Cleans any dirty VM object pages backing the
 * vnode, issues VOP_FSYNC with MNT_WAIT, and -- if soft-updates is
 * enabled on the mount and a bio fsync hook is installed -- flushes
 * soft-dependency metadata via bioops.io_fsync.
 */
2287 * Sync an open file.
2291 fsync(struct fsync_args *uap)
2293 struct thread *td = curthread;
2294 struct proc *p = td->td_proc;
2300 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2302 vp = (struct vnode *)fp->f_data;
2303 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2304 if (VOP_GETVOBJECT(vp, &obj) == 0)
2305 vm_object_page_clean(obj, 0, 0, 0);
2306 if ((error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, td)) == 0 &&
2307 vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP) &&
2309 error = (*bioops.io_fsync)(vp);
2310 VOP_UNLOCK(vp, 0, td);
/*
 * rename: rename(2).  Performs two namei operations (DELETE on the
 * source with WANTPARENT|SAVESTART, RENAME on the target with locked
 * parent/leaf), enforces the directory/non-directory pairing rules,
 * takes write leases on every vnode involved, and hands the actual
 * work to VOP_RENAME.  Much of the error/cleanup path (vrele/vput
 * sequences, the "same vnode" early-out) is elided from this listing;
 * note VOP_RENAME itself consumes the passed references on success.
 */
2315 * rename_args(char *from, char *to)
2317 * Rename files. Source and destination must either both be directories,
2318 * or both not be directories. If target is a directory, it must be empty.
2322 rename(struct rename_args *uap)
2324 struct thread *td = curthread;
2325 struct proc *p = td->td_proc;
2326 struct vnode *tvp, *fvp, *tdvp;
2327 struct nameidata fromnd, tond;
2331 NDINIT(&fromnd, DELETE, WANTPARENT | SAVESTART, UIO_USERSPACE,
2332 SCARG(uap, from), td);
2333 if ((error = namei(&fromnd)) != 0)
2336 NDINIT(&tond, RENAME, LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART | NOOBJ,
2337 UIO_USERSPACE, SCARG(uap, to), td);
2338 if (fromnd.ni_vp->v_type == VDIR)
2339 tond.ni_cnd.cn_flags |= WILLBEDIR;
2340 if ((error = namei(&tond)) != 0) {
2341 /* Translate error code for rename("dir1", "dir2/."). */
2342 if (error == EISDIR && fvp->v_type == VDIR)
2344 NDFREE(&fromnd, NDF_ONLY_PNBUF);
2345 vrele(fromnd.ni_dvp);
2352 if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
2355 } else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
2363 * If the source is the same as the destination (that is, if they
2364 * are links to the same vnode), then there is nothing to do.
2370 VOP_LEASE(tdvp, td, p->p_ucred, LEASE_WRITE);
2371 if (fromnd.ni_dvp != tdvp) {
2372 VOP_LEASE(fromnd.ni_dvp, td, p->p_ucred, LEASE_WRITE);
2375 VOP_LEASE(tvp, td, p->p_ucred, LEASE_WRITE);
2377 error = VOP_RENAME(fromnd.ni_dvp, fromnd.ni_vp, &fromnd.ni_cnd,
2378 tond.ni_dvp, tond.ni_vp, &tond.ni_cnd);
2379 NDFREE(&fromnd, NDF_ONLY_PNBUF);
2380 NDFREE(&tond, NDF_ONLY_PNBUF);
2382 NDFREE(&fromnd, NDF_ONLY_PNBUF);
2383 NDFREE(&tond, NDF_ONLY_PNBUF);
2390 vrele(fromnd.ni_dvp);
2393 vrele(tond.ni_startdir);
2394 ASSERT_VOP_UNLOCKED(fromnd.ni_dvp, "rename");
2395 ASSERT_VOP_UNLOCKED(fromnd.ni_vp, "rename");
2396 ASSERT_VOP_UNLOCKED(tond.ni_dvp, "rename");
2397 ASSERT_VOP_UNLOCKED(tond.ni_vp, "rename");
2399 if (fromnd.ni_startdir)
2400 vrele(fromnd.ni_startdir);
/*
 * mkdir: mkdir(2).  CREATE-mode lookup with the parent locked;
 * WILLBEDIR tells the lookup a directory will be created.  The
 * "already exists" handling after namei (vrele of the existing vnode,
 * EEXIST return) is elided from this listing.  The new directory's
 * mode is masked by the process umask (fd_cmask).
 */
2407 * mkdir_args(char *path, int mode)
2409 * Make a directory file.
2413 mkdir(struct mkdir_args *uap)
2415 struct thread *td = curthread;
2416 struct proc *p = td->td_proc;
2420 struct nameidata nd;
2423 NDINIT(&nd, CREATE, LOCKPARENT, UIO_USERSPACE, SCARG(uap, path), td);
2424 nd.ni_cnd.cn_flags |= WILLBEDIR;
2425 if ((error = namei(&nd)) != 0)
2429 NDFREE(&nd, NDF_ONLY_PNBUF);
2430 if (nd.ni_dvp == vp)
2438 vattr.va_type = VDIR;
2439 vattr.va_mode = (SCARG(uap, mode) & ACCESSPERMS) &~ p->p_fd->fd_cmask;
2440 VOP_LEASE(nd.ni_dvp, td, p->p_ucred, LEASE_WRITE);
2441 error = VOP_MKDIR(nd.ni_dvp, &nd.ni_vp, &nd.ni_cnd, &vattr);
2442 NDFREE(&nd, NDF_ONLY_PNBUF);
2446 ASSERT_VOP_UNLOCKED(nd.ni_dvp, "mkdir");
2447 ASSERT_VOP_UNLOCKED(nd.ni_vp, "mkdir");
/*
 * rmdir: rmdir(2).  DELETE-mode lookup with both parent and leaf
 * locked.  Refuses non-directories, "rmdir .", and the root of a
 * mounted filesystem before calling VOP_RMDIR; the error returns for
 * those checks are elided from this listing.
 */
2452 * rmdir_args(char *path)
2454 * Remove a directory file.
2458 rmdir(struct rmdir_args *uap)
2460 struct thread *td = curthread;
2461 struct proc *p = td->td_proc;
2464 struct nameidata nd;
2467 NDINIT(&nd, DELETE, LOCKPARENT | LOCKLEAF, UIO_USERSPACE,
2468 SCARG(uap, path), td);
2469 if ((error = namei(&nd)) != 0)
2472 if (vp->v_type != VDIR) {
2477 * No rmdir "." please.
2479 if (nd.ni_dvp == vp) {
2484 * The root of a mounted filesystem cannot be deleted.
2486 if (vp->v_flag & VROOT)
2489 VOP_LEASE(nd.ni_dvp, td, p->p_ucred, LEASE_WRITE);
2490 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2491 error = VOP_RMDIR(nd.ni_dvp, nd.ni_vp, &nd.ni_cnd);
2494 NDFREE(&nd, NDF_ONLY_PNBUF);
2495 if (nd.ni_dvp == vp)
2501 ASSERT_VOP_UNLOCKED(nd.ni_dvp, "rmdir");
2502 ASSERT_VOP_UNLOCKED(nd.ni_vp, "rmdir");
/*
 * ogetdirentries: 4.3BSD-compatible getdirentries().  On filesystems
 * with new-style directory entries it must convert each record to the
 * old format: on little-endian machines the d_type/d_namlen bytes are
 * swapped in place in the user's buffer; on big-endian machines the
 * directory is read into a kernel bounce buffer (kuio/dirbuf), each
 * record fixed up, and the result uiomove'd out.  If the read returns
 * nothing and this is the root of a union mount, the read is retried
 * on the covered vnode (the vref/vrele and "goto again"-style retry
 * lines are elided from this listing).
 */
2508 * ogetdirentries_args(int fd, char *buf, u_int count, long *basep)
2510 * Read a block of directory entries in a file system independent format.
2513 ogetdirentries(struct ogetdirentries_args *uap)
2515 struct thread *td = curthread;
2516 struct proc *p = td->td_proc;
2519 struct uio auio, kuio;
2520 struct iovec aiov, kiov;
2521 struct dirent *dp, *edp;
2523 int error, eofflag, readcnt;
2526 /* XXX arbitrary sanity limit on `count'. */
2527 if (SCARG(uap, count) > 64 * 1024)
2529 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2531 if ((fp->f_flag & FREAD) == 0)
2533 vp = (struct vnode *)fp->f_data;
2535 if (vp->v_type != VDIR)
2537 aiov.iov_base = SCARG(uap, buf);
2538 aiov.iov_len = SCARG(uap, count);
2539 auio.uio_iov = &aiov;
2540 auio.uio_iovcnt = 1;
2541 auio.uio_rw = UIO_READ;
2542 auio.uio_segflg = UIO_USERSPACE;
2544 auio.uio_resid = SCARG(uap, count);
2545 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2546 loff = auio.uio_offset = fp->f_offset;
2547 # if (BYTE_ORDER != LITTLE_ENDIAN)
/* Old-format filesystems (no maxsymlinklen) need no conversion here. */
2548 if (vp->v_mount->mnt_maxsymlinklen <= 0) {
2549 error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag,
2551 fp->f_offset = auio.uio_offset;
/* Conversion path: read into a kernel buffer, then fix each record. */
2556 kuio.uio_iov = &kiov;
2557 kuio.uio_segflg = UIO_SYSSPACE;
2558 kiov.iov_len = SCARG(uap, count);
2559 MALLOC(dirbuf, caddr_t, SCARG(uap, count), M_TEMP, M_WAITOK);
2560 kiov.iov_base = dirbuf;
2561 error = VOP_READDIR(vp, &kuio, fp->f_cred, &eofflag,
2563 fp->f_offset = kuio.uio_offset;
2565 readcnt = SCARG(uap, count) - kuio.uio_resid;
2566 edp = (struct dirent *)&dirbuf[readcnt];
2567 for (dp = (struct dirent *)dirbuf; dp < edp; ) {
2568 # if (BYTE_ORDER == LITTLE_ENDIAN)
2570 * The expected low byte of
2571 * dp->d_namlen is our dp->d_type.
2572 * The high MBZ byte of dp->d_namlen
2573 * is our dp->d_namlen.
2575 dp->d_type = dp->d_namlen;
2579 * The dp->d_type is the high byte
2580 * of the expected dp->d_namlen,
2581 * so must be zero'ed.
2585 if (dp->d_reclen > 0) {
2586 dp = (struct dirent *)
2587 ((char *)dp + dp->d_reclen);
2594 error = uiomove(dirbuf, readcnt, &auio);
2596 FREE(dirbuf, M_TEMP);
2598 VOP_UNLOCK(vp, 0, td);
/* Empty read: union-mount fallthrough to the covered vnode. */
2601 if (SCARG(uap, count) == auio.uio_resid) {
2602 if (union_dircheckp) {
2603 error = union_dircheckp(td, &vp, fp);
2609 if ((vp->v_flag & VROOT) &&
2610 (vp->v_mount->mnt_flag & MNT_UNION)) {
2611 struct vnode *tvp = vp;
2612 vp = vp->v_mount->mnt_vnodecovered;
2614 fp->f_data = (caddr_t) vp;
2620 error = copyout((caddr_t)&loff, (caddr_t)SCARG(uap, basep),
2622 p->p_retval[0] = SCARG(uap, count) - auio.uio_resid;
2625 #endif /* COMPAT_43 */
/*
 * getdirentries: getdirentries(2).  Reads directory entries into the
 * user's buffer via VOP_READDIR under an exclusive vnode lock (the
 * commented-out LK_SHARED line records an earlier/alternative
 * choice), with the same union-mount retry as ogetdirentries().  The
 * starting offset is copied out through basep when non-NULL, and the
 * byte count produced is returned in p->p_retval[0].
 */
2628 * getdirentries_args(int fd, char *buf, u_int count, long *basep)
2630 * Read a block of directory entries in a file system independent format.
2633 getdirentries(struct getdirentries_args *uap)
2635 struct thread *td = curthread;
2636 struct proc *p = td->td_proc;
2644 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2646 if ((fp->f_flag & FREAD) == 0)
2648 vp = (struct vnode *)fp->f_data;
2650 if (vp->v_type != VDIR)
2652 aiov.iov_base = SCARG(uap, buf);
2653 aiov.iov_len = SCARG(uap, count);
2654 auio.uio_iov = &aiov;
2655 auio.uio_iovcnt = 1;
2656 auio.uio_rw = UIO_READ;
2657 auio.uio_segflg = UIO_USERSPACE;
2659 auio.uio_resid = SCARG(uap, count);
2660 /* vn_lock(vp, LK_SHARED | LK_RETRY, td); */
2661 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2662 loff = auio.uio_offset = fp->f_offset;
2663 error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL);
2664 fp->f_offset = auio.uio_offset;
2665 VOP_UNLOCK(vp, 0, td);
2668 if (SCARG(uap, count) == auio.uio_resid) {
2669 if (union_dircheckp) {
2670 error = union_dircheckp(td, &vp, fp);
2676 if ((vp->v_flag & VROOT) &&
2677 (vp->v_mount->mnt_flag & MNT_UNION)) {
2678 struct vnode *tvp = vp;
2679 vp = vp->v_mount->mnt_vnodecovered;
2681 fp->f_data = (caddr_t) vp;
2687 if (SCARG(uap, basep) != NULL) {
2688 error = copyout((caddr_t)&loff, (caddr_t)SCARG(uap, basep),
2691 p->p_retval[0] = SCARG(uap, count) - auio.uio_resid;
/*
 * getdents: thin wrapper that forwards to getdirentries() with a NULL
 * basep (the fd/buf assignments are elided from this listing).
 */
2696 * getdents_args(int fd, char *buf, size_t count)
2699 getdents(struct getdents_args *uap)
2701 struct getdirentries_args ap;
2705 ap.count = uap->count;
2707 return getdirentries(&ap);
/*
 * umask: umask(2).  Returns the previous creation mask in
 * p_retval[0] and installs the new one, masked to ALLPERMS.
 */
2711 * umask(int newmask)
2713 * Set the mode mask for creation of filesystem nodes.
2718 umask(struct umask_args *uap)
2720 struct thread *td = curthread;
2721 struct proc *p = td->td_proc;
2722 struct filedesc *fdp;
2725 p->p_retval[0] = fdp->fd_cmask;
2726 fdp->fd_cmask = SCARG(uap, newmask) & ALLPERMS;
/*
 * revoke: revoke(2).  Only device vnodes (VCHR/VBLK) may be revoked;
 * the caller must own the node or be (prison-)superuser.  VOP_REVOKE
 * with REVOKEALL rips the device away from all existing opens.
 */
2731 * revoke(char *path)
2733 * Void all references to file by ripping underlying filesystem
2738 revoke(struct revoke_args *uap)
2740 struct thread *td = curthread;
2741 struct proc *p = td->td_proc;
2745 struct nameidata nd;
2747 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
2748 if ((error = namei(&nd)) != 0)
2751 NDFREE(&nd, NDF_ONLY_PNBUF);
2752 if (vp->v_type != VCHR && vp->v_type != VBLK) {
2756 if ((error = VOP_GETATTR(vp, &vattr, p->p_ucred, td)) != 0)
2758 if (p->p_ucred->cr_uid != vattr.va_uid &&
2759 (error = suser_cred(p->p_ucred, PRISON_ROOT)))
2762 VOP_REVOKE(vp, REVOKEALL);
/*
 * getvnode: translate a file descriptor into its struct file,
 * accepting only vnode-backed and FIFO descriptors (the EBADF/EINVAL
 * returns and *fpp store are elided from this listing).
 */
2769 * Convert a user file descriptor to a kernel file entry.
2772 getvnode(struct filedesc *fdp, int fd, struct file **fpp)
2776 if ((u_int)fd >= fdp->fd_nfiles ||
2777 (fp = fdp->fd_ofiles[fd]) == NULL)
2779 if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO)
/*
 * getfh: getfh(2) -- obtain an NFS file handle for a path.
 * Superuser-only (the suser check itself is elided from this
 * listing).  Builds the handle from the mount's fsid plus the
 * filesystem-specific fid from VFS_VPTOFH, then copies it out.
 */
2785 * getfh_args(char *fname, fhandle_t *fhp)
2787 * Get (NFS) file handle
2790 getfh(struct getfh_args *uap)
2792 struct thread *td = curthread;
2793 struct nameidata nd;
2799 * Must be super user
2804 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, uap->fname, td);
2808 NDFREE(&nd, NDF_ONLY_PNBUF);
2810 bzero(&fh, sizeof(fh));
2811 fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid;
2812 error = VFS_VPTOFH(vp, &fh.fh_fid);
2816 error = copyout(&fh, uap->fhp, sizeof (fh));
/*
 * fhopen: fhopen(2) -- open a file by NFS handle, for rpc.lockd.
 * Superuser-only; without that check this would bypass all path-based
 * access control (hence the warning comment below).  The body mirrors
 * vn_open(): translate handle -> mount -> locked vnode, validate the
 * open mode (must be read and/or write, no O_CREAT; no symlinks or
 * sockets), check write permission and apply O_TRUNC, VOP_OPEN,
 * allocate a descriptor, and apply any O_EXLOCK/O_SHLOCK advisory
 * lock via VOP_ADVLOCK.  Several error-cleanup and bookkeeping lines
 * (vput on the bad path, fdrop calls, mode computation) are elided
 * from this listing.
 */
2821 * fhopen_args(const struct fhandle *u_fhp, int flags)
2823 * syscall for the rpc.lockd to use to translate a NFS file handle into
2824 * an open descriptor.
2826 * warning: do not remove the suser() call or this becomes one giant
2830 fhopen(struct fhopen_args *uap)
2832 struct thread *td = curthread;
2833 struct proc *p = td->td_proc;
2838 struct vattr *vap = &vat;
2841 struct filedesc *fdp = p->p_fd;
2842 int fmode, mode, error, type;
2847 * Must be super user
2853 fmode = FFLAGS(SCARG(uap, flags));
2854 /* why not allow a non-read/write open for our lockd? */
2855 if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT))
2857 error = copyin(SCARG(uap,u_fhp), &fhp, sizeof(fhp));
2860 /* find the mount point */
2861 mp = vfs_getvfs(&fhp.fh_fsid);
2864 /* now give me my vnode, it gets returned to me locked */
2865 error = VFS_FHTOVP(mp, &fhp.fh_fid, &vp);
2869 * from now on we have to make sure not
2870 * to forget about the vnode
2871 * any error that causes an abort must vput(vp)
2872 * just set error = err and 'goto bad;'.
2878 if (vp->v_type == VLNK) {
2882 if (vp->v_type == VSOCK) {
2887 if (fmode & (FWRITE | O_TRUNC)) {
2888 if (vp->v_type == VDIR) {
2892 error = vn_writechk(vp);
2900 error = VOP_ACCESS(vp, mode, p->p_ucred, td);
2904 if (fmode & O_TRUNC) {
2905 VOP_UNLOCK(vp, 0, td); /* XXX */
2906 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2907 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); /* XXX */
2910 error = VOP_SETATTR(vp, vap, p->p_ucred, td);
2914 error = VOP_OPEN(vp, fmode, p->p_ucred, td);
2918 * Make sure that a VM object is created for VMIO support.
2920 if (vn_canvmio(vp) == TRUE) {
2921 if ((error = vfs_object_create(vp, td, p->p_ucred)) != 0)
2928 * end of vn_open code
2931 if ((error = falloc(p, &nfp, &indx)) != 0) {
2939 * hold an extra reference to avoid having fp ripped out
2940 * from under us while we block in the lock op.
2943 nfp->f_data = (caddr_t)vp;
2944 nfp->f_flag = fmode & FMASK;
2945 nfp->f_ops = &vnops;
2946 nfp->f_type = DTYPE_VNODE;
2947 if (fmode & (O_EXLOCK | O_SHLOCK)) {
2948 lf.l_whence = SEEK_SET;
2951 if (fmode & O_EXLOCK)
2952 lf.l_type = F_WRLCK;
2954 lf.l_type = F_RDLCK;
2956 if ((fmode & FNONBLOCK) == 0)
2958 VOP_UNLOCK(vp, 0, td);
2959 if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
2961 * lock request failed. Normally close the descriptor
2962 * but handle the case where someone might have dup()d
2963 * or close()d it when we weren't looking.
2965 if (fdp->fd_ofiles[indx] == fp) {
2966 fdp->fd_ofiles[indx] = NULL;
2971 * release our private reference.
2976 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2977 fp->f_flag |= FHASLOCK;
2979 if ((vp->v_type == VREG) && (VOP_GETVOBJECT(vp, NULL) != 0))
2980 vfs_object_create(vp, td, p->p_ucred);
2982 VOP_UNLOCK(vp, 0, td);
2984 p->p_retval[0] = indx;
/*
 * fhstat: fhstat(2) -- stat a file by NFS handle.  Superuser-only.
 * Copies in the handle, resolves it to a vnode via the mount's
 * VFS_FHTOVP, stats it, and copies the struct stat out.
 */
2993 * fhstat_args(struct fhandle *u_fhp, struct stat *sb)
2996 fhstat(struct fhstat_args *uap)
2998 struct thread *td = curthread;
3006 * Must be super user
3012 error = copyin(SCARG(uap, u_fhp), &fh, sizeof(fhandle_t));
3016 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
3018 if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
3020 error = vn_stat(vp, &sb, td);
3024 error = copyout(&sb, SCARG(uap, sb), sizeof(sb));
/*
 * fhstatfs: fhstatfs(2) -- statfs a filesystem by NFS handle.
 * Superuser-only.  After VFS_STATFS, f_flags is refreshed from the
 * live mount flags; for unprivileged-looking output the fsid is
 * zeroed in a local copy (the non-root branch selecting &sb is
 * elided from this listing) before copyout.
 */
3029 * fhstatfs_args(struct fhandle *u_fhp, struct statfs *buf)
3032 fhstatfs(struct fhstatfs_args *uap)
3034 struct thread *td = curthread;
3043 * Must be super user
3045 if ((error = suser(td)))
3048 if ((error = copyin(SCARG(uap, u_fhp), &fh, sizeof(fhandle_t))) != 0)
3051 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
3053 if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
3058 if ((error = VFS_STATFS(mp, sp, td)) != 0)
3060 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3062 bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
3063 sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
3066 return (copyout(sp, SCARG(uap, buf), sizeof(*sp)));
/*
 * extattrctl: push extended-attribute configuration into the VFS.
 * Resolves the path to its mount point and forwards cmd/attrname/arg
 * to VFS_EXTATTRCTL.  Note the attribute name is still a userspace
 * pointer at this layer (see the comment above).
 */
3070 * Syscall to push extended attribute configuration information into the
3071 * VFS. Accepts a path, which it converts to a mountpoint, as well as
3072 * a command (int cmd), and attribute name and misc data. For now, the
3073 * attribute name is left in userspace for consumption by the VFS_op.
3074 * It will probably be changed to be copied into sysspace by the
3075 * syscall in the future, once issues with various consumers of the
3076 * attribute code have raised their hands.
3078 * Currently this is used only by UFS Extended Attributes.
3081 extattrctl(struct extattrctl_args *uap)
3083 struct thread *td = curthread;
3084 struct nameidata nd;
3088 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
3089 if ((error = namei(&nd)) != 0)
3091 mp = nd.ni_vp->v_mount;
3093 return (VFS_EXTATTRCTL(mp, SCARG(uap, cmd), SCARG(uap, attrname),
3094 SCARG(uap, arg), td));
/*
 * extattr_set_file: set a named extended attribute from a writev-style
 * iovec array.  Copies in the attribute name, resolves the path, then
 * builds a UIO_WRITE uio over the user's iovecs -- falling back from
 * the on-stack aiov[UIO_SMALLIOV] to a MALLOC'ed array for large
 * iovcnt (EINVAL for iovcnt > UIO_MAXIOV; that return and the
 * iov = aiov assignment are elided from this listing).  Each iov_len
 * is overflow-checked against INT_MAX before being summed into
 * uio_resid.  Bytes consumed are reported via p_retval[0].
 */
3098 * Syscall to set a named extended attribute on a file or directory.
3099 * Accepts attribute name, and a uio structure pointing to the data to set.
3100 * The uio is consumed in the style of writev(). The real work happens
3101 * in VOP_SETEXTATTR().
3104 extattr_set_file(struct extattr_set_file_args *uap)
3106 struct thread *td = curthread;
3107 struct proc *p = td->td_proc;
3108 struct nameidata nd;
3110 struct iovec *iov, *needfree = NULL, aiov[UIO_SMALLIOV];
3111 char attrname[EXTATTR_MAXNAMELEN];
3115 error = copyin(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN);
3118 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
3119 SCARG(uap, path), td);
3120 if ((error = namei(&nd)) != 0)
3122 iovlen = uap->iovcnt * sizeof(struct iovec);
3123 if (uap->iovcnt > UIO_SMALLIOV) {
3124 if (uap->iovcnt > UIO_MAXIOV) {
3128 MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
3133 auio.uio_iovcnt = uap->iovcnt;
3134 auio.uio_rw = UIO_WRITE;
3135 auio.uio_segflg = UIO_USERSPACE;
3137 auio.uio_offset = 0;
3138 if ((error = copyin((caddr_t)uap->iovp, (caddr_t)iov, iovlen)))
3141 for (i = 0; i < uap->iovcnt; i++) {
3142 if (iov->iov_len > INT_MAX - auio.uio_resid) {
3146 auio.uio_resid += iov->iov_len;
3149 cnt = auio.uio_resid;
3150 error = VOP_SETEXTATTR(nd.ni_vp, attrname, &auio, p->p_ucred, td);
3151 cnt -= auio.uio_resid;
3152 p->p_retval[0] = cnt;
3155 FREE(needfree, M_IOV);
/*
 * extattr_get_file: read a named extended attribute into a
 * readv-style iovec array.  Structure mirrors extattr_set_file()
 * exactly, but with UIO_READ and VOP_GETEXTATTR; bytes produced are
 * reported via p_retval[0].
 */
3161 * Syscall to get a named extended attribute on a file or directory.
3162 * Accepts attribute name, and a uio structure pointing to a buffer for the
3163 * data. The uio is consumed in the style of readv(). The real work
3164 * happens in VOP_GETEXTATTR();
3167 extattr_get_file(struct extattr_get_file_args *uap)
3169 struct thread *td = curthread;
3170 struct proc *p = td->td_proc;
3171 struct nameidata nd;
3173 struct iovec *iov, *needfree, aiov[UIO_SMALLIOV];
3174 char attrname[EXTATTR_MAXNAMELEN];
3178 error = copyin(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN);
3181 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
3182 SCARG(uap, path), td);
3183 if ((error = namei(&nd)) != 0)
3185 iovlen = uap->iovcnt * sizeof (struct iovec);
3186 if (uap->iovcnt > UIO_SMALLIOV) {
3187 if (uap->iovcnt > UIO_MAXIOV) {
3191 MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
3198 auio.uio_iovcnt = uap->iovcnt;
3199 auio.uio_rw = UIO_READ;
3200 auio.uio_segflg = UIO_USERSPACE;
3202 auio.uio_offset = 0;
3203 if ((error = copyin((caddr_t)uap->iovp, (caddr_t)iov, iovlen)))
3206 for (i = 0; i < uap->iovcnt; i++) {
3207 if (iov->iov_len > INT_MAX - auio.uio_resid) {
3211 auio.uio_resid += iov->iov_len;
3214 cnt = auio.uio_resid;
3215 error = VOP_GETEXTATTR(nd.ni_vp, attrname, &auio, p->p_ucred, td);
3216 cnt -= auio.uio_resid;
3217 p->p_retval[0] = cnt;
3220 FREE(needfree, M_IOV);
3226 * Syscall to delete a named extended attribute from a file or directory.
3227 * Accepts attribute name. The real work happens in VOP_SETEXTATTR().
3230 extattr_delete_file(struct extattr_delete_file_args *uap)
3232 struct thread *td = curthread;
3233 struct proc *p = td->td_proc;
3234 struct nameidata nd;
3235 char attrname[EXTATTR_MAXNAMELEN];
3238 error = copyin(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN);
3241 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE,
3242 SCARG(uap, path), td);
3243 if ((error = namei(&nd)) != 0)
3245 error = VOP_SETEXTATTR(nd.ni_vp, attrname, NULL, p->p_ucred, td);