Cleanup pass. Removed code that is not needed anymore.
[dragonfly.git] / sys / kern / vfs_syscalls.c
1/*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94
39 * $FreeBSD: src/sys/kern/vfs_syscalls.c,v 1.151.2.18 2003/04/04 20:35:58 tegge Exp $
40 * $DragonFly: src/sys/kern/vfs_syscalls.c,v 1.35 2004/05/21 15:41:23 drhodus Exp $
41 */
42
43#include <sys/param.h>
44#include <sys/systm.h>
45#include <sys/buf.h>
46#include <sys/sysent.h>
47#include <sys/malloc.h>
48#include <sys/mount.h>
49#include <sys/sysproto.h>
50#include <sys/filedesc.h>
51#include <sys/kernel.h>
52#include <sys/fcntl.h>
53#include <sys/file.h>
54#include <sys/linker.h>
55#include <sys/stat.h>
56#include <sys/unistd.h>
57#include <sys/vnode.h>
58#include <sys/proc.h>
59#include <sys/namei.h>
60#include <sys/dirent.h>
61#include <sys/extattr.h>
62#include <sys/kern_syscall.h>
63
64#include <machine/limits.h>
65#include <vfs/union/union.h>
66#include <sys/sysctl.h>
67#include <vm/vm.h>
68#include <vm/vm_object.h>
69#include <vm/vm_zone.h>
70#include <vm/vm_page.h>
71
72#include <sys/file2.h>
73
74static int checkvp_chdir (struct vnode *vn, struct thread *td);
75static void checkdirs (struct vnode *olddp);
76static int chroot_refuse_vdir_fds (struct filedesc *fdp);
77static int getutimes (const struct timeval *, struct timespec *);
78static int setfown (struct vnode *, uid_t, gid_t);
79static int setfmode (struct vnode *, int);
80static int setfflags (struct vnode *, int);
81static int setutimes (struct vnode *, const struct timespec *, int);
82static int usermount = 0; /* if 1, non-root can mount fs. */
83
84int (*union_dircheckp) (struct thread *, struct vnode **, struct file *);
85
86SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, "");
87
88/*
89 * Virtual File System System Calls
90 */
91
92/*
93 * Mount a file system.
94 */
95/*
96 * mount_args(char *type, char *path, int flags, caddr_t data)
97 */
98/* ARGSUSED */
99int
100mount(struct mount_args *uap)
101{
102 struct thread *td = curthread;
103 struct proc *p = td->td_proc;
104 struct vnode *vp;
105 struct mount *mp;
106 struct vfsconf *vfsp;
107 int error, flag = 0, flag2 = 0;
108 struct vattr va;
109 struct nameidata nd;
110 char fstypename[MFSNAMELEN];
111 lwkt_tokref vlock;
112 lwkt_tokref ilock;
113
114 KKASSERT(p);
115 if (p->p_ucred->cr_prison != NULL)
116 return (EPERM);
117 if (usermount == 0 && (error = suser(td)))
118 return (error);
119 /*
120 * Do not allow NFS export by non-root users.
121 */
122 if (SCARG(uap, flags) & MNT_EXPORTED) {
123 error = suser(td);
124 if (error)
125 return (error);
126 }
127 /*
128 * Silently enforce MNT_NOSUID and MNT_NODEV for non-root users
129 */
130 if (suser(td))
131 SCARG(uap, flags) |= MNT_NOSUID | MNT_NODEV;
132 /*
133 * Get vnode to be covered
134 */
135 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF, UIO_USERSPACE,
136 SCARG(uap, path), td);
137 if ((error = namei(&nd)) != 0)
138 return (error);
139 NDFREE(&nd, NDF_ONLY_PNBUF);
140 vp = nd.ni_vp;
141 if (SCARG(uap, flags) & MNT_UPDATE) {
142 if ((vp->v_flag & VROOT) == 0) {
143 vput(vp);
144 return (EINVAL);
145 }
146 mp = vp->v_mount;
147 flag = mp->mnt_flag;
148 flag2 = mp->mnt_kern_flag;
149 /*
150 * We only allow the filesystem to be reloaded if it
151 * is currently mounted read-only.
152 */
153 if ((SCARG(uap, flags) & MNT_RELOAD) &&
154 ((mp->mnt_flag & MNT_RDONLY) == 0)) {
155 vput(vp);
156 return (EOPNOTSUPP); /* Needs translation */
157 }
158 /*
159		 * Only root, or the user that did the original mount, is
160		 * permitted to update it.
161 */
162 if (mp->mnt_stat.f_owner != p->p_ucred->cr_uid &&
163 (error = suser(td))) {
164 vput(vp);
165 return (error);
166 }
167 if (vfs_busy(mp, LK_NOWAIT, NULL, td)) {
168 vput(vp);
169 return (EBUSY);
170 }
171 lwkt_gettoken(&vlock, vp->v_interlock);
172 if ((vp->v_flag & VMOUNT) != 0 ||
173 vp->v_mountedhere != NULL) {
174 lwkt_reltoken(&vlock);
175 vfs_unbusy(mp, td);
176 vput(vp);
177 return (EBUSY);
178 }
179 vp->v_flag |= VMOUNT;
180 lwkt_reltoken(&vlock);
181 mp->mnt_flag |=
182 SCARG(uap, flags) & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE);
183 VOP_UNLOCK(vp, NULL, 0, td);
184 goto update;
185 }
186 /*
187 * If the user is not root, ensure that they own the directory
188 * onto which we are attempting to mount.
189 */
190 if ((error = VOP_GETATTR(vp, &va, td)) ||
191 (va.va_uid != p->p_ucred->cr_uid &&
192 (error = suser(td)))) {
193 vput(vp);
194 return (error);
195 }
196 if ((error = vinvalbuf(vp, V_SAVE, td, 0, 0)) != 0) {
197 vput(vp);
198 return (error);
199 }
200 if (vp->v_type != VDIR) {
201 vput(vp);
202 return (ENOTDIR);
203 }
204 if ((error = copyinstr(SCARG(uap, type), fstypename, MFSNAMELEN, NULL)) != 0) {
205 vput(vp);
206 return (error);
207 }
208 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
209 if (!strcmp(vfsp->vfc_name, fstypename))
210 break;
211 if (vfsp == NULL) {
212 linker_file_t lf;
213
214 /* Only load modules for root (very important!) */
215 if ((error = suser(td)) != 0) {
216 vput(vp);
217 return error;
218 }
219 error = linker_load_file(fstypename, &lf);
220 if (error || lf == NULL) {
221 vput(vp);
222 if (lf == NULL)
223 error = ENODEV;
224 return error;
225 }
226 lf->userrefs++;
227 /* lookup again, see if the VFS was loaded */
228 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
229 if (!strcmp(vfsp->vfc_name, fstypename))
230 break;
231 if (vfsp == NULL) {
232 lf->userrefs--;
233 linker_file_unload(lf);
234 vput(vp);
235 return (ENODEV);
236 }
237 }
238 lwkt_gettoken(&vlock, vp->v_interlock);
239 if ((vp->v_flag & VMOUNT) != 0 ||
240 vp->v_mountedhere != NULL) {
241 lwkt_reltoken(&vlock);
242 vput(vp);
243 return (EBUSY);
244 }
245 vp->v_flag |= VMOUNT;
246 lwkt_reltoken(&vlock);
247
248 /*
249 * Allocate and initialize the filesystem.
250 */
251 mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK);
252 bzero((char *)mp, (u_long)sizeof(struct mount));
253 TAILQ_INIT(&mp->mnt_nvnodelist);
254 TAILQ_INIT(&mp->mnt_reservedvnlist);
255 mp->mnt_nvnodelistsize = 0;
256 lockinit(&mp->mnt_lock, 0, "vfslock", 0, LK_NOPAUSE);
257 vfs_busy(mp, LK_NOWAIT, NULL, td);
258 mp->mnt_op = vfsp->vfc_vfsops;
259 mp->mnt_vfc = vfsp;
260 vfsp->vfc_refcount++;
261 mp->mnt_stat.f_type = vfsp->vfc_typenum;
262 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
263 strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
264 mp->mnt_vnodecovered = vp;
265 mp->mnt_stat.f_owner = p->p_ucred->cr_uid;
266 mp->mnt_iosize_max = DFLTPHYS;
267 VOP_UNLOCK(vp, NULL, 0, td);
268update:
269 /*
270 * Set the mount level flags.
271 */
272 if (SCARG(uap, flags) & MNT_RDONLY)
273 mp->mnt_flag |= MNT_RDONLY;
274 else if (mp->mnt_flag & MNT_RDONLY)
275 mp->mnt_kern_flag |= MNTK_WANTRDWR;
276 mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
277 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOATIME |
278 MNT_NOSYMFOLLOW | MNT_IGNORE |
279 MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
280 mp->mnt_flag |= SCARG(uap, flags) & (MNT_NOSUID | MNT_NOEXEC |
281 MNT_NODEV | MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_FORCE |
282 MNT_NOSYMFOLLOW | MNT_IGNORE |
283 MNT_NOATIME | MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
284 /*
285 * Mount the filesystem.
286 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
287 * get. No freeing of cn_pnbuf.
288 */
289 error = VFS_MOUNT(mp, SCARG(uap, path), SCARG(uap, data), &nd, td);
290 if (mp->mnt_flag & MNT_UPDATE) {
291 if (mp->mnt_kern_flag & MNTK_WANTRDWR)
292 mp->mnt_flag &= ~MNT_RDONLY;
293 mp->mnt_flag &=~ (MNT_UPDATE | MNT_RELOAD | MNT_FORCE);
294 mp->mnt_kern_flag &=~ MNTK_WANTRDWR;
295 if (error) {
296 mp->mnt_flag = flag;
297 mp->mnt_kern_flag = flag2;
298 }
299 vfs_unbusy(mp, td);
300 lwkt_gettoken(&vlock, vp->v_interlock);
301 vp->v_flag &= ~VMOUNT;
302 lwkt_reltoken(&vlock);
303 vrele(vp);
304 return (error);
305 }
306 vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
307 /*
308 * Put the new filesystem on the mount list after root.
309 */
310 cache_purge(vp);
311 if (!error) {
312 lwkt_gettoken(&vlock, vp->v_interlock);
313 vp->v_flag &= ~VMOUNT;
314 vp->v_mountedhere = mp;
315 lwkt_reltoken(&vlock);
316 lwkt_gettoken(&ilock, &mountlist_token);
317 TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
318 lwkt_reltoken(&ilock);
319 checkdirs(vp);
320 VOP_UNLOCK(vp, NULL, 0, td);
321 error = vfs_allocate_syncvnode(mp);
322 vfs_unbusy(mp, td);
323 if ((error = VFS_START(mp, 0, td)) != 0)
324 vrele(vp);
325 } else {
326 lwkt_gettoken(&vlock, vp->v_interlock);
327 vp->v_flag &= ~VMOUNT;
328 lwkt_reltoken(&vlock);
329 mp->mnt_vfc->vfc_refcount--;
330 vfs_unbusy(mp, td);
331 free((caddr_t)mp, M_MOUNT);
332 vput(vp);
333 }
334 return (error);
335}
336
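/*
 * Editor's note -- illustrative userland sketch, not part of the original
 * source: the MNT_UPDATE path above is what a remount from userland goes
 * through.  For example, upgrading a read-only UFS mount to read-write
 * (this assumes the historical struct ufs_args layout; the device and
 * mount point names are hypothetical):
 *
 *	struct ufs_args args;
 *
 *	args.fspec = "/dev/ad0s1a";
 *	if (mount("ufs", "/mnt", MNT_UPDATE, &args) < 0)
 *		err(1, "mount");
 */
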
337/*
338 * Scan all active processes to see if any of them have a current
339 * or root directory onto which the new filesystem has just been
340 * mounted. If so, replace them with the new mount point.
341 */
342static void
343checkdirs(struct vnode *olddp)
344{
345 struct filedesc *fdp;
346 struct vnode *newdp;
347 struct proc *p;
348
349 if (olddp->v_usecount == 1)
350 return;
351 if (VFS_ROOT(olddp->v_mountedhere, &newdp))
352 panic("mount: lost mount");
353 FOREACH_PROC_IN_SYSTEM(p) {
354 fdp = p->p_fd;
355 if (fdp->fd_cdir == olddp) {
356 vrele(fdp->fd_cdir);
357 vref(newdp);
358 fdp->fd_cdir = newdp;
359 }
360 if (fdp->fd_rdir == olddp) {
361 vrele(fdp->fd_rdir);
362 vref(newdp);
363 fdp->fd_rdir = newdp;
364 }
365 }
366 if (rootvnode == olddp) {
367 vrele(rootvnode);
368 vref(newdp);
369 rootvnode = newdp;
370 vfs_cache_setroot(rootvnode);
371 }
372 vput(newdp);
373}
374
375/*
376 * Unmount a file system.
377 *
378 * Note: unmount takes a path to the vnode mounted on as its argument,
379 * not the special file (as it did historically).
380 */
381/*
382 * umount_args(char *path, int flags)
383 */
384/* ARGSUSED */
385int
386unmount(struct unmount_args *uap)
387{
388 struct thread *td = curthread;
389 struct proc *p = td->td_proc;
390 struct vnode *vp;
391 struct mount *mp;
392 int error;
393 struct nameidata nd;
394
395 KKASSERT(p);
396 if (p->p_ucred->cr_prison != NULL)
397 return (EPERM);
398 if (usermount == 0 && (error = suser(td)))
399 return (error);
400
401 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF, UIO_USERSPACE,
402 SCARG(uap, path), td);
403 if ((error = namei(&nd)) != 0)
404 return (error);
405 vp = nd.ni_vp;
406 NDFREE(&nd, NDF_ONLY_PNBUF);
407 mp = vp->v_mount;
408
409 /*
410	 * Only root, or the user that did the original mount, is
411	 * permitted to unmount this filesystem.
412 */
413 if ((mp->mnt_stat.f_owner != p->p_ucred->cr_uid) &&
414 (error = suser(td))) {
415 vput(vp);
416 return (error);
417 }
418
419 /*
420 * Don't allow unmounting the root file system.
421 */
422 if (mp->mnt_flag & MNT_ROOTFS) {
423 vput(vp);
424 return (EINVAL);
425 }
426
427 /*
428 * Must be the root of the filesystem
429 */
430 if ((vp->v_flag & VROOT) == 0) {
431 vput(vp);
432 return (EINVAL);
433 }
434 vput(vp);
435 return (dounmount(mp, SCARG(uap, flags), td));
436}
437
438/*
439 * Do the actual file system unmount.
440 */
441int
442dounmount(struct mount *mp, int flags, struct thread *td)
443{
444 struct vnode *coveredvp;
445 int error;
446 int async_flag;
447 lwkt_tokref ilock;
448
449 lwkt_gettoken(&ilock, &mountlist_token);
450 if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
451 lwkt_reltoken(&ilock);
452 return (EBUSY);
453 }
454 mp->mnt_kern_flag |= MNTK_UNMOUNT;
455 /* Allow filesystems to detect that a forced unmount is in progress. */
456 if (flags & MNT_FORCE)
457 mp->mnt_kern_flag |= MNTK_UNMOUNTF;
458 error = lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK |
459 ((flags & MNT_FORCE) ? 0 : LK_NOWAIT), &ilock, td);
460 if (error) {
461 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
462 if (mp->mnt_kern_flag & MNTK_MWAIT)
463 wakeup((caddr_t)mp);
464 return (error);
465 }
466
467 if (mp->mnt_flag & MNT_EXPUBLIC)
468 vfs_setpublicfs(NULL, NULL, NULL);
469
470 vfs_msync(mp, MNT_WAIT);
471 async_flag = mp->mnt_flag & MNT_ASYNC;
472 mp->mnt_flag &=~ MNT_ASYNC;
473 cache_purgevfs(mp); /* remove cache entries for this file sys */
474 if (mp->mnt_syncer != NULL)
475 vrele(mp->mnt_syncer);
476 if (((mp->mnt_flag & MNT_RDONLY) ||
477 (error = VFS_SYNC(mp, MNT_WAIT, td)) == 0) ||
478 (flags & MNT_FORCE))
479 error = VFS_UNMOUNT(mp, flags, td);
480 lwkt_gettokref(&ilock);
481 if (error) {
482 if (mp->mnt_syncer == NULL)
483 vfs_allocate_syncvnode(mp);
484 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
485 mp->mnt_flag |= async_flag;
486 lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK | LK_REENABLE,
487 &ilock, td);
488 if (mp->mnt_kern_flag & MNTK_MWAIT)
489 wakeup((caddr_t)mp);
490 return (error);
491 }
492 TAILQ_REMOVE(&mountlist, mp, mnt_list);
493 if ((coveredvp = mp->mnt_vnodecovered) != NULLVP) {
494 coveredvp->v_mountedhere = NULL;
495 vrele(coveredvp);
496 }
497 mp->mnt_vfc->vfc_refcount--;
498 if (!TAILQ_EMPTY(&mp->mnt_nvnodelist))
499 panic("unmount: dangling vnode");
500 lockmgr(&mp->mnt_lock, LK_RELEASE | LK_INTERLOCK, &ilock, td);
501 if (mp->mnt_kern_flag & MNTK_MWAIT)
502 wakeup((caddr_t)mp);
503 free((caddr_t)mp, M_MOUNT);
504 return (0);
505}
506
507/*
508 * Sync each mounted filesystem.
509 */
510
511#ifdef DEBUG
512static int syncprt = 0;
513SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, "");
514#endif /* DEBUG */
515
516/* ARGSUSED */
517int
518sync(struct sync_args *uap)
519{
520 struct thread *td = curthread;
521 struct mount *mp, *nmp;
522 lwkt_tokref ilock;
523 int asyncflag;
524
525 lwkt_gettoken(&ilock, &mountlist_token);
526 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
527 if (vfs_busy(mp, LK_NOWAIT, &ilock, td)) {
528 nmp = TAILQ_NEXT(mp, mnt_list);
529 continue;
530 }
531 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
532 asyncflag = mp->mnt_flag & MNT_ASYNC;
533 mp->mnt_flag &= ~MNT_ASYNC;
534 vfs_msync(mp, MNT_NOWAIT);
535 VFS_SYNC(mp, MNT_NOWAIT, td);
536 mp->mnt_flag |= asyncflag;
537 }
538 lwkt_gettokref(&ilock);
539 nmp = TAILQ_NEXT(mp, mnt_list);
540 vfs_unbusy(mp, td);
541 }
542 lwkt_reltoken(&ilock);
543/*
544 * print out buffer pool stat information on each sync() call.
545 */
546#ifdef DEBUG
547 if (syncprt)
548 vfs_bufstats();
549#endif /* DEBUG */
550 return (0);
551}
552
553/* XXX PRISON: could be per prison flag */
554static int prison_quotas;
555#if 0
556SYSCTL_INT(_kern_prison, OID_AUTO, quotas, CTLFLAG_RW, &prison_quotas, 0, "");
557#endif
558
559/*
560 * quotactl_args(char *path, int fcmd, int uid, caddr_t arg)
561 *
562 * Change filesystem quotas.
563 */
564/* ARGSUSED */
565int
566quotactl(struct quotactl_args *uap)
567{
568 struct thread *td = curthread;
569 struct proc *p = td->td_proc;
570 struct mount *mp;
571 int error;
572 struct nameidata nd;
573
574 KKASSERT(p);
575 if (p->p_ucred->cr_prison && !prison_quotas)
576 return (EPERM);
577 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE,
578 SCARG(uap, path), td);
579 if ((error = namei(&nd)) != 0)
580 return (error);
581 mp = nd.ni_vp->v_mount;
582 NDFREE(&nd, NDF_ONLY_PNBUF);
583 vrele(nd.ni_vp);
584 return (VFS_QUOTACTL(mp, SCARG(uap, cmd), SCARG(uap, uid),
585 SCARG(uap, arg), td));
586}
587
588int
589kern_statfs(struct nameidata *nd, struct statfs *buf)
590{
591 struct thread *td = curthread;
592 struct mount *mp;
593 struct statfs *sp;
594 int error;
595
596 error = namei(nd);
597 if (error)
598 return (error);
599 mp = nd->ni_vp->v_mount;
600 sp = &mp->mnt_stat;
601 NDFREE(nd, NDF_ONLY_PNBUF);
602 vrele(nd->ni_vp);
603 error = VFS_STATFS(mp, sp, td);
604 if (error)
605 return (error);
606 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
607 bcopy(sp, buf, sizeof(*buf));
608	/* Only root should have access to the fsids. */
609 if (suser(td))
610 buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
611 return (0);
612}
613
614/*
615 * statfs_args(char *path, struct statfs *buf)
616 *
617 * Get filesystem statistics.
618 */
619int
620statfs(struct statfs_args *uap)
621{
622 struct thread *td = curthread;
623 struct nameidata nd;
624 struct statfs buf;
625 int error;
626
627 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE, uap->path, td);
628
629 error = kern_statfs(&nd, &buf);
630
631 if (error == 0)
632 error = copyout(&buf, uap->buf, sizeof(*uap->buf));
633 return (error);
634}
635
636int
637kern_fstatfs(int fd, struct statfs *buf)
638{
639 struct thread *td = curthread;
640 struct proc *p = td->td_proc;
641 struct file *fp;
642 struct mount *mp;
643 struct statfs *sp;
644 int error;
645
646 KKASSERT(p);
647 error = getvnode(p->p_fd, fd, &fp);
648 if (error)
649 return (error);
650 mp = ((struct vnode *)fp->f_data)->v_mount;
651 if (mp == NULL)
652 return (EBADF);
653 sp = &mp->mnt_stat;
654 error = VFS_STATFS(mp, sp, td);
655 if (error)
656 return (error);
657 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
658 bcopy(sp, buf, sizeof(*buf));
659	/* Only root should have access to the fsids. */
660 if (suser(td))
661 buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
662 return (0);
663}
664
665/*
666 * fstatfs_args(int fd, struct statfs *buf)
667 *
668 * Get filesystem statistics.
669 */
670int
671fstatfs(struct fstatfs_args *uap)
672{
673 struct statfs buf;
674 int error;
675
676 error = kern_fstatfs(uap->fd, &buf);
677
678 if (error == 0)
679 error = copyout(&buf, uap->buf, sizeof(*uap->buf));
680 return (error);
681}
682
683/*
684 * getfsstat_args(struct statfs *buf, long bufsize, int flags)
685 *
686 * Get statistics on all filesystems.
687 */
688/* ARGSUSED */
689int
690getfsstat(struct getfsstat_args *uap)
691{
692 struct thread *td = curthread;
693 struct mount *mp, *nmp;
694 struct statfs *sp;
695 caddr_t sfsp;
696 lwkt_tokref ilock;
697 long count, maxcount, error;
698
699 maxcount = SCARG(uap, bufsize) / sizeof(struct statfs);
700 sfsp = (caddr_t)SCARG(uap, buf);
701 count = 0;
702 lwkt_gettoken(&ilock, &mountlist_token);
703 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
704 if (vfs_busy(mp, LK_NOWAIT, &ilock, td)) {
705 nmp = TAILQ_NEXT(mp, mnt_list);
706 continue;
707 }
708 if (sfsp && count < maxcount) {
709 sp = &mp->mnt_stat;
710 /*
711			 * If MNT_NOWAIT or MNT_LAZY is specified, do not
712			 * refresh the fsstat cache unless MNT_WAIT is also
713			 * set; MNT_WAIT overrides MNT_NOWAIT and MNT_LAZY.
714 */
715 if (((SCARG(uap, flags) & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
716 (SCARG(uap, flags) & MNT_WAIT)) &&
717 (error = VFS_STATFS(mp, sp, td))) {
718 lwkt_gettokref(&ilock);
719 nmp = TAILQ_NEXT(mp, mnt_list);
720 vfs_unbusy(mp, td);
721 continue;
722 }
723 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
724 error = copyout((caddr_t)sp, sfsp, sizeof(*sp));
725 if (error) {
726 vfs_unbusy(mp, td);
727 return (error);
728 }
729 sfsp += sizeof(*sp);
730 }
731 count++;
732 lwkt_gettokref(&ilock);
733 nmp = TAILQ_NEXT(mp, mnt_list);
734 vfs_unbusy(mp, td);
735 }
736 lwkt_reltoken(&ilock);
737 if (sfsp && count > maxcount)
738 uap->sysmsg_result = maxcount;
739 else
740 uap->sysmsg_result = count;
741 return (0);
742}
743
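/*
 * Editor's note -- illustrative userland sketch, not part of the original
 * source: because the loop above only copies entries out when a buffer is
 * supplied, but still counts every mount, getfsstat(2) is typically used
 * in two passes:
 *
 *	int n = getfsstat(NULL, 0, MNT_NOWAIT);
 *	struct statfs *buf = malloc(n * sizeof(*buf));
 *
 *	if (buf == NULL || getfsstat(buf, n * sizeof(*buf), MNT_NOWAIT) < 0)
 *		err(1, "getfsstat");
 */
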
744/*
745 * fchdir_args(int fd)
746 *
747 * Change current working directory to a given file descriptor.
748 */
749/* ARGSUSED */
750int
751fchdir(struct fchdir_args *uap)
752{
753 struct thread *td = curthread;
754 struct proc *p = td->td_proc;
755 struct filedesc *fdp = p->p_fd;
756 struct vnode *vp, *tdp;
757 struct mount *mp;
758 struct file *fp;
759 int error;
760
761 if ((error = getvnode(fdp, SCARG(uap, fd), &fp)) != 0)
762 return (error);
763 vp = (struct vnode *)fp->f_data;
764 vref(vp);
765 vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
766 if (vp->v_type != VDIR)
767 error = ENOTDIR;
768 else
769 error = VOP_ACCESS(vp, VEXEC, p->p_ucred, td);
770 while (!error && (mp = vp->v_mountedhere) != NULL) {
771 if (vfs_busy(mp, 0, NULL, td))
772 continue;
773 error = VFS_ROOT(mp, &tdp);
774 vfs_unbusy(mp, td);
775 if (error)
776 break;
777 vput(vp);
778 vp = tdp;
779 }
780 if (error) {
781 vput(vp);
782 return (error);
783 }
784 VOP_UNLOCK(vp, NULL, 0, td);
785 vrele(fdp->fd_cdir);
786 fdp->fd_cdir = vp;
787 return (0);
788}
789
790int
791kern_chdir(struct nameidata *nd)
792{
793 struct thread *td = curthread;
794 struct proc *p = td->td_proc;
795 struct filedesc *fdp = p->p_fd;
796 int error;
797
798 if ((error = namei(nd)) != 0)
799 return (error);
800 if ((error = checkvp_chdir(nd->ni_vp, td)) == 0) {
801 vrele(fdp->fd_cdir);
802 fdp->fd_cdir = nd->ni_vp;
803 vref(fdp->fd_cdir);
804 }
805 NDFREE(nd, ~(NDF_NO_FREE_PNBUF | NDF_NO_VP_PUT));
806 return (error);
807}
808
809/*
810 * chdir_args(char *path)
811 *
812 * Change current working directory (``.'').
813 */
814int
815chdir(struct chdir_args *uap)
816{
817 struct thread *td = curthread;
818 struct nameidata nd;
819 int error;
820
821 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF, UIO_USERSPACE,
822 uap->path, td);
823
824 error = kern_chdir(&nd);
825
826 return (error);
827}
828
829/*
830 * Helper function for the stricter chroot(2) security check: refuse the
831 * chroot if any file descriptors reference open directories.
832 */
833static int
834chroot_refuse_vdir_fds(struct filedesc *fdp)
836{
837 struct vnode *vp;
838 struct file *fp;
839 int error;
840 int fd;
841
842 for (fd = 0; fd < fdp->fd_nfiles ; fd++) {
843 error = getvnode(fdp, fd, &fp);
844 if (error)
845 continue;
846 vp = (struct vnode *)fp->f_data;
847 if (vp->v_type != VDIR)
848 continue;
849 return(EPERM);
850 }
851 return (0);
852}
853
854/*
855 * This sysctl determines if we will allow a process to chroot(2) if it
856 * has a directory open:
857 * 0: disallowed for all processes.
858 * 1: allowed for processes that were not already chroot(2)'ed.
859 * 2: allowed for all processes.
860 */
861
862static int chroot_allow_open_directories = 1;
863
864SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW,
865 &chroot_allow_open_directories, 0, "");
866
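/*
 * Editor's note -- illustrative usage, not part of the original source:
 * the policy above is a runtime tunable, e.g.
 *
 *	sysctl kern.chroot_allow_open_directories=0
 *
 * refuses chroot(2) for any process that still holds an open directory
 * descriptor.
 */
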
867/*
868 * Chroot to the specified vnode. vp must be locked and referenced on
869 * call, and will be left locked and referenced on return. This routine
870 * may acquire additional refs on the vnode when associating it with
871 * the process's root and/or jail dirs.
872 */
873int
874kern_chroot(struct vnode *vp)
875{
876 struct thread *td = curthread;
877 struct proc *p = td->td_proc;
878 struct filedesc *fdp = p->p_fd;
879 int error;
880
881 /*
882 * Only root can chroot
883 */
884 if ((error = suser_cred(p->p_ucred, PRISON_ROOT)) != 0)
885 return (error);
886
887 /*
888 * Disallow open directory descriptors (fchdir() breakouts).
889 */
890 if (chroot_allow_open_directories == 0 ||
891 (chroot_allow_open_directories == 1 && fdp->fd_rdir != rootvnode)) {
892 if ((error = chroot_refuse_vdir_fds(fdp)) != 0)
893 return (error);
894 }
895
896 /*
897 * Check the validity of vp as a directory to change to and
898 * associate it with rdir/jdir.
899 */
900 if ((error = checkvp_chdir(vp, td)) == 0) {
901 vrele(fdp->fd_rdir);
902 fdp->fd_rdir = vp;
903 vref(fdp->fd_rdir);
904 if (fdp->fd_jdir == NULL) {
905 fdp->fd_jdir = vp;
906 vref(fdp->fd_jdir);
907 }
908 }
909 return (error);
910}
911
912/*
913 * chroot_args(char *path)
914 *
915 * Change notion of root (``/'') directory.
916 */
917/* ARGSUSED */
918int
919chroot(struct chroot_args *uap)
920{
921 struct thread *td = curthread;
922 struct nameidata nd;
923 int error;
924
925 KKASSERT(td->td_proc);
926 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF, UIO_USERSPACE,
927 SCARG(uap, path), td);
928 if ((error = namei(&nd)) == 0) {
929 error = kern_chroot(nd.ni_vp);
930 NDFREE(&nd, ~(NDF_NO_FREE_PNBUF | NDF_NO_VP_PUT));
931 }
932 return (error);
933}
934
935/*
936 * Common routine for chroot and chdir. Given a locked, referenced vnode,
937 * determine whether it is legal to chdir to the vnode. The vnode's state
938 * is not changed by this call.
939 */
940int
941checkvp_chdir(struct vnode *vp, struct thread *td)
942{
943 int error;
944
945 if (vp->v_type != VDIR)
946 error = ENOTDIR;
947 else
948 error = VOP_ACCESS(vp, VEXEC, td->td_proc->p_ucred, td);
949 return (error);
950}
951
952int
953kern_open(struct nameidata *nd, int oflags, int mode, int *res)
954{
955 struct thread *td = curthread;
956 struct proc *p = td->td_proc;
957 struct filedesc *fdp = p->p_fd;
958 struct file *fp;
959 struct vnode *vp;
960 int cmode, flags;
961 struct file *nfp;
962 int type, indx, error;
963 struct flock lf;
964
965 if ((oflags & O_ACCMODE) == O_ACCMODE)
966 return (EINVAL);
967 flags = FFLAGS(oflags);
968 error = falloc(p, &nfp, &indx);
969 if (error)
970 return (error);
971 fp = nfp;
972 cmode = ((mode &~ fdp->fd_cmask) & ALLPERMS) &~ S_ISTXT;
973 p->p_dupfd = -indx - 1; /* XXX check for fdopen */
974 /*
975 * Bump the ref count to prevent another process from closing
976 * the descriptor while we are blocked in vn_open()
977 */
978 fhold(fp);
979 error = vn_open(nd, flags, cmode);
980 if (error) {
981 /*
982 * release our own reference
983 */
984 fdrop(fp, td);
985
986 /*
987 * handle special fdopen() case. bleh. dupfdopen() is
988 * responsible for dropping the old contents of ofiles[indx]
989 * if it succeeds.
990 */
991 if ((error == ENODEV || error == ENXIO) &&
992 p->p_dupfd >= 0 && /* XXX from fdopen */
993 (error =
994 dupfdopen(fdp, indx, p->p_dupfd, flags, error)) == 0) {
995 *res = indx;
996 return (0);
997 }
998 /*
999 * Clean up the descriptor, but only if another thread hadn't
1000 * replaced or closed it.
1001 */
1002 if (fdp->fd_ofiles[indx] == fp) {
1003 fdp->fd_ofiles[indx] = NULL;
1004 fdrop(fp, td);
1005 }
1006
1007 if (error == ERESTART)
1008 error = EINTR;
1009 return (error);
1010 }
1011 p->p_dupfd = 0;
1012 NDFREE(nd, NDF_ONLY_PNBUF);
1013 vp = nd->ni_vp;
1014
1015 /*
1016 * There should be 2 references on the file, one from the descriptor
1017 * table, and one for us.
1018 *
1019 * Handle the case where someone closed the file (via its file
1020 * descriptor) while we were blocked. The end result should look
1021 * like opening the file succeeded but it was immediately closed.
1022 */
1023 if (fp->f_count == 1) {
1024 KASSERT(fdp->fd_ofiles[indx] != fp,
1025 ("Open file descriptor lost all refs"));
1026 VOP_UNLOCK(vp, NULL, 0, td);
1027 vn_close(vp, flags & FMASK, td);
1028 fdrop(fp, td);
1029 *res = indx;
1030 return 0;
1031 }
1032
1033 fp->f_data = (caddr_t)vp;
1034 fp->f_flag = flags & FMASK;
1035 fp->f_ops = &vnops;
1036 fp->f_type = (vp->v_type == VFIFO ? DTYPE_FIFO : DTYPE_VNODE);
1037 if (flags & (O_EXLOCK | O_SHLOCK)) {
1038 lf.l_whence = SEEK_SET;
1039 lf.l_start = 0;
1040 lf.l_len = 0;
1041 if (flags & O_EXLOCK)
1042 lf.l_type = F_WRLCK;
1043 else
1044 lf.l_type = F_RDLCK;
1045 type = F_FLOCK;
1046 if ((flags & FNONBLOCK) == 0)
1047 type |= F_WAIT;
1048 VOP_UNLOCK(vp, NULL, 0, td);
1049 if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
1050 /*
1051 * lock request failed. Normally close the descriptor
1052 * but handle the case where someone might have dup()d
1053 * it when we weren't looking. One reference is
1054 * owned by the descriptor array, the other by us.
1055 */
1056 if (fdp->fd_ofiles[indx] == fp) {
1057 fdp->fd_ofiles[indx] = NULL;
1058 fdrop(fp, td);
1059 }
1060 fdrop(fp, td);
1061 return (error);
1062 }
1063 vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
1064 fp->f_flag |= FHASLOCK;
1065 }
1066 /* assert that vn_open created a backing object if one is needed */
1067 KASSERT(!vn_canvmio(vp) || VOP_GETVOBJECT(vp, NULL) == 0,
1068 ("open: vmio vnode has no backing object after vn_open"));
1069 VOP_UNLOCK(vp, NULL, 0, td);
1070
1071 /*
1072 * release our private reference, leaving the one associated with the
1073 * descriptor table intact.
1074 */
1075 fdrop(fp, td);
1076 *res = indx;
1077 return (0);
1078}
1079
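/*
 * Editor's note -- illustrative userland sketch, not part of the original
 * source: the O_EXLOCK/O_SHLOCK handling above gives open(2) the effect of
 * an atomic open plus flock(2); without O_NONBLOCK the F_WAIT case above
 * blocks until the lock is granted.  The path here is hypothetical:
 *
 *	int fd = open("/var/run/example.pid",
 *	    O_RDWR | O_CREAT | O_EXLOCK, 0644);
 *	if (fd < 0)
 *		err(1, "open");
 */
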
1080/*
1081 * open_args(char *path, int flags, int mode)
1082 *
1083 * Check permissions, allocate an open file structure,
1084 * and call the device open routine if any.
1085 */
1086int
1087open(struct open_args *uap)
1088{
1089 struct thread *td = curthread;
1090 struct nameidata nd;
1091 int error;
1092
1093 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE, uap->path, td);
1094
1095 error = kern_open(&nd, uap->flags, uap->mode, &uap->sysmsg_result);
1096
1097 return (error);
1098}
1099
1100int
1101kern_mknod(struct nameidata *nd, int mode, int dev)
1102{
1103 struct thread *td = curthread;
1104 struct proc *p = td->td_proc;
1105 struct vnode *vp;
1106 struct vattr vattr;
1107 int error;
1108 int whiteout = 0;
1109
1110 KKASSERT(p);
1111
1112 switch (mode & S_IFMT) {
1113 case S_IFCHR:
1114 case S_IFBLK:
1115 error = suser(td);
1116 break;
1117 default:
1118 error = suser_cred(p->p_ucred, PRISON_ROOT);
1119 break;
1120 }
1121 if (error)
1122 return (error);
1123 bwillwrite();
1124 error = namei(nd);
1125 if (error)
1126 return (error);
1127 vp = nd->ni_vp;
1128 if (vp != NULL)
1129 error = EEXIST;
1130 else {
1131 VATTR_NULL(&vattr);
1132 vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask;
1133 vattr.va_rdev = dev;
1134 whiteout = 0;
1135
1136 switch (mode & S_IFMT) {
1137 case S_IFMT: /* used by badsect to flag bad sectors */
1138 vattr.va_type = VBAD;
1139 break;
1140 case S_IFCHR:
1141 vattr.va_type = VCHR;
1142 break;
1143 case S_IFBLK:
1144 vattr.va_type = VBLK;
1145 break;
1146 case S_IFWHT:
1147 whiteout = 1;
1148 break;
1149 default:
1150 error = EINVAL;
1151 break;
1152 }
1153 }
1154 if (error == 0) {
1155 VOP_LEASE(nd->ni_dvp, td, p->p_ucred, LEASE_WRITE);
1156 if (whiteout)
1157 error = VOP_WHITEOUT(nd->ni_dvp, NCPNULL,
1158 &nd->ni_cnd, NAMEI_CREATE);
1159 else {
1160 error = VOP_MKNOD(nd->ni_dvp, NCPNULL, &nd->ni_vp,
1161 &nd->ni_cnd, &vattr);
1162 if (error == 0)
1163 vput(nd->ni_vp);
1164 }
1165 NDFREE(nd, NDF_ONLY_PNBUF);
1166 vput(nd->ni_dvp);
1167 } else {
1168 NDFREE(nd, NDF_ONLY_PNBUF);
1169 if (nd->ni_dvp == vp)
1170 vrele(nd->ni_dvp);
1171 else
1172 vput(nd->ni_dvp);
1173 if (vp)
1174 vrele(vp);
1175 }
1176 ASSERT_VOP_UNLOCKED(nd->ni_dvp, "mknod");
1177 ASSERT_VOP_UNLOCKED(nd->ni_vp, "mknod");
1178 return (error);
1179}
1180
1181/*
1182 * mknod_args(char *path, int mode, int dev)
1183 *
1184 * Create a special file.
1185 */
1186int
1187mknod(struct mknod_args *uap)
1188{
1189 struct thread *td = curthread;
1190 struct nameidata nd;
1191 int error;
1192
1193 NDINIT(&nd, NAMEI_CREATE, CNP_LOCKPARENT, UIO_USERSPACE, uap->path,
1194 td);
1195
1196 error = kern_mknod(&nd, uap->mode, uap->dev);
1197
1198 return (error);
1199}
1200
1201int
1202kern_mkfifo(struct nameidata *nd, int mode)
1203{
1204 struct thread *td = curthread;
1205 struct proc *p = td->td_proc;
1206 struct vattr vattr;
1207 int error;
1208
1209 bwillwrite();
1210 error = namei(nd);
1211 if (error)
1212 return (error);
1213 if (nd->ni_vp != NULL) {
1214 NDFREE(nd, NDF_ONLY_PNBUF);
1215 if (nd->ni_dvp == nd->ni_vp)
1216 vrele(nd->ni_dvp);
1217 else
1218 vput(nd->ni_dvp);
1219 vrele(nd->ni_vp);
1220 return (EEXIST);
1221 }
1222 VATTR_NULL(&vattr);
1223 vattr.va_type = VFIFO;
1224 vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask;
1225 VOP_LEASE(nd->ni_dvp, td, p->p_ucred, LEASE_WRITE);
1226 error = VOP_MKNOD(nd->ni_dvp, NCPNULL, &nd->ni_vp, &nd->ni_cnd, &vattr);
1227 if (error == 0)
1228 vput(nd->ni_vp);
1229 NDFREE(nd, NDF_ONLY_PNBUF);
1230 vput(nd->ni_dvp);
1231 return (error);
1232}
1233
1234/*
1235 * mkfifo_args(char *path, int mode)
1236 *
1237 * Create a named pipe.
1238 */
1239int
1240mkfifo(struct mkfifo_args *uap)
1241{
1242 struct thread *td = curthread;
1243 struct nameidata nd;
1244 int error;
1245
1246 NDINIT(&nd, NAMEI_CREATE, CNP_LOCKPARENT, UIO_USERSPACE, uap->path,
1247 td);
1248
1249 error = kern_mkfifo(&nd, uap->mode);
1250
1251 return (error);
1252}
1253
1254int
1255kern_link(struct nameidata *nd, struct nameidata *linknd)
1256{
1257 struct thread *td = curthread;
1258 struct proc *p = td->td_proc;
1259 struct vnode *vp;
1260 int error;
1261
1262 bwillwrite();
1263 error = namei(nd);
1264 if (error)
1265 return (error);
1266 NDFREE(nd, NDF_ONLY_PNBUF);
1267 vp = nd->ni_vp;
1268 if (vp->v_type == VDIR)
1269 error = EPERM; /* POSIX */
1270 else {
1271 error = namei(linknd);
1272 if (error == 0) {
1273 if (linknd->ni_vp != NULL) {
1275				vrele(linknd->ni_vp);
1276 error = EEXIST;
1277 } else {
1278 VOP_LEASE(linknd->ni_dvp, td, p->p_ucred,
1279 LEASE_WRITE);
1280 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1281 error = VOP_LINK(linknd->ni_dvp, NCPNULL, vp,
1282 &linknd->ni_cnd);
1283 }
1284 NDFREE(linknd, NDF_ONLY_PNBUF);
1285 if (linknd->ni_dvp == linknd->ni_vp)
1286 vrele(linknd->ni_dvp);
1287 else
1288 vput(linknd->ni_dvp);
1289 ASSERT_VOP_UNLOCKED(linknd->ni_dvp, "link");
1290 ASSERT_VOP_UNLOCKED(linknd->ni_vp, "link");
1291 }
1292 }
1293 vrele(vp);
1294 return (error);
1295}
1296
1297/*
1298 * link_args(char *path, char *link)
1299 *
1300 * Make a hard file link.
1301 */
1302int
1303link(struct link_args *uap)
1304{
1305 struct thread *td = curthread;
1306 struct nameidata nd, linknd;
1307 int error;
1308
1309 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_NOOBJ, UIO_USERSPACE,
1310 uap->path, td);
1311 NDINIT(&linknd, NAMEI_CREATE, CNP_LOCKPARENT | CNP_NOOBJ,
1312 UIO_USERSPACE, uap->link, td);
1313
1314 error = kern_link(&nd, &linknd);
1315
1316 return (error);
1317}
1318
1319int
1320kern_symlink(char *path, struct nameidata *nd)
1321{
1322 struct thread *td = curthread;
1323 struct proc *p = td->td_proc;
1324 struct vattr vattr;
1325 int error;
1326
1327 bwillwrite();
1328 error = namei(nd);
1329 if (error)
1330 return (error);
1331 if (nd->ni_vp) {
1332 NDFREE(nd, NDF_ONLY_PNBUF);
1333 if (nd->ni_dvp == nd->ni_vp)
1334 vrele(nd->ni_dvp);
1335 else
1336 vput(nd->ni_dvp);
1337 vrele(nd->ni_vp);
1338 return (EEXIST);
1339 }
1340 VATTR_NULL(&vattr);
1341 vattr.va_mode = ACCESSPERMS &~ p->p_fd->fd_cmask;
1342 VOP_LEASE(nd->ni_dvp, td, p->p_ucred, LEASE_WRITE);
1343 error = VOP_SYMLINK(nd->ni_dvp, NCPNULL, &nd->ni_vp, &nd->ni_cnd,
1344 &vattr, path);
1345 NDFREE(nd, NDF_ONLY_PNBUF);
1346 if (error == 0)
1347 vput(nd->ni_vp);
1348 vput(nd->ni_dvp);
1349 ASSERT_VOP_UNLOCKED(nd->ni_dvp, "symlink");
1350 ASSERT_VOP_UNLOCKED(nd->ni_vp, "symlink");
1351
1352 return (error);
1353}
1354
1355/*
1356 * symlink_args(char *path, char *link)
1357 *
1358 * Make a symbolic link.
1359 */
1360int
1361symlink(struct symlink_args *uap)
1362{
1363 struct thread *td = curthread;
1364 struct nameidata nd;
1365 char *path;
1366 int error;
1367
1368 path = zalloc(namei_zone);
1369 error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
1370 if (error == 0) {
1371 NDINIT(&nd, NAMEI_CREATE, CNP_LOCKPARENT | CNP_NOOBJ,
1372 UIO_USERSPACE, uap->link, td);
1373 error = kern_symlink(path, &nd);
1374 }
1375 zfree(namei_zone, path);
1376 return (error);
1377}
1378
1379/*
1380 * undelete_args(char *path)
1381 *
1382 * Delete a whiteout from the filesystem.
1383 */
1384/* ARGSUSED */
1385int
1386undelete(struct undelete_args *uap)
1387{
1388 struct thread *td = curthread;
1389 struct proc *p = td->td_proc;
1390 int error;
1391 struct nameidata nd;
1392
1393 bwillwrite();
1394 NDINIT(&nd, NAMEI_DELETE, CNP_LOCKPARENT | CNP_DOWHITEOUT, UIO_USERSPACE,
1395 SCARG(uap, path), td);
1396 error = namei(&nd);
1397 if (error)
1398 return (error);
1399
1400 if (nd.ni_vp != NULLVP || !(nd.ni_cnd.cn_flags & CNP_ISWHITEOUT)) {
1401 NDFREE(&nd, NDF_ONLY_PNBUF);
1402 if (nd.ni_dvp == nd.ni_vp)
1403 vrele(nd.ni_dvp);
1404 else
1405 vput(nd.ni_dvp);
1406 if (nd.ni_vp)
1407 vrele(nd.ni_vp);
1408 return (EEXIST);
1409 }
1410
1411 VOP_LEASE(nd.ni_dvp, td, p->p_ucred, LEASE_WRITE);
1412 error = VOP_WHITEOUT(nd.ni_dvp, NCPNULL, &nd.ni_cnd, NAMEI_DELETE);
1413 NDFREE(&nd, NDF_ONLY_PNBUF);
1414 vput(nd.ni_dvp);
1415 ASSERT_VOP_UNLOCKED(nd.ni_dvp, "undelete");
1416 ASSERT_VOP_UNLOCKED(nd.ni_vp, "undelete");
1417 return (error);
1418}
1419
1420int
1421kern_unlink(struct nameidata *nd)
1422{
1423 struct thread *td = curthread;
1424 struct proc *p = td->td_proc;
1425 struct vnode *vp;
1426 int error;
1427
1428 bwillwrite();
1429 error = namei(nd);
1430 if (error)
1431 return (error);
1432 vp = nd->ni_vp;
1433 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1434 vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
1435
1436 if (vp->v_type == VDIR)
1437 error = EPERM; /* POSIX */
1438 else {
1439 /*
1440 * The root of a mounted filesystem cannot be deleted.
1441 *
1442 * XXX: can this only be a VDIR case?
1443 */
1444 if (vp->v_flag & VROOT)
1445 error = EBUSY;
1446 }
1447
1448 if (error == 0) {
1449 VOP_LEASE(nd->ni_dvp, td, p->p_ucred, LEASE_WRITE);
1450 error = VOP_REMOVE(nd->ni_dvp, NCPNULL, vp, &nd->ni_cnd);
1451 }
1452 NDFREE(nd, NDF_ONLY_PNBUF);
1453 if (nd->ni_dvp == vp)
1454 vrele(nd->ni_dvp);
1455 else
1456 vput(nd->ni_dvp);
1457 if (vp != NULLVP)
1458 vput(vp);
1459 ASSERT_VOP_UNLOCKED(nd->ni_dvp, "unlink");
1460 ASSERT_VOP_UNLOCKED(nd->ni_vp, "unlink");
1461 return (error);
1462}
1463
1464/*
1465 * unlink_args(char *path)
1466 *
1467 * Delete a name from the filesystem.
1468 */
1469int
1470unlink(struct unlink_args *uap)
1471{
1472 struct thread *td = curthread;
1473 struct nameidata nd;
1474 int error;
1475
1476 NDINIT(&nd, NAMEI_DELETE, CNP_LOCKPARENT, UIO_USERSPACE, uap->path,
1477 td);
1478
1479 error = kern_unlink(&nd);
1480
1481 return (error);
1482}
1483
1484int
1485kern_lseek(int fd, off_t offset, int whence, off_t *res)
1486{
1487 struct thread *td = curthread;
1488 struct proc *p = td->td_proc;
1489 struct filedesc *fdp = p->p_fd;
1490 struct file *fp;
1491 struct vattr vattr;
1492 int error;
1493
1494 if (fd >= fdp->fd_nfiles ||
1495 (fp = fdp->fd_ofiles[fd]) == NULL)
1496 return (EBADF);
1497 if (fp->f_type != DTYPE_VNODE)
1498 return (ESPIPE);
1499 switch (whence) {
1500 case L_INCR:
1501 fp->f_offset += offset;
1502 break;
1503 case L_XTND:
1504 error=VOP_GETATTR((struct vnode *)fp->f_data, &vattr, td);
1505 if (error)
1506 return (error);
1507 fp->f_offset = offset + vattr.va_size;
1508 break;
1509 case L_SET:
1510 fp->f_offset = offset;
1511 break;
1512 default:
1513 return (EINVAL);
1514 }
1515 *res = fp->f_offset;
1516 return (0);
1517}
1518
1519/*
1520 * lseek_args(int fd, int pad, off_t offset, int whence)
1521 *
1522 * Reposition read/write file offset.
1523 */
1524int
1525lseek(struct lseek_args *uap)
1526{
1527 int error;
1528
1529 error = kern_lseek(uap->fd, uap->offset, uap->whence,
1530 &uap->sysmsg_offset);
1531
1532 return (error);
1533}
1534
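/*
 * Editor's note, not part of the original source: L_SET, L_INCR and L_XTND
 * above are the historical spellings of SEEK_SET, SEEK_CUR and SEEK_END, so
 * a userland "lseek(fd, 0, SEEK_END)" lands in the L_XTND case and picks up
 * the file size via VOP_GETATTR().
 */
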
1535int
1536kern_access(struct nameidata *nd, int aflags)
1537{
1538 struct thread *td = curthread;
1539 struct proc *p = td->td_proc;
1540 struct ucred *cred, *tmpcred;
1541 struct vnode *vp;
1542 int error, flags;
1543
1544 cred = p->p_ucred;
1545 /*
1546 * Create and modify a temporary credential instead of one that
1547 * is potentially shared. This could also mess up socket
1548 * buffer accounting which can run in an interrupt context.
1549 */
1550 tmpcred = crdup(cred);
1551 tmpcred->cr_uid = p->p_ucred->cr_ruid;
1552 tmpcred->cr_groups[0] = p->p_ucred->cr_rgid;
1553 p->p_ucred = tmpcred;
1554 nd->ni_cnd.cn_cred = tmpcred;
1555 error = namei(nd);
1556 if (error)
1557 goto out1;
1558 vp = nd->ni_vp;
1559
1560 /* Flags == 0 means only check for existence. */
1561 if (aflags) {
1562 flags = 0;
1563 if (aflags & R_OK)
1564 flags |= VREAD;
1565 if (aflags & W_OK)
1566 flags |= VWRITE;
1567 if (aflags & X_OK)
1568 flags |= VEXEC;
1569 if ((flags & VWRITE) == 0 || (error = vn_writechk(vp)) == 0)
1570 error = VOP_ACCESS(vp, flags, tmpcred, td);
1571 }
1572 NDFREE(nd, NDF_ONLY_PNBUF);
1573 vput(vp);
1574out1:
1575 p->p_ucred = cred;
1576 crfree(tmpcred);
1577 return (error);
1578}
1579
1580/*
1581 * access_args(char *path, int flags)
1582 *
1583 * Check access permissions.
1584 */
1585int
1586access(struct access_args *uap)
1587{
1588 struct thread *td = curthread;
1589 struct nameidata nd;
1590 int error;
1591
1592 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF | CNP_NOOBJ,
1593 UIO_USERSPACE, uap->path, td);
1594
1595 error = kern_access(&nd, uap->flags);
1596
1597 return (error);
1598}
1599
1600int
1601kern_stat(struct nameidata *nd, struct stat *st)
1602{
1603 struct thread *td = curthread;
1604 int error;
1605
1606 error = namei(nd);
1607 if (error)
1608 return (error);
1609 error = vn_stat(nd->ni_vp, st, td);
1610 NDFREE(nd, NDF_ONLY_PNBUF);
1611 vput(nd->ni_vp);
1612 return (error);
1613}
1614
1615/*
1616 * stat_args(char *path, struct stat *ub)
1617 *
1618 * Get file status; this version follows links.
1619 */
1620int
1621stat(struct stat_args *uap)
1622{
1623 struct thread *td = curthread;
1624 struct nameidata nd;
1625 struct stat st;
1626 int error;
1627
1628 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF | CNP_NOOBJ,
1629 UIO_USERSPACE, uap->path, td);
1630
1631 error = kern_stat(&nd, &st);
1632
1633 if (error == 0)
1634 error = copyout(&st, uap->ub, sizeof(*uap->ub));
1635 return (error);
1636}
1637
1638/*
1639 * lstat_args(char *path, struct stat *ub)
1640 *
1641 * Get file status; this version does not follow links.
1642 */
1643int
1644lstat(struct lstat_args *uap)
1645{
1646 struct thread *td = curthread;
1647 struct nameidata nd;
1648 struct stat st;
1649 int error;
1650
1651 NDINIT(&nd, NAMEI_LOOKUP, CNP_LOCKLEAF | CNP_NOOBJ,
1652 UIO_USERSPACE, SCARG(uap, path), td);
1653
1654 error = kern_stat(&nd, &st);
1655
1656 if (error == 0)
1657 error = copyout(&st, uap->ub, sizeof(*uap->ub));
1658 return (error);
1659}
1660
1661void
1662cvtnstat(struct stat *sb, struct nstat *nsb)
1665{
1666 nsb->st_dev = sb->st_dev;
1667 nsb->st_ino = sb->st_ino;
1668 nsb->st_mode = sb->st_mode;
1669 nsb->st_nlink = sb->st_nlink;
1670 nsb->st_uid = sb->st_uid;
1671 nsb->st_gid = sb->st_gid;
1672 nsb->st_rdev = sb->st_rdev;
1673 nsb->st_atimespec = sb->st_atimespec;
1674 nsb->st_mtimespec = sb->st_mtimespec;
1675 nsb->st_ctimespec = sb->st_ctimespec;
1676 nsb->st_size = sb->st_size;
1677 nsb->st_blocks = sb->st_blocks;
1678 nsb->st_blksize = sb->st_blksize;
1679 nsb->st_flags = sb->st_flags;
1680 nsb->st_gen = sb->st_gen;
1681 nsb->st_qspare[0] = sb->st_qspare[0];
1682 nsb->st_qspare[1] = sb->st_qspare[1];
1683}
1684
1685/*
1686 * nstat_args(char *path, struct nstat *ub)
1687 */
1688/* ARGSUSED */
1689int
1690nstat(struct nstat_args *uap)
1691{
1692 struct thread *td = curthread;
1693 struct stat sb;
1694 struct nstat nsb;
1695 int error;
1696 struct nameidata nd;
1697
1698 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF | CNP_NOOBJ,
1699 UIO_USERSPACE, SCARG(uap, path), td);
1700 if ((error = namei(&nd)) != 0)
1701 return (error);
1702 NDFREE(&nd, NDF_ONLY_PNBUF);
1703 error = vn_stat(nd.ni_vp, &sb, td);
1704 vput(nd.ni_vp);
1705 if (error)
1706 return (error);
1707 cvtnstat(&sb, &nsb);
1708 error = copyout((caddr_t)&nsb, (caddr_t)SCARG(uap, ub), sizeof (nsb));
1709 return (error);
1710}
1711
1712/*
1713 * nlstat_args(char *path, struct nstat *ub)
1714 *
1715 * Get file status; this version does not follow links.
1716 */
1717/* ARGSUSED */
1718int
1719nlstat(struct nlstat_args *uap)
1720{
1721 struct thread *td = curthread;
1722 int error;
1723 struct vnode *vp;
1724 struct stat sb;
1725 struct nstat nsb;
1726 struct nameidata nd;
1727
1728 NDINIT(&nd, NAMEI_LOOKUP, CNP_LOCKLEAF | CNP_NOOBJ,
1729 UIO_USERSPACE, SCARG(uap, path), td);
1730 if ((error = namei(&nd)) != 0)
1731 return (error);
1732 vp = nd.ni_vp;
1733 NDFREE(&nd, NDF_ONLY_PNBUF);
1734 error = vn_stat(vp, &sb, td);
1735 vput(vp);
1736 if (error)
1737 return (error);
1738 cvtnstat(&sb, &nsb);
1739 error = copyout((caddr_t)&nsb, (caddr_t)SCARG(uap, ub), sizeof (nsb));
1740 return (error);
1741}
1742
1743/*
1744 * pathconf_args(char *path, int name)
1745 *
1746 * Get configurable pathname variables.
1747 */
1748/* ARGSUSED */
1749int
1750pathconf(struct pathconf_args *uap)
1751{
1752 struct thread *td = curthread;
1753 int error;
1754 struct nameidata nd;
1755
1756 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF | CNP_NOOBJ,
1757 UIO_USERSPACE, SCARG(uap, path), td);
1758 if ((error = namei(&nd)) != 0)
1759 return (error);
1760 NDFREE(&nd, NDF_ONLY_PNBUF);
1761 error = VOP_PATHCONF(nd.ni_vp, SCARG(uap, name), uap->sysmsg_fds);
1762 vput(nd.ni_vp);
1763 return (error);
1764}
1765
1766/*
1767 * XXX: daver
1768 * kern_readlink isn't properly split yet. There is a copyin buried
1769 * in VOP_READLINK().
1770 */
1771int
1772kern_readlink(struct nameidata *nd, char *buf, int count, int *res)
1773{
1774 struct thread *td = curthread;
1775 struct proc *p = td->td_proc;
1776 struct vnode *vp;
1777 struct iovec aiov;
1778 struct uio auio;
1779 int error;
1780
1781 error = namei(nd);
1782 if (error)
1783 return (error);
1784 NDFREE(nd, NDF_ONLY_PNBUF);
1785 vp = nd->ni_vp;
1786 if (vp->v_type != VLNK)
1787 error = EINVAL;
1788 else {
1789 aiov.iov_base = buf;
1790 aiov.iov_len = count;
1791 auio.uio_iov = &aiov;
1792 auio.uio_iovcnt = 1;
1793 auio.uio_offset = 0;
1794 auio.uio_rw = UIO_READ;
1795 auio.uio_segflg = UIO_USERSPACE;
1796 auio.uio_td = td;
1797 auio.uio_resid = count;
1798 error = VOP_READLINK(vp, &auio, p->p_ucred);
1799 }
1800 vput(vp);
1801	*res = (error == 0) ? count - auio.uio_resid : 0;
1802 return (error);
1803}
1804
1805/*
1806 * readlink_args(char *path, char *buf, int count)
1807 *
1808 * Return target name of a symbolic link.
1809 */
1810int
1811readlink(struct readlink_args *uap)
1812{
1813 struct thread *td = curthread;
1814 struct nameidata nd;
1815 int error;
1816
1817 NDINIT(&nd, NAMEI_LOOKUP, CNP_LOCKLEAF | CNP_NOOBJ, UIO_USERSPACE,
1818 uap->path, td);
1819
1820 error = kern_readlink(&nd, uap->buf, uap->count,
1821 &uap->sysmsg_result);
1822
1823 return (error);
1824}
1825
1826static int
1827setfflags(struct vnode *vp, int flags)
1828{
1829 struct thread *td = curthread;
1830 struct proc *p = td->td_proc;
1831 int error;
1832 struct vattr vattr;
1833
1834 /*
1835 * Prevent non-root users from setting flags on devices. When
1836 * a device is reused, users can retain ownership of the device
1837 * if they are allowed to set flags and programs assume that
1838 * chown can't fail when done as root.
1839 */
1840 if ((vp->v_type == VCHR || vp->v_type == VBLK) &&
1841 ((error = suser_cred(p->p_ucred, PRISON_ROOT)) != 0))
1842 return (error);
1843
1844 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1845 vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
1846 VATTR_NULL(&vattr);
1847 vattr.va_flags = flags;
1848 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
1849 VOP_UNLOCK(vp, NULL, 0, td);
1850 return (error);
1851}
1852
1853/*
1854 * chflags_args(char *path, int flags)
1855 *
1856 * Change flags of a file given a path name.
1857 */
1858/* ARGSUSED */
1859int
1860chflags(struct chflags_args *uap)
1861{
1862 struct thread *td = curthread;
1863 int error;
1864 struct nameidata nd;
1865
1866 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE,
1867 SCARG(uap, path), td);
1868 if ((error = namei(&nd)) != 0)
1869 return (error);
1870 NDFREE(&nd, NDF_ONLY_PNBUF);
1871 error = setfflags(nd.ni_vp, SCARG(uap, flags));
1872 vrele(nd.ni_vp);
1873 return error;
1874}
1875
1876/*
1877 * fchflags_args(int fd, int flags)
1878 *
1879 * Change flags of a file given a file descriptor.
1880 */
1881/* ARGSUSED */
1882int
1883fchflags(struct fchflags_args *uap)
1884{
1885 struct thread *td = curthread;
1886 struct proc *p = td->td_proc;
1887 struct file *fp;
1888 int error;
1889
1890 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
1891 return (error);
1892 return setfflags((struct vnode *) fp->f_data, SCARG(uap, flags));
1893}
1894
1895static int
1896setfmode(struct vnode *vp, int mode)
1897{
1898 struct thread *td = curthread;
1899 struct proc *p = td->td_proc;
1900 int error;
1901 struct vattr vattr;
1902
1903 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1904 vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
1905 VATTR_NULL(&vattr);
1906 vattr.va_mode = mode & ALLPERMS;
1907 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
1908 VOP_UNLOCK(vp, NULL, 0, td);
1909 return error;
1910}
1911
1912int
1913kern_chmod(struct nameidata *nd, int mode)
1914{
1915 int error;
1916
1917 error = namei(nd);
1918 if (error)
1919 return (error);
1920 NDFREE(nd, NDF_ONLY_PNBUF);
1921 error = setfmode(nd->ni_vp, mode);
1922 vrele(nd->ni_vp);
1923 return error;
1924}
1925
1926/*
1927 * chmod_args(char *path, int mode)
1928 *
1929 * Change mode of a file given path name.
1930 */
1931/* ARGSUSED */
1932int
1933chmod(struct chmod_args *uap)
1934{
1935 struct thread *td = curthread;
1936 struct nameidata nd;
1937 int error;
1938
1939 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE, uap->path, td);
1940
1941 error = kern_chmod(&nd, uap->mode);
1942
1943 return (error);
1944}
1945
1946/*
1947 * lchmod_args(char *path, int mode)
1948 *
1949 * Change mode of a file given path name (don't follow links.)
1950 */
1951/* ARGSUSED */
1952int
1953lchmod(struct lchmod_args *uap)
1954{
1955 struct thread *td = curthread;
1956 int error;
1957 struct nameidata nd;
1958
1959 NDINIT(&nd, NAMEI_LOOKUP, 0, UIO_USERSPACE, SCARG(uap, path), td);
1960 if ((error = namei(&nd)) != 0)
1961 return (error);
1962 NDFREE(&nd, NDF_ONLY_PNBUF);
1963 error = setfmode(nd.ni_vp, SCARG(uap, mode));
1964 vrele(nd.ni_vp);
1965 return error;
1966}
1967
1968/*
1969 * fchmod_args(int fd, int mode)
1970 *
1971 * Change mode of a file given a file descriptor.
1972 */
1973/* ARGSUSED */
1974int
1975fchmod(struct fchmod_args *uap)
1976{
1977 struct thread *td = curthread;
1978 struct proc *p = td->td_proc;
1979 struct file *fp;
1980 int error;
1981
1982 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
1983 return (error);
1984 return setfmode((struct vnode *)fp->f_data, SCARG(uap, mode));
1985}
1986
1987static int
1988setfown(struct vnode *vp, uid_t uid, gid_t gid)
1989{
1990 struct thread *td = curthread;
1991 struct proc *p = td->td_proc;
1992 int error;
1993 struct vattr vattr;
1994
1995 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
1996 vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
1997 VATTR_NULL(&vattr);
1998 vattr.va_uid = uid;
1999 vattr.va_gid = gid;
2000 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
2001 VOP_UNLOCK(vp, NULL, 0, td);
2002 return error;
2003}
2004
2005int
2006kern_chown(struct nameidata *nd, int uid, int gid)
2007{
2008 int error;
2009
2010 error = namei(nd);
2011 if (error)
2012 return (error);
2013 NDFREE(nd, NDF_ONLY_PNBUF);
2014 error = setfown(nd->ni_vp, uid, gid);
2015 vrele(nd->ni_vp);
2016 return (error);
2017}
2018
2019/*
2020 * chown_args(char *path, int uid, int gid)
2021 *
2022 * Set ownership given a path name.
2023 */
2024int
2025chown(struct chown_args *uap)
2026{
2027 struct thread *td = curthread;
2028 struct nameidata nd;
2029 int error;
2030
2031 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE, uap->path, td);
2032
2033 error = kern_chown(&nd, uap->uid, uap->gid);
2034
2035 return (error);
2036}
2037
2038/*
2039 * lchown_args(char *path, int uid, int gid)
2040 *
2041 * Set ownership given a path name, do not cross symlinks.
2042 */
2043int
2044lchown(struct lchown_args *uap)
2045{
2046 struct thread *td = curthread;
2047 int error;
2048 struct nameidata nd;
2049
2050 NDINIT(&nd, NAMEI_LOOKUP, 0, UIO_USERSPACE, uap->path, td);
2051
2052 error = kern_chown(&nd, uap->uid, uap->gid);
2053
2054 return (error);
2055}
2056
2057/*
2058 * fchown_args(int fd, int uid, int gid)
2059 *
2060 * Set ownership given a file descriptor.
2061 */
2062/* ARGSUSED */
2063int
2064fchown(struct fchown_args *uap)
2065{
2066 struct thread *td = curthread;
2067 struct proc *p = td->td_proc;
2068 struct file *fp;
2069 int error;
2070
2071 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2072 return (error);
2073 return setfown((struct vnode *)fp->f_data,
2074 SCARG(uap, uid), SCARG(uap, gid));
2075}
2076
2077static int
2078getutimes(const struct timeval *tvp, struct timespec *tsp)
2079{
2080 struct timeval tv[2];
2081
2082 if (tvp == NULL) {
2083 microtime(&tv[0]);
2084 TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]);
2085 tsp[1] = tsp[0];
2086 } else {
2087 TIMEVAL_TO_TIMESPEC(&tvp[0], &tsp[0]);
2088 TIMEVAL_TO_TIMESPEC(&tvp[1], &tsp[1]);
2089 }
2090 return 0;
2091}
2092
2093static int
2094setutimes(struct vnode *vp, const struct timespec *ts, int nullflag)
2095{
2096 struct thread *td = curthread;
2097 struct proc *p = td->td_proc;
2098 int error;
2099 struct vattr vattr;
2100
2101 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2102 vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
2103 VATTR_NULL(&vattr);
2104 vattr.va_atime = ts[0];
2105 vattr.va_mtime = ts[1];
2106 if (nullflag)
2107 vattr.va_vaflags |= VA_UTIMES_NULL;
2108 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
2109 VOP_UNLOCK(vp, NULL, 0, td);
2110 return error;
2111}
2112
2113int
2114kern_utimes(struct nameidata *nd, struct timeval *tptr)
2115{
2116 struct timespec ts[2];
2117 int error;
2118
2119 error = getutimes(tptr, ts);
2120 if (error)
2121 return (error);
2122 error = namei(nd);
2123 if (error)
2124 return (error);
2125 NDFREE(nd, NDF_ONLY_PNBUF);
2126 error = setutimes(nd->ni_vp, ts, tptr == NULL);
2127 vrele(nd->ni_vp);
2128 return (error);
2129}
2130
2131/*
2132 * utimes_args(char *path, struct timeval *tptr)
2133 *
2134 * Set the access and modification times of a file.
2135 */
2136int
2137utimes(struct utimes_args *uap)
2138{
2139 struct thread *td = curthread;
2140 struct timeval tv[2];
2141 struct nameidata nd;
2142 int error;
2143
2144 if (uap->tptr) {
2145 error = copyin(uap->tptr, tv, sizeof(tv));
2146 if (error)
2147 return (error);
2148 }
2149 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE, uap->path, td);
2150
2151 error = kern_utimes(&nd, uap->tptr ? tv : NULL);
2152
2153 return (error);
2154}
2155
2156/*
2157 * lutimes_args(char *path, struct timeval *tptr)
2158 *
2159 * Set the access and modification times of a file.
2160 */
2161int
2162lutimes(struct lutimes_args *uap)
2163{
2164 struct thread *td = curthread;
2165 struct timeval tv[2];
2166 struct nameidata nd;
2167 int error;
2168
2169 if (uap->tptr) {
2170 error = copyin(uap->tptr, tv, sizeof(tv));
2171 if (error)
2172 return (error);
2173 }
2174 NDINIT(&nd, NAMEI_LOOKUP, 0, UIO_USERSPACE, uap->path, td);
2175
2176 error = kern_utimes(&nd, uap->tptr ? tv : NULL);
2177
2178 return (error);
2179}
2180
2181int
2182kern_futimes(int fd, struct timeval *tptr)
2183{
2184 struct thread *td = curthread;
2185 struct proc *p = td->td_proc;
2186 struct timespec ts[2];
2187 struct file *fp;
2188 int error;
2189
2190 error = getutimes(tptr, ts);
2191 if (error)
2192 return (error);
2193 error = getvnode(p->p_fd, fd, &fp);
2194 if (error)
2195 return (error);
2196 error = setutimes((struct vnode *)fp->f_data, ts, tptr == NULL);
2197 return (error);
2198}
2199
2200/*
2201 * futimes_args(int fd, struct timeval *tptr)
2202 *
2203 * Set the access and modification times of a file.
2204 */
2205int
2206futimes(struct futimes_args *uap)
2207{
2208 struct timeval tv[2];
2209 int error;
2210
2211 if (uap->tptr) {
2212 error = copyin(uap->tptr, tv, sizeof(tv));
2213 if (error)
2214 return (error);
2215 }
2216
2217 error = kern_futimes(uap->fd, uap->tptr ? tv : NULL);
2218
2219 return (error);
2220}
2221
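/*
 * Common code for truncate().  Looks up the path described by the
 * caller-initialized nameidata and sets the file's size to the
 * requested length, after verifying that the vnode is not a directory
 * and is writable.
 */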
2222int
2223kern_truncate(struct nameidata* nd, off_t length)
2224{
2225 struct thread *td = curthread;
2226 struct proc *p = td->td_proc;
2227 struct vnode *vp;
2228 struct vattr vattr;
2229 int error;
2230
2231 if (length < 0)
2232 return(EINVAL);
2233 if ((error = namei(nd)) != 0)
2234 return (error);
2235 vp = nd->ni_vp;
2236 NDFREE(nd, NDF_ONLY_PNBUF);
2237 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2238 vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
2239 if (vp->v_type == VDIR)
2240 error = EISDIR;
2241 else if ((error = vn_writechk(vp)) == 0 &&
2242 (error = VOP_ACCESS(vp, VWRITE, p->p_ucred, td)) == 0) {
2243 VATTR_NULL(&vattr);
2244 vattr.va_size = length;
2245 error = VOP_SETATTR(vp, &vattr, p->p_ucred, td);
2246 }
2247 vput(vp);
2248 return (error);
2249}
2250
2251/*
2252 * truncate(char *path, int pad, off_t length)
2253 *
2254 * Truncate a file given its path name.
2255 */
2256int
2257truncate(struct truncate_args *uap)
2258{
2259 struct thread *td = curthread;
2260 struct nameidata nd;
2261 int error;
2262
2263 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE, uap->path, td);
2264
2265 error = kern_truncate(&nd, uap->length);
2266
2267 return error;
2268}
2269
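/*
 * Common code for ftruncate().  Like kern_truncate() but operates on an
 * open file descriptor, which must have been opened for writing.
 */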
2270int
2271kern_ftruncate(int fd, off_t length)
2272{
2273 struct thread *td = curthread;
2274 struct proc *p = td->td_proc;
2275 struct vattr vattr;
2276 struct vnode *vp;
2277 struct file *fp;
2278 int error;
2279
2280 if (length < 0)
2281 return(EINVAL);
2282 if ((error = getvnode(p->p_fd, fd, &fp)) != 0)
2283 return (error);
2284 if ((fp->f_flag & FWRITE) == 0)
2285 return (EINVAL);
2286 vp = (struct vnode *)fp->f_data;
2287 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2288 vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
2289 if (vp->v_type == VDIR)
2290 error = EISDIR;
2291 else if ((error = vn_writechk(vp)) == 0) {
2292 VATTR_NULL(&vattr);
2293 vattr.va_size = length;
2294 error = VOP_SETATTR(vp, &vattr, fp->f_cred, td);
2295 }
2296 VOP_UNLOCK(vp, NULL, 0, td);
2297 return (error);
2298}
2299
2300/*
2301 * ftruncate_args(int fd, int pad, off_t length)
2302 *
2303 * Truncate a file given a file descriptor.
2304 */
2305int
2306ftruncate(struct ftruncate_args *uap)
2307{
2308 int error;
2309
2310 error = kern_ftruncate(uap->fd, uap->length);
2311
2312 return (error);
2313}
2314
2315/*
2316 * fsync(int fd)
2317 *
2318 * Sync an open file.
2319 */
2320/* ARGSUSED */
2321int
2322fsync(struct fsync_args *uap)
2323{
2324 struct thread *td = curthread;
2325 struct proc *p = td->td_proc;
2326 struct vnode *vp;
2327 struct file *fp;
2328 vm_object_t obj;
2329 int error;
2330
2331 if ((error = getvnode(p->p_fd, SCARG(uap, fd), &fp)) != 0)
2332 return (error);
2333 vp = (struct vnode *)fp->f_data;
2334 vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
2335 if (VOP_GETVOBJECT(vp, &obj) == 0)
2336 vm_object_page_clean(obj, 0, 0, 0);
2337 if ((error = VOP_FSYNC(vp, MNT_WAIT, td)) == 0 &&
2338 vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP) &&
2339 bioops.io_fsync)
2340 error = (*bioops.io_fsync)(vp);
2341 VOP_UNLOCK(vp, NULL, 0, td);
2342 return (error);
2343}
2344
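/*
 * Common code for rename().  Both nameidata structures must be
 * initialized by the caller.  The heavy lifting is done by VOP_RENAME()
 * after both paths have been looked up and sanity checked.  An internal
 * error value of -1 marks the case where source and target are the same
 * vnode, which is silently treated as success.
 */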
2345int
2346kern_rename(struct nameidata *fromnd, struct nameidata *tond)
2347{
2348 struct thread *td = curthread;
2349 struct proc *p = td->td_proc;
2350 struct vnode *tvp, *fvp, *tdvp;
2351 int error;
2352
2353 bwillwrite();
2354 error = namei(fromnd);
2355 if (error)
2356 return (error);
2357 fvp = fromnd->ni_vp;
2358 if (fromnd->ni_vp->v_type == VDIR)
2359 tond->ni_cnd.cn_flags |= CNP_WILLBEDIR;
2360 error = namei(tond);
2361 if (error) {
2362 /* Translate error code for rename("dir1", "dir2/."). */
2363 if (error == EISDIR && fvp->v_type == VDIR)
2364 error = EINVAL;
2365 NDFREE(fromnd, NDF_ONLY_PNBUF);
2366 vrele(fromnd->ni_dvp);
2367 vrele(fvp);
2368 goto out1;
2369 }
2370 tdvp = tond->ni_dvp;
2371 tvp = tond->ni_vp;
2372 if (tvp != NULL) {
2373 if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
2374 error = ENOTDIR;
2375 goto out;
2376 } else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
2377 error = EISDIR;
2378 goto out;
2379 }
2380 }
2381 if (fvp == tdvp)
2382 error = EINVAL;
2383 /*
2384 * If the source is the same as the destination (that is, if they
2385 * are links to the same vnode), then there is nothing to do.
2386 */
2387 if (fvp == tvp)
2388 error = -1;
2389out:
2390 if (!error) {
2391 VOP_LEASE(tdvp, td, p->p_ucred, LEASE_WRITE);
2392 if (fromnd->ni_dvp != tdvp) {
2393 VOP_LEASE(fromnd->ni_dvp, td, p->p_ucred, LEASE_WRITE);
2394 }
2395 if (tvp) {
2396 VOP_LEASE(tvp, td, p->p_ucred, LEASE_WRITE);
2397 }
2398 error = VOP_RENAME(fromnd->ni_dvp, NCPNULL, fromnd->ni_vp,
2399 &fromnd->ni_cnd, tond->ni_dvp, NCPNULL, tond->ni_vp,
2400 &tond->ni_cnd);
2401 NDFREE(fromnd, NDF_ONLY_PNBUF);
2402 NDFREE(tond, NDF_ONLY_PNBUF);
2403 } else {
2404 NDFREE(fromnd, NDF_ONLY_PNBUF);
2405 NDFREE(tond, NDF_ONLY_PNBUF);
2406 if (tdvp == tvp)
2407 vrele(tdvp);
2408 else
2409 vput(tdvp);
2410 if (tvp)
2411 vput(tvp);
2412 vrele(fromnd->ni_dvp);
2413 vrele(fvp);
2414 }
2415 vrele(tond->ni_startdir);
2416 ASSERT_VOP_UNLOCKED(fromnd->ni_dvp, "rename");
2417 ASSERT_VOP_UNLOCKED(fromnd->ni_vp, "rename");
2418 ASSERT_VOP_UNLOCKED(tond->ni_dvp, "rename");
2419 ASSERT_VOP_UNLOCKED(tond->ni_vp, "rename");
2420out1:
2421 if (fromnd->ni_startdir)
2422 vrele(fromnd->ni_startdir);
2423 if (error == -1)
2424 return (0);
2425 return (error);
2426}
2427
2428/*
2429 * rename_args(char *from, char *to)
2430 *
2431 * Rename files. Source and destination must either both be directories,
2432 * or both not be directories. If target is a directory, it must be empty.
2433 */
2434int
2435rename(struct rename_args *uap)
2436{
2437 struct thread *td = curthread;
2438 struct nameidata fromnd, tond;
2439 int error;
2440
2441 NDINIT(&fromnd, NAMEI_DELETE, CNP_WANTPARENT | CNP_SAVESTART,
2442 UIO_USERSPACE, uap->from, td);
2443 NDINIT(&tond, NAMEI_RENAME,
2444 CNP_LOCKPARENT | CNP_LOCKLEAF | CNP_NOCACHE |
2445 CNP_SAVESTART | CNP_NOOBJ,
2446 UIO_USERSPACE, uap->to, td);
2447
2448 error = kern_rename(&fromnd, &tond);
2449
2450 return (error);
2451}
2452
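/*
 * Common code for mkdir().  Creates a directory at the path described
 * by the caller-initialized nameidata, using the given mode masked by
 * the process umask.  Fails with EEXIST if the path already exists.
 */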
2453int
2454kern_mkdir(struct nameidata *nd, int mode)
2455{
2456 struct thread *td = curthread;
2457 struct proc *p = td->td_proc;
2458 struct vnode *vp;
2459 struct vattr vattr;
2460 int error;
2461
2462 bwillwrite();
2463 nd->ni_cnd.cn_flags |= CNP_WILLBEDIR;
2464 error = namei(nd);
2465 if (error)
2466 return (error);
2467 vp = nd->ni_vp;
2468 if (vp) {
2469 NDFREE(nd, NDF_ONLY_PNBUF);
2470 if (nd->ni_dvp == vp)
2471 vrele(nd->ni_dvp);
2472 else
2473 vput(nd->ni_dvp);
2474 vrele(vp);
2475 return (EEXIST);
2476 }
2477 VATTR_NULL(&vattr);
2478 vattr.va_type = VDIR;
2479 vattr.va_mode = (mode & ACCESSPERMS) &~ p->p_fd->fd_cmask;
2480 VOP_LEASE(nd->ni_dvp, td, p->p_ucred, LEASE_WRITE);
2481 error = VOP_MKDIR(nd->ni_dvp, NCPNULL, &nd->ni_vp, &nd->ni_cnd,
2482 &vattr);
2483 NDFREE(nd, NDF_ONLY_PNBUF);
2484 vput(nd->ni_dvp);
2485 if (error == 0)
2486 vput(nd->ni_vp);
2487 ASSERT_VOP_UNLOCKED(nd->ni_dvp, "mkdir");
2488 ASSERT_VOP_UNLOCKED(nd->ni_vp, "mkdir");
2489 return (error);
2490}
2491
2492/*
2493 * mkdir_args(char *path, int mode)
2494 *
2495 * Make a directory file.
2496 */
2497/* ARGSUSED */
2498int
2499mkdir(struct mkdir_args *uap)
2500{
2501 struct thread *td = curthread;
2502 struct nameidata nd;
2503 int error;
2504
2505 NDINIT(&nd, NAMEI_CREATE, CNP_LOCKPARENT, UIO_USERSPACE, uap->path,
2506 td);
2507
2508 error = kern_mkdir(&nd, uap->mode);
2509
2510 return (error);
2511}
2512
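/*
 * Common code for rmdir().  Removes the directory named by the
 * caller-initialized nameidata.  Removing "." or the root of a mounted
 * filesystem is refused.
 */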
2513int
2514kern_rmdir(struct nameidata *nd)
2515{
2516 struct thread *td = curthread;
2517 struct proc *p = td->td_proc;
2518 struct vnode *vp;
2519 int error;
2520
2521 bwillwrite();
2522 error = namei(nd);
2523 if (error)
2524 return (error);
2525 vp = nd->ni_vp;
2526 if (vp->v_type != VDIR) {
2527 error = ENOTDIR;
2528 goto out;
2529 }
2530 /*
2531 * No rmdir "." please.
2532 */
2533 if (nd->ni_dvp == vp) {
2534 error = EINVAL;
2535 goto out;
2536 }
2537 /*
2538 * The root of a mounted filesystem cannot be deleted.
2539 */
2540 if (vp->v_flag & VROOT)
2541 error = EBUSY;
2542 else {
2543 VOP_LEASE(nd->ni_dvp, td, p->p_ucred, LEASE_WRITE);
2544 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2545 error = VOP_RMDIR(nd->ni_dvp, NCPNULL, nd->ni_vp,
2546 &nd->ni_cnd);
2547 }
2548out:
2549 NDFREE(nd, NDF_ONLY_PNBUF);
2550 if (nd->ni_dvp == vp)
2551 vrele(nd->ni_dvp);
2552 else
2553 vput(nd->ni_dvp);
2554 if (vp != NULLVP)
2555 vput(vp);
2556 ASSERT_VOP_UNLOCKED(nd->ni_dvp, "rmdir");
2557 ASSERT_VOP_UNLOCKED(nd->ni_vp, "rmdir");
2558 return (error);
2559}
2560
2561/*
2562 * rmdir_args(char *path)
2563 *
2564 * Remove a directory file.
2565 */
2566/* ARGSUSED */
2567int
2568rmdir(struct rmdir_args *uap)
2569{
2570 struct thread *td = curthread;
2571 struct nameidata nd;
2572 int error;
2573
2574 NDINIT(&nd, NAMEI_DELETE, CNP_LOCKPARENT | CNP_LOCKLEAF,
2575 UIO_USERSPACE, uap->path, td);
2576
2577 error = kern_rmdir(&nd);
2578
2579 return (error);
2580}
2581
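/*
 * Common code for getdirentries() and getdents().  Reads directory
 * entries from the vnode backing fd into the user buffer via
 * VOP_READDIR(), falling through to the covered vnode for union
 * mounts when nothing was read.  If basep is non-NULL the seek offset
 * prior to the read is returned through it; *res receives the number
 * of bytes transferred.
 */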
2582int
2583kern_getdirentries(int fd, char *buf, u_int count, long *basep, int *res)
2584{
2585 struct thread *td = curthread;
2586 struct proc *p = td->td_proc;
2587 struct vnode *vp;
2588 struct file *fp;
2589 struct uio auio;
2590 struct iovec aiov;
2591 long loff;
2592 int error, eofflag;
2593
2594 if ((error = getvnode(p->p_fd, fd, &fp)) != 0)
2595 return (error);
2596 if ((fp->f_flag & FREAD) == 0)
2597 return (EBADF);
2598 vp = (struct vnode *)fp->f_data;
2599unionread:
2600 if (vp->v_type != VDIR)
2601 return (EINVAL);
2602 aiov.iov_base = buf;
2603 aiov.iov_len = count;
2604 auio.uio_iov = &aiov;
2605 auio.uio_iovcnt = 1;
2606 auio.uio_rw = UIO_READ;
2607 auio.uio_segflg = UIO_USERSPACE;
2608 auio.uio_td = td;
2609 auio.uio_resid = count;
2610 /* vn_lock(vp, NULL, LK_SHARED | LK_RETRY, td); */
2611 vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
2612 loff = auio.uio_offset = fp->f_offset;
2613 error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL);
2614 fp->f_offset = auio.uio_offset;
2615 VOP_UNLOCK(vp, NULL, 0, td);
2616 if (error)
2617 return (error);
2618 if (count == auio.uio_resid) {
2619 if (union_dircheckp) {
2620 error = union_dircheckp(td, &vp, fp);
2621 if (error == -1)
2622 goto unionread;
2623 if (error)
2624 return (error);
2625 }
2626 if ((vp->v_flag & VROOT) &&
2627 (vp->v_mount->mnt_flag & MNT_UNION)) {
2628 struct vnode *tvp = vp;
2629 vp = vp->v_mount->mnt_vnodecovered;
2630 vref(vp);
2631 fp->f_data = (caddr_t) vp;
2632 fp->f_offset = 0;
2633 vrele(tvp);
2634 goto unionread;
2635 }
2636 }
2637 if (basep) {
2638 *basep = loff;
2639 }
2640 *res = count - auio.uio_resid;
2641 return (error);
2642}
2643
2644/*
2645 * getdirentries_args(int fd, char *buf, u_int count, long *basep)
2646 *
2647 * Read a block of directory entries in a file system independent format.
2648 */
2649int
2650getdirentries(struct getdirentries_args *uap)
2651{
2652 long base;
2653 int error;
2654
2655 error = kern_getdirentries(uap->fd, uap->buf, uap->count, &base,
2656 &uap->sysmsg_result);
2657
2658 if (error == 0)
2659 error = copyout(&base, uap->basep, sizeof(*uap->basep));
2660 return (error);
2661}
2662
2663/*
2664 * getdents_args(int fd, char *buf, size_t count)
2665 */
2666int
2667getdents(struct getdents_args *uap)
2668{
2669 int error;
2670
2671 error = kern_getdirentries(uap->fd, uap->buf, uap->count, NULL,
2672 &uap->sysmsg_result);
2673
2674 return (error);
2675}
2676
2677/*
2678 * umask(int newmask)
2679 *
2680 * Set the mode mask for creation of filesystem nodes.
2681 *
2682 * MP SAFE
2683 */
2684int
2685umask(struct umask_args *uap)
2686{
2687 struct thread *td = curthread;
2688 struct proc *p = td->td_proc;
2689 struct filedesc *fdp;
2690
2691 fdp = p->p_fd;
2692 uap->sysmsg_result = fdp->fd_cmask;
2693 fdp->fd_cmask = SCARG(uap, newmask) & ALLPERMS;
2694 return (0);
2695}
2696
2697/*
2698 * revoke(char *path)
2699 *
2700 * Void all references to the file by ripping the underlying
2701 * filesystem away from the vnode.
2702 */
2703/* ARGSUSED */
2704int
2705revoke(struct revoke_args *uap)
2706{
2707 struct thread *td = curthread;
2708 struct proc *p = td->td_proc;
2709 struct vnode *vp;
2710 struct vattr vattr;
2711 int error;
2712 struct nameidata nd;
2713
2714 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
2715 if ((error = namei(&nd)) != 0)
2716 return (error);
2717 vp = nd.ni_vp;
2718 NDFREE(&nd, NDF_ONLY_PNBUF);
2719 if (vp->v_type != VCHR && vp->v_type != VBLK) {
2720 error = EINVAL;
2721 goto out;
2722 }
2723 if ((error = VOP_GETATTR(vp, &vattr, td)) != 0)
2724 goto out;
2725 if (p->p_ucred->cr_uid != vattr.va_uid &&
2726 (error = suser_cred(p->p_ucred, PRISON_ROOT)))
2727 goto out;
2728 if (count_udev(vp->v_udev) > 0)
2729 VOP_REVOKE(vp, REVOKEALL);
2730out:
2731 vrele(vp);
2732 return (error);
2733}
2734
2735/*
2736 * Convert a user file descriptor to a kernel file entry.
2737 */
2738int
2739getvnode(struct filedesc *fdp, int fd, struct file **fpp)
2740{
2741 struct file *fp;
2742
2743 if ((u_int)fd >= fdp->fd_nfiles ||
2744 (fp = fdp->fd_ofiles[fd]) == NULL)
2745 return (EBADF);
2746 if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO)
2747 return (EINVAL);
2748 *fpp = fp;
2749 return (0);
2750}
2751/*
2752 * getfh_args(char *fname, fhandle_t *fhp)
2753 *
2754 * Get (NFS) file handle
2755 */
2756int
2757getfh(struct getfh_args *uap)
2758{
2759 struct thread *td = curthread;
2760 struct nameidata nd;
2761 fhandle_t fh;
2762 struct vnode *vp;
2763 int error;
2764
2765 /*
2766 * Must be super user
2767 */
2768 error = suser(td);
2769 if (error)
2770 return (error);
2771 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF, UIO_USERSPACE, uap->fname, td);
2772 error = namei(&nd);
2773 if (error)
2774 return (error);
2775 NDFREE(&nd, NDF_ONLY_PNBUF);
2776 vp = nd.ni_vp;
2777 bzero(&fh, sizeof(fh));
2778 fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid;
2779 error = VFS_VPTOFH(vp, &fh.fh_fid);
2780 vput(vp);
2781 if (error)
2782 return (error);
2783 error = copyout(&fh, uap->fhp, sizeof (fh));
2784 return (error);
2785}
2786
2787/*
2788 * fhopen_args(const struct fhandle *u_fhp, int flags)
2789 *
2790 * Syscall used by rpc.lockd to translate an NFS file handle into
2791 * an open descriptor.
2792 *
2793 * warning: do not remove the suser() call or this becomes one giant
2794 * security hole.
2795 */
2796int
2797fhopen(struct fhopen_args *uap)
2798{
2799 struct thread *td = curthread;
2800 struct proc *p = td->td_proc;
2801 struct mount *mp;
2802 struct vnode *vp;
2803 struct fhandle fhp;
2804 struct vattr vat;
2805 struct vattr *vap = &vat;
2806 struct flock lf;
2807 struct file *fp;
2808 struct filedesc *fdp = p->p_fd;
2809 int fmode, mode, error, type;
2810 struct file *nfp;
2811 int indx;
2812
2813 /*
2814 * Must be super user
2815 */
2816 error = suser(td);
2817 if (error)
2818 return (error);
2819
2820 fmode = FFLAGS(SCARG(uap, flags));
2821 /* why not allow a non-read/write open for our lockd? */
2822 if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT))
2823 return (EINVAL);
2824 error = copyin(SCARG(uap,u_fhp), &fhp, sizeof(fhp));
2825 if (error)
2826 return(error);
2827 /* find the mount point */
2828 mp = vfs_getvfs(&fhp.fh_fsid);
2829 if (mp == NULL)
2830 return (ESTALE);
2831 /* now give me my vnode, it gets returned to me locked */
2832 error = VFS_FHTOVP(mp, &fhp.fh_fid, &vp);
2833 if (error)
2834 return (error);
2835 /*
2836 * From now on we have to make sure not to forget
2837 * about the vnode.  Any error that causes an abort
2838 * must vput(vp): just set error = err and
2839 * 'goto bad;'.
2840 */
2841
2842 /*
2843 * from vn_open
2844 */
2845 if (vp->v_type == VLNK) {
2846 error = EMLINK;
2847 goto bad;
2848 }
2849 if (vp->v_type == VSOCK) {
2850 error = EOPNOTSUPP;
2851 goto bad;
2852 }
2853 mode = 0;
2854 if (fmode & (FWRITE | O_TRUNC)) {
2855 if (vp->v_type == VDIR) {
2856 error = EISDIR;
2857 goto bad;
2858 }
2859 error = vn_writechk(vp);
2860 if (error)
2861 goto bad;
2862 mode |= VWRITE;
2863 }
2864 if (fmode & FREAD)
2865 mode |= VREAD;
2866 if (mode) {
2867 error = VOP_ACCESS(vp, mode, p->p_ucred, td);
2868 if (error)
2869 goto bad;
2870 }
2871 if (fmode & O_TRUNC) {
2872 VOP_UNLOCK(vp, NULL, 0, td); /* XXX */
2873 VOP_LEASE(vp, td, p->p_ucred, LEASE_WRITE);
2874 vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td); /* XXX */
2875 VATTR_NULL(vap);
2876 vap->va_size = 0;
2877 error = VOP_SETATTR(vp, vap, p->p_ucred, td);
2878 if (error)
2879 goto bad;
2880 }
2881 error = VOP_OPEN(vp, fmode, p->p_ucred, td);
2882 if (error)
2883 goto bad;
2884 /*
2885 * Make sure that a VM object is created for VMIO support.
2886 */
2887 if (vn_canvmio(vp) == TRUE) {
2888 if ((error = vfs_object_create(vp, td)) != 0)
2889 goto bad;
2890 }
2891 if (fmode & FWRITE)
2892 vp->v_writecount++;
2893
2894 /*
2895 * end of vn_open code
2896 */
2897
2898 if ((error = falloc(p, &nfp, &indx)) != 0) {
2899 if (fmode & FWRITE)
2900 vp->v_writecount--;
2901 goto bad;
2902 }
2903 fp = nfp;
2904
2905 /*
2906 * hold an extra reference to avoid having fp ripped out
2907 * from under us while we block in the lock op.
2908 */
2909 fhold(fp);
2910 nfp->f_data = (caddr_t)vp;
2911 nfp->f_flag = fmode & FMASK;
2912 nfp->f_ops = &vnops;
2913 nfp->f_type = DTYPE_VNODE;
2914 if (fmode & (O_EXLOCK | O_SHLOCK)) {
2915 lf.l_whence = SEEK_SET;
2916 lf.l_start = 0;
2917 lf.l_len = 0;
2918 if (fmode & O_EXLOCK)
2919 lf.l_type = F_WRLCK;
2920 else
2921 lf.l_type = F_RDLCK;
2922 type = F_FLOCK;
2923 if ((fmode & FNONBLOCK) == 0)
2924 type |= F_WAIT;
2925 VOP_UNLOCK(vp, NULL, 0, td);
2926 if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
2927 /*
2928 * lock request failed. Normally close the descriptor
2929 * but handle the case where someone might have dup()d
2930 * or close()d it when we weren't looking.
2931 */
2932 if (fdp->fd_ofiles[indx] == fp) {
2933 fdp->fd_ofiles[indx] = NULL;
2934 fdrop(fp, td);
2935 }
2936
2937 /*
2938 * release our private reference.
2939 */
2940 fdrop(fp, td);
2941 return (error);
2942 }
2943 vn_lock(vp, NULL, LK_EXCLUSIVE | LK_RETRY, td);
2944 fp->f_flag |= FHASLOCK;
2945 }
2946 if ((vp->v_type == VREG) && (VOP_GETVOBJECT(vp, NULL) != 0))
2947 vfs_object_create(vp, td);
2948
2949 VOP_UNLOCK(vp, NULL, 0, td);
2950 fdrop(fp, td);
2951 uap->sysmsg_result = indx;
2952 return (0);
2953
2954bad:
2955 vput(vp);
2956 return (error);
2957}
2958
2959/*
2960 * fhstat_args(struct fhandle *u_fhp, struct stat *sb)
2961 */
2962int
2963fhstat(struct fhstat_args *uap)
2964{
2965 struct thread *td = curthread;
2966 struct stat sb;
2967 fhandle_t fh;
2968 struct mount *mp;
2969 struct vnode *vp;
2970 int error;
2971
2972 /*
2973 * Must be super user
2974 */
2975 error = suser(td);
2976 if (error)
2977 return (error);
2978
2979 error = copyin(SCARG(uap, u_fhp), &fh, sizeof(fhandle_t));
2980 if (error)
2981 return (error);
2982
2983 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
2984 return (ESTALE);
2985 if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
2986 return (error);
2987 error = vn_stat(vp, &sb, td);
2988 vput(vp);
2989 if (error)
2990 return (error);
2991 error = copyout(&sb, SCARG(uap, sb), sizeof(sb));
2992 return (error);
2993}
2994
2995/*
2996 * fhstatfs_args(struct fhandle *u_fhp, struct statfs *buf)
2997 */
2998int
2999fhstatfs(struct fhstatfs_args *uap)
3000{
3001 struct thread *td = curthread;
3002 struct statfs *sp;
3003 struct mount *mp;
3004 struct vnode *vp;
3005 struct statfs sb;
3006 fhandle_t fh;
3007 int error;
3008
3009 /*
3010 * Must be super user
3011 */
3012 if ((error = suser(td)))
3013 return (error);
3014
3015 if ((error = copyin(SCARG(uap, u_fhp), &fh, sizeof(fhandle_t))) != 0)
3016 return (error);
3017
3018 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
3019 return (ESTALE);
3020 if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
3021 return (error);
3022 mp = vp->v_mount;
3023 sp = &mp->mnt_stat;
3024 vput(vp);
3025 if ((error = VFS_STATFS(mp, sp, td)) != 0)
3026 return (error);
3027 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3028 if (suser(td)) {
3029 bcopy((caddr_t)sp, (caddr_t)&sb, sizeof(sb));
3030 sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
3031 sp = &sb;
3032 }
3033 return (copyout(sp, SCARG(uap, buf), sizeof(*sp)));
3034}
3035
3036/*
3037 * Syscall to push extended attribute configuration information into the
3038 * VFS. Accepts a path, which it converts to a mountpoint, as well as
3039 * a command (int cmd), and attribute name and misc data. For now, the
3040 * attribute name is left in userspace for consumption by the VFS_op.
3041 * It will probably be changed to be copied into sysspace by the
3042 * syscall in the future, once issues with various consumers of the
3043 * attribute code have raised their hands.
3044 *
3045 * Currently this is used only by UFS Extended Attributes.
3046 */
3047int
3048extattrctl(struct extattrctl_args *uap)
3049{
3050 struct thread *td = curthread;
3051 struct nameidata nd;
3052 struct mount *mp;
3053 int error;
3054
3055 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW, UIO_USERSPACE, SCARG(uap, path), td);
3056 if ((error = namei(&nd)) != 0)
3057 return (error);
3058 mp = nd.ni_vp->v_mount;
3059 NDFREE(&nd, 0);
3060 return (VFS_EXTATTRCTL(mp, SCARG(uap, cmd), SCARG(uap, attrname),
3061 SCARG(uap, arg), td));
3062}
3063
3064/*
3065 * Syscall to set a named extended attribute on a file or directory.
3066 * Accepts attribute name, and a uio structure pointing to the data to set.
3067 * The uio is consumed in the style of writev(). The real work happens
3068 * in VOP_SETEXTATTR().
3069 */
3070int
3071extattr_set_file(struct extattr_set_file_args *uap)
3072{
3073 struct thread *td = curthread;
3074 struct proc *p = td->td_proc;
3075 struct nameidata nd;
3076 struct uio auio;
3077 struct iovec *iov, *needfree = NULL, aiov[UIO_SMALLIOV];
3078 char attrname[EXTATTR_MAXNAMELEN];
3079 u_int iovlen, cnt;
3080 int error, i;
3081
3082 error = copyin(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN);
3083 if (error)
3084 return (error);
3085 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF, UIO_USERSPACE,
3086 SCARG(uap, path), td);
3087 if ((error = namei(&nd)) != 0)
3088 return(error);
3089 iovlen = uap->iovcnt * sizeof(struct iovec);
3090 if (uap->iovcnt > UIO_SMALLIOV) {
3091 if (uap->iovcnt > UIO_MAXIOV) {
3092 error = EINVAL;
3093 goto done;
3094 }
3095 MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
3096 needfree = iov;
3097 } else
3098 iov = aiov;
3099 auio.uio_iov = iov;
3100 auio.uio_iovcnt = uap->iovcnt;
3101 auio.uio_rw = UIO_WRITE;
3102 auio.uio_segflg = UIO_USERSPACE;
3103 auio.uio_td = td;
3104 auio.uio_offset = 0;
3105 if ((error = copyin((caddr_t)uap->iovp, (caddr_t)iov, iovlen)))
3106 goto done;
3107 auio.uio_resid = 0;
3108 for (i = 0; i < uap->iovcnt; i++) {
3109 if (iov->iov_len > INT_MAX - auio.uio_resid) {
3110 error = EINVAL;
3111 goto done;
3112 }
3113 auio.uio_resid += iov->iov_len;
3114 iov++;
3115 }
3116 cnt = auio.uio_resid;
3117 error = VOP_SETEXTATTR(nd.ni_vp, attrname, &auio, p->p_ucred, td);
3118 cnt -= auio.uio_resid;
3119 uap->sysmsg_result = cnt;
3120done:
3121 if (needfree)
3122 FREE(needfree, M_IOV);
3123 NDFREE(&nd, 0);
3124 return (error);
3125}
3126
3127/*
3128 * Syscall to get a named extended attribute on a file or directory.
3129 * Accepts attribute name, and a uio structure pointing to a buffer for the
3130 * data. The uio is consumed in the style of readv(). The real work
3131 * happens in VOP_GETEXTATTR().
3132 */
3133int
3134extattr_get_file(struct extattr_get_file_args *uap)
3135{
3136 struct thread *td = curthread;
3137 struct proc *p = td->td_proc;
3138 struct nameidata nd;
3139 struct uio auio;
3140 struct iovec *iov, *needfree, aiov[UIO_SMALLIOV];
3141 char attrname[EXTATTR_MAXNAMELEN];
3142 u_int iovlen, cnt;
3143 int error, i;
3144
3145 error = copyin(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN);
3146 if (error)
3147 return (error);
3148 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF, UIO_USERSPACE,
3149 SCARG(uap, path), td);
3150 if ((error = namei(&nd)) != 0)
3151 return (error);
3152 iovlen = uap->iovcnt * sizeof (struct iovec);
3153 if (uap->iovcnt > UIO_SMALLIOV) {
3154 if (uap->iovcnt > UIO_MAXIOV) {
3155 NDFREE(&nd, 0);
3156 return (EINVAL);
3157 }
3158 MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
3159 needfree = iov;
3160 } else {
3161 iov = aiov;
3162 needfree = NULL;
3163 }
3164 auio.uio_iov = iov;
3165 auio.uio_iovcnt = uap->iovcnt;
3166 auio.uio_rw = UIO_READ;
3167 auio.uio_segflg = UIO_USERSPACE;
3168 auio.uio_td = td;
3169 auio.uio_offset = 0;
3170 if ((error = copyin((caddr_t)uap->iovp, (caddr_t)iov, iovlen)))
3171 goto done;
3172 auio.uio_resid = 0;
3173 for (i = 0; i < uap->iovcnt; i++) {
3174 if (iov->iov_len > INT_MAX - auio.uio_resid) {
3175 error = EINVAL;
3176 goto done;
3177 }
3178 auio.uio_resid += iov->iov_len;
3179 iov++;
3180 }
3181 cnt = auio.uio_resid;
3182 error = VOP_GETEXTATTR(nd.ni_vp, attrname, &auio, p->p_ucred, td);
3183 cnt -= auio.uio_resid;
3184 uap->sysmsg_result = cnt;
3185done:
3186 if (needfree)
3187 FREE(needfree, M_IOV);
3188 NDFREE(&nd, 0);
3189 return(error);
3190}
3191
3192/*
3193 * Syscall to delete a named extended attribute from a file or directory.
3194 * Accepts attribute name. The real work happens in VOP_SETEXTATTR().
3195 */
3196int
3197extattr_delete_file(struct extattr_delete_file_args *uap)
3198{
3199 struct thread *td = curthread;
3200 struct proc *p = td->td_proc;
3201 struct nameidata nd;
3202 char attrname[EXTATTR_MAXNAMELEN];
3203 int error;
3204
3205 error = copyin(SCARG(uap, attrname), attrname, EXTATTR_MAXNAMELEN);
3206 if (error)
3207 return(error);
3208 NDINIT(&nd, NAMEI_LOOKUP, CNP_FOLLOW | CNP_LOCKLEAF, UIO_USERSPACE,
3209 SCARG(uap, path), td);
3210 if ((error = namei(&nd)) != 0)
3211 return(error);
3212 error = VOP_SETEXTATTR(nd.ni_vp, attrname, NULL, p->p_ucred, td);
3213 NDFREE(&nd, 0);
3214 return(error);
3215}
3216
3217/*
3218 * Print out statistics on the current state of the buffer pool.
3219 * This can be toggled via the sysctl option debug.syncprt.
3220 */
3221#ifdef DEBUG
3222void
3223vfs_bufstats(void)
3224{
3225 int s, i, j, count;
3226 struct buf *bp;
3227 struct bqueues *dp;
3228 int counts[(MAXBSIZE / PAGE_SIZE) + 1];
3229 static char *bname[3] = { "LOCKED", "LRU", "AGE" };
3230
3231 for (dp = bufqueues, i = 0; dp < &bufqueues[3]; dp++, i++) {
3232 count = 0;
3233 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
3234 counts[j] = 0;
3235 s = splbio();
3236 TAILQ_FOREACH(bp, dp, b_freelist) {
3237 counts[bp->b_bufsize/PAGE_SIZE]++;
3238 count++;
3239 }
3240 splx(s);
3241 printf("%s: total-%d", bname[i], count);
3242 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
3243 if (counts[j] != 0)
3244 printf(", %d-%d", j * PAGE_SIZE, counts[j]);
3245 printf("\n");
3246 }
3247}
3248#endif