[dragonfly.git] / sys / kern / vfs_syscalls.c
1/*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94
39 * $FreeBSD: src/sys/kern/vfs_syscalls.c,v 1.151.2.18 2003/04/04 20:35:58 tegge Exp $
40 * $DragonFly: src/sys/kern/vfs_syscalls.c,v 1.93 2006/05/24 03:23:31 dillon Exp $
41 */
42
43#include <sys/param.h>
44#include <sys/systm.h>
45#include <sys/buf.h>
46#include <sys/conf.h>
47#include <sys/sysent.h>
48#include <sys/malloc.h>
49#include <sys/mount.h>
50#include <sys/mountctl.h>
51#include <sys/sysproto.h>
52#include <sys/filedesc.h>
53#include <sys/kernel.h>
54#include <sys/fcntl.h>
55#include <sys/file.h>
56#include <sys/linker.h>
57#include <sys/stat.h>
58#include <sys/unistd.h>
59#include <sys/vnode.h>
60#include <sys/proc.h>
61#include <sys/namei.h>
62#include <sys/nlookup.h>
63#include <sys/dirent.h>
64#include <sys/extattr.h>
65#include <sys/kern_syscall.h>
66
67#include <machine/limits.h>
68#include <vfs/union/union.h>
69#include <sys/sysctl.h>
70#include <vm/vm.h>
71#include <vm/vm_object.h>
72#include <vm/vm_zone.h>
73#include <vm/vm_page.h>
74
75#include <sys/file2.h>
76
77static int checkvp_chdir (struct vnode *vn, struct thread *td);
78static void checkdirs (struct vnode *olddp, struct namecache *ncp);
79static int chroot_refuse_vdir_fds (struct filedesc *fdp);
80static int chroot_visible_mnt(struct mount *mp, struct proc *p);
81static int getutimes (const struct timeval *, struct timespec *);
82static int setfown (struct vnode *, uid_t, gid_t);
83static int setfmode (struct vnode *, int);
84static int setfflags (struct vnode *, int);
85static int setutimes (struct vnode *, const struct timespec *, int);
86static int usermount = 0; /* if 1, non-root can mount fs. */
87
88int (*union_dircheckp) (struct thread *, struct vnode **, struct file *);
89
90SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, "");
91
92/*
93 * Virtual File System System Calls
94 */
95
96/*
97 * Mount a file system.
98 */
99/*
100 * mount_args(char *type, char *path, int flags, caddr_t data)
101 */
102/* ARGSUSED */
103int
104mount(struct mount_args *uap)
105{
106 struct thread *td = curthread;
107 struct proc *p = td->td_proc;
108 struct vnode *vp;
109 struct namecache *ncp;
110 struct mount *mp;
111 struct vfsconf *vfsp;
112 int error, flag = 0, flag2 = 0;
113 struct vattr va;
114 struct nlookupdata nd;
115 char fstypename[MFSNAMELEN];
116 struct nlcomponent nlc;
117 struct ucred *cred = p->p_ucred;
118
119 KKASSERT(p);
120 if (cred->cr_prison != NULL)
121 return (EPERM);
122 if (usermount == 0 && (error = suser(td)))
123 return (error);
124 /*
125 * Do not allow NFS export by non-root users.
126 */
127 if (uap->flags & MNT_EXPORTED) {
128 error = suser(td);
129 if (error)
130 return (error);
131 }
132 /*
133 * Silently enforce MNT_NOSUID and MNT_NODEV for non-root users
134 */
135 if (suser(td))
136 uap->flags |= MNT_NOSUID | MNT_NODEV;
137
138 /*
139 * Lookup the requested path and extract the ncp and vnode.
140 */
141 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
142 if (error == 0) {
143 if ((error = nlookup(&nd)) == 0) {
144 if (nd.nl_ncp->nc_vp == NULL)
145 error = ENOENT;
146 }
147 }
148 if (error) {
149 nlookup_done(&nd);
150 return (error);
151 }
152
153 /*
154 * Extract the locked+refd ncp and cleanup the nd structure
155 */
156 ncp = nd.nl_ncp;
157 nd.nl_ncp = NULL;
158 nlookup_done(&nd);
159
160 /*
161 * now we have the locked ref'd ncp and unreferenced vnode.
162 */
163 vp = ncp->nc_vp;
164 if ((error = vget(vp, LK_EXCLUSIVE)) != 0) {
165 cache_put(ncp);
166 return (error);
167 }
168 cache_unlock(ncp);
169
170 /*
171 * Now we have an unlocked ref'd ncp and a locked ref'd vp
172 */
173 if (uap->flags & MNT_UPDATE) {
174 if ((vp->v_flag & VROOT) == 0) {
175 cache_drop(ncp);
176 vput(vp);
177 return (EINVAL);
178 }
179 mp = vp->v_mount;
180 flag = mp->mnt_flag;
181 flag2 = mp->mnt_kern_flag;
182 /*
183 * We only allow the filesystem to be reloaded if it
184 * is currently mounted read-only.
185 */
186 if ((uap->flags & MNT_RELOAD) &&
187 ((mp->mnt_flag & MNT_RDONLY) == 0)) {
188 cache_drop(ncp);
189 vput(vp);
190 return (EOPNOTSUPP); /* Needs translation */
191 }
192 /*
193 * Only root, or the user that did the original mount is
194 * permitted to update it.
195 */
196 if (mp->mnt_stat.f_owner != cred->cr_uid &&
197 (error = suser(td))) {
198 cache_drop(ncp);
199 vput(vp);
200 return (error);
201 }
202 if (vfs_busy(mp, LK_NOWAIT)) {
203 cache_drop(ncp);
204 vput(vp);
205 return (EBUSY);
206 }
207 if ((vp->v_flag & VMOUNT) != 0 ||
208 vp->v_mountedhere != NULL) {
209 cache_drop(ncp);
210 vfs_unbusy(mp);
211 vput(vp);
212 return (EBUSY);
213 }
214 vp->v_flag |= VMOUNT;
215 mp->mnt_flag |=
216 uap->flags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE);
217 VOP_UNLOCK(vp, 0);
218 goto update;
219 }
220 /*
221 * If the user is not root, ensure that they own the directory
222 * onto which we are attempting to mount.
223 */
224 if ((error = VOP_GETATTR(vp, &va)) ||
225 (va.va_uid != cred->cr_uid && (error = suser(td)))) {
226 cache_drop(ncp);
227 vput(vp);
228 return (error);
229 }
230 if ((error = vinvalbuf(vp, V_SAVE, 0, 0)) != 0) {
231 cache_drop(ncp);
232 vput(vp);
233 return (error);
234 }
235 if (vp->v_type != VDIR) {
236 cache_drop(ncp);
237 vput(vp);
238 return (ENOTDIR);
239 }
240 if ((error = copyinstr(uap->type, fstypename, MFSNAMELEN, NULL)) != 0) {
241 cache_drop(ncp);
242 vput(vp);
243 return (error);
244 }
245 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
246 if (!strcmp(vfsp->vfc_name, fstypename))
247 break;
248 }
249 if (vfsp == NULL) {
250 linker_file_t lf;
251
252 /* Only load modules for root (very important!) */
253 if ((error = suser(td)) != 0) {
254 cache_drop(ncp);
255 vput(vp);
256 return error;
257 }
258 error = linker_load_file(fstypename, &lf);
259 if (error || lf == NULL) {
260 cache_drop(ncp);
261 vput(vp);
262 if (lf == NULL)
263 error = ENODEV;
264 return error;
265 }
266 lf->userrefs++;
267 /* lookup again, see if the VFS was loaded */
268 for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
269 if (!strcmp(vfsp->vfc_name, fstypename))
270 break;
271 }
272 if (vfsp == NULL) {
273 lf->userrefs--;
274 linker_file_unload(lf);
275 cache_drop(ncp);
276 vput(vp);
277 return (ENODEV);
278 }
279 }
280 if ((vp->v_flag & VMOUNT) != 0 ||
281 vp->v_mountedhere != NULL) {
282 cache_drop(ncp);
283 vput(vp);
284 return (EBUSY);
285 }
286 vp->v_flag |= VMOUNT;
287
288 /*
289 * Allocate and initialize the filesystem.
290 */
291 mp = malloc(sizeof(struct mount), M_MOUNT, M_ZERO|M_WAITOK);
292 TAILQ_INIT(&mp->mnt_nvnodelist);
293 TAILQ_INIT(&mp->mnt_reservedvnlist);
294 TAILQ_INIT(&mp->mnt_jlist);
295 mp->mnt_nvnodelistsize = 0;
296 lockinit(&mp->mnt_lock, "vfslock", 0, LK_NOPAUSE);
297 vfs_busy(mp, LK_NOWAIT);
298 mp->mnt_op = vfsp->vfc_vfsops;
299 mp->mnt_vfc = vfsp;
300 vfsp->vfc_refcount++;
301 mp->mnt_stat.f_type = vfsp->vfc_typenum;
302 mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
303 strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
304 mp->mnt_vnodecovered = vp;
305 mp->mnt_stat.f_owner = cred->cr_uid;
306 mp->mnt_iosize_max = DFLTPHYS;
307 VOP_UNLOCK(vp, 0);
308update:
309 /*
310 * Set the mount level flags.
311 */
312 if (uap->flags & MNT_RDONLY)
313 mp->mnt_flag |= MNT_RDONLY;
314 else if (mp->mnt_flag & MNT_RDONLY)
315 mp->mnt_kern_flag |= MNTK_WANTRDWR;
316 mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
317 MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOATIME |
318 MNT_NOSYMFOLLOW | MNT_IGNORE |
319 MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
320 mp->mnt_flag |= uap->flags & (MNT_NOSUID | MNT_NOEXEC |
321 MNT_NODEV | MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_FORCE |
322 MNT_NOSYMFOLLOW | MNT_IGNORE |
323 MNT_NOATIME | MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
324 /*
325 * Mount the filesystem.
326 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
327 * get.
328 */
329 error = VFS_MOUNT(mp, uap->path, uap->data, cred);
330 if (mp->mnt_flag & MNT_UPDATE) {
331 if (mp->mnt_kern_flag & MNTK_WANTRDWR)
332 mp->mnt_flag &= ~MNT_RDONLY;
333 mp->mnt_flag &=~ (MNT_UPDATE | MNT_RELOAD | MNT_FORCE);
334 mp->mnt_kern_flag &=~ MNTK_WANTRDWR;
335 if (error) {
336 mp->mnt_flag = flag;
337 mp->mnt_kern_flag = flag2;
338 }
339 vfs_unbusy(mp);
340 vp->v_flag &= ~VMOUNT;
341 vrele(vp);
342 cache_drop(ncp);
343 return (error);
344 }
345 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
346 /*
347 * Put the new filesystem on the mount list after root. The mount
348 * point gets its own mnt_ncp which is a special ncp linking the
349 * vnode-under to the root of the new mount. The lookup code
350 * detects the mount point going forward and detects the special
351 * mnt_ncp via NCP_MOUNTPT going backwards.
352 *
353 * It is not necessary to invalidate or purge the vnode underneath
354 * because elements under the mount will be given their own glue
355 * namecache record.
356 */
357 if (!error) {
358 nlc.nlc_nameptr = "";
359 nlc.nlc_namelen = 0;
360 mp->mnt_ncp = cache_nlookup(ncp, &nlc);
361 cache_setunresolved(mp->mnt_ncp);
362 mp->mnt_ncp->nc_flag |= NCF_MOUNTPT;
363 mp->mnt_ncp->nc_mount = mp;
364 cache_drop(ncp);
365 /* XXX get the root of the fs and cache_setvp(mnt_ncp...) */
366 vp->v_flag &= ~VMOUNT;
367 vp->v_mountedhere = mp;
368 mountlist_insert(mp, MNTINS_LAST);
369 checkdirs(vp, mp->mnt_ncp);
370 cache_unlock(mp->mnt_ncp); /* leave ref intact */
371 VOP_UNLOCK(vp, 0);
372 error = vfs_allocate_syncvnode(mp);
373 vfs_unbusy(mp);
374 if ((error = VFS_START(mp, 0)) != 0)
375 vrele(vp);
376 } else {
377 vfs_rm_vnodeops(&mp->mnt_vn_coherency_ops);
378 vfs_rm_vnodeops(&mp->mnt_vn_journal_ops);
379 vfs_rm_vnodeops(&mp->mnt_vn_norm_ops);
380 vfs_rm_vnodeops(&mp->mnt_vn_spec_ops);
381 vfs_rm_vnodeops(&mp->mnt_vn_fifo_ops);
382 vp->v_flag &= ~VMOUNT;
383 mp->mnt_vfc->vfc_refcount--;
384 vfs_unbusy(mp);
385 free(mp, M_MOUNT);
386 cache_drop(ncp);
387 vput(vp);
388 }
389 return (error);
390}
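/*
 * Editor's sketch (not part of vfs_syscalls.c): a minimal user-space
 * illustration of driving the mount(2) entry point above.  The "procfs"
 * type and the /proc mount point are illustrative assumptions; most
 * filesystems need a type-specific data argument normally built by a
 * mount_<fstype>(8) helper, and MNT_UPDATE follows the update: path above.
 */
#if 0	/* illustrative example, excluded from the kernel build */
#include <sys/param.h>
#include <sys/mount.h>
#include <err.h>

int
main(void)
{
	/* procfs ignores the private data argument, so NULL suffices here */
	if (mount("procfs", "/proc", 0, NULL) == -1)
		err(1, "mount procfs on /proc");
	return (0);
}
#endif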
391
392/*
393 * Scan all active processes to see if any of them have a current
394 * or root directory onto which the new filesystem has just been
395 * mounted. If so, replace them with the new mount point.
396 *
397 * The passed ncp is ref'd and locked (from the mount code) and
398 * must be associated with the vnode representing the root of the
399 * mount point.
400 */
401static void
402checkdirs(struct vnode *olddp, struct namecache *ncp)
403{
404 struct filedesc *fdp;
405 struct vnode *newdp;
406 struct mount *mp;
407 struct proc *p;
408
409 if (olddp->v_usecount == 1)
410 return;
411 mp = olddp->v_mountedhere;
412 if (VFS_ROOT(mp, &newdp))
413 panic("mount: lost mount");
414 cache_setvp(ncp, newdp);
415
416 if (rootvnode == olddp) {
417 vref(newdp);
418 vfs_cache_setroot(newdp, cache_hold(ncp));
419 }
420
421 FOREACH_PROC_IN_SYSTEM(p) {
422 fdp = p->p_fd;
423 if (fdp->fd_cdir == olddp) {
424 vrele(fdp->fd_cdir);
425 vref(newdp);
426 fdp->fd_cdir = newdp;
427 cache_drop(fdp->fd_ncdir);
428 fdp->fd_ncdir = cache_hold(ncp);
429 }
430 if (fdp->fd_rdir == olddp) {
431 vrele(fdp->fd_rdir);
432 vref(newdp);
433 fdp->fd_rdir = newdp;
434 cache_drop(fdp->fd_nrdir);
435 fdp->fd_nrdir = cache_hold(ncp);
436 }
437 }
438 vput(newdp);
439}
440
441/*
442 * Unmount a file system.
443 *
444 * Note: unmount takes a path to the vnode mounted on as argument,
 445 * not the special file (as before).
446 */
447/*
448 * umount_args(char *path, int flags)
449 */
450/* ARGSUSED */
451int
452unmount(struct unmount_args *uap)
453{
454 struct thread *td = curthread;
455 struct proc *p = td->td_proc;
456 struct mount *mp = NULL;
457 int error;
458 struct nlookupdata nd;
459
460 KKASSERT(p);
461 if (p->p_ucred->cr_prison != NULL)
462 return (EPERM);
463 if (usermount == 0 && (error = suser(td)))
464 return (error);
465
466 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
467 if (error == 0)
468 error = nlookup(&nd);
469 if (error)
470 goto out;
471
472 mp = nd.nl_ncp->nc_mount;
473
474 /*
475 * Only root, or the user that did the original mount is
476 * permitted to unmount this filesystem.
477 */
478 if ((mp->mnt_stat.f_owner != p->p_ucred->cr_uid) &&
479 (error = suser(td)))
480 goto out;
481
482 /*
483 * Don't allow unmounting the root file system.
484 */
485 if (mp->mnt_flag & MNT_ROOTFS) {
486 error = EINVAL;
487 goto out;
488 }
489
490 /*
491 * Must be the root of the filesystem
492 */
493 if (! (nd.nl_ncp->nc_flag & NCF_MOUNTPT)) {
494 error = EINVAL;
495 goto out;
496 }
497
498out:
499 nlookup_done(&nd);
500 if (error)
501 return (error);
502 return (dounmount(mp, uap->flags));
503}
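/*
 * Editor's sketch (not part of vfs_syscalls.c): unmount(2) as implemented
 * above takes the path of the mount point itself, not the backing device.
 * The /proc path is an illustrative assumption.
 */
#if 0	/* illustrative example, excluded from the kernel build */
#include <sys/param.h>
#include <sys/mount.h>
#include <err.h>

int
main(void)
{
	if (unmount("/proc", 0) == -1)		/* or MNT_FORCE */
		err(1, "unmount /proc");
	return (0);
}
#endif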
504
505/*
506 * Do the actual file system unmount.
507 */
508static int
509dounmount_interlock(struct mount *mp)
510{
511 if (mp->mnt_kern_flag & MNTK_UNMOUNT)
512 return (EBUSY);
513 mp->mnt_kern_flag |= MNTK_UNMOUNT;
514 return(0);
515}
516
517int
518dounmount(struct mount *mp, int flags)
519{
520 struct vnode *coveredvp;
521 int error;
522 int async_flag;
523 int lflags;
524
525 /*
526 * Exclusive access for unmounting purposes
527 */
528 if ((error = mountlist_interlock(dounmount_interlock, mp)) != 0)
529 return (error);
530
531 /*
532 * Allow filesystems to detect that a forced unmount is in progress.
533 */
534 if (flags & MNT_FORCE)
535 mp->mnt_kern_flag |= MNTK_UNMOUNTF;
536 lflags = LK_EXCLUSIVE | ((flags & MNT_FORCE) ? 0 : LK_NOWAIT);
537 error = lockmgr(&mp->mnt_lock, lflags);
538 if (error) {
539 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
540 if (mp->mnt_kern_flag & MNTK_MWAIT)
541 wakeup(mp);
542 return (error);
543 }
544
545 if (mp->mnt_flag & MNT_EXPUBLIC)
546 vfs_setpublicfs(NULL, NULL, NULL);
547
548 vfs_msync(mp, MNT_WAIT);
549 async_flag = mp->mnt_flag & MNT_ASYNC;
550 mp->mnt_flag &=~ MNT_ASYNC;
551 cache_purgevfs(mp); /* remove cache entries for this file sys */
552 if (mp->mnt_syncer != NULL)
553 vrele(mp->mnt_syncer);
554 if (((mp->mnt_flag & MNT_RDONLY) ||
555 (error = VFS_SYNC(mp, MNT_WAIT)) == 0) ||
556 (flags & MNT_FORCE))
557 error = VFS_UNMOUNT(mp, flags);
558 if (error) {
559 if (mp->mnt_syncer == NULL)
560 vfs_allocate_syncvnode(mp);
561 mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
562 mp->mnt_flag |= async_flag;
563 lockmgr(&mp->mnt_lock, LK_RELEASE);
564 if (mp->mnt_kern_flag & MNTK_MWAIT)
565 wakeup(mp);
566 return (error);
567 }
568 /*
569 * Clean up any journals still associated with the mount after
570 * filesystem activity has ceased.
571 */
572 journal_remove_all_journals(mp,
573 ((flags & MNT_FORCE) ? MC_JOURNAL_STOP_IMM : 0));
574
575 mountlist_remove(mp);
576
577 /*
578 * Remove any installed vnode ops here so the individual VFSs don't
579 * have to.
580 */
581 vfs_rm_vnodeops(&mp->mnt_vn_coherency_ops);
582 vfs_rm_vnodeops(&mp->mnt_vn_journal_ops);
583 vfs_rm_vnodeops(&mp->mnt_vn_norm_ops);
584 vfs_rm_vnodeops(&mp->mnt_vn_spec_ops);
585 vfs_rm_vnodeops(&mp->mnt_vn_fifo_ops);
586
587 if ((coveredvp = mp->mnt_vnodecovered) != NULLVP) {
588 coveredvp->v_mountedhere = NULL;
589 vrele(coveredvp);
590 cache_drop(mp->mnt_ncp);
591 mp->mnt_ncp = NULL;
592 }
593 mp->mnt_vfc->vfc_refcount--;
594 if (!TAILQ_EMPTY(&mp->mnt_nvnodelist))
595 panic("unmount: dangling vnode");
596 lockmgr(&mp->mnt_lock, LK_RELEASE);
597 if (mp->mnt_kern_flag & MNTK_MWAIT)
598 wakeup(mp);
599 free(mp, M_MOUNT);
600 return (0);
601}
602
603/*
604 * Sync each mounted filesystem.
605 */
606
607#ifdef DEBUG
608static int syncprt = 0;
609SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, "");
610#endif /* DEBUG */
611
612static int sync_callback(struct mount *mp, void *data);
613
614/* ARGSUSED */
615int
616sync(struct sync_args *uap)
617{
618 mountlist_scan(sync_callback, NULL, MNTSCAN_FORWARD);
619#ifdef DEBUG
620 /*
621 * print out buffer pool stat information on each sync() call.
622 */
623 if (syncprt)
624 vfs_bufstats();
625#endif /* DEBUG */
626 return (0);
627}
628
629static
630int
631sync_callback(struct mount *mp, void *data __unused)
632{
633 int asyncflag;
634
635 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
636 asyncflag = mp->mnt_flag & MNT_ASYNC;
637 mp->mnt_flag &= ~MNT_ASYNC;
638 vfs_msync(mp, MNT_NOWAIT);
639 VFS_SYNC(mp, MNT_NOWAIT);
640 mp->mnt_flag |= asyncflag;
641 }
642 return(0);
643}
644
645/* XXX PRISON: could be per prison flag */
646static int prison_quotas;
647#if 0
648SYSCTL_INT(_kern_prison, OID_AUTO, quotas, CTLFLAG_RW, &prison_quotas, 0, "");
649#endif
650
651/*
652 * quotactl_args(char *path, int fcmd, int uid, caddr_t arg)
653 *
654 * Change filesystem quotas.
655 */
656/* ARGSUSED */
657int
658quotactl(struct quotactl_args *uap)
659{
660 struct nlookupdata nd;
661 struct thread *td;
662 struct proc *p;
663 struct mount *mp;
664 int error;
665
666 td = curthread;
667 p = td->td_proc;
668 if (p->p_ucred->cr_prison && !prison_quotas)
669 return (EPERM);
670
671 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
672 if (error == 0)
673 error = nlookup(&nd);
674 if (error == 0) {
675 mp = nd.nl_ncp->nc_mount;
676 error = VFS_QUOTACTL(mp, uap->cmd, uap->uid,
677 uap->arg, nd.nl_cred);
678 }
679 nlookup_done(&nd);
680 return (error);
681}
682
683/*
684 * mountctl(char *path, int op, int fd, const void *ctl, int ctllen,
685 * void *buf, int buflen)
686 *
687 * This function operates on a mount point and executes the specified
688 * operation using the specified control data, and possibly returns data.
689 *
 690 * On success, the number of bytes stored in the result buffer (0 if
 691 * none) is returned; on failure an error number is returned.
692 */
693/* ARGSUSED */
694int
695mountctl(struct mountctl_args *uap)
696{
697 struct thread *td = curthread;
698 struct proc *p = td->td_proc;
699 struct file *fp;
700 void *ctl = NULL;
701 void *buf = NULL;
702 char *path = NULL;
703 int error;
704
705 /*
706 * Sanity and permissions checks. We must be root.
707 */
708 KKASSERT(p);
709 if (p->p_ucred->cr_prison != NULL)
710 return (EPERM);
711 if ((error = suser(td)) != 0)
712 return (error);
713
714 /*
715 * Argument length checks
716 */
717 if (uap->ctllen < 0 || uap->ctllen > 1024)
718 return (EINVAL);
719 if (uap->buflen < 0 || uap->buflen > 16 * 1024)
720 return (EINVAL);
721 if (uap->path == NULL)
722 return (EINVAL);
723
724 /*
725 * Allocate the necessary buffers and copyin data
726 */
727 path = zalloc(namei_zone);
728 error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
729 if (error)
730 goto done;
731
732 if (uap->ctllen) {
733 ctl = malloc(uap->ctllen + 1, M_TEMP, M_WAITOK|M_ZERO);
734 error = copyin(uap->ctl, ctl, uap->ctllen);
735 if (error)
736 goto done;
737 }
738 if (uap->buflen)
739 buf = malloc(uap->buflen + 1, M_TEMP, M_WAITOK|M_ZERO);
740
741 /*
742 * Validate the descriptor
743 */
744 fp = holdfp(p->p_fd, uap->fd, -1);
745 if (fp == NULL) {
746 error = EBADF;
747 goto done;
748 }
749
750 /*
751 * Execute the internal kernel function and clean up.
752 */
753 error = kern_mountctl(path, uap->op, fp, ctl, uap->ctllen, buf, uap->buflen, &uap->sysmsg_result);
754 if (fp)
755 fdrop(fp);
756 if (error == 0 && uap->sysmsg_result > 0)
757 error = copyout(buf, uap->buf, uap->sysmsg_result);
758done:
759 if (path)
760 zfree(namei_zone, path);
761 if (ctl)
762 free(ctl, M_TEMP);
763 if (buf)
764 free(buf, M_TEMP);
765 return (error);
766}
767
768/*
769 * Execute a mount control operation by resolving the path to a mount point
770 * and calling vop_mountctl().
771 */
772int
773kern_mountctl(const char *path, int op, struct file *fp,
774 const void *ctl, int ctllen,
775 void *buf, int buflen, int *res)
776{
777 struct vnode *vp;
778 struct mount *mp;
779 struct nlookupdata nd;
780 int error;
781
782 *res = 0;
783 vp = NULL;
784 error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
785 if (error == 0)
786 error = nlookup(&nd);
787 if (error == 0)
788 error = cache_vget(nd.nl_ncp, nd.nl_cred, LK_EXCLUSIVE, &vp);
789 nlookup_done(&nd);
790 if (error)
791 return (error);
792
793 mp = vp->v_mount;
794
795 /*
796 * Must be the root of the filesystem
797 */
798 if ((vp->v_flag & VROOT) == 0) {
799 vput(vp);
800 return (EINVAL);
801 }
802 error = vop_mountctl(mp->mnt_vn_use_ops, op, fp, ctl, ctllen,
803 buf, buflen, res);
804 vput(vp);
805 return (error);
806}
807
808int
809kern_statfs(struct nlookupdata *nd, struct statfs *buf)
810{
811 struct thread *td = curthread;
812 struct proc *p = td->td_proc;
813 struct mount *mp;
814 struct statfs *sp;
815 char *fullpath, *freepath;
816 int error;
817
818 if ((error = nlookup(nd)) != 0)
819 return (error);
820 mp = nd->nl_ncp->nc_mount;
821 sp = &mp->mnt_stat;
822 if ((error = VFS_STATFS(mp, sp, nd->nl_cred)) != 0)
823 return (error);
824
825 error = cache_fullpath(p, mp->mnt_ncp, &fullpath, &freepath);
826 if (error)
827 return(error);
828 bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
829 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
830 free(freepath, M_TEMP);
831
832 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
833 bcopy(sp, buf, sizeof(*buf));
834 /* Only root should have access to the fsid's. */
835 if (suser(td))
836 buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
837 return (0);
838}
839
840/*
841 * statfs_args(char *path, struct statfs *buf)
842 *
843 * Get filesystem statistics.
844 */
845int
846statfs(struct statfs_args *uap)
847{
848 struct nlookupdata nd;
849 struct statfs buf;
850 int error;
851
852 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
853 if (error == 0)
854 error = kern_statfs(&nd, &buf);
855 nlookup_done(&nd);
856 if (error == 0)
857 error = copyout(&buf, uap->buf, sizeof(*uap->buf));
858 return (error);
859}
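/*
 * Editor's sketch (not part of vfs_syscalls.c): user-space view of the
 * statfs(2) path above, including the f_mntonname that kern_statfs()
 * rewrites via cache_fullpath().  The "/" argument is illustrative.
 */
#if 0	/* illustrative example, excluded from the kernel build */
#include <sys/param.h>
#include <sys/mount.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	struct statfs sf;

	if (statfs("/", &sf) == -1)
		err(1, "statfs /");
	printf("%s on %s type %s, %ld of %ld blocks free\n",
	    sf.f_mntfromname, sf.f_mntonname, sf.f_fstypename,
	    (long)sf.f_bfree, (long)sf.f_blocks);
	return (0);
}
#endif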
860
861int
862kern_fstatfs(int fd, struct statfs *buf)
863{
864 struct thread *td = curthread;
865 struct proc *p = td->td_proc;
866 struct file *fp;
867 struct mount *mp;
868 struct statfs *sp;
869 char *fullpath, *freepath;
870 int error;
871
872 KKASSERT(p);
873 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
874 return (error);
875 mp = ((struct vnode *)fp->f_data)->v_mount;
876 if (mp == NULL) {
877 error = EBADF;
878 goto done;
879 }
880 if (fp->f_cred == NULL) {
881 error = EINVAL;
882 goto done;
883 }
884 sp = &mp->mnt_stat;
885 if ((error = VFS_STATFS(mp, sp, fp->f_cred)) != 0)
886 goto done;
887
888 if ((error = cache_fullpath(p, mp->mnt_ncp, &fullpath, &freepath)) != 0)
889 goto done;
890 bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
891 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
892 free(freepath, M_TEMP);
893
894 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
895 bcopy(sp, buf, sizeof(*buf));
896
897 /* Only root should have access to the fsid's. */
898 if (suser(td))
899 buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
900 error = 0;
901done:
902 fdrop(fp);
903 return (error);
904}
905
906/*
907 * fstatfs_args(int fd, struct statfs *buf)
908 *
909 * Get filesystem statistics.
910 */
911int
912fstatfs(struct fstatfs_args *uap)
913{
914 struct statfs buf;
915 int error;
916
917 error = kern_fstatfs(uap->fd, &buf);
918
919 if (error == 0)
920 error = copyout(&buf, uap->buf, sizeof(*uap->buf));
921 return (error);
922}
923
924/*
925 * getfsstat_args(struct statfs *buf, long bufsize, int flags)
926 *
927 * Get statistics on all filesystems.
928 */
929
930struct getfsstat_info {
931 struct statfs *sfsp;
932 long count;
933 long maxcount;
934 int error;
935 int flags;
936 int is_chrooted;
937 struct proc *p;
938};
939
940static int getfsstat_callback(struct mount *, void *);
941
942/* ARGSUSED */
943int
944getfsstat(struct getfsstat_args *uap)
945{
946 struct thread *td = curthread;
947 struct proc *p = td->td_proc;
948 struct getfsstat_info info;
949
950 bzero(&info, sizeof(info));
951 if (p != NULL && (p->p_fd->fd_nrdir->nc_flag & NCF_ROOT) == 0)
952 info.is_chrooted = 1;
953 else
954 info.is_chrooted = 0;
955
956 info.maxcount = uap->bufsize / sizeof(struct statfs);
957 info.sfsp = uap->buf;
958 info.count = 0;
959 info.flags = uap->flags;
960 info.p = p;
961
962 mountlist_scan(getfsstat_callback, &info, MNTSCAN_FORWARD);
963 if (info.sfsp && info.count > info.maxcount)
964 uap->sysmsg_result = info.maxcount;
965 else
966 uap->sysmsg_result = info.count;
967 return (info.error);
968}
969
970static int
971getfsstat_callback(struct mount *mp, void *data)
972{
973 struct getfsstat_info *info = data;
974 struct statfs *sp;
975 char *freepath;
976 char *fullpath;
977 int error;
978
979 if (info->sfsp && info->count < info->maxcount) {
980 if (info->is_chrooted && !chroot_visible_mnt(mp, info->p))
981 return(0);
982 sp = &mp->mnt_stat;
983
984 /*
985 * If MNT_NOWAIT or MNT_LAZY is specified, do not
986 * refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY
987 * overrides MNT_WAIT.
988 */
989 if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
990 (info->flags & MNT_WAIT)) &&
991 (error = VFS_STATFS(mp, sp, info->p->p_ucred))) {
992 return(0);
993 }
994 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
995
996 error = cache_fullpath(info->p, mp->mnt_ncp,
997 &fullpath, &freepath);
998 if (error) {
999 info->error = error;
1000 return(-1);
1001 }
1002 bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
1003 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
1004 free(freepath, M_TEMP);
1005
1006 error = copyout(sp, info->sfsp, sizeof(*sp));
1007 if (error) {
1008 info->error = error;
1009 return (-1);
1010 }
1011 ++info->sfsp;
1012 }
1013 info->count++;
1014 return(0);
1015}
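/*
 * Editor's sketch (not part of vfs_syscalls.c): the usual two-call
 * getfsstat(2) pattern; a NULL buffer makes the callback above only count
 * mounts, and MNT_NOWAIT skips the per-filesystem VFS_STATFS() refresh.
 */
#if 0	/* illustrative example, excluded from the kernel build */
#include <sys/param.h>
#include <sys/mount.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct statfs *buf;
	int i, n;

	if ((n = getfsstat(NULL, 0, MNT_NOWAIT)) == -1)
		err(1, "getfsstat");
	if ((buf = malloc(sizeof(*buf) * n)) == NULL)
		err(1, "malloc");
	if ((n = getfsstat(buf, sizeof(*buf) * n, MNT_NOWAIT)) == -1)
		err(1, "getfsstat");
	for (i = 0; i < n; i++)
		printf("%s on %s\n", buf[i].f_mntfromname, buf[i].f_mntonname);
	free(buf);
	return (0);
}
#endif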
1016
1017/*
1018 * fchdir_args(int fd)
1019 *
1020 * Change current working directory to a given file descriptor.
1021 */
1022/* ARGSUSED */
1023int
1024fchdir(struct fchdir_args *uap)
1025{
1026 struct thread *td = curthread;
1027 struct proc *p = td->td_proc;
1028 struct filedesc *fdp = p->p_fd;
1029 struct vnode *vp, *ovp;
1030 struct mount *mp;
1031 struct file *fp;
1032 struct namecache *ncp, *oncp;
1033 struct namecache *nct;
1034 int error;
1035
1036 if ((error = holdvnode(fdp, uap->fd, &fp)) != 0)
1037 return (error);
1038 vp = (struct vnode *)fp->f_data;
1039 vref(vp);
1040 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1041 if (vp->v_type != VDIR || fp->f_ncp == NULL)
1042 error = ENOTDIR;
1043 else
1044 error = VOP_ACCESS(vp, VEXEC, p->p_ucred);
1045 if (error) {
1046 vput(vp);
1047 fdrop(fp);
1048 return (error);
1049 }
1050 ncp = cache_hold(fp->f_ncp);
1051 while (!error && (mp = vp->v_mountedhere) != NULL) {
1052 error = nlookup_mp(mp, &nct);
1053 if (error == 0) {
1054 cache_unlock(nct); /* leave ref intact */
1055 vput(vp);
1056 vp = nct->nc_vp;
1057 error = vget(vp, LK_SHARED);
1058 KKASSERT(error == 0);
1059 cache_drop(ncp);
1060 ncp = nct;
1061 }
1062 }
1063 if (error == 0) {
1064 ovp = fdp->fd_cdir;
1065 oncp = fdp->fd_ncdir;
1066 VOP_UNLOCK(vp, 0); /* leave ref intact */
1067 fdp->fd_cdir = vp;
1068 fdp->fd_ncdir = ncp;
1069 cache_drop(oncp);
1070 vrele(ovp);
1071 } else {
1072 cache_drop(ncp);
1073 vput(vp);
1074 }
1075 fdrop(fp);
1076 return (error);
1077}
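/*
 * Editor's sketch (not part of vfs_syscalls.c): the classic save/restore
 * use of fchdir(2) serviced by the function above; /tmp is illustrative.
 */
#if 0	/* illustrative example, excluded from the kernel build */
#include <fcntl.h>
#include <unistd.h>
#include <err.h>

int
main(void)
{
	int dfd;

	if ((dfd = open(".", O_RDONLY)) == -1)	/* remember where we were */
		err(1, "open .");
	if (chdir("/tmp") == -1)
		err(1, "chdir /tmp");
	if (fchdir(dfd) == -1)			/* and jump straight back */
		err(1, "fchdir");
	close(dfd);
	return (0);
}
#endif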
1078
1079int
1080kern_chdir(struct nlookupdata *nd)
1081{
1082 struct thread *td = curthread;
1083 struct proc *p = td->td_proc;
1084 struct filedesc *fdp = p->p_fd;
1085 struct vnode *vp, *ovp;
1086 struct namecache *oncp;
1087 int error;
1088
1089 if ((error = nlookup(nd)) != 0)
1090 return (error);
1091 if ((vp = nd->nl_ncp->nc_vp) == NULL)
1092 return (ENOENT);
1093 if ((error = vget(vp, LK_SHARED)) != 0)
1094 return (error);
1095
1096 error = checkvp_chdir(vp, td);
1097 VOP_UNLOCK(vp, 0);
1098 if (error == 0) {
1099 ovp = fdp->fd_cdir;
1100 oncp = fdp->fd_ncdir;
1101 cache_unlock(nd->nl_ncp); /* leave reference intact */
1102 fdp->fd_ncdir = nd->nl_ncp;
1103 fdp->fd_cdir = vp;
1104 cache_drop(oncp);
1105 vrele(ovp);
1106 nd->nl_ncp = NULL;
1107 } else {
1108 vrele(vp);
1109 }
1110 return (error);
1111}
1112
1113/*
1114 * chdir_args(char *path)
1115 *
1116 * Change current working directory (``.'').
1117 */
1118int
1119chdir(struct chdir_args *uap)
1120{
1121 struct nlookupdata nd;
1122 int error;
1123
1124 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1125 if (error == 0)
1126 error = kern_chdir(&nd);
1127 nlookup_done(&nd);
1128 return (error);
1129}
1130
1131/*
1132 * Helper function for the stricter chroot(2) security setting: refuse
1133 * the operation if any file descriptors are open directories.
1134 */
1135static int
1136chroot_refuse_vdir_fds(struct filedesc *fdp)
1138{
1139 struct vnode *vp;
1140 struct file *fp;
1141 int error;
1142 int fd;
1143
1144 for (fd = 0; fd < fdp->fd_nfiles ; fd++) {
1145 if ((error = holdvnode(fdp, fd, &fp)) != 0)
1146 continue;
1147 vp = (struct vnode *)fp->f_data;
1148 if (vp->v_type != VDIR) {
1149 fdrop(fp);
1150 continue;
1151 }
1152 fdrop(fp);
1153 return(EPERM);
1154 }
1155 return (0);
1156}
1157
1158/*
1159 * This sysctl determines if we will allow a process to chroot(2) if it
1160 * has a directory open:
1161 * 0: disallowed for all processes.
1162 * 1: allowed for processes that were not already chroot(2)'ed.
1163 * 2: allowed for all processes.
1164 */
1165
1166static int chroot_allow_open_directories = 1;
1167
1168SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW,
1169 &chroot_allow_open_directories, 0, "");
1170
1171/*
1172 * chroot to the specified namecache entry. We obtain the vp from the
1173 * namecache data. The passed ncp must be locked and referenced and will
1174 * remain locked and referenced on return.
1175 */
1176int
1177kern_chroot(struct namecache *ncp)
1178{
1179 struct thread *td = curthread;
1180 struct proc *p = td->td_proc;
1181 struct filedesc *fdp = p->p_fd;
1182 struct vnode *vp;
1183 int error;
1184
1185 /*
1186 * Only root can chroot
1187 */
1188 if ((error = suser_cred(p->p_ucred, PRISON_ROOT)) != 0)
1189 return (error);
1190
1191 /*
1192 * Disallow open directory descriptors (fchdir() breakouts).
1193 */
1194 if (chroot_allow_open_directories == 0 ||
1195 (chroot_allow_open_directories == 1 && fdp->fd_rdir != rootvnode)) {
1196 if ((error = chroot_refuse_vdir_fds(fdp)) != 0)
1197 return (error);
1198 }
1199 if ((vp = ncp->nc_vp) == NULL)
1200 return (ENOENT);
1201
1202 if ((error = vget(vp, LK_SHARED)) != 0)
1203 return (error);
1204
1205 /*
1206 * Check the validity of vp as a directory to change to and
1207 * associate it with rdir/jdir.
1208 */
1209 error = checkvp_chdir(vp, td);
1210 VOP_UNLOCK(vp, 0); /* leave reference intact */
1211 if (error == 0) {
1212 vrele(fdp->fd_rdir);
1213 fdp->fd_rdir = vp; /* reference inherited by fd_rdir */
1214 cache_drop(fdp->fd_nrdir);
1215 fdp->fd_nrdir = cache_hold(ncp);
1216 if (fdp->fd_jdir == NULL) {
1217 fdp->fd_jdir = vp;
1218 vref(fdp->fd_jdir);
1219 fdp->fd_njdir = cache_hold(ncp);
1220 }
1221 } else {
1222 vrele(vp);
1223 }
1224 return (error);
1225}
1226
1227/*
1228 * chroot_args(char *path)
1229 *
1230 * Change notion of root (``/'') directory.
1231 */
1232/* ARGSUSED */
1233int
1234chroot(struct chroot_args *uap)
1235{
1236 struct thread *td = curthread;
1237 struct nlookupdata nd;
1238 int error;
1239
1240 KKASSERT(td->td_proc);
1241 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1242 if (error) {
1243 nlookup_done(&nd);
1244 return(error);
1245 }
1246 error = nlookup(&nd);
1247 if (error == 0)
1248 error = kern_chroot(nd.nl_ncp);
1249 nlookup_done(&nd);
1250 return(error);
1251}
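/*
 * Editor's sketch (not part of vfs_syscalls.c): typical chroot(2) usage.
 * Only root may call it (see kern_chroot() above), and chdir("/") after
 * the call is the conventional way to move the working directory inside
 * the new root; /var/empty is an illustrative target.
 */
#if 0	/* illustrative example, excluded from the kernel build */
#include <unistd.h>
#include <err.h>

int
main(void)
{
	if (chroot("/var/empty") == -1)
		err(1, "chroot");
	if (chdir("/") == -1)
		err(1, "chdir");
	return (0);
}
#endif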
1252
1253/*
1254 * Common routine for chroot and chdir. Given a locked, referenced vnode,
1255 * determine whether it is legal to chdir to the vnode. The vnode's state
1256 * is not changed by this call.
1257 */
1258int
1259checkvp_chdir(struct vnode *vp, struct thread *td)
1260{
1261 int error;
1262
1263 if (vp->v_type != VDIR)
1264 error = ENOTDIR;
1265 else
1266 error = VOP_ACCESS(vp, VEXEC, td->td_proc->p_ucred);
1267 return (error);
1268}
1269
1270int
1271kern_open(struct nlookupdata *nd, int oflags, int mode, int *res)
1272{
1273 struct thread *td = curthread;
1274 struct proc *p = td->td_proc;
1275 struct lwp *lp = td->td_lwp;
1276 struct filedesc *fdp = p->p_fd;
1277 int cmode, flags;
1278 struct file *nfp;
1279 struct file *fp;
1280 struct vnode *vp;
1281 int type, indx, error;
1282 struct flock lf;
1283
1284 if ((oflags & O_ACCMODE) == O_ACCMODE)
1285 return (EINVAL);
1286 flags = FFLAGS(oflags);
1287 error = falloc(p, &nfp, NULL);
1288 if (error)
1289 return (error);
1290 fp = nfp;
1291 cmode = ((mode &~ fdp->fd_cmask) & ALLPERMS) &~ S_ISTXT;
1292
1293 /*
1294 * XXX p_dupfd is a real mess. It allows a device to return a
1295 * file descriptor to be duplicated rather than doing the open
1296 * itself.
1297 */
1298 lp->lwp_dupfd = -1;
1299
1300 /*
1301 * Call vn_open() to do the lookup and assign the vnode to the
1302 * file pointer. vn_open() does not change the ref count on fp
1303 * and the vnode, on success, will be inherited by the file pointer
1304 * and unlocked.
1305 */
1306 nd->nl_flags |= NLC_LOCKVP;
1307 error = vn_open(nd, fp, flags, cmode);
1308 nlookup_done(nd);
1309 if (error) {
1310 /*
1311 * handle special fdopen() case. bleh. dupfdopen() is
1312 * responsible for dropping the old contents of ofiles[indx]
1313 * if it succeeds.
1314 *
1315 * Note that fsetfd() will add a ref to fp which represents
1316 * the fd_files[] assignment. We must still drop our
1317 * reference.
1318 */
1319 if ((error == ENODEV || error == ENXIO) && lp->lwp_dupfd >= 0) {
1320 if (fdalloc(p, 0, &indx) == 0) {
1321 error = dupfdopen(p, indx, lp->lwp_dupfd, flags, error);
1322 if (error == 0) {
1323 *res = indx;
1324 fdrop(fp); /* our ref */
1325 return (0);
1326 }
1327 fsetfd(p, NULL, indx);
1328 }
1329 }
1330 fdrop(fp); /* our ref */
1331 if (error == ERESTART)
1332 error = EINTR;
1333 return (error);
1334 }
1335
1336 /*
1337 * ref the vnode for ourselves so it can't be ripped out from under
1338 * us. XXX need an ND flag to request that the vnode be returned
1339 * anyway.
1340 *
1341 * Reserve a file descriptor but do not assign it until the open
1342 * succeeds.
1343 */
1344 vp = (struct vnode *)fp->f_data;
1345 vref(vp);
1346 if ((error = fdalloc(p, 0, &indx)) != 0) {
1347 fdrop(fp);
1348 vrele(vp);
1349 return (error);
1350 }
1351
1352 /*
1353 * If no error occurs the vp will have been assigned to the file
1354 * pointer.
1355 */
1356 lp->lwp_dupfd = 0;
1357
1358 if (flags & (O_EXLOCK | O_SHLOCK)) {
1359 lf.l_whence = SEEK_SET;
1360 lf.l_start = 0;
1361 lf.l_len = 0;
1362 if (flags & O_EXLOCK)
1363 lf.l_type = F_WRLCK;
1364 else
1365 lf.l_type = F_RDLCK;
1366 if (flags & FNONBLOCK)
1367 type = 0;
1368 else
1369 type = F_WAIT;
1370
1371 if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
1372 /*
1373 * lock request failed. Clean up the reserved
1374 * descriptor.
1375 */
1376 vrele(vp);
1377 fsetfd(p, NULL, indx);
1378 fdrop(fp);
1379 return (error);
1380 }
1381 fp->f_flag |= FHASLOCK;
1382 }
1383#if 0
1384 /*
1385 * Assert that all regular file vnodes were created with an object.
1386 */
1387 KASSERT(vp->v_type != VREG || vp->v_object != NULL,
1388 ("open: regular file has no backing object after vn_open"));
1389#endif
1390
1391 vrele(vp);
1392
1393 /*
1394 * release our private reference, leaving the one associated with the
1395 * descriptor table intact.
1396 */
1397 fsetfd(p, fp, indx);
1398 fdrop(fp);
1399 *res = indx;
1400 return (0);
1401}
1402
1403/*
1404 * open_args(char *path, int flags, int mode)
1405 *
1406 * Check permissions, allocate an open file structure,
1407 * and call the device open routine if any.
1408 */
1409int
1410open(struct open_args *uap)
1411{
1412 struct nlookupdata nd;
1413 int error;
1414
1415 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1416 if (error == 0) {
1417 error = kern_open(&nd, uap->flags,
1418 uap->mode, &uap->sysmsg_result);
1419 }
1420 nlookup_done(&nd);
1421 return (error);
1422}
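/*
 * Editor's sketch (not part of vfs_syscalls.c): open(2) with O_EXLOCK,
 * which kern_open() above turns into a VOP_ADVLOCK() whole-file write
 * lock before the descriptor is published.  The path is illustrative.
 */
#if 0	/* illustrative example, excluded from the kernel build */
#include <fcntl.h>
#include <unistd.h>
#include <err.h>

int
main(void)
{
	int fd;

	fd = open("/tmp/lockfile", O_RDWR | O_CREAT | O_EXLOCK, 0644);
	if (fd == -1)
		err(1, "open /tmp/lockfile");
	/* ... exclusive work ... */
	close(fd);	/* releases the FHASLOCK advisory lock */
	return (0);
}
#endif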
1423
1424int
1425kern_mknod(struct nlookupdata *nd, int mode, int dev)
1426{
1427 struct namecache *ncp;
1428 struct thread *td = curthread;
1429 struct proc *p = td->td_proc;
1430 struct vnode *vp;
1431 struct vattr vattr;
1432 int error;
1433 int whiteout = 0;
1434
1435 KKASSERT(p);
1436
1437 switch (mode & S_IFMT) {
1438 case S_IFCHR:
1439 case S_IFBLK:
1440 error = suser(td);
1441 break;
1442 default:
1443 error = suser_cred(p->p_ucred, PRISON_ROOT);
1444 break;
1445 }
1446 if (error)
1447 return (error);
1448
1449 bwillwrite();
1450 nd->nl_flags |= NLC_CREATE;
1451 if ((error = nlookup(nd)) != 0)
1452 return (error);
1453 ncp = nd->nl_ncp;
1454 if (ncp->nc_vp)
1455 return (EEXIST);
1456
1457 VATTR_NULL(&vattr);
1458 vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask;
1459 vattr.va_rdev = dev;
1460 whiteout = 0;
1461
1462 switch (mode & S_IFMT) {
1463 case S_IFMT: /* used by badsect to flag bad sectors */
1464 vattr.va_type = VBAD;
1465 break;
1466 case S_IFCHR:
1467 vattr.va_type = VCHR;
1468 break;
1469 case S_IFBLK:
1470 vattr.va_type = VBLK;
1471 break;
1472 case S_IFWHT:
1473 whiteout = 1;
1474 break;
1475 default:
1476 error = EINVAL;
1477 break;
1478 }
1479 if (error == 0) {
1480 if (whiteout) {
1481 error = VOP_NWHITEOUT(ncp, nd->nl_cred, NAMEI_CREATE);
1482 } else {
1483 vp = NULL;
1484 error = VOP_NMKNOD(ncp, &vp, nd->nl_cred, &vattr);
1485 if (error == 0)
1486 vput(vp);
1487 }
1488 }
1489 return (error);
1490}
1491
1492/*
1493 * mknod_args(char *path, int mode, int dev)
1494 *
1495 * Create a special file.
1496 */
1497int
1498mknod(struct mknod_args *uap)
1499{
1500 struct nlookupdata nd;
1501 int error;
1502
1503 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
1504 if (error == 0)
1505 error = kern_mknod(&nd, uap->mode, uap->dev);
1506 nlookup_done(&nd);
1507 return (error);
1508}
1509
1510int
1511kern_mkfifo(struct nlookupdata *nd, int mode)
1512{
1513 struct namecache *ncp;
1514 struct thread *td = curthread;
1515 struct proc *p = td->td_proc;
1516 struct vattr vattr;
1517 struct vnode *vp;
1518 int error;
1519
1520 bwillwrite();
1521
1522 nd->nl_flags |= NLC_CREATE;
1523 if ((error = nlookup(nd)) != 0)
1524 return (error);
1525 ncp = nd->nl_ncp;
1526 if (ncp->nc_vp)
1527 return (EEXIST);
1528
1529 VATTR_NULL(&vattr);
1530 vattr.va_type = VFIFO;
1531 vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask;
1532 vp = NULL;
1533 error = VOP_NMKNOD(ncp, &vp, nd->nl_cred, &vattr);
1534 if (error == 0)
1535 vput(vp);
1536 return (error);
1537}
1538
1539/*
1540 * mkfifo_args(char *path, int mode)
1541 *
1542 * Create a named pipe.
1543 */
1544int
1545mkfifo(struct mkfifo_args *uap)
1546{
1547 struct nlookupdata nd;
1548 int error;
1549
1550 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
1551 if (error == 0)
1552 error = kern_mkfifo(&nd, uap->mode);
1553 nlookup_done(&nd);
1554 return (error);
1555}
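/*
 * Editor's sketch (not part of vfs_syscalls.c): creating a named pipe
 * via the mkfifo(2) path above; the path is illustrative.
 */
#if 0	/* illustrative example, excluded from the kernel build */
#include <sys/stat.h>
#include <errno.h>
#include <err.h>

int
main(void)
{
	if (mkfifo("/tmp/example.fifo", 0600) == -1 && errno != EEXIST)
		err(1, "mkfifo");
	return (0);
}
#endif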
1556
1557static int hardlink_check_uid = 0;
1558SYSCTL_INT(_kern, OID_AUTO, hardlink_check_uid, CTLFLAG_RW,
1559 &hardlink_check_uid, 0,
1560 "Unprivileged processes cannot create hard links to files owned by other "
1561 "users");
1562static int hardlink_check_gid = 0;
1563SYSCTL_INT(_kern, OID_AUTO, hardlink_check_gid, CTLFLAG_RW,
1564 &hardlink_check_gid, 0,
1565 "Unprivileged processes cannot create hard links to files owned by other "
1566 "groups");
1567
1568static int
1569can_hardlink(struct vnode *vp, struct thread *td, struct ucred *cred)
1570{
1571 struct vattr va;
1572 int error;
1573
1574 /*
1575 * Shortcut if disabled
1576 */
1577 if (hardlink_check_uid == 0 && hardlink_check_gid == 0)
1578 return (0);
1579
1580 /*
1581 * root cred can always hardlink
1582 */
1583 if (suser_cred(cred, PRISON_ROOT) == 0)
1584 return (0);
1585
1586 /*
1587 * Otherwise only if the originating file is owned by the
1588 * same user or group. Note that any group is allowed if
1589 * the file is owned by the caller.
1590 */
1591 error = VOP_GETATTR(vp, &va);
1592 if (error != 0)
1593 return (error);
1594
1595 if (hardlink_check_uid) {
1596 if (cred->cr_uid != va.va_uid)
1597 return (EPERM);
1598 }
1599
1600 if (hardlink_check_gid) {
1601 if (cred->cr_uid != va.va_uid && !groupmember(va.va_gid, cred))
1602 return (EPERM);
1603 }
1604
1605 return (0);
1606}
1607
1608int
1609kern_link(struct nlookupdata *nd, struct nlookupdata *linknd)
1610{
1611 struct thread *td = curthread;
1612 struct vnode *vp;
1613 int error;
1614
1615 /*
1616 * Look up the source and obtain a locked vnode.
1617 *
1618 * XXX relookup on vget failure / race ?
1619 */
1620 bwillwrite();
1621 if ((error = nlookup(nd)) != 0)
1622 return (error);
1623 vp = nd->nl_ncp->nc_vp;
1624 KKASSERT(vp != NULL);
1625 if (vp->v_type == VDIR)
1626 return (EPERM); /* POSIX */
1627 if ((error = vget(vp, LK_EXCLUSIVE)) != 0)
1628 return (error);
1629
1630 /*
1631 * Unlock the source so we can lookup the target without deadlocking
1632 * (XXX vp is locked already, possible other deadlock?). The target
1633 * must not exist.
1634 */
1635 KKASSERT(nd->nl_flags & NLC_NCPISLOCKED);
1636 nd->nl_flags &= ~NLC_NCPISLOCKED;
1637 cache_unlock(nd->nl_ncp);
1638
1639 linknd->nl_flags |= NLC_CREATE;
1640 if ((error = nlookup(linknd)) != 0) {
1641 vput(vp);
1642 return (error);
1643 }
1644 if (linknd->nl_ncp->nc_vp) {
1645 vput(vp);
1646 return (EEXIST);
1647 }
1648
1649 /*
1650 * Finally run the new API VOP.
1651 */
1652 error = can_hardlink(vp, td, td->td_proc->p_ucred);
1653 if (error == 0)
1654 error = VOP_NLINK(linknd->nl_ncp, vp, linknd->nl_cred);
1655 vput(vp);
1656 return (error);
1657}
1658
1659/*
1660 * link_args(char *path, char *link)
1661 *
1662 * Make a hard file link.
1663 */
1664int
1665link(struct link_args *uap)
1666{
1667 struct nlookupdata nd, linknd;
1668 int error;
1669
1670 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1671 if (error == 0) {
1672 error = nlookup_init(&linknd, uap->link, UIO_USERSPACE, 0);
1673 if (error == 0)
1674 error = kern_link(&nd, &linknd);
1675 nlookup_done(&linknd);
1676 }
1677 nlookup_done(&nd);
1678 return (error);
1679}
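/*
 * Editor's sketch (not part of vfs_syscalls.c): link(2) as handled by
 * kern_link() above.  Directories are refused with EPERM, and the
 * kern.hardlink_check_uid/gid sysctls may further restrict unprivileged
 * callers.  Both paths are illustrative.
 */
#if 0	/* illustrative example, excluded from the kernel build */
#include <unistd.h>
#include <err.h>

int
main(void)
{
	if (link("/tmp/original", "/tmp/hardlink") == -1)
		err(1, "link");
	return (0);
}
#endif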
1680
1681int
1682kern_symlink(struct nlookupdata *nd, char *path, int mode)
1683{
1684 struct namecache *ncp;
1685 struct vattr vattr;
1686 struct vnode *vp;
1687 int error;
1688
1689 bwillwrite();
1690 nd->nl_flags |= NLC_CREATE;
1691 if ((error = nlookup(nd)) != 0)
1692 return (error);
1693 ncp = nd->nl_ncp;
1694 if (ncp->nc_vp)
1695 return (EEXIST);
1696
1697 VATTR_NULL(&vattr);
1698 vattr.va_mode = mode;
1699 error = VOP_NSYMLINK(ncp, &vp, nd->nl_cred, &vattr, path);
1700 if (error == 0)
1701 vput(vp);
1702 return (error);
1703}
1704
1705/*
1706 * symlink(char *path, char *link)
1707 *
1708 * Make a symbolic link.
1709 */
1710int
1711symlink(struct symlink_args *uap)
1712{
1713 struct thread *td = curthread;
1714 struct nlookupdata nd;
1715 char *path;
1716 int error;
1717 int mode;
1718
1719 path = zalloc(namei_zone);
1720 error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
1721 if (error == 0) {
1722 error = nlookup_init(&nd, uap->link, UIO_USERSPACE, 0);
1723 if (error == 0) {
1724 mode = ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask;
1725 error = kern_symlink(&nd, path, mode);
1726 }
1727 nlookup_done(&nd);
1728 }
1729 zfree(namei_zone, path);
1730 return (error);
1731}
1732
1733/*
1734 * undelete_args(char *path)
1735 *
1736 * Delete a whiteout from the filesystem.
1737 */
1738/* ARGSUSED */
1739int
1740undelete(struct undelete_args *uap)
1741{
1742 struct nlookupdata nd;
1743 int error;
1744
1745 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
1746 bwillwrite();
1747 nd.nl_flags |= NLC_DELETE;
1748 if (error == 0)
1749 error = nlookup(&nd);
1750 if (error == 0)
1751 error = VOP_NWHITEOUT(nd.nl_ncp, nd.nl_cred, NAMEI_DELETE);
1752 nlookup_done(&nd);
1753 return (error);
1754}
1755
1756int
1757kern_unlink(struct nlookupdata *nd)
1758{
1759 struct namecache *ncp;
1760 int error;
1761
1762 bwillwrite();
1763 nd->nl_flags |= NLC_DELETE;
1764 if ((error = nlookup(nd)) != 0)
1765 return (error);
1766 ncp = nd->nl_ncp;
1767 error = VOP_NREMOVE(ncp, nd->nl_cred);
1768 return (error);
1769}
1770
1771/*
1772 * unlink_args(char *path)
1773 *
1774 * Delete a name from the filesystem.
1775 */
1776int
1777unlink(struct unlink_args *uap)
1778{
1779 struct nlookupdata nd;
1780 int error;
1781
1782 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
1783 if (error == 0)
1784 error = kern_unlink(&nd);
1785 nlookup_done(&nd);
1786 return (error);
1787}
1788
1789int
1790kern_lseek(int fd, off_t offset, int whence, off_t *res)
1791{
1792 struct thread *td = curthread;
1793 struct proc *p = td->td_proc;
1794 struct file *fp;
1795 struct vattr vattr;
1796 int error;
1797
1798 fp = holdfp(p->p_fd, fd, -1);
1799 if (fp == NULL)
1800 return (EBADF);
1801 if (fp->f_type != DTYPE_VNODE) {
1802 error = ESPIPE;
1803 goto done;
1804 }
1805
1806 switch (whence) {
1807 case L_INCR:
1808 fp->f_offset += offset;
1809 error = 0;
1810 break;
1811 case L_XTND:
1812 error = VOP_GETATTR((struct vnode *)fp->f_data, &vattr);
1813 if (error == 0)
1814 fp->f_offset = offset + vattr.va_size;
1815 break;
1816 case L_SET:
1817 fp->f_offset = offset;
1818 error = 0;
1819 break;
1820 default:
1821 error = EINVAL;
1822 break;
1823 }
1824 *res = fp->f_offset;
1825done:
1826 fdrop(fp);
1827 return (error);
1828}
1829
1830/*
1831 * lseek_args(int fd, int pad, off_t offset, int whence)
1832 *
1833 * Reposition read/write file offset.
1834 */
1835int
1836lseek(struct lseek_args *uap)
1837{
1838 int error;
1839
1840 error = kern_lseek(uap->fd, uap->offset, uap->whence,
1841 &uap->sysmsg_offset);
1842
1843 return (error);
1844}
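/*
 * Editor's sketch (not part of vfs_syscalls.c): SEEK_END maps onto the
 * L_XTND case of kern_lseek() above, which sizes the file with
 * VOP_GETATTR().  /etc/passwd is only an illustrative file.
 */
#if 0	/* illustrative example, excluded from the kernel build */
#include <fcntl.h>
#include <unistd.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	off_t end;
	int fd;

	if ((fd = open("/etc/passwd", O_RDONLY)) == -1)
		err(1, "open");
	if ((end = lseek(fd, 0, SEEK_END)) == -1)
		err(1, "lseek");
	printf("file is %lld bytes\n", (long long)end);
	close(fd);
	return (0);
}
#endif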
1845
1846int
1847kern_access(struct nlookupdata *nd, int aflags)
1848{
1849 struct vnode *vp;
1850 int error, flags;
1851
1852 if ((error = nlookup(nd)) != 0)
1853 return (error);
1854retry:
1855 error = cache_vget(nd->nl_ncp, nd->nl_cred, LK_EXCLUSIVE, &vp);
1856 if (error)
1857 return (error);
1858
1859 /* Flags == 0 means only check for existence. */
1860 if (aflags) {
1861 flags = 0;
1862 if (aflags & R_OK)
1863 flags |= VREAD;
1864 if (aflags & W_OK)
1865 flags |= VWRITE;
1866 if (aflags & X_OK)
1867 flags |= VEXEC;
1868 if ((flags & VWRITE) == 0 || (error = vn_writechk(vp)) == 0)
1869 error = VOP_ACCESS(vp, flags, nd->nl_cred);
1870
1871 /*
1872 * If the file handle is stale we have to re-resolve the
1873 * entry. This is a hack at the moment.
1874 */
1875 if (error == ESTALE) {
1876 cache_setunresolved(nd->nl_ncp);
1877 error = cache_resolve(nd->nl_ncp, nd->nl_cred);
1878 if (error == 0) {
1879 vput(vp);
1880 vp = NULL;
1881 goto retry;
1882 }
1883 }
1884 }
1885 vput(vp);
1886 return (error);
1887}
1888
1889/*
1890 * access_args(char *path, int flags)
1891 *
1892 * Check access permissions.
1893 */
1894int
1895access(struct access_args *uap)
1896{
1897 struct nlookupdata nd;
1898 int error;
1899
1900 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1901 if (error == 0)
1902 error = kern_access(&nd, uap->flags);
1903 nlookup_done(&nd);
1904 return (error);
1905}
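/*
 * Editor's sketch (not part of vfs_syscalls.c): access(2) as routed
 * through kern_access() above; a zero flags argument only tests for
 * existence.  /etc/passwd is an illustrative path.
 */
#if 0	/* illustrative example, excluded from the kernel build */
#include <unistd.h>
#include <stdio.h>

int
main(void)
{
	if (access("/etc/passwd", R_OK | W_OK) == 0)
		printf("readable and writable\n");
	else
		printf("no read/write access\n");
	return (0);
}
#endif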
1906
1907int
1908kern_stat(struct nlookupdata *nd, struct stat *st)
1909{
1910 int error;
1911 struct vnode *vp;
1912 thread_t td;
1913
1914 if ((error = nlookup(nd)) != 0)
1915 return (error);
1916again:
1917 if ((vp = nd->nl_ncp->nc_vp) == NULL)
1918 return (ENOENT);
1919
1920 td = curthread;
1921 if ((error = vget(vp, LK_SHARED)) != 0)
1922 return (error);
1923 error = vn_stat(vp, st, nd->nl_cred);
1924
1925 /*
1926 * If the file handle is stale we have to re-resolve the entry. This
1927 * is a hack at the moment.
1928 */
1929 if (error == ESTALE) {
1930 cache_setunresolved(nd->nl_ncp);
1931 error = cache_resolve(nd->nl_ncp, nd->nl_cred);
1932 if (error == 0) {
1933 vput(vp);
1934 goto again;
1935 }
1936 }
1937 vput(vp);
1938 return (error);
1939}
1940
1941/*
1942 * stat_args(char *path, struct stat *ub)
1943 *
1944 * Get file status; this version follows links.
1945 */
1946int
1947stat(struct stat_args *uap)
1948{
1949 struct nlookupdata nd;
1950 struct stat st;
1951 int error;
1952
1953 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1954 if (error == 0) {
1955 error = kern_stat(&nd, &st);
1956 if (error == 0)
1957 error = copyout(&st, uap->ub, sizeof(*uap->ub));
1958 }
1959 nlookup_done(&nd);
1960 return (error);
1961}
1962
1963/*
1964 * lstat_args(char *path, struct stat *ub)
1965 *
1966 * Get file status; this version does not follow links.
1967 */
1968int
1969lstat(struct lstat_args *uap)
1970{
1971 struct nlookupdata nd;
1972 struct stat st;
1973 int error;
1974
1975 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
1976 if (error == 0) {
1977 error = kern_stat(&nd, &st);
1978 if (error == 0)
1979 error = copyout(&st, uap->ub, sizeof(*uap->ub));
1980 }
1981 nlookup_done(&nd);
1982 return (error);
1983}
1984
1985/*
1986 * pathconf_Args(char *path, int name)
1987 *
1988 * Get configurable pathname variables.
1989 */
1990/* ARGSUSED */
1991int
1992pathconf(struct pathconf_args *uap)
1993{
1994 struct nlookupdata nd;
1995 struct vnode *vp;
1996 int error;
1997
1998 vp = NULL;
1999 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2000 if (error == 0)
2001 error = nlookup(&nd);
2002 if (error == 0)
2003 error = cache_vget(nd.nl_ncp, nd.nl_cred, LK_EXCLUSIVE, &vp);
2004 nlookup_done(&nd);
2005 if (error == 0) {
2006 error = VOP_PATHCONF(vp, uap->name, uap->sysmsg_fds);
2007 vput(vp);
2008 }
2009 return (error);
2010}
2011
2012/*
2013 * XXX: daver
2014 * kern_readlink isn't properly split yet. There is a copyin buried
2015 * in VOP_READLINK().
2016 */
2017int
2018kern_readlink(struct nlookupdata *nd, char *buf, int count, int *res)
2019{
2020 struct thread *td = curthread;
2021 struct proc *p = td->td_proc;
2022 struct vnode *vp;
2023 struct iovec aiov;
2024 struct uio auio;
2025 int error;
2026
2027 if ((error = nlookup(nd)) != 0)
2028 return (error);
2029 error = cache_vget(nd->nl_ncp, nd->nl_cred, LK_EXCLUSIVE, &vp);
2030 if (error)
2031 return (error);
2032 if (vp->v_type != VLNK) {
2033 error = EINVAL;
2034 } else {
2035 aiov.iov_base = buf;
2036 aiov.iov_len = count;
2037 auio.uio_iov = &aiov;
2038 auio.uio_iovcnt = 1;
2039 auio.uio_offset = 0;
2040 auio.uio_rw = UIO_READ;
2041 auio.uio_segflg = UIO_USERSPACE;
2042 auio.uio_td = td;
2043 auio.uio_resid = count;
2044 error = VOP_READLINK(vp, &auio, p->p_ucred);
2045 }
2046 vput(vp);
2047 *res = count - auio.uio_resid;
2048 return (error);
2049}
2050
2051/*
2052 * readlink_args(char *path, char *buf, int count)
2053 *
2054 * Return target name of a symbolic link.
2055 */
2056int
2057readlink(struct readlink_args *uap)
2058{
2059 struct nlookupdata nd;
2060 int error;
2061
2062 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2063 if (error == 0) {
2064 error = kern_readlink(&nd, uap->buf, uap->count,
2065 &uap->sysmsg_result);
2066 }
2067 nlookup_done(&nd);
2068 return (error);
2069}
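/*
 * Editor's sketch (not part of vfs_syscalls.c): readlink(2) via
 * kern_readlink() above.  The target is not NUL-terminated by the kernel;
 * the return value gives the length.  The path is illustrative.
 */
#if 0	/* illustrative example, excluded from the kernel build */
#include <unistd.h>
#include <limits.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	char target[PATH_MAX];
	int n;

	if ((n = readlink("/tmp/symlink", target, sizeof(target) - 1)) == -1)
		err(1, "readlink");
	target[n] = '\0';
	printf("-> %s\n", target);
	return (0);
}
#endif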
2070
2071static int
2072setfflags(struct vnode *vp, int flags)
2073{
2074 struct thread *td = curthread;
2075 struct proc *p = td->td_proc;
2076 int error;
2077 struct vattr vattr;
2078
2079 /*
2080 * Prevent non-root users from setting flags on devices. When
2081 * a device is reused, users can retain ownership of the device
2082 * if they are allowed to set flags and programs assume that
2083 * chown can't fail when done as root.
2084 */
2085 if ((vp->v_type == VCHR || vp->v_type == VBLK) &&
2086 ((error = suser_cred(p->p_ucred, PRISON_ROOT)) != 0))
2087 return (error);
2088
2089 /*
2090 * note: vget is required for any operation that might mod the vnode
2091 * so VINACTIVE is properly cleared.
2092 */
2093 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
2094 VATTR_NULL(&vattr);
2095 vattr.va_flags = flags;
2096 error = VOP_SETATTR(vp, &vattr, p->p_ucred);
2097 vput(vp);
2098 }
2099 return (error);
2100}
2101
2102/*
2103 * chflags(char *path, int flags)
2104 *
2105 * Change flags of a file given a path name.
2106 */
2107/* ARGSUSED */
2108int
2109chflags(struct chflags_args *uap)
2110{
2111 struct nlookupdata nd;
2112 struct vnode *vp;
2113 int error;
2114
2115 vp = NULL;
2116 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2117 /* XXX Add NLC flag indicating modifying operation? */
2118 if (error == 0)
2119 error = nlookup(&nd);
2120 if (error == 0)
2121 error = cache_vref(nd.nl_ncp, nd.nl_cred, &vp);
2122 nlookup_done(&nd);
2123 if (error == 0) {
2124 error = setfflags(vp, uap->flags);
2125 vrele(vp);
2126 }
2127 return (error);
2128}
2129
2130/*
2131 * fchflags_args(int fd, int flags)
2132 *
2133 * Change flags of a file given a file descriptor.
2134 */
2135/* ARGSUSED */
2136int
2137fchflags(struct fchflags_args *uap)
2138{
2139 struct thread *td = curthread;
2140 struct proc *p = td->td_proc;
2141 struct file *fp;
2142 int error;
2143
2144 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0)
2145 return (error);
2146 error = setfflags((struct vnode *) fp->f_data, uap->flags);
2147 fdrop(fp);
2148 return (error);
2149}
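/*
 * Editor's sketch (not part of vfs_syscalls.c): chflags(2) through
 * setfflags() above.  UF_NODUMP is a user-settable flag; SF_* flags and
 * flags on devices require root.  The path is illustrative.
 */
#if 0	/* illustrative example, excluded from the kernel build */
#include <sys/stat.h>
#include <unistd.h>
#include <err.h>

int
main(void)
{
	if (chflags("/tmp/example", UF_NODUMP) == -1)
		err(1, "chflags");
	return (0);
}
#endif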
2150
2151static int
2152setfmode(struct vnode *vp, int mode)
2153{
2154 struct thread *td = curthread;
2155 struct proc *p = td->td_proc;
2156 int error;
2157 struct vattr vattr;
2158
2159 /*
2160 * note: vget is required for any operation that might mod the vnode
2161 * so VINACTIVE is properly cleared.
2162 */
2163 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
2164 VATTR_NULL(&vattr);
2165 vattr.va_mode = mode & ALLPERMS;
2166 error = VOP_SETATTR(vp, &vattr, p->p_ucred);
2167 vput(vp);
2168 }
2169 return error;
2170}
2171
2172int
2173kern_chmod(struct nlookupdata *nd, int mode)
2174{
2175 struct vnode *vp;
2176 int error;
2177
2178 /* XXX Add NLC flag indicating modifying operation? */
2179 if ((error = nlookup(nd)) != 0)
2180 return (error);
2181 if ((error = cache_vref(nd->nl_ncp, nd->nl_cred, &vp)) != 0)
2182 return (error);
2183 error = setfmode(vp, mode);
2184 vrele(vp);
2185 return (error);
2186}
2187
2188/*
2189 * chmod_args(char *path, int mode)
2190 *
2191 * Change mode of a file given path name.
2192 */
2193/* ARGSUSED */
2194int
2195chmod(struct chmod_args *uap)
2196{
2197 struct nlookupdata nd;
2198 int error;
2199
2200 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2201 if (error == 0)
2202 error = kern_chmod(&nd, uap->mode);
2203 nlookup_done(&nd);
2204 return (error);
2205}
2206
2207/*
2208 * lchmod_args(char *path, int mode)
2209 *
2210 * Change mode of a file given path name (don't follow links.)
2211 */
2212/* ARGSUSED */
2213int
2214lchmod(struct lchmod_args *uap)
2215{
2216 struct nlookupdata nd;
2217 int error;
2218
2219 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2220 if (error == 0)
2221 error = kern_chmod(&nd, uap->mode);
2222 nlookup_done(&nd);
2223 return (error);
2224}
2225
2226/*
2227 * fchmod_args(int fd, int mode)
2228 *
2229 * Change mode of a file given a file descriptor.
2230 */
2231/* ARGSUSED */
2232int
2233fchmod(struct fchmod_args *uap)
2234{
2235 struct thread *td = curthread;
2236 struct proc *p = td->td_proc;
2237 struct file *fp;
2238 int error;
2239
2240 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0)
2241 return (error);
2242 error = setfmode((struct vnode *)fp->f_data, uap->mode);
2243 fdrop(fp);
2244 return (error);
2245}
2246
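/*
 * setfown(vp, uid, gid)
 *
 * Common backend for the chown-family syscalls: apply new ownership
 * to the vnode via VOP_SETATTR().
 */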
2247static int
2248setfown(struct vnode *vp, uid_t uid, gid_t gid)
2249{
2250 struct thread *td = curthread;
2251 struct proc *p = td->td_proc;
2252 int error;
2253 struct vattr vattr;
2254
2255 /*
2256 * note: vget is required for any operation that might mod the vnode
2257 * so VINACTIVE is properly cleared.
2258 */
2259 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
2260 VATTR_NULL(&vattr);
2261 vattr.va_uid = uid;
2262 vattr.va_gid = gid;
2263 error = VOP_SETATTR(vp, &vattr, p->p_ucred);
2264 vput(vp);
2265 }
2266 return error;
2267}
2268
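/*
 * kern_chown(nd, uid, gid)
 *
 * Backend for chown() and lchown().  Resolves the path, references the
 * vnode, and hands it to setfown().
 */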
2269int
2270kern_chown(struct nlookupdata *nd, int uid, int gid)
2271{
2272 struct vnode *vp;
2273 int error;
2274
2275 /* XXX Add NLC flag indicating modifying operation? */
2276 if ((error = nlookup(nd)) != 0)
2277 return (error);
2278 if ((error = cache_vref(nd->nl_ncp, nd->nl_cred, &vp)) != 0)
2279 return (error);
2280 error = setfown(vp, uid, gid);
2281 vrele(vp);
2282 return (error);
2283}
2284
2285/*
2286 * chown(char *path, int uid, int gid)
2287 *
2288 * Set ownership given a path name.
2289 */
2290int
2291chown(struct chown_args *uap)
2292{
2293 struct nlookupdata nd;
2294 int error;
2295
2296 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2297 if (error == 0)
2298 error = kern_chown(&nd, uap->uid, uap->gid);
2299 nlookup_done(&nd);
2300 return (error);
2301}
2302
2303/*
2304 * lchown_args(char *path, int uid, int gid)
2305 *
2306 * Set ownership given a path name; do not cross symlinks.
2307 */
2308int
2309lchown(struct lchown_args *uap)
2310{
2311 struct nlookupdata nd;
2312 int error;
2313
2314 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2315 if (error == 0)
2316 error = kern_chown(&nd, uap->uid, uap->gid);
2317 nlookup_done(&nd);
2318 return (error);
2319}
2320
2321/*
2322 * fchown_args(int fd, int uid, int gid)
2323 *
2324 * Set ownership given a file descriptor.
2325 */
2326/* ARGSUSED */
2327int
2328fchown(struct fchown_args *uap)
2329{
2330 struct thread *td = curthread;
2331 struct proc *p = td->td_proc;
2332 struct file *fp;
2333 int error;
2334
2335 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0)
2336 return (error);
2337 error = setfown((struct vnode *)fp->f_data, uap->uid, uap->gid);
2338 fdrop(fp);
2339 return (error);
2340}
2341
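/*
 * getutimes(tvp, tsp)
 *
 * Convert a user-supplied timeval pair into a timespec pair.  A NULL
 * tvp means "use the current time" for both the access and
 * modification timestamps.
 */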
2342static int
2343getutimes(const struct timeval *tvp, struct timespec *tsp)
2344{
2345 struct timeval tv[2];
2346
2347 if (tvp == NULL) {
2348 microtime(&tv[0]);
2349 TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]);
2350 tsp[1] = tsp[0];
2351 } else {
2352 TIMEVAL_TO_TIMESPEC(&tvp[0], &tsp[0]);
2353 TIMEVAL_TO_TIMESPEC(&tvp[1], &tsp[1]);
2354 }
2355 return 0;
2356}
2357
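/*
 * setutimes(vp, ts, nullflag)
 *
 * Apply access and modification times to a vnode.  nullflag indicates
 * that the caller passed a NULL timeval pointer, in which case
 * VA_UTIMES_NULL is set in the vattr for the filesystem's benefit.
 */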
2358static int
2359setutimes(struct vnode *vp, const struct timespec *ts, int nullflag)
2360{
2361 struct thread *td = curthread;
2362 struct proc *p = td->td_proc;
2363 int error;
2364 struct vattr vattr;
2365
2366 /*
2367 * note: vget is required for any operation that might mod the vnode
2368 * so VINACTIVE is properly cleared.
2369 */
2370 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
2371 VATTR_NULL(&vattr);
2372 vattr.va_atime = ts[0];
2373 vattr.va_mtime = ts[1];
2374 if (nullflag)
2375 vattr.va_vaflags |= VA_UTIMES_NULL;
2376 error = VOP_SETATTR(vp, &vattr, p->p_ucred);
2377 vput(vp);
2378 }
2379 return error;
2380}
2381
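/*
 * kern_utimes(nd, tptr)
 *
 * Backend for utimes() and lutimes().  Converts the timevals, resolves
 * the path, references the vnode, and calls setutimes().
 */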
2382int
2383kern_utimes(struct nlookupdata *nd, struct timeval *tptr)
2384{
2385 struct timespec ts[2];
2386 struct vnode *vp;
2387 int error;
2388
2389 if ((error = getutimes(tptr, ts)) != 0)
2390 return (error);
2391 /* XXX Add NLC flag indicating modifying operation? */
2392 if ((error = nlookup(nd)) != 0)
2393 return (error);
2394 if ((error = cache_vref(nd->nl_ncp, nd->nl_cred, &vp)) != 0)
2395 return (error);
2396 error = setutimes(vp, ts, tptr == NULL);
2397 vrele(vp);
2398 return (error);
2399}
2400
2401/*
2402 * utimes_args(char *path, struct timeval *tptr)
2403 *
2404 * Set the access and modification times of a file.
2405 */
2406int
2407utimes(struct utimes_args *uap)
2408{
2409 struct timeval tv[2];
2410 struct nlookupdata nd;
2411 int error;
2412
2413 if (uap->tptr) {
2414 error = copyin(uap->tptr, tv, sizeof(tv));
2415 if (error)
2416 return (error);
2417 }
2418 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2419 if (error == 0)
2420 error = kern_utimes(&nd, uap->tptr ? tv : NULL);
2421 nlookup_done(&nd);
2422 return (error);
2423}
2424
2425/*
2426 * lutimes_args(char *path, struct timeval *tptr)
2427 *
2428 * Set the access and modification times of a file (don't follow symlinks).
2429 */
2430int
2431lutimes(struct lutimes_args *uap)
2432{
2433 struct timeval tv[2];
2434 struct nlookupdata nd;
2435 int error;
2436
2437 if (uap->tptr) {
2438 error = copyin(uap->tptr, tv, sizeof(tv));
2439 if (error)
2440 return (error);
2441 }
2442 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2443 if (error == 0)
2444 error = kern_utimes(&nd, uap->tptr ? tv : NULL);
2445 nlookup_done(&nd);
2446 return (error);
2447}
2448
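/*
 * kern_futimes(fd, tptr)
 *
 * Backend for futimes().  Applies the timestamps to the vnode
 * underlying the file descriptor.
 */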
2449int
2450kern_futimes(int fd, struct timeval *tptr)
2451{
2452 struct thread *td = curthread;
2453 struct proc *p = td->td_proc;
2454 struct timespec ts[2];
2455 struct file *fp;
2456 int error;
2457
2458 error = getutimes(tptr, ts);
2459 if (error)
2460 return (error);
2461 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
2462 return (error);
2463 error = setutimes((struct vnode *)fp->f_data, ts, tptr == NULL);
2464 fdrop(fp);
2465 return (error);
2466}
2467
2468/*
2469 * futimes_args(int fd, struct timeval *tptr)
2470 *
2471 * Set the access and modification times of a file.
2472 */
2473int
2474futimes(struct futimes_args *uap)
2475{
2476 struct timeval tv[2];
2477 int error;
2478
2479 if (uap->tptr) {
2480 error = copyin(uap->tptr, tv, sizeof(tv));
2481 if (error)
2482 return (error);
2483 }
2484
2485 error = kern_futimes(uap->fd, uap->tptr ? tv : NULL);
2486
2487 return (error);
2488}
2489
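/*
 * kern_truncate(nd, length)
 *
 * Backend for truncate().  The vnode is resolved, locked exclusively,
 * and checked for writability before its size is changed with
 * VOP_SETATTR().  Directories cannot be truncated (EISDIR).
 */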
2490int
2491kern_truncate(struct nlookupdata *nd, off_t length)
2492{
2493 struct vnode *vp;
2494 struct vattr vattr;
2495 int error;
2496
2497 if (length < 0)
2498 return(EINVAL);
2499 /* XXX Add NLC flag indicating modifying operation? */
2500 if ((error = nlookup(nd)) != 0)
2501 return (error);
2502 if ((error = cache_vref(nd->nl_ncp, nd->nl_cred, &vp)) != 0)
2503 return (error);
2504 if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) != 0) {
2505 vrele(vp);
2506 return (error);
2507 }
2508 if (vp->v_type == VDIR) {
2509 error = EISDIR;
2510 } else if ((error = vn_writechk(vp)) == 0 &&
2511 (error = VOP_ACCESS(vp, VWRITE, nd->nl_cred)) == 0) {
2512 VATTR_NULL(&vattr);
2513 vattr.va_size = length;
2514 error = VOP_SETATTR(vp, &vattr, nd->nl_cred);
2515 }
2516 vput(vp);
2517 return (error);
2518}
2519
2520/*
2521 * truncate(char *path, int pad, off_t length)
2522 *
2523 * Truncate a file given its path name.
2524 */
2525int
2526truncate(struct truncate_args *uap)
2527{
2528 struct nlookupdata nd;
2529 int error;
2530
2531 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2532 if (error == 0)
2533 error = kern_truncate(&nd, uap->length);
2534 nlookup_done(&nd);
2535 return error;
2536}
2537
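/*
 * kern_ftruncate(fd, length)
 *
 * Backend for ftruncate().  The descriptor must have been opened for
 * writing (FWRITE) and may not reference a directory.
 */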
2538int
2539kern_ftruncate(int fd, off_t length)
2540{
2541 struct thread *td = curthread;
2542 struct proc *p = td->td_proc;
2543 struct vattr vattr;
2544 struct vnode *vp;
2545 struct file *fp;
2546 int error;
2547
2548 if (length < 0)
2549 return(EINVAL);
2550 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
2551 return (error);
2552 if ((fp->f_flag & FWRITE) == 0) {
2553 error = EINVAL;
2554 goto done;
2555 }
2556 vp = (struct vnode *)fp->f_data;
2557 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2558 if (vp->v_type == VDIR) {
2559 error = EISDIR;
2560 } else if ((error = vn_writechk(vp)) == 0) {
2561 VATTR_NULL(&vattr);
2562 vattr.va_size = length;
2563 error = VOP_SETATTR(vp, &vattr, fp->f_cred);
2564 }
2565 VOP_UNLOCK(vp, 0);
2566done:
2567 fdrop(fp);
2568 return (error);
2569}
2570
2571/*
2572 * ftruncate_args(int fd, int pad, off_t length)
2573 *
2574 * Truncate a file given a file descriptor.
2575 */
2576int
2577ftruncate(struct ftruncate_args *uap)
2578{
2579 int error;
2580
2581 error = kern_ftruncate(uap->fd, uap->length);
2582
2583 return (error);
2584}
2585
2586/*
2587 * fsync(int fd)
2588 *
2589 * Sync an open file.
2590 */
2591/* ARGSUSED */
2592int
2593fsync(struct fsync_args *uap)
2594{
2595 struct thread *td = curthread;
2596 struct proc *p = td->td_proc;
2597 struct vnode *vp;
2598 struct file *fp;
2599 vm_object_t obj;
2600 int error;
2601
2602 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0)
2603 return (error);
2604 vp = (struct vnode *)fp->f_data;
2605 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2606 if ((obj = vp->v_object) != NULL)
2607 vm_object_page_clean(obj, 0, 0, 0);
2608 if ((error = VOP_FSYNC(vp, MNT_WAIT)) == 0 &&
2609 vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP) &&
2610 bioops.io_fsync) {
2611 error = (*bioops.io_fsync)(vp);
2612 }
2613 VOP_UNLOCK(vp, 0);
2614 fdrop(fp);
2615 return (error);
2616}
2617
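/*
 * kern_rename(fromnd, tond)
 *
 * Backend for rename().  Resolves both paths, revalidates the parent
 * directory linkages after relocking, enforces the same-filesystem and
 * directory/non-directory rules, and finally issues VOP_NRENAME(), or
 * VOP_NREMOVE() of the source when both names turn out to be hardlinks
 * to the same vnode.
 */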
2618int
2619kern_rename(struct nlookupdata *fromnd, struct nlookupdata *tond)
2620{
2621 struct namecache *fncpd;
2622 struct namecache *tncpd;
2623 struct namecache *ncp;
2624 struct mount *mp;
2625 int error;
2626
2627 bwillwrite();
2628 if ((error = nlookup(fromnd)) != 0)
2629 return (error);
2630 if ((fncpd = fromnd->nl_ncp->nc_parent) == NULL)
2631 return (ENOENT);
2632 cache_hold(fncpd);
2633
2634 /*
2635 * unlock the source ncp so we can lookup the target ncp without
2636 * deadlocking. The target may or may not exist so we do not check
2637 * for a target vp like kern_mkdir() and other creation functions do.
2638 *
2639 * The source and target directories are ref'd and rechecked after
2640 * everything is relocked to determine if the source or target file
2641 * has been renamed.
2642 */
2643 KKASSERT(fromnd->nl_flags & NLC_NCPISLOCKED);
2644 fromnd->nl_flags &= ~NLC_NCPISLOCKED;
2645 cache_unlock(fromnd->nl_ncp);
2646
2647 tond->nl_flags |= NLC_CREATE;
2648 if ((error = nlookup(tond)) != 0) {
2649 cache_drop(fncpd);
2650 return (error);
2651 }
2652 if ((tncpd = tond->nl_ncp->nc_parent) == NULL) {
2653 cache_drop(fncpd);
2654 return (ENOENT);
2655 }
2656 cache_hold(tncpd);
2657
2658 /*
2659	 * If the source and target are the same, there is nothing to do
2660 */
2661 if (fromnd->nl_ncp == tond->nl_ncp) {
2662 cache_drop(fncpd);
2663 cache_drop(tncpd);
2664 return (0);
2665 }
2666
2667 /*
2668 * relock the source ncp. NOTE AFTER RELOCKING: the source ncp
2669 * may have become invalid while it was unlocked, nc_vp and nc_mount
2670 * could be NULL.
2671 */
2672 if (cache_lock_nonblock(fromnd->nl_ncp) == 0) {
2673 cache_resolve(fromnd->nl_ncp, fromnd->nl_cred);
2674 } else if (fromnd->nl_ncp > tond->nl_ncp) {
2675 cache_lock(fromnd->nl_ncp);
2676 cache_resolve(fromnd->nl_ncp, fromnd->nl_cred);
2677 } else {
2678 cache_unlock(tond->nl_ncp);
2679 cache_lock(fromnd->nl_ncp);
2680 cache_resolve(fromnd->nl_ncp, fromnd->nl_cred);
2681 cache_lock(tond->nl_ncp);
2682 cache_resolve(tond->nl_ncp, tond->nl_cred);
2683 }
2684 fromnd->nl_flags |= NLC_NCPISLOCKED;
2685
2686 /*
2687 * make sure the parent directories linkages are the same
2688 */
2689 if (fncpd != fromnd->nl_ncp->nc_parent ||
2690 tncpd != tond->nl_ncp->nc_parent) {
2691 cache_drop(fncpd);
2692 cache_drop(tncpd);
2693 return (ENOENT);
2694 }
2695
2696 /*
2697 * Both the source and target must be within the same filesystem and
2698 * in the same filesystem as their parent directories within the
2699 * namecache topology.
2700 *
2701 * NOTE: fromnd's nc_mount or nc_vp could be NULL.
2702 */
2703 mp = fncpd->nc_mount;
2704 if (mp != tncpd->nc_mount || mp != fromnd->nl_ncp->nc_mount ||
2705 mp != tond->nl_ncp->nc_mount) {
2706 cache_drop(fncpd);
2707 cache_drop(tncpd);
2708 return (EXDEV);
2709 }
2710
2711 /*
2712 * If the target exists and either the source or target is a directory,
2713 * then both must be directories.
2714 *
2715 * Due to relocking of the source, fromnd->nl_ncp->nc_vp might have
2716 * become NULL.
2717 */
2718 if (tond->nl_ncp->nc_vp) {
2719 if (fromnd->nl_ncp->nc_vp == NULL) {
2720 error = ENOENT;
2721 } else if (fromnd->nl_ncp->nc_vp->v_type == VDIR) {
2722 if (tond->nl_ncp->nc_vp->v_type != VDIR)
2723 error = ENOTDIR;
2724 } else if (tond->nl_ncp->nc_vp->v_type == VDIR) {
2725 error = EISDIR;
2726 }
2727 }
2728
2729 /*
2730 * You cannot rename a source into itself or a subdirectory of itself.
2731	 * We check this by traversing the target directory upwards looking
2732 * for a match against the source.
2733 */
2734 if (error == 0) {
2735 for (ncp = tncpd; ncp; ncp = ncp->nc_parent) {
2736 if (fromnd->nl_ncp == ncp) {
2737 error = EINVAL;
2738 break;
2739 }
2740 }
2741 }
2742
2743 cache_drop(fncpd);
2744 cache_drop(tncpd);
2745
2746 /*
2747 * Even though the namespaces are different, they may still represent
2748 * hardlinks to the same file. The filesystem might have a hard time
2749	 * with this, so we issue an NREMOVE of the source instead of an NRENAME
2750 * when we detect the situation.
2751 */
2752 if (error == 0) {
2753 if (fromnd->nl_ncp->nc_vp == tond->nl_ncp->nc_vp) {
2754 error = VOP_NREMOVE(fromnd->nl_ncp, fromnd->nl_cred);
2755 } else {
2756 error = VOP_NRENAME(fromnd->nl_ncp, tond->nl_ncp,
2757 tond->nl_cred);
2758 }
2759 }
2760 return (error);
2761}
2762
2763/*
2764 * rename_args(char *from, char *to)
2765 *
2766 * Rename files. Source and destination must either both be directories,
2767 * or both not be directories. If target is a directory, it must be empty.
2768 */
2769int
2770rename(struct rename_args *uap)
2771{
2772 struct nlookupdata fromnd, tond;
2773 int error;
2774
2775 error = nlookup_init(&fromnd, uap->from, UIO_USERSPACE, 0);
2776 if (error == 0) {
2777 error = nlookup_init(&tond, uap->to, UIO_USERSPACE, 0);
2778 if (error == 0)
2779 error = kern_rename(&fromnd, &tond);
2780 nlookup_done(&tond);
2781 }
2782 nlookup_done(&fromnd);
2783 return (error);
2784}
2785
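/*
 * kern_mkdir(nd, mode)
 *
 * Backend for mkdir().  The lookup is performed with NLC_CREATE set.
 * An existing target returns EEXIST, otherwise VOP_NMKDIR() creates
 * the directory with the requested mode masked by the process cmask.
 */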
2786int
2787kern_mkdir(struct nlookupdata *nd, int mode)
2788{
2789 struct thread *td = curthread;
2790 struct proc *p = td->td_proc;
2791 struct namecache *ncp;
2792 struct vnode *vp;
2793 struct vattr vattr;
2794 int error;
2795
2796 bwillwrite();
2797 nd->nl_flags |= NLC_WILLBEDIR | NLC_CREATE;
2798 if ((error = nlookup(nd)) != 0)
2799 return (error);
2800
2801 ncp = nd->nl_ncp;
2802 if (ncp->nc_vp)
2803 return (EEXIST);
2804
2805 VATTR_NULL(&vattr);
2806 vattr.va_type = VDIR;
2807 vattr.va_mode = (mode & ACCESSPERMS) &~ p->p_fd->fd_cmask;
2808
2809 vp = NULL;
2810 error = VOP_NMKDIR(ncp, &vp, p->p_ucred, &vattr);
2811 if (error == 0)
2812 vput(vp);
2813 return (error);
2814}
2815
2816/*
2817 * mkdir_args(char *path, int mode)
2818 *
2819 * Make a directory file.
2820 */
2821/* ARGSUSED */
2822int
2823mkdir(struct mkdir_args *uap)
2824{
2825 struct nlookupdata nd;
2826 int error;
2827
2828 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2829 if (error == 0)
2830 error = kern_mkdir(&nd, uap->mode);
2831 nlookup_done(&nd);
2832 return (error);
2833}
2834
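/*
 * kern_rmdir(nd)
 *
 * Backend for rmdir().  The lookup is performed with NLC_DELETE set
 * and the removal is handed to VOP_NRMDIR().
 */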
2835int
2836kern_rmdir(struct nlookupdata *nd)
2837{
2838 struct namecache *ncp;
2839 int error;
2840
2841 bwillwrite();
2842 nd->nl_flags |= NLC_DELETE;
2843 if ((error = nlookup(nd)) != 0)
2844 return (error);
2845
2846 ncp = nd->nl_ncp;
2847 error = VOP_NRMDIR(ncp, nd->nl_cred);
2848 return (error);
2849}
2850
2851/*
2852 * rmdir_args(char *path)
2853 *
2854 * Remove a directory file.
2855 */
2856/* ARGSUSED */
2857int
2858rmdir(struct rmdir_args *uap)
2859{
2860 struct nlookupdata nd;
2861 int error;
2862
2863 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2864 if (error == 0)
2865 error = kern_rmdir(&nd);
2866 nlookup_done(&nd);
2867 return (error);
2868}
2869
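/*
 * kern_getdirentries(fd, buf, count, basep, res, direction)
 *
 * Common backend for getdirentries() and getdents().  Reads directory
 * entries from the descriptor's vnode via VOP_READDIR(), transparently
 * falling through union mounts.  If basep is non-NULL it receives the
 * seek offset prior to the read; *res receives the number of bytes
 * transferred.
 */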
2870int
2871kern_getdirentries(int fd, char *buf, u_int count, long *basep, int *res,
2872 enum uio_seg direction)
2873{
2874 struct thread *td = curthread;
2875 struct proc *p = td->td_proc;
2876 struct vnode *vp;
2877 struct file *fp;
2878 struct uio auio;
2879 struct iovec aiov;
2880 long loff;
2881 int error, eofflag;
2882
2883 if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
2884 return (error);
2885 if ((fp->f_flag & FREAD) == 0) {
2886 error = EBADF;
2887 goto done;
2888 }
2889 vp = (struct vnode *)fp->f_data;
2890unionread:
2891 if (vp->v_type != VDIR) {
2892 error = EINVAL;
2893 goto done;
2894 }
2895 aiov.iov_base = buf;
2896 aiov.iov_len = count;
2897 auio.uio_iov = &aiov;
2898 auio.uio_iovcnt = 1;
2899 auio.uio_rw = UIO_READ;
2900 auio.uio_segflg = direction;
2901 auio.uio_td = td;
2902 auio.uio_resid = count;
2903 /* vn_lock(vp, LK_SHARED | LK_RETRY); */
2904 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2905 loff = auio.uio_offset = fp->f_offset;
2906 error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL);
2907 fp->f_offset = auio.uio_offset;
2908 VOP_UNLOCK(vp, 0);
2909 if (error)
2910 goto done;
2911 if (count == auio.uio_resid) {
2912 if (union_dircheckp) {
2913 error = union_dircheckp(td, &vp, fp);
2914 if (error == -1)
2915 goto unionread;
2916 if (error)
2917 goto done;
2918 }
2919 if ((vp->v_flag & VROOT) &&
2920 (vp->v_mount->mnt_flag & MNT_UNION)) {
2921 struct vnode *tvp = vp;
2922 vp = vp->v_mount->mnt_vnodecovered;
2923 vref(vp);
2924 fp->f_data = vp;
2925 fp->f_offset = 0;
2926 vrele(tvp);
2927 goto unionread;
2928 }
2929 }
2930 if (basep) {
2931 *basep = loff;
2932 }
2933 *res = count - auio.uio_resid;
2934done:
2935 fdrop(fp);
2936 return (error);
2937}
2938
2939/*
2940 * getdirentries_args(int fd, char *buf, u_int count, long *basep)
2941 *
2942 * Read a block of directory entries in a file system independent format.
2943 */
2944int
2945getdirentries(struct getdirentries_args *uap)
2946{
2947 long base;
2948 int error;
2949
2950 error = kern_getdirentries(uap->fd, uap->buf, uap->count, &base,
2951 &uap->sysmsg_result, UIO_USERSPACE);
2952
2953 if (error == 0)
2954 error = copyout(&base, uap->basep, sizeof(*uap->basep));
2955 return (error);
2956}
2957
2958/*
2959 * getdents_args(int fd, char *buf, size_t count)
2960 */
2961int
2962getdents(struct getdents_args *uap)
2963{
2964 int error;
2965
2966 error = kern_getdirentries(uap->fd, uap->buf, uap->count, NULL,
2967 &uap->sysmsg_result, UIO_USERSPACE);
2968
2969 return (error);
2970}
2971
2972/*
2973 * umask(int newmask)
2974 *
2975 * Set the mode mask for creation of filesystem nodes.
2976 *
2977 * MP SAFE
2978 */
2979int
2980umask(struct umask_args *uap)
2981{
2982 struct thread *td = curthread;
2983 struct proc *p = td->td_proc;
2984 struct filedesc *fdp;
2985
2986 fdp = p->p_fd;
2987 uap->sysmsg_result = fdp->fd_cmask;
2988 fdp->fd_cmask = uap->newmask & ALLPERMS;
2989 return (0);
2990}
2991
2992/*
2993 * revoke(char *path)
2994 *
2995 * Void all references to the file by ripping the underlying
2996 * filesystem away from the vnode.
2997 */
2998/* ARGSUSED */
2999int
3000revoke(struct revoke_args *uap)
3001{
3002 struct nlookupdata nd;
3003 struct vattr vattr;
3004 struct vnode *vp;
3005 struct ucred *cred;
3006 int error;
3007
3008 vp = NULL;
3009 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3010 if (error == 0)
3011 error = nlookup(&nd);
3012 if (error == 0)
3013 error = cache_vref(nd.nl_ncp, nd.nl_cred, &vp);
3014 cred = crhold(nd.nl_cred);
3015 nlookup_done(&nd);
3016 if (error == 0) {
3017 if (vp->v_type != VCHR && vp->v_type != VBLK)
3018 error = EINVAL;
3019 if (error == 0)
3020 error = VOP_GETATTR(vp, &vattr);
3021 if (error == 0 && cred->cr_uid != vattr.va_uid)
3022 error = suser_cred(cred, PRISON_ROOT);
3023 if (error == 0 && count_udev(vp->v_udev) > 0) {
3024 if ((error = vx_lock(vp)) == 0) {
3025 VOP_REVOKE(vp, REVOKEALL);
3026 vx_unlock(vp);
3027 }
3028 }
3029 vrele(vp);
3030 }
3031 if (cred)
3032 crfree(cred);
3033 return (error);
3034}
3035
3036/*
3037 * getfh_args(char *fname, fhandle_t *fhp)
3038 *
3039 * Get (NFS) file handle
3040 */
3041int
3042getfh(struct getfh_args *uap)
3043{
3044 struct thread *td = curthread;
3045 struct nlookupdata nd;
3046 fhandle_t fh;
3047 struct vnode *vp;
3048 int error;
3049
3050 /*
3051 * Must be super user
3052 */
3053 if ((error = suser(td)) != 0)
3054 return (error);
3055
3056 vp = NULL;
3057 error = nlookup_init(&nd, uap->fname, UIO_USERSPACE, NLC_FOLLOW);
3058 if (error == 0)
3059 error = nlookup(&nd);
3060 if (error == 0)
3061 error = cache_vget(nd.nl_ncp, nd.nl_cred, LK_EXCLUSIVE, &vp);
3062 nlookup_done(&nd);
3063 if (error == 0) {
3064 bzero(&fh, sizeof(fh));
3065 fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid;
3066 error = VFS_VPTOFH(vp, &fh.fh_fid);
3067 vput(vp);
3068 if (error == 0)
3069 error = copyout(&fh, uap->fhp, sizeof(fh));
3070 }
3071 return (error);
3072}
3073
3074/*
3075 * fhopen_args(const struct fhandle *u_fhp, int flags)
3076 *
3077 * syscall for the rpc.lockd to use to translate an NFS file handle into
3078 * an open descriptor.
3079 *
3080 * warning: do not remove the suser() call or this becomes one giant
3081 * security hole.
3082 */
3083int
3084fhopen(struct fhopen_args *uap)
3085{
3086 struct thread *td = curthread;
3087 struct proc *p = td->td_proc;
3088 struct mount *mp;
3089 struct vnode *vp;
3090 struct fhandle fhp;
3091 struct vattr vat;
3092 struct vattr *vap = &vat;
3093 struct flock lf;
3094 int fmode, mode, error, type;
3095 struct file *nfp;
3096 struct file *fp;
3097 int indx;
3098
3099 /*
3100 * Must be super user
3101 */
3102 error = suser(td);
3103 if (error)
3104 return (error);
3105
3106 fmode = FFLAGS(uap->flags);
3107 /* why not allow a non-read/write open for our lockd? */
3108 if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT))
3109 return (EINVAL);
3110 error = copyin(uap->u_fhp, &fhp, sizeof(fhp));
3111 if (error)
3112 return(error);
3113 /* find the mount point */
3114 mp = vfs_getvfs(&fhp.fh_fsid);
3115 if (mp == NULL)
3116 return (ESTALE);
3117 /* now give me my vnode, it gets returned to me locked */
3118 error = VFS_FHTOVP(mp, &fhp.fh_fid, &vp);
3119 if (error)
3120 return (error);
3121 /*
3122	 * From now on we have to make sure not to forget about the vnode.
3123	 * Any error that causes an abort must vput(vp): just set error = err
3124	 * and 'goto bad;'.
3126 */
3127
3128 /*
3129 * from vn_open
3130 */
3131 if (vp->v_type == VLNK) {
3132 error = EMLINK;
3133 goto bad;
3134 }
3135 if (vp->v_type == VSOCK) {
3136 error = EOPNOTSUPP;
3137 goto bad;
3138 }
3139 mode = 0;
3140 if (fmode & (FWRITE | O_TRUNC)) {
3141 if (vp->v_type == VDIR) {
3142 error = EISDIR;
3143 goto bad;
3144 }
3145 error = vn_writechk(vp);
3146 if (error)
3147 goto bad;
3148 mode |= VWRITE;
3149 }
3150 if (fmode & FREAD)
3151 mode |= VREAD;
3152 if (mode) {
3153 error = VOP_ACCESS(vp, mode, p->p_ucred);
3154 if (error)
3155 goto bad;
3156 }
3157 if (fmode & O_TRUNC) {
3158 VOP_UNLOCK(vp, 0); /* XXX */
3159 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX */
3160 VATTR_NULL(vap);
3161 vap->va_size = 0;
3162 error = VOP_SETATTR(vp, vap, p->p_ucred);
3163 if (error)
3164 goto bad;
3165 }
3166
3167 /*
3168 * VOP_OPEN needs the file pointer so it can potentially override
3169 * it.
3170 *
3171 * WARNING! no f_ncp will be associated when fhopen()ing a directory.
3172 * XXX
3173 */
3174 if ((error = falloc(p, &nfp, &indx)) != 0)
3175 goto bad;
3176 fp = nfp;
3177
3178 error = VOP_OPEN(vp, fmode, p->p_ucred, fp);
3179 if (error) {
3180 /*
3181 * setting f_ops this way prevents VOP_CLOSE from being
3182 * called or fdrop() releasing the vp from v_data. Since
3183 * the VOP_OPEN failed we don't want to VOP_CLOSE.
3184 */
3185 fp->f_ops = &badfileops;
3186 fp->f_data = NULL;
3187 goto bad_drop;
3188 }
3189
3190 /*
3191	 * The fp is given its own reference; we still have our ref and lock.
3192 *
3193 * Assert that all regular files must be created with a VM object.
3194 */
3195 if (vp->v_type == VREG && vp->v_object == NULL) {
3196 printf("fhopen: regular file did not have VM object: %p\n", vp);
3197 goto bad_drop;
3198 }
3199
3200 /*
3201 * The open was successful. Handle any locking requirements.
3202 */
3203 if (fmode & (O_EXLOCK | O_SHLOCK)) {
3204 lf.l_whence = SEEK_SET;
3205 lf.l_start = 0;
3206 lf.l_len = 0;
3207 if (fmode & O_EXLOCK)
3208 lf.l_type = F_WRLCK;
3209 else
3210 lf.l_type = F_RDLCK;
3211 if (fmode & FNONBLOCK)
3212 type = 0;
3213 else
3214 type = F_WAIT;
3215 VOP_UNLOCK(vp, 0);
3216 if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
3217 /*
3218 * release our private reference.
3219 */
3220 fsetfd(p, NULL, indx);
3221 fdrop(fp);
3222 vrele(vp);
3223 return (error);
3224 }
3225 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3226 fp->f_flag |= FHASLOCK;
3227 }
3228
3229 /*
3230 * Clean up. Associate the file pointer with the previously
3231 * reserved descriptor and return it.
3232 */
3233 vput(vp);
3234 fsetfd(p, fp, indx);
3235 fdrop(fp);
3236 uap->sysmsg_result = indx;
3237 return (0);
3238
3239bad_drop:
3240 fsetfd(p, NULL, indx);
3241 fdrop(fp);
3242bad:
3243 vput(vp);
3244 return (error);
3245}
3246
3247/*
3248 * fhstat_args(struct fhandle *u_fhp, struct stat *sb)
3249 */
3250int
3251fhstat(struct fhstat_args *uap)
3252{
3253 struct thread *td = curthread;
3254 struct stat sb;
3255 fhandle_t fh;
3256 struct mount *mp;
3257 struct vnode *vp;
3258 int error;
3259
3260 /*
3261 * Must be super user
3262 */
3263 error = suser(td);
3264 if (error)
3265 return (error);
3266
3267 error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t));
3268 if (error)
3269 return (error);
3270
3271 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
3272 return (ESTALE);
3273 if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
3274 return (error);
3275 error = vn_stat(vp, &sb, td->td_proc->p_ucred);
3276 vput(vp);
3277 if (error)
3278 return (error);
3279 error = copyout(&sb, uap->sb, sizeof(sb));
3280 return (error);
3281}
3282
3283/*
3284 * fhstatfs_args(struct fhandle *u_fhp, struct statfs *buf)
3285 */
3286int
3287fhstatfs(struct fhstatfs_args *uap)
3288{
3289 struct thread *td = curthread;
3290 struct proc *p = td->td_proc;
3291 struct statfs *sp;
3292 struct mount *mp;
3293 struct vnode *vp;
3294 struct statfs sb;
3295 char *fullpath, *freepath;
3296 fhandle_t fh;
3297 int error;
3298
3299 /*
3300 * Must be super user
3301 */
3302 if ((error = suser(td)))
3303 return (error);
3304
3305 if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0)
3306 return (error);
3307
3308 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
3309 return (ESTALE);
3310
3311 if (p != NULL && (p->p_fd->fd_nrdir->nc_flag & NCF_ROOT) == 0 &&
3312 !chroot_visible_mnt(mp, p))
3313 return (ESTALE);
3314
3315 if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
3316 return (error);
3317 mp = vp->v_mount;
3318 sp = &mp->mnt_stat;
3319 vput(vp);
3320 if ((error = VFS_STATFS(mp, sp, p->p_ucred)) != 0)
3321 return (error);
3322
3323 error = cache_fullpath(p, mp->mnt_ncp, &fullpath, &freepath);
3324 if (error)
3325 return(error);
3326 bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
3327 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
3328 free(freepath, M_TEMP);
3329
3330 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3331 if (suser(td)) {
3332 bcopy(sp, &sb, sizeof(sb));
3333 sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
3334 sp = &sb;
3335 }
3336 return (copyout(sp, uap->buf, sizeof(*sp)));
3337}
3338
3339/*
3340 * Syscall to push extended attribute configuration information into the
3341 * VFS. Accepts a path, which it converts to a mountpoint, as well as
3342 * a command (int cmd), an attribute name, and misc data.  For now, the
3343 * attribute name is left in userspace for consumption by the VFS_op.
3344 * It will probably be changed to be copied into kernel space by the
3345 * syscall in the future, once outstanding issues with the various
3346 * consumers of the attribute code have been resolved.
3347 *
3348 * Currently this is used only by UFS Extended Attributes.
3349 */
3350int
3351extattrctl(struct extattrctl_args *uap)
3352{
3353 struct nlookupdata nd;
3354 struct mount *mp;
3355 struct vnode *vp;
3356 int error;
3357
3358 vp = NULL;
3359 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3360 if (error == 0)
3361 error = nlookup(&nd);
3362 if (error == 0) {
3363 mp = nd.nl_ncp->nc_mount;
3364 error = VFS_EXTATTRCTL(mp, uap->cmd,
3365 uap->attrname, uap->arg,
3366 nd.nl_cred);
3367 }
3368 nlookup_done(&nd);
3369 return (error);
3370}
3371
3372/*
3373 * Syscall to set a named extended attribute on a file or directory.
3374 * Accepts attribute name, and a uio structure pointing to the data to set.
3375 * The uio is consumed in the style of writev(). The real work happens
3376 * in VOP_SETEXTATTR().
3377 */
3378int
3379extattr_set_file(struct extattr_set_file_args *uap)
3380{
3381 char attrname[EXTATTR_MAXNAMELEN];
3382 struct iovec aiov[UIO_SMALLIOV];
3383 struct iovec *needfree;
3384 struct nlookupdata nd;
3385 struct iovec *iov;
3386 struct vnode *vp;
3387 struct uio auio;
3388 u_int iovlen;
3389 u_int cnt;
3390 int error;
3391 int i;
3392
3393 error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
3394 if (error)
3395 return (error);
3396
3397 vp = NULL;
3398 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3399 if (error == 0)
3400 error = nlookup(&nd);
3401 if (error == 0)
3402 error = cache_vget(nd.nl_ncp, nd.nl_cred, LK_EXCLUSIVE, &vp);
3403 if (error) {
3404 nlookup_done(&nd);
3405 return (error);
3406 }
3407
3408 needfree = NULL;
3409 iovlen = uap->iovcnt * sizeof(struct iovec);
3410 if (uap->iovcnt > UIO_SMALLIOV) {
3411 if (uap->iovcnt > UIO_MAXIOV) {
3412 error = EINVAL;
3413 goto done;
3414 }
3415 MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
3416 needfree = iov;
3417 } else {
3418 iov = aiov;
3419 }
3420 auio.uio_iov = iov;
3421 auio.uio_iovcnt = uap->iovcnt;
3422 auio.uio_rw = UIO_WRITE;
3423 auio.uio_segflg = UIO_USERSPACE;
3424 auio.uio_td = nd.nl_td;
3425 auio.uio_offset = 0;
3426 if ((error = copyin(uap->iovp, iov, iovlen)))
3427 goto done;
3428 auio.uio_resid = 0;
3429 for (i = 0; i < uap->iovcnt; i++) {
3430 if (iov->iov_len > INT_MAX - auio.uio_resid) {
3431 error = EINVAL;
3432 goto done;
3433 }
3434 auio.uio_resid += iov->iov_len;
3435 iov++;
3436 }
3437 cnt = auio.uio_resid;
3438 error = VOP_SETEXTATTR(vp, attrname, &auio, nd.nl_cred);
3439 cnt -= auio.uio_resid;
3440 uap->sysmsg_result = cnt;
3441done:
3442 vput(vp);
3443 nlookup_done(&nd);
3444 if (needfree)
3445 FREE(needfree, M_IOV);
3446 return (error);
3447}
3448
3449/*
3450 * Syscall to get a named extended attribute on a file or directory.
3451 * Accepts attribute name, and a uio structure pointing to a buffer for the
3452 * data. The uio is consumed in the style of readv(). The real work
3453 * happens in VOP_GETEXTATTR().
3454 */
3455int
3456extattr_get_file(struct extattr_get_file_args *uap)
3457{
3458 char attrname[EXTATTR_MAXNAMELEN];
3459 struct iovec aiov[UIO_SMALLIOV];
3460 struct iovec *needfree;
3461 struct nlookupdata nd;
3462 struct iovec *iov;
3463 struct vnode *vp;
3464 struct uio auio;
3465 u_int iovlen;
3466 u_int cnt;
3467 int error;
3468 int i;
3469
3470 error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
3471 if (error)
3472 return (error);
3473
3474 vp = NULL;
3475 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3476 if (error == 0)
3477 error = nlookup(&nd);
3478 if (error == 0)
3479 error = cache_vget(nd.nl_ncp, nd.nl_cred, LK_EXCLUSIVE, &vp);
3480 if (error) {
3481 nlookup_done(&nd);
3482 return (error);
3483 }
3484
3485 iovlen = uap->iovcnt * sizeof (struct iovec);
3486 needfree = NULL;
3487 if (uap->iovcnt > UIO_SMALLIOV) {
3488 if (uap->iovcnt > UIO_MAXIOV) {
3489 error = EINVAL;
3490 goto done;
3491 }
3492 MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
3493 needfree = iov;
3494 } else {
3495 iov = aiov;
3496 }
3497 auio.uio_iov = iov;
3498 auio.uio_iovcnt = uap->iovcnt;
3499 auio.uio_rw = UIO_READ;
3500 auio.uio_segflg = UIO_USERSPACE;
3501 auio.uio_td = nd.nl_td;
3502 auio.uio_offset = 0;
3503 if ((error = copyin(uap->iovp, iov, iovlen)))
3504 goto done;
3505 auio.uio_resid = 0;
3506 for (i = 0; i < uap->iovcnt; i++) {
3507 if (iov->iov_len > INT_MAX - auio.uio_resid) {
3508 error = EINVAL;
3509 goto done;
3510 }
3511 auio.uio_resid += iov->iov_len;
3512 iov++;
3513 }
3514 cnt = auio.uio_resid;
3515 error = VOP_GETEXTATTR(vp, attrname, &auio, nd.nl_cred);
3516 cnt -= auio.uio_resid;
3517 uap->sysmsg_result = cnt;
3518done:
3519 vput(vp);
3520 nlookup_done(&nd);
3521 if (needfree)
3522 FREE(needfree, M_IOV);
3523 return(error);
3524}
3525
3526/*
3527 * Syscall to delete a named extended attribute from a file or directory.
3528 * Accepts attribute name. The real work happens in VOP_SETEXTATTR().
3529 */
3530int
3531extattr_delete_file(struct extattr_delete_file_args *uap)
3532{
3533 char attrname[EXTATTR_MAXNAMELEN];
3534 struct nlookupdata nd;
3535 struct vnode *vp;
3536 int error;
3537
3538 error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
3539 if (error)
3540 return(error);
3541
3542 vp = NULL;
3543 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3544 if (error == 0)
3545 error = nlookup(&nd);
3546 if (error == 0)
3547 error = cache_vget(nd.nl_ncp, nd.nl_cred, LK_EXCLUSIVE, &vp);
3548 if (error) {
3549 nlookup_done(&nd);
3550 return (error);
3551 }
3552
3553 error = VOP_SETEXTATTR(vp, attrname, NULL, nd.nl_cred);
3554 vput(vp);
3555 nlookup_done(&nd);
3556 return(error);
3557}
3558
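/*
 * chroot_visible_mnt(mp, p)
 *
 * Returns non-zero if the mount point is visible to the process's
 * chroot: either the filesystem lies below the chroot path, or the
 * chroot path itself resides on the same filesystem as the mount.
 */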
3559static int
3560chroot_visible_mnt(struct mount *mp, struct proc *p)
3561{
3562 struct namecache *ncp;
3563 /*
3564 * First check if this file system is below
3565 * the chroot path.
3566 */
3567 ncp = mp->mnt_ncp;
3568 while (ncp != NULL && ncp != p->p_fd->fd_nrdir)
3569 ncp = ncp->nc_parent;
3570 if (ncp == NULL) {
3571 /*
3572 * This is not below the chroot path.
3573 *
3574 * Check if the chroot path is on the same filesystem,
3575		 * by determining if we have to cross a mount point
3576 * before reaching mp->mnt_ncp.
3577 */
3578 ncp = p->p_fd->fd_nrdir;
3579 while (ncp != NULL && ncp != mp->mnt_ncp) {
3580 if (ncp->nc_flag & NCF_MOUNTPT) {
3581 ncp = NULL;
3582 break;
3583 }
3584 ncp = ncp->nc_parent;
3585 }
3586 }
3587 return(ncp != NULL);
3588}