Rename printf -> kprintf in sys/ and add some defines where necessary
[dragonfly.git] / sys / kern / vfs_syscalls.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)vfs_syscalls.c 8.13 (Berkeley) 4/15/94
39 * $FreeBSD: src/sys/kern/vfs_syscalls.c,v 1.151.2.18 2003/04/04 20:35:58 tegge Exp $
40 * $DragonFly: src/sys/kern/vfs_syscalls.c,v 1.110 2006/12/23 00:35:04 swildner Exp $
41 */
42
43#include <sys/param.h>
44#include <sys/systm.h>
45#include <sys/buf.h>
46#include <sys/conf.h>
47#include <sys/sysent.h>
48#include <sys/malloc.h>
49#include <sys/mount.h>
50#include <sys/mountctl.h>
51#include <sys/sysproto.h>
52#include <sys/filedesc.h>
53#include <sys/kernel.h>
54#include <sys/fcntl.h>
55#include <sys/file.h>
56#include <sys/linker.h>
57#include <sys/stat.h>
58#include <sys/unistd.h>
59#include <sys/vnode.h>
60#include <sys/proc.h>
61#include <sys/namei.h>
62#include <sys/nlookup.h>
63#include <sys/dirent.h>
64#include <sys/extattr.h>
65#include <sys/spinlock.h>
66#include <sys/kern_syscall.h>
67#include <sys/objcache.h>
68#include <sys/sysctl.h>
69#include <sys/file2.h>
70#include <sys/spinlock2.h>
71
72#include <vm/vm.h>
73#include <vm/vm_object.h>
74#include <vm/vm_page.h>
75
76#include <machine/limits.h>
77#include <machine/stdarg.h>
78
79#include <vfs/union/union.h>
80
81static void mount_warning(struct mount *mp, const char *ctl, ...);
82static int checkvp_chdir (struct vnode *vn, struct thread *td);
83static void checkdirs (struct vnode *olddp, struct nchandle *nch);
84static int chroot_refuse_vdir_fds (struct filedesc *fdp);
85static int chroot_visible_mnt(struct mount *mp, struct proc *p);
86static int getutimes (const struct timeval *, struct timespec *);
87static int setfown (struct vnode *, uid_t, gid_t);
88static int setfmode (struct vnode *, int);
89static int setfflags (struct vnode *, int);
90static int setutimes (struct vnode *, const struct timespec *, int);
91static int usermount = 0; /* if 1, non-root can mount fs. */
92
93int (*union_dircheckp) (struct thread *, struct vnode **, struct file *);
94
95SYSCTL_INT(_vfs, OID_AUTO, usermount, CTLFLAG_RW, &usermount, 0, "");
96
97/*
98 * Virtual File System System Calls
99 */
100
101/*
102 * Mount a file system.
103 */
104/*
105 * mount_args(char *type, char *path, int flags, caddr_t data)
106 */
107/* ARGSUSED */
int
sys_mount(struct mount_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct vnode *vp;
	struct nchandle nch;
	struct mount *mp;
	struct vfsconf *vfsp;
	int error, flag = 0, flag2 = 0;
	int hasmount;
	struct vattr va;
	struct nlookupdata nd;
	char fstypename[MFSNAMELEN];
	struct ucred *cred = p->p_ucred;

	KKASSERT(p);
	/* Mounting is never permitted from within a jail. */
	if (cred->cr_prison != NULL)
		return (EPERM);
	/* Non-root mounts require the vfs.usermount sysctl to be set. */
	if (usermount == 0 && (error = suser(td)))
		return (error);
	/*
	 * Do not allow NFS export by non-root users.
	 */
	if (uap->flags & MNT_EXPORTED) {
		error = suser(td);
		if (error)
			return (error);
	}
	/*
	 * Silently enforce MNT_NOSUID and MNT_NODEV for non-root users
	 */
	if (suser(td))
		uap->flags |= MNT_NOSUID | MNT_NODEV;

	/*
	 * Lookup the requested path and extract the nch and vnode.
	 */
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0) {
		if ((error = nlookup(&nd)) == 0) {
			if (nd.nl_nch.ncp->nc_vp == NULL)
				error = ENOENT;
		}
	}
	if (error) {
		nlookup_done(&nd);
		return (error);
	}

	/*
	 * Extract the locked+refd ncp and cleanup the nd structure
	 */
	nch = nd.nl_nch;
	cache_zero(&nd.nl_nch);
	nlookup_done(&nd);

	/* Remember whether something is already mounted on this point. */
	if ((nch.ncp->nc_flag & NCF_ISMOUNTPT) && cache_findmount(&nch))
		hasmount = 1;
	else
		hasmount = 0;


	/*
	 * now we have the locked ref'd nch and unreferenced vnode.
	 */
	vp = nch.ncp->nc_vp;
	if ((error = vget(vp, LK_EXCLUSIVE)) != 0) {
		cache_put(&nch);
		return (error);
	}
	cache_unlock(&nch);

	/*
	 * Now we have an unlocked ref'd nch and a locked ref'd vp
	 */
	if (uap->flags & MNT_UPDATE) {
		/* Updates must be applied to the root vnode of the mount. */
		if ((vp->v_flag & VROOT) == 0) {
			cache_drop(&nch);
			vput(vp);
			return (EINVAL);
		}
		mp = vp->v_mount;
		/* Save flags so they can be restored if the update fails. */
		flag = mp->mnt_flag;
		flag2 = mp->mnt_kern_flag;
		/*
		 * We only allow the filesystem to be reloaded if it
		 * is currently mounted read-only.
		 */
		if ((uap->flags & MNT_RELOAD) &&
		    ((mp->mnt_flag & MNT_RDONLY) == 0)) {
			cache_drop(&nch);
			vput(vp);
			return (EOPNOTSUPP);	/* Needs translation */
		}
		/*
		 * Only root, or the user that did the original mount is
		 * permitted to update it.
		 */
		if (mp->mnt_stat.f_owner != cred->cr_uid &&
		    (error = suser(td))) {
			cache_drop(&nch);
			vput(vp);
			return (error);
		}
		if (vfs_busy(mp, LK_NOWAIT)) {
			cache_drop(&nch);
			vput(vp);
			return (EBUSY);
		}
		/* Refuse if another mount is already in progress here. */
		if ((vp->v_flag & VMOUNT) != 0 || hasmount) {
			cache_drop(&nch);
			vfs_unbusy(mp);
			vput(vp);
			return (EBUSY);
		}
		vp->v_flag |= VMOUNT;
		mp->mnt_flag |=
		    uap->flags & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE);
		vn_unlock(vp);
		goto update;
	}
	/*
	 * If the user is not root, ensure that they own the directory
	 * onto which we are attempting to mount.
	 */
	if ((error = VOP_GETATTR(vp, &va)) ||
	    (va.va_uid != cred->cr_uid && (error = suser(td)))) {
		cache_drop(&nch);
		vput(vp);
		return (error);
	}
	/* Flush any dirty buffers on the covered vnode before mounting. */
	if ((error = vinvalbuf(vp, V_SAVE, 0, 0)) != 0) {
		cache_drop(&nch);
		vput(vp);
		return (error);
	}
	if (vp->v_type != VDIR) {
		cache_drop(&nch);
		vput(vp);
		return (ENOTDIR);
	}
	if ((error = copyinstr(uap->type, fstypename, MFSNAMELEN, NULL)) != 0) {
		cache_drop(&nch);
		vput(vp);
		return (error);
	}
	/* Locate the requested filesystem type in the vfsconf list. */
	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (!strcmp(vfsp->vfc_name, fstypename))
			break;
	}
	if (vfsp == NULL) {
		linker_file_t lf;

		/* Only load modules for root (very important!) */
		if ((error = suser(td)) != 0) {
			cache_drop(&nch);
			vput(vp);
			return error;
		}
		error = linker_load_file(fstypename, &lf);
		if (error || lf == NULL) {
			cache_drop(&nch);
			vput(vp);
			if (lf == NULL)
				error = ENODEV;
			return error;
		}
		lf->userrefs++;
		/* lookup again, see if the VFS was loaded */
		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
			if (!strcmp(vfsp->vfc_name, fstypename))
				break;
		}
		if (vfsp == NULL) {
			/* Module loaded but did not register the VFS. */
			lf->userrefs--;
			linker_file_unload(lf);
			cache_drop(&nch);
			vput(vp);
			return (ENODEV);
		}
	}
	if ((vp->v_flag & VMOUNT) != 0 || hasmount) {
		cache_drop(&nch);
		vput(vp);
		return (EBUSY);
	}
	/* Mark the vnode so concurrent mounts on it are rejected. */
	vp->v_flag |= VMOUNT;

	/*
	 * Allocate and initialize the filesystem.
	 */
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_ZERO|M_WAITOK);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	lockinit(&mp->mnt_lock, "vfslock", 0, 0);
	vfs_busy(mp, LK_NOWAIT);
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_vfc = vfsp;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	mp->mnt_stat.f_owner = cred->cr_uid;
	mp->mnt_iosize_max = DFLTPHYS;
	vn_unlock(vp);
update:
	/*
	 * Set the mount level flags.
	 */
	if (uap->flags & MNT_RDONLY)
		mp->mnt_flag |= MNT_RDONLY;
	else if (mp->mnt_flag & MNT_RDONLY)
		mp->mnt_kern_flag |= MNTK_WANTRDWR;
	mp->mnt_flag &=~ (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV |
	    MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_NOATIME |
	    MNT_NOSYMFOLLOW | MNT_IGNORE |
	    MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
	mp->mnt_flag |= uap->flags & (MNT_NOSUID | MNT_NOEXEC |
	    MNT_NODEV | MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | MNT_FORCE |
	    MNT_NOSYMFOLLOW | MNT_IGNORE |
	    MNT_NOATIME | MNT_NOCLUSTERR | MNT_NOCLUSTERW | MNT_SUIDDIR);
	/*
	 * Mount the filesystem.
	 * XXX The final recipients of VFS_MOUNT just overwrite the ndp they
	 * get.
	 */
	error = VFS_MOUNT(mp, uap->path, uap->data, cred);
	if (mp->mnt_flag & MNT_UPDATE) {
		if (mp->mnt_kern_flag & MNTK_WANTRDWR)
			mp->mnt_flag &= ~MNT_RDONLY;
		mp->mnt_flag &=~ (MNT_UPDATE | MNT_RELOAD | MNT_FORCE);
		mp->mnt_kern_flag &=~ MNTK_WANTRDWR;
		if (error) {
			/* Update failed: restore the saved flags. */
			mp->mnt_flag = flag;
			mp->mnt_kern_flag = flag2;
		}
		vfs_unbusy(mp);
		vp->v_flag &= ~VMOUNT;
		vrele(vp);
		cache_drop(&nch);
		return (error);
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	/*
	 * Put the new filesystem on the mount list after root.  The mount
	 * point gets its own mnt_ncmountpt (unless the VFS already set one
	 * up) which represents the root of the mount.  The lookup code
	 * detects the mount point going forward and checks the root of
	 * the mount going backwards.
	 *
	 * It is not necessary to invalidate or purge the vnode underneath
	 * because elements under the mount will be given their own glue
	 * namecache record.
	 */
	if (!error) {
		if (mp->mnt_ncmountpt.ncp == NULL) {
			/*
			 * allocate, then unlock, but leave the ref intact
			 */
			cache_allocroot(&mp->mnt_ncmountpt, mp, NULL);
			cache_unlock(&mp->mnt_ncmountpt);
		}
		mp->mnt_ncmounton = nch;		/* inherits ref */
		nch.ncp->nc_flag |= NCF_ISMOUNTPT;

		/* XXX get the root of the fs and cache_setvp(mnt_ncmountpt...) */
		vp->v_flag &= ~VMOUNT;
		mountlist_insert(mp, MNTINS_LAST);
		checkdirs(vp, &mp->mnt_ncmounton);
		vn_unlock(vp);
		/*
		 * NOTE(review): any error from vfs_allocate_syncvnode() is
		 * immediately overwritten by the VFS_START() result below --
		 * confirm that ignoring it is intentional.
		 */
		error = vfs_allocate_syncvnode(mp);
		vfs_unbusy(mp);
		error = VFS_START(mp, 0);
		vrele(vp);
	} else {
		/* Mount failed: tear down the partially constructed mount. */
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops);
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops);
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops);
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops);
		vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops);
		vp->v_flag &= ~VMOUNT;
		mp->mnt_vfc->vfc_refcount--;
		vfs_unbusy(mp);
		kfree(mp, M_MOUNT);
		cache_drop(&nch);
		vput(vp);
	}
	return (error);
}
400
401/*
402 * Scan all active processes to see if any of them have a current
403 * or root directory onto which the new filesystem has just been
404 * mounted. If so, replace them with the new mount point.
405 *
406 * The passed ncp is ref'd and locked (from the mount code) and
407 * must be associated with the vnode representing the root of the
408 * mount point.
409 */
/*
 * Argument bundle passed from checkdirs() to checkdirs_callback()
 * through allproc_scan().
 */
struct checkdirs_info {
	struct vnode *olddp;	/* covered vnode being replaced */
	struct vnode *newdp;	/* root vnode of the new mount */
	struct nchandle nch;	/* namecache handle for the new root */
};

static int checkdirs_callback(struct proc *p, void *data);
417
static void
checkdirs(struct vnode *olddp, struct nchandle *nch)
{
	struct checkdirs_info info;
	struct vnode *newdp;
	struct mount *mp;

	/*
	 * If our reference is the only one on the covered vnode, no
	 * process can have it as its cwd or root; nothing to do.
	 */
	if (olddp->v_usecount == 1)
		return;
	mp = nch->mount;
	if (VFS_ROOT(mp, &newdp))
		panic("mount: lost mount");
	/* Point the mount-point namecache entry at the new root vnode. */
	cache_setunresolved(nch);
	cache_setvp(nch, newdp);

	/* If the system root was mounted over, switch it as well. */
	if (rootvnode == olddp) {
		vref(newdp);
		vfs_cache_setroot(newdp, cache_hold(nch));
	}

	info.olddp = olddp;
	info.newdp = newdp;
	info.nch = *nch;
	allproc_scan(checkdirs_callback, &info);
	vput(newdp);	/* VFS_ROOT returned it locked+ref'd */
}
444
445/*
446 * NOTE: callback is not MP safe because the scanned process's filedesc
447 * structure can be ripped out from under us, amoung other things.
448 */
static int
checkdirs_callback(struct proc *p, void *data)
{
	struct checkdirs_info *info = data;
	struct filedesc *fdp;
	struct nchandle ncdrop1;
	struct nchandle ncdrop2;
	struct vnode *vprele1;
	struct vnode *vprele2;

	if ((fdp = p->p_fd) != NULL) {
		/*
		 * Collect the old references here and release them only
		 * after the spinlock has been dropped.
		 */
		cache_zero(&ncdrop1);
		cache_zero(&ncdrop2);
		vprele1 = NULL;
		vprele2 = NULL;

		/*
		 * MPUNSAFE - XXX fdp can be pulled out from under a
		 * foreign process.
		 *
		 * A shared filedesc is ok, we don't have to copy it
		 * because we are making this change globally.
		 */
		spin_lock_wr(&fdp->fd_spin);
		/* Replace the process's cwd if it sat on the old vnode. */
		if (fdp->fd_cdir == info->olddp) {
			vprele1 = fdp->fd_cdir;
			vref(info->newdp);
			fdp->fd_cdir = info->newdp;
			ncdrop1 = fdp->fd_ncdir;
			cache_copy(&info->nch, &fdp->fd_ncdir);
		}
		/* Likewise for the process's root directory. */
		if (fdp->fd_rdir == info->olddp) {
			vprele2 = fdp->fd_rdir;
			vref(info->newdp);
			fdp->fd_rdir = info->newdp;
			ncdrop2 = fdp->fd_nrdir;
			cache_copy(&info->nch, &fdp->fd_nrdir);
		}
		spin_unlock_wr(&fdp->fd_spin);
		/* Now drop the old references outside the spinlock. */
		if (ncdrop1.ncp)
			cache_drop(&ncdrop1);
		if (ncdrop2.ncp)
			cache_drop(&ncdrop2);
		if (vprele1)
			vrele(vprele1);
		if (vprele2)
			vrele(vprele2);
	}
	return(0);
}
499
500/*
501 * Unmount a file system.
502 *
503 * Note: unmount takes a path to the vnode mounted on as argument,
504 * not special file (as before).
505 */
506/*
507 * umount_args(char *path, int flags)
508 */
509/* ARGSUSED */
int
sys_unmount(struct unmount_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct mount *mp = NULL;
	int error;
	struct nlookupdata nd;

	KKASSERT(p);
	/* Unmounting is never permitted from within a jail. */
	if (p->p_ucred->cr_prison != NULL)
		return (EPERM);
	/* Non-root unmounts require the vfs.usermount sysctl to be set. */
	if (usermount == 0 && (error = suser(td)))
		return (error);

	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error)
		goto out;

	mp = nd.nl_nch.mount;

	/*
	 * Only root, or the user that did the original mount is
	 * permitted to unmount this filesystem.
	 */
	if ((mp->mnt_stat.f_owner != p->p_ucred->cr_uid) &&
	    (error = suser(td)))
		goto out;

	/*
	 * Don't allow unmounting the root file system.
	 */
	if (mp->mnt_flag & MNT_ROOTFS) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Must be the root of the filesystem
	 */
	if (nd.nl_nch.ncp != mp->mnt_ncmountpt.ncp) {
		error = EINVAL;
		goto out;
	}

out:
	/* Release the lookup before the potentially long-running unmount. */
	nlookup_done(&nd);
	if (error)
		return (error);
	return (dounmount(mp, uap->flags));
}
563
564/*
565 * Do the actual file system unmount.
566 */
567static int
568dounmount_interlock(struct mount *mp)
569{
570 if (mp->mnt_kern_flag & MNTK_UNMOUNT)
571 return (EBUSY);
572 mp->mnt_kern_flag |= MNTK_UNMOUNT;
573 return(0);
574}
575
int
dounmount(struct mount *mp, int flags)
{
	struct namecache *ncp;
	struct nchandle nch;
	int error;
	int async_flag;
	int lflags;
	int freeok = 1;		/* cleared if refs remain on forced unmount */

	/*
	 * Exclusive access for unmounting purposes
	 */
	if ((error = mountlist_interlock(dounmount_interlock, mp)) != 0)
		return (error);

	/*
	 * Allow filesystems to detect that a forced unmount is in progress.
	 */
	if (flags & MNT_FORCE)
		mp->mnt_kern_flag |= MNTK_UNMOUNTF;
	/* Forced unmounts wait for the lock; normal ones fail immediately. */
	lflags = LK_EXCLUSIVE | ((flags & MNT_FORCE) ? 0 : LK_NOWAIT);
	error = lockmgr(&mp->mnt_lock, lflags);
	if (error) {
		mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
		if (mp->mnt_kern_flag & MNTK_MWAIT)
			wakeup(mp);
		return (error);
	}

	if (mp->mnt_flag & MNT_EXPUBLIC)
		vfs_setpublicfs(NULL, NULL, NULL);

	/* Push out dirty pages and temporarily disable async operation. */
	vfs_msync(mp, MNT_WAIT);
	async_flag = mp->mnt_flag & MNT_ASYNC;
	mp->mnt_flag &=~ MNT_ASYNC;

	/*
	 * If this filesystem isn't aliasing other filesystems,
	 * try to invalidate any remaining namecache entries and
	 * check the count afterwords.
	 */
	if ((mp->mnt_kern_flag & MNTK_NCALIASED) == 0) {
		cache_lock(&mp->mnt_ncmountpt);
		cache_inval(&mp->mnt_ncmountpt, CINV_DESTROY|CINV_CHILDREN);
		cache_unlock(&mp->mnt_ncmountpt);

		if ((ncp = mp->mnt_ncmountpt.ncp) != NULL &&
		    (ncp->nc_refs != 1 || TAILQ_FIRST(&ncp->nc_list))) {

			if ((flags & MNT_FORCE) == 0) {
				error = EBUSY;
				mount_warning(mp, "Cannot unmount: "
						  "%d namecache "
						  "references still "
						  "present",
						  ncp->nc_refs - 1);
			} else {
				/*
				 * Forced: proceed anyway but leak the mount
				 * structure rather than free it under the
				 * remaining references.
				 */
				mount_warning(mp, "Forced unmount: "
						  "%d namecache "
						  "references still "
						  "present",
						  ncp->nc_refs - 1);
				freeok = 0;
			}
		}
	}

	/*
	 * nchandle records ref the mount structure.  Expect a count of 1
	 * (our mount->mnt_ncmountpt).
	 */
	if (mp->mnt_refs != 1) {
		if ((flags & MNT_FORCE) == 0) {
			mount_warning(mp, "Cannot unmount: "
					  "%d process references still "
					  "present", mp->mnt_refs);
			error = EBUSY;
		} else {
			mount_warning(mp, "Forced unmount: "
					  "%d process references still "
					  "present", mp->mnt_refs);
			freeok = 0;
		}
	}

	if (error == 0) {
		if (mp->mnt_syncer != NULL)
			vrele(mp->mnt_syncer);
		/*
		 * NOTE(review): mnt_syncer is not cleared after the vrele
		 * above, so the error path below will not re-allocate a
		 * sync vnode -- confirm this is the intended behavior.
		 */
		if (((mp->mnt_flag & MNT_RDONLY) ||
		     (error = VFS_SYNC(mp, MNT_WAIT)) == 0) ||
		    (flags & MNT_FORCE)) {
			error = VFS_UNMOUNT(mp, flags);
		}
	}
	if (error) {
		/* Unmount failed: restore state and wake any waiters. */
		if (mp->mnt_syncer == NULL)
			vfs_allocate_syncvnode(mp);
		mp->mnt_kern_flag &= ~(MNTK_UNMOUNT | MNTK_UNMOUNTF);
		mp->mnt_flag |= async_flag;
		lockmgr(&mp->mnt_lock, LK_RELEASE);
		if (mp->mnt_kern_flag & MNTK_MWAIT)
			wakeup(mp);
		return (error);
	}
	/*
	 * Clean up any journals still associated with the mount after
	 * filesystem activity has ceased.
	 */
	journal_remove_all_journals(mp,
	    ((flags & MNT_FORCE) ? MC_JOURNAL_STOP_IMM : 0));

	mountlist_remove(mp);

	/*
	 * Remove any installed vnode ops here so the individual VFSs don't
	 * have to.
	 */
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_coherency_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_journal_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_norm_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_spec_ops);
	vfs_rm_vnodeops(mp, NULL, &mp->mnt_vn_fifo_ops);

	/* Disassociate the namecache records from the mount point. */
	if (mp->mnt_ncmountpt.ncp != NULL) {
		nch = mp->mnt_ncmountpt;
		cache_zero(&mp->mnt_ncmountpt);
		cache_clrmountpt(&nch);
		cache_drop(&nch);
	}
	if (mp->mnt_ncmounton.ncp != NULL) {
		nch = mp->mnt_ncmounton;
		cache_zero(&mp->mnt_ncmounton);
		cache_clrmountpt(&nch);
		cache_drop(&nch);
	}

	mp->mnt_vfc->vfc_refcount--;
	if (!TAILQ_EMPTY(&mp->mnt_nvnodelist))
		panic("unmount: dangling vnode");
	lockmgr(&mp->mnt_lock, LK_RELEASE);
	if (mp->mnt_kern_flag & MNTK_MWAIT)
		wakeup(mp);
	if (freeok)
		kfree(mp, M_MOUNT);
	return (0);
}
723
/*
 * mount_warning(mp, ctl, ...)
 *
 * kprintf() a warning about mount point 'mp', prefixed with the mount
 * point's path when it can be resolved via the namecache, otherwise
 * with the mount pointer itself.  'ctl' is a kprintf format string.
 */
static
void
mount_warning(struct mount *mp, const char *ctl, ...)
{
	char *ptr;
	char *buf;
	__va_list va;

	__va_start(va, ctl);
	if (cache_fullpath(NULL, &mp->mnt_ncmounton, &ptr, &buf) == 0) {
		kprintf("unmount(%s): ", ptr);
		kvprintf(ctl, va);
		kprintf("\n");
		kfree(buf, M_TEMP);	/* buf was allocated by cache_fullpath */
	} else {
		kprintf("unmount(%p): ", mp);
		kvprintf(ctl, va);
		kprintf("\n");
	}
	__va_end(va);
}
745
746/*
747 * Sync each mounted filesystem.
748 */
749
750#ifdef DEBUG
751static int syncprt = 0;
752SYSCTL_INT(_debug, OID_AUTO, syncprt, CTLFLAG_RW, &syncprt, 0, "");
753#endif /* DEBUG */
754
755static int sync_callback(struct mount *mp, void *data);
756
757/* ARGSUSED */
int
sys_sync(struct sync_args *uap)
{
	/* Flush every mounted filesystem via sync_callback(). */
	mountlist_scan(sync_callback, NULL, MNTSCAN_FORWARD);
#ifdef DEBUG
	/*
	 * print out buffer pool stat information on each sync() call.
	 */
	if (syncprt)
		vfs_bufstats();
#endif /* DEBUG */
	return (0);
}
771
772static
773int
774sync_callback(struct mount *mp, void *data __unused)
775{
776 int asyncflag;
777
778 if ((mp->mnt_flag & MNT_RDONLY) == 0) {
779 asyncflag = mp->mnt_flag & MNT_ASYNC;
780 mp->mnt_flag &= ~MNT_ASYNC;
781 vfs_msync(mp, MNT_NOWAIT);
782 VFS_SYNC(mp, MNT_NOWAIT);
783 mp->mnt_flag |= asyncflag;
784 }
785 return(0);
786}
787
788/* XXX PRISON: could be per prison flag */
789static int prison_quotas;
790#if 0
791SYSCTL_INT(_kern_prison, OID_AUTO, quotas, CTLFLAG_RW, &prison_quotas, 0, "");
792#endif
793
794/*
795 * quotactl_args(char *path, int fcmd, int uid, caddr_t arg)
796 *
797 * Change filesystem quotas.
798 */
799/* ARGSUSED */
800int
801sys_quotactl(struct quotactl_args *uap)
802{
803 struct nlookupdata nd;
804 struct thread *td;
805 struct proc *p;
806 struct mount *mp;
807 int error;
808
809 td = curthread;
810 p = td->td_proc;
811 if (p->p_ucred->cr_prison && !prison_quotas)
812 return (EPERM);
813
814 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
815 if (error == 0)
816 error = nlookup(&nd);
817 if (error == 0) {
818 mp = nd.nl_nch.mount;
819 error = VFS_QUOTACTL(mp, uap->cmd, uap->uid,
820 uap->arg, nd.nl_cred);
821 }
822 nlookup_done(&nd);
823 return (error);
824}
825
826/*
827 * mountctl(char *path, int op, int fd, const void *ctl, int ctllen,
828 * void *buf, int buflen)
829 *
830 * This function operates on a mount point and executes the specified
831 * operation using the specified control data, and possibly returns data.
832 *
833 * The actual number of bytes stored in the result buffer is returned, 0
834 * if none, otherwise an error is returned.
835 */
836/* ARGSUSED */
int
sys_mountctl(struct mountctl_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	void *ctl = NULL;
	void *buf = NULL;
	char *path = NULL;
	int error;

	/*
	 * Sanity and permissions checks.  We must be root.
	 */
	KKASSERT(p);
	if (p->p_ucred->cr_prison != NULL)
		return (EPERM);
	if ((error = suser(td)) != 0)
		return (error);

	/*
	 * Argument length checks
	 */
	if (uap->ctllen < 0 || uap->ctllen > 1024)
		return (EINVAL);
	if (uap->buflen < 0 || uap->buflen > 16 * 1024)
		return (EINVAL);
	if (uap->path == NULL)
		return (EINVAL);

	/*
	 * Allocate the necessary buffers and copyin data
	 */
	path = objcache_get(namei_oc, M_WAITOK);
	error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
	if (error)
		goto done;

	/* The extra zeroed byte guarantees NUL termination of string data. */
	if (uap->ctllen) {
		ctl = kmalloc(uap->ctllen + 1, M_TEMP, M_WAITOK|M_ZERO);
		error = copyin(uap->ctl, ctl, uap->ctllen);
		if (error)
			goto done;
	}
	if (uap->buflen)
		buf = kmalloc(uap->buflen + 1, M_TEMP, M_WAITOK|M_ZERO);

	/*
	 * Validate the descriptor
	 */
	fp = holdfp(p->p_fd, uap->fd, -1);
	if (fp == NULL) {
		error = EBADF;
		goto done;
	}

	/*
	 * Execute the internal kernel function and clean up.
	 */
	error = kern_mountctl(path, uap->op, fp, ctl, uap->ctllen, buf, uap->buflen, &uap->sysmsg_result);
	if (fp)
		fdrop(fp);
	/* sysmsg_result holds the number of result bytes for userland. */
	if (error == 0 && uap->sysmsg_result > 0)
		error = copyout(buf, uap->buf, uap->sysmsg_result);
done:
	if (path)
		objcache_put(namei_oc, path);
	if (ctl)
		kfree(ctl, M_TEMP);
	if (buf)
		kfree(buf, M_TEMP);
	return (error);
}
910
911/*
912 * Execute a mount control operation by resolving the path to a mount point
913 * and calling vop_mountctl().
914 */
int
kern_mountctl(const char *path, int op, struct file *fp,
		const void *ctl, int ctllen,
		void *buf, int buflen, int *res)
{
	struct vnode *vp;
	struct mount *mp;
	struct nlookupdata nd;
	int error;

	*res = 0;	/* no result bytes unless the VFS produces some */
	vp = NULL;
	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	if (error == 0)
		error = nlookup(&nd);
	if (error == 0)
		error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
	nlookup_done(&nd);
	if (error)
		return (error);

	mp = vp->v_mount;

	/*
	 * Must be the root of the filesystem
	 */
	if ((vp->v_flag & VROOT) == 0) {
		vput(vp);
		return (EINVAL);
	}
	/* Dispatch to the filesystem's mountctl vop. */
	error = vop_mountctl(mp->mnt_vn_use_ops, op, fp, ctl, ctllen,
			     buf, buflen, res);
	vput(vp);
	return (error);
}
950
/*
 * kern_statfs(nd, buf)
 *
 * Resolve the path in 'nd' and fill '*buf' with statistics for the
 * filesystem containing it.  The mount-on path is regenerated from the
 * namecache and the fsid's are zeroed for non-root callers.
 */
int
kern_statfs(struct nlookupdata *nd, struct statfs *buf)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct mount *mp;
	struct statfs *sp;
	char *fullpath, *freepath;
	int error;

	if ((error = nlookup(nd)) != 0)
		return (error);
	mp = nd->nl_nch.mount;
	sp = &mp->mnt_stat;
	if ((error = VFS_STATFS(mp, sp, nd->nl_cred)) != 0)
		return (error);

	/* Regenerate f_mntonname from the namecache topology. */
	error = cache_fullpath(p, &mp->mnt_ncmountpt, &fullpath, &freepath);
	if (error)
		return(error);
	bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
	strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
	kfree(freepath, M_TEMP);

	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	bcopy(sp, buf, sizeof(*buf));
	/* Only root should have access to the fsid's. */
	if (suser(td))
		buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
	return (0);
}
982
983/*
984 * statfs_args(char *path, struct statfs *buf)
985 *
986 * Get filesystem statistics.
987 */
988int
989sys_statfs(struct statfs_args *uap)
990{
991 struct nlookupdata nd;
992 struct statfs buf;
993 int error;
994
995 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
996 if (error == 0)
997 error = kern_statfs(&nd, &buf);
998 nlookup_done(&nd);
999 if (error == 0)
1000 error = copyout(&buf, uap->buf, sizeof(*uap->buf));
1001 return (error);
1002}
1003
/*
 * kern_fstatfs(fd, buf)
 *
 * Fill '*buf' with statistics for the filesystem containing the vnode
 * referenced by descriptor 'fd', using the file's credentials.
 */
int
kern_fstatfs(int fd, struct statfs *buf)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct mount *mp;
	struct statfs *sp;
	char *fullpath, *freepath;
	int error;

	KKASSERT(p);
	if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
		return (error);
	mp = ((struct vnode *)fp->f_data)->v_mount;
	if (mp == NULL) {
		error = EBADF;
		goto done;
	}
	if (fp->f_cred == NULL) {
		error = EINVAL;
		goto done;
	}
	sp = &mp->mnt_stat;
	if ((error = VFS_STATFS(mp, sp, fp->f_cred)) != 0)
		goto done;

	/* Regenerate f_mntonname from the namecache topology. */
	if ((error = cache_fullpath(p, &mp->mnt_ncmountpt, &fullpath, &freepath)) != 0)
		goto done;
	bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
	strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
	kfree(freepath, M_TEMP);

	sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
	bcopy(sp, buf, sizeof(*buf));

	/* Only root should have access to the fsid's. */
	if (suser(td))
		buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
	error = 0;
done:
	fdrop(fp);
	return (error);
}
1048
1049/*
1050 * fstatfs_args(int fd, struct statfs *buf)
1051 *
1052 * Get filesystem statistics.
1053 */
1054int
1055sys_fstatfs(struct fstatfs_args *uap)
1056{
1057 struct statfs buf;
1058 int error;
1059
1060 error = kern_fstatfs(uap->fd, &buf);
1061
1062 if (error == 0)
1063 error = copyout(&buf, uap->buf, sizeof(*uap->buf));
1064 return (error);
1065}
1066
1067/*
1068 * getfsstat_args(struct statfs *buf, long bufsize, int flags)
1069 *
1070 * Get statistics on all filesystems.
1071 */
1072
/*
 * State shared between sys_getfsstat() and getfsstat_callback()
 * across the mountlist_scan().
 */
struct getfsstat_info {
	struct statfs *sfsp;	/* next user-space statfs slot, or NULL */
	long count;		/* number of mounts seen so far */
	long maxcount;		/* capacity of the user buffer */
	int error;		/* first error encountered, if any */
	int flags;		/* MNT_WAIT/MNT_NOWAIT/MNT_LAZY from caller */
	struct proc *p;		/* calling process, for chroot visibility */
};

static int getfsstat_callback(struct mount *, void *);
1083
1084/* ARGSUSED */
1085int
1086sys_getfsstat(struct getfsstat_args *uap)
1087{
1088 struct thread *td = curthread;
1089 struct proc *p = td->td_proc;
1090 struct getfsstat_info info;
1091
1092 bzero(&info, sizeof(info));
1093
1094 info.maxcount = uap->bufsize / sizeof(struct statfs);
1095 info.sfsp = uap->buf;
1096 info.count = 0;
1097 info.flags = uap->flags;
1098 info.p = p;
1099
1100 mountlist_scan(getfsstat_callback, &info, MNTSCAN_FORWARD);
1101 if (info.sfsp && info.count > info.maxcount)
1102 uap->sysmsg_result = info.maxcount;
1103 else
1104 uap->sysmsg_result = info.count;
1105 return (info.error);
1106}
1107
static int
getfsstat_callback(struct mount *mp, void *data)
{
	struct getfsstat_info *info = data;
	struct statfs *sp;
	char *freepath;
	char *fullpath;
	int error;

	if (info->sfsp && info->count < info->maxcount) {
		/* Skip mounts a chroot'd caller cannot see. */
		if (info->p && !chroot_visible_mnt(mp, info->p))
			return(0);
		sp = &mp->mnt_stat;

		/*
		 * If MNT_NOWAIT or MNT_LAZY is specified, do not
		 * refresh the fsstat cache. MNT_NOWAIT or MNT_LAZY
		 * overrides MNT_WAIT.
		 */
		if (((info->flags & (MNT_LAZY|MNT_NOWAIT)) == 0 ||
		    (info->flags & MNT_WAIT)) &&
		    (error = VFS_STATFS(mp, sp, info->p->p_ucred))) {
			return(0);	/* skip this mount, keep scanning */
		}
		sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;

		/* Regenerate f_mntonname from the namecache topology. */
		error = cache_fullpath(info->p, &mp->mnt_ncmountpt,
				       &fullpath, &freepath);
		if (error) {
			info->error = error;
			return(-1);	/* abort the mountlist scan */
		}
		bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
		strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
		kfree(freepath, M_TEMP);

		error = copyout(sp, info->sfsp, sizeof(*sp));
		if (error) {
			info->error = error;
			return (-1);	/* abort the mountlist scan */
		}
		++info->sfsp;
	}
	/* Always count the mount, even when not copying it out. */
	info->count++;
	return(0);
}
1154
1155/*
1156 * fchdir_args(int fd)
1157 *
1158 * Change current working directory to a given file descriptor.
1159 */
1160/* ARGSUSED */
int
sys_fchdir(struct fchdir_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp = p->p_fd;
	struct vnode *vp, *ovp;
	struct mount *mp;
	struct file *fp;
	struct nchandle nch, onch, tnch;
	int error;

	if ((error = holdvnode(fdp, uap->fd, &fp)) != 0)
		return (error);
	vp = (struct vnode *)fp->f_data;
	vref(vp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	/* Must be a directory with an associated namecache entry. */
	if (vp->v_type != VDIR || fp->f_nchandle.ncp == NULL)
		error = ENOTDIR;
	else
		error = VOP_ACCESS(vp, VEXEC, p->p_ucred);
	if (error) {
		vput(vp);
		fdrop(fp);
		return (error);
	}
	cache_copy(&fp->f_nchandle, &nch);

	/*
	 * If the ncp has become a mount point, traverse through
	 * the mount point.
	 */

	while (!error && (nch.ncp->nc_flag & NCF_ISMOUNTPT) &&
	       (mp = cache_findmount(&nch)) != NULL
	) {
		error = nlookup_mp(mp, &tnch);
		if (error == 0) {
			cache_unlock(&tnch);	/* leave ref intact */
			vput(vp);
			vp = tnch.ncp->nc_vp;
			error = vget(vp, LK_SHARED);
			KKASSERT(error == 0);
			cache_drop(&nch);
			nch = tnch;
		}
	}
	if (error == 0) {
		/* Install the new cwd, then release the old references. */
		ovp = fdp->fd_cdir;
		onch = fdp->fd_ncdir;
		vn_unlock(vp);		/* leave ref intact */
		fdp->fd_cdir = vp;
		fdp->fd_ncdir = nch;
		cache_drop(&onch);
		vrele(ovp);
	} else {
		cache_drop(&nch);
		vput(vp);
	}
	fdrop(fp);
	return (error);
}
1223
/*
 * Change the current directory to the directory resolved by the
 * passed nlookupdata.  On success the nchandle/vnode references are
 * transferred into fd_ncdir/fd_cdir and the previous cwd is released.
 * The caller still calls nlookup_done(); cache_zero() prevents it from
 * dropping the reference we inherited.
 */
int
kern_chdir(struct nlookupdata *nd)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp = p->p_fd;
	struct vnode *vp, *ovp;
	struct nchandle onch;
	int error;

	if ((error = nlookup(nd)) != 0)
		return (error);
	if ((vp = nd->nl_nch.ncp->nc_vp) == NULL)
		return (ENOENT);
	if ((error = vget(vp, LK_SHARED)) != 0)
		return (error);

	error = checkvp_chdir(vp, td);
	vn_unlock(vp);
	if (error == 0) {
		ovp = fdp->fd_cdir;
		onch = fdp->fd_ncdir;
		cache_unlock(&nd->nl_nch);	/* leave reference intact */
		fdp->fd_ncdir = nd->nl_nch;
		fdp->fd_cdir = vp;
		cache_drop(&onch);
		vrele(ovp);
		cache_zero(&nd->nl_nch);
	} else {
		vrele(vp);
	}
	return (error);
}
1257
1258/*
1259 * chdir_args(char *path)
1260 *
1261 * Change current working directory (``.'').
1262 */
1263int
1264sys_chdir(struct chdir_args *uap)
1265{
1266 struct nlookupdata nd;
1267 int error;
1268
1269 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1270 if (error == 0)
1271 error = kern_chdir(&nd);
1272 nlookup_done(&nd);
1273 return (error);
1274}
1275
1276/*
1277 * Helper function for raised chroot(2) security function: Refuse if
1278 * any filedescriptors are open directories.
1279 */
1280static int
1281chroot_refuse_vdir_fds(fdp)
1282 struct filedesc *fdp;
1283{
1284 struct vnode *vp;
1285 struct file *fp;
1286 int error;
1287 int fd;
1288
1289 for (fd = 0; fd < fdp->fd_nfiles ; fd++) {
1290 if ((error = holdvnode(fdp, fd, &fp)) != 0)
1291 continue;
1292 vp = (struct vnode *)fp->f_data;
1293 if (vp->v_type != VDIR) {
1294 fdrop(fp);
1295 continue;
1296 }
1297 fdrop(fp);
1298 return(EPERM);
1299 }
1300 return (0);
1301}
1302
1303/*
1304 * This sysctl determines if we will allow a process to chroot(2) if it
1305 * has a directory open:
1306 * 0: disallowed for all processes.
1307 * 1: allowed for processes that were not already chroot(2)'ed.
1308 * 2: allowed for all processes.
1309 */
1310
1311static int chroot_allow_open_directories = 1;
1312
1313SYSCTL_INT(_kern, OID_AUTO, chroot_allow_open_directories, CTLFLAG_RW,
1314 &chroot_allow_open_directories, 0, "");
1315
1316/*
1317 * chroot to the specified namecache entry. We obtain the vp from the
1318 * namecache data. The passed ncp must be locked and referenced and will
1319 * remain locked and referenced on return.
1320 */
1321int
1322kern_chroot(struct nchandle *nch)
1323{
1324 struct thread *td = curthread;
1325 struct proc *p = td->td_proc;
1326 struct filedesc *fdp = p->p_fd;
1327 struct vnode *vp;
1328 int error;
1329
1330 /*
1331 * Only root can chroot
1332 */
1333 if ((error = suser_cred(p->p_ucred, PRISON_ROOT)) != 0)
1334 return (error);
1335
1336 /*
1337 * Disallow open directory descriptors (fchdir() breakouts).
1338 */
1339 if (chroot_allow_open_directories == 0 ||
1340 (chroot_allow_open_directories == 1 && fdp->fd_rdir != rootvnode)) {
1341 if ((error = chroot_refuse_vdir_fds(fdp)) != 0)
1342 return (error);
1343 }
1344 if ((vp = nch->ncp->nc_vp) == NULL)
1345 return (ENOENT);
1346
1347 if ((error = vget(vp, LK_SHARED)) != 0)
1348 return (error);
1349
1350 /*
1351 * Check the validity of vp as a directory to change to and
1352 * associate it with rdir/jdir.
1353 */
1354 error = checkvp_chdir(vp, td);
1355 vn_unlock(vp); /* leave reference intact */
1356 if (error == 0) {
1357 vrele(fdp->fd_rdir);
1358 fdp->fd_rdir = vp; /* reference inherited by fd_rdir */
1359 cache_drop(&fdp->fd_nrdir);
1360 cache_copy(nch, &fdp->fd_nrdir);
1361 if (fdp->fd_jdir == NULL) {
1362 fdp->fd_jdir = vp;
1363 vref(fdp->fd_jdir);
1364 cache_copy(nch, &fdp->fd_njdir);
1365 }
1366 } else {
1367 vrele(vp);
1368 }
1369 return (error);
1370}
1371
1372/*
1373 * chroot_args(char *path)
1374 *
1375 * Change notion of root (``/'') directory.
1376 */
1377/* ARGSUSED */
1378int
1379sys_chroot(struct chroot_args *uap)
1380{
1381 struct thread *td = curthread;
1382 struct nlookupdata nd;
1383 int error;
1384
1385 KKASSERT(td->td_proc);
1386 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1387 if (error) {
1388 nlookup_done(&nd);
1389 return(error);
1390 }
1391 error = nlookup(&nd);
1392 if (error == 0)
1393 error = kern_chroot(&nd.nl_nch);
1394 nlookup_done(&nd);
1395 return(error);
1396}
1397
1398/*
1399 * Common routine for chroot and chdir. Given a locked, referenced vnode,
1400 * determine whether it is legal to chdir to the vnode. The vnode's state
1401 * is not changed by this call.
1402 */
1403int
1404checkvp_chdir(struct vnode *vp, struct thread *td)
1405{
1406 int error;
1407
1408 if (vp->v_type != VDIR)
1409 error = ENOTDIR;
1410 else
1411 error = VOP_ACCESS(vp, VEXEC, td->td_proc->p_ucred);
1412 return (error);
1413}
1414
/*
 * Open the file resolved by the passed nlookupdata.  On success the
 * new descriptor index is returned in *res.  Handles the legacy
 * lwp_dupfd device-redirect hack, descriptor reservation, and the
 * O_EXLOCK/O_SHLOCK advisory locking flags.
 */
int
kern_open(struct nlookupdata *nd, int oflags, int mode, int *res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;
	struct filedesc *fdp = p->p_fd;
	int cmode, flags;
	struct file *nfp;
	struct file *fp;
	struct vnode *vp;
	int type, indx, error;
	struct flock lf;

	/* O_RDWR|O_WRONLY simultaneously is invalid */
	if ((oflags & O_ACCMODE) == O_ACCMODE)
		return (EINVAL);
	flags = FFLAGS(oflags);
	error = falloc(p, &nfp, NULL);
	if (error)
		return (error);
	fp = nfp;
	cmode = ((mode &~ fdp->fd_cmask) & ALLPERMS) &~ S_ISTXT;

	/*
	 * XXX p_dupfd is a real mess.  It allows a device to return a
	 * file descriptor to be duplicated rather then doing the open
	 * itself.
	 */
	lp->lwp_dupfd = -1;

	/*
	 * Call vn_open() to do the lookup and assign the vnode to the
	 * file pointer.  vn_open() does not change the ref count on fp
	 * and the vnode, on success, will be inherited by the file pointer
	 * and unlocked.
	 */
	nd->nl_flags |= NLC_LOCKVP;
	error = vn_open(nd, fp, flags, cmode);
	nlookup_done(nd);
	if (error) {
		/*
		 * handle special fdopen() case.  bleh.  dupfdopen() is
		 * responsible for dropping the old contents of ofiles[indx]
		 * if it succeeds.
		 *
		 * Note that fsetfd() will add a ref to fp which represents
		 * the fd_files[] assignment.  We must still drop our
		 * reference.
		 */
		if ((error == ENODEV || error == ENXIO) && lp->lwp_dupfd >= 0) {
			if (fdalloc(p, 0, &indx) == 0) {
				error = dupfdopen(p, indx, lp->lwp_dupfd, flags, error);
				if (error == 0) {
					*res = indx;
					fdrop(fp);	/* our ref */
					return (0);
				}
				fsetfd(p, NULL, indx);
			}
		}
		fdrop(fp);	/* our ref */
		if (error == ERESTART)
			error = EINTR;
		return (error);
	}

	/*
	 * ref the vnode for ourselves so it can't be ripped out from under
	 * is.  XXX need an ND flag to request that the vnode be returned
	 * anyway.
	 *
	 * Reserve a file descriptor but do not assign it until the open
	 * succeeds.
	 */
	vp = (struct vnode *)fp->f_data;
	vref(vp);
	if ((error = fdalloc(p, 0, &indx)) != 0) {
		fdrop(fp);
		vrele(vp);
		return (error);
	}

	/*
	 * If no error occurs the vp will have been assigned to the file
	 * pointer.
	 */
	lp->lwp_dupfd = 0;

	if (flags & (O_EXLOCK | O_SHLOCK)) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		if (flags & O_EXLOCK)
			lf.l_type = F_WRLCK;
		else
			lf.l_type = F_RDLCK;
		if (flags & FNONBLOCK)
			type = 0;
		else
			type = F_WAIT;

		if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
			/*
			 * lock request failed.  Clean up the reserved
			 * descriptor.
			 */
			vrele(vp);
			fsetfd(p, NULL, indx);
			fdrop(fp);
			return (error);
		}
		fp->f_flag |= FHASLOCK;
	}
#if 0
	/*
	 * Assert that all regular file vnodes were created with a object.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("open: regular file has no backing object after vn_open"));
#endif

	vrele(vp);

	/*
	 * release our private reference, leaving the one associated with the
	 * descriptor table intact.
	 */
	fsetfd(p, fp, indx);
	fdrop(fp);
	*res = indx;
	return (0);
}
1547
1548/*
1549 * open_args(char *path, int flags, int mode)
1550 *
1551 * Check permissions, allocate an open file structure,
1552 * and call the device open routine if any.
1553 */
1554int
1555sys_open(struct open_args *uap)
1556{
1557 struct nlookupdata nd;
1558 int error;
1559
1560 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1561 if (error == 0) {
1562 error = kern_open(&nd, uap->flags,
1563 uap->mode, &uap->sysmsg_result);
1564 }
1565 nlookup_done(&nd);
1566 return (error);
1567}
1568
/*
 * Create a special file (device node, whiteout, or bad-sector marker)
 * at the path resolved by the passed nlookupdata.  Device nodes require
 * full superuser privilege; the remaining types only require
 * prison-root privilege.
 */
int
kern_mknod(struct nlookupdata *nd, int mode, int dev)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct vnode *vp;
	struct vattr vattr;
	int error;
	int whiteout = 0;

	KKASSERT(p);

	/* privilege check depends on the requested node type */
	switch (mode & S_IFMT) {
	case S_IFCHR:
	case S_IFBLK:
		error = suser(td);
		break;
	default:
		error = suser_cred(p->p_ucred, PRISON_ROOT);
		break;
	}
	if (error)
		return (error);

	bwillwrite();
	nd->nl_flags |= NLC_CREATE;
	if ((error = nlookup(nd)) != 0)
		return (error);
	if (nd->nl_nch.ncp->nc_vp)
		return (EEXIST);
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
		return (error);

	VATTR_NULL(&vattr);
	vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask;
	vattr.va_rdev = dev;
	whiteout = 0;

	/* translate the S_IF* type into a vnode type */
	switch (mode & S_IFMT) {
	case S_IFMT:	/* used by badsect to flag bad sectors */
		vattr.va_type = VBAD;
		break;
	case S_IFCHR:
		vattr.va_type = VCHR;
		break;
	case S_IFBLK:
		vattr.va_type = VBLK;
		break;
	case S_IFWHT:
		whiteout = 1;
		break;
	default:
		error = EINVAL;
		break;
	}
	if (error == 0) {
		if (whiteout) {
			error = VOP_NWHITEOUT(&nd->nl_nch, nd->nl_cred, NAMEI_CREATE);
		} else {
			vp = NULL;
			error = VOP_NMKNOD(&nd->nl_nch, &vp, nd->nl_cred, &vattr);
			if (error == 0)
				vput(vp);
		}
	}
	return (error);
}
1636
1637/*
1638 * mknod_args(char *path, int mode, int dev)
1639 *
1640 * Create a special file.
1641 */
1642int
1643sys_mknod(struct mknod_args *uap)
1644{
1645 struct nlookupdata nd;
1646 int error;
1647
1648 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
1649 if (error == 0)
1650 error = kern_mknod(&nd, uap->mode, uap->dev);
1651 nlookup_done(&nd);
1652 return (error);
1653}
1654
1655int
1656kern_mkfifo(struct nlookupdata *nd, int mode)
1657{
1658 struct thread *td = curthread;
1659 struct proc *p = td->td_proc;
1660 struct vattr vattr;
1661 struct vnode *vp;
1662 int error;
1663
1664 bwillwrite();
1665
1666 nd->nl_flags |= NLC_CREATE;
1667 if ((error = nlookup(nd)) != 0)
1668 return (error);
1669 if (nd->nl_nch.ncp->nc_vp)
1670 return (EEXIST);
1671 if ((error = ncp_writechk(&nd->nl_nch)) != 0)
1672 return (error);
1673
1674 VATTR_NULL(&vattr);
1675 vattr.va_type = VFIFO;
1676 vattr.va_mode = (mode & ALLPERMS) &~ p->p_fd->fd_cmask;
1677 vp = NULL;
1678 error = VOP_NMKNOD(&nd->nl_nch, &vp, nd->nl_cred, &vattr);
1679 if (error == 0)
1680 vput(vp);
1681 return (error);
1682}
1683
1684/*
1685 * mkfifo_args(char *path, int mode)
1686 *
1687 * Create a named pipe.
1688 */
1689int
1690sys_mkfifo(struct mkfifo_args *uap)
1691{
1692 struct nlookupdata nd;
1693 int error;
1694
1695 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
1696 if (error == 0)
1697 error = kern_mkfifo(&nd, uap->mode);
1698 nlookup_done(&nd);
1699 return (error);
1700}
1701
/*
 * Hardlink creation policy knobs, consulted by can_hardlink().  Both
 * default to off (0), i.e. no additional ownership restrictions.
 */
static int hardlink_check_uid = 0;
SYSCTL_INT(_kern, OID_AUTO, hardlink_check_uid, CTLFLAG_RW,
    &hardlink_check_uid, 0,
    "Unprivileged processes cannot create hard links to files owned by other "
    "users");
static int hardlink_check_gid = 0;
SYSCTL_INT(_kern, OID_AUTO, hardlink_check_gid, CTLFLAG_RW,
    &hardlink_check_gid, 0,
    "Unprivileged processes cannot create hard links to files owned by other "
    "groups");
1712
1713static int
1714can_hardlink(struct vnode *vp, struct thread *td, struct ucred *cred)
1715{
1716 struct vattr va;
1717 int error;
1718
1719 /*
1720 * Shortcut if disabled
1721 */
1722 if (hardlink_check_uid == 0 && hardlink_check_gid == 0)
1723 return (0);
1724
1725 /*
1726 * root cred can always hardlink
1727 */
1728 if (suser_cred(cred, PRISON_ROOT) == 0)
1729 return (0);
1730
1731 /*
1732 * Otherwise only if the originating file is owned by the
1733 * same user or group. Note that any group is allowed if
1734 * the file is owned by the caller.
1735 */
1736 error = VOP_GETATTR(vp, &va);
1737 if (error != 0)
1738 return (error);
1739
1740 if (hardlink_check_uid) {
1741 if (cred->cr_uid != va.va_uid)
1742 return (EPERM);
1743 }
1744
1745 if (hardlink_check_gid) {
1746 if (cred->cr_uid != va.va_uid && !groupmember(va.va_gid, cred))
1747 return (EPERM);
1748 }
1749
1750 return (0);
1751}
1752
/*
 * Create a hard link from the source path in nd to the target path in
 * linknd.  The source namecache entry is unlocked before looking up
 * the target to avoid deadlocking between the two lookups.
 */
int
kern_link(struct nlookupdata *nd, struct nlookupdata *linknd)
{
	struct thread *td = curthread;
	struct vnode *vp;
	int error;

	/*
	 * Lookup the source and obtained a locked vnode.
	 *
	 * XXX relookup on vget failure / race ?
	 */
	bwillwrite();
	if ((error = nlookup(nd)) != 0)
		return (error);
	vp = nd->nl_nch.ncp->nc_vp;
	KKASSERT(vp != NULL);
	if (vp->v_type == VDIR)
		return (EPERM);		/* POSIX */
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
		return (error);
	if ((error = vget(vp, LK_EXCLUSIVE)) != 0)
		return (error);

	/*
	 * Unlock the source so we can lookup the target without deadlocking
	 * (XXX vp is locked already, possible other deadlock?).  The target
	 * must not exist.
	 */
	KKASSERT(nd->nl_flags & NLC_NCPISLOCKED);
	nd->nl_flags &= ~NLC_NCPISLOCKED;
	cache_unlock(&nd->nl_nch);

	linknd->nl_flags |= NLC_CREATE;
	if ((error = nlookup(linknd)) != 0) {
		vput(vp);
		return (error);
	}
	if (linknd->nl_nch.ncp->nc_vp) {
		vput(vp);
		return (EEXIST);
	}

	/*
	 * Finally run the new API VOP.
	 */
	error = can_hardlink(vp, td, td->td_proc->p_ucred);
	if (error == 0)
		error = VOP_NLINK(&linknd->nl_nch, vp, linknd->nl_cred);
	vput(vp);
	return (error);
}
1805
1806/*
1807 * link_args(char *path, char *link)
1808 *
1809 * Make a hard file link.
1810 */
1811int
1812sys_link(struct link_args *uap)
1813{
1814 struct nlookupdata nd, linknd;
1815 int error;
1816
1817 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
1818 if (error == 0) {
1819 error = nlookup_init(&linknd, uap->link, UIO_USERSPACE, 0);
1820 if (error == 0)
1821 error = kern_link(&nd, &linknd);
1822 nlookup_done(&linknd);
1823 }
1824 nlookup_done(&nd);
1825 return (error);
1826}
1827
1828int
1829kern_symlink(struct nlookupdata *nd, char *path, int mode)
1830{
1831 struct vattr vattr;
1832 struct vnode *vp;
1833 int error;
1834
1835 bwillwrite();
1836 nd->nl_flags |= NLC_CREATE;
1837 if ((error = nlookup(nd)) != 0)
1838 return (error);
1839 if (nd->nl_nch.ncp->nc_vp)
1840 return (EEXIST);
1841 if ((error = ncp_writechk(&nd->nl_nch)) != 0)
1842 return (error);
1843 VATTR_NULL(&vattr);
1844 vattr.va_mode = mode;
1845 error = VOP_NSYMLINK(&nd->nl_nch, &vp, nd->nl_cred, &vattr, path);
1846 if (error == 0)
1847 vput(vp);
1848 return (error);
1849}
1850
1851/*
1852 * symlink(char *path, char *link)
1853 *
1854 * Make a symbolic link.
1855 */
1856int
1857sys_symlink(struct symlink_args *uap)
1858{
1859 struct thread *td = curthread;
1860 struct nlookupdata nd;
1861 char *path;
1862 int error;
1863 int mode;
1864
1865 path = objcache_get(namei_oc, M_WAITOK);
1866 error = copyinstr(uap->path, path, MAXPATHLEN, NULL);
1867 if (error == 0) {
1868 error = nlookup_init(&nd, uap->link, UIO_USERSPACE, 0);
1869 if (error == 0) {
1870 mode = ACCESSPERMS & ~td->td_proc->p_fd->fd_cmask;
1871 error = kern_symlink(&nd, path, mode);
1872 }
1873 nlookup_done(&nd);
1874 }
1875 objcache_put(namei_oc, path);
1876 return (error);
1877}
1878
1879/*
1880 * undelete_args(char *path)
1881 *
1882 * Delete a whiteout from the filesystem.
1883 */
1884/* ARGSUSED */
1885int
1886sys_undelete(struct undelete_args *uap)
1887{
1888 struct nlookupdata nd;
1889 int error;
1890
1891 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
1892 bwillwrite();
1893 nd.nl_flags |= NLC_DELETE;
1894 if (error == 0)
1895 error = nlookup(&nd);
1896 if (error == 0)
1897 error = ncp_writechk(&nd.nl_nch);
1898 if (error == 0)
1899 error = VOP_NWHITEOUT(&nd.nl_nch, nd.nl_cred, NAMEI_DELETE);
1900 nlookup_done(&nd);
1901 return (error);
1902}
1903
1904int
1905kern_unlink(struct nlookupdata *nd)
1906{
1907 int error;
1908
1909 bwillwrite();
1910 nd->nl_flags |= NLC_DELETE;
1911 if ((error = nlookup(nd)) != 0)
1912 return (error);
1913 if ((error = ncp_writechk(&nd->nl_nch)) != 0)
1914 return (error);
1915 error = VOP_NREMOVE(&nd->nl_nch, nd->nl_cred);
1916 return (error);
1917}
1918
1919/*
1920 * unlink_args(char *path)
1921 *
1922 * Delete a name from the filesystem.
1923 */
1924int
1925sys_unlink(struct unlink_args *uap)
1926{
1927 struct nlookupdata nd;
1928 int error;
1929
1930 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
1931 if (error == 0)
1932 error = kern_unlink(&nd);
1933 nlookup_done(&nd);
1934 return (error);
1935}
1936
1937int
1938kern_lseek(int fd, off_t offset, int whence, off_t *res)
1939{
1940 struct thread *td = curthread;
1941 struct proc *p = td->td_proc;
1942 struct file *fp;
1943 struct vattr vattr;
1944 int error;
1945
1946 fp = holdfp(p->p_fd, fd, -1);
1947 if (fp == NULL)
1948 return (EBADF);
1949 if (fp->f_type != DTYPE_VNODE) {
1950 error = ESPIPE;
1951 goto done;
1952 }
1953
1954 switch (whence) {
1955 case L_INCR:
1956 fp->f_offset += offset;
1957 error = 0;
1958 break;
1959 case L_XTND:
1960 error = VOP_GETATTR((struct vnode *)fp->f_data, &vattr);
1961 if (error == 0)
1962 fp->f_offset = offset + vattr.va_size;
1963 break;
1964 case L_SET:
1965 fp->f_offset = offset;
1966 error = 0;
1967 break;
1968 default:
1969 error = EINVAL;
1970 break;
1971 }
1972 *res = fp->f_offset;
1973done:
1974 fdrop(fp);
1975 return (error);
1976}
1977
1978/*
1979 * lseek_args(int fd, int pad, off_t offset, int whence)
1980 *
1981 * Reposition read/write file offset.
1982 */
1983int
1984sys_lseek(struct lseek_args *uap)
1985{
1986 int error;
1987
1988 error = kern_lseek(uap->fd, uap->offset, uap->whence,
1989 &uap->sysmsg_offset);
1990
1991 return (error);
1992}
1993
/*
 * Check accessibility of the file resolved by the passed nlookupdata
 * using the R_OK/W_OK/X_OK bits in aflags; aflags == 0 only tests for
 * existence.  An ESTALE result forces re-resolution of the namecache
 * entry and a retry.
 */
int
kern_access(struct nlookupdata *nd, int aflags)
{
	struct vnode *vp;
	int error, flags;

	if ((error = nlookup(nd)) != 0)
		return (error);
retry:
	error = cache_vget(&nd->nl_nch, nd->nl_cred, LK_EXCLUSIVE, &vp);
	if (error)
		return (error);

	/* Flags == 0 means only check for existence. */
	if (aflags) {
		/* map access(2) bits onto VOP_ACCESS bits */
		flags = 0;
		if (aflags & R_OK)
			flags |= VREAD;
		if (aflags & W_OK)
			flags |= VWRITE;
		if (aflags & X_OK)
			flags |= VEXEC;
		if ((flags & VWRITE) == 0 || 
		    (error = vn_writechk(vp, &nd->nl_nch)) == 0)
			error = VOP_ACCESS(vp, flags, nd->nl_cred);

		/*
		 * If the file handle is stale we have to re-resolve the
		 * entry.  This is a hack at the moment.
		 */
		if (error == ESTALE) {
			cache_setunresolved(&nd->nl_nch);
			error = cache_resolve(&nd->nl_nch, nd->nl_cred);
			if (error == 0) {
				vput(vp);
				vp = NULL;
				goto retry;
			}
		}
	}
	vput(vp);
	return (error);
}
2037
2038/*
2039 * access_args(char *path, int flags)
2040 *
2041 * Check access permissions.
2042 */
2043int
2044sys_access(struct access_args *uap)
2045{
2046 struct nlookupdata nd;
2047 int error;
2048
2049 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2050 if (error == 0)
2051 error = kern_access(&nd, uap->flags);
2052 nlookup_done(&nd);
2053 return (error);
2054}
2055
2056int
2057kern_stat(struct nlookupdata *nd, struct stat *st)
2058{
2059 int error;
2060 struct vnode *vp;
2061 thread_t td;
2062
2063 if ((error = nlookup(nd)) != 0)
2064 return (error);
2065again:
2066 if ((vp = nd->nl_nch.ncp->nc_vp) == NULL)
2067 return (ENOENT);
2068
2069 td = curthread;
2070 if ((error = vget(vp, LK_SHARED)) != 0)
2071 return (error);
2072 error = vn_stat(vp, st, nd->nl_cred);
2073
2074 /*
2075 * If the file handle is stale we have to re-resolve the entry. This
2076 * is a hack at the moment.
2077 */
2078 if (error == ESTALE) {
2079 cache_setunresolved(&nd->nl_nch);
2080 error = cache_resolve(&nd->nl_nch, nd->nl_cred);
2081 if (error == 0) {
2082 vput(vp);
2083 goto again;
2084 }
2085 }
2086 vput(vp);
2087 return (error);
2088}
2089
2090/*
2091 * stat_args(char *path, struct stat *ub)
2092 *
2093 * Get file status; this version follows links.
2094 */
2095int
2096sys_stat(struct stat_args *uap)
2097{
2098 struct nlookupdata nd;
2099 struct stat st;
2100 int error;
2101
2102 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2103 if (error == 0) {
2104 error = kern_stat(&nd, &st);
2105 if (error == 0)
2106 error = copyout(&st, uap->ub, sizeof(*uap->ub));
2107 }
2108 nlookup_done(&nd);
2109 return (error);
2110}
2111
2112/*
2113 * lstat_args(char *path, struct stat *ub)
2114 *
2115 * Get file status; this version does not follow links.
2116 */
2117int
2118sys_lstat(struct lstat_args *uap)
2119{
2120 struct nlookupdata nd;
2121 struct stat st;
2122 int error;
2123
2124 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2125 if (error == 0) {
2126 error = kern_stat(&nd, &st);
2127 if (error == 0)
2128 error = copyout(&st, uap->ub, sizeof(*uap->ub));
2129 }
2130 nlookup_done(&nd);
2131 return (error);
2132}
2133
2134/*
2135 * pathconf_Args(char *path, int name)
2136 *
2137 * Get configurable pathname variables.
2138 */
2139/* ARGSUSED */
2140int
2141sys_pathconf(struct pathconf_args *uap)
2142{
2143 struct nlookupdata nd;
2144 struct vnode *vp;
2145 int error;
2146
2147 vp = NULL;
2148 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2149 if (error == 0)
2150 error = nlookup(&nd);
2151 if (error == 0)
2152 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
2153 nlookup_done(&nd);
2154 if (error == 0) {
2155 error = VOP_PATHCONF(vp, uap->name, uap->sysmsg_fds);
2156 vput(vp);
2157 }
2158 return (error);
2159}
2160
2161/*
2162 * XXX: daver
2163 * kern_readlink isn't properly split yet. There is a copyin burried
2164 * in VOP_READLINK().
2165 */
2166int
2167kern_readlink(struct nlookupdata *nd, char *buf, int count, int *res)
2168{
2169 struct thread *td = curthread;
2170 struct proc *p = td->td_proc;
2171 struct vnode *vp;
2172 struct iovec aiov;
2173 struct uio auio;
2174 int error;
2175
2176 if ((error = nlookup(nd)) != 0)
2177 return (error);
2178 error = cache_vget(&nd->nl_nch, nd->nl_cred, LK_EXCLUSIVE, &vp);
2179 if (error)
2180 return (error);
2181 if (vp->v_type != VLNK) {
2182 error = EINVAL;
2183 } else {
2184 aiov.iov_base = buf;
2185 aiov.iov_len = count;
2186 auio.uio_iov = &aiov;
2187 auio.uio_iovcnt = 1;
2188 auio.uio_offset = 0;
2189 auio.uio_rw = UIO_READ;
2190 auio.uio_segflg = UIO_USERSPACE;
2191 auio.uio_td = td;
2192 auio.uio_resid = count;
2193 error = VOP_READLINK(vp, &auio, p->p_ucred);
2194 }
2195 vput(vp);
2196 *res = count - auio.uio_resid;
2197 return (error);
2198}
2199
2200/*
2201 * readlink_args(char *path, char *buf, int count)
2202 *
2203 * Return target name of a symbolic link.
2204 */
2205int
2206sys_readlink(struct readlink_args *uap)
2207{
2208 struct nlookupdata nd;
2209 int error;
2210
2211 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2212 if (error == 0) {
2213 error = kern_readlink(&nd, uap->buf, uap->count,
2214 &uap->sysmsg_result);
2215 }
2216 nlookup_done(&nd);
2217 return (error);
2218}
2219
2220static int
2221setfflags(struct vnode *vp, int flags)
2222{
2223 struct thread *td = curthread;
2224 struct proc *p = td->td_proc;
2225 int error;
2226 struct vattr vattr;
2227
2228 /*
2229 * Prevent non-root users from setting flags on devices. When
2230 * a device is reused, users can retain ownership of the device
2231 * if they are allowed to set flags and programs assume that
2232 * chown can't fail when done as root.
2233 */
2234 if ((vp->v_type == VCHR || vp->v_type == VBLK) &&
2235 ((error = suser_cred(p->p_ucred, PRISON_ROOT)) != 0))
2236 return (error);
2237
2238 /*
2239 * note: vget is required for any operation that might mod the vnode
2240 * so VINACTIVE is properly cleared.
2241 */
2242 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
2243 VATTR_NULL(&vattr);
2244 vattr.va_flags = flags;
2245 error = VOP_SETATTR(vp, &vattr, p->p_ucred);
2246 vput(vp);
2247 }
2248 return (error);
2249}
2250
2251/*
2252 * chflags(char *path, int flags)
2253 *
2254 * Change flags of a file given a path name.
2255 */
2256/* ARGSUSED */
2257int
2258sys_chflags(struct chflags_args *uap)
2259{
2260 struct nlookupdata nd;
2261 struct vnode *vp;
2262 int error;
2263
2264 vp = NULL;
2265 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2266 /* XXX Add NLC flag indicating modifying operation? */
2267 if (error == 0)
2268 error = nlookup(&nd);
2269 if (error == 0)
2270 error = ncp_writechk(&nd.nl_nch);
2271 if (error == 0)
2272 error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
2273 nlookup_done(&nd);
2274 if (error == 0) {
2275 error = setfflags(vp, uap->flags);
2276 vrele(vp);
2277 }
2278 return (error);
2279}
2280
2281/*
2282 * fchflags_args(int fd, int flags)
2283 *
2284 * Change flags of a file given a file descriptor.
2285 */
2286/* ARGSUSED */
2287int
2288sys_fchflags(struct fchflags_args *uap)
2289{
2290 struct thread *td = curthread;
2291 struct proc *p = td->td_proc;
2292 struct file *fp;
2293 int error;
2294
2295 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0)
2296 return (error);
2297 if (fp->f_nchandle.ncp)
2298 error = ncp_writechk(&fp->f_nchandle);
2299 if (error == 0)
2300 error = setfflags((struct vnode *) fp->f_data, uap->flags);
2301 fdrop(fp);
2302 return (error);
2303}
2304
2305static int
2306setfmode(struct vnode *vp, int mode)
2307{
2308 struct thread *td = curthread;
2309 struct proc *p = td->td_proc;
2310 int error;
2311 struct vattr vattr;
2312
2313 /*
2314 * note: vget is required for any operation that might mod the vnode
2315 * so VINACTIVE is properly cleared.
2316 */
2317 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
2318 VATTR_NULL(&vattr);
2319 vattr.va_mode = mode & ALLPERMS;
2320 error = VOP_SETATTR(vp, &vattr, p->p_ucred);
2321 vput(vp);
2322 }
2323 return error;
2324}
2325
2326int
2327kern_chmod(struct nlookupdata *nd, int mode)
2328{
2329 struct vnode *vp;
2330 int error;
2331
2332 /* XXX Add NLC flag indicating modifying operation? */
2333 if ((error = nlookup(nd)) != 0)
2334 return (error);
2335 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0)
2336 return (error);
2337 if ((error = ncp_writechk(&nd->nl_nch)) == 0)
2338 error = setfmode(vp, mode);
2339 vrele(vp);
2340 return (error);
2341}
2342
2343/*
2344 * chmod_args(char *path, int mode)
2345 *
2346 * Change mode of a file given path name.
2347 */
2348/* ARGSUSED */
2349int
2350sys_chmod(struct chmod_args *uap)
2351{
2352 struct nlookupdata nd;
2353 int error;
2354
2355 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2356 if (error == 0)
2357 error = kern_chmod(&nd, uap->mode);
2358 nlookup_done(&nd);
2359 return (error);
2360}
2361
2362/*
2363 * lchmod_args(char *path, int mode)
2364 *
2365 * Change mode of a file given path name (don't follow links.)
2366 */
2367/* ARGSUSED */
2368int
2369sys_lchmod(struct lchmod_args *uap)
2370{
2371 struct nlookupdata nd;
2372 int error;
2373
2374 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2375 if (error == 0)
2376 error = kern_chmod(&nd, uap->mode);
2377 nlookup_done(&nd);
2378 return (error);
2379}
2380
2381/*
2382 * fchmod_args(int fd, int mode)
2383 *
2384 * Change mode of a file given a file descriptor.
2385 */
2386/* ARGSUSED */
2387int
2388sys_fchmod(struct fchmod_args *uap)
2389{
2390 struct thread *td = curthread;
2391 struct proc *p = td->td_proc;
2392 struct file *fp;
2393 int error;
2394
2395 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0)
2396 return (error);
2397 if (fp->f_nchandle.ncp)
2398 error = ncp_writechk(&fp->f_nchandle);
2399 if (error == 0)
2400 error = setfmode((struct vnode *)fp->f_data, uap->mode);
2401 fdrop(fp);
2402 return (error);
2403}
2404
2405static int
2406setfown(struct vnode *vp, uid_t uid, gid_t gid)
2407{
2408 struct thread *td = curthread;
2409 struct proc *p = td->td_proc;
2410 int error;
2411 struct vattr vattr;
2412
2413 /*
2414 * note: vget is required for any operation that might mod the vnode
2415 * so VINACTIVE is properly cleared.
2416 */
2417 if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
2418 VATTR_NULL(&vattr);
2419 vattr.va_uid = uid;
2420 vattr.va_gid = gid;
2421 error = VOP_SETATTR(vp, &vattr, p->p_ucred);
2422 vput(vp);
2423 }
2424 return error;
2425}
2426
2427int
2428kern_chown(struct nlookupdata *nd, int uid, int gid)
2429{
2430 struct vnode *vp;
2431 int error;
2432
2433 /* XXX Add NLC flag indicating modifying operation? */
2434 if ((error = nlookup(nd)) != 0)
2435 return (error);
2436 if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0)
2437 return (error);
2438 if ((error = ncp_writechk(&nd->nl_nch)) == 0)
2439 error = setfown(vp, uid, gid);
2440 vrele(vp);
2441 return (error);
2442}
2443
2444/*
2445 * chown(char *path, int uid, int gid)
2446 *
2447 * Set ownership given a path name.
2448 */
2449int
2450sys_chown(struct chown_args *uap)
2451{
2452 struct nlookupdata nd;
2453 int error;
2454
2455 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2456 if (error == 0)
2457 error = kern_chown(&nd, uap->uid, uap->gid);
2458 nlookup_done(&nd);
2459 return (error);
2460}
2461
2462/*
2463 * lchown_args(char *path, int uid, int gid)
2464 *
2465 * Set ownership given a path name, do not cross symlinks.
2466 */
2467int
2468sys_lchown(struct lchown_args *uap)
2469{
2470 struct nlookupdata nd;
2471 int error;
2472
2473 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2474 if (error == 0)
2475 error = kern_chown(&nd, uap->uid, uap->gid);
2476 nlookup_done(&nd);
2477 return (error);
2478}
2479
2480/*
2481 * fchown_args(int fd, int uid, int gid)
2482 *
2483 * Set ownership given a file descriptor.
2484 */
2485/* ARGSUSED */
2486int
2487sys_fchown(struct fchown_args *uap)
2488{
2489 struct thread *td = curthread;
2490 struct proc *p = td->td_proc;
2491 struct file *fp;
2492 int error;
2493
2494 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0)
2495 return (error);
2496 if (fp->f_nchandle.ncp)
2497 error = ncp_writechk(&fp->f_nchandle);
2498 if (error == 0)
2499 error = setfown((struct vnode *)fp->f_data, uap->uid, uap->gid);
2500 fdrop(fp);
2501 return (error);
2502}
2503
2504static int
2505getutimes(const struct timeval *tvp, struct timespec *tsp)
2506{
2507 struct timeval tv[2];
2508
2509 if (tvp == NULL) {
2510 microtime(&tv[0]);
2511 TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]);
2512 tsp[1] = tsp[0];
2513 } else {
2514 TIMEVAL_TO_TIMESPEC(&tvp[0], &tsp[0]);
2515 TIMEVAL_TO_TIMESPEC(&tvp[1], &tsp[1]);
2516 }
2517 return 0;
2518}
2519
/*
 * Apply the access (ts[0]) and modification (ts[1]) times to a vnode.
 * nullflag is non-zero when the caller passed a NULL time pointer
 * ("set to now"), which sets VA_UTIMES_NULL so the filesystem can use
 * the relaxed owner/write permission check.
 */
static int
setutimes(struct vnode *vp, const struct timespec *ts, int nullflag)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int error;
	struct vattr vattr;

	/*
	 * note: vget is required for any operation that might mod the vnode
	 * so VINACTIVE is properly cleared.
	 */
	if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
		VATTR_NULL(&vattr);
		vattr.va_atime = ts[0];
		vattr.va_mtime = ts[1];
		if (nullflag)
			vattr.va_vaflags |= VA_UTIMES_NULL;
		error = VOP_SETATTR(vp, &vattr, p->p_ucred);
		vput(vp);
	}
	return error;
}
2543
/*
 * Common backend for utimes()/lutimes().  Resolves the path in *nd,
 * checks that the mount is writable, and applies the times via
 * setutimes().  tptr == NULL means "set both times to now".
 */
int
kern_utimes(struct nlookupdata *nd, struct timeval *tptr)
{
	struct timespec ts[2];
	struct vnode *vp;
	int error;

	if ((error = getutimes(tptr, ts)) != 0)
		return (error);
	/* XXX Add NLC flag indicating modifying operation? */
	if ((error = nlookup(nd)) != 0)
		return (error);
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
		return (error);
	if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0)
		return (error);
	error = setutimes(vp, ts, tptr == NULL);
	vrele(vp);
	return (error);
}
2564
2565/*
2566 * utimes_args(char *path, struct timeval *tptr)
2567 *
2568 * Set the access and modification times of a file.
2569 */
2570int
2571sys_utimes(struct utimes_args *uap)
2572{
2573 struct timeval tv[2];
2574 struct nlookupdata nd;
2575 int error;
2576
2577 if (uap->tptr) {
2578 error = copyin(uap->tptr, tv, sizeof(tv));
2579 if (error)
2580 return (error);
2581 }
2582 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2583 if (error == 0)
2584 error = kern_utimes(&nd, uap->tptr ? tv : NULL);
2585 nlookup_done(&nd);
2586 return (error);
2587}
2588
2589/*
2590 * lutimes_args(char *path, struct timeval *tptr)
2591 *
2592 * Set the access and modification times of a file.
2593 */
2594int
2595sys_lutimes(struct lutimes_args *uap)
2596{
2597 struct timeval tv[2];
2598 struct nlookupdata nd;
2599 int error;
2600
2601 if (uap->tptr) {
2602 error = copyin(uap->tptr, tv, sizeof(tv));
2603 if (error)
2604 return (error);
2605 }
2606 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
2607 if (error == 0)
2608 error = kern_utimes(&nd, uap->tptr ? tv : NULL);
2609 nlookup_done(&nd);
2610 return (error);
2611}
2612
/*
 * Backend for futimes(): apply access/modification times to the vnode
 * underlying file descriptor fd.  tptr == NULL means "now".
 */
int
kern_futimes(int fd, struct timeval *tptr)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct timespec ts[2];
	struct file *fp;
	int error;

	error = getutimes(tptr, ts);
	if (error)
		return (error);
	if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
		return (error);
	/* verify the mount is writable if a namecache handle exists */
	if (fp->f_nchandle.ncp)
		error = ncp_writechk(&fp->f_nchandle);
	if (error == 0)
		error = setutimes((struct vnode *)fp->f_data, ts, tptr == NULL);
	fdrop(fp);
	return (error);
}
2634
2635/*
2636 * futimes_args(int fd, struct timeval *tptr)
2637 *
2638 * Set the access and modification times of a file.
2639 */
2640int
2641sys_futimes(struct futimes_args *uap)
2642{
2643 struct timeval tv[2];
2644 int error;
2645
2646 if (uap->tptr) {
2647 error = copyin(uap->tptr, tv, sizeof(tv));
2648 if (error)
2649 return (error);
2650 }
2651
2652 error = kern_futimes(uap->fd, uap->tptr ? tv : NULL);
2653
2654 return (error);
2655}
2656
/*
 * Backend for truncate(): resolve the path in *nd and set the file's
 * size to length.  Rejects negative lengths and directories, and
 * requires both filesystem-level (vn_writechk) and credential-level
 * (VOP_ACCESS) write permission.
 */
int
kern_truncate(struct nlookupdata *nd, off_t length)
{
	struct vnode *vp;
	struct vattr vattr;
	int error;

	if (length < 0)
		return(EINVAL);
	/* XXX Add NLC flag indicating modifying operation? */
	if ((error = nlookup(nd)) != 0)
		return (error);
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
		return (error);
	if ((error = cache_vref(&nd->nl_nch, nd->nl_cred, &vp)) != 0)
		return (error);
	/* on lock failure we only hold a ref, so vrele rather than vput */
	if ((error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY)) != 0) {
		vrele(vp);
		return (error);
	}
	if (vp->v_type == VDIR) {
		error = EISDIR;
	} else if ((error = vn_writechk(vp, &nd->nl_nch)) == 0 &&
	    (error = VOP_ACCESS(vp, VWRITE, nd->nl_cred)) == 0) {
		VATTR_NULL(&vattr);
		vattr.va_size = length;
		error = VOP_SETATTR(vp, &vattr, nd->nl_cred);
	}
	vput(vp);
	return (error);
}
2688
2689/*
2690 * truncate(char *path, int pad, off_t length)
2691 *
2692 * Truncate a file given its path name.
2693 */
2694int
2695sys_truncate(struct truncate_args *uap)
2696{
2697 struct nlookupdata nd;
2698 int error;
2699
2700 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
2701 if (error == 0)
2702 error = kern_truncate(&nd, uap->length);
2703 nlookup_done(&nd);
2704 return error;
2705}
2706
/*
 * Backend for ftruncate(): set the size of the file underlying
 * descriptor fd to length.  The descriptor must be open for writing
 * and must not reference a directory.
 */
int
kern_ftruncate(int fd, off_t length)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct vattr vattr;
	struct vnode *vp;
	struct file *fp;
	int error;

	if (length < 0)
		return(EINVAL);
	if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
		return (error);
	/* verify the mount is writable if a namecache handle exists */
	if (fp->f_nchandle.ncp) {
		error = ncp_writechk(&fp->f_nchandle);
		if (error)
			goto done;
	}
	if ((fp->f_flag & FWRITE) == 0) {
		error = EINVAL;
		goto done;
	}
	vp = (struct vnode *)fp->f_data;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	if (vp->v_type == VDIR) {
		error = EISDIR;
	} else if ((error = vn_writechk(vp, NULL)) == 0) {
		VATTR_NULL(&vattr);
		vattr.va_size = length;
		error = VOP_SETATTR(vp, &vattr, fp->f_cred);
	}
	vn_unlock(vp);
done:
	fdrop(fp);
	return (error);
}
2744
2745/*
2746 * ftruncate_args(int fd, int pad, off_t length)
2747 *
2748 * Truncate a file given a file descriptor.
2749 */
2750int
2751sys_ftruncate(struct ftruncate_args *uap)
2752{
2753 int error;
2754
2755 error = kern_ftruncate(uap->fd, uap->length);
2756
2757 return (error);
2758}
2759
2760/*
2761 * fsync(int fd)
2762 *
2763 * Sync an open file.
2764 */
2765/* ARGSUSED */
2766int
2767sys_fsync(struct fsync_args *uap)
2768{
2769 struct thread *td = curthread;
2770 struct proc *p = td->td_proc;
2771 struct vnode *vp;
2772 struct file *fp;
2773 vm_object_t obj;
2774 int error;
2775
2776 if ((error = holdvnode(p->p_fd, uap->fd, &fp)) != 0)
2777 return (error);
2778 vp = (struct vnode *)fp->f_data;
2779 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2780 if ((obj = vp->v_object) != NULL)
2781 vm_object_page_clean(obj, 0, 0, 0);
2782 if ((error = VOP_FSYNC(vp, MNT_WAIT)) == 0 &&
2783 vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP) &&
2784 bioops.io_fsync) {
2785 error = (*bioops.io_fsync)(vp);
2786 }
2787 vn_unlock(vp);
2788 fdrop(fp);
2789 return (error);
2790}
2791
/*
 * Backend for rename(): rename the file looked up via *fromnd to the
 * name looked up via *tond.  Both paths must reside on the same mount,
 * mount points may not be renamed or overwritten, and a directory may
 * only replace a directory.  If the two names are hardlinks to the
 * same file the source is simply removed (VOP_NREMOVE) instead of
 * renamed.
 */
int
kern_rename(struct nlookupdata *fromnd, struct nlookupdata *tond)
{
	struct nchandle fnchd;
	struct nchandle tnchd;
	struct namecache *ncp;
	struct mount *mp;
	int error;

	bwillwrite();
	if ((error = nlookup(fromnd)) != 0)
		return (error);
	if ((fnchd.ncp = fromnd->nl_nch.ncp->nc_parent) == NULL)
		return (ENOENT);
	fnchd.mount = fromnd->nl_nch.mount;
	cache_hold(&fnchd);

	/*
	 * unlock the source nch so we can lookup the target nch without
	 * deadlocking.  The target may or may not exist so we do not check
	 * for a target vp like kern_mkdir() and other creation functions do.
	 *
	 * The source and target directories are ref'd and rechecked after
	 * everything is relocked to determine if the source or target file
	 * has been renamed.
	 */
	KKASSERT(fromnd->nl_flags & NLC_NCPISLOCKED);
	fromnd->nl_flags &= ~NLC_NCPISLOCKED;
	cache_unlock(&fromnd->nl_nch);

	tond->nl_flags |= NLC_CREATE;
	if ((error = nlookup(tond)) != 0) {
		cache_drop(&fnchd);
		return (error);
	}
	if ((tnchd.ncp = tond->nl_nch.ncp->nc_parent) == NULL) {
		cache_drop(&fnchd);
		return (ENOENT);
	}
	tnchd.mount = tond->nl_nch.mount;
	cache_hold(&tnchd);

	/*
	 * If the source and target are the same there is nothing to do
	 */
	if (fromnd->nl_nch.ncp == tond->nl_nch.ncp) {
		cache_drop(&fnchd);
		cache_drop(&tnchd);
		return (0);
	}

	/*
	 * Mount points cannot be renamed or overwritten
	 */
	if ((fromnd->nl_nch.ncp->nc_flag | tond->nl_nch.ncp->nc_flag) &
	    NCF_ISMOUNTPT
	) {
		cache_drop(&fnchd);
		cache_drop(&tnchd);
		return (EINVAL);
	}

	/*
	 * relock the source ncp.  NOTE AFTER RELOCKING: the source ncp
	 * may have become invalid while it was unlocked, nc_vp and nc_mount
	 * could be NULL.
	 *
	 * The three cases below preserve a consistent lock ordering (by
	 * ncp address) to avoid deadlocking against another rename.
	 */
	if (cache_lock_nonblock(&fromnd->nl_nch) == 0) {
		cache_resolve(&fromnd->nl_nch, fromnd->nl_cred);
	} else if (fromnd->nl_nch.ncp > tond->nl_nch.ncp) {
		cache_lock(&fromnd->nl_nch);
		cache_resolve(&fromnd->nl_nch, fromnd->nl_cred);
	} else {
		cache_unlock(&tond->nl_nch);
		cache_lock(&fromnd->nl_nch);
		cache_resolve(&fromnd->nl_nch, fromnd->nl_cred);
		cache_lock(&tond->nl_nch);
		cache_resolve(&tond->nl_nch, tond->nl_cred);
	}
	fromnd->nl_flags |= NLC_NCPISLOCKED;

	/*
	 * make sure the parent directories linkages are the same
	 */
	if (fnchd.ncp != fromnd->nl_nch.ncp->nc_parent ||
	    tnchd.ncp != tond->nl_nch.ncp->nc_parent) {
		cache_drop(&fnchd);
		cache_drop(&tnchd);
		return (ENOENT);
	}

	/*
	 * Both the source and target must be within the same filesystem and
	 * in the same filesystem as their parent directories within the
	 * namecache topology.
	 *
	 * NOTE: fromnd's nc_mount or nc_vp could be NULL.
	 */
	mp = fnchd.mount;
	if (mp != tnchd.mount || mp != fromnd->nl_nch.mount ||
	    mp != tond->nl_nch.mount) {
		cache_drop(&fnchd);
		cache_drop(&tnchd);
		return (EXDEV);
	}

	/*
	 * Make sure the mount point is writable
	 */
	if ((error = ncp_writechk(&tond->nl_nch)) != 0) {
		cache_drop(&fnchd);
		cache_drop(&tnchd);
		return (error);
	}

	/*
	 * If the target exists and either the source or target is a directory,
	 * then both must be directories.
	 *
	 * Due to relocking of the source, fromnd->nl_nch.ncp->nc_vp might
	 * have become NULL.
	 */
	if (tond->nl_nch.ncp->nc_vp) {
		if (fromnd->nl_nch.ncp->nc_vp == NULL) {
			error = ENOENT;
		} else if (fromnd->nl_nch.ncp->nc_vp->v_type == VDIR) {
			if (tond->nl_nch.ncp->nc_vp->v_type != VDIR)
				error = ENOTDIR;
		} else if (tond->nl_nch.ncp->nc_vp->v_type == VDIR) {
			error = EISDIR;
		}
	}

	/*
	 * You cannot rename a source into itself or a subdirectory of itself.
	 * We check this by traversing the target directory upwards looking
	 * for a match against the source.
	 */
	if (error == 0) {
		for (ncp = tnchd.ncp; ncp; ncp = ncp->nc_parent) {
			if (fromnd->nl_nch.ncp == ncp) {
				error = EINVAL;
				break;
			}
		}
	}

	cache_drop(&fnchd);
	cache_drop(&tnchd);

	/*
	 * Even though the namespaces are different, they may still represent
	 * hardlinks to the same file.  The filesystem might have a hard time
	 * with this so we issue a NREMOVE of the source instead of a NRENAME
	 * when we detect the situation.
	 */
	if (error == 0) {
		if (fromnd->nl_nch.ncp->nc_vp == tond->nl_nch.ncp->nc_vp) {
			error = VOP_NREMOVE(&fromnd->nl_nch, fromnd->nl_cred);
		} else {
			error = VOP_NRENAME(&fromnd->nl_nch, &tond->nl_nch,
					    tond->nl_cred);
		}
	}
	return (error);
}
2958
2959/*
2960 * rename_args(char *from, char *to)
2961 *
2962 * Rename files. Source and destination must either both be directories,
2963 * or both not be directories. If target is a directory, it must be empty.
2964 */
2965int
2966sys_rename(struct rename_args *uap)
2967{
2968 struct nlookupdata fromnd, tond;
2969 int error;
2970
2971 error = nlookup_init(&fromnd, uap->from, UIO_USERSPACE, 0);
2972 if (error == 0) {
2973 error = nlookup_init(&tond, uap->to, UIO_USERSPACE, 0);
2974 if (error == 0)
2975 error = kern_rename(&fromnd, &tond);
2976 nlookup_done(&tond);
2977 }
2978 nlookup_done(&fromnd);
2979 return (error);
2980}
2981
/*
 * Backend for mkdir(): create a directory at the path looked up via
 * *nd with the given mode (masked by the process umask).  Fails with
 * EEXIST if the name already resolves to a vnode.  The caller is
 * responsible for nlookup_done() cleanup on all return paths.
 */
int
kern_mkdir(struct nlookupdata *nd, int mode)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct vnode *vp;
	struct vattr vattr;
	int error;

	bwillwrite();
	nd->nl_flags |= NLC_WILLBEDIR | NLC_CREATE;
	if ((error = nlookup(nd)) != 0)
		return (error);

	if (nd->nl_nch.ncp->nc_vp)
		return (EEXIST);
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
		return (error);

	VATTR_NULL(&vattr);
	vattr.va_type = VDIR;
	/* apply the process file-creation mask to the requested mode */
	vattr.va_mode = (mode & ACCESSPERMS) &~ p->p_fd->fd_cmask;

	vp = NULL;
	error = VOP_NMKDIR(&nd->nl_nch, &vp, p->p_ucred, &vattr);
	if (error == 0)
		vput(vp);
	return (error);
}
3011
3012/*
3013 * mkdir_args(char *path, int mode)
3014 *
3015 * Make a directory file.
3016 */
3017/* ARGSUSED */
3018int
3019sys_mkdir(struct mkdir_args *uap)
3020{
3021 struct nlookupdata nd;
3022 int error;
3023
3024 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
3025 if (error == 0)
3026 error = kern_mkdir(&nd, uap->mode);
3027 nlookup_done(&nd);
3028 return (error);
3029}
3030
/*
 * Backend for rmdir(): remove the directory looked up via *nd.
 * Directories acting as mount points are refused outright.
 */
int
kern_rmdir(struct nlookupdata *nd)
{
	int error;

	bwillwrite();
	nd->nl_flags |= NLC_DELETE;
	if ((error = nlookup(nd)) != 0)
		return (error);

	/*
	 * Do not allow directories representing mount points to be
	 * deleted, even if empty.  Check write perms on mount point
	 * in case the vnode is aliased (aka nullfs).
	 */
	if (nd->nl_nch.ncp->nc_flag & (NCF_ISMOUNTPT))
		return (EINVAL);
	if ((error = ncp_writechk(&nd->nl_nch)) != 0)
		return (error);

	error = VOP_NRMDIR(&nd->nl_nch, nd->nl_cred);
	return (error);
}
3054
3055/*
3056 * rmdir_args(char *path)
3057 *
3058 * Remove a directory file.
3059 */
3060/* ARGSUSED */
3061int
3062sys_rmdir(struct rmdir_args *uap)
3063{
3064 struct nlookupdata nd;
3065 int error;
3066
3067 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
3068 if (error == 0)
3069 error = kern_rmdir(&nd);
3070 nlookup_done(&nd);
3071 return (error);
3072}
3073
/*
 * Backend for getdirentries()/getdents(): read directory entries from
 * descriptor fd into buf (up to count bytes, in the given uio segment).
 * On success *res is set to the number of bytes produced and, if basep
 * is non-NULL, *basep receives the seek offset the read started at.
 * The union_dircheckp hook may redirect the read to a covered vnode,
 * in which case the read is restarted (unionread label).
 */
int
kern_getdirentries(int fd, char *buf, u_int count, long *basep, int *res,
    enum uio_seg direction)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct vnode *vp;
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	long loff;
	int error, eofflag;

	if ((error = holdvnode(p->p_fd, fd, &fp)) != 0)
		return (error);
	if ((fp->f_flag & FREAD) == 0) {
		error = EBADF;
		goto done;
	}
	vp = (struct vnode *)fp->f_data;
unionread:
	if (vp->v_type != VDIR) {
		error = EINVAL;
		goto done;
	}
	aiov.iov_base = buf;
	aiov.iov_len = count;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_segflg = direction;
	auio.uio_td = td;
	auio.uio_resid = count;
	/* remember the starting offset; f_offset advances with the read */
	loff = auio.uio_offset = fp->f_offset;
	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, NULL, NULL);
	fp->f_offset = auio.uio_offset;
	if (error)
		goto done;
	/* nothing was produced; give the union hook a chance to redirect */
	if (count == auio.uio_resid) {
		if (union_dircheckp) {
			error = union_dircheckp(td, &vp, fp);
			if (error == -1)
				goto unionread;
			if (error)
				goto done;
		}
#if 0
		if ((vp->v_flag & VROOT) &&
		    (vp->v_mount->mnt_flag & MNT_UNION)) {
			struct vnode *tvp = vp;
			vp = vp->v_mount->mnt_vnodecovered;
			vref(vp);
			fp->f_data = vp;
			fp->f_offset = 0;
			vrele(tvp);
			goto unionread;
		}
#endif
	}
	if (basep) {
		*basep = loff;
	}
	*res = count - auio.uio_resid;
done:
	fdrop(fp);
	return (error);
}
3141
3142/*
3143 * getdirentries_args(int fd, char *buf, u_int conut, long *basep)
3144 *
3145 * Read a block of directory entries in a file system independent format.
3146 */
3147int
3148sys_getdirentries(struct getdirentries_args *uap)
3149{
3150 long base;
3151 int error;
3152
3153 error = kern_getdirentries(uap->fd, uap->buf, uap->count, &base,
3154 &uap->sysmsg_result, UIO_USERSPACE);
3155
3156 if (error == 0)
3157 error = copyout(&base, uap->basep, sizeof(*uap->basep));
3158 return (error);
3159}
3160
3161/*
3162 * getdents_args(int fd, char *buf, size_t count)
3163 */
3164int
3165sys_getdents(struct getdents_args *uap)
3166{
3167 int error;
3168
3169 error = kern_getdirentries(uap->fd, uap->buf, uap->count, NULL,
3170 &uap->sysmsg_result, UIO_USERSPACE);
3171
3172 return (error);
3173}
3174
3175/*
3176 * umask(int newmask)
3177 *
3178 * Set the mode mask for creation of filesystem nodes.
3179 *
3180 * MP SAFE
3181 */
3182int
3183sys_umask(struct umask_args *uap)
3184{
3185 struct thread *td = curthread;
3186 struct proc *p = td->td_proc;
3187 struct filedesc *fdp;
3188
3189 fdp = p->p_fd;
3190 uap->sysmsg_result = fdp->fd_cmask;
3191 fdp->fd_cmask = uap->newmask & ALLPERMS;
3192 return (0);
3193}
3194
3195/*
3196 * revoke(char *path)
3197 *
3198 * Void all references to file by ripping underlying filesystem
3199 * away from vnode.
3200 */
3201/* ARGSUSED */
3202int
3203sys_revoke(struct revoke_args *uap)
3204{
3205 struct nlookupdata nd;
3206 struct vattr vattr;
3207 struct vnode *vp;
3208 struct ucred *cred;
3209 int error;
3210
3211 vp = NULL;
3212 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3213 if (error == 0)
3214 error = nlookup(&nd);
3215 if (error == 0)
3216 error = cache_vref(&nd.nl_nch, nd.nl_cred, &vp);
3217 cred = crhold(nd.nl_cred);
3218 nlookup_done(&nd);
3219 if (error == 0) {
3220 if (vp->v_type != VCHR && vp->v_type != VBLK)
3221 error = EINVAL;
3222 if (error == 0)
3223 error = VOP_GETATTR(vp, &vattr);
3224 if (error == 0 && cred->cr_uid != vattr.va_uid)
3225 error = suser_cred(cred, PRISON_ROOT);
3226 if (error == 0 && count_udev(vp->v_udev) > 0) {
3227 error = 0;
3228 vx_lock(vp);
3229 VOP_REVOKE(vp, REVOKEALL);
3230 vx_unlock(vp);
3231 }
3232 vrele(vp);
3233 }
3234 if (cred)
3235 crfree(cred);
3236 return (error);
3237}
3238
3239/*
3240 * getfh_args(char *fname, fhandle_t *fhp)
3241 *
3242 * Get (NFS) file handle
3243 */
3244int
3245sys_getfh(struct getfh_args *uap)
3246{
3247 struct thread *td = curthread;
3248 struct nlookupdata nd;
3249 fhandle_t fh;
3250 struct vnode *vp;
3251 int error;
3252
3253 /*
3254 * Must be super user
3255 */
3256 if ((error = suser(td)) != 0)
3257 return (error);
3258
3259 vp = NULL;
3260 error = nlookup_init(&nd, uap->fname, UIO_USERSPACE, NLC_FOLLOW);
3261 if (error == 0)
3262 error = nlookup(&nd);
3263 if (error == 0)
3264 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
3265 nlookup_done(&nd);
3266 if (error == 0) {
3267 bzero(&fh, sizeof(fh));
3268 fh.fh_fsid = vp->v_mount->mnt_stat.f_fsid;
3269 error = VFS_VPTOFH(vp, &fh.fh_fid);
3270 vput(vp);
3271 if (error == 0)
3272 error = copyout(&fh, uap->fhp, sizeof(fh));
3273 }
3274 return (error);
3275}
3276
3277/*
3278 * fhopen_args(const struct fhandle *u_fhp, int flags)
3279 *
3280 * syscall for the rpc.lockd to use to translate a NFS file handle into
3281 * an open descriptor.
3282 *
3283 * warning: do not remove the suser() call or this becomes one giant
3284 * security hole.
3285 */
3286int
3287sys_fhopen(struct fhopen_args *uap)
3288{
3289 struct thread *td = curthread;
3290 struct proc *p = td->td_proc;
3291 struct mount *mp;
3292 struct vnode *vp;
3293 struct fhandle fhp;
3294 struct vattr vat;
3295 struct vattr *vap = &vat;
3296 struct flock lf;
3297 int fmode, mode, error, type;
3298 struct file *nfp;
3299 struct file *fp;
3300 int indx;
3301
3302 /*
3303 * Must be super user
3304 */
3305 error = suser(td);
3306 if (error)
3307 return (error);
3308
3309 fmode = FFLAGS(uap->flags);
3310 /* why not allow a non-read/write open for our lockd? */
3311 if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT))
3312 return (EINVAL);
3313 error = copyin(uap->u_fhp, &fhp, sizeof(fhp));
3314 if (error)
3315 return(error);
3316 /* find the mount point */
3317 mp = vfs_getvfs(&fhp.fh_fsid);
3318 if (mp == NULL)
3319 return (ESTALE);
3320 /* now give me my vnode, it gets returned to me locked */
3321 error = VFS_FHTOVP(mp, &fhp.fh_fid, &vp);
3322 if (error)
3323 return (error);
3324 /*
3325 * from now on we have to make sure not
3326 * to forget about the vnode
3327 * any error that causes an abort must vput(vp)
3328 * just set error = err and 'goto bad;'.
3329 */
3330
3331 /*
3332 * from vn_open
3333 */
3334 if (vp->v_type == VLNK) {
3335 error = EMLINK;
3336 goto bad;
3337 }
3338 if (vp->v_type == VSOCK) {
3339 error = EOPNOTSUPP;
3340 goto bad;
3341 }
3342 mode = 0;
3343 if (fmode & (FWRITE | O_TRUNC)) {
3344 if (vp->v_type == VDIR) {
3345 error = EISDIR;
3346 goto bad;
3347 }
3348 error = vn_writechk(vp, NULL);
3349 if (error)
3350 goto bad;
3351 mode |= VWRITE;
3352 }
3353 if (fmode & FREAD)
3354 mode |= VREAD;
3355 if (mode) {
3356 error = VOP_ACCESS(vp, mode, p->p_ucred);
3357 if (error)
3358 goto bad;
3359 }
3360 if (fmode & O_TRUNC) {
3361 vn_unlock(vp); /* XXX */
3362 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX */
3363 VATTR_NULL(vap);
3364 vap->va_size = 0;
3365 error = VOP_SETATTR(vp, vap, p->p_ucred);
3366 if (error)
3367 goto bad;
3368 }
3369
3370 /*
3371 * VOP_OPEN needs the file pointer so it can potentially override
3372 * it.
3373 *
3374 * WARNING! no f_nchandle will be associated when fhopen()ing a
3375 * directory. XXX
3376 */
3377 if ((error = falloc(p, &nfp, &indx)) != 0)
3378 goto bad;
3379 fp = nfp;
3380
3381 error = VOP_OPEN(vp, fmode, p->p_ucred, fp);
3382 if (error) {
3383 /*
3384 * setting f_ops this way prevents VOP_CLOSE from being
3385 * called or fdrop() releasing the vp from v_data. Since
3386 * the VOP_OPEN failed we don't want to VOP_CLOSE.
3387 */
3388 fp->f_ops = &badfileops;
3389 fp->f_data = NULL;
3390 goto bad_drop;
3391 }
3392
3393 /*
3394 * The fp is given its own reference, we still have our ref and lock.
3395 *
3396 * Assert that all regular files must be created with a VM object.
3397 */
3398 if (vp->v_type == VREG && vp->v_object == NULL) {
3399 kprintf("fhopen: regular file did not have VM object: %p\n", vp);
3400 goto bad_drop;
3401 }
3402
3403 /*
3404 * The open was successful. Handle any locking requirements.
3405 */
3406 if (fmode & (O_EXLOCK | O_SHLOCK)) {
3407 lf.l_whence = SEEK_SET;
3408 lf.l_start = 0;
3409 lf.l_len = 0;
3410 if (fmode & O_EXLOCK)
3411 lf.l_type = F_WRLCK;
3412 else
3413 lf.l_type = F_RDLCK;
3414 if (fmode & FNONBLOCK)
3415 type = 0;
3416 else
3417 type = F_WAIT;
3418 vn_unlock(vp);
3419 if ((error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, type)) != 0) {
3420 /*
3421 * release our private reference.
3422 */
3423 fsetfd(p, NULL, indx);
3424 fdrop(fp);
3425 vrele(vp);
3426 return (error);
3427 }
3428 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3429 fp->f_flag |= FHASLOCK;
3430 }
3431
3432 /*
3433 * Clean up. Associate the file pointer with the previously
3434 * reserved descriptor and return it.
3435 */
3436 vput(vp);
3437 fsetfd(p, fp, indx);
3438 fdrop(fp);
3439 uap->sysmsg_result = indx;
3440 return (0);
3441
3442bad_drop:
3443 fsetfd(p, NULL, indx);
3444 fdrop(fp);
3445bad:
3446 vput(vp);
3447 return (error);
3448}
3449
3450/*
3451 * fhstat_args(struct fhandle *u_fhp, struct stat *sb)
3452 */
3453int
3454sys_fhstat(struct fhstat_args *uap)
3455{
3456 struct thread *td = curthread;
3457 struct stat sb;
3458 fhandle_t fh;
3459 struct mount *mp;
3460 struct vnode *vp;
3461 int error;
3462
3463 /*
3464 * Must be super user
3465 */
3466 error = suser(td);
3467 if (error)
3468 return (error);
3469
3470 error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t));
3471 if (error)
3472 return (error);
3473
3474 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
3475 return (ESTALE);
3476 if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
3477 return (error);
3478 error = vn_stat(vp, &sb, td->td_proc->p_ucred);
3479 vput(vp);
3480 if (error)
3481 return (error);
3482 error = copyout(&sb, uap->sb, sizeof(sb));
3483 return (error);
3484}
3485
3486/*
3487 * fhstatfs_args(struct fhandle *u_fhp, struct statfs *buf)
3488 */
3489int
3490sys_fhstatfs(struct fhstatfs_args *uap)
3491{
3492 struct thread *td = curthread;
3493 struct proc *p = td->td_proc;
3494 struct statfs *sp;
3495 struct mount *mp;
3496 struct vnode *vp;
3497 struct statfs sb;
3498 char *fullpath, *freepath;
3499 fhandle_t fh;
3500 int error;
3501
3502 /*
3503 * Must be super user
3504 */
3505 if ((error = suser(td)))
3506 return (error);
3507
3508 if ((error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t))) != 0)
3509 return (error);
3510
3511 if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL)
3512 return (ESTALE);
3513
3514 if (p != NULL && !chroot_visible_mnt(mp, p))
3515 return (ESTALE);
3516
3517 if ((error = VFS_FHTOVP(mp, &fh.fh_fid, &vp)))
3518 return (error);
3519 mp = vp->v_mount;
3520 sp = &mp->mnt_stat;
3521 vput(vp);
3522 if ((error = VFS_STATFS(mp, sp, p->p_ucred)) != 0)
3523 return (error);
3524
3525 error = cache_fullpath(p, &mp->mnt_ncmountpt, &fullpath, &freepath);
3526 if (error)
3527 return(error);
3528 bzero(sp->f_mntonname, sizeof(sp->f_mntonname));
3529 strlcpy(sp->f_mntonname, fullpath, sizeof(sp->f_mntonname));
3530 kfree(freepath, M_TEMP);
3531
3532 sp->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
3533 if (suser(td)) {
3534 bcopy(sp, &sb, sizeof(sb));
3535 sb.f_fsid.val[0] = sb.f_fsid.val[1] = 0;
3536 sp = &sb;
3537 }
3538 return (copyout(sp, uap->buf, sizeof(*sp)));
3539}
3540
3541/*
3542 * Syscall to push extended attribute configuration information into the
3543 * VFS. Accepts a path, which it converts to a mountpoint, as well as
3544 * a command (int cmd), and attribute name and misc data. For now, the
3545 * attribute name is left in userspace for consumption by the VFS_op.
3546 * It will probably be changed to be copied into sysspace by the
3547 * syscall in the future, once issues with various consumers of the
3548 * attribute code have raised their hands.
3549 *
3550 * Currently this is used only by UFS Extended Attributes.
3551 */
3552int
3553sys_extattrctl(struct extattrctl_args *uap)
3554{
3555 struct nlookupdata nd;
3556 struct mount *mp;
3557 struct vnode *vp;
3558 int error;
3559
3560 vp = NULL;
3561 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3562 if (error == 0)
3563 error = nlookup(&nd);
3564 if (error == 0) {
3565 mp = nd.nl_nch.mount;
3566 error = VFS_EXTATTRCTL(mp, uap->cmd,
3567 uap->attrname, uap->arg,
3568 nd.nl_cred);
3569 }
3570 nlookup_done(&nd);
3571 return (error);
3572}
3573
3574/*
3575 * Syscall to set a named extended attribute on a file or directory.
3576 * Accepts attribute name, and a uio structure pointing to the data to set.
3577 * The uio is consumed in the style of writev(). The real work happens
3578 * in VOP_SETEXTATTR().
3579 */
3580int
3581sys_extattr_set_file(struct extattr_set_file_args *uap)
3582{
3583 char attrname[EXTATTR_MAXNAMELEN];
3584 struct iovec aiov[UIO_SMALLIOV];
3585 struct iovec *needfree;
3586 struct nlookupdata nd;
3587 struct iovec *iov;
3588 struct vnode *vp;
3589 struct uio auio;
3590 u_int iovlen;
3591 u_int cnt;
3592 int error;
3593 int i;
3594
3595 error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
3596 if (error)
3597 return (error);
3598
3599 vp = NULL;
3600 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3601 if (error == 0)
3602 error = nlookup(&nd);
3603 if (error == 0)
3604 error = ncp_writechk(&nd.nl_nch);
3605 if (error == 0)
3606 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
3607 if (error) {
3608 nlookup_done(&nd);
3609 return (error);
3610 }
3611
3612 needfree = NULL;
3613 iovlen = uap->iovcnt * sizeof(struct iovec);
3614 if (uap->iovcnt > UIO_SMALLIOV) {
3615 if (uap->iovcnt > UIO_MAXIOV) {
3616 error = EINVAL;
3617 goto done;
3618 }
3619 MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
3620 needfree = iov;
3621 } else {
3622 iov = aiov;
3623 }
3624 auio.uio_iov = iov;
3625 auio.uio_iovcnt = uap->iovcnt;
3626 auio.uio_rw = UIO_WRITE;
3627 auio.uio_segflg = UIO_USERSPACE;
3628 auio.uio_td = nd.nl_td;
3629 auio.uio_offset = 0;
3630 if ((error = copyin(uap->iovp, iov, iovlen)))
3631 goto done;
3632 auio.uio_resid = 0;
3633 for (i = 0; i < uap->iovcnt; i++) {
3634 if (iov->iov_len > INT_MAX - auio.uio_resid) {
3635 error = EINVAL;
3636 goto done;
3637 }
3638 auio.uio_resid += iov->iov_len;
3639 iov++;
3640 }
3641 cnt = auio.uio_resid;
3642 error = VOP_SETEXTATTR(vp, attrname, &auio, nd.nl_cred);
3643 cnt -= auio.uio_resid;
3644 uap->sysmsg_result = cnt;
3645done:
3646 vput(vp);
3647 nlookup_done(&nd);
3648 if (needfree)
3649 FREE(needfree, M_IOV);
3650 return (error);
3651}
3652
3653/*
3654 * Syscall to get a named extended attribute on a file or directory.
3655 * Accepts attribute name, and a uio structure pointing to a buffer for the
3656 * data. The uio is consumed in the style of readv(). The real work
3657 * happens in VOP_GETEXTATTR();
3658 */
3659int
3660sys_extattr_get_file(struct extattr_get_file_args *uap)
3661{
3662 char attrname[EXTATTR_MAXNAMELEN];
3663 struct iovec aiov[UIO_SMALLIOV];
3664 struct iovec *needfree;
3665 struct nlookupdata nd;
3666 struct iovec *iov;
3667 struct vnode *vp;
3668 struct uio auio;
3669 u_int iovlen;
3670 u_int cnt;
3671 int error;
3672 int i;
3673
3674 error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
3675 if (error)
3676 return (error);
3677
3678 vp = NULL;
3679 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3680 if (error == 0)
3681 error = nlookup(&nd);
3682 if (error == 0)
3683 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
3684 if (error) {
3685 nlookup_done(&nd);
3686 return (error);
3687 }
3688
3689 iovlen = uap->iovcnt * sizeof (struct iovec);
3690 needfree = NULL;
3691 if (uap->iovcnt > UIO_SMALLIOV) {
3692 if (uap->iovcnt > UIO_MAXIOV) {
3693 error = EINVAL;
3694 goto done;
3695 }
3696 MALLOC(iov, struct iovec *, iovlen, M_IOV, M_WAITOK);
3697 needfree = iov;
3698 } else {
3699 iov = aiov;
3700 }
3701 auio.uio_iov = iov;
3702 auio.uio_iovcnt = uap->iovcnt;
3703 auio.uio_rw = UIO_READ;
3704 auio.uio_segflg = UIO_USERSPACE;
3705 auio.uio_td = nd.nl_td;
3706 auio.uio_offset = 0;
3707 if ((error = copyin(uap->iovp, iov, iovlen)))
3708 goto done;
3709 auio.uio_resid = 0;
3710 for (i = 0; i < uap->iovcnt; i++) {
3711 if (iov->iov_len > INT_MAX - auio.uio_resid) {
3712 error = EINVAL;
3713 goto done;
3714 }
3715 auio.uio_resid += iov->iov_len;
3716 iov++;
3717 }
3718 cnt = auio.uio_resid;
3719 error = VOP_GETEXTATTR(vp, attrname, &auio, nd.nl_cred);
3720 cnt -= auio.uio_resid;
3721 uap->sysmsg_result = cnt;
3722done:
3723 vput(vp);
3724 nlookup_done(&nd);
3725 if (needfree)
3726 FREE(needfree, M_IOV);
3727 return(error);
3728}
3729
3730/*
3731 * Syscall to delete a named extended attribute from a file or directory.
3732 * Accepts attribute name. The real work happens in VOP_SETEXTATTR().
3733 */
3734int
3735sys_extattr_delete_file(struct extattr_delete_file_args *uap)
3736{
3737 char attrname[EXTATTR_MAXNAMELEN];
3738 struct nlookupdata nd;
3739 struct vnode *vp;
3740 int error;
3741
3742 error = copyin(uap->attrname, attrname, EXTATTR_MAXNAMELEN);
3743 if (error)
3744 return(error);
3745
3746 vp = NULL;
3747 error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
3748 if (error == 0)
3749 error = nlookup(&nd);
3750 if (error == 0)
3751 error = ncp_writechk(&nd.nl_nch);
3752 if (error == 0)
3753 error = cache_vget(&nd.nl_nch, nd.nl_cred, LK_EXCLUSIVE, &vp);
3754 if (error) {
3755 nlookup_done(&nd);
3756 return (error);
3757 }
3758
3759 error = VOP_SETEXTATTR(vp, attrname, NULL, nd.nl_cred);
3760 vput(vp);
3761 nlookup_done(&nd);
3762 return(error);
3763}
3764
3765/*
3766 * Determine if the mount is visible to the process.
3767 */
3768static int
3769chroot_visible_mnt(struct mount *mp, struct proc *p)
3770{
3771 struct nchandle nch;
3772
3773 /*
3774 * Traverse from the mount point upwards. If we hit the process
3775 * root then the mount point is visible to the process.
3776 */
3777 nch = mp->mnt_ncmountpt;
3778 while (nch.ncp) {
3779 if (nch.mount == p->p_fd->fd_nrdir.mount &&
3780 nch.ncp == p->p_fd->fd_nrdir.ncp) {
3781 return(1);
3782 }
3783 if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
3784 nch = nch.mount->mnt_ncmounton;
3785 } else {
3786 nch.ncp = nch.ncp->nc_parent;
3787 }
3788 }
3789 return(0);
3790}
3791