 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 * @(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
 * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $
 * $DragonFly: src/sys/kern/kern_descrip.c,v 1.79 2008/08/31 13:18:28 aggelos Exp $
#include "opt_compat.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/device.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/nlookup.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/event.h>
#include <sys/kern_syscall.h>
#include <sys/kcore.h>
#include <sys/kinfo.h>
#include <vm/vm_extern.h>
#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/spinlock2.h>
static void fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd);
static void fdreserve_locked(struct filedesc *fdp, int fd0, int incr);
static struct file *funsetfd_locked(struct filedesc *fdp, int fd);
static int checkfpclosed(struct filedesc *fdp, int fd, struct file *fp);
static void ffree(struct file *fp);

static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
		"file desc to leader structures");
MALLOC_DEFINE(M_FILE, "file", "Open file structure");
static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");

static d_open_t fdopen;
#define CDEV_MAJOR 22
static struct dev_ops fildesc_ops = {
	{ "FD", CDEV_MAJOR, 0 },

 * Descriptor management.
static struct filelist filehead = LIST_HEAD_INITIALIZER(&filehead);
static struct spinlock filehead_spin = SPINLOCK_INITIALIZER(&filehead_spin);
static int nfiles;		/* actual number of open files */
 * Fixup fd_freefile and fd_lastfile after a descriptor has been cleared.
 *
 * MPSAFE - must be called with fdp->fd_spin exclusively held
fdfixup_locked(struct filedesc *fdp, int fd)
	if (fd < fdp->fd_freefile) {
		fdp->fd_freefile = fd;
	while (fdp->fd_lastfile >= 0 &&
	       fdp->fd_files[fdp->fd_lastfile].fp == NULL &&
	       fdp->fd_files[fdp->fd_lastfile].reserved == 0
 * System calls on descriptors.
sys_getdtablesize(struct getdtablesize_args *uap)
	struct proc *p = curproc;
	struct plimit *limit = p->p_limit;

	spin_lock_rd(&limit->p_spin);
	    min((int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
	spin_unlock_rd(&limit->p_spin);
 * Duplicate a file descriptor to a particular value.
 *
 * note: keep in mind that a potential race condition exists when closing
 * descriptors from a shared descriptor table (via rfork).
sys_dup2(struct dup2_args *uap)
	error = kern_dup(DUP_FIXED, uap->from, uap->to, &fd);
	uap->sysmsg_fds[0] = fd;

 * Duplicate a file descriptor.
sys_dup(struct dup_args *uap)
	error = kern_dup(DUP_VARIABLE, uap->fd, 0, &fd);
	uap->sysmsg_fds[0] = fd;
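/*
 * Illustrative summary (a sketch derived from the code in this file, not
 * part of the original comments): how the user-visible calls map onto
 * kern_dup() below.
 *
 *	dup(fd)			kern_dup(DUP_VARIABLE, fd, 0, &res)
 *	dup2(from, to)		kern_dup(DUP_FIXED, from, to, &res)
 *	fcntl(fd, F_DUPFD, min)	kern_dup(DUP_VARIABLE, fd, min, &res)
 */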
 * MPALMOSTSAFE - acquires mplock for fp operations
kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int tmp, error, flg = F_POSIX;

	 * Operations on file descriptors that do not require a file pointer.
		error = fgetfdflags(p->p_fd, fd, &tmp);
		dat->fc_cloexec = (tmp & UF_EXCLOSE) ? FD_CLOEXEC : 0;
		if (dat->fc_cloexec & FD_CLOEXEC)
			error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
			error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
		error = kern_dup(DUP_VARIABLE, fd, newmin, &dat->fc_fd);
	 * Operations on file pointers
	if ((fp = holdfp(p->p_fd, fd, -1)) == NULL)
		dat->fc_flags = OFLAGS(fp->f_flag);
		nflags = FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS;
		nflags |= oflags & ~FCNTLFLAGS;
		if (((nflags ^ oflags) & O_APPEND) && (oflags & FAPPENDONLY))
		if (error == 0 && ((nflags ^ oflags) & FASYNC)) {
			tmp = nflags & FASYNC;
			error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp,
		error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner,
		error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner,
		/* Fall into F_SETLK */
		if (fp->f_type != DTYPE_VNODE) {
		vp = (struct vnode *)fp->f_data;

		 * copyin/lockop may block
		if (dat->fc_flock.l_whence == SEEK_CUR)
			dat->fc_flock.l_start += fp->f_offset;
		switch (dat->fc_flock.l_type) {
			if ((fp->f_flag & FREAD) == 0) {
			p->p_leader->p_flag |= P_ADVLOCK;
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
			    &dat->fc_flock, flg);
			if ((fp->f_flag & FWRITE) == 0) {
			p->p_leader->p_flag |= P_ADVLOCK;
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
			    &dat->fc_flock, flg);
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
			    &dat->fc_flock, F_POSIX);

		 * It is possible to race a close() on the descriptor while
		 * we were blocked getting the lock.  If this occurs the
		 * close might not have caught the lock.
		if (checkfpclosed(p->p_fd, fd, fp)) {
			dat->fc_flock.l_whence = SEEK_SET;
			dat->fc_flock.l_start = 0;
			dat->fc_flock.l_len = 0;
			dat->fc_flock.l_type = F_UNLCK;
			(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
			    F_UNLCK, &dat->fc_flock, F_POSIX);
		if (fp->f_type != DTYPE_VNODE) {
		vp = (struct vnode *)fp->f_data;

		 * copyin/lockop may block
		if (dat->fc_flock.l_type != F_RDLCK &&
		    dat->fc_flock.l_type != F_WRLCK &&
		    dat->fc_flock.l_type != F_UNLCK) {
		if (dat->fc_flock.l_whence == SEEK_CUR)
			dat->fc_flock.l_start += fp->f_offset;
		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
		    &dat->fc_flock, F_POSIX);
 * The file control system call.
sys_fcntl(struct fcntl_args *uap)
		dat.fc_fd = uap->arg;
		dat.fc_cloexec = uap->arg;
		dat.fc_flags = uap->arg;
		dat.fc_owner = uap->arg;
		error = copyin((caddr_t)uap->arg, &dat.fc_flock,
		    sizeof(struct flock));

	error = kern_fcntl(uap->fd, uap->cmd, &dat, curproc->p_ucred);

		uap->sysmsg_result = dat.fc_fd;
		uap->sysmsg_result = dat.fc_cloexec;
		uap->sysmsg_result = dat.fc_flags;
		uap->sysmsg_result = dat.fc_owner;
		error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
		    sizeof(struct flock));
 * Common code for dup, dup2, and fcntl(F_DUPFD).
 *
 * The type flag can be either DUP_FIXED or DUP_VARIABLE.  DUP_FIXED tells
 * kern_dup() to destructively dup over an existing file descriptor if new
 * is already open.  DUP_VARIABLE tells kern_dup() to find the lowest
 * unused file descriptor that is greater than or equal to new.
kern_dup(enum dup_type type, int old, int new, int *res)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp = p->p_fd;

	 * Verify that we have a valid descriptor to dup from and
	 * possibly to dup to.
	spin_lock_wr(&fdp->fd_spin);
	if (new < 0 || new > p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
	    new >= maxfilesperproc) {
		spin_unlock_wr(&fdp->fd_spin);
	if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) {
		spin_unlock_wr(&fdp->fd_spin);
	if (type == DUP_FIXED && old == new) {
		spin_unlock_wr(&fdp->fd_spin);
	fp = fdp->fd_files[old].fp;
	oldflags = fdp->fd_files[old].fileflags;
	fhold(fp);	/* MPSAFE - can be called with a spinlock held */

	 * Allocate a new descriptor if DUP_VARIABLE, or expand the table
	 * if the requested descriptor is beyond the current table size.
	 *
	 * This can block.  Retry if the source descriptor no longer matches
	 * or if our expectation in the expansion case races.
	 *
	 * If we are not expanding or allocating a new descriptor, then reset
	 * the target descriptor to a reserved state so we have a uniform
	 * setup for the next code block.
	if (type == DUP_VARIABLE || new >= fdp->fd_nfiles) {
		spin_unlock_wr(&fdp->fd_spin);
		error = fdalloc(p, new, &newfd);
		spin_lock_wr(&fdp->fd_spin);
			spin_unlock_wr(&fdp->fd_spin);
		if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock_wr(&fdp->fd_spin);

		 * Check for expansion race
		if (type != DUP_VARIABLE && new != newfd) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock_wr(&fdp->fd_spin);

		 * Check for ripout, newfd reused old (this case probably
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock_wr(&fdp->fd_spin);

	if (fdp->fd_files[new].reserved) {
		spin_unlock_wr(&fdp->fd_spin);
		kprintf("Warning: dup(): target descriptor %d is reserved, waiting for it to be resolved\n", new);
		tsleep(fdp, 0, "fdres", hz);

	 * If the target descriptor was never allocated we have
	 * to allocate it.  If it was we have to clean out the
	 * old descriptor.  delfp inherits the ref from the
	delfp = fdp->fd_files[new].fp;
	fdp->fd_files[new].fp = NULL;
	fdp->fd_files[new].reserved = 1;
		fdreserve_locked(fdp, new, 1);
		if (new > fdp->fd_lastfile)
			fdp->fd_lastfile = new;

	 * NOTE: still holding an exclusive spinlock

	 * If a descriptor is being overwritten we may have to tell
	 * fdfree() to sleep to ensure that all relevant process
	 * leaders can be traversed in closef().
	if (delfp != NULL && p->p_fdtol != NULL) {
		fdp->fd_holdleaderscount++;
	KASSERT(delfp == NULL || type == DUP_FIXED,
	    ("dup() picked an open file"));

	 * Duplicate the source descriptor, update lastfile.  If the new
	 * descriptor was not allocated and we aren't replacing an existing
	 * descriptor we have to mark the descriptor as being in use.
	 *
	 * The fd_files[] array inherits fp's hold reference.
	fsetfd_locked(fdp, fp, new);
	fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE;
	spin_unlock_wr(&fdp->fd_spin);

	 * If we dup'd over a valid file, we now own the reference to it
	 * and must dispose of it using closef() semantics (as if a
	 * close() were performed on it).
		spin_lock_wr(&fdp->fd_spin);
		fdp->fd_holdleaderscount--;
		if (fdp->fd_holdleaderscount == 0 &&
		    fdp->fd_holdleaderswakeup != 0) {
			fdp->fd_holdleaderswakeup = 0;
			spin_unlock_wr(&fdp->fd_spin);
			wakeup(&fdp->fd_holdleaderscount);
		spin_unlock_wr(&fdp->fd_spin);
 * If sigio is on the list associated with a process or process group,
 * disable signalling from the device, remove sigio from the list and
funsetown(struct sigio *sigio)
	*(sigio->sio_myref) = NULL;
	if (sigio->sio_pgid < 0) {
		SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
	} else /* if ((*sigiop)->sio_pgid > 0) */ {
		SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
	crfree(sigio->sio_ucred);
	kfree(sigio, M_SIGIO);
/* Free a list of sigio structures. */
funsetownlst(struct sigiolst *sigiolst)
	while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
 *
 * After permission checking, add a sigio structure to the sigio list for
 * the process or process group.
fsetown(pid_t pgid, struct sigio **sigiop)
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		if (proc->p_session != curproc->p_session)
	} else /* if (pgid < 0) */ {
		pgrp = pgfind(-pgid);

		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		if (pgrp->pg_session != curproc->p_session)
	sigio = kmalloc(sizeof(struct sigio), M_SIGIO, M_WAITOK);
		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
		sigio->sio_proc = proc;
		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
		sigio->sio_pgrp = pgrp;
	sigio->sio_pgid = pgid;
	sigio->sio_ucred = crhold(curproc->p_ucred);
	/* It would be convenient if p_ruid was in ucred. */
	sigio->sio_ruid = curproc->p_ucred->cr_ruid;
	sigio->sio_myref = sigiop;

 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
fgetown(struct sigio *sigio)
	return (sigio != NULL ? sigio->sio_pgid : 0);
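#if 0
/*
 * Illustrative userland usage (a sketch, not part of this file): route
 * SIGIO for a descriptor to a process or process group via fcntl(2).
 * A positive argument names a process, a negative one a process group,
 * matching the sio_pgid convention above.
 */
#include <fcntl.h>
#include <unistd.h>

static void
sigio_owner_example(int fd)
{
	(void)fcntl(fd, F_SETOWN, getpid());	/* deliver SIGIO to us */
	(void)fcntl(fd, F_GETOWN);		/* read the owner back */
}
#endif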
 * Close many file descriptors.
sys_closefrom(struct closefrom_args *uap)
	return(kern_closefrom(uap->fd));

 * Close all file descriptors greater than or equal to fd
kern_closefrom(int fd)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;

	 * NOTE: This function will skip unassociated descriptors and
	 *	 reserved descriptors that have not yet been assigned.
	 *	 fd_lastfile can change as a side effect of kern_close().
	spin_lock_wr(&fdp->fd_spin);
	while (fd <= fdp->fd_lastfile) {
		if (fdp->fd_files[fd].fp != NULL) {
			spin_unlock_wr(&fdp->fd_spin);
			/* ok if this races another close */
			if (kern_close(fd) == EINTR)
			spin_lock_wr(&fdp->fd_spin);
	spin_unlock_wr(&fdp->fd_spin);
 * Close a file descriptor.
sys_close(struct close_args *uap)
	return(kern_close(uap->fd));

 * MPALMOSTSAFE - acquires mplock around knote_fdclose() calls
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;

	spin_lock_wr(&fdp->fd_spin);
	if ((fp = funsetfd_locked(fdp, fd)) == NULL) {
		spin_unlock_wr(&fdp->fd_spin);
	if (p->p_fdtol != NULL) {
		 * Ask fdfree() to sleep to ensure that all relevant
		 * process leaders can be traversed in closef().
		fdp->fd_holdleaderscount++;

	 * we now hold the fp reference that used to be owned by the descriptor
	spin_unlock_wr(&fdp->fd_spin);
	if (fd < fdp->fd_knlistsize) {
		if (fd < fdp->fd_knlistsize)
			knote_fdclose(p, fd);
	error = closef(fp, p);
		spin_lock_wr(&fdp->fd_spin);
		fdp->fd_holdleaderscount--;
		if (fdp->fd_holdleaderscount == 0 &&
		    fdp->fd_holdleaderswakeup != 0) {
			fdp->fd_holdleaderswakeup = 0;
			spin_unlock_wr(&fdp->fd_spin);
			wakeup(&fdp->fd_holdleaderscount);
		spin_unlock_wr(&fdp->fd_spin);
 * shutdown_args(int fd, int how)
kern_shutdown(int fd, int how)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	if ((fp = holdfp(p->p_fd, fd, -1)) == NULL)
	error = fo_shutdown(fp, how);

sys_shutdown(struct shutdown_args *uap)
	error = kern_shutdown(uap->s, uap->how);
kern_fstat(int fd, struct stat *ub)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	if ((fp = holdfp(p->p_fd, fd, -1)) == NULL)
	error = fo_stat(fp, ub, p->p_ucred);

 * Return status information about a file descriptor.
sys_fstat(struct fstat_args *uap)
	error = kern_fstat(uap->fd, &st);
	error = copyout(&st, uap->sb, sizeof(st));
 * Return pathconf information about a file descriptor.
sys_fpathconf(struct fpathconf_args *uap)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	if ((fp = holdfp(p->p_fd, uap->fd, -1)) == NULL)
	switch (fp->f_type) {
		if (uap->name != _PC_PIPE_BUF) {
		uap->sysmsg_result = PIPE_BUF;
		vp = (struct vnode *)fp->f_data;
		error = VOP_PATHCONF(vp, uap->name, &uap->sysmsg_reg);

SYSCTL_INT(_debug, OID_AUTO, fdexpand, CTLFLAG_RD, &fdexpand, 0, "");
 * Grow the file table so it can hold descriptors up through (want).
 *
 * The fdp's spinlock must be held exclusively on entry and may be held
 * exclusively on return.  The spinlock may be cycled by the routine.
fdgrow_locked(struct filedesc *fdp, int want)
	struct fdnode *newfiles;
	struct fdnode *oldfiles;

	/* nf has to be of the form 2^n - 1 */
	} while (nf <= want);
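	/*
	 * e.g. growing past a want of 100 leaves nf at 127 (2^7 - 1);
	 * the 2^n - 1 shape keeps fd_files[] a complete in-place binary
	 * tree for the descriptor allocation code below.
	 */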
	spin_unlock_wr(&fdp->fd_spin);
	newfiles = kmalloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK);
	spin_lock_wr(&fdp->fd_spin);

	 * We could have raced another extend while we were not holding
	if (fdp->fd_nfiles >= nf) {
		spin_unlock_wr(&fdp->fd_spin);
		kfree(newfiles, M_FILEDESC);
		spin_lock_wr(&fdp->fd_spin);

	 * Copy the existing ofile and ofileflags arrays
	 * and zero the new portion of each array.
	extra = nf - fdp->fd_nfiles;
	bcopy(fdp->fd_files, newfiles, fdp->fd_nfiles * sizeof(struct fdnode));
	bzero(&newfiles[fdp->fd_nfiles], extra * sizeof(struct fdnode));
	oldfiles = fdp->fd_files;
	fdp->fd_files = newfiles;
	fdp->fd_nfiles = nf;

	if (oldfiles != fdp->fd_builtin_files) {
		spin_unlock_wr(&fdp->fd_spin);
		kfree(oldfiles, M_FILEDESC);
		spin_lock_wr(&fdp->fd_spin);
 * Number of nodes in right subtree, including the root.
right_subtree_size(int n)
	return (n ^ (n | (n + 1)));

right_ancestor(int n)
	return (n | (n + 1));

left_ancestor(int n)
	return ((n & (n + 1)) - 1);
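#if 0
/*
 * Illustrative sketch (not compiled in): how the helpers above decompose
 * a descriptor index in the in-place binary tree.  For fd 5 (binary 101):
 *
 *	right_subtree_size(5) = 5 ^ (5 | 6) = 5 ^ 7 = 2
 *	right_ancestor(5)     = 5 | 6       = 7
 *	left_ancestor(5)      = (5 & 6) - 1 = 3
 */
static void
fd_tree_math_example(void)
{
	KKASSERT(right_subtree_size(5) == 2);
	KKASSERT(right_ancestor(5) == 7);
	KKASSERT(left_ancestor(5) == 3);
}
#endif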
 * Traverse the in-place binary tree bottom-up, adjusting the allocation
 * count so scans can determine where free descriptors are located.
 *
 * MPSAFE - caller must be holding an exclusive spinlock on fdp
fdreserve_locked(struct filedesc *fdp, int fd, int incr)
		fdp->fd_files[fd].allocated += incr;
		KKASSERT(fdp->fd_files[fd].allocated >= 0);
		fd = left_ancestor(fd);
 * Reserve a file descriptor for the process.  If no error occurs, the
 * caller MUST at some point call fsetfd() or assign a file pointer
 * or dispose of the reservation.
fdalloc(struct proc *p, int want, int *result)
	struct filedesc *fdp = p->p_fd;
	int fd, rsize, rsum, node, lim;

	spin_lock_rd(&p->p_limit->p_spin);
	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
	spin_unlock_rd(&p->p_limit->p_spin);

	spin_lock_wr(&fdp->fd_spin);
	if (want >= fdp->fd_nfiles)
		fdgrow_locked(fdp, want);

	 * Search for a free descriptor starting at the higher
	 * of want or fd_freefile.  If that fails, consider
	 * expanding the ofile array.
	 *
	 * NOTE! the 'allocated' field is a cumulative recursive allocation
	 * count.  If we happen to see a value of 0 then we can shortcut
	 * our search.  Otherwise we run through the tree going
	 * down branches we know have free descriptor(s) until we hit a
	 * leaf node.  The leaf node will be free but will not necessarily
	 * have an allocated field of 0.
	/* move up the tree looking for a subtree with a free node */
	for (fd = max(want, fdp->fd_freefile); fd < min(fdp->fd_nfiles, lim);
	     fd = right_ancestor(fd)) {
		if (fdp->fd_files[fd].allocated == 0)

		rsize = right_subtree_size(fd);
		if (fdp->fd_files[fd].allocated == rsize)
			continue;	/* right subtree full */

		 * Free fd is in the right subtree of the tree rooted at fd.
		 * Call that subtree R.  Look for the smallest (leftmost)
		 * subtree of R with an unallocated fd: continue moving
		 * down the left branch until encountering a full left
		 * subtree, then move to the right.
		for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
			rsum += fdp->fd_files[node].allocated;
			if (fdp->fd_files[fd].allocated == rsum + rsize) {
				fd = node;	/* move to the right */
				if (fdp->fd_files[node].allocated == 0)

	 * No space in current array.  Expand?
	if (fdp->fd_nfiles >= lim) {
		spin_unlock_wr(&fdp->fd_spin);
	fdgrow_locked(fdp, want);

	KKASSERT(fd < fdp->fd_nfiles);
	if (fd > fdp->fd_lastfile)
		fdp->fd_lastfile = fd;
	if (want <= fdp->fd_freefile)
		fdp->fd_freefile = fd;
	KKASSERT(fdp->fd_files[fd].fp == NULL);
	KKASSERT(fdp->fd_files[fd].reserved == 0);
	fdp->fd_files[fd].fileflags = 0;
	fdp->fd_files[fd].reserved = 1;
	fdreserve_locked(fdp, fd, 1);
	spin_unlock_wr(&fdp->fd_spin);
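#if 0
/*
 * Illustrative sketch (not compiled in) of the reservation protocol
 * described above: a successful fdalloc() MUST be followed by fsetfd()
 * to either associate a file pointer or return the slot to the pool.
 */
static int
fd_reserve_example(struct proc *p, struct file *fp)
{
	int fd, error;

	if ((error = fdalloc(p, 0, &fd)) != 0)
		return (error);
	if (fp != NULL)
		fsetfd(p, fp, fd);	/* associate the file pointer */
	else
		fsetfd(p, NULL, fd);	/* abandon the reservation */
	return (0);
}
#endif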
 * Check to see whether n user file descriptors
 * are available to the process p.
fdavail(struct proc *p, int n)
	struct filedesc *fdp = p->p_fd;
	struct fdnode *fdnode;

	spin_lock_rd(&p->p_limit->p_spin);
	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
	spin_unlock_rd(&p->p_limit->p_spin);

	spin_lock_rd(&fdp->fd_spin);
	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) {
		spin_unlock_rd(&fdp->fd_spin);
	last = min(fdp->fd_nfiles, lim);
	fdnode = &fdp->fd_files[fdp->fd_freefile];
	for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) {
		if (fdnode->fp == NULL && --n <= 0) {
			spin_unlock_rd(&fdp->fd_spin);
	spin_unlock_rd(&fdp->fd_spin);
 * Revoke open descriptors referencing (f_data, f_type)
 *
 * Any revoke executed within a prison is only able to
 * revoke descriptors for processes within that prison.
 *
 * Returns 0 on success or an error code.
struct fdrevoke_info {

static int fdrevoke_check_callback(struct file *fp, void *vinfo);
static int fdrevoke_proc_callback(struct proc *p, void *vinfo);

fdrevoke(void *f_data, short f_type, struct ucred *cred)
	struct fdrevoke_info info;

	bzero(&info, sizeof(info));
	error = falloc(NULL, &info.nfp, NULL);

	 * Scan the file pointer table once.  dups do not dup file pointers,
	 * only descriptors, so there is no leak.  Set FREVOKED on the fps
	allfiles_scan_exclusive(fdrevoke_check_callback, &info);

	 * If any fps were marked, track down the related descriptors
	 * and close them.  Any dup()s at this point will notice
	 * the FREVOKED already set in the fp and do the right thing.
	 *
	 * Any fps with non-zero msgcounts (aka sent over a unix-domain
	 * socket) bumped the intransit counter and will require a
	 * scan.  Races against fps leaving the socket are closed by
	 * the socket code checking for FREVOKED.
	allproc_scan(fdrevoke_proc_callback, &info);
	unp_revoke_gc(info.nfp);
 * Locate matching file pointers directly.
fdrevoke_check_callback(struct file *fp, void *vinfo)
	struct fdrevoke_info *info = vinfo;

	 * File pointers already flagged for revocation are skipped.
	if (fp->f_flag & FREVOKED)

	 * When revoking from within a prison, file pointers created outside
	 * of that prison, or file pointers without creds, cannot be revoked.
	if (info->cred->cr_prison &&
	    (fp->f_cred == NULL ||
	     info->cred->cr_prison != fp->f_cred->cr_prison)) {

	 * If the file pointer matches then mark it for revocation.  The
	 * flag is currently only used by unp_revoke_gc().
	 *
	 * info->count is a heuristic and can race in a SMP environment.
	if (info->data == fp->f_data && info->type == fp->f_type) {
		atomic_set_int(&fp->f_flag, FREVOKED);
		info->count += fp->f_count;
 * Locate matching file pointers via process descriptor tables.
fdrevoke_proc_callback(struct proc *p, void *vinfo)
	struct fdrevoke_info *info = vinfo;
	struct filedesc *fdp;

	if (p->p_stat == SIDL || p->p_stat == SZOMB)
	if (info->cred->cr_prison &&
	    info->cred->cr_prison != p->p_ucred->cr_prison) {

	 * If the controlling terminal of the process matches the
	 * vnode being revoked we clear the controlling terminal.
	 *
	 * The normal spec_close() may not catch this because it
	 * uses curproc instead of p.
	if (p->p_session && info->type == DTYPE_VNODE &&
	    info->data == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;

	 * Softref the fdp to prevent it from being destroyed
	spin_lock_wr(&p->p_spin);
	if ((fdp = p->p_fd) == NULL) {
		spin_unlock_wr(&p->p_spin);
	atomic_add_int(&fdp->fd_softrefs, 1);
	spin_unlock_wr(&p->p_spin);

	 * Locate and close any matching file descriptors.
	spin_lock_wr(&fdp->fd_spin);
	for (n = 0; n < fdp->fd_nfiles; ++n) {
		if ((fp = fdp->fd_files[n].fp) == NULL)
		if (fp->f_flag & FREVOKED) {
			fdp->fd_files[n].fp = info->nfp;
			spin_unlock_wr(&fdp->fd_spin);
			spin_lock_wr(&fdp->fd_spin);
	spin_unlock_wr(&fdp->fd_spin);
	atomic_subtract_int(&fdp->fd_softrefs, 1);
 * Create a new open file structure and reserve a file descriptor
 * for the process that refers to it.
 *
 * Root creds are checked using p, or assumed if p is NULL.  If
 * resultfd is non-NULL then p must also be non-NULL.  No file
 * descriptor is reserved if resultfd is NULL.
 *
 * A file pointer with a refcount of 1 is returned.  Note that the
 * file pointer is NOT associated with the descriptor.  If falloc
 * returns success, fsetfd() MUST be called to either associate the
 * file pointer or clear the reservation.
falloc(struct proc *p, struct file **resultfp, int *resultfd)
	static struct timeval lastfail;

	 * Handle filetable full issues and root overfill.
	if (nfiles >= maxfiles - maxfilesrootres &&
	    ((p && p->p_ucred->cr_ruid != 0) || nfiles >= maxfiles)) {
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			kprintf("kern.maxfiles limit exceeded by uid %d, please see tuning(7).\n",
			    (p ? p->p_ucred->cr_ruid : -1));

	 * Allocate a new file descriptor.
	fp = kmalloc(sizeof(struct file), M_FILE, M_WAITOK | M_ZERO);
	spin_init(&fp->f_spin);
	fp->f_ops = &badfileops;
		fp->f_cred = crhold(p->p_ucred);
		fp->f_cred = crhold(proc0.p_ucred);
	spin_lock_wr(&filehead_spin);
	LIST_INSERT_HEAD(&filehead, fp, f_list);
	spin_unlock_wr(&filehead_spin);
	if ((error = fdalloc(p, 0, resultfd)) != 0) {
checkfpclosed(struct filedesc *fdp, int fd, struct file *fp)
	spin_lock_rd(&fdp->fd_spin);
	if ((unsigned)fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp)
	spin_unlock_rd(&fdp->fd_spin);
 * Associate a file pointer with a previously reserved file descriptor.
 * This function always succeeds.
 *
 * If fp is NULL, the file descriptor is returned to the pool.
 *
 * MPSAFE (exclusive spinlock must be held on call)
fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd)
	KKASSERT((unsigned)fd < fdp->fd_nfiles);
	KKASSERT(fdp->fd_files[fd].reserved != 0);
		fdp->fd_files[fd].fp = fp;
		fdp->fd_files[fd].reserved = 0;
		if (fp->f_type == DTYPE_KQUEUE) {
			if (fdp->fd_knlistsize < 0)
				fdp->fd_knlistsize = 0;
		fdp->fd_files[fd].reserved = 0;
		fdreserve_locked(fdp, fd, -1);
		fdfixup_locked(fdp, fd);

fsetfd(struct proc *p, struct file *fp, int fd)
	struct filedesc *fdp = p->p_fd;

	spin_lock_wr(&fdp->fd_spin);
	fsetfd_locked(fdp, fp, fd);
	spin_unlock_wr(&fdp->fd_spin);
 * MPSAFE (exclusive spinlock must be held on call)
funsetfd_locked(struct filedesc *fdp, int fd)
	if ((unsigned)fd >= fdp->fd_nfiles)
	if ((fp = fdp->fd_files[fd].fp) == NULL)
	fdp->fd_files[fd].fp = NULL;
	fdp->fd_files[fd].fileflags = 0;
	fdreserve_locked(fdp, fd, -1);
	fdfixup_locked(fdp, fd);
fgetfdflags(struct filedesc *fdp, int fd, int *flagsp)
	spin_lock_rd(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
	} else if (fdp->fd_files[fd].fp == NULL) {
		*flagsp = fdp->fd_files[fd].fileflags;
	spin_unlock_rd(&fdp->fd_spin);

fsetfdflags(struct filedesc *fdp, int fd, int add_flags)
	spin_lock_wr(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
	} else if (fdp->fd_files[fd].fp == NULL) {
		fdp->fd_files[fd].fileflags |= add_flags;
	spin_unlock_wr(&fdp->fd_spin);

fclrfdflags(struct filedesc *fdp, int fd, int rem_flags)
	spin_lock_wr(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
	} else if (fdp->fd_files[fd].fp == NULL) {
		fdp->fd_files[fd].fileflags &= ~rem_flags;
	spin_unlock_wr(&fdp->fd_spin);
fsetcred(struct file *fp, struct ucred *cr)

 * Free a file structure.
ffree(struct file *fp)
	KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
	spin_lock_wr(&filehead_spin);
	LIST_REMOVE(fp, f_list);
	spin_unlock_wr(&filehead_spin);
	if (fp->f_nchandle.ncp)
		cache_drop(&fp->f_nchandle);
 * called from init_main, initialize filedesc0 for proc0.
fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask)
	fdp0->fd_refcnt = 1;
	fdp0->fd_cmask = cmask;
	fdp0->fd_files = fdp0->fd_builtin_files;
	fdp0->fd_nfiles = NDFILE;
	fdp0->fd_lastfile = -1;
	spin_init(&fdp0->fd_spin);
 * Build a new filedesc structure.
fdinit(struct proc *p)
	struct filedesc *newfdp;
	struct filedesc *fdp = p->p_fd;

	newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO);
	spin_lock_rd(&fdp->fd_spin);
	newfdp->fd_cdir = fdp->fd_cdir;
	vref(newfdp->fd_cdir);
	cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);

	 * rdir may not be set in e.g. proc0 or anything vm_fork'd off of
	 * proc0, but should unconditionally exist in other processes.
		newfdp->fd_rdir = fdp->fd_rdir;
		vref(newfdp->fd_rdir);
		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
		newfdp->fd_jdir = fdp->fd_jdir;
		vref(newfdp->fd_jdir);
		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
	spin_unlock_rd(&fdp->fd_spin);

	/* Create the file descriptor table. */
	newfdp->fd_refcnt = 1;
	newfdp->fd_cmask = cmask;
	newfdp->fd_files = newfdp->fd_builtin_files;
	newfdp->fd_nfiles = NDFILE;
	newfdp->fd_knlistsize = -1;
	newfdp->fd_lastfile = -1;
	spin_init(&newfdp->fd_spin);
 * Share a filedesc structure.
fdshare(struct proc *p)
	struct filedesc *fdp;

	spin_lock_wr(&fdp->fd_spin);
	spin_unlock_wr(&fdp->fd_spin);
 * Copy a filedesc structure.
fdcopy(struct proc *p)
	struct filedesc *fdp = p->p_fd;
	struct filedesc *newfdp;
	struct fdnode *fdnode;

	 * Certain daemons might not have file descriptors.

	 * Allocate the new filedesc and fd_files[] array.  This can race
	 * with operations by other threads on the fdp so we have to be
	newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK | M_ZERO);

	spin_lock_rd(&fdp->fd_spin);
	if (fdp->fd_lastfile < NDFILE) {
		newfdp->fd_files = newfdp->fd_builtin_files;

		 * We have to allocate (2^N-1) entries for our in-place
		 * binary tree.  Allow the table to shrink.
		while (ni > fdp->fd_lastfile && ni > NDFILE) {
		spin_unlock_rd(&fdp->fd_spin);
		newfdp->fd_files = kmalloc(i * sizeof(struct fdnode),
		    M_FILEDESC, M_WAITOK | M_ZERO);

		 * Check for race, retry
		spin_lock_rd(&fdp->fd_spin);
		if (i <= fdp->fd_lastfile) {
			spin_unlock_rd(&fdp->fd_spin);
			kfree(newfdp->fd_files, M_FILEDESC);

	 * Dup the remaining fields.  vref() and cache_hold() can be
	 * safely called while holding the read spinlock on fdp.
	 *
	 * The read spinlock on fdp is still being held.
	 *
	 * NOTE: vref and cache_hold calls for the case where the vnode
	 * or cache entry already has at least one ref may be called
	 * while holding spin locks.
	if ((newfdp->fd_cdir = fdp->fd_cdir) != NULL) {
		vref(newfdp->fd_cdir);
		cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);

	 * We must check for fd_rdir here, at least for now because
	 * the init process is created before we have access to the
	 * rootvnode to take a reference to it.
	if ((newfdp->fd_rdir = fdp->fd_rdir) != NULL) {
		vref(newfdp->fd_rdir);
		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
	if ((newfdp->fd_jdir = fdp->fd_jdir) != NULL) {
		vref(newfdp->fd_jdir);
		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
	newfdp->fd_refcnt = 1;
	newfdp->fd_nfiles = i;
	newfdp->fd_lastfile = fdp->fd_lastfile;
	newfdp->fd_freefile = fdp->fd_freefile;
	newfdp->fd_cmask = fdp->fd_cmask;
	newfdp->fd_knlist = NULL;
	newfdp->fd_knlistsize = -1;
	newfdp->fd_knhash = NULL;
	newfdp->fd_knhashmask = 0;
	spin_init(&newfdp->fd_spin);

	 * Copy the descriptor table through (i).  This also copies the
	 * allocation state.  Then go through and ref the file pointers
	 * and clean up any KQ descriptors.
	 *
	 * kq descriptors cannot be copied.  Since we haven't ref'd the
	 * copied files yet we can ignore the return value from funsetfd().
	 *
	 * The read spinlock on fdp is still being held.
	bcopy(fdp->fd_files, newfdp->fd_files, i * sizeof(struct fdnode));
	for (i = 0; i < newfdp->fd_nfiles; ++i) {
		fdnode = &newfdp->fd_files[i];
		if (fdnode->reserved) {
			fdreserve_locked(newfdp, i, -1);
			fdnode->reserved = 0;
			fdfixup_locked(newfdp, i);
		} else if (fdnode->fp) {
			if (fdnode->fp->f_type == DTYPE_KQUEUE) {
				(void)funsetfd_locked(newfdp, i);
	spin_unlock_rd(&fdp->fd_spin);
 * Release a filedesc structure.
 *
 * NOT MPSAFE (MPSAFE for refs > 1, but the final cleanup code is not MPSAFE)
fdfree(struct proc *p, struct filedesc *repl)
	struct filedesc *fdp;
	struct fdnode *fdnode;
	struct filedesc_to_leader *fdtol;

	 * Certain daemons might not have file descriptors.

	 * Severe messing around to follow.
	spin_lock_wr(&fdp->fd_spin);

	/* Check for special need to clear POSIX style locks */
	if (fdtol != NULL) {
		KASSERT(fdtol->fdl_refcount > 0,
		    ("filedesc_to_refcount botch: fdl_refcount=%d",
		     fdtol->fdl_refcount));
		if (fdtol->fdl_refcount == 1 &&
		    (p->p_leader->p_flag & P_ADVLOCK) != 0) {
			for (i = 0; i <= fdp->fd_lastfile; ++i) {
				fdnode = &fdp->fd_files[i];
				if (fdnode->fp == NULL ||
				    fdnode->fp->f_type != DTYPE_VNODE) {
				spin_unlock_wr(&fdp->fd_spin);
				lf.l_whence = SEEK_SET;
				lf.l_type = F_UNLCK;
				vp = (struct vnode *)fp->f_data;
				(void) VOP_ADVLOCK(vp,
				    (caddr_t)p->p_leader,
				spin_lock_wr(&fdp->fd_spin);
		if (fdtol->fdl_refcount == 1) {
			if (fdp->fd_holdleaderscount > 0 &&
			    (p->p_leader->p_flag & P_ADVLOCK) != 0) {
				 * close() or do_dup() has cleared a reference
				 * in a shared file descriptor table.
				fdp->fd_holdleaderswakeup = 1;
				ssleep(&fdp->fd_holdleaderscount,
				    &fdp->fd_spin, 0, "fdlhold", 0);
			if (fdtol->fdl_holdcount > 0) {
				 * Ensure that fdtol->fdl_leader
				 * remains valid in closef().
				fdtol->fdl_wakeup = 1;
				ssleep(fdtol, &fdp->fd_spin, 0, "fdlhold", 0);
		fdtol->fdl_refcount--;
		if (fdtol->fdl_refcount == 0 &&
		    fdtol->fdl_holdcount == 0) {
			fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
			fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
		if (fdtol != NULL) {
			spin_unlock_wr(&fdp->fd_spin);
			kfree(fdtol, M_FILEDESC_TO_LEADER);
			spin_lock_wr(&fdp->fd_spin);
	if (--fdp->fd_refcnt > 0) {
		spin_unlock_wr(&fdp->fd_spin);
		spin_lock_wr(&p->p_spin);
		spin_unlock_wr(&p->p_spin);

	 * Even though we are the last reference to the structure allproc
	 * scans may still reference the structure.  Maintain proper
	 * locks until we can replace p->p_fd.
	 *
	 * Also note that kqueue's closef still needs to reference the
	 * fdp via p->p_fd, so we have to close the descriptors before
	 * we replace p->p_fd.
	for (i = 0; i <= fdp->fd_lastfile; ++i) {
		if (fdp->fd_files[i].fp) {
			fp = funsetfd_locked(fdp, i);
				spin_unlock_wr(&fdp->fd_spin);
				spin_lock_wr(&fdp->fd_spin);
	spin_unlock_wr(&fdp->fd_spin);

	 * Interlock against allproc scan operations (typically frevoke).
	spin_lock_wr(&p->p_spin);
	spin_unlock_wr(&p->p_spin);

	 * Wait for any softrefs to go away.  This race rarely occurs so
	 * we can use a non-critical-path style poll/sleep loop.  The
	 * race only occurs against allproc scans.
	 *
	 * No new softrefs can occur with the fdp disconnected from the
	if (fdp->fd_softrefs) {
		kprintf("pid %d: Warning, fdp race avoided\n", p->p_pid);
		while (fdp->fd_softrefs)
			tsleep(&fdp->fd_softrefs, 0, "fdsoft", 1);

	if (fdp->fd_files != fdp->fd_builtin_files)
		kfree(fdp->fd_files, M_FILEDESC);
		cache_drop(&fdp->fd_ncdir);
		vrele(fdp->fd_cdir);
		cache_drop(&fdp->fd_nrdir);
		vrele(fdp->fd_rdir);
		cache_drop(&fdp->fd_njdir);
		vrele(fdp->fd_jdir);
		kfree(fdp->fd_knlist, M_KQUEUE);
		kfree(fdp->fd_knhash, M_KQUEUE);
	kfree(fdp, M_FILEDESC);
 * Retrieve and reference the file pointer associated with a descriptor.
holdfp(struct filedesc *fdp, int fd, int flag)
	spin_lock_rd(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
	if ((fp = fdp->fd_files[fd].fp) == NULL)
	if ((fp->f_flag & flag) == 0 && flag != -1) {
	spin_unlock_rd(&fdp->fd_spin);
 * holdsock() - load the struct file pointer associated
 * with a socket into *fpp.  If an error occurs, non-zero
 * will be returned and *fpp will be set to NULL.
holdsock(struct filedesc *fdp, int fd, struct file **fpp)
	spin_lock_rd(&fdp->fd_spin);
	if ((unsigned)fd >= fdp->fd_nfiles) {
	if ((fp = fdp->fd_files[fd].fp) == NULL) {
	if (fp->f_type != DTYPE_SOCKET) {
	spin_unlock_rd(&fdp->fd_spin);
 * Convert a user file descriptor to a held file pointer.
holdvnode(struct filedesc *fdp, int fd, struct file **fpp)
	spin_lock_rd(&fdp->fd_spin);
	if ((unsigned)fd >= fdp->fd_nfiles) {
	if ((fp = fdp->fd_files[fd].fp) == NULL) {
	if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
	spin_unlock_rd(&fdp->fd_spin);
 * For setugid programs, we don't want people to use that setugidness
 * to generate error messages which write to a file which would otherwise
 * be off-limits to the process.
 *
 * This is a gross hack to plug the hole.  A better solution would involve
 * a special vop or other form of generalized access control mechanism.  We
 * go ahead and just reject all procfs file systems accesses as dangerous.
 *
 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
 * sufficient.  We also don't check for setugidness since we know we are.
is_unsafe(struct file *fp)
	if (fp->f_type == DTYPE_VNODE &&
	    ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
 * Make this setugid thing safe, if at all possible.
 *
 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
setugidsafety(struct proc *p)
	struct filedesc *fdp = p->p_fd;

	/* Certain daemons might not have file descriptors. */

	 * note: fdp->fd_files may be reallocated out from under us while
	 * we are blocked in a close.  Be careful!
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (fdp->fd_files[i].fp && is_unsafe(fdp->fd_files[i].fp)) {
			if (i < fdp->fd_knlistsize)
				knote_fdclose(p, i);

			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			if ((fp = funsetfd_locked(fdp, i)) != NULL)
 * Close any files on exec?
 *
 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
fdcloseexec(struct proc *p)
	struct filedesc *fdp = p->p_fd;

	/* Certain daemons might not have file descriptors. */

	 * We cannot cache fd_files since operations may block and rip
	 * them out from under us.
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (fdp->fd_files[i].fp != NULL &&
		    (fdp->fd_files[i].fileflags & UF_EXCLOSE)) {
			if (i < fdp->fd_knlistsize)
				knote_fdclose(p, i);

			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			if ((fp = funsetfd_locked(fdp, i)) != NULL)
 * It is unsafe for set[ug]id processes to be started with file
 * descriptors 0..2 closed, as these descriptors are given implicit
 * significance in the Standard C library.  fdcheckstd() will create a
 * descriptor referencing /dev/null for each of stdin, stdout, and
 * stderr that is not already open.
 *
 * NOT MPSAFE - calls falloc, vn_open, etc
fdcheckstd(struct proc *p)
	struct nlookupdata nd;
	struct filedesc *fdp;
	int i, error, flags, devnull;

	for (i = 0; i < 3; i++) {
		if (fdp->fd_files[i].fp != NULL)
		if ((error = falloc(p, &fp, &devnull)) != 0)
		error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE,
		    NLC_FOLLOW|NLC_LOCKVP);
		flags = FREAD | FWRITE;
		error = vn_open(&nd, fp, flags, 0);
			fsetfd(p, fp, devnull);
			fsetfd(p, NULL, devnull);
		KKASSERT(i == devnull);
		error = kern_dup(DUP_FIXED, devnull, i, &retval);
 * Internal form of close.
 * Decrement reference count on file structure.
 * Note: td and/or p may be NULL when closing a file
 * that was being passed in a message.
 *
 * MPALMOSTSAFE - acquires mplock for VOP operations
closef(struct file *fp, struct proc *p)
	struct filedesc_to_leader *fdtol;

	 * POSIX record locking dictates that any close releases ALL
	 * locks owned by this process.  This is handled by setting
	 * a flag in the unlock to free ONLY locks obeying POSIX
	 * semantics, and not to free BSD-style file locks.
	 * If the descriptor was in a message, POSIX-style locks
	 * aren't passed with the descriptor.
	if (p != NULL && fp->f_type == DTYPE_VNODE &&
	    (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
		if ((p->p_leader->p_flag & P_ADVLOCK) != 0) {
			lf.l_whence = SEEK_SET;
			lf.l_type = F_UNLCK;
			vp = (struct vnode *)fp->f_data;
			(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
		if (fdtol != NULL) {
			 * Handle special case where file descriptor table
			 * is shared between multiple process leaders.
			for (fdtol = fdtol->fdl_next;
			     fdtol != p->p_fdtol;
			     fdtol = fdtol->fdl_next) {
				if ((fdtol->fdl_leader->p_flag &
				fdtol->fdl_holdcount++;
				lf.l_whence = SEEK_SET;
				lf.l_type = F_UNLCK;
				vp = (struct vnode *)fp->f_data;
				(void) VOP_ADVLOCK(vp,
				    (caddr_t)fdtol->fdl_leader,
				    F_UNLCK, &lf, F_POSIX);
				fdtol->fdl_holdcount--;
				if (fdtol->fdl_holdcount == 0 &&
				    fdtol->fdl_wakeup != 0) {
					fdtol->fdl_wakeup = 0;
 * fhold() can only be called if f_count is already at least 1 (i.e. the
 * caller of fhold() already has a reference to the file pointer in some
 *
 * f_count is not spin-locked.  Instead, atomic ops are used for
 * incrementing, decrementing, and handling the 1->0 transition.
fhold(struct file *fp)
	atomic_add_int(&fp->f_count, 1);

 * fdrop() - drop a reference to a descriptor
 *
 * MPALMOSTSAFE - acquires mplock for final close sequence
fdrop(struct file *fp)
	 * A combined fetch and subtract is needed to properly detect
	 * 1->0 transitions, otherwise two cpus dropping from a ref
	 * count of 2 might both try to run the 1->0 code.
	if (atomic_fetchadd_int(&fp->f_count, -1) > 1)

	 * The last reference has gone away, we own the fp structure free
	if (fp->f_count < 0)
		panic("fdrop: count < 0");
	if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE &&
	    (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
		lf.l_whence = SEEK_SET;
		lf.l_type = F_UNLCK;
		vp = (struct vnode *)fp->f_data;
		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
	if (fp->f_ops != &badfileops)
		error = fo_close(fp);
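#if 0
/*
 * Illustrative sketch (not compiled in) of the fetch-and-subtract idiom
 * used by fdrop() above: atomic_fetchadd_int() returns the counter value
 * prior to the add, so exactly one cpu can observe the 1->0 transition.
 */
static int
refcount_drop_example(struct file *fp)
{
	if (atomic_fetchadd_int(&fp->f_count, -1) == 1) {
		/* we performed the 1->0 transition, run the final close */
		return (1);
	}
	return (0);
}
#endif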
 * Apply an advisory lock on a file descriptor.
 *
 * Just attempt to get a record lock of the requested type on
 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
sys_flock(struct flock_args *uap)
	struct proc *p = curproc;

	if ((fp = holdfp(p->p_fd, uap->fd, -1)) == NULL)
	if (fp->f_type != DTYPE_VNODE) {
	vp = (struct vnode *)fp->f_data;
	lf.l_whence = SEEK_SET;
	if (uap->how & LOCK_UN) {
		lf.l_type = F_UNLCK;
		fp->f_flag &= ~FHASLOCK;
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
	if (uap->how & LOCK_EX)
		lf.l_type = F_WRLCK;
	else if (uap->how & LOCK_SH)
		lf.l_type = F_RDLCK;
	fp->f_flag |= FHASLOCK;
	if (uap->how & LOCK_NB)
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 0);
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_WAIT);
 * File Descriptor pseudo-device driver (/dev/fd/).
 *
 * Opening minor device N dup()s the file (if any) connected to file
 * descriptor N belonging to the calling process.  Note that this driver
 * consists of only the ``open()'' routine, because all subsequent
 * references to this file will be direct to the other driver.
fdopen(struct dev_open_args *ap)
	thread_t td = curthread;

	KKASSERT(td->td_lwp != NULL);

	 * XXX Kludge: set curlwp->lwp_dupfd to contain the value of
	 * the file descriptor being sought for duplication.  The error
	 * return ensures that the vnode for this device will be released
	 * by vn_open.  Open will detect this special error and take the
	 * actions in dupfdopen below.  Other callers of vn_open or VOP_OPEN
	 * will simply report the error.
	td->td_lwp->lwp_dupfd = minor(ap->a_head.a_dev);
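#if 0
/*
 * Illustrative userland usage (a sketch, not part of the kernel source):
 * opening /dev/fd/N behaves like dup(N) in the opening process, subject
 * to the mode-subset check in dupfdopen() below.
 */
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/dev/fd/0", O_RDONLY);	/* effectively dup(0) */

	if (fd >= 0)
		close(fd);
	return (0);
}
#endif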
 * The caller has reserved the file descriptor dfd for us.  On success we
 * must fsetfd() it.  On failure the caller will clean it up.
 *
 * NOT MPSAFE - isn't getting spinlocks, possibly other things
dupfdopen(struct proc *p, int dfd, int sfd, int mode, int error)
	struct filedesc *fdp = p->p_fd;

	if ((wfp = holdfp(fdp, sfd, -1)) == NULL)

	 * Close a revoke/dup race.  Duping a descriptor marked as revoked
	 * will dup a dummy descriptor instead of the real one.
	if (wfp->f_flag & FREVOKED) {
		kprintf("Warning: attempt to dup() a revoked descriptor\n");
		werror = falloc(NULL, &wfp, NULL);

	 * There are two cases of interest here.
	 *
	 * For ENODEV simply dup sfd to file descriptor dfd and return.
	 *
	 * For ENXIO steal away the file structure from sfd and store it
	 * in dfd.  sfd is effectively closed by this operation.
	 *
	 * Any other error code is just returned.
		 * Check that the mode the file is being opened for is a
		 * subset of the mode of the existing descriptor.
		if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
		fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
		fsetfd(p, wfp, dfd);

		 * Steal away the file pointer from sfd and stuff it into dfd.
		fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
		fsetfd(p, wfp, dfd);
		if ((xfp = funsetfd_locked(fdp, sfd)) != NULL)
 * NOT MPSAFE - I think these refer to a common file descriptor table
 * and we need to spinlock that to link fdtol in.
struct filedesc_to_leader *
filedesc_to_leader_alloc(struct filedesc_to_leader *old,
			 struct proc *leader)
	struct filedesc_to_leader *fdtol;

	fdtol = kmalloc(sizeof(struct filedesc_to_leader),
	    M_FILEDESC_TO_LEADER, M_WAITOK);
	fdtol->fdl_refcount = 1;
	fdtol->fdl_holdcount = 0;
	fdtol->fdl_wakeup = 0;
	fdtol->fdl_leader = leader;
		fdtol->fdl_next = old->fdl_next;
		fdtol->fdl_prev = old;
		old->fdl_next = fdtol;
		fdtol->fdl_next->fdl_prev = fdtol;
		fdtol->fdl_next = fdtol;
		fdtol->fdl_prev = fdtol;
 * Scan all file pointers in the system.  The callback is made with
 * the master list spinlock held exclusively.
allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data)
	spin_lock_wr(&filehead_spin);
	LIST_FOREACH(fp, &filehead, f_list) {
		res = callback(fp, data);
	spin_unlock_wr(&filehead_spin);
 * Get file structures.
 *
 * NOT MPSAFE - process list scan, SYSCTL_OUT (probably not mpsafe)
struct sysctl_kern_file_info {
	struct sysctl_req *req;

static int sysctl_kern_file_callback(struct proc *p, void *data);

sysctl_kern_file(SYSCTL_HANDLER_ARGS)
	struct sysctl_kern_file_info info;

	 * Note: because the number of file descriptors is calculated
	 * in different ways for sizing vs returning the data,
	 * there is information leakage from the first loop.  However,
	 * it is of a similar order of magnitude to the leakage from
	 * global system statistics such as kern.openfiles.
	 *
	 * When just doing a count, note that we cannot just count
	 * the elements and add f_count via the filehead list because
	 * threaded processes share their descriptor table and f_count might
	 * still be '1' in that case.
	 *
	 * Since the SYSCTL op can block, we must hold the process to
	 * prevent it being ripped out from under us either in the
	 * file descriptor loop or in the greater LIST_FOREACH.  The
	 * process may be in varying states of disrepair.  If the process
	 * is in SZOMB we may have caught it just as it is being removed
	 * from the allproc list, we must skip it in that case to maintain
	 * an unbroken chain through the allproc list.
	allproc_scan(sysctl_kern_file_callback, &info);

	 * When just calculating the size, overestimate a bit to try to
	 * prevent system activity from causing the buffer-fill call
	if (req->oldptr == NULL) {
		info.count = (info.count + 16) + (info.count / 10);
		info.error = SYSCTL_OUT(req, NULL,
		    info.count * sizeof(struct kinfo_file));
	return (info.error);
sysctl_kern_file_callback(struct proc *p, void *data)
	struct sysctl_kern_file_info *info = data;
	struct kinfo_file kf;
	struct filedesc *fdp;

	if (p->p_stat == SIDL || p->p_stat == SZOMB)
	if (!PRISON_CHECK(info->req->td->td_proc->p_ucred, p->p_ucred))

	 * Softref the fdp to prevent it from being destroyed
	spin_lock_wr(&p->p_spin);
	if ((fdp = p->p_fd) == NULL) {
		spin_unlock_wr(&p->p_spin);
	atomic_add_int(&fdp->fd_softrefs, 1);
	spin_unlock_wr(&p->p_spin);

	 * The fdp's own spinlock prevents the contents from being
	spin_lock_rd(&fdp->fd_spin);
	for (n = 0; n < fdp->fd_nfiles; ++n) {
		if ((fp = fdp->fd_files[n].fp) == NULL)
		if (info->req->oldptr == NULL) {
			uid = p->p_ucred ? p->p_ucred->cr_uid : -1;
			kcore_make_file(&kf, fp, p->p_pid, uid, n);
			spin_unlock_rd(&fdp->fd_spin);
			info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf));
			spin_lock_rd(&fdp->fd_spin);
	spin_unlock_rd(&fdp->fd_spin);
	atomic_subtract_int(&fdp->fd_softrefs, 1);
SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, 0, sysctl_kern_file, "S,file", "Entire file table");

SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
    &maxfilesperproc, 0, "Maximum files allowed open per process");

SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
    &maxfiles, 0, "Maximum number of files");

SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW,
    &maxfilesrootres, 0, "Descriptors reserved for root use");

SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
    &nfiles, 0, "System-wide number of open files");
fildesc_drvinit(void *unused)
	for (fd = 0; fd < NUMFDESC; fd++) {
		make_dev(&fildesc_ops, fd,
		    UID_BIN, GID_BIN, 0666, "fd/%d", fd);

	make_dev(&fildesc_ops, 0, UID_ROOT, GID_WHEEL, 0666, "stdin");
	make_dev(&fildesc_ops, 1, UID_ROOT, GID_WHEEL, 0666, "stdout");
	make_dev(&fildesc_ops, 2, UID_ROOT, GID_WHEEL, 0666, "stderr");
struct fileops badfileops = {
	.fo_read = badfo_readwrite,
	.fo_write = badfo_readwrite,
	.fo_ioctl = badfo_ioctl,
	.fo_poll = badfo_poll,
	.fo_kqfilter = badfo_kqfilter,
	.fo_stat = badfo_stat,
	.fo_close = badfo_close,
	.fo_shutdown = badfo_shutdown

badfo_ioctl(struct file *fp, u_long com, caddr_t data,
	    struct ucred *cred, struct sysmsg *msgv)

badfo_poll(struct file *fp, int events, struct ucred *cred)

badfo_kqfilter(struct file *fp, struct knote *kn)

badfo_stat(struct file *fp, struct stat *sb, struct ucred *cred)

badfo_close(struct file *fp)

badfo_shutdown(struct file *fp, int how)

nofo_shutdown(struct file *fp, int how)
	return (EOPNOTSUPP);

SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR,
    fildesc_drvinit, NULL)