2 * Copyright (c) 2005 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * Copyright (c) 1982, 1986, 1989, 1991, 1993
36 * The Regents of the University of California. All rights reserved.
37 * (c) UNIX System Laboratories, Inc.
38 * All or some portions of this file are derived from material licensed
39 * to the University of California by American Telephone and Telegraph
40 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
41 * the permission of UNIX System Laboratories, Inc.
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 * must display the following acknowledgement:
53 * This product includes software developed by the University of
54 * California, Berkeley and its contributors.
55 * 4. Neither the name of the University nor the names of its contributors
56 * may be used to endorse or promote products derived from this software
57 * without specific prior written permission.
59 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
60 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
61 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
62 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
63 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
64 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
65 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
71 * @(#)kern_descrip.c 8.6 (Berkeley) 4/19/94
72 * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $
73 * $DragonFly: src/sys/kern/kern_descrip.c,v 1.66 2006/05/27 01:57:41 dillon Exp $
76 #include "opt_compat.h"
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/malloc.h>
80 #include <sys/sysproto.h>
82 #include <sys/filedesc.h>
83 #include <sys/kernel.h>
84 #include <sys/sysctl.h>
85 #include <sys/vnode.h>
87 #include <sys/nlookup.h>
90 #include <sys/filio.h>
91 #include <sys/fcntl.h>
92 #include <sys/unistd.h>
93 #include <sys/resourcevar.h>
94 #include <sys/event.h>
95 #include <sys/kern_syscall.h>
96 #include <sys/kcore.h>
97 #include <sys/kinfo.h>
100 #include <vm/vm_extern.h>
102 #include <sys/thread2.h>
103 #include <sys/file2.h>
104 #include <sys/spinlock2.h>
106 static void fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd);
107 static void fdreserve_locked (struct filedesc *fdp, int fd0, int incr);
108 static struct file *funsetfd_locked (struct filedesc *fdp, int fd);
109 static int checkfpclosed(struct filedesc *fdp, int fd, struct file *fp);
110 static void ffree(struct file *fp);
112 static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
113 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
114 "file desc to leader structures");
115 MALLOC_DEFINE(M_FILE, "file", "Open file structure");
116 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
118 static d_open_t fdopen;
121 #define CDEV_MAJOR 22
122 static struct cdevsw fildesc_cdevsw = {
124 /* maj */ CDEV_MAJOR,
136 /* strategy */ nostrategy,
141 static int badfo_readwrite (struct file *fp, struct uio *uio,
142 struct ucred *cred, int flags);
143 static int badfo_ioctl (struct file *fp, u_long com, caddr_t data,
145 static int badfo_poll (struct file *fp, int events, struct ucred *cred);
146 static int badfo_kqfilter (struct file *fp, struct knote *kn);
147 static int badfo_stat (struct file *fp, struct stat *sb, struct ucred *cred);
148 static int badfo_close (struct file *fp);
149 static int badfo_shutdown (struct file *fp, int how);
152 * Descriptor management.
154 static struct filelist filehead = LIST_HEAD_INITIALIZER(&filehead);
155 static struct spinlock filehead_spin = SPINLOCK_INITIALIZER(&filehead_spin);
156 static int nfiles; /* actual number of open files */
160 * Fixup fd_freefile and fd_lastfile after a descriptor has been cleared.
162 * MPSAFE - must be called with fdp->fd_spin exclusively held
166 fdfixup_locked(struct filedesc *fdp, int fd)
168 if (fd < fdp->fd_freefile) {
169 fdp->fd_freefile = fd;
171 while (fdp->fd_lastfile >= 0 &&
172 fdp->fd_files[fdp->fd_lastfile].fp == NULL &&
173 fdp->fd_files[fdp->fd_lastfile].reserved == 0
180 * System calls on descriptors.
185 getdtablesize(struct getdtablesize_args *uap)
187 struct proc *p = curproc;
188 struct plimit *limit = p->p_limit;
190 spin_lock_rd(&limit->p_spin);
192 min((int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
193 spin_unlock_rd(&limit->p_spin);
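#if 0
/*
 * Userland illustration (not kernel code, not part of this file) of the
 * calculation above: getdtablesize() reports the soft RLIMIT_NOFILE
 * limit clamped by the kern.maxfilesperproc sysctl.
 */
#include <sys/resource.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct rlimit rl;

	getrlimit(RLIMIT_NOFILE, &rl);
	printf("soft RLIMIT_NOFILE: %ld\n", (long)rl.rlim_cur);
	printf("getdtablesize():    %d\n", getdtablesize());
	return (0);
}
#endif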
198 * Duplicate a file descriptor to a particular value.
200 * note: keep in mind that a potential race condition exists when closing
201 * descriptors from a shared descriptor table (via rfork).
206 dup2(struct dup2_args *uap)
210 error = kern_dup(DUP_FIXED, uap->from, uap->to, uap->sysmsg_fds);
216 * Duplicate a file descriptor.
221 dup(struct dup_args *uap)
225 error = kern_dup(DUP_VARIABLE, uap->fd, 0, uap->sysmsg_fds);
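#if 0
/*
 * Userland illustration (not kernel code, not part of this file) of the
 * two dup flavors funneled through kern_dup() above: dup() picks the
 * lowest unused descriptor (DUP_VARIABLE), while dup2() destructively
 * replaces the requested descriptor (DUP_FIXED), closing it first if it
 * was open.
 */
#include <fcntl.h>
#include <unistd.h>

static int
redirect_stdout(const char *path)
{
	int fd, lowest;

	fd = open(path, O_WRONLY | O_CREAT | O_APPEND, 0644);
	if (fd < 0)
		return (-1);
	lowest = dup(fd);		/* DUP_VARIABLE: lowest unused fd */
	if (lowest >= 0)
		close(lowest);
	if (dup2(fd, STDOUT_FILENO) < 0) { /* DUP_FIXED: exactly fd 1 */
		close(fd);
		return (-1);
	}
	close(fd);
	return (0);
}
#endif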
231 * MPALMOSTSAFE - acquires mplock for fp operations
234 kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred)
236 struct thread *td = curthread;
237 struct proc *p = td->td_proc;
241 int tmp, error, flg = F_POSIX;
246 * Operations on file descriptors that do not require a file pointer.
250 error = fgetfdflags(p->p_fd, fd, &tmp);
252 dat->fc_cloexec = (tmp & UF_EXCLOSE) ? FD_CLOEXEC : 0;
256 if (dat->fc_cloexec & FD_CLOEXEC)
257 error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
259 error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
263 error = kern_dup(DUP_VARIABLE, fd, newmin, &dat->fc_fd);
270 * Operations on file pointers
272 if ((fp = holdfp(p->p_fd, fd, -1)) == NULL)
278 dat->fc_flags = OFLAGS(fp->f_flag);
283 fp->f_flag &= ~FCNTLFLAGS;
284 fp->f_flag |= FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS;
285 tmp = fp->f_flag & FNONBLOCK;
286 error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, cred);
289 tmp = fp->f_flag & FASYNC;
290 error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, cred);
293 fp->f_flag &= ~FNONBLOCK;
295 fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, cred);
299 error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner, cred);
303 error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner, cred);
308 /* Fall into F_SETLK */
311 if (fp->f_type != DTYPE_VNODE) {
315 vp = (struct vnode *)fp->f_data;
318 * copyin/lockop may block
320 if (dat->fc_flock.l_whence == SEEK_CUR)
321 dat->fc_flock.l_start += fp->f_offset;
323 switch (dat->fc_flock.l_type) {
325 if ((fp->f_flag & FREAD) == 0) {
329 p->p_leader->p_flag |= P_ADVLOCK;
330 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
331 &dat->fc_flock, flg);
334 if ((fp->f_flag & FWRITE) == 0) {
338 p->p_leader->p_flag |= P_ADVLOCK;
339 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
340 &dat->fc_flock, flg);
343 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
344 &dat->fc_flock, F_POSIX);
352 * It is possible to race a close() on the descriptor while
353 * we were blocked getting the lock. If this occurs the
354 * close might not have caught the lock.
356 if (checkfpclosed(p->p_fd, fd, fp)) {
357 dat->fc_flock.l_whence = SEEK_SET;
358 dat->fc_flock.l_start = 0;
359 dat->fc_flock.l_len = 0;
360 dat->fc_flock.l_type = F_UNLCK;
361 (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
362 F_UNLCK, &dat->fc_flock, F_POSIX);
367 if (fp->f_type != DTYPE_VNODE) {
371 vp = (struct vnode *)fp->f_data;
373 * copyin/lockop may block
375 if (dat->fc_flock.l_type != F_RDLCK &&
376 dat->fc_flock.l_type != F_WRLCK &&
377 dat->fc_flock.l_type != F_UNLCK) {
381 if (dat->fc_flock.l_whence == SEEK_CUR)
382 dat->fc_flock.l_start += fp->f_offset;
383 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
384 &dat->fc_flock, F_POSIX);
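#if 0
/*
 * Userland illustration (not kernel code, not part of this file) of the
 * advisory-lock requests handled above: take an exclusive POSIX record
 * lock on the first 100 bytes of a file, then release it.  F_SETLKW
 * blocks until the lock is granted; F_SETLK would instead fail
 * (typically with EAGAIN) if the range is already locked.
 */
#include <fcntl.h>
#include <unistd.h>

static int
lock_and_unlock_prefix(int fd)
{
	struct flock fl;

	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 100;
	if (fcntl(fd, F_SETLKW, &fl) == -1)
		return (-1);
	fl.l_type = F_UNLCK;
	return (fcntl(fd, F_SETLK, &fl));
}
#endif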
397 * The file control system call.
402 fcntl(struct fcntl_args *uap)
409 dat.fc_fd = uap->arg;
412 dat.fc_cloexec = uap->arg;
415 dat.fc_flags = uap->arg;
418 dat.fc_owner = uap->arg;
423 error = copyin((caddr_t)uap->arg, &dat.fc_flock,
424 sizeof(struct flock));
430 error = kern_fcntl(uap->fd, uap->cmd, &dat, curproc->p_ucred);
435 uap->sysmsg_result = dat.fc_fd;
438 uap->sysmsg_result = dat.fc_cloexec;
441 uap->sysmsg_result = dat.fc_flags;
444 uap->sysmsg_result = dat.fc_owner;
446 error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
447 sizeof(struct flock));
456 * Common code for dup, dup2, and fcntl(F_DUPFD).
458 * The type flag can be either DUP_FIXED or DUP_VARIABLE. DUP_FIXED tells
459 * kern_dup() to destructively dup over an existing file descriptor if new
460 * is already open. DUP_VARIABLE tells kern_dup() to find the lowest
461 * unused file descriptor that is greater than or equal to new.
466 kern_dup(enum dup_type type, int old, int new, int *res)
468 struct thread *td = curthread;
469 struct proc *p = td->td_proc;
470 struct filedesc *fdp = p->p_fd;
478 * Verify that we have a valid descriptor to dup from and
479 * possibly to dup to.
482 spin_lock_wr(&fdp->fd_spin);
483 if (new < 0 || new > p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
484 new >= maxfilesperproc) {
485 spin_unlock_wr(&fdp->fd_spin);
488 if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) {
489 spin_unlock_wr(&fdp->fd_spin);
492 if (type == DUP_FIXED && old == new) {
494 spin_unlock_wr(&fdp->fd_spin);
497 fp = fdp->fd_files[old].fp;
498 oldflags = fdp->fd_files[old].fileflags;
499 fhold(fp); /* MPSAFE - can be called with a spinlock held */
502 * Allocate a new descriptor if DUP_VARIABLE, or expand the table
503 * if the requested descriptor is beyond the current table size.
505 * This can block. Retry if the source descriptor no longer matches
506 * or if our expectation in the expansion case races.
508 * If we are not expanding or allocating a new descriptor, then reset
509 * the target descriptor to a reserved state so we have a uniform
510 * setup for the next code block.
512 if (type == DUP_VARIABLE || new >= fdp->fd_nfiles) {
513 spin_unlock_wr(&fdp->fd_spin);
514 error = fdalloc(p, new, &newfd);
515 spin_lock_wr(&fdp->fd_spin);
517 spin_unlock_wr(&fdp->fd_spin);
524 if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) {
525 fsetfd_locked(fdp, NULL, newfd);
526 spin_unlock_wr(&fdp->fd_spin);
531 * Check for expansion race
533 if (type != DUP_VARIABLE && new != newfd) {
534 fsetfd_locked(fdp, NULL, newfd);
535 spin_unlock_wr(&fdp->fd_spin);
540 * Check for ripout, newfd reused old (this case probably
544 fsetfd_locked(fdp, NULL, newfd);
545 spin_unlock_wr(&fdp->fd_spin);
552 if (fdp->fd_files[new].reserved) {
553 spin_unlock_wr(&fdp->fd_spin);
555 printf("Warning: dup(): target descriptor %d is reserved, waiting for it to be resolved\n", new);
556 tsleep(fdp, 0, "fdres", hz);
561 * If the target descriptor was never allocated we have
562 * to allocate it. If it was we have to clean out the
563 * old descriptor. delfp inherits the ref from the
566 delfp = fdp->fd_files[new].fp;
567 fdp->fd_files[new].fp = NULL;
568 fdp->fd_files[new].reserved = 1;
570 fdreserve_locked(fdp, new, 1);
571 if (new > fdp->fd_lastfile)
572 fdp->fd_lastfile = new;
578 * NOTE: still holding an exclusive spinlock
582 * If a descriptor is being overwritten we may have to tell
583 * fdfree() to sleep to ensure that all relevant process
584 * leaders can be traversed in closef().
586 if (delfp != NULL && p->p_fdtol != NULL) {
587 fdp->fd_holdleaderscount++;
592 KASSERT(delfp == NULL || type == DUP_FIXED,
593 ("dup() picked an open file"));
596 * Duplicate the source descriptor, update lastfile. If the new
597 * descriptor was not allocated and we aren't replacing an existing
598 * descriptor we have to mark the descriptor as being in use.
600 * The fd_files[] array inherits fp's hold reference.
602 fsetfd_locked(fdp, fp, new);
603 fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE;
604 spin_unlock_wr(&fdp->fd_spin);
609 * If we dup'd over a valid file, we now own the reference to it
610 * and must dispose of it using closef() semantics (as if a
611 * close() were performed on it).
614 (void)closef(delfp, td);
616 spin_lock_wr(&fdp->fd_spin);
617 fdp->fd_holdleaderscount--;
618 if (fdp->fd_holdleaderscount == 0 &&
619 fdp->fd_holdleaderswakeup != 0) {
620 fdp->fd_holdleaderswakeup = 0;
621 spin_unlock_wr(&fdp->fd_spin);
622 wakeup(&fdp->fd_holdleaderscount);
624 spin_unlock_wr(&fdp->fd_spin);
632 * If sigio is on the list associated with a process or process group,
633 * disable signalling from the device, remove sigio from the list and
637 funsetown(struct sigio *sigio)
642 *(sigio->sio_myref) = NULL;
644 if (sigio->sio_pgid < 0) {
645 SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
647 } else /* if ((*sigiop)->sio_pgid > 0) */ {
648 SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
651 crfree(sigio->sio_ucred);
652 free(sigio, M_SIGIO);
655 /* Free a list of sigio structures. */
657 funsetownlst(struct sigiolst *sigiolst)
661 while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
666 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
668 * After permission checking, add a sigio structure to the sigio list for
669 * the process or process group.
672 fsetown(pid_t pgid, struct sigio **sigiop)
688 * Policy - Don't allow a process to FSETOWN a process
689 * in another session.
691 * Remove this test to allow maximum flexibility or
692 * restrict FSETOWN to the current process or process
693 * group for maximum safety.
695 if (proc->p_session != curproc->p_session)
699 } else /* if (pgid < 0) */ {
700 pgrp = pgfind(-pgid);
705 * Policy - Don't allow a process to FSETOWN a process
706 * in another session.
708 * Remove this test to allow maximum flexibility or
709 * restrict FSETOWN to the current process or process
710 * group for maximum safety.
712 if (pgrp->pg_session != curproc->p_session)
718 sigio = malloc(sizeof(struct sigio), M_SIGIO, M_WAITOK);
720 SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
721 sigio->sio_proc = proc;
723 SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
724 sigio->sio_pgrp = pgrp;
726 sigio->sio_pgid = pgid;
727 sigio->sio_ucred = crhold(curproc->p_ucred);
728 /* It would be convenient if p_ruid was in ucred. */
729 sigio->sio_ruid = curproc->p_ucred->cr_ruid;
730 sigio->sio_myref = sigiop;
738 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
741 fgetown(struct sigio *sigio)
743 return (sigio != NULL ? sigio->sio_pgid : 0);
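#if 0
/*
 * Userland illustration (not kernel code, not part of this file) of the
 * FIOSETOWN/FIOGETOWN plumbing above: request SIGIO delivery to the
 * current process for a descriptor and enable asynchronous notification.
 */
#include <fcntl.h>
#include <unistd.h>

static int
enable_sigio(int fd)
{
	int flags;

	if (fcntl(fd, F_SETOWN, getpid()) == -1)
		return (-1);
	flags = fcntl(fd, F_GETFL, 0);
	if (flags == -1)
		return (-1);
	return (fcntl(fd, F_SETFL, flags | O_ASYNC));
}
#endif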
747 * Close many file descriptors.
752 closefrom(struct closefrom_args *uap)
754 return(kern_closefrom(uap->fd));
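#if 0
/*
 * Userland illustration (not kernel code, not part of this file): a
 * daemonizing child commonly uses closefrom() to drop every inherited
 * descriptor above stderr before exec'ing, which is exactly the loop
 * kern_closefrom() runs below.
 */
#include <unistd.h>

static void
drop_inherited_fds(void)
{
	closefrom(3);		/* keep stdin/stdout/stderr only */
}
#endif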
758 * Close all file descriptors greater than or equal to fd
763 kern_closefrom(int fd)
765 struct thread *td = curthread;
766 struct proc *p = td->td_proc;
767 struct filedesc *fdp;
776 * NOTE: This function will skip unassociated descriptors and
777 * reserved descriptors that have not yet been assigned.
778 * fd_lastfile can change as a side effect of kern_close().
780 spin_lock_wr(&fdp->fd_spin);
781 while (fd <= fdp->fd_lastfile) {
782 if (fdp->fd_files[fd].fp != NULL) {
783 spin_unlock_wr(&fdp->fd_spin);
784 /* ok if this races another close */
785 if (kern_close(fd) == EINTR)
787 spin_lock_wr(&fdp->fd_spin);
791 spin_unlock_wr(&fdp->fd_spin);
796 * Close a file descriptor.
801 close(struct close_args *uap)
803 return(kern_close(uap->fd));
807 * MPALMOSTSAFE - acquires mplock around knote_fdclose() calls
812 struct thread *td = curthread;
813 struct proc *p = td->td_proc;
814 struct filedesc *fdp;
822 spin_lock_wr(&fdp->fd_spin);
823 if ((fp = funsetfd_locked(fdp, fd)) == NULL) {
824 spin_unlock_wr(&fdp->fd_spin);
828 if (p->p_fdtol != NULL) {
830 * Ask fdfree() to sleep to ensure that all relevant
831 * process leaders can be traversed in closef().
833 fdp->fd_holdleaderscount++;
838 * we now hold the fp reference that used to be owned by the descriptor
841 spin_unlock_wr(&fdp->fd_spin);
842 if (fd < fdp->fd_knlistsize) {
844 if (fd < fdp->fd_knlistsize)
845 knote_fdclose(p, fd);
848 error = closef(fp, td);
850 spin_lock_wr(&fdp->fd_spin);
851 fdp->fd_holdleaderscount--;
852 if (fdp->fd_holdleaderscount == 0 &&
853 fdp->fd_holdleaderswakeup != 0) {
854 fdp->fd_holdleaderswakeup = 0;
855 spin_unlock_wr(&fdp->fd_spin);
856 wakeup(&fdp->fd_holdleaderscount);
858 spin_unlock_wr(&fdp->fd_spin);
865 * shutdown_args(int fd, int how)
868 kern_shutdown(int fd, int how)
870 struct thread *td = curthread;
871 struct proc *p = td->td_proc;
877 if ((fp = holdfp(p->p_fd, fd, -1)) == NULL)
879 error = fo_shutdown(fp, how);
886 shutdown(struct shutdown_args *uap)
890 error = kern_shutdown(uap->s, uap->how);
896 kern_fstat(int fd, struct stat *ub)
898 struct thread *td = curthread;
899 struct proc *p = td->td_proc;
905 if ((fp = holdfp(p->p_fd, fd, -1)) == NULL)
907 error = fo_stat(fp, ub, p->p_ucred);
914 * Return status information about a file descriptor.
917 fstat(struct fstat_args *uap)
922 error = kern_fstat(uap->fd, &st);
925 error = copyout(&st, uap->sb, sizeof(st));
930 * Return pathconf information about a file descriptor.
934 fpathconf(struct fpathconf_args *uap)
936 struct thread *td = curthread;
937 struct proc *p = td->td_proc;
944 if ((fp = holdfp(p->p_fd, uap->fd, -1)) == NULL)
947 switch (fp->f_type) {
950 if (uap->name != _PC_PIPE_BUF) {
953 uap->sysmsg_result = PIPE_BUF;
959 vp = (struct vnode *)fp->f_data;
960 error = VOP_PATHCONF(vp, uap->name, uap->sysmsg_fds);
971 SYSCTL_INT(_debug, OID_AUTO, fdexpand, CTLFLAG_RD, &fdexpand, 0, "");
974 * Grow the file table so it can hold descriptors up through (want).
976 * The fdp's spinlock must be held exclusively on entry and may be held
977 * exclusively on return. The spinlock may be cycled by the routine.
982 fdgrow_locked(struct filedesc *fdp, int want)
984 struct fdnode *newfiles;
985 struct fdnode *oldfiles;
990 /* nf has to be of the form 2^n - 1 */
992 } while (nf <= want);
994 spin_unlock_wr(&fdp->fd_spin);
995 newfiles = malloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK);
996 spin_lock_wr(&fdp->fd_spin);
999 * We could have raced another extend while we were not holding
1002 if (fdp->fd_nfiles >= nf) {
1003 spin_unlock_wr(&fdp->fd_spin);
1004 free(newfiles, M_FILEDESC);
1005 spin_lock_wr(&fdp->fd_spin);
1009 * Copy the existing ofile and ofileflags arrays
1010 * and zero the new portion of each array.
1012 extra = nf - fdp->fd_nfiles;
1013 bcopy(fdp->fd_files, newfiles, fdp->fd_nfiles * sizeof(struct fdnode));
1014 bzero(&newfiles[fdp->fd_nfiles], extra * sizeof(struct fdnode));
1016 oldfiles = fdp->fd_files;
1017 fdp->fd_files = newfiles;
1018 fdp->fd_nfiles = nf;
1020 if (oldfiles != fdp->fd_builtin_files) {
1021 spin_unlock_wr(&fdp->fd_spin);
1022 free(oldfiles, M_FILEDESC);
1023 spin_lock_wr(&fdp->fd_spin);
1029 * Number of nodes in right subtree, including the root.
1032 right_subtree_size(int n)
1034 return (n ^ (n | (n + 1)));
1041 right_ancestor(int n)
1043 return (n | (n + 1));
1050 left_ancestor(int n)
1052 return ((n & (n + 1)) - 1);
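#if 0
/*
 * Illustration (not part of the build) of the in-place binary tree
 * helpers above, evaluated for a few small descriptor numbers.  The
 * printed table is a hand-checkable way to see how the bit tricks
 * navigate the tree used by fdreserve_locked() and fdalloc():
 *
 *	n	right_subtree_size	right_ancestor	left_ancestor
 *	3	4 (3^7)			7 (3|4)		-1 ((3&4)-1)
 *	5	2 (5^7)			7 (5|6)		 3 ((5&6)-1)
 *	8	1 (8^9)			9 (8|9)		 7 ((8&9)-1)
 */
#include <stdio.h>

int
main(void)
{
	int n;

	for (n = 0; n < 15; ++n) {
		printf("%2d: rss=%2d ra=%2d la=%2d\n", n,
		    n ^ (n | (n + 1)),		/* right_subtree_size */
		    n | (n + 1),		/* right_ancestor */
		    (n & (n + 1)) - 1);		/* left_ancestor */
	}
	return (0);
}
#endif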
1056 * Traverse the in-place binary tree bottom-up adjusting the allocation
1057 * count so scans can determine where free descriptors are located.
1059 * MPSAFE - caller must be holding an exclusive spinlock on fdp
1063 fdreserve_locked(struct filedesc *fdp, int fd, int incr)
1066 fdp->fd_files[fd].allocated += incr;
1067 KKASSERT(fdp->fd_files[fd].allocated >= 0);
1068 fd = left_ancestor(fd);
1073 * Reserve a file descriptor for the process. If no error occurs, the
1074 * caller MUST at some point call fsetfd() or assign a file pointer
1075 * or dispose of the reservation.
1080 fdalloc(struct proc *p, int want, int *result)
1082 struct filedesc *fdp = p->p_fd;
1083 int fd, rsize, rsum, node, lim;
1085 spin_lock_rd(&p->p_limit->p_spin);
1086 lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
1087 spin_unlock_rd(&p->p_limit->p_spin);
1090 spin_lock_wr(&fdp->fd_spin);
1091 if (want >= fdp->fd_nfiles)
1092 fdgrow_locked(fdp, want);
1095 * Search for a free descriptor starting at the higher
1096 * of want or fd_freefile. If that fails, consider
1097 * expanding the ofile array.
1099 * NOTE! the 'allocated' field is a cumulative recursive allocation
1100 * count. If we happen to see a value of 0 then we can shortcut
1101 * our search. Otherwise we run through the tree going
1102 * down branches we know have free descriptor(s) until we hit a
1103 * leaf node. The leaf node will be free but will not necessarily
1104 * have an allocated field of 0.
1107 /* move up the tree looking for a subtree with a free node */
1108 for (fd = max(want, fdp->fd_freefile); fd < min(fdp->fd_nfiles, lim);
1109 fd = right_ancestor(fd)) {
1110 if (fdp->fd_files[fd].allocated == 0)
1113 rsize = right_subtree_size(fd);
1114 if (fdp->fd_files[fd].allocated == rsize)
1115 continue; /* right subtree full */
1118 * Free fd is in the right subtree of the tree rooted at fd.
1119 * Call that subtree R. Look for the smallest (leftmost)
1120 * subtree of R with an unallocated fd: continue moving
1121 * down the left branch until encountering a full left
1122 * subtree, then move to the right.
1124 for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
1126 rsum += fdp->fd_files[node].allocated;
1127 if (fdp->fd_files[fd].allocated == rsum + rsize) {
1128 fd = node; /* move to the right */
1129 if (fdp->fd_files[node].allocated == 0)
1138 * No space in current array. Expand?
1140 if (fdp->fd_nfiles >= lim) {
1141 spin_unlock_wr(&fdp->fd_spin);
1144 fdgrow_locked(fdp, want);
1148 KKASSERT(fd < fdp->fd_nfiles);
1149 if (fd > fdp->fd_lastfile)
1150 fdp->fd_lastfile = fd;
1151 if (want <= fdp->fd_freefile)
1152 fdp->fd_freefile = fd;
1154 KKASSERT(fdp->fd_files[fd].fp == NULL);
1155 KKASSERT(fdp->fd_files[fd].reserved == 0);
1156 fdp->fd_files[fd].fileflags = 0;
1157 fdp->fd_files[fd].reserved = 1;
1158 fdreserve_locked(fdp, fd, 1);
1159 spin_unlock_wr(&fdp->fd_spin);
1164 * Check to see whether n user file descriptors
1165 * are available to the process p.
1170 fdavail(struct proc *p, int n)
1172 struct filedesc *fdp = p->p_fd;
1173 struct fdnode *fdnode;
1176 spin_lock_rd(&p->p_limit->p_spin);
1177 lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
1178 spin_unlock_rd(&p->p_limit->p_spin);
1180 spin_lock_rd(&fdp->fd_spin);
1181 if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) {
1182 spin_unlock_rd(&fdp->fd_spin);
1185 last = min(fdp->fd_nfiles, lim);
1186 fdnode = &fdp->fd_files[fdp->fd_freefile];
1187 for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) {
1188 if (fdnode->fp == NULL && --n <= 0) {
1189 spin_unlock_rd(&fdp->fd_spin);
1193 spin_unlock_rd(&fdp->fd_spin);
1199 * Create a new open file structure and reserve a file descriptor
1200 * for the process that refers to it.
1202 * Root creds are checked using p, or assumed if p is NULL. If
1203 * resultfd is non-NULL then p must also be non-NULL. No file
1204 * descriptor is reserved if resultfd is NULL.
1206 * A file pointer with a refcount of 1 is returned. Note that the
1207 * file pointer is NOT associated with the descriptor. If falloc
1208 * returns success, fsetfd() MUST be called to either associate the
1209 * file pointer or clear the reservation.
1214 falloc(struct proc *p, struct file **resultfp, int *resultfd)
1216 static struct timeval lastfail;
1224 * Handle filetable full issues and root overfill.
1226 if (nfiles >= maxfiles - maxfilesrootres &&
1227 ((p && p->p_ucred->cr_ruid != 0) || nfiles >= maxfiles)) {
1228 if (ppsratecheck(&lastfail, &curfail, 1)) {
1229 printf("kern.maxfiles limit exceeded by uid %d, please see tuning(7).\n",
1230 (p ? p->p_ucred->cr_ruid : -1));
1237 * Allocate a new file descriptor.
1239 fp = malloc(sizeof(struct file), M_FILE, M_WAITOK | M_ZERO);
1240 spin_init(&fp->f_spin);
1242 fp->f_ops = &badfileops;
1245 fp->f_cred = crhold(p->p_ucred);
1247 fp->f_cred = crhold(proc0.p_ucred);
1248 spin_lock_wr(&filehead_spin);
1250 LIST_INSERT_HEAD(&filehead, fp, f_list);
1251 spin_unlock_wr(&filehead_spin);
1253 if ((error = fdalloc(p, 0, resultfd)) != 0) {
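#if 0
/*
 * Hedged sketch (not from this file) of the falloc()/fsetfd() contract
 * documented above: every successful falloc() must be followed by an
 * fsetfd() that either installs the file pointer or, on failure, clears
 * the reservation by passing NULL.  "setup_file" is a hypothetical
 * initialization step; reference-count disposal is intentionally elided.
 */
static int
example_install_fd(struct proc *p)
{
	struct file *fp;
	int fd, error;

	error = falloc(p, &fp, &fd);
	if (error)
		return (error);
	error = setup_file(fp);		/* hypothetical setup step */
	if (error == 0)
		fsetfd(p, fp, fd);	/* associate fp with descriptor fd */
	else
		fsetfd(p, NULL, fd);	/* return the reserved descriptor */
	return (error);
}
#endif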
1270 checkfpclosed(struct filedesc *fdp, int fd, struct file *fp)
1274 spin_lock_rd(&fdp->fd_spin);
1275 if ((unsigned) fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp)
1279 spin_unlock_rd(&fdp->fd_spin);
1284 * Associate a file pointer with a previously reserved file descriptor.
1285 * This function always succeeds.
1287 * If fp is NULL, the file descriptor is returned to the pool.
1291 * MPSAFE (exclusive spinlock must be held on call)
1294 fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd)
1296 KKASSERT((unsigned)fd < fdp->fd_nfiles);
1297 KKASSERT(fdp->fd_files[fd].reserved != 0);
1300 fdp->fd_files[fd].fp = fp;
1301 fdp->fd_files[fd].reserved = 0;
1302 if (fp->f_type == DTYPE_KQUEUE) {
1303 if (fdp->fd_knlistsize < 0)
1304 fdp->fd_knlistsize = 0;
1307 fdp->fd_files[fd].reserved = 0;
1308 fdreserve_locked(fdp, fd, -1);
1309 fdfixup_locked(fdp, fd);
1317 fsetfd(struct proc *p, struct file *fp, int fd)
1319 struct filedesc *fdp = p->p_fd;
1321 spin_lock_wr(&fdp->fd_spin);
1322 fsetfd_locked(fdp, fp, fd);
1323 spin_unlock_wr(&fdp->fd_spin);
1327 * MPSAFE (exclusive spinlock must be held on call)
1331 funsetfd_locked(struct filedesc *fdp, int fd)
1335 if ((unsigned)fd >= fdp->fd_nfiles)
1337 if ((fp = fdp->fd_files[fd].fp) == NULL)
1339 fdp->fd_files[fd].fp = NULL;
1340 fdp->fd_files[fd].fileflags = 0;
1342 fdreserve_locked(fdp, fd, -1);
1343 fdfixup_locked(fdp, fd);
1351 fgetfdflags(struct filedesc *fdp, int fd, int *flagsp)
1355 spin_lock_rd(&fdp->fd_spin);
1356 if (((u_int)fd) >= fdp->fd_nfiles) {
1358 } else if (fdp->fd_files[fd].fp == NULL) {
1361 *flagsp = fdp->fd_files[fd].fileflags;
1364 spin_unlock_rd(&fdp->fd_spin);
1372 fsetfdflags(struct filedesc *fdp, int fd, int add_flags)
1376 spin_lock_wr(&fdp->fd_spin);
1377 if (((u_int)fd) >= fdp->fd_nfiles) {
1379 } else if (fdp->fd_files[fd].fp == NULL) {
1382 fdp->fd_files[fd].fileflags |= add_flags;
1385 spin_unlock_wr(&fdp->fd_spin);
1393 fclrfdflags(struct filedesc *fdp, int fd, int rem_flags)
1397 spin_lock_wr(&fdp->fd_spin);
1398 if (((u_int)fd) >= fdp->fd_nfiles) {
1400 } else if (fdp->fd_files[fd].fp == NULL) {
1403 fdp->fd_files[fd].fileflags &= ~rem_flags;
1406 spin_unlock_wr(&fdp->fd_spin);
1411 fsetcred(struct file *fp, struct ucred *cr)
1419 * Free a file descriptor.
1423 ffree(struct file *fp)
1425 KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
1426 spin_lock_wr(&filehead_spin);
1427 LIST_REMOVE(fp, f_list);
1429 spin_unlock_wr(&filehead_spin);
1432 cache_drop(fp->f_ncp);
1439 * called from init_main, initialize filedesc0 for proc0.
1442 fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask)
1446 fdp0->fd_refcnt = 1;
1447 fdp0->fd_cmask = cmask;
1448 fdp0->fd_files = fdp0->fd_builtin_files;
1449 fdp0->fd_nfiles = NDFILE;
1450 fdp0->fd_lastfile = -1;
1451 spin_init(&fdp0->fd_spin);
1455 * Build a new filedesc structure.
1460 fdinit(struct proc *p)
1462 struct filedesc *newfdp;
1463 struct filedesc *fdp = p->p_fd;
1465 newfdp = malloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO);
1466 spin_lock_rd(&fdp->fd_spin);
1468 newfdp->fd_cdir = fdp->fd_cdir;
1469 vref(newfdp->fd_cdir);
1470 newfdp->fd_ncdir = cache_hold(fdp->fd_ncdir);
1474 * rdir may not be set in e.g. proc0 or anything vm_fork'd off of
1475 * proc0, but should unconditionally exist in other processes.
1478 newfdp->fd_rdir = fdp->fd_rdir;
1479 vref(newfdp->fd_rdir);
1480 newfdp->fd_nrdir = cache_hold(fdp->fd_nrdir);
1483 newfdp->fd_jdir = fdp->fd_jdir;
1484 vref(newfdp->fd_jdir);
1485 newfdp->fd_njdir = cache_hold(fdp->fd_njdir);
1487 spin_unlock_rd(&fdp->fd_spin);
1489 /* Create the file descriptor table. */
1490 newfdp->fd_refcnt = 1;
1491 newfdp->fd_cmask = cmask;
1492 newfdp->fd_files = newfdp->fd_builtin_files;
1493 newfdp->fd_nfiles = NDFILE;
1494 newfdp->fd_knlistsize = -1;
1495 newfdp->fd_lastfile = -1;
1496 spin_init(&newfdp->fd_spin);
1502 * Share a filedesc structure.
1507 fdshare(struct proc *p)
1509 struct filedesc *fdp;
1512 spin_lock_wr(&fdp->fd_spin);
1514 spin_unlock_wr(&fdp->fd_spin);
1519 * Copy a filedesc structure.
1524 fdcopy(struct proc *p)
1526 struct filedesc *fdp = p->p_fd;
1527 struct filedesc *newfdp;
1528 struct fdnode *fdnode;
1533 * Certain daemons might not have file descriptors.
1539 * Allocate the new filedesc and fd_files[] array. This can race
1540 * with operations by other threads on the fdp so we have to be
1543 newfdp = malloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK | M_ZERO);
1545 spin_lock_rd(&fdp->fd_spin);
1546 if (fdp->fd_lastfile < NDFILE) {
1547 newfdp->fd_files = newfdp->fd_builtin_files;
1551 * We have to allocate (2^n - 1) entries for our in-place
1552 * binary tree. Allow the table to shrink.
1556 while (ni > fdp->fd_lastfile && ni > NDFILE) {
1560 spin_unlock_rd(&fdp->fd_spin);
1561 newfdp->fd_files = malloc(i * sizeof(struct fdnode),
1562 M_FILEDESC, M_WAITOK | M_ZERO);
1565 * Check for race, retry
1567 spin_lock_rd(&fdp->fd_spin);
1568 if (i <= fdp->fd_lastfile) {
1569 spin_unlock_rd(&fdp->fd_spin);
1570 free(newfdp->fd_files, M_FILEDESC);
1576 * Dup the remaining fields. vref() and cache_hold() can be
1577 * safely called while holding the read spinlock on fdp.
1579 * The read spinlock on fdp is still being held.
1581 * NOTE: vref and cache_hold calls for the case where the vnode
1582 * or cache entry already has at least one ref may be called
1583 * while holding spin locks.
1585 if ((newfdp->fd_cdir = fdp->fd_cdir) != NULL) {
1586 vref(newfdp->fd_cdir);
1587 newfdp->fd_ncdir = cache_hold(fdp->fd_ncdir);
1590 * We must check for fd_rdir here, at least for now because
1591 * the init process is created before we have access to the
1592 * rootvnode to take a reference to it.
1594 if ((newfdp->fd_rdir = fdp->fd_rdir) != NULL) {
1595 vref(newfdp->fd_rdir);
1596 newfdp->fd_nrdir = cache_hold(fdp->fd_nrdir);
1598 if ((newfdp->fd_jdir = fdp->fd_jdir) != NULL) {
1599 vref(newfdp->fd_jdir);
1600 newfdp->fd_njdir = cache_hold(fdp->fd_njdir);
1602 newfdp->fd_refcnt = 1;
1603 newfdp->fd_nfiles = i;
1604 newfdp->fd_lastfile = fdp->fd_lastfile;
1605 newfdp->fd_freefile = fdp->fd_freefile;
1606 newfdp->fd_cmask = fdp->fd_cmask;
1607 newfdp->fd_knlist = NULL;
1608 newfdp->fd_knlistsize = -1;
1609 newfdp->fd_knhash = NULL;
1610 newfdp->fd_knhashmask = 0;
1611 spin_init(&newfdp->fd_spin);
1614 * Copy the descriptor table through (i). This also copies the
1615 * allocation state. Then go through and ref the file pointers
1616 * and clean up any KQ descriptors.
1618 * kq descriptors cannot be copied. Since we haven't ref'd the
1619 * copied files yet we can ignore the return value from funsetfd().
1621 * The read spinlock on fdp is still being held.
1623 bcopy(fdp->fd_files, newfdp->fd_files, i * sizeof(struct fdnode));
1624 for (i = 0 ; i < newfdp->fd_nfiles; ++i) {
1625 fdnode = &newfdp->fd_files[i];
1626 if (fdnode->reserved) {
1627 fdreserve_locked(newfdp, i, -1);
1628 fdnode->reserved = 0;
1629 fdfixup_locked(newfdp, i);
1630 } else if (fdnode->fp) {
1631 if (fdnode->fp->f_type == DTYPE_KQUEUE) {
1632 (void)funsetfd_locked(newfdp, i);
1638 spin_unlock_rd(&fdp->fd_spin);
1643 * Release a filedesc structure.
1645 * NOT MPSAFE (MPSAFE for refs > 1, but the final cleanup code is not MPSAFE)
1648 fdfree(struct proc *p)
1650 struct thread *td = p->p_thread;
1651 struct filedesc *fdp = p->p_fd;
1652 struct fdnode *fdnode;
1654 struct filedesc_to_leader *fdtol;
1659 /* Certain daemons might not have file descriptors. */
1664 * Severe messing around to follow
1666 spin_lock_wr(&fdp->fd_spin);
1668 /* Check for special need to clear POSIX style locks */
1670 if (fdtol != NULL) {
1671 KASSERT(fdtol->fdl_refcount > 0,
1672 ("filedesc_to_refcount botch: fdl_refcount=%d",
1673 fdtol->fdl_refcount));
1674 if (fdtol->fdl_refcount == 1 &&
1675 (p->p_leader->p_flag & P_ADVLOCK) != 0) {
1676 for (i = 0; i <= fdp->fd_lastfile; ++i) {
1677 fdnode = &fdp->fd_files[i];
1678 if (fdnode->fp == NULL ||
1679 fdnode->fp->f_type != DTYPE_VNODE) {
1684 spin_unlock_wr(&fdp->fd_spin);
1686 lf.l_whence = SEEK_SET;
1689 lf.l_type = F_UNLCK;
1690 vp = (struct vnode *)fp->f_data;
1691 (void) VOP_ADVLOCK(vp,
1692 (caddr_t)p->p_leader,
1697 spin_lock_wr(&fdp->fd_spin);
1701 if (fdtol->fdl_refcount == 1) {
1702 if (fdp->fd_holdleaderscount > 0 &&
1703 (p->p_leader->p_flag & P_ADVLOCK) != 0) {
1705 * close() or do_dup() has cleared a reference
1706 * in a shared file descriptor table.
1708 fdp->fd_holdleaderswakeup = 1;
1709 msleep(&fdp->fd_holdleaderscount,
1710 &fdp->fd_spin, 0, "fdlhold", 0);
1713 if (fdtol->fdl_holdcount > 0) {
1715 * Ensure that fdtol->fdl_leader
1716 * remains valid in closef().
1718 fdtol->fdl_wakeup = 1;
1719 msleep(fdtol, &fdp->fd_spin, 0, "fdlhold", 0);
1723 fdtol->fdl_refcount--;
1724 if (fdtol->fdl_refcount == 0 &&
1725 fdtol->fdl_holdcount == 0) {
1726 fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
1727 fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
1732 if (fdtol != NULL) {
1733 spin_unlock_wr(&fdp->fd_spin);
1734 free(fdtol, M_FILEDESC_TO_LEADER);
1735 spin_lock_wr(&fdp->fd_spin);
1738 if (--fdp->fd_refcnt > 0) {
1739 spin_unlock_wr(&fdp->fd_spin);
1742 spin_unlock_wr(&fdp->fd_spin);
1745 * we are the last reference to the structure, we can
1746 * safely assume it will not change out from under us.
1748 for (i = 0; i <= fdp->fd_lastfile; ++i) {
1749 if (fdp->fd_files[i].fp)
1750 closef(fdp->fd_files[i].fp, td);
1752 if (fdp->fd_files != fdp->fd_builtin_files)
1753 free(fdp->fd_files, M_FILEDESC);
1755 cache_drop(fdp->fd_ncdir);
1756 vrele(fdp->fd_cdir);
1759 cache_drop(fdp->fd_nrdir);
1760 vrele(fdp->fd_rdir);
1763 cache_drop(fdp->fd_njdir);
1764 vrele(fdp->fd_jdir);
1767 free(fdp->fd_knlist, M_KQUEUE);
1769 free(fdp->fd_knhash, M_KQUEUE);
1770 free(fdp, M_FILEDESC);
1774 * Retrieve and reference the file pointer associated with a descriptor.
1779 holdfp(struct filedesc *fdp, int fd, int flag)
1783 spin_lock_rd(&fdp->fd_spin);
1784 if (((u_int)fd) >= fdp->fd_nfiles) {
1788 if ((fp = fdp->fd_files[fd].fp) == NULL)
1790 if ((fp->f_flag & flag) == 0 && flag != -1) {
1796 spin_unlock_rd(&fdp->fd_spin);
1801 * holdsock() - load the struct file pointer associated
1802 * with a socket into *fpp. If an error occurs, non-zero
1803 * will be returned and *fpp will be set to NULL.
1808 holdsock(struct filedesc *fdp, int fd, struct file **fpp)
1813 spin_lock_rd(&fdp->fd_spin);
1814 if ((unsigned)fd >= fdp->fd_nfiles) {
1819 if ((fp = fdp->fd_files[fd].fp) == NULL) {
1823 if (fp->f_type != DTYPE_SOCKET) {
1830 spin_unlock_rd(&fdp->fd_spin);
1836 * Convert a user file descriptor to a held file pointer.
1841 holdvnode(struct filedesc *fdp, int fd, struct file **fpp)
1846 spin_lock_rd(&fdp->fd_spin);
1847 if ((unsigned)fd >= fdp->fd_nfiles) {
1852 if ((fp = fdp->fd_files[fd].fp) == NULL) {
1856 if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
1863 spin_unlock_rd(&fdp->fd_spin);
1869 * For setugid programs, we don't want people to use that setugidness
1870 * to generate error messages which write to a file which would
1871 * otherwise be off-limits to the process.
1873 * This is a gross hack to plug the hole. A better solution would involve
1874 * a special vop or other form of generalized access control mechanism. We
1875 * go ahead and just reject all procfs filesystem accesses as dangerous.
1877 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
1878 * sufficient. We also don't check for setugidness since we know we are.
1881 is_unsafe(struct file *fp)
1883 if (fp->f_type == DTYPE_VNODE &&
1884 ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
1890 * Make this setugid thing safe, if at all possible.
1892 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
1895 setugidsafety(struct proc *p)
1897 struct thread *td = p->p_thread;
1898 struct filedesc *fdp = p->p_fd;
1901 /* Certain daemons might not have file descriptors. */
1906 * note: fdp->fd_files may be reallocated out from under us while
1907 * we are blocked in a close. Be careful!
1909 for (i = 0; i <= fdp->fd_lastfile; i++) {
1912 if (fdp->fd_files[i].fp && is_unsafe(fdp->fd_files[i].fp)) {
1915 if (i < fdp->fd_knlistsize)
1916 knote_fdclose(p, i);
1918 * NULL-out descriptor prior to close to avoid
1919 * a race while close blocks.
1921 if ((fp = funsetfd_locked(fdp, i)) != NULL)
1928 * Close any files on exec?
1930 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
1933 fdcloseexec(struct proc *p)
1935 struct thread *td = p->p_thread;
1936 struct filedesc *fdp = p->p_fd;
1939 /* Certain daemons might not have file descriptors. */
1944 * We cannot cache fd_files since operations may block and rip
1945 * them out from under us.
1947 for (i = 0; i <= fdp->fd_lastfile; i++) {
1948 if (fdp->fd_files[i].fp != NULL &&
1949 (fdp->fd_files[i].fileflags & UF_EXCLOSE)) {
1952 if (i < fdp->fd_knlistsize)
1953 knote_fdclose(p, i);
1955 * NULL-out descriptor prior to close to avoid
1956 * a race while close blocks.
1958 if ((fp = funsetfd_locked(fdp, i)) != NULL)
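#if 0
/*
 * Userland illustration (not kernel code, not part of this file):
 * marking a descriptor close-on-exec sets UF_EXCLOSE in
 * fd_files[fd].fileflags, which is the flag fdcloseexec() above tests
 * when the process execs.
 */
#include <fcntl.h>

static int
mark_cloexec(int fd)
{
	int flags;

	flags = fcntl(fd, F_GETFD, 0);
	if (flags == -1)
		return (-1);
	return (fcntl(fd, F_SETFD, flags | FD_CLOEXEC));
}
#endif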
1965 * It is unsafe for set[ug]id processes to be started with file
1966 * descriptors 0..2 closed, as these descriptors are given implicit
1967 * significance in the Standard C library. fdcheckstd() will create a
1968 * descriptor referencing /dev/null for each of stdin, stdout, and
1969 * stderr that is not already open.
1971 * NOT MPSAFE - calls falloc, vn_open, etc
1974 fdcheckstd(struct proc *p)
1976 struct nlookupdata nd;
1977 struct filedesc *fdp;
1980 int i, error, flags, devnull;
1987 for (i = 0; i < 3; i++) {
1988 if (fdp->fd_files[i].fp != NULL)
1991 if ((error = falloc(p, &fp, &devnull)) != 0)
1994 error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE,
1995 NLC_FOLLOW|NLC_LOCKVP);
1996 flags = FREAD | FWRITE;
1998 error = vn_open(&nd, fp, flags, 0);
2000 fsetfd(p, fp, devnull);
2002 fsetfd(p, NULL, devnull);
2007 KKASSERT(i == devnull);
2009 error = kern_dup(DUP_FIXED, devnull, i, &retval);
2018 * Internal form of close.
2019 * Decrement reference count on file structure.
2020 * Note: td and/or p may be NULL when closing a file
2021 * that was being passed in a message.
2023 * MPALMOSTSAFE - acquires mplock for VOP operations
2026 closef(struct file *fp, struct thread *td)
2030 struct filedesc_to_leader *fdtol;
2037 p = NULL; /* allow no proc association */
2039 p = td->td_proc; /* can also be NULL */
2042 * POSIX record locking dictates that any close releases ALL
2043 * locks owned by this process. This is handled by setting
2044 * a flag in the unlock to free ONLY locks obeying POSIX
2045 * semantics, and not to free BSD-style file locks.
2046 * If the descriptor was in a message, POSIX-style locks
2047 * aren't passed with the descriptor.
2049 if (p != NULL && fp->f_type == DTYPE_VNODE &&
2050 (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
2053 if ((p->p_leader->p_flag & P_ADVLOCK) != 0) {
2054 lf.l_whence = SEEK_SET;
2057 lf.l_type = F_UNLCK;
2058 vp = (struct vnode *)fp->f_data;
2059 (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
2063 if (fdtol != NULL) {
2065 * Handle special case where file descriptor table
2066 * is shared between multiple process leaders.
2068 for (fdtol = fdtol->fdl_next;
2069 fdtol != p->p_fdtol;
2070 fdtol = fdtol->fdl_next) {
2071 if ((fdtol->fdl_leader->p_flag &
2074 fdtol->fdl_holdcount++;
2075 lf.l_whence = SEEK_SET;
2078 lf.l_type = F_UNLCK;
2079 vp = (struct vnode *)fp->f_data;
2080 (void) VOP_ADVLOCK(vp,
2081 (caddr_t)fdtol->fdl_leader,
2082 F_UNLCK, &lf, F_POSIX);
2083 fdtol->fdl_holdcount--;
2084 if (fdtol->fdl_holdcount == 0 &&
2085 fdtol->fdl_wakeup != 0) {
2086 fdtol->fdl_wakeup = 0;
2099 * fhold() can only be called if f_count is already at least 1 (i.e. the
2100 * caller of fhold() already has a reference to the file pointer in some
2103 * This is a rare case where callers are allowed to hold spinlocks, so
2104 * we can't ourselves. Since we are not obtaining the fp spinlock,
2105 * we have to use an atomic lock to interlock against fdrop().
2108 fhold(struct file *fp)
2110 atomic_add_int(&fp->f_count, 1);
2114 * A spinlock is required to handle 1->0 transitions on f_count. We have
2115 * to use atomic_sub_int so as not to race the atomic_add_int in fhold().
2117 * MPALMOSTSAFE - acquires mplock for final close sequence
2120 fdrop(struct file *fp)
2126 spin_lock_wr(&fp->f_spin);
2127 atomic_subtract_int(&fp->f_count, 1);
2128 if (fp->f_count > 0) {
2129 spin_unlock_wr(&fp->f_spin);
2132 spin_unlock_wr(&fp->f_spin);
2137 * The last reference has gone away, we own the fp structure free
2140 if (fp->f_count < 0)
2141 panic("fdrop: count < 0");
2142 if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE &&
2143 (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
2145 lf.l_whence = SEEK_SET;
2148 lf.l_type = F_UNLCK;
2149 vp = (struct vnode *)fp->f_data;
2150 (void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
2152 if (fp->f_ops != &badfileops)
2153 error = fo_close(fp);
2162 * Apply an advisory lock on a file descriptor.
2164 * Just attempt to get a record lock of the requested type on
2165 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
2168 flock(struct flock_args *uap)
2170 struct proc *p = curproc;
2176 if ((fp = holdfp(p->p_fd, uap->fd, -1)) == NULL)
2178 if (fp->f_type != DTYPE_VNODE) {
2182 vp = (struct vnode *)fp->f_data;
2183 lf.l_whence = SEEK_SET;
2186 if (uap->how & LOCK_UN) {
2187 lf.l_type = F_UNLCK;
2188 fp->f_flag &= ~FHASLOCK;
2189 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
2192 if (uap->how & LOCK_EX)
2193 lf.l_type = F_WRLCK;
2194 else if (uap->how & LOCK_SH)
2195 lf.l_type = F_RDLCK;
2200 fp->f_flag |= FHASLOCK;
2201 if (uap->how & LOCK_NB)
2202 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 0);
2204 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_WAIT);
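#if 0
/*
 * Userland illustration (not kernel code, not part of this file) of the
 * whole-file lock that flock() builds above: take a shared lock without
 * blocking, then release it.
 */
#include <sys/file.h>

static int
try_shared_lock(int fd)
{
	if (flock(fd, LOCK_SH | LOCK_NB) == -1)
		return (-1);
	/* ... read the file while holding the shared lock ... */
	return (flock(fd, LOCK_UN));
}
#endif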
2211 * File Descriptor pseudo-device driver (/dev/fd/).
2213 * Opening minor device N dup()s the file (if any) connected to file
2214 * descriptor N belonging to the calling process. Note that this driver
2215 * consists of only the ``open()'' routine, because all subsequent
2216 * references to this file will be direct to the other driver.
2220 fdopen(dev_t dev, int mode, int type, struct thread *td)
2222 KKASSERT(td->td_lwp != NULL);
2225 * XXX Kludge: set curlwp->lwp_dupfd to contain the value of the
2226 * file descriptor being sought for duplication. The error
2227 * return ensures that the vnode for this device will be released
2228 * by vn_open. Open will detect this special error and take the
2229 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
2230 * will simply report the error.
2232 td->td_lwp->lwp_dupfd = minor(dev);
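#if 0
/*
 * Userland illustration (not kernel code, not part of this file) of the
 * /dev/fd behaviour described above: opening /dev/fd/N is roughly
 * equivalent to dup(N) for the calling process (the open mode must be a
 * subset of descriptor N's mode, per dupfdopen() below).
 */
#include <fcntl.h>
#include <stdio.h>

static int
dup_via_devfd(int fd)
{
	char path[32];

	snprintf(path, sizeof(path), "/dev/fd/%d", fd);
	return (open(path, O_RDWR));
}
#endif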
2237 * The caller has reserved the file descriptor dfd for us. On success we
2238 * must fsetfd() it. On failure the caller will clean it up.
2240 * NOT MPSAFE - isn't getting spinlocks, possibly other things
2243 dupfdopen(struct proc *p, int dfd, int sfd, int mode, int error)
2245 struct filedesc *fdp = p->p_fd;
2249 if ((wfp = holdfp(fdp, sfd, -1)) == NULL)
2253 * There are two cases of interest here.
2255 * For ENODEV simply dup sfd to file descriptor dfd and return.
2257 * For ENXIO steal away the file structure from sfd and store it in
2258 * dfd. sfd is effectively closed by this operation.
2260 * Any other error code is just returned.
2265 * Check that the mode the file is being opened for is a
2266 * subset of the mode of the existing descriptor.
2268 if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag)
2270 fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
2271 fsetfd(p, wfp, dfd);
2276 * Steal away the file pointer from sfd, and stuff it into dfd.
2278 fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
2279 fsetfd(p, wfp, dfd);
2280 if ((xfp = funsetfd_locked(fdp, sfd)) != NULL)
2282 KKASSERT(xfp == wfp); /* XXX MP RACE */
2293 * NOT MPSAFE - I think these refer to a common file descriptor table
2294 * and we need to spinlock that to link fdtol in.
2296 struct filedesc_to_leader *
2297 filedesc_to_leader_alloc(struct filedesc_to_leader *old,
2298 struct proc *leader)
2300 struct filedesc_to_leader *fdtol;
2302 fdtol = malloc(sizeof(struct filedesc_to_leader),
2303 M_FILEDESC_TO_LEADER, M_WAITOK);
2304 fdtol->fdl_refcount = 1;
2305 fdtol->fdl_holdcount = 0;
2306 fdtol->fdl_wakeup = 0;
2307 fdtol->fdl_leader = leader;
2309 fdtol->fdl_next = old->fdl_next;
2310 fdtol->fdl_prev = old;
2311 old->fdl_next = fdtol;
2312 fdtol->fdl_next->fdl_prev = fdtol;
2314 fdtol->fdl_next = fdtol;
2315 fdtol->fdl_prev = fdtol;
2321 * Scan all file pointers in the system. The callback is made with
2322 * both the master list spinlock held and the fp spinlock held,
2327 * WARNING: both the filehead spinlock and the file pointer spinlock are
2328 * held exclusively when the callback is made. The file pointer is not
2332 allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data)
2337 spin_lock_wr(&filehead_spin);
2338 LIST_FOREACH(fp, &filehead, f_list) {
2339 spin_lock_wr(&fp->f_spin);
2340 res = callback(fp, data);
2341 spin_unlock_wr(&fp->f_spin);
2345 spin_unlock_wr(&filehead_spin);
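#if 0
/*
 * Hedged sketch (not from this file) of an allfiles_scan_exclusive()
 * callback.  Per the comment above it runs with both the filehead
 * spinlock and the per-file spinlock held exclusively, so it must not
 * block; the meaning of the return value is an assumption here
 * (0 taken to mean "keep scanning").
 */
static int
count_open_files_callback(struct file *fp, void *data)
{
	int *countp = data;

	if (fp->f_count > 0)
		++*countp;
	return (0);
}
#endif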
2349 * Get file structures.
2351 * NOT MPSAFE - process list scan, SYSCTL_OUT (probably not mpsafe)
2354 struct sysctl_kern_file_info {
2357 struct sysctl_req *req;
2360 static int sysctl_kern_file_callback(struct proc *p, void *data);
2363 sysctl_kern_file(SYSCTL_HANDLER_ARGS)
2365 struct sysctl_kern_file_info info;
2368 * Note: because the number of file descriptors is calculated
2369 * in different ways for sizing vs returning the data,
2370 * there is information leakage from the first loop. However,
2371 * it is of a similar order of magnitude to the leakage from
2372 * global system statistics such as kern.openfiles.
2374 * When just doing a count, note that we cannot just count
2375 * the elements and add f_count via the filehead list because
2376 * threaded processes share their descriptor table and f_count might
2377 * still be '1' in that case.
2379 * Since the SYSCTL op can block, we must hold the process to
2380 * prevent it being ripped out from under us either in the
2381 * file descriptor loop or in the greater LIST_FOREACH. The
2382 * process may be in varying states of disrepair. If the process
2383 * is in SZOMB we may have caught it just as it is being removed
2384 * from the allproc list, we must skip it in that case to maintain
2385 * an unbroken chain through the allproc list.
2390 allproc_scan(sysctl_kern_file_callback, &info);
2393 * When just calculating the size, overestimate a bit to try to
2394 * prevent system activity from causing the buffer-fill call
2397 if (req->oldptr == NULL) {
2398 info.count = (info.count + 16) + (info.count / 10);
2399 info.error = SYSCTL_OUT(req, NULL,
2400 info.count * sizeof(struct kinfo_file));
2402 return (info.error);
2406 sysctl_kern_file_callback(struct proc *p, void *data)
2408 struct sysctl_kern_file_info *info = data;
2409 struct kinfo_file kf;
2410 struct filedesc *fdp;
2415 if (p->p_stat == SIDL || (p->p_flag & P_ZOMBIE))
2417 if (!PRISON_CHECK(info->req->td->td_proc->p_ucred, p->p_ucred))
2419 if ((fdp = p->p_fd) == NULL)
2421 spin_lock_rd(&fdp->fd_spin);
2422 for (n = 0; n < fdp->fd_nfiles; ++n) {
2423 if ((fp = fdp->fd_files[n].fp) == NULL)
2425 if (info->req->oldptr == NULL) {
2428 uid = p->p_ucred ? p->p_ucred->cr_uid : -1;
2429 kcore_make_file(&kf, fp, p->p_pid, uid, n);
2430 spin_unlock_rd(&fdp->fd_spin);
2431 info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf));
2432 spin_lock_rd(&fdp->fd_spin);
2437 spin_unlock_rd(&fdp->fd_spin);
2443 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
2444 0, 0, sysctl_kern_file, "S,file", "Entire file table");
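#if 0
/*
 * Userland illustration (not kernel code, not part of this file) of
 * consuming the kern.file sysctl exported above: size the buffer first
 * (the handler deliberately overestimates), then fetch the kinfo_file
 * array.  Header locations are assumed.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/kinfo.h>
#include <stdlib.h>

static struct kinfo_file *
fetch_file_table(size_t *countp)
{
	struct kinfo_file *kf;
	size_t len;

	if (sysctlbyname("kern.file", NULL, &len, NULL, 0) < 0)
		return (NULL);
	if ((kf = malloc(len)) == NULL)
		return (NULL);
	if (sysctlbyname("kern.file", kf, &len, NULL, 0) < 0) {
		free(kf);
		return (NULL);
	}
	*countp = len / sizeof(*kf);
	return (kf);
}
#endif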
2446 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
2447 &maxfilesperproc, 0, "Maximum files allowed open per process");
2449 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
2450 &maxfiles, 0, "Maximum number of files");
2452 SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW,
2453 &maxfilesrootres, 0, "Descriptors reserved for root use");
2455 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
2456 &nfiles, 0, "System-wide number of open files");
2459 fildesc_drvinit(void *unused)
2463 cdevsw_add(&fildesc_cdevsw, 0, 0);
2464 for (fd = 0; fd < NUMFDESC; fd++) {
2465 make_dev(&fildesc_cdevsw, fd,
2466 UID_BIN, GID_BIN, 0666, "fd/%d", fd);
2468 make_dev(&fildesc_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "stdin");
2469 make_dev(&fildesc_cdevsw, 1, UID_ROOT, GID_WHEEL, 0666, "stdout");
2470 make_dev(&fildesc_cdevsw, 2, UID_ROOT, GID_WHEEL, 0666, "stderr");
2476 struct fileops badfileops = {
2506 badfo_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *cred)
2515 badfo_poll(struct file *fp, int events, struct ucred *cred)
2524 badfo_kqfilter(struct file *fp, struct knote *kn)
2530 badfo_stat(struct file *fp, struct stat *sb, struct ucred *cred)
2539 badfo_close(struct file *fp)
2548 badfo_shutdown(struct file *fp, int how)
2557 nofo_shutdown(struct file *fp, int how)
2559 return (EOPNOTSUPP);
2562 SYSINIT(fildescdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,
2563 fildesc_drvinit,NULL)