2 * Copyright (c) 2005 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * Copyright (c) 1982, 1986, 1989, 1991, 1993
36 * The Regents of the University of California. All rights reserved.
37 * (c) UNIX System Laboratories, Inc.
38 * All or some portions of this file are derived from material licensed
39 * to the University of California by American Telephone and Telegraph
40 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
41 * the permission of UNIX System Laboratories, Inc.
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 4. Neither the name of the University nor the names of its contributors
52 * may be used to endorse or promote products derived from this software
53 * without specific prior written permission.
55 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
67 * @(#)kern_descrip.c 8.6 (Berkeley) 4/19/94
68 * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $
71 #include "opt_compat.h"
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/malloc.h>
75 #include <sys/sysproto.h>
77 #include <sys/device.h>
79 #include <sys/filedesc.h>
80 #include <sys/kernel.h>
81 #include <sys/sysctl.h>
82 #include <sys/vnode.h>
84 #include <sys/nlookup.h>
86 #include <sys/filio.h>
87 #include <sys/fcntl.h>
88 #include <sys/unistd.h>
89 #include <sys/resourcevar.h>
90 #include <sys/event.h>
91 #include <sys/kern_syscall.h>
92 #include <sys/kcore.h>
93 #include <sys/kinfo.h>
97 #include <vm/vm_extern.h>
99 #include <sys/thread2.h>
100 #include <sys/file2.h>
101 #include <sys/spinlock2.h>
103 static void fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd);
104 static void fdreserve_locked (struct filedesc *fdp, int fd0, int incr);
105 static struct file *funsetfd_locked (struct filedesc *fdp, int fd);
106 static void ffree(struct file *fp);
108 static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
109 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
110 "file desc to leader structures");
111 MALLOC_DEFINE(M_FILE, "file", "Open file structure");
112 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
114 static struct krate krate_uidinfo = { .freq = 1 };
116 static d_open_t fdopen;
119 #define CDEV_MAJOR 22
120 static struct dev_ops fildesc_ops = {
126 * Descriptor management.
128 static struct filelist filehead = LIST_HEAD_INITIALIZER(&filehead);
129 static struct spinlock filehead_spin = SPINLOCK_INITIALIZER(&filehead_spin);
130 static int nfiles; /* actual number of open files */
134 * Fixup fd_freefile and fd_lastfile after a descriptor has been cleared.
136 * MPSAFE - must be called with fdp->fd_spin exclusively held
140 fdfixup_locked(struct filedesc *fdp, int fd)
142 if (fd < fdp->fd_freefile) {
143 fdp->fd_freefile = fd;
145 while (fdp->fd_lastfile >= 0 &&
146 fdp->fd_files[fdp->fd_lastfile].fp == NULL &&
147 fdp->fd_files[fdp->fd_lastfile].reserved == 0
154 * System calls on descriptors.
159 sys_getdtablesize(struct getdtablesize_args *uap)
161 struct proc *p = curproc;
162 struct plimit *limit = p->p_limit;
165 spin_lock(&limit->p_spin);
166 if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
169 dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;
170 spin_unlock(&limit->p_spin);
172 if (dtsize > maxfilesperproc)
173 dtsize = maxfilesperproc;
174 if (dtsize < minfilesperproc)
175 dtsize = minfilesperproc;
176 if (p->p_ucred->cr_uid && dtsize > maxfilesperuser)
177 dtsize = maxfilesperuser;
178 uap->sysmsg_result = dtsize;
183 * Duplicate a file descriptor to a particular value.
185 * note: keep in mind that a potential race condition exists when closing
186 * descriptors from a shared descriptor table (via rfork).
191 sys_dup2(struct dup2_args *uap)
196 error = kern_dup(DUP_FIXED, uap->from, uap->to, &fd);
197 uap->sysmsg_fds[0] = fd;
203 * Duplicate a file descriptor.
208 sys_dup(struct dup_args *uap)
213 error = kern_dup(DUP_VARIABLE, uap->fd, 0, &fd);
214 uap->sysmsg_fds[0] = fd;
220 * MPALMOSTSAFE - acquires mplock for fp operations
223 kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred)
225 struct thread *td = curthread;
226 struct proc *p = td->td_proc;
232 int tmp, error, flg = F_POSIX;
237 * Operations on file descriptors that do not require a file pointer.
241 error = fgetfdflags(p->p_fd, fd, &tmp);
243 dat->fc_cloexec = (tmp & UF_EXCLOSE) ? FD_CLOEXEC : 0;
247 if (dat->fc_cloexec & FD_CLOEXEC)
248 error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
250 error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
254 error = kern_dup(DUP_VARIABLE, fd, newmin, &dat->fc_fd);
261 * Operations on file pointers
263 if ((fp = holdfp(p->p_fd, fd, -1)) == NULL)
268 dat->fc_flags = OFLAGS(fp->f_flag);
274 nflags = FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS;
275 nflags |= oflags & ~FCNTLFLAGS;
278 if (((nflags ^ oflags) & O_APPEND) && (oflags & FAPPENDONLY))
280 if (error == 0 && ((nflags ^ oflags) & FASYNC)) {
281 tmp = nflags & FASYNC;
282 error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp,
290 error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner,
295 error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner,
301 /* Fall into F_SETLK */
304 if (fp->f_type != DTYPE_VNODE) {
308 vp = (struct vnode *)fp->f_data;
311 * copyin/lockop may block
313 if (dat->fc_flock.l_whence == SEEK_CUR)
314 dat->fc_flock.l_start += fp->f_offset;
316 switch (dat->fc_flock.l_type) {
318 if ((fp->f_flag & FREAD) == 0) {
322 if ((p->p_leader->p_flags & P_ADVLOCK) == 0) {
323 lwkt_gettoken(&p->p_leader->p_token);
324 p->p_leader->p_flags |= P_ADVLOCK;
325 lwkt_reltoken(&p->p_leader->p_token);
327 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
328 &dat->fc_flock, flg);
331 if ((fp->f_flag & FWRITE) == 0) {
335 if ((p->p_leader->p_flags & P_ADVLOCK) == 0) {
336 lwkt_gettoken(&p->p_leader->p_token);
337 p->p_leader->p_flags |= P_ADVLOCK;
338 lwkt_reltoken(&p->p_leader->p_token);
340 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
341 &dat->fc_flock, flg);
344 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
345 &dat->fc_flock, F_POSIX);
353 * It is possible to race a close() on the descriptor while
354 * we were blocked getting the lock. If this occurs the
355 * close might not have caught the lock.
357 if (checkfdclosed(p->p_fd, fd, fp)) {
358 dat->fc_flock.l_whence = SEEK_SET;
359 dat->fc_flock.l_start = 0;
360 dat->fc_flock.l_len = 0;
361 dat->fc_flock.l_type = F_UNLCK;
362 (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
363 F_UNLCK, &dat->fc_flock, F_POSIX);
368 if (fp->f_type != DTYPE_VNODE) {
372 vp = (struct vnode *)fp->f_data;
374 * copyin/lockop may block
376 if (dat->fc_flock.l_type != F_RDLCK &&
377 dat->fc_flock.l_type != F_WRLCK &&
378 dat->fc_flock.l_type != F_UNLCK) {
382 if (dat->fc_flock.l_whence == SEEK_CUR)
383 dat->fc_flock.l_start += fp->f_offset;
384 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
385 &dat->fc_flock, F_POSIX);
397 * The file control system call.
402 sys_fcntl(struct fcntl_args *uap)
409 dat.fc_fd = uap->arg;
412 dat.fc_cloexec = uap->arg;
415 dat.fc_flags = uap->arg;
418 dat.fc_owner = uap->arg;
423 error = copyin((caddr_t)uap->arg, &dat.fc_flock,
424 sizeof(struct flock));
430 error = kern_fcntl(uap->fd, uap->cmd, &dat, curthread->td_ucred);
435 uap->sysmsg_result = dat.fc_fd;
438 uap->sysmsg_result = dat.fc_cloexec;
441 uap->sysmsg_result = dat.fc_flags;
444 uap->sysmsg_result = dat.fc_owner;
446 error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
447 sizeof(struct flock));
456 * Common code for dup, dup2, and fcntl(F_DUPFD).
458 * The type flag can be either DUP_FIXED or DUP_VARIABLE. DUP_FIXED tells
459 * kern_dup() to destructively dup over an existing file descriptor if new
460 * is already open. DUP_VARIABLE tells kern_dup() to find the lowest
461 * unused file descriptor that is greater than or equal to new.
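/*
 * Added illustrative note (not part of the original source): the callers
 * in this file map onto kern_dup() as follows --
 *
 *	dup(fd)                  -> kern_dup(DUP_VARIABLE, fd, 0, &res)
 *	dup2(from, to)           -> kern_dup(DUP_FIXED, from, to, &res)
 *	fcntl(fd, F_DUPFD, min)  -> kern_dup(DUP_VARIABLE, fd, min, &res)
 */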
466 kern_dup(enum dup_type type, int old, int new, int *res)
468 struct thread *td = curthread;
469 struct proc *p = td->td_proc;
470 struct filedesc *fdp = p->p_fd;
479 * Verify that we have a valid descriptor to dup from and
480 * possibly to dup to.
482 * NOTE: maxfilesperuser is not applicable to dup()
485 if (p->p_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
488 dtsize = (int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur;
489 if (dtsize > maxfilesperproc)
490 dtsize = maxfilesperproc;
491 if (dtsize < minfilesperproc)
492 dtsize = minfilesperproc;
494 if (new < 0 || new > dtsize)
497 spin_lock(&fdp->fd_spin);
498 if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) {
499 spin_unlock(&fdp->fd_spin);
502 if (type == DUP_FIXED && old == new) {
504 spin_unlock(&fdp->fd_spin);
507 fp = fdp->fd_files[old].fp;
508 oldflags = fdp->fd_files[old].fileflags;
509 fhold(fp); /* MPSAFE - can be called with a spinlock held */
512 * Allocate a new descriptor if DUP_VARIABLE, or expand the table
513 * if the requested descriptor is beyond the current table size.
515 * This can block. Retry if the source descriptor no longer matches
516 * or if our expectation in the expansion case races.
518 * If we are not expanding or allocating a new descriptor, then reset
519 * the target descriptor to a reserved state so we have a uniform
520 * setup for the next code block.
522 if (type == DUP_VARIABLE || new >= fdp->fd_nfiles) {
523 spin_unlock(&fdp->fd_spin);
524 error = fdalloc(p, new, &newfd);
525 spin_lock(&fdp->fd_spin);
527 spin_unlock(&fdp->fd_spin);
534 if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) {
535 fsetfd_locked(fdp, NULL, newfd);
536 spin_unlock(&fdp->fd_spin);
541 * Check for expansion race
543 if (type != DUP_VARIABLE && new != newfd) {
544 fsetfd_locked(fdp, NULL, newfd);
545 spin_unlock(&fdp->fd_spin);
550 * Check for ripout, newfd reused old (this case probably
551 * never occurs).
554 fsetfd_locked(fdp, NULL, newfd);
555 spin_unlock(&fdp->fd_spin);
562 if (fdp->fd_files[new].reserved) {
563 spin_unlock(&fdp->fd_spin);
565 kprintf("Warning: dup(): target descriptor %d is reserved, waiting for it to be resolved\n", new);
566 tsleep(fdp, 0, "fdres", hz);
571 * If the target descriptor was never allocated we have
572 * to allocate it. If it was we have to clean out the
573 * old descriptor. delfp inherits the ref from the
574 * descriptor table.
576 delfp = fdp->fd_files[new].fp;
577 fdp->fd_files[new].fp = NULL;
578 fdp->fd_files[new].reserved = 1;
580 fdreserve_locked(fdp, new, 1);
581 if (new > fdp->fd_lastfile)
582 fdp->fd_lastfile = new;
588 * NOTE: still holding an exclusive spinlock
592 * If a descriptor is being overwritten we may have to tell
593 * fdfree() to sleep to ensure that all relevant process
594 * leaders can be traversed in closef().
596 if (delfp != NULL && p->p_fdtol != NULL) {
597 fdp->fd_holdleaderscount++;
602 KASSERT(delfp == NULL || type == DUP_FIXED,
603 ("dup() picked an open file"));
606 * Duplicate the source descriptor, update lastfile. If the new
607 * descriptor was not allocated and we aren't replacing an existing
608 * descriptor we have to mark the descriptor as being in use.
610 * The fd_files[] array inherits fp's hold reference.
612 fsetfd_locked(fdp, fp, new);
613 fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE;
614 spin_unlock(&fdp->fd_spin);
619 * If we dup'd over a valid file, we now own the reference to it
620 * and must dispose of it using closef() semantics (as if a
621 * close() were performed on it).
624 if (SLIST_FIRST(&delfp->f_klist))
625 knote_fdclose(delfp, fdp, new);
628 spin_lock(&fdp->fd_spin);
629 fdp->fd_holdleaderscount--;
630 if (fdp->fd_holdleaderscount == 0 &&
631 fdp->fd_holdleaderswakeup != 0) {
632 fdp->fd_holdleaderswakeup = 0;
633 spin_unlock(&fdp->fd_spin);
634 wakeup(&fdp->fd_holdleaderscount);
636 spin_unlock(&fdp->fd_spin);
644 * If sigio is on the list associated with a process or process group,
645 * disable signalling from the device, remove sigio from the list and
646 * free sigio.
651 funsetown(struct sigio **sigiop)
657 if ((sigio = *sigiop) != NULL) {
658 lwkt_gettoken(&proc_token); /* protect sigio */
659 KKASSERT(sigiop == sigio->sio_myref);
662 lwkt_reltoken(&proc_token);
667 if (sigio->sio_pgid < 0) {
668 pgrp = sigio->sio_pgrp;
669 sigio->sio_pgrp = NULL;
670 lwkt_gettoken(&pgrp->pg_token);
671 SLIST_REMOVE(&pgrp->pg_sigiolst, sigio, sigio, sio_pgsigio);
672 lwkt_reltoken(&pgrp->pg_token);
674 } else /* if ((*sigiop)->sio_pgid > 0) */ {
676 sigio->sio_proc = NULL;
678 lwkt_gettoken(&p->p_token);
679 SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, sio_pgsigio);
680 lwkt_reltoken(&p->p_token);
683 crfree(sigio->sio_ucred);
684 sigio->sio_ucred = NULL;
685 kfree(sigio, M_SIGIO);
689 * Free a list of sigio structures. Caller is responsible for ensuring
690 * that the list is MPSAFE.
695 funsetownlst(struct sigiolst *sigiolst)
699 while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
700 funsetown(sigio->sio_myref);
704 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
706 * After permission checking, add a sigio structure to the sigio list for
707 * the process or process group.
712 fsetown(pid_t pgid, struct sigio **sigiop)
714 struct proc *proc = NULL;
715 struct pgrp *pgrp = NULL;
732 * Policy - Don't allow a process to FSETOWN a process
733 * in another session.
735 * Remove this test to allow maximum flexibility or
736 * restrict FSETOWN to the current process or process
737 * group for maximum safety.
739 if (proc->p_session != curproc->p_session) {
743 } else /* if (pgid < 0) */ {
744 pgrp = pgfind(-pgid);
751 * Policy - Don't allow a process to FSETOWN a process
752 * in another session.
754 * Remove this test to allow maximum flexibility or
755 * restrict FSETOWN to the current process or process
756 * group for maximum safety.
758 if (pgrp->pg_session != curproc->p_session) {
763 sigio = kmalloc(sizeof(struct sigio), M_SIGIO, M_WAITOK | M_ZERO);
765 KKASSERT(pgrp == NULL);
766 lwkt_gettoken(&proc->p_token);
767 SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
768 sigio->sio_proc = proc;
769 lwkt_reltoken(&proc->p_token);
771 KKASSERT(proc == NULL);
772 lwkt_gettoken(&pgrp->pg_token);
773 SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
774 sigio->sio_pgrp = pgrp;
775 lwkt_reltoken(&pgrp->pg_token);
778 sigio->sio_pgid = pgid;
779 sigio->sio_ucred = crhold(curthread->td_ucred);
780 /* It would be convenient if p_ruid was in ucred. */
781 sigio->sio_ruid = sigio->sio_ucred->cr_ruid;
782 sigio->sio_myref = sigiop;
784 lwkt_gettoken(&proc_token);
788 lwkt_reltoken(&proc_token);
799 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
804 fgetown(struct sigio **sigiop)
809 lwkt_gettoken(&proc_token);
811 own = (sigio != NULL ? sigio->sio_pgid : 0);
812 lwkt_reltoken(&proc_token);
818 * Close many file descriptors.
823 sys_closefrom(struct closefrom_args *uap)
825 return(kern_closefrom(uap->fd));
829 * Close all file descriptors greater than or equal to fd
834 kern_closefrom(int fd)
836 struct thread *td = curthread;
837 struct proc *p = td->td_proc;
838 struct filedesc *fdp;
847 * NOTE: This function will skip unassociated descriptors and
848 * reserved descriptors that have not yet been assigned.
849 * fd_lastfile can change as a side effect of kern_close().
851 spin_lock(&fdp->fd_spin);
852 while (fd <= fdp->fd_lastfile) {
853 if (fdp->fd_files[fd].fp != NULL) {
854 spin_unlock(&fdp->fd_spin);
855 /* ok if this races another close */
856 if (kern_close(fd) == EINTR)
858 spin_lock(&fdp->fd_spin);
862 spin_unlock(&fdp->fd_spin);
867 * Close a file descriptor.
872 sys_close(struct close_args *uap)
874 return(kern_close(uap->fd));
883 struct thread *td = curthread;
884 struct proc *p = td->td_proc;
885 struct filedesc *fdp;
893 spin_lock(&fdp->fd_spin);
894 if ((fp = funsetfd_locked(fdp, fd)) == NULL) {
895 spin_unlock(&fdp->fd_spin);
899 if (p->p_fdtol != NULL) {
901 * Ask fdfree() to sleep to ensure that all relevant
902 * process leaders can be traversed in closef().
904 fdp->fd_holdleaderscount++;
909 * we now hold the fp reference that used to be owned by the descriptor
912 spin_unlock(&fdp->fd_spin);
913 if (SLIST_FIRST(&fp->f_klist))
914 knote_fdclose(fp, fdp, fd);
915 error = closef(fp, p);
917 spin_lock(&fdp->fd_spin);
918 fdp->fd_holdleaderscount--;
919 if (fdp->fd_holdleaderscount == 0 &&
920 fdp->fd_holdleaderswakeup != 0) {
921 fdp->fd_holdleaderswakeup = 0;
922 spin_unlock(&fdp->fd_spin);
923 wakeup(&fdp->fd_holdleaderscount);
925 spin_unlock(&fdp->fd_spin);
932 * shutdown_args(int fd, int how)
935 kern_shutdown(int fd, int how)
937 struct thread *td = curthread;
938 struct proc *p = td->td_proc;
944 if ((fp = holdfp(p->p_fd, fd, -1)) == NULL)
946 error = fo_shutdown(fp, how);
956 sys_shutdown(struct shutdown_args *uap)
960 error = kern_shutdown(uap->s, uap->how);
969 kern_fstat(int fd, struct stat *ub)
971 struct thread *td = curthread;
972 struct proc *p = td->td_proc;
978 if ((fp = holdfp(p->p_fd, fd, -1)) == NULL)
980 error = fo_stat(fp, ub, td->td_ucred);
987 * Return status information about a file descriptor.
992 sys_fstat(struct fstat_args *uap)
997 error = kern_fstat(uap->fd, &st);
1000 error = copyout(&st, uap->sb, sizeof(st));
1005 * Return pathconf information about a file descriptor.
1010 sys_fpathconf(struct fpathconf_args *uap)
1012 struct thread *td = curthread;
1013 struct proc *p = td->td_proc;
1018 if ((fp = holdfp(p->p_fd, uap->fd, -1)) == NULL)
1021 switch (fp->f_type) {
1024 if (uap->name != _PC_PIPE_BUF) {
1027 uap->sysmsg_result = PIPE_BUF;
1033 vp = (struct vnode *)fp->f_data;
1034 error = VOP_PATHCONF(vp, uap->name, &uap->sysmsg_reg);
1044 static int fdexpand;
1045 SYSCTL_INT(_debug, OID_AUTO, fdexpand, CTLFLAG_RD, &fdexpand, 0,
1046 "Number of times a file table has been expanded");
1049 * Grow the file table so it can hold through descriptor (want).
1051 * The fdp's spinlock must be held exclusively on entry and may be held
1052 * exclusively on return. The spinlock may be cycled by the routine.
1057 fdgrow_locked(struct filedesc *fdp, int want)
1059 struct fdnode *newfiles;
1060 struct fdnode *oldfiles;
1063 nf = fdp->fd_nfiles;
1064 do {
1065 /* nf has to be of the form 2^n - 1 */
1066 nf = 2 * nf + 1;
1067 } while (nf <= want);
1069 spin_unlock(&fdp->fd_spin);
1070 newfiles = kmalloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK);
1071 spin_lock(&fdp->fd_spin);
1074 * We could have raced another extend while we were not holding
1077 if (fdp->fd_nfiles >= nf) {
1078 spin_unlock(&fdp->fd_spin);
1079 kfree(newfiles, M_FILEDESC);
1080 spin_lock(&fdp->fd_spin);
1084 * Copy the existing ofile and ofileflags arrays
1085 * and zero the new portion of each array.
1087 extra = nf - fdp->fd_nfiles;
1088 bcopy(fdp->fd_files, newfiles, fdp->fd_nfiles * sizeof(struct fdnode));
1089 bzero(&newfiles[fdp->fd_nfiles], extra * sizeof(struct fdnode));
1091 oldfiles = fdp->fd_files;
1092 fdp->fd_files = newfiles;
1093 fdp->fd_nfiles = nf;
1095 if (oldfiles != fdp->fd_builtin_files) {
1096 spin_unlock(&fdp->fd_spin);
1097 kfree(oldfiles, M_FILEDESC);
1098 spin_lock(&fdp->fd_spin);
1104 * Number of nodes in right subtree, including the root.
1107 right_subtree_size(int n)
1109 return (n ^ (n | (n + 1)));
1116 right_ancestor(int n)
1118 return (n | (n + 1));
1125 left_ancestor(int n)
1127 return ((n & (n + 1)) - 1);
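/*
 * Added illustrative note (not part of the original source): for a
 * 7-entry table the in-place tree is rooted at index 3, with children
 * 1 and 5 and leaves 0, 2, 4 and 6.  Worked examples of the bit
 * arithmetic above:
 *
 *	right_subtree_size(3) = 3 ^ (3 | 4) = 4    (nodes 3,4,5,6)
 *	right_subtree_size(1) = 1 ^ (1 | 2) = 2    (nodes 1,2)
 *	right_ancestor(4)     = 4 | 5       = 5
 *	left_ancestor(5)      = (5 & 6) - 1 = 3
 *	left_ancestor(1)      = (1 & 2) - 1 = -1   (no left ancestor)
 */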
1131 * Traverse the in-place binary tree bottom-up adjusting the allocation
1132 * count so scans can determine where free descriptors are located.
1134 * MPSAFE - caller must be holding an exclusive spinlock on fdp
1138 fdreserve_locked(struct filedesc *fdp, int fd, int incr)
1141 fdp->fd_files[fd].allocated += incr;
1142 KKASSERT(fdp->fd_files[fd].allocated >= 0);
1143 fd = left_ancestor(fd);
1148 * Reserve a file descriptor for the process. If no error occurs, the
1149 * caller MUST at some point call fsetfd() or assign a file pointer
1150 * or dispose of the reservation.
1155 fdalloc(struct proc *p, int want, int *result)
1157 struct filedesc *fdp = p->p_fd;
1158 struct uidinfo *uip;
1159 int fd, rsize, rsum, node, lim;
1162 * Check dtable size limit
1164 spin_lock(&p->p_limit->p_spin);
1165 if (p->p_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
1168 lim = (int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur;
1169 spin_unlock(&p->p_limit->p_spin);
1171 if (lim > maxfilesperproc)
1172 lim = maxfilesperproc;
1173 if (lim < minfilesperproc)
1174 lim = minfilesperproc;
1179 * Check that the user has not run out of descriptors (non-root only).
1180 * As a safety measure the dtable is allowed to have at least
1181 * minfilesperproc open fds regardless of the maxfilesperuser limit.
1183 if (p->p_ucred->cr_uid && fdp->fd_nfiles >= minfilesperproc) {
1184 uip = p->p_ucred->cr_uidinfo;
1185 if (uip->ui_openfiles > maxfilesperuser) {
1186 krateprintf(&krate_uidinfo,
1187 "Warning: user %d pid %d (%s) ran out of "
1188 "file descriptors (%d/%d)\n",
1189 p->p_ucred->cr_uid, (int)p->p_pid,
1191 uip->ui_openfiles, maxfilesperuser);
1197 * Grow the dtable if necessary
1199 spin_lock(&fdp->fd_spin);
1200 if (want >= fdp->fd_nfiles)
1201 fdgrow_locked(fdp, want);
1204 * Search for a free descriptor starting at the higher
1205 * of want or fd_freefile. If that fails, consider
1206 * expanding the ofile array.
1208 * NOTE! the 'allocated' field is a cumulative recursive allocation
1209 * count. If we happen to see a value of 0 then we can shortcut
1210 * our search. Otherwise we run through the tree going
1211 * down branches we know have free descriptor(s) until we hit a
1212 * leaf node. The leaf node will be free but will not necessarily
1213 * have an allocated field of 0.
1216 /* move up the tree looking for a subtree with a free node */
1217 for (fd = max(want, fdp->fd_freefile); fd < min(fdp->fd_nfiles, lim);
1218 fd = right_ancestor(fd)) {
1219 if (fdp->fd_files[fd].allocated == 0)
1222 rsize = right_subtree_size(fd);
1223 if (fdp->fd_files[fd].allocated == rsize)
1224 continue; /* right subtree full */
1227 * Free fd is in the right subtree of the tree rooted at fd.
1228 * Call that subtree R. Look for the smallest (leftmost)
1229 * subtree of R with an unallocated fd: continue moving
1230 * down the left branch until encountering a full left
1231 * subtree, then move to the right.
1233 for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
1235 rsum += fdp->fd_files[node].allocated;
1236 if (fdp->fd_files[fd].allocated == rsum + rsize) {
1237 fd = node; /* move to the right */
1238 if (fdp->fd_files[node].allocated == 0)
1247 * No space in current array. Expand?
1249 if (fdp->fd_nfiles >= lim) {
1250 spin_unlock(&fdp->fd_spin);
1253 fdgrow_locked(fdp, want);
1257 KKASSERT(fd < fdp->fd_nfiles);
1258 if (fd > fdp->fd_lastfile)
1259 fdp->fd_lastfile = fd;
1260 if (want <= fdp->fd_freefile)
1261 fdp->fd_freefile = fd;
1263 KKASSERT(fdp->fd_files[fd].fp == NULL);
1264 KKASSERT(fdp->fd_files[fd].reserved == 0);
1265 fdp->fd_files[fd].fileflags = 0;
1266 fdp->fd_files[fd].reserved = 1;
1267 fdreserve_locked(fdp, fd, 1);
1268 spin_unlock(&fdp->fd_spin);
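/*
 * Added usage sketch (not part of the original source), following the
 * reservation protocol described above and the falloc()/fsetfd() pattern
 * used by fdcheckstd() later in this file.  'fp_setup_succeeded' is a
 * placeholder condition: a reserved descriptor must eventually be
 * associated with a file pointer, or released by passing a NULL fp.
 */
#if 0
	int fd, error;

	if ((error = fdalloc(p, 0, &fd)) != 0)
		return (error);
	/* ... construct or look up a struct file *fp ... */
	if (fp_setup_succeeded)
		fsetfd(p->p_fd, fp, fd);	/* associate fp with fd */
	else
		fsetfd(p->p_fd, NULL, fd);	/* return fd to the pool */
#endif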
1273 * Check to see whether n user file descriptors
1274 * are available to the process p.
1279 fdavail(struct proc *p, int n)
1281 struct filedesc *fdp = p->p_fd;
1282 struct fdnode *fdnode;
1285 spin_lock(&p->p_limit->p_spin);
1286 if (p->p_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
1289 lim = (int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur;
1290 spin_unlock(&p->p_limit->p_spin);
1292 if (lim > maxfilesperproc)
1293 lim = maxfilesperproc;
1294 if (lim < minfilesperproc)
1295 lim = minfilesperproc;
1297 spin_lock(&fdp->fd_spin);
1298 if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) {
1299 spin_unlock(&fdp->fd_spin);
1302 last = min(fdp->fd_nfiles, lim);
1303 fdnode = &fdp->fd_files[fdp->fd_freefile];
1304 for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) {
1305 if (fdnode->fp == NULL && --n <= 0) {
1306 spin_unlock(&fdp->fd_spin);
1310 spin_unlock(&fdp->fd_spin);
1315 * Revoke open descriptors referencing (f_data, f_type)
1317 * Any revoke executed within a prison is only able to
1318 * revoke descriptors for processes within that prison.
1320 * Returns 0 on success or an error code.
1322 struct fdrevoke_info {
1332 static int fdrevoke_check_callback(struct file *fp, void *vinfo);
1333 static int fdrevoke_proc_callback(struct proc *p, void *vinfo);
1336 fdrevoke(void *f_data, short f_type, struct ucred *cred)
1338 struct fdrevoke_info info;
1341 bzero(&info, sizeof(info));
1345 error = falloc(NULL, &info.nfp, NULL);
1350 * Scan the file pointer table once. dups do not dup file pointers,
1351 * only descriptors, so there is no leak. Set FREVOKED on the fps
1352 * found.
1354 allfiles_scan_exclusive(fdrevoke_check_callback, &info);
1357 * If any fps were marked track down the related descriptors
1358 * and close them. Any dup()s at this point will notice
1359 * the FREVOKED already set in the fp and do the right thing.
1361 * Any fps with non-zero msgcounts (aka sent over a unix-domain
1362 * socket) bumped the intransit counter and will require a
1363 * scan. Races against fps leaving the socket are closed by
1364 * the socket code checking for FREVOKED.
1367 allproc_scan(fdrevoke_proc_callback, &info);
1369 unp_revoke_gc(info.nfp);
1375 * Locate matching file pointers directly.
1377 * WARNING: allfiles_scan_exclusive() holds a spinlock through these calls!
1380 fdrevoke_check_callback(struct file *fp, void *vinfo)
1382 struct fdrevoke_info *info = vinfo;
1385 * File pointers already flagged for revocation are skipped.
1387 if (fp->f_flag & FREVOKED)
1391 * If revoking from a prison, file pointers created outside of
1392 * that prison, or file pointers without creds, cannot be revoked.
1394 if (info->cred->cr_prison &&
1395 (fp->f_cred == NULL ||
1396 info->cred->cr_prison != fp->f_cred->cr_prison)) {
1401 * If the file pointer matches then mark it for revocation. The
1402 * flag is currently only used by unp_revoke_gc().
1404 * info->count is a heuristic and can race in a SMP environment.
1406 if (info->data == fp->f_data && info->type == fp->f_type) {
1407 atomic_set_int(&fp->f_flag, FREVOKED);
1408 info->count += fp->f_count;
1416 * Locate matching file pointers via process descriptor tables.
1419 fdrevoke_proc_callback(struct proc *p, void *vinfo)
1421 struct fdrevoke_info *info = vinfo;
1422 struct filedesc *fdp;
1426 if (p->p_stat == SIDL || p->p_stat == SZOMB)
1428 if (info->cred->cr_prison &&
1429 info->cred->cr_prison != p->p_ucred->cr_prison) {
1434 * If the controlling terminal of the process matches the
1435 * vnode being revoked we clear the controlling terminal.
1437 * The normal spec_close() may not catch this because it
1438 * uses curproc instead of p.
1440 if (p->p_session && info->type == DTYPE_VNODE &&
1441 info->data == p->p_session->s_ttyvp) {
1442 p->p_session->s_ttyvp = NULL;
1447 * Softref the fdp to prevent it from being destroyed
1449 spin_lock(&p->p_spin);
1450 if ((fdp = p->p_fd) == NULL) {
1451 spin_unlock(&p->p_spin);
1454 atomic_add_int(&fdp->fd_softrefs, 1);
1455 spin_unlock(&p->p_spin);
1458 * Locate and close any matching file descriptors.
1460 spin_lock(&fdp->fd_spin);
1461 for (n = 0; n < fdp->fd_nfiles; ++n) {
1462 if ((fp = fdp->fd_files[n].fp) == NULL)
1464 if (fp->f_flag & FREVOKED) {
1466 fdp->fd_files[n].fp = info->nfp;
1467 spin_unlock(&fdp->fd_spin);
1468 knote_fdclose(fp, fdp, n); /* XXX */
1470 spin_lock(&fdp->fd_spin);
1474 spin_unlock(&fdp->fd_spin);
1475 atomic_subtract_int(&fdp->fd_softrefs, 1);
1481 * Create a new open file structure and reserve a file descriptor
1482 * for the process that refers to it.
1484 * Root creds are checked using lp, or assumed if lp is NULL. If
1485 * resultfd is non-NULL then lp must also be non-NULL. No file
1486 * descriptor is reserved (and no process context is needed) if
1487 * resultfd is NULL.
1489 * A file pointer with a refcount of 1 is returned. Note that the
1490 * file pointer is NOT associated with the descriptor. If falloc
1491 * returns success, fsetfd() MUST be called to either associate the
1492 * file pointer or clear the reservation.
1497 falloc(struct lwp *lp, struct file **resultfp, int *resultfd)
1499 static struct timeval lastfail;
1502 struct ucred *cred = lp ? lp->lwp_thread->td_ucred : proc0.p_ucred;
1508 * Handle filetable full issues and root overfill.
1510 if (nfiles >= maxfiles - maxfilesrootres &&
1511 (cred->cr_ruid != 0 || nfiles >= maxfiles)) {
1512 if (ppsratecheck(&lastfail, &curfail, 1)) {
1513 kprintf("kern.maxfiles limit exceeded by uid %d, "
1514 "please see tuning(7).\n",
1522 * Allocate a new file descriptor.
1524 fp = kmalloc(sizeof(struct file), M_FILE, M_WAITOK | M_ZERO);
1525 spin_init(&fp->f_spin);
1526 SLIST_INIT(&fp->f_klist);
1528 fp->f_ops = &badfileops;
1531 spin_lock(&filehead_spin);
1533 LIST_INSERT_HEAD(&filehead, fp, f_list);
1534 spin_unlock(&filehead_spin);
1536 if ((error = fdalloc(lp->lwp_proc, 0, resultfd)) != 0) {
1549 * Check for races against a file descriptor by determining that the
1550 * file pointer is still associated with the specified file descriptor,
1551 * and a close is not currently in progress.
1556 checkfdclosed(struct filedesc *fdp, int fd, struct file *fp)
1560 spin_lock_shared(&fdp->fd_spin);
1561 if ((unsigned)fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp)
1565 spin_unlock_shared(&fdp->fd_spin);
1570 * Associate a file pointer with a previously reserved file descriptor.
1571 * This function always succeeds.
1573 * If fp is NULL, the file descriptor is returned to the pool.
1577 * MPSAFE (exclusive spinlock must be held on call)
1580 fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd)
1582 KKASSERT((unsigned)fd < fdp->fd_nfiles);
1583 KKASSERT(fdp->fd_files[fd].reserved != 0);
1586 fdp->fd_files[fd].fp = fp;
1587 fdp->fd_files[fd].reserved = 0;
1589 fdp->fd_files[fd].reserved = 0;
1590 fdreserve_locked(fdp, fd, -1);
1591 fdfixup_locked(fdp, fd);
1599 fsetfd(struct filedesc *fdp, struct file *fp, int fd)
1601 spin_lock(&fdp->fd_spin);
1602 fsetfd_locked(fdp, fp, fd);
1603 spin_unlock(&fdp->fd_spin);
1607 * MPSAFE (exclusive spinlock must be held on call)
1611 funsetfd_locked(struct filedesc *fdp, int fd)
1615 if ((unsigned)fd >= fdp->fd_nfiles)
1617 if ((fp = fdp->fd_files[fd].fp) == NULL)
1619 fdp->fd_files[fd].fp = NULL;
1620 fdp->fd_files[fd].fileflags = 0;
1622 fdreserve_locked(fdp, fd, -1);
1623 fdfixup_locked(fdp, fd);
1631 fgetfdflags(struct filedesc *fdp, int fd, int *flagsp)
1635 spin_lock(&fdp->fd_spin);
1636 if (((u_int)fd) >= fdp->fd_nfiles) {
1638 } else if (fdp->fd_files[fd].fp == NULL) {
1641 *flagsp = fdp->fd_files[fd].fileflags;
1644 spin_unlock(&fdp->fd_spin);
1652 fsetfdflags(struct filedesc *fdp, int fd, int add_flags)
1656 spin_lock(&fdp->fd_spin);
1657 if (((u_int)fd) >= fdp->fd_nfiles) {
1659 } else if (fdp->fd_files[fd].fp == NULL) {
1662 fdp->fd_files[fd].fileflags |= add_flags;
1665 spin_unlock(&fdp->fd_spin);
1673 fclrfdflags(struct filedesc *fdp, int fd, int rem_flags)
1677 spin_lock(&fdp->fd_spin);
1678 if (((u_int)fd) >= fdp->fd_nfiles) {
1680 } else if (fdp->fd_files[fd].fp == NULL) {
1683 fdp->fd_files[fd].fileflags &= ~rem_flags;
1686 spin_unlock(&fdp->fd_spin);
1691 * Set/Change/Clear the creds for a fp and synchronize the uidinfo.
1694 fsetcred(struct file *fp, struct ucred *ncr)
1697 struct uidinfo *uip;
1700 if (ocr == NULL || ncr == NULL || ocr->cr_uidinfo != ncr->cr_uidinfo) {
1702 uip = ocr->cr_uidinfo;
1703 atomic_add_int(&uip->ui_openfiles, -1);
1706 uip = ncr->cr_uidinfo;
1707 atomic_add_int(&uip->ui_openfiles, 1);
1718 * Free a file descriptor.
1722 ffree(struct file *fp)
1724 KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
1725 spin_lock(&filehead_spin);
1726 LIST_REMOVE(fp, f_list);
1728 spin_unlock(&filehead_spin);
1730 if (fp->f_nchandle.ncp)
1731 cache_drop(&fp->f_nchandle);
1736 * called from init_main, initialize filedesc0 for proc0.
1739 fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask)
1743 fdp0->fd_refcnt = 1;
1744 fdp0->fd_cmask = cmask;
1745 fdp0->fd_files = fdp0->fd_builtin_files;
1746 fdp0->fd_nfiles = NDFILE;
1747 fdp0->fd_lastfile = -1;
1748 spin_init(&fdp0->fd_spin);
1752 * Build a new filedesc structure.
1757 fdinit(struct proc *p)
1759 struct filedesc *newfdp;
1760 struct filedesc *fdp = p->p_fd;
1762 newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO);
1763 spin_lock(&fdp->fd_spin);
1765 newfdp->fd_cdir = fdp->fd_cdir;
1766 vref(newfdp->fd_cdir);
1767 cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
1771 * rdir may not be set in e.g. proc0 or anything vm_fork'd off of
1772 * proc0, but should unconditionally exist in other processes.
1775 newfdp->fd_rdir = fdp->fd_rdir;
1776 vref(newfdp->fd_rdir);
1777 cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
1780 newfdp->fd_jdir = fdp->fd_jdir;
1781 vref(newfdp->fd_jdir);
1782 cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
1784 spin_unlock(&fdp->fd_spin);
1786 /* Create the file descriptor table. */
1787 newfdp->fd_refcnt = 1;
1788 newfdp->fd_cmask = cmask;
1789 newfdp->fd_files = newfdp->fd_builtin_files;
1790 newfdp->fd_nfiles = NDFILE;
1791 newfdp->fd_lastfile = -1;
1792 spin_init(&newfdp->fd_spin);
1798 * Share a filedesc structure.
1803 fdshare(struct proc *p)
1805 struct filedesc *fdp;
1808 spin_lock(&fdp->fd_spin);
1810 spin_unlock(&fdp->fd_spin);
1815 * Copy a filedesc structure.
1820 fdcopy(struct proc *p, struct filedesc **fpp)
1822 struct filedesc *fdp = p->p_fd;
1823 struct filedesc *newfdp;
1824 struct fdnode *fdnode;
1829 * Certain daemons might not have file descriptors.
1835 * Allocate the new filedesc and fd_files[] array. This can race
1836 * with operations by other threads on the fdp so we have to be
1837 * careful.
1839 newfdp = kmalloc(sizeof(struct filedesc),
1840 M_FILEDESC, M_WAITOK | M_ZERO | M_NULLOK);
1841 if (newfdp == NULL) {
1846 spin_lock(&fdp->fd_spin);
1847 if (fdp->fd_lastfile < NDFILE) {
1848 newfdp->fd_files = newfdp->fd_builtin_files;
1852 * We have to allocate (2^N-1) entries for our in-place
1853 * binary tree. Allow the table to shrink.
1857 while (ni > fdp->fd_lastfile && ni > NDFILE) {
1861 spin_unlock(&fdp->fd_spin);
1862 newfdp->fd_files = kmalloc(i * sizeof(struct fdnode),
1863 M_FILEDESC, M_WAITOK | M_ZERO);
1866 * Check for race, retry
1868 spin_lock(&fdp->fd_spin);
1869 if (i <= fdp->fd_lastfile) {
1870 spin_unlock(&fdp->fd_spin);
1871 kfree(newfdp->fd_files, M_FILEDESC);
1877 * Dup the remaining fields. vref() and cache_hold() can be
1878 * safely called while holding the read spinlock on fdp.
1880 * The read spinlock on fdp is still being held.
1882 * NOTE: vref and cache_hold calls for the case where the vnode
1883 * or cache entry already has at least one ref may be called
1884 * while holding spin locks.
1886 if ((newfdp->fd_cdir = fdp->fd_cdir) != NULL) {
1887 vref(newfdp->fd_cdir);
1888 cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
1891 * We must check for fd_rdir here, at least for now because
1892 * the init process is created before we have access to the
1893 * rootvnode to take a reference to it.
1895 if ((newfdp->fd_rdir = fdp->fd_rdir) != NULL) {
1896 vref(newfdp->fd_rdir);
1897 cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
1899 if ((newfdp->fd_jdir = fdp->fd_jdir) != NULL) {
1900 vref(newfdp->fd_jdir);
1901 cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
1903 newfdp->fd_refcnt = 1;
1904 newfdp->fd_nfiles = i;
1905 newfdp->fd_lastfile = fdp->fd_lastfile;
1906 newfdp->fd_freefile = fdp->fd_freefile;
1907 newfdp->fd_cmask = fdp->fd_cmask;
1908 spin_init(&newfdp->fd_spin);
1911 * Copy the descriptor table through (i). This also copies the
1912 * allocation state. Then go through and ref the file pointers
1913 * and clean up any KQ descriptors.
1915 * kq descriptors cannot be copied. Since we haven't ref'd the
1916 * copied files yet we can ignore the return value from funsetfd().
1918 * The read spinlock on fdp is still being held.
1920 bcopy(fdp->fd_files, newfdp->fd_files, i * sizeof(struct fdnode));
1921 for (i = 0 ; i < newfdp->fd_nfiles; ++i) {
1922 fdnode = &newfdp->fd_files[i];
1923 if (fdnode->reserved) {
1924 fdreserve_locked(newfdp, i, -1);
1925 fdnode->reserved = 0;
1926 fdfixup_locked(newfdp, i);
1927 } else if (fdnode->fp) {
1928 if (fdnode->fp->f_type == DTYPE_KQUEUE) {
1929 (void)funsetfd_locked(newfdp, i);
1935 spin_unlock(&fdp->fd_spin);
1941 * Release a filedesc structure.
1943 * NOT MPSAFE (MPSAFE for refs > 1, but the final cleanup code is not MPSAFE)
1946 fdfree(struct proc *p, struct filedesc *repl)
1948 struct filedesc *fdp;
1949 struct fdnode *fdnode;
1951 struct filedesc_to_leader *fdtol;
1957 * Certain daemons might not have file descriptors.
1966 * Severe messing around to follow.
1968 spin_lock(&fdp->fd_spin);
1970 /* Check for special need to clear POSIX style locks */
1972 if (fdtol != NULL) {
1973 KASSERT(fdtol->fdl_refcount > 0,
1974 ("filedesc_to_refcount botch: fdl_refcount=%d",
1975 fdtol->fdl_refcount));
1976 if (fdtol->fdl_refcount == 1 &&
1977 (p->p_leader->p_flags & P_ADVLOCK) != 0) {
1978 for (i = 0; i <= fdp->fd_lastfile; ++i) {
1979 fdnode = &fdp->fd_files[i];
1980 if (fdnode->fp == NULL ||
1981 fdnode->fp->f_type != DTYPE_VNODE) {
1986 spin_unlock(&fdp->fd_spin);
1988 lf.l_whence = SEEK_SET;
1991 lf.l_type = F_UNLCK;
1992 vp = (struct vnode *)fp->f_data;
1993 (void) VOP_ADVLOCK(vp,
1994 (caddr_t)p->p_leader,
1999 spin_lock(&fdp->fd_spin);
2003 if (fdtol->fdl_refcount == 1) {
2004 if (fdp->fd_holdleaderscount > 0 &&
2005 (p->p_leader->p_flags & P_ADVLOCK) != 0) {
2007 * close() or do_dup() has cleared a reference
2008 * in a shared file descriptor table.
2010 fdp->fd_holdleaderswakeup = 1;
2011 ssleep(&fdp->fd_holdleaderscount,
2012 &fdp->fd_spin, 0, "fdlhold", 0);
2015 if (fdtol->fdl_holdcount > 0) {
2017 * Ensure that fdtol->fdl_leader
2018 * remains valid in closef().
2020 fdtol->fdl_wakeup = 1;
2021 ssleep(fdtol, &fdp->fd_spin, 0, "fdlhold", 0);
2025 fdtol->fdl_refcount--;
2026 if (fdtol->fdl_refcount == 0 &&
2027 fdtol->fdl_holdcount == 0) {
2028 fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
2029 fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
2034 if (fdtol != NULL) {
2035 spin_unlock(&fdp->fd_spin);
2036 kfree(fdtol, M_FILEDESC_TO_LEADER);
2037 spin_lock(&fdp->fd_spin);
2040 if (--fdp->fd_refcnt > 0) {
2041 spin_unlock(&fdp->fd_spin);
2042 spin_lock(&p->p_spin);
2044 spin_unlock(&p->p_spin);
2049 * Even though we are the last reference to the structure allproc
2050 * scans may still reference the structure. Maintain proper
2051 * locks until we can replace p->p_fd.
2053 * Also note that kqueue's closef still needs to reference the
2054 * fdp via p->p_fd, so we have to close the descriptors before
2055 * we replace p->p_fd.
2057 for (i = 0; i <= fdp->fd_lastfile; ++i) {
2058 if (fdp->fd_files[i].fp) {
2059 fp = funsetfd_locked(fdp, i);
2061 spin_unlock(&fdp->fd_spin);
2062 if (SLIST_FIRST(&fp->f_klist))
2063 knote_fdclose(fp, fdp, i);
2065 spin_lock(&fdp->fd_spin);
2069 spin_unlock(&fdp->fd_spin);
2072 * Interlock against allproc scan operations (typically frevoke).
2074 spin_lock(&p->p_spin);
2076 spin_unlock(&p->p_spin);
2079 * Wait for any softrefs to go away. This race rarely occurs so
2080 * we can use a non-critical-path style poll/sleep loop. The
2081 * race only occurs against allproc scans.
2083 * No new softrefs can occur with the fdp disconnected from the
2084 * process.
2086 if (fdp->fd_softrefs) {
2087 kprintf("pid %d: Warning, fdp race avoided\n", p->p_pid);
2088 while (fdp->fd_softrefs)
2089 tsleep(&fdp->fd_softrefs, 0, "fdsoft", 1);
2092 if (fdp->fd_files != fdp->fd_builtin_files)
2093 kfree(fdp->fd_files, M_FILEDESC);
2095 cache_drop(&fdp->fd_ncdir);
2096 vrele(fdp->fd_cdir);
2099 cache_drop(&fdp->fd_nrdir);
2100 vrele(fdp->fd_rdir);
2103 cache_drop(&fdp->fd_njdir);
2104 vrele(fdp->fd_jdir);
2106 kfree(fdp, M_FILEDESC);
2110 * Retrieve and reference the file pointer associated with a descriptor.
2115 holdfp(struct filedesc *fdp, int fd, int flag)
2119 spin_lock_shared(&fdp->fd_spin);
2120 if (((u_int)fd) >= fdp->fd_nfiles) {
2124 if ((fp = fdp->fd_files[fd].fp) == NULL)
2126 if ((fp->f_flag & flag) == 0 && flag != -1) {
2132 spin_unlock_shared(&fdp->fd_spin);
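/*
 * Added usage sketch (not part of the original source): the typical
 * pattern used by the kern_*() helpers in this file.  A flag of -1
 * accepts any descriptor, while FREAD/FWRITE require the descriptor to
 * have been opened with that access mode.  The EBADF value is an
 * assumption for the elided error path.
 */
#if 0
	struct file *fp;

	if ((fp = holdfp(p->p_fd, fd, FREAD)) == NULL)
		return (EBADF);
	/* ... operate on fp while holding the reference ... */
	fdrop(fp);
#endif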
2137 * holdsock() - load the struct file pointer associated
2138 * with a socket into *fpp. If an error occurs, non-zero
2139 * will be returned and *fpp will be set to NULL.
2144 holdsock(struct filedesc *fdp, int fd, struct file **fpp)
2149 spin_lock_shared(&fdp->fd_spin);
2150 if ((unsigned)fd >= fdp->fd_nfiles) {
2155 if ((fp = fdp->fd_files[fd].fp) == NULL) {
2159 if (fp->f_type != DTYPE_SOCKET) {
2166 spin_unlock_shared(&fdp->fd_spin);
2172 * Convert a user file descriptor to a held file pointer.
2177 holdvnode(struct filedesc *fdp, int fd, struct file **fpp)
2182 spin_lock_shared(&fdp->fd_spin);
2183 if ((unsigned)fd >= fdp->fd_nfiles) {
2188 if ((fp = fdp->fd_files[fd].fp) == NULL) {
2192 if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
2200 spin_unlock_shared(&fdp->fd_spin);
2206 * For setugid programs, we don't want people to use that setugidness
2207 * to generate error messages which write to a file which would
2208 * otherwise be off-limits to the process.
2210 * This is a gross hack to plug the hole. A better solution would involve
2211 * a special vop or other form of generalized access control mechanism. We
2212 * go ahead and just reject all procfs filesystem accesses as dangerous.
2214 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
2215 * sufficient. We also don't check for setugidness since we know we are.
2218 is_unsafe(struct file *fp)
2220 if (fp->f_type == DTYPE_VNODE &&
2221 ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
2227 * Make this setugid thing safe, if at all possible.
2229 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
2232 setugidsafety(struct proc *p)
2234 struct filedesc *fdp = p->p_fd;
2237 /* Certain daemons might not have file descriptors. */
2242 * note: fdp->fd_files may be reallocated out from under us while
2243 * we are blocked in a close. Be careful!
2245 for (i = 0; i <= fdp->fd_lastfile; i++) {
2248 if (fdp->fd_files[i].fp && is_unsafe(fdp->fd_files[i].fp)) {
2252 * NULL-out descriptor prior to close to avoid
2253 * a race while close blocks.
2255 if ((fp = funsetfd_locked(fdp, i)) != NULL) {
2256 knote_fdclose(fp, fdp, i);
2264 * Close any files on exec?
2266 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
2269 fdcloseexec(struct proc *p)
2271 struct filedesc *fdp = p->p_fd;
2274 /* Certain daemons might not have file descriptors. */
2279 * We cannot cache fd_files since operations may block and rip
2280 * them out from under us.
2282 for (i = 0; i <= fdp->fd_lastfile; i++) {
2283 if (fdp->fd_files[i].fp != NULL &&
2284 (fdp->fd_files[i].fileflags & UF_EXCLOSE)) {
2288 * NULL-out descriptor prior to close to avoid
2289 * a race while close blocks.
2291 if ((fp = funsetfd_locked(fdp, i)) != NULL) {
2292 knote_fdclose(fp, fdp, i);
2300 * It is unsafe for set[ug]id processes to be started with file
2301 * descriptors 0..2 closed, as these descriptors are given implicit
2302 * significance in the Standard C library. fdcheckstd() will create a
2303 * descriptor referencing /dev/null for each of stdin, stdout, and
2304 * stderr that is not already open.
2306 * NOT MPSAFE - calls falloc, vn_open, etc
2309 fdcheckstd(struct lwp *lp)
2311 struct nlookupdata nd;
2312 struct filedesc *fdp;
2315 int i, error, flags, devnull;
2317 fdp = lp->lwp_proc->p_fd;
2322 for (i = 0; i < 3; i++) {
2323 if (fdp->fd_files[i].fp != NULL)
2326 if ((error = falloc(lp, &fp, &devnull)) != 0)
2329 error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE,
2330 NLC_FOLLOW|NLC_LOCKVP);
2331 flags = FREAD | FWRITE;
2333 error = vn_open(&nd, fp, flags, 0);
2335 fsetfd(fdp, fp, devnull);
2337 fsetfd(fdp, NULL, devnull);
2342 KKASSERT(i == devnull);
2344 error = kern_dup(DUP_FIXED, devnull, i, &retval);
2353 * Internal form of close.
2354 * Decrement reference count on file structure.
2355 * Note: td and/or p may be NULL when closing a file
2356 * that was being passed in a message.
2358 * MPALMOSTSAFE - acquires mplock for VOP operations
2361 closef(struct file *fp, struct proc *p)
2365 struct filedesc_to_leader *fdtol;
2371 * POSIX record locking dictates that any close releases ALL
2372 * locks owned by this process. This is handled by setting
2373 * a flag in the unlock to free ONLY locks obeying POSIX
2374 * semantics, and not to free BSD-style file locks.
2375 * If the descriptor was in a message, POSIX-style locks
2376 * aren't passed with the descriptor.
2378 if (p != NULL && fp->f_type == DTYPE_VNODE &&
2379 (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
2381 if ((p->p_leader->p_flags & P_ADVLOCK) != 0) {
2382 lf.l_whence = SEEK_SET;
2385 lf.l_type = F_UNLCK;
2386 vp = (struct vnode *)fp->f_data;
2387 (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
2391 if (fdtol != NULL) {
2392 lwkt_gettoken(&p->p_token);
2394 * Handle special case where file descriptor table
2395 * is shared between multiple process leaders.
2397 for (fdtol = fdtol->fdl_next;
2398 fdtol != p->p_fdtol;
2399 fdtol = fdtol->fdl_next) {
2400 if ((fdtol->fdl_leader->p_flags &
2403 fdtol->fdl_holdcount++;
2404 lf.l_whence = SEEK_SET;
2407 lf.l_type = F_UNLCK;
2408 vp = (struct vnode *)fp->f_data;
2409 (void) VOP_ADVLOCK(vp,
2410 (caddr_t)fdtol->fdl_leader,
2411 F_UNLCK, &lf, F_POSIX);
2412 fdtol->fdl_holdcount--;
2413 if (fdtol->fdl_holdcount == 0 &&
2414 fdtol->fdl_wakeup != 0) {
2415 fdtol->fdl_wakeup = 0;
2419 lwkt_reltoken(&p->p_token);
2428 * fhold() can only be called if f_count is already at least 1 (i.e. the
2429 * caller of fhold() already has a reference to the file pointer in some
2432 * f_count is not spin-locked. Instead, atomic ops are used for
2433 * incrementing, decrementing, and handling the 1->0 transition.
2436 fhold(struct file *fp)
2438 atomic_add_int(&fp->f_count, 1);
2442 * fdrop() - drop a reference to a descriptor
2444 * MPALMOSTSAFE - acquires mplock for final close sequence
2447 fdrop(struct file *fp)
2454 * A combined fetch and subtract is needed to properly detect
2455 * 1->0 transitions, otherwise two cpus dropping from a ref
2456 * count of 2 might both try to run the 1->0 code.
2458 if (atomic_fetchadd_int(&fp->f_count, -1) > 1)
2461 KKASSERT(SLIST_FIRST(&fp->f_klist) == NULL);
2464 * The last reference has gone away, we own the fp structure free
2465 * and clear.
2467 if (fp->f_count < 0)
2468 panic("fdrop: count < 0");
2469 if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE &&
2470 (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
2472 lf.l_whence = SEEK_SET;
2475 lf.l_type = F_UNLCK;
2476 vp = (struct vnode *)fp->f_data;
2477 (void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
2479 if (fp->f_ops != &badfileops)
2480 error = fo_close(fp);
2488 * Apply an advisory lock on a file descriptor.
2490 * Just attempt to get a record lock of the requested type on
2491 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
2496 sys_flock(struct flock_args *uap)
2498 struct proc *p = curproc;
2504 if ((fp = holdfp(p->p_fd, uap->fd, -1)) == NULL)
2506 if (fp->f_type != DTYPE_VNODE) {
2510 vp = (struct vnode *)fp->f_data;
2511 lf.l_whence = SEEK_SET;
2514 if (uap->how & LOCK_UN) {
2515 lf.l_type = F_UNLCK;
2516 fp->f_flag &= ~FHASLOCK;
2517 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
2520 if (uap->how & LOCK_EX)
2521 lf.l_type = F_WRLCK;
2522 else if (uap->how & LOCK_SH)
2523 lf.l_type = F_RDLCK;
2528 fp->f_flag |= FHASLOCK;
2529 if (uap->how & LOCK_NB)
2530 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 0);
2532 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_WAIT);
2539 * File Descriptor pseudo-device driver (/dev/fd/).
2541 * Opening minor device N dup()s the file (if any) connected to file
2542 * descriptor N belonging to the calling process. Note that this driver
2543 * consists of only the ``open()'' routine, because all subsequent
2544 * references to this file will be direct to the other driver.
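/*
 * Added illustrative note (not part of the original source): opening
 * "/dev/fd/3", for example, makes fdopen() below record 3 in
 * lwp_dupfd and fail the open; the open path then calls dupfdopen(),
 * which in the ENODEV case simply dups descriptor 3 into the newly
 * reserved descriptor.
 */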
2547 fdopen(struct dev_open_args *ap)
2549 thread_t td = curthread;
2551 KKASSERT(td->td_lwp != NULL);
2554 * XXX Kludge: set curlwp->lwp_dupfd to contain the value of the
2555 * file descriptor being sought for duplication. The error
2556 * return ensures that the vnode for this device will be released
2557 * by vn_open. Open will detect this special error and take the
2558 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
2559 * will simply report the error.
2561 td->td_lwp->lwp_dupfd = minor(ap->a_head.a_dev);
2566 * The caller has reserved the file descriptor dfd for us. On success we
2567 * must fsetfd() it. On failure the caller will clean it up.
2572 dupfdopen(struct filedesc *fdp, int dfd, int sfd, int mode, int error)
2578 if ((wfp = holdfp(fdp, sfd, -1)) == NULL)
2582 * Close a revoke/dup race. Duping a descriptor marked as revoked
2583 * will dup a dummy descriptor instead of the real one.
2585 if (wfp->f_flag & FREVOKED) {
2586 kprintf("Warning: attempt to dup() a revoked descriptor\n");
2589 werror = falloc(NULL, &wfp, NULL);
2595 * There are two cases of interest here.
2597 * For ENODEV simply dup sfd to file descriptor dfd and return.
2599 * For ENXIO steal away the file structure from sfd and store it in
2600 * dfd. sfd is effectively closed by this operation.
2602 * Any other error code is just returned.
2607 * Check that the mode the file is being opened for is a
2608 * subset of the mode of the existing descriptor.
2610 if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
2614 spin_lock(&fdp->fd_spin);
2615 fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
2616 fsetfd_locked(fdp, wfp, dfd);
2617 spin_unlock(&fdp->fd_spin);
2622 * Steal away the file pointer from sfd, and stuff it into dfd.
2624 spin_lock(&fdp->fd_spin);
2625 fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
2626 fsetfd_locked(fdp, wfp, dfd);
2627 if ((xfp = funsetfd_locked(fdp, sfd)) != NULL) {
2628 spin_unlock(&fdp->fd_spin);
2631 spin_unlock(&fdp->fd_spin);
2643 * NOT MPSAFE - I think these refer to a common file descriptor table
2644 * and we need to spinlock that to link fdtol in.
2646 struct filedesc_to_leader *
2647 filedesc_to_leader_alloc(struct filedesc_to_leader *old,
2648 struct proc *leader)
2650 struct filedesc_to_leader *fdtol;
2652 fdtol = kmalloc(sizeof(struct filedesc_to_leader),
2653 M_FILEDESC_TO_LEADER, M_WAITOK | M_ZERO);
2654 fdtol->fdl_refcount = 1;
2655 fdtol->fdl_holdcount = 0;
2656 fdtol->fdl_wakeup = 0;
2657 fdtol->fdl_leader = leader;
2659 fdtol->fdl_next = old->fdl_next;
2660 fdtol->fdl_prev = old;
2661 old->fdl_next = fdtol;
2662 fdtol->fdl_next->fdl_prev = fdtol;
2664 fdtol->fdl_next = fdtol;
2665 fdtol->fdl_prev = fdtol;
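/*
 * Added illustrative note (not part of the original source): fdtol
 * entries form a circular doubly-linked list.  When 'old' exists the
 * new entry is spliced in immediately after it; otherwise the new
 * entry links to itself in both directions, forming a one-element ring.
 */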
2671 * Scan all file pointers in the system. The callback is made with
2672 * the master list spinlock held exclusively.
2677 allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data)
2682 spin_lock(&filehead_spin);
2683 LIST_FOREACH(fp, &filehead, f_list) {
2684 res = callback(fp, data);
2688 spin_unlock(&filehead_spin);
2692 * Get file structures.
2694 * NOT MPSAFE - process list scan, SYSCTL_OUT (probably not mpsafe)
2697 struct sysctl_kern_file_info {
2700 struct sysctl_req *req;
2703 static int sysctl_kern_file_callback(struct proc *p, void *data);
2706 sysctl_kern_file(SYSCTL_HANDLER_ARGS)
2708 struct sysctl_kern_file_info info;
2711 * Note: because the number of file descriptors is calculated
2712 * in different ways for sizing vs returning the data,
2713 * there is information leakage from the first loop. However,
2714 * it is of a similar order of magnitude to the leakage from
2715 * global system statistics such as kern.openfiles.
2717 * When just doing a count, note that we cannot just count
2718 * the elements and add f_count via the filehead list because
2719 * threaded processes share their descriptor table and f_count might
2720 * still be '1' in that case.
2722 * Since the SYSCTL op can block, we must hold the process to
2723 * prevent it being ripped out from under us either in the
2724 * file descriptor loop or in the greater LIST_FOREACH. The
2725 * process may be in varying states of disrepair. If the process
2726 * is in SZOMB we may have caught it just as it is being removed
2727 * from the allproc list, we must skip it in that case to maintain
2728 * an unbroken chain through the allproc list.
2733 allproc_scan(sysctl_kern_file_callback, &info);
2736 * When just calculating the size, overestimate a bit to try to
2737 * prevent system activity from causing the buffer-fill call
2738 * to fail later on.
2740 if (req->oldptr == NULL) {
2741 info.count = (info.count + 16) + (info.count / 10);
2742 info.error = SYSCTL_OUT(req, NULL,
2743 info.count * sizeof(struct kinfo_file));
2745 return (info.error);
2749 sysctl_kern_file_callback(struct proc *p, void *data)
2751 struct sysctl_kern_file_info *info = data;
2752 struct kinfo_file kf;
2753 struct filedesc *fdp;
2758 if (p->p_stat == SIDL || p->p_stat == SZOMB)
2760 if (!PRISON_CHECK(info->req->td->td_ucred, p->p_ucred) != 0)
2764 * Softref the fdp to prevent it from being destroyed
2766 spin_lock(&p->p_spin);
2767 if ((fdp = p->p_fd) == NULL) {
2768 spin_unlock(&p->p_spin);
2771 atomic_add_int(&fdp->fd_softrefs, 1);
2772 spin_unlock(&p->p_spin);
2775 * The fdp's own spinlock prevents the contents from being
2778 spin_lock_shared(&fdp->fd_spin);
2779 for (n = 0; n < fdp->fd_nfiles; ++n) {
2780 if ((fp = fdp->fd_files[n].fp) == NULL)
2782 if (info->req->oldptr == NULL) {
2785 uid = p->p_ucred ? p->p_ucred->cr_uid : -1;
2786 kcore_make_file(&kf, fp, p->p_pid, uid, n);
2787 spin_unlock_shared(&fdp->fd_spin);
2788 info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf));
2789 spin_lock_shared(&fdp->fd_spin);
2794 spin_unlock_shared(&fdp->fd_spin);
2795 atomic_subtract_int(&fdp->fd_softrefs, 1);
2801 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
2802 0, 0, sysctl_kern_file, "S,file", "Entire file table");
2804 SYSCTL_INT(_kern, OID_AUTO, minfilesperproc, CTLFLAG_RW,
2805 &minfilesperproc, 0, "Minimum files allowed open per process");
2806 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
2807 &maxfilesperproc, 0, "Maximum files allowed open per process");
2808 SYSCTL_INT(_kern, OID_AUTO, maxfilesperuser, CTLFLAG_RW,
2809 &maxfilesperuser, 0, "Maximum files allowed open per user");
2811 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
2812 &maxfiles, 0, "Maximum number of files");
2814 SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW,
2815 &maxfilesrootres, 0, "Descriptors reserved for root use");
2817 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
2818 &nfiles, 0, "System-wide number of open files");
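/*
 * Added usage sketch (not part of the original source): the limits and
 * counters above are visible from userland through sysctl(3), e.g.:
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

	int maxf, openf;
	size_t len;

	len = sizeof(maxf);
	sysctlbyname("kern.maxfiles", &maxf, &len, NULL, 0);
	len = sizeof(openf);
	sysctlbyname("kern.openfiles", &openf, &len, NULL, 0);
#endif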
2821 fildesc_drvinit(void *unused)
2825 for (fd = 0; fd < NUMFDESC; fd++) {
2826 make_dev(&fildesc_ops, fd,
2827 UID_BIN, GID_BIN, 0666, "fd/%d", fd);
2830 make_dev(&fildesc_ops, 0, UID_ROOT, GID_WHEEL, 0666, "stdin");
2831 make_dev(&fildesc_ops, 1, UID_ROOT, GID_WHEEL, 0666, "stdout");
2832 make_dev(&fildesc_ops, 2, UID_ROOT, GID_WHEEL, 0666, "stderr");
2838 struct fileops badfileops = {
2839 .fo_read = badfo_readwrite,
2840 .fo_write = badfo_readwrite,
2841 .fo_ioctl = badfo_ioctl,
2842 .fo_kqfilter = badfo_kqfilter,
2843 .fo_stat = badfo_stat,
2844 .fo_close = badfo_close,
2845 .fo_shutdown = badfo_shutdown
2859 badfo_ioctl(struct file *fp, u_long com, caddr_t data,
2860 struct ucred *cred, struct sysmsg *msgv)
2866 * Must return an error to prevent registration, typically
2867 * due to a revoked descriptor (file_filtops assigned).
2870 badfo_kqfilter(struct file *fp, struct knote *kn)
2872 return (EOPNOTSUPP);
2879 badfo_stat(struct file *fp, struct stat *sb, struct ucred *cred)
2888 badfo_close(struct file *fp)
2897 badfo_shutdown(struct file *fp, int how)
2906 nofo_shutdown(struct file *fp, int how)
2908 return (EOPNOTSUPP);
2911 SYSINIT(fildescdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,
2912 fildesc_drvinit,NULL)