/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and code derived from software contributed to
 * Berkeley by William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: mem.c 1.13 89/10/08$
 * from: @(#)mem.c 7.2 (Berkeley) 5/9/91
 * $FreeBSD: src/sys/i386/i386/mem.c,v 1.79.2.9 2003/01/04 22:58:01 njl Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/queue.h>
#include <sys/random.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/sysctl.h>
#include <sys/signal2.h>
#include <sys/spinlock2.h>

#include <vm/vm_map.h>
#include <vm/vm_extern.h>
static d_open_t		mmopen;
static d_close_t	mmclose;
static d_read_t		mmread;
static d_write_t	mmwrite;
static d_ioctl_t	mmioctl;
static d_mmap_t		memmmap;
static d_kqfilter_t	mmkqfilter;
static int		memuksmap(vm_map_backing_t ba, int op, cdev_t dev, vm_page_t fake);
static struct dev_ops mem_ops = {
	{ "mem", 0, D_MPSAFE | D_QUICK },
	.d_kqfilter =	mmkqfilter,
};

static struct dev_ops mem_ops_mem = {
	{ "mem", 0, D_MEM | D_MPSAFE | D_QUICK },
	.d_kqfilter =	mmkqfilter,
	.d_uksmap =	memuksmap
};

static struct dev_ops mem_ops_noq = {
	{ "mem", 0, D_MPSAFE },
	.d_kqfilter =	mmkqfilter,
	.d_uksmap =	memuksmap
};
static int rand_bolt;
static cdev_t zerodev = NULL;
static struct lock mem_lock = LOCK_INITIALIZER("memlk", 0, 0);

MALLOC_DEFINE(M_MEMDESC, "memdesc", "memory range descriptors");
static int mem_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);
static int random_ioctl (cdev_t, u_long, caddr_t, int, struct ucred *);

struct mem_range_softc mem_range_softc;

static int seedenable;
SYSCTL_INT(_kern, OID_AUTO, seedenable, CTLFLAG_RW, &seedenable, 0, "");
mmopen(struct dev_open_args *ap)
	cdev_t dev = ap->a_head.a_dev;

	switch (minor(dev)) {
		/*
		 * /dev/mem and /dev/kmem
		 */
		error = caps_priv_check(ap->a_cred, SYSCAP_RESTRICTEDROOT |
					__SYSCAP_NOROOTTEST);
		if (ap->a_oflags & FWRITE) {
			if (securelevel > 0 || kernel_mem_readonly)
		/*
		 * Cannot be written to from RESTRICTEDROOT environments.
		 */
		if (ap->a_oflags & FWRITE) {
			error = caps_priv_check(ap->a_cred,
						SYSCAP_RESTRICTEDROOT |
						__SYSCAP_NOROOTTEST);
		/*
		 * /dev/kpmap can only be opened for reading.
		 */
		if (ap->a_oflags & FWRITE)
		error = caps_priv_check(ap->a_cred, SYSCAP_RESTRICTEDROOT);
		if (securelevel > 0 || kernel_mem_readonly)
		error = cpu_set_iopl();
mmclose(struct dev_close_args *ap)
	cdev_t dev = ap->a_head.a_dev;

	switch (minor(dev)) {
		error = cpu_clr_iopl();
mmrw(cdev_t dev, struct uio *uio, int flags)
	while (uio->uio_resid > 0 && error == 0) {
		if (iov->iov_len == 0) {
			if (uio->uio_iovcnt < 0)
		switch (minor(dev)) {
		/*
		 * minor device 0 is physical memory, /dev/mem
		 */
		v &= ~(long)PAGE_MASK;
		pmap_kenter((vm_offset_t)ptvmmap, v);
		o = (int)uio->uio_offset & PAGE_MASK;
		c = (u_int)(PAGE_SIZE - ((uintptr_t)iov->iov_base & PAGE_MASK));
		c = min(c, (u_int)(PAGE_SIZE - o));
		c = min(c, (u_int)iov->iov_len);
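		/*
		 * Worked example (illustrative only): with uio_offset ==
		 * 0x1234 and a page-aligned iov_base, o is 0x234 and the
		 * clamps above cut c down to PAGE_SIZE - 0x234 = 0xdcc
		 * bytes (or to iov_len if that is smaller), so a single
		 * pass never crosses the one page temporarily mapped at
		 * ptvmmap.
		 */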
		error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
		pmap_kremove((vm_offset_t)ptvmmap);

		/*
		 * minor device 1 is kernel memory, /dev/kmem
		 */
		vm_offset_t saddr, eaddr;

		/*
		 * Make sure that all of the pages are currently
		 * resident so that we don't create any zero-fill
		 * pages.
		 */
		saddr = trunc_page(uio->uio_offset);
		eaddr = round_page(uio->uio_offset + c);

		/*
		 * Make sure the kernel addresses are mapped.
		 * platform_direct_mapped() can be used to bypass
		 * default mapping via the page table (virtual kernels
		 * contain a lot of out-of-band data).
		 */
		if (uio->uio_rw != UIO_READ)
			prot |= VM_PROT_WRITE;
		error = kvm_access_check(saddr, eaddr, prot);
		error = uiomove((caddr_t)(vm_offset_t)uio->uio_offset,
		/*
		 * minor device 2 (/dev/null) is EOF/RATHOLE
		 */
		if (uio->uio_rw == UIO_READ)

		/*
		 * minor device 3 (/dev/random) is source of filth
		 * on read, seeder on write
		 */
		buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
		c = min(iov->iov_len, PAGE_SIZE);
		if (uio->uio_rw == UIO_WRITE) {
			error = uiomove(buf, (int)c, uio);
			error = add_buffer_randomness_src(buf, c, RAND_SRC_SEEDING);
		} else if (error == 0) {
		poolsize = read_random(buf, c, 0);
		if ((flags & IO_NDELAY) != 0)
			return (EWOULDBLOCK);
		c = min(c, poolsize);
		error = uiomove(buf, (int)c, uio);
		/*
		 * minor device 4 (/dev/urandom) is source of muck
		 * on read, writes are disallowed.
		 */
		c = min(iov->iov_len, PAGE_SIZE);
		if (uio->uio_rw == UIO_WRITE) {
		if (CURSIG(curthread->td_lwp) != 0) {
			/*
			 * Use tsleep() to get the error code right.
			 * It should return immediately.
			 */
			error = tsleep(&rand_bolt, PCATCH, "urand", 1);
			if (error != 0 && error != EWOULDBLOCK)
		buf = kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
		poolsize = read_random(buf, c, 1);
		c = min(c, poolsize);
		error = uiomove(buf, (int)c, uio);
		/* case 5: read/write not supported, mmap only */
		/* case 6: read/write not supported, mmap only */
		/*
		 * minor device 12 (/dev/zero) is source of nulls
		 * on read, writes are disallowed.
		 */
		if (uio->uio_rw == UIO_WRITE) {
		zbuf = (caddr_t)kmalloc(PAGE_SIZE, M_TEMP,
		c = min(iov->iov_len, PAGE_SIZE);
		error = uiomove(zbuf, (int)c, uio);
		iov->iov_base = (char *)iov->iov_base + c;
		uio->uio_offset += c;
mmread(struct dev_read_args *ap)
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));

mmwrite(struct dev_write_args *ap)
	return(mmrw(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
/*******************************************************\
* allow user processes to MMAP some memory sections	*
* instead of going through read/write			*
\*******************************************************/
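/*
 * Illustrative userland sketch (an assumption, not part of this driver):
 * mapping a physical region through /dev/mem, which is satisfied by the
 * minor-0 handling in the mapping code below.  The open must pass the
 * privilege and securelevel checks in mmopen(); map_phys() and its
 * arguments are hypothetical names.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static void *
 *	map_phys(off_t pa, size_t len)
 *	{
 *		int fd = open("/dev/mem", O_RDWR);
 *		void *va;
 *
 *		if (fd < 0)
 *			return (NULL);
 *		va = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    fd, pa);
 *		close(fd);
 *		return (va == MAP_FAILED ? NULL : va);
 *	}
 */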
static int user_kernel_mapping(vm_map_backing_t ba, int num,
			vm_ooffset_t offset, vm_ooffset_t *resultp);
memuksmap(vm_map_backing_t ba, int op, cdev_t dev, vm_page_t fake)
	/*
	 * We only need to track mappings for /dev/lpmap; all process
	 * mappings will be deleted when the process exits, and we do
	 * not need to track kernel mappings.
	 */
	if (minor(dev) == 7) {
		spin_lock(&lp->lwp_spin);
		TAILQ_INSERT_TAIL(&lp->lwp_lpmap_backing_list,
				  ba, entry);
		spin_unlock(&lp->lwp_spin);

	/*
	 * We only need to track mappings for /dev/lpmap; all process
	 * mappings will be deleted when the process exits, and we do
	 * not need to track kernel mappings.
	 */
	if (minor(dev) == 7) {
		spin_lock(&lp->lwp_spin);
		TAILQ_REMOVE(&lp->lwp_lpmap_backing_list, ba, entry);
		spin_unlock(&lp->lwp_spin);
	switch (minor(dev)) {
		/*
		 * minor device 0 is physical memory
		 */
		fake->phys_addr = ptoa(fake->pindex);

		/*
		 * minor device 1 is kernel memory
		 */
		fake->phys_addr = vtophys(ptoa(fake->pindex));

		/*
		 * minor device 5 is /dev/upmap (see sys/upmap.h)
		 * minor device 6 is /dev/kpmap (see sys/upmap.h)
		 * minor device 7 is /dev/lpmap (see sys/upmap.h)
		 */
		error = user_kernel_mapping(ba,
		fake->phys_addr = result;
mmioctl(struct dev_ioctl_args *ap)
	cdev_t dev = ap->a_head.a_dev;

	lockmgr(&mem_lock, LK_EXCLUSIVE);

	switch (minor(dev)) {
		error = mem_ioctl(dev, ap->a_cmd, ap->a_data,
				  ap->a_fflag, ap->a_cred);
		error = random_ioctl(dev, ap->a_cmd, ap->a_data,
				     ap->a_fflag, ap->a_cred);

	lockmgr(&mem_lock, LK_RELEASE);
/*
 * Operations for changing memory attributes.
 *
 * This is basically just an ioctl shim for mem_range_attr_get
 * and mem_range_attr_set.  An illustrative userland call sequence
 * is sketched after mem_ioctl() below.
 */
mem_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
	struct mem_range_op *mo = (struct mem_range_op *)data;
	struct mem_range_desc *md;

	/* is this for us? */
	if ((cmd != MEMRANGE_GET) &&
	    (cmd != MEMRANGE_SET))

	/* any chance we can handle this? */
	if (mem_range_softc.mr_op == NULL)

	/* do we have any descriptors? */
	if (mem_range_softc.mr_ndesc == 0)

		nd = imin(mo->mo_arg[0], mem_range_softc.mr_ndesc);
			md = (struct mem_range_desc *)
				kmalloc(nd * sizeof(struct mem_range_desc),
					M_MEMDESC, M_WAITOK);
			error = mem_range_attr_get(md, &nd);
				error = copyout(md, mo->mo_desc,
						nd * sizeof(struct mem_range_desc));
			kfree(md, M_MEMDESC);
			nd = mem_range_softc.mr_ndesc;

		md = (struct mem_range_desc *)kmalloc(sizeof(struct mem_range_desc),
						      M_MEMDESC, M_WAITOK);
		error = copyin(mo->mo_desc, md, sizeof(struct mem_range_desc));
		/* clamp description string */
		md->mr_owner[sizeof(md->mr_owner) - 1] = 0;
		error = mem_range_attr_set(md, &mo->mo_arg[0]);
		kfree(md, M_MEMDESC);
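/*
 * Illustrative userland sketch (an assumption, not part of this file; the
 * structures and ioctl names come from <sys/memrange.h>): query the current
 * memory range descriptors through MEMRANGE_GET on /dev/mem.  A first call
 * with mo_arg[0] == 0 just returns the descriptor count; a second call
 * fetches the descriptors themselves.  fetch_ranges() is a hypothetical
 * name.
 *
 *	#include <sys/memrange.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	static struct mem_range_desc *
 *	fetch_ranges(int *countp)
 *	{
 *		struct mem_range_op mro;
 *		struct mem_range_desc *mrd;
 *		int fd;
 *
 *		if ((fd = open("/dev/mem", O_RDONLY)) < 0)
 *			return (NULL);
 *		mro.mo_desc = NULL;
 *		mro.mo_arg[0] = 0;
 *		if (ioctl(fd, MEMRANGE_GET, &mro) < 0) {
 *			close(fd);
 *			return (NULL);
 *		}
 *		mrd = calloc(mro.mo_arg[0], sizeof(*mrd));
 *		mro.mo_desc = mrd;
 *		if (ioctl(fd, MEMRANGE_GET, &mro) < 0) {
 *			close(fd);
 *			free(mrd);
 *			return (NULL);
 *		}
 *		close(fd);
 *		*countp = mro.mo_arg[0];
 *		return (mrd);
 *	}
 */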
/*
 * Implementation-neutral, kernel-callable functions for manipulating
 * memory range attributes.
 */
mem_range_attr_get(struct mem_range_desc *mrd, int *arg)
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)

	*arg = mem_range_softc.mr_ndesc;
	bcopy(mem_range_softc.mr_desc, mrd, (*arg) * sizeof(struct mem_range_desc));

mem_range_attr_set(struct mem_range_desc *mrd, int *arg)
	/* can we handle this? */
	if (mem_range_softc.mr_op == NULL)

	return (mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg));

mem_range_AP_init(void)
	if (mem_range_softc.mr_op && mem_range_softc.mr_op->initAP)
		mem_range_softc.mr_op->initAP(&mem_range_softc);
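/*
 * Illustrative kernel-side sketch (an assumption, not part of this file):
 * a driver wanting write-combined access to a framebuffer aperture could
 * request an MTRR through mem_range_attr_set().  The MDF_WRITECOMBINE
 * flag and MEMRANGE_SET_UPDATE argument are taken from <sys/memrange.h>;
 * the function and owner names here are hypothetical.
 *
 *	static int
 *	fb_set_writecombine(vm_paddr_t base, vm_size_t len)
 *	{
 *		struct mem_range_desc mrd;
 *		int arg = MEMRANGE_SET_UPDATE;
 *
 *		mrd.mr_base = base;
 *		mrd.mr_len = len;
 *		mrd.mr_flags = MDF_WRITECOMBINE;
 *		strlcpy(mrd.mr_owner, "fbdrv", sizeof(mrd.mr_owner));
 *		return (mem_range_attr_set(&mrd, &arg));
 *	}
 */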
random_ioctl(cdev_t dev, u_long cmd, caddr_t data, int flags, struct ucred *cred)
	/*
	 * Even inspecting the state is privileged, since it gives a hint
	 * about how easily the randomness might be guessed.
	 */
		/* Really handled in upper layer */
		intr = *(int16_t *)data;
		if ((error = caps_priv_check(cred, SYSCAP_RESTRICTEDROOT)) != 0)
		if (intr < 0 || intr >= MAX_INTS)
		register_randintr(intr);

		intr = *(int16_t *)data;
		if ((error = caps_priv_check(cred, SYSCAP_RESTRICTEDROOT)) != 0)
		if (intr < 0 || intr >= MAX_INTS)
		unregister_randintr(intr);

		intr = *(int16_t *)data;
		if ((error = caps_priv_check(cred, SYSCAP_RESTRICTEDROOT)) != 0)
		if (intr < 0 || intr >= MAX_INTS)
		intr = next_registered_randintr(intr);
		if (intr == MAX_INTS)
		*(u_int16_t *)data = intr;
mm_filter_read(struct knote *kn, long hint)

mm_filter_write(struct knote *kn, long hint)

dummy_filter_detach(struct knote *kn) {}

/* Implemented in kern_nrandom.c */
static struct filterops random_read_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, dummy_filter_detach, random_filter_read };

static struct filterops mm_read_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, dummy_filter_detach, mm_filter_read };

static struct filterops mm_write_filtops =
	{ FILTEROP_ISFD|FILTEROP_MPSAFE, NULL, dummy_filter_detach, mm_filter_write };
mmkqfilter(struct dev_kqfilter_args *ap)
	struct knote *kn = ap->a_kn;
	cdev_t dev = ap->a_head.a_dev;

	switch (kn->kn_filter) {
		switch (minor(dev)) {
			kn->kn_fop = &random_read_filtops;
			kn->kn_fop = &mm_read_filtops;
		kn->kn_fop = &mm_write_filtops;
		ap->a_result = EOPNOTSUPP;

iszerodev(cdev_t dev)
	return (zerodev == dev);
/*
 * /dev/lpmap, /dev/upmap, /dev/kpmap.
 */
user_kernel_mapping(vm_map_backing_t ba, int num, vm_ooffset_t offset,
		    vm_ooffset_t *resultp)
	/*
	 * /dev/upmap - maps RW per-process shared user-kernel area.
	 *
	 * If this is a child currently in vfork the pmap is shared
	 * with the parent!  We need to actually set up the parent's
	 * p_upmap, not the child's, and we need to set the invfork
	 * flag.  Userland will probably adjust its static state, so
	 * it must be consistent with the parent or userland will be
	 * really badly confused.
	 *
	 * (this situation can happen when user code in vfork() calls
	 * libc's getpid() or some other function which then decides
	 * it wants the upmap).
	 */
	if (p->p_flags & P_PPWAIT) {

	/*
	 * Create the kernel structure as required, set the invfork
	 * flag if we are faulting in on a vfork().
	 */
	if (p->p_upmap == NULL)
		proc_usermap(p, invfork);
	if (p->p_upmap && invfork)
		p->p_upmap->invfork = invfork;

	/*
	 * Extract address for pmap
	 */
	    offset < roundup2(sizeof(*p->p_upmap), PAGE_SIZE)) {
		/* only good for current process */
		*resultp = pmap_kextract((vm_offset_t)p->p_upmap +
	/*
	 * /dev/kpmap - maps RO shared kernel global page
	 *
	 * Extract address for pmap
	 */
	    offset < roundup2(sizeof(*kpmap), PAGE_SIZE)) {
		*resultp = pmap_kextract((vm_offset_t)kpmap + offset);
	/*
	 * /dev/lpmap - maps RW per-thread shared user-kernel area.
	 */

	/*
	 * Create the kernel structure as required
	 */
	if (lp->lwp_lpmap == NULL)
		lwp_usermap(lp, -1);	/* second arg not yet XXX */

	/*
	 * Extract address for pmap
	 */
	    offset < roundup2(sizeof(*lp->lwp_lpmap), PAGE_SIZE)) {
		/* only good for current process */
		*resultp = pmap_kextract((vm_offset_t)lp->lwp_lpmap +
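/*
 * Illustrative userland sketch (an assumption, not part of this driver):
 * each thread can map its own read/write lpmap area; the page layout is
 * described by sys/upmap.h.  map_lpmap() is a hypothetical name and the
 * single-page length is an assumption based on the roundup2() check above.
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static void *
 *	map_lpmap(void)
 *	{
 *		int fd = open("/dev/lpmap", O_RDWR);
 *		void *p;
 *
 *		if (fd < 0)
 *			return (NULL);
 *		p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *		close(fd);
 *		return (p == MAP_FAILED ? NULL : p);
 *	}
 */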
mem_drvinit(void *unused)
	/* Initialise memory range handling */
	if (mem_range_softc.mr_op != NULL)
		mem_range_softc.mr_op->init(&mem_range_softc);

	make_dev(&mem_ops_mem, 0, UID_ROOT, GID_KMEM, 0640, "mem");
	make_dev(&mem_ops_mem, 1, UID_ROOT, GID_KMEM, 0640, "kmem");
	make_dev(&mem_ops, 2, UID_ROOT, GID_WHEEL, 0666, "null");
	make_dev(&mem_ops, 3, UID_ROOT, GID_WHEEL, 0644, "random");
	make_dev(&mem_ops, 4, UID_ROOT, GID_WHEEL, 0644, "urandom");
	make_dev(&mem_ops, 5, UID_ROOT, GID_WHEEL, 0666, "upmap");
	make_dev(&mem_ops, 6, UID_ROOT, GID_WHEEL, 0444, "kpmap");
	make_dev(&mem_ops, 7, UID_ROOT, GID_WHEEL, 0666, "lpmap");
	zerodev = make_dev(&mem_ops, 12, UID_ROOT, GID_WHEEL, 0666, "zero");
	make_dev(&mem_ops_noq, 14, UID_ROOT, GID_WHEEL, 0600, "io");

SYSINIT(memdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, mem_drvinit,