From: Matthew Dillon Date: Thu, 28 Dec 2006 21:24:02 +0000 (+0000) Subject: Make kernel_map, buffer_map, clean_map, exec_map, and pager_map direct X-Git-Tag: v2.0.1~3899 X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/commitdiff_plain/e4846942819ebf777de481efddc19fb1b0f669f2 Make kernel_map, buffer_map, clean_map, exec_map, and pager_map direct structural declarations instead of pointers. Clean up all related code, in particular kmem_suballoc(). Remove the offset calculation for kernel_object. kernel_object's page indices used to be relative to the start of kernel virtual memory in order to improve the performance of VM page scanning algorithms. The optimization is no longer needed now that VM objects use Red-Black trees. Removal of the offset simplifies a number of calculations and makes the code more readable. --- diff --git a/sys/ddb/db_aout.c b/sys/ddb/db_aout.c index 167f276467..0edec107f9 100644 --- a/sys/ddb/db_aout.c +++ b/sys/ddb/db_aout.c @@ -24,7 +24,7 @@ * rights to redistribute these changes. * * $FreeBSD: src/sys/ddb/db_aout.c,v 1.27 1999/08/28 00:41:05 peter Exp $ - * $DragonFly: src/sys/ddb/db_aout.c,v 1.7 2006/12/23 00:27:02 swildner Exp $ + * $DragonFly: src/sys/ddb/db_aout.c,v 1.8 2006/12/28 21:24:01 dillon Exp $ */ /* @@ -361,7 +361,7 @@ read_symtab_from_file(struct file *fp, char *symtab_name) table_size = sizeof(int) + symsize + strsize; table_size = (table_size + sizeof(int)-1) & ~(sizeof(int)-1); - symtab = kmem_alloc_wired(kernel_map, table_size); + symtab = kmem_alloc_wired(&kernel_map, table_size); *(int *)symtab = symsize; diff --git a/sys/ddb/db_break.c b/sys/ddb/db_break.c index e801efd865..e910a4f5be 100644 --- a/sys/ddb/db_break.c +++ b/sys/ddb/db_break.c @@ -24,7 +24,7 @@ * rights to redistribute these changes. 
* * $FreeBSD: src/sys/ddb/db_break.c,v 1.18 1999/08/28 00:41:05 peter Exp $ - * $DragonFly: src/sys/ddb/db_break.c,v 1.6 2006/11/07 17:51:22 dillon Exp $ + * $DragonFly: src/sys/ddb/db_break.c,v 1.7 2006/12/28 21:24:01 dillon Exp $ */ /* @@ -295,8 +295,8 @@ boolean_t db_map_equal(vm_map_t map1, vm_map_t map2) { return ((map1 == map2) || - ((map1 == NULL) && (map2 == kernel_map)) || - ((map1 == kernel_map) && (map2 == NULL))); + ((map1 == NULL) && (map2 == &kernel_map)) || + ((map1 == &kernel_map) && (map2 == NULL))); } boolean_t @@ -306,7 +306,7 @@ db_map_current(vm_map_t map) thread_t thread; return ((map == NULL) || - (map == kernel_map) || + (map == &kernel_map) || (((thread = current_thread()) != NULL) && (map == thread->task->map))); #else @@ -317,5 +317,5 @@ db_map_current(vm_map_t map) vm_map_t db_map_addr(vm_offset_t addr) { - return kernel_map; + return &kernel_map; } diff --git a/sys/dev/netif/pdq_layer/pdqvar.h b/sys/dev/netif/pdq_layer/pdqvar.h index b68516369a..821bf662e3 100644 --- a/sys/dev/netif/pdq_layer/pdqvar.h +++ b/sys/dev/netif/pdq_layer/pdqvar.h @@ -22,7 +22,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD: src/sys/dev/pdq/pdqvar.h,v 1.3.2.1 2002/05/14 21:02:11 gallatin Exp $ - * $DragonFly: src/sys/dev/netif/pdq_layer/Attic/pdqvar.h,v 1.11 2006/10/25 20:55:58 dillon Exp $ + * $DragonFly: src/sys/dev/netif/pdq_layer/Attic/pdqvar.h,v 1.12 2006/12/28 21:23:57 dillon Exp $ * */ @@ -92,10 +92,10 @@ enum _pdq_type_t { #define PDQ_OS_MEMFREE(p, n) kfree((void *) p, M_DEVBUF) #if defined(__DragonFly__) || defined(__FreeBSD__) #define PDQ_OS_MEMALLOC_CONTIG(n) vm_page_alloc_contig(n, 0, 0xffffffff, PAGE_SIZE) -#define PDQ_OS_MEMFREE_CONTIG(p, n) kmem_free(kernel_map, (vm_offset_t) p, n) +#define PDQ_OS_MEMFREE_CONTIG(p, n) kmem_free(&kernel_map, (vm_offset_t) p, n) #else -#define PDQ_OS_MEMALLOC_CONTIG(n) kmem_alloc(kernel_map, round_page(n)) -#define PDQ_OS_MEMFREE_CONTIG(p, n) kmem_free(kernel_map, (vm_offset_t) p, n) +#define PDQ_OS_MEMALLOC_CONTIG(n) kmem_alloc(&kernel_map, round_page(n)) +#define PDQ_OS_MEMFREE_CONTIG(p, n) kmem_free(&kernel_map, (vm_offset_t) p, n) #endif /* __FreeBSD__ */ #include diff --git a/sys/dev/raid/dpt/dpt_control.c b/sys/dev/raid/dpt/dpt_control.c index 21ce1e7be4..974300f6a9 100644 --- a/sys/dev/raid/dpt/dpt_control.c +++ b/sys/dev/raid/dpt/dpt_control.c @@ -37,7 +37,7 @@ */ #ident "$FreeBSD: src/sys/dev/dpt/dpt_control.c,v 1.16 1999/09/25 18:23:48 phk Exp $" -#ident "$DragonFly: src/sys/dev/raid/dpt/dpt_control.c,v 1.13 2006/12/22 23:26:23 swildner Exp $" +#ident "$DragonFly: src/sys/dev/raid/dpt/dpt_control.c,v 1.14 2006/12/28 21:23:58 dillon Exp $" #include "opt_dpt.h" @@ -164,7 +164,7 @@ dpt_physmap(u_int32_t req_paddr, vm_size_t req_size) paddr = req_paddr & 0xfffff000; offset = req_paddr - paddr; - va = kmem_alloc_pageable(kernel_map, size); + va = kmem_alloc_pageable(&kernel_map, size); if (va == (vm_offset_t) 0) return (va); @@ -193,7 +193,7 @@ dpt_unphysmap(u_int8_t * vaddr, vm_size_t size) pmap_kremove((vm_offset_t) vaddr + ndx); } - kmem_free(kernel_map, (vm_offset_t) vaddr, size); + kmem_free(&kernel_map, 
(vm_offset_t) vaddr, size); } /** diff --git a/sys/dev/video/bktr/bktr_core.c b/sys/dev/video/bktr/bktr_core.c index 438a978479..bddf3c8d3d 100644 --- a/sys/dev/video/bktr/bktr_core.c +++ b/sys/dev/video/bktr/bktr_core.c @@ -62,7 +62,7 @@ * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD: src/sys/dev/bktr/bktr_core.c,v 1.138 2005/01/09 17:42:03 cognet Exp - * $DragonFly: src/sys/dev/video/bktr/bktr_core.c,v 1.20 2006/12/22 23:26:26 swildner Exp $ + * $DragonFly: src/sys/dev/video/bktr/bktr_core.c,v 1.21 2006/12/28 21:23:59 dillon Exp $ */ /* @@ -1681,7 +1681,7 @@ video_ioctl( bktr_ptr_t bktr, int unit, ioctl_cmd_t cmd, caddr_t arg, struct thr buf = get_bktr_mem(unit, temp*PAGE_SIZE); if (buf != 0) { - kmem_free(kernel_map, bktr->bigbuf, + kmem_free(&kernel_map, bktr->bigbuf, (bktr->alloc_pages * PAGE_SIZE)); bktr->bigbuf = buf; diff --git a/sys/dev/video/i386/vesa/vesa.c b/sys/dev/video/i386/vesa/vesa.c index 260ce63322..aac796810c 100644 --- a/sys/dev/video/i386/vesa/vesa.c +++ b/sys/dev/video/i386/vesa/vesa.c @@ -24,7 +24,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD: src/sys/i386/isa/vesa.c,v 1.32.2.1 2002/08/13 02:42:33 rwatson Exp $ - * $DragonFly: src/sys/dev/video/i386/vesa/vesa.c,v 1.16 2006/12/22 23:26:27 swildner Exp $ + * $DragonFly: src/sys/dev/video/i386/vesa/vesa.c,v 1.17 2006/12/28 21:24:00 dillon Exp $ */ #include "opt_vga.h" @@ -845,7 +845,7 @@ vesa_unmap_buffer(vm_offset_t vaddr, size_t size) #if VESA_DEBUG > 1 kprintf("vesa_unmap_buffer: vaddr:%x size:%x\n", vaddr, size); #endif - kmem_free(kernel_map, vaddr, size); + kmem_free(&kernel_map, vaddr, size); } /* entry points */ diff --git a/sys/dev/video/meteor/meteor.c b/sys/dev/video/meteor/meteor.c index fbd1b11d21..75b267c75d 100644 --- a/sys/dev/video/meteor/meteor.c +++ b/sys/dev/video/meteor/meteor.c @@ -29,7 +29,7 @@ * POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD: src/sys/pci/meteor.c,v 1.49 1999/09/25 18:24:41 phk Exp $ - * $DragonFly: src/sys/dev/video/meteor/meteor.c,v 1.20 2006/12/22 23:26:27 swildner Exp $ + * $DragonFly: src/sys/dev/video/meteor/meteor.c,v 1.21 2006/12/28 21:24:01 dillon Exp $ */ /* Change History: @@ -1220,7 +1220,8 @@ meteor_close(struct dev_close_args *ap) #ifdef METEOR_DEALLOC_PAGES if (mtr->bigbuf != NULL) { - kmem_free(kernel_map,mtr->bigbuf,(mtr->alloc_pages*PAGE_SIZE)); + kmem_free(&kernel_map, mtr->bigbuf, + (mtr->alloc_pages * PAGE_SIZE)); mtr->bigbuf = NULL; mtr->alloc_pages = 0; } @@ -1228,7 +1229,7 @@ meteor_close(struct dev_close_args *ap) #ifdef METEOR_DEALLOC_ABOVE if (mtr->bigbuf != NULL && mtr->alloc_pages > METEOR_DEALLOC_ABOVE) { temp = METEOR_DEALLOC_ABOVE - mtr->alloc_pages; - kmem_free(kernel_map, + kmem_free(&kernel_map, mtr->bigbuf+((mtr->alloc_pages - temp) * PAGE_SIZE), (temp * PAGE_SIZE)); mtr->alloc_pages = METEOR_DEALLOC_ABOVE; @@ -1759,7 +1760,7 @@ meteor_ioctl(struct dev_ioctl_args *ap) ) { buf = get_meteor_mem(unit, temp*PAGE_SIZE); if(buf != 0) { - kmem_free(kernel_map, mtr->bigbuf, + kmem_free(&kernel_map, mtr->bigbuf, (mtr->alloc_pages * PAGE_SIZE)); mtr->bigbuf = buf; mtr->alloc_pages = temp; diff --git a/sys/emulation/linux/i386/imgact_linux.c b/sys/emulation/linux/i386/imgact_linux.c index 48d88ca5ad..6e9195a5bb 100644 --- a/sys/emulation/linux/i386/imgact_linux.c +++ b/sys/emulation/linux/i386/imgact_linux.c @@ -29,7 +29,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD: src/sys/i386/linux/imgact_linux.c,v 1.35.2.2 2001/11/03 01:41:08 ps Exp $ - * $DragonFly: src/sys/emulation/linux/i386/imgact_linux.c,v 1.9 2006/12/23 00:27:02 swildner Exp $ + * $DragonFly: src/sys/emulation/linux/i386/imgact_linux.c,v 1.10 2006/12/28 21:24:02 dillon Exp $ */ #include @@ -134,7 +134,7 @@ exec_linux_imgact(struct image_params *imgp) if (error) return error; - error = vm_mmap(kernel_map, &buffer, + error = vm_mmap(&kernel_map, &buffer, round_page(a_out->a_text + a_out->a_data + file_offset), VM_PROT_READ, VM_PROT_READ, 0, (caddr_t) imgp->vp, trunc_page(file_offset)); @@ -144,7 +144,7 @@ exec_linux_imgact(struct image_params *imgp) error = copyout((caddr_t)(void *)(uintptr_t)(buffer + file_offset), (caddr_t)vmaddr, a_out->a_text + a_out->a_data); - vm_map_remove(kernel_map, buffer, + vm_map_remove(&kernel_map, buffer, buffer + round_page(a_out->a_text + a_out->a_data + file_offset)); if (error) diff --git a/sys/emulation/linux/linux_misc.c b/sys/emulation/linux/linux_misc.c index 89e1e8626b..272c079066 100644 --- a/sys/emulation/linux/linux_misc.c +++ b/sys/emulation/linux/linux_misc.c @@ -26,7 +26,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD: src/sys/compat/linux/linux_misc.c,v 1.85.2.9 2002/09/24 08:11:41 mdodd Exp $ - * $DragonFly: src/sys/emulation/linux/linux_misc.c,v 1.32 2006/12/23 00:27:02 swildner Exp $ + * $DragonFly: src/sys/emulation/linux/linux_misc.c,v 1.33 2006/12/28 21:24:02 dillon Exp $ */ #include "opt_compat.h" @@ -312,7 +312,7 @@ sys_linux_uselib(struct linux_uselib_args *args) locked = 0; /* Pull in executable header into kernel_map */ - error = vm_mmap(kernel_map, (vm_offset_t *)&a_out, PAGE_SIZE, + error = vm_mmap(&kernel_map, (vm_offset_t *)&a_out, PAGE_SIZE, VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp, 0); if (error) goto cleanup; @@ -393,7 +393,7 @@ sys_linux_uselib(struct linux_uselib_args *args) goto cleanup; /* map file into kernel_map */ - error = vm_mmap(kernel_map, &buffer, + error = vm_mmap(&kernel_map, &buffer, round_page(a_out->a_text + a_out->a_data + file_offset), VM_PROT_READ, VM_PROT_READ, 0, (caddr_t)vp, trunc_page(file_offset)); @@ -405,7 +405,7 @@ sys_linux_uselib(struct linux_uselib_args *args) (caddr_t)vmaddr, a_out->a_text + a_out->a_data); /* release temporary kernel space */ - vm_map_remove(kernel_map, buffer, buffer + + vm_map_remove(&kernel_map, buffer, buffer + round_page(a_out->a_text + a_out->a_data + file_offset)); if (error) @@ -459,7 +459,7 @@ cleanup: } /* Release the kernel mapping. */ if (a_out) { - vm_map_remove(kernel_map, (vm_offset_t)a_out, + vm_map_remove(&kernel_map, (vm_offset_t)a_out, (vm_offset_t)a_out + PAGE_SIZE); } nlookup_done(&nd); diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c index d7e37caa7f..dfb648524e 100644 --- a/sys/kern/imgact_elf.c +++ b/sys/kern/imgact_elf.c @@ -27,7 +27,7 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD: src/sys/kern/imgact_elf.c,v 1.73.2.13 2002/12/28 19:49:41 dillon Exp $ - * $DragonFly: src/sys/kern/imgact_elf.c,v 1.45 2006/12/23 00:35:03 swildner Exp $ + * $DragonFly: src/sys/kern/imgact_elf.c,v 1.46 2006/12/28 21:24:01 dillon Exp $ */ #include @@ -353,7 +353,7 @@ elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp, if (copy_len != 0) { vm_object_reference(object); - rv = vm_map_find(exec_map, + rv = vm_map_find(&exec_map, object, trunc_page(offset + filsz), &data_buf, @@ -369,7 +369,7 @@ elf_load_section(struct proc *p, struct vmspace *vmspace, struct vnode *vp, /* send the page fragment to user space */ error = copyout((caddr_t)data_buf, (caddr_t)map_addr, copy_len); - vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE); + vm_map_remove(&exec_map, data_buf, data_buf + PAGE_SIZE); if (error) { return (error); } diff --git a/sys/kern/imgact_gzip.c b/sys/kern/imgact_gzip.c index a0a361c7de..b4ecf84a69 100644 --- a/sys/kern/imgact_gzip.c +++ b/sys/kern/imgact_gzip.c @@ -7,7 +7,7 @@ * ---------------------------------------------------------------------------- * * $FreeBSD: src/sys/kern/imgact_gzip.c,v 1.40.2.1 2001/11/03 01:41:08 ps Exp $ - * $DragonFly: src/sys/kern/imgact_gzip.c,v 1.8 2006/12/23 23:47:54 swildner Exp $ + * $DragonFly: src/sys/kern/imgact_gzip.c,v 1.9 2006/12/28 21:24:01 dillon Exp $ * * This module handles execution of a.out files which have been run through * "gzip". This saves diskspace, but wastes cpu-cycles and VM. 
@@ -132,9 +132,8 @@ exec_gzip_imgact(struct image_params *imgp) } if (igz.inbuf) { - error2 = - vm_map_remove(kernel_map, (vm_offset_t) igz.inbuf, - (vm_offset_t) igz.inbuf + PAGE_SIZE); + error2 = vm_map_remove(&kernel_map, (vm_offset_t)igz.inbuf, + (vm_offset_t)igz.inbuf + PAGE_SIZE); } if (igz.error || error || error2) { kprintf("Output=%lu ", igz.output); @@ -289,8 +288,8 @@ NextByte(void *vp) return igz->inbuf[(igz->idx++) - igz->offset]; } if (igz->inbuf) { - error = vm_map_remove(kernel_map, (vm_offset_t) igz->inbuf, - (vm_offset_t) igz->inbuf + PAGE_SIZE); + error = vm_map_remove(&kernel_map, (vm_offset_t)igz->inbuf, + (vm_offset_t)igz->inbuf + PAGE_SIZE); if (error) { igz->where = __LINE__; igz->error = error; @@ -299,7 +298,7 @@ NextByte(void *vp) } igz->offset = igz->idx & ~PAGE_MASK; - error = vm_mmap(kernel_map, /* map */ + error = vm_mmap(&kernel_map, /* map */ (vm_offset_t *) & igz->inbuf, /* address */ PAGE_SIZE, /* size */ VM_PROT_READ, /* protection */ diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index 28f5783944..6d079ca09a 100644 --- a/sys/kern/init_main.c +++ b/sys/kern/init_main.c @@ -40,7 +40,7 @@ * * @(#)init_main.c 8.9 (Berkeley) 1/21/94 * $FreeBSD: src/sys/kern/init_main.c,v 1.134.2.8 2003/06/06 20:21:32 tegge Exp $ - * $DragonFly: src/sys/kern/init_main.c,v 1.69 2006/12/27 20:41:57 dillon Exp $ + * $DragonFly: src/sys/kern/init_main.c,v 1.70 2006/12/28 21:24:01 dillon Exp $ */ #include "opt_init_path.h" @@ -366,9 +366,10 @@ proc0_init(void *dummy __unused) pmap_pinit0(vmspace_pmap(&vmspace0)); p->p_vmspace = &vmspace0; vmspace0.vm_refcnt = 1; - vm_map_init(&vmspace0.vm_map, round_page(VM_MIN_USER_ADDRESS), - trunc_page(VM_MAX_USER_ADDRESS)); - vmspace0.vm_map.pmap = vmspace_pmap(&vmspace0); + vm_map_init(&vmspace0.vm_map, + round_page(VM_MIN_USER_ADDRESS), + trunc_page(VM_MAX_USER_ADDRESS), + vmspace_pmap(&vmspace0)); /* * We continue to place resource usage info and signal diff --git a/sys/kern/kern_exec.c 
b/sys/kern/kern_exec.c index 8959e17622..c4416c5962 100644 --- a/sys/kern/kern_exec.c +++ b/sys/kern/kern_exec.c @@ -24,7 +24,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/kern/kern_exec.c,v 1.107.2.15 2002/07/30 15:40:46 nectar Exp $ - * $DragonFly: src/sys/kern/kern_exec.c,v 1.51 2006/12/23 00:35:04 swildner Exp $ + * $DragonFly: src/sys/kern/kern_exec.c,v 1.52 2006/12/28 21:24:01 dillon Exp $ */ #include @@ -688,7 +688,7 @@ exec_copyin_args(struct image_args *args, char *fname, size_t length; bzero(args, sizeof(*args)); - args->buf = (char *) kmem_alloc_wait(exec_map, PATH_MAX + ARG_MAX); + args->buf = (char *) kmem_alloc_wait(&exec_map, PATH_MAX + ARG_MAX); if (args->buf == NULL) return (ENOMEM); args->begin_argv = args->buf; @@ -779,7 +779,7 @@ void exec_free_args(struct image_args *args) { if (args->buf) { - kmem_free_wakeup(exec_map, + kmem_free_wakeup(&exec_map, (vm_offset_t)args->buf, PATH_MAX + ARG_MAX); args->buf = NULL; } diff --git a/sys/kern/kern_msfbuf.c b/sys/kern/kern_msfbuf.c index 58814adbed..c5e0b9b08d 100644 --- a/sys/kern/kern_msfbuf.c +++ b/sys/kern/kern_msfbuf.c @@ -36,7 +36,7 @@ * Copyright (c) 1998 David Greenman. All rights reserved. 
* src/sys/kern/kern_sfbuf.c,v 1.7 2004/05/13 19:46:18 dillon * - * $DragonFly: src/sys/kern/kern_msfbuf.c,v 1.18 2006/09/05 00:55:45 dillon Exp $ + * $DragonFly: src/sys/kern/kern_msfbuf.c,v 1.19 2006/12/28 21:24:01 dillon Exp $ */ /* * MSFBUFs cache linear multi-page ephermal mappings and operate similar @@ -118,8 +118,8 @@ msf_buf_init(void *__dummy) TAILQ_INIT(&msf_buf_freelist); - msf_base = kmem_alloc_nofault(kernel_map, - msf_buf_count * XIO_INTERNAL_SIZE); + msf_base = kmem_alloc_nofault(&kernel_map, + msf_buf_count * XIO_INTERNAL_SIZE); msf_bufs = kmalloc(msf_buf_count * sizeof(struct msf_buf), M_MSFBUF, M_WAITOK|M_ZERO); diff --git a/sys/kern/kern_sfbuf.c b/sys/kern/kern_sfbuf.c index 7d8f993d17..d3dd434cac 100644 --- a/sys/kern/kern_sfbuf.c +++ b/sys/kern/kern_sfbuf.c @@ -22,7 +22,7 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $DragonFly: src/sys/kern/kern_sfbuf.c,v 1.13 2006/09/05 00:55:45 dillon Exp $ + * $DragonFly: src/sys/kern/kern_sfbuf.c,v 1.14 2006/12/28 21:24:01 dillon Exp $ */ #include @@ -88,7 +88,7 @@ sf_buf_init(void *arg) sf_buf_hashtable = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask); TAILQ_INIT(&sf_buf_freelist); - sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE); + sf_base = kmem_alloc_nofault(&kernel_map, nsfbufs * PAGE_SIZE); sf_bufs = kmalloc(nsfbufs * sizeof(struct sf_buf), M_TEMP, M_WAITOK | M_ZERO); for (i = 0; i < nsfbufs; i++) { diff --git a/sys/kern/kern_slaballoc.c b/sys/kern/kern_slaballoc.c index 03133050f6..1cd787e52d 100644 --- a/sys/kern/kern_slaballoc.c +++ b/sys/kern/kern_slaballoc.c @@ -33,7 +33,7 @@ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.45 2006/12/28 18:29:03 dillon Exp $ + * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.46 2006/12/28 21:24:01 dillon Exp $ * * This module implements a slab allocator drop-in replacement for the * kernel malloc(). 
@@ -1035,13 +1035,11 @@ kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) { vm_size_t i; vm_offset_t addr; - vm_offset_t offset; int count, vmflags, base_vmflags; thread_t td; - vm_map_t map = kernel_map; size = round_page(size); - addr = vm_map_min(map); + addr = vm_map_min(&kernel_map); /* * Reserve properly aligned space from kernel_map. RNOWAIT allocations @@ -1055,9 +1053,9 @@ kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) } count = vm_map_entry_reserve(MAP_RESERVE_COUNT); crit_enter(); - vm_map_lock(map); - if (vm_map_findspace(map, vm_map_min(map), size, align, &addr)) { - vm_map_unlock(map); + vm_map_lock(&kernel_map); + if (vm_map_findspace(&kernel_map, addr, size, align, &addr)) { + vm_map_unlock(&kernel_map); if ((flags & M_NULLOK) == 0) panic("kmem_slab_alloc(): kernel_map ran out of space!"); crit_exit(); @@ -1065,10 +1063,13 @@ kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) rel_mplock(); return(NULL); } - offset = addr - KvaStart; + + /* + * kernel_object maps 1:1 to kernel_map. + */ vm_object_reference(&kernel_object); - vm_map_insert(map, &count, - &kernel_object, offset, addr, addr + size, + vm_map_insert(&kernel_map, &count, + &kernel_object, addr, addr, addr + size, VM_MAPTYPE_NORMAL, VM_PROT_ALL, VM_PROT_ALL, 0); @@ -1091,7 +1092,6 @@ kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) */ for (i = 0; i < size; i += PAGE_SIZE) { vm_page_t m; - vm_pindex_t idx = OFF_TO_IDX(offset + i); /* * VM_ALLOC_NORMAL can only be set if we are not preempting. @@ -1109,7 +1109,7 @@ kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) vmflags |= VM_ALLOC_NORMAL; } - m = vm_page_alloc(&kernel_object, idx, vmflags); + m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags); /* * If the allocation failed we either return NULL or we retry. 
@@ -1123,13 +1123,13 @@ kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) if (m == NULL) { if (flags & M_WAITOK) { if (td->td_preempted) { - vm_map_unlock(map); + vm_map_unlock(&kernel_map); lwkt_yield(); - vm_map_lock(map); + vm_map_lock(&kernel_map); } else { - vm_map_unlock(map); + vm_map_unlock(&kernel_map); vm_wait(); - vm_map_lock(map); + vm_map_lock(&kernel_map); } i -= PAGE_SIZE; /* retry */ continue; @@ -1140,11 +1140,11 @@ kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) */ while (i != 0) { i -= PAGE_SIZE; - m = vm_page_lookup(&kernel_object, OFF_TO_IDX(offset + i)); + m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i)); vm_page_free(m); } - vm_map_delete(map, addr, addr + size, &count); - vm_map_unlock(map); + vm_map_delete(&kernel_map, addr, addr + size, &count); + vm_map_unlock(&kernel_map); crit_exit(); vm_map_entry_release(count); rel_mplock(); @@ -1158,7 +1158,7 @@ kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) * Mark the map entry as non-pageable using a routine that allows us to * populate the underlying pages. 
*/ - vm_map_set_wired_quick(map, addr, size, &count); + vm_map_set_wired_quick(&kernel_map, addr, size, &count); crit_exit(); /* @@ -1167,7 +1167,7 @@ kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) for (i = 0; i < size; i += PAGE_SIZE) { vm_page_t m; - m = vm_page_lookup(&kernel_object, OFF_TO_IDX(offset + i)); + m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i)); m->valid = VM_PAGE_BITS_ALL; vm_page_wire(m); vm_page_wakeup(m); @@ -1177,7 +1177,7 @@ kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) vm_page_flag_clear(m, PG_ZERO); vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED); } - vm_map_unlock(map); + vm_map_unlock(&kernel_map); vm_map_entry_release(count); rel_mplock(); return((void *)addr); @@ -1193,7 +1193,7 @@ kmem_slab_free(void *ptr, vm_size_t size) { get_mplock(); crit_enter(); - vm_map_remove(kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size); + vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size); crit_exit(); rel_mplock(); } diff --git a/sys/kern/link_elf.c b/sys/kern/link_elf.c index 7e07ae03ab..4cdb32a147 100644 --- a/sys/kern/link_elf.c +++ b/sys/kern/link_elf.c @@ -24,7 +24,7 @@ * SUCH DAMAGE. 
* * $FreeBSD: src/sys/kern/link_elf.c,v 1.24 1999/12/24 15:33:36 bde Exp $ - * $DragonFly: src/sys/kern/link_elf.c,v 1.24 2006/12/23 00:35:04 swildner Exp $ + * $DragonFly: src/sys/kern/link_elf.c,v 1.25 2006/12/28 21:24:01 dillon Exp $ */ #include @@ -554,8 +554,8 @@ link_elf_load_file(const char* filename, linker_file_t* result) goto out; } vm_object_reference(ef->object); - ef->address = (caddr_t) vm_map_min(kernel_map); - error = vm_map_find(kernel_map, ef->object, 0, + ef->address = (caddr_t)vm_map_min(&kernel_map); + error = vm_map_find(&kernel_map, ef->object, 0, (vm_offset_t *)&ef->address, mapsize, 1, VM_MAPTYPE_NORMAL, @@ -581,7 +581,7 @@ link_elf_load_file(const char* filename, linker_file_t* result) UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid); if (error) { #ifdef SPARSE_MAPPING - vm_map_remove(kernel_map, (vm_offset_t) ef->address, + vm_map_remove(&kernel_map, (vm_offset_t) ef->address, (vm_offset_t) ef->address + (ef->object->size << PAGE_SHIFT)); vm_object_deallocate(ef->object); @@ -598,10 +598,10 @@ link_elf_load_file(const char* filename, linker_file_t* result) /* * Wire down the pages */ - vm_map_wire(kernel_map, - (vm_offset_t) segbase, - (vm_offset_t) segbase + segs[i]->p_memsz, - 0); + vm_map_wire(&kernel_map, + (vm_offset_t) segbase, + (vm_offset_t) segbase + segs[i]->p_memsz, + 0); #endif } @@ -610,7 +610,7 @@ link_elf_load_file(const char* filename, linker_file_t* result) lf = linker_make_file(filename, ef, &link_elf_file_ops); if (lf == NULL) { #ifdef SPARSE_MAPPING - vm_map_remove(kernel_map, (vm_offset_t) ef->address, + vm_map_remove(&kernel_map, (vm_offset_t) ef->address, (vm_offset_t) ef->address + (ef->object->size << PAGE_SHIFT)); vm_object_deallocate(ef->object); @@ -711,7 +711,7 @@ link_elf_unload_file(linker_file_t file) if (ef) { #ifdef SPARSE_MAPPING if (ef->object) { - vm_map_remove(kernel_map, (vm_offset_t) ef->address, + vm_map_remove(&kernel_map, (vm_offset_t) ef->address, (vm_offset_t) ef->address + (ef->object->size << 
PAGE_SHIFT)); vm_object_deallocate(ef->object); diff --git a/sys/kern/lwkt_thread.c b/sys/kern/lwkt_thread.c index e220a67406..f2916c53c8 100644 --- a/sys/kern/lwkt_thread.c +++ b/sys/kern/lwkt_thread.c @@ -31,7 +31,7 @@ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * - * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.104 2006/12/23 00:35:04 swildner Exp $ + * $DragonFly: src/sys/kern/lwkt_thread.c,v 1.105 2006/12/28 21:24:01 dillon Exp $ */ /* @@ -265,7 +265,7 @@ lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags) if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) { if (flags & TDF_ALLOCATED_STACK) { #ifdef _KERNEL - kmem_free(kernel_map, (vm_offset_t)stack, td->td_kstack_size); + kmem_free(&kernel_map, (vm_offset_t)stack, td->td_kstack_size); #else libcaps_free_stack(stack, td->td_kstack_size); #endif @@ -274,7 +274,7 @@ lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags) } if (stack == NULL) { #ifdef _KERNEL - stack = (void *)kmem_alloc(kernel_map, stksize); + stack = (void *)kmem_alloc(&kernel_map, stksize); #else stack = libcaps_alloc_stack(stksize); #endif @@ -412,7 +412,7 @@ lwkt_free_thread(thread_t td) crit_exit_gd(gd); if (td->td_kstack && (td->td_flags & TDF_ALLOCATED_STACK)) { #ifdef _KERNEL - kmem_free(kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size); + kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size); #else libcaps_free_stack(td->td_kstack, td->td_kstack_size); #endif diff --git a/sys/kern/sys_pipe.c b/sys/kern/sys_pipe.c index 0bc567194a..4251549789 100644 --- a/sys/kern/sys_pipe.c +++ b/sys/kern/sys_pipe.c @@ -17,7 +17,7 @@ * are met. 
* * $FreeBSD: src/sys/kern/sys_pipe.c,v 1.60.2.13 2002/08/05 15:05:15 des Exp $ - * $DragonFly: src/sys/kern/sys_pipe.c,v 1.43 2006/12/23 23:47:54 swildner Exp $ + * $DragonFly: src/sys/kern/sys_pipe.c,v 1.44 2006/12/28 21:24:01 dillon Exp $ */ /* @@ -315,9 +315,9 @@ pipespace(struct pipe *cpipe, int size) */ if (object == NULL || object->size != npages) { object = vm_object_allocate(OBJT_DEFAULT, npages); - buffer = (caddr_t) vm_map_min(kernel_map); + buffer = (caddr_t)vm_map_min(&kernel_map); - error = vm_map_find(kernel_map, object, 0, + error = vm_map_find(&kernel_map, object, 0, (vm_offset_t *)&buffer, size, 1, VM_MAPTYPE_NORMAL, @@ -677,7 +677,7 @@ pipe_build_write_buffer(struct pipe *wpipe, struct uio *uio) case PIPE_SFBUF2: if (wpipe->pipe_kva == NULL) { wpipe->pipe_kva = - kmem_alloc_nofault(kernel_map, XIO_INTERNAL_SIZE); + kmem_alloc_nofault(&kernel_map, XIO_INTERNAL_SIZE); wpipe->pipe_kvamask = 0; } if (wpipe->pipe_feature == PIPE_KMEM) { @@ -732,7 +732,7 @@ pipe_clone_write_buffer(struct pipe *wpipe) xio_release(&wpipe->pipe_map); if (wpipe->pipe_kva) { pmap_qremove(wpipe->pipe_kva, XIO_INTERNAL_PAGES); - kmem_free(kernel_map, wpipe->pipe_kva, XIO_INTERNAL_SIZE); + kmem_free(&kernel_map, wpipe->pipe_kva, XIO_INTERNAL_SIZE); wpipe->pipe_kva = NULL; } } @@ -803,7 +803,7 @@ retry: xio_release(&wpipe->pipe_map); if (wpipe->pipe_kva) { pmap_qremove(wpipe->pipe_kva, XIO_INTERNAL_PAGES); - kmem_free(kernel_map, wpipe->pipe_kva, XIO_INTERNAL_SIZE); + kmem_free(&kernel_map, wpipe->pipe_kva, XIO_INTERNAL_SIZE); wpipe->pipe_kva = NULL; } pipeunlock(wpipe); @@ -1348,7 +1348,7 @@ pipe_free_kmem(struct pipe *cpipe) if (cpipe->pipe_buffer.buffer != NULL) { if (cpipe->pipe_buffer.size > PIPE_SIZE) --pipe_nbig; - kmem_free(kernel_map, + kmem_free(&kernel_map, (vm_offset_t)cpipe->pipe_buffer.buffer, cpipe->pipe_buffer.size); cpipe->pipe_buffer.buffer = NULL; @@ -1399,7 +1399,7 @@ pipeclose(struct pipe *cpipe) if (cpipe->pipe_kva) { pmap_qremove(cpipe->pipe_kva, 
XIO_INTERNAL_PAGES); - kmem_free(kernel_map, cpipe->pipe_kva, XIO_INTERNAL_SIZE); + kmem_free(&kernel_map, cpipe->pipe_kva, XIO_INTERNAL_SIZE); cpipe->pipe_kva = NULL; } diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c index 49acb152f7..d0497c4bd0 100644 --- a/sys/kern/sys_process.c +++ b/sys/kern/sys_process.c @@ -29,7 +29,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/kern/sys_process.c,v 1.51.2.6 2003/01/08 03:06:45 kan Exp $ - * $DragonFly: src/sys/kern/sys_process.c,v 1.25 2006/12/23 23:47:54 swildner Exp $ + * $DragonFly: src/sys/kern/sys_process.c,v 1.26 2006/12/28 21:24:01 dillon Exp $ */ #include @@ -82,7 +82,7 @@ pread (struct proc *procp, unsigned int addr, unsigned int *retval) { vm_map_lookup_done (tmap, out_entry, 0); /* Find space in kernel_map for the page we're interested in */ - rv = vm_map_find (kernel_map, object, IDX_TO_OFF(pindex), + rv = vm_map_find (&kernel_map, object, IDX_TO_OFF(pindex), &kva, PAGE_SIZE, 0, VM_MAPTYPE_NORMAL, @@ -92,13 +92,13 @@ pread (struct proc *procp, unsigned int addr, unsigned int *retval) { if (!rv) { vm_object_reference (object); - rv = vm_map_wire (kernel_map, kva, kva + PAGE_SIZE, 0); + rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0); if (!rv) { *retval = 0; bcopy ((caddr_t)kva + page_offset, retval, sizeof *retval); } - vm_map_remove (kernel_map, kva, kva + PAGE_SIZE); + vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE); } return rv; @@ -172,7 +172,7 @@ pwrite (struct proc *procp, unsigned int addr, unsigned int datum) { return EFAULT; /* Find space in kernel_map for the page we're interested in */ - rv = vm_map_find (kernel_map, object, IDX_TO_OFF(pindex), + rv = vm_map_find (&kernel_map, object, IDX_TO_OFF(pindex), &kva, PAGE_SIZE, 0, VM_MAPTYPE_NORMAL, @@ -181,11 +181,11 @@ pwrite (struct proc *procp, unsigned int addr, unsigned int datum) { if (!rv) { vm_object_reference (object); - rv = vm_map_wire (kernel_map, kva, kva + PAGE_SIZE, 0); + rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 
0); if (!rv) { bcopy (&datum, (caddr_t)kva + page_offset, sizeof datum); } - vm_map_remove (kernel_map, kva, kva + PAGE_SIZE); + vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE); } if (fix_prot) diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c index 2eecd873ba..c4eb33308f 100644 --- a/sys/kern/vfs_bio.c +++ b/sys/kern/vfs_bio.c @@ -12,7 +12,7 @@ * John S. Dyson. * * $FreeBSD: src/sys/kern/vfs_bio.c,v 1.242.2.20 2003/05/28 18:38:10 alc Exp $ - * $DragonFly: src/sys/kern/vfs_bio.c,v 1.84 2006/12/28 18:29:03 dillon Exp $ + * $DragonFly: src/sys/kern/vfs_bio.c,v 1.85 2006/12/28 21:24:01 dillon Exp $ */ /* @@ -441,10 +441,10 @@ bufinit(void) * from buf_daemon. */ - bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE); + bogus_offset = kmem_alloc_pageable(&kernel_map, PAGE_SIZE); bogus_page = vm_page_alloc(&kernel_object, - ((bogus_offset - KvaStart) >> PAGE_SHIFT), - VM_ALLOC_NORMAL); + (bogus_offset >> PAGE_SHIFT), + VM_ALLOC_NORMAL); vmstats.v_wire_count++; } @@ -543,14 +543,14 @@ bfreekva(struct buf *bp) if (bp->b_kvasize) { ++buffreekvacnt; count = vm_map_entry_reserve(MAP_RESERVE_COUNT); - vm_map_lock(buffer_map); + vm_map_lock(&buffer_map); bufspace -= bp->b_kvasize; - vm_map_delete(buffer_map, + vm_map_delete(&buffer_map, (vm_offset_t) bp->b_kvabase, (vm_offset_t) bp->b_kvabase + bp->b_kvasize, &count ); - vm_map_unlock(buffer_map); + vm_map_unlock(&buffer_map); vm_map_entry_release(count); bp->b_kvasize = 0; bufspacewakeup(); @@ -1791,16 +1791,16 @@ restart: bfreekva(bp); count = vm_map_entry_reserve(MAP_RESERVE_COUNT); - vm_map_lock(buffer_map); + vm_map_lock(&buffer_map); - if (vm_map_findspace(buffer_map, - vm_map_min(buffer_map), maxsize, + if (vm_map_findspace(&buffer_map, + vm_map_min(&buffer_map), maxsize, maxsize, &addr)) { /* * Uh oh. Buffer map is too fragmented. We * must defragment the map. 
*/ - vm_map_unlock(buffer_map); + vm_map_unlock(&buffer_map); vm_map_entry_release(count); ++bufdefragcnt; defrag = 1; @@ -1809,7 +1809,7 @@ restart: goto restart; } if (addr) { - vm_map_insert(buffer_map, &count, + vm_map_insert(&buffer_map, &count, NULL, 0, addr, addr + maxsize, VM_MAPTYPE_NORMAL, @@ -1821,7 +1821,7 @@ restart: bufspace += bp->b_kvasize; ++bufreusecnt; } - vm_map_unlock(buffer_map); + vm_map_unlock(&buffer_map); vm_map_entry_release(count); } bp->b_data = bp->b_kvabase; @@ -3342,8 +3342,8 @@ tryagain: * process we are. */ p = vm_page_alloc(&kernel_object, - ((pg - KvaStart) >> PAGE_SHIFT), - VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM); + (pg >> PAGE_SHIFT), + VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM); if (!p) { vm_pageout_deficit += (to - from) >> PAGE_SHIFT; vm_wait(); diff --git a/sys/platform/pc32/i386/machdep.c b/sys/platform/pc32/i386/machdep.c index 80fff55292..415c30be35 100644 --- a/sys/platform/pc32/i386/machdep.c +++ b/sys/platform/pc32/i386/machdep.c @@ -36,7 +36,7 @@ * * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 * $FreeBSD: src/sys/i386/i386/machdep.c,v 1.385.2.30 2003/05/31 08:48:05 alc Exp $ - * $DragonFly: src/sys/platform/pc32/i386/machdep.c,v 1.107 2006/12/27 20:41:59 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/i386/machdep.c,v 1.108 2006/12/28 21:24:02 dillon Exp $ */ #include "use_apm.h" @@ -248,7 +248,7 @@ cpu_startup(void *dummy) vm_offset_t minaddr; vm_offset_t maxaddr; vm_size_t size = 0; - int firstaddr; + vm_offset_t firstaddr; if (boothowto & RB_VERBOSE) bootverbose++; @@ -331,10 +331,8 @@ again: * Do not allow the buffer_map to be more then 1/2 the size of the * kernel_map. 
*/ - if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) / - (BKVASIZE * 2)) { - nbuf = (kernel_map->max_offset - kernel_map->min_offset) / - (BKVASIZE * 2); + if (nbuf > (virtual_end - virtual_start) / (BKVASIZE * 2)) { + nbuf = (virtual_end - virtual_start) / (BKVASIZE * 2); kprintf("Warning: nbufs capped at %d\n", nbuf); } @@ -355,7 +353,7 @@ again: */ if (firstaddr == 0) { size = (vm_size_t)(v - firstaddr); - firstaddr = (int)kmem_alloc(kernel_map, round_page(size)); + firstaddr = kmem_alloc(&kernel_map, round_page(size)); if (firstaddr == 0) panic("startup: no room for tables"); goto again; @@ -367,16 +365,16 @@ again: if ((vm_size_t)(v - firstaddr) != size) panic("startup: table size inconsistency"); - clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva, - (nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size); - buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva, - (nbuf*BKVASIZE)); - buffer_map->system_map = 1; - pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva, - (nswbuf*MAXPHYS) + pager_map_size); - pager_map->system_map = 1; - exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, - (16*(ARG_MAX+(PAGE_SIZE*3)))); + kmem_suballoc(&kernel_map, &clean_map, &clean_sva, &clean_eva, + (nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size); + kmem_suballoc(&clean_map, &buffer_map, &buffer_sva, &buffer_eva, + (nbuf*BKVASIZE)); + buffer_map.system_map = 1; + kmem_suballoc(&clean_map, &pager_map, &pager_sva, &pager_eva, + (nswbuf*MAXPHYS) + pager_map_size); + pager_map.system_map = 1; + kmem_suballoc(&kernel_map, &exec_map, &minaddr, &maxaddr, + (16*(ARG_MAX+(PAGE_SIZE*3)))); #if defined(USERCONFIG) userconfig(); @@ -2076,7 +2074,7 @@ f00f_hack(void *unused) r_idt.rd_limit = sizeof(idt0) - 1; - tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2); + tmp = kmem_alloc(&kernel_map, PAGE_SIZE * 2); if (tmp == 0) panic("kmem_alloc returned 0"); if (((unsigned int)tmp & (PAGE_SIZE-1)) != 0) @@ -2087,7 +2085,7 @@ f00f_hack(void *unused) 
r_idt.rd_base = (int)new_idt; lidt(&r_idt); idt = new_idt; - if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE, + if (vm_map_protect(&kernel_map, tmp, tmp + PAGE_SIZE, VM_PROT_READ, FALSE) != KERN_SUCCESS) panic("vm_map_protect failed"); return; diff --git a/sys/platform/pc32/i386/mp_machdep.c b/sys/platform/pc32/i386/mp_machdep.c index 746ece3896..67c4b73e67 100644 --- a/sys/platform/pc32/i386/mp_machdep.c +++ b/sys/platform/pc32/i386/mp_machdep.c @@ -23,7 +23,7 @@ * SUCH DAMAGE. * * $FreeBSD: src/sys/i386/i386/mp_machdep.c,v 1.115.2.15 2003/03/14 21:22:35 jhb Exp $ - * $DragonFly: src/sys/platform/pc32/i386/mp_machdep.c,v 1.54 2006/12/23 00:27:03 swildner Exp $ + * $DragonFly: src/sys/platform/pc32/i386/mp_machdep.c,v 1.55 2006/12/28 21:24:02 dillon Exp $ */ #include "opt_cpu.h" @@ -2075,7 +2075,7 @@ start_all_aps(u_int boot_addr) pg = x * i386_btop(sizeof(struct privatespace)); /* allocate new private data page(s) */ - gd = (struct mdglobaldata *)kmem_alloc(kernel_map, + gd = (struct mdglobaldata *)kmem_alloc(&kernel_map, MDGLOBALDATA_BASEALLOC_SIZE); /* wire it into the private page table page */ for (i = 0; i < MDGLOBALDATA_BASEALLOC_SIZE; i += PAGE_SIZE) { @@ -2090,7 +2090,7 @@ start_all_aps(u_int boot_addr) SMPpt[pg + 3] = 0; /* *gd_PMAP1 */ /* allocate and set up an idle stack data page */ - stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE); + stack = (char *)kmem_alloc(&kernel_map, UPAGES*PAGE_SIZE); for (i = 0; i < UPAGES; i++) { SMPpt[pg + 4 + i] = (pt_entry_t) (PG_V | PG_RW | vtophys_pte(PAGE_SIZE * i + stack)); @@ -2111,7 +2111,7 @@ start_all_aps(u_int boot_addr) gd->gd_CADDR2 = ps->CPAGE2; gd->gd_CADDR3 = ps->CPAGE3; gd->gd_PADDR1 = (unsigned *)ps->PPAGE1; - gd->mi.gd_ipiq = (void *)kmem_alloc(kernel_map, sizeof(lwkt_ipiq) * (mp_naps + 1)); + gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * (mp_naps + 1)); bzero(gd->mi.gd_ipiq, sizeof(lwkt_ipiq) * (mp_naps + 1)); /* setup a vector to our boot code */ @@ -2154,7 +2154,7 @@ 
start_all_aps(u_int boot_addr) /* build our map of 'other' CPUs */ mycpu->gd_other_cpus = smp_startup_mask & ~(1 << mycpu->gd_cpuid); - mycpu->gd_ipiq = (void *)kmem_alloc(kernel_map, sizeof(lwkt_ipiq) * ncpus); + mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * ncpus); bzero(mycpu->gd_ipiq, sizeof(lwkt_ipiq) * ncpus); /* fill in our (BSP) APIC version */ diff --git a/sys/platform/pc32/i386/pmap.c b/sys/platform/pc32/i386/pmap.c index 35b62163a3..fa69d4b696 100644 --- a/sys/platform/pc32/i386/pmap.c +++ b/sys/platform/pc32/i386/pmap.c @@ -40,7 +40,7 @@ * * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 * $FreeBSD: src/sys/i386/i386/pmap.c,v 1.250.2.18 2002/03/06 22:48:53 silby Exp $ - * $DragonFly: src/sys/platform/pc32/i386/pmap.c,v 1.65 2006/12/28 18:29:04 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/i386/pmap.c,v 1.66 2006/12/28 21:24:02 dillon Exp $ */ /* @@ -533,7 +533,7 @@ pmap_init(void) if (initial_pvs < MINPV) initial_pvs = MINPV; pvzone = &pvzone_store; - pvinit = (struct pv_entry *) kmem_alloc(kernel_map, + pvinit = (struct pv_entry *) kmem_alloc(&kernel_map, initial_pvs * sizeof (struct pv_entry)); zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit, vm_page_array_size); @@ -1037,7 +1037,7 @@ void pmap_pinit0(struct pmap *pmap) { pmap->pm_pdir = - (pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE); + (pd_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE); pmap_kenter((vm_offset_t)pmap->pm_pdir, (vm_offset_t) IdlePTD); pmap->pm_count = 1; pmap->pm_active = 0; @@ -1061,7 +1061,7 @@ pmap_pinit(struct pmap *pmap) */ if (pmap->pm_pdir == NULL) { pmap->pm_pdir = - (pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE); + (pd_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE); } /* @@ -3110,7 +3110,7 @@ pmap_mapdev(vm_paddr_t pa, vm_size_t size) offset = pa & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); - va = kmem_alloc_nofault(kernel_map, size); + va = kmem_alloc_nofault(&kernel_map, size); if (!va) 
panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); @@ -3137,7 +3137,7 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size) offset = va & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); pmap_qremove(va, size >> PAGE_SHIFT); - kmem_free(kernel_map, base, size); + kmem_free(&kernel_map, base, size); } /* diff --git a/sys/platform/pc32/i386/sys_machdep.c b/sys/platform/pc32/i386/sys_machdep.c index 160744b0d3..6d64ffbeca 100644 --- a/sys/platform/pc32/i386/sys_machdep.c +++ b/sys/platform/pc32/i386/sys_machdep.c @@ -32,7 +32,7 @@ * * from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91 * $FreeBSD: src/sys/i386/i386/sys_machdep.c,v 1.47.2.3 2002/10/07 17:20:00 jhb Exp $ - * $DragonFly: src/sys/platform/pc32/i386/sys_machdep.c,v 1.28 2006/12/23 00:27:03 swildner Exp $ + * $DragonFly: src/sys/platform/pc32/i386/sys_machdep.c,v 1.29 2006/12/28 21:24:02 dillon Exp $ * */ @@ -126,8 +126,8 @@ i386_extend_pcb(struct lwp *lp) 0 /* granularity */ }; - ext = (struct pcb_ext *)kmem_alloc(kernel_map, ctob(IOPAGES+1)); - if (ext == 0) + ext = (struct pcb_ext *)kmem_alloc(&kernel_map, ctob(IOPAGES+1)); + if (ext == NULL) return (ENOMEM); bzero(ext, sizeof(struct pcb_ext)); ext->ext_tss.tss_esp0 = (unsigned)((char *)lp->lwp_thread->td_pcb - 16); @@ -301,8 +301,8 @@ user_ldt_alloc(struct pcb *pcb, int len) return NULL; new_ldt->ldt_len = len = NEW_MAX_LD(len); - new_ldt->ldt_base = (caddr_t)kmem_alloc(kernel_map, - len * sizeof(union descriptor)); + new_ldt->ldt_base = (caddr_t)kmem_alloc(&kernel_map, + len * sizeof(union descriptor)); if (new_ldt->ldt_base == NULL) { FREE(new_ldt, M_SUBPROC); return NULL; @@ -342,8 +342,8 @@ user_ldt_free(struct pcb *pcb) crit_exit(); if (--pcb_ldt->ldt_refcnt == 0) { - kmem_free(kernel_map, (vm_offset_t)pcb_ldt->ldt_base, - pcb_ldt->ldt_len * sizeof(union descriptor)); + kmem_free(&kernel_map, (vm_offset_t)pcb_ldt->ldt_base, + pcb_ldt->ldt_len * sizeof(union descriptor)); FREE(pcb_ldt, M_SUBPROC); } } @@ -430,8 +430,8 @@ ki386_set_ldt(struct lwp 
*lp, char *args, int *res) return ENOMEM; if (pcb_ldt) { pcb_ldt->ldt_sd = new_ldt->ldt_sd; - kmem_free(kernel_map, (vm_offset_t)pcb_ldt->ldt_base, - pcb_ldt->ldt_len * sizeof(union descriptor)); + kmem_free(&kernel_map, (vm_offset_t)pcb_ldt->ldt_base, + pcb_ldt->ldt_len * sizeof(union descriptor)); pcb_ldt->ldt_base = new_ldt->ldt_base; pcb_ldt->ldt_len = new_ldt->ldt_len; FREE(new_ldt, M_SUBPROC); @@ -451,18 +451,18 @@ ki386_set_ldt(struct lwp *lp, char *args, int *res) } descs_size = uap->num * sizeof(union descriptor); - descs = (union descriptor *)kmem_alloc(kernel_map, descs_size); + descs = (union descriptor *)kmem_alloc(&kernel_map, descs_size); if (descs == NULL) return (ENOMEM); error = copyin(&uap->descs[0], descs, descs_size); if (error) { - kmem_free(kernel_map, (vm_offset_t)descs, descs_size); + kmem_free(&kernel_map, (vm_offset_t)descs, descs_size); return (error); } /* Check descriptors for access violations */ error = check_descs(descs, uap->num); if (error) { - kmem_free(kernel_map, (vm_offset_t)descs, descs_size); + kmem_free(&kernel_map, (vm_offset_t)descs, descs_size); return (error); } @@ -479,7 +479,7 @@ ki386_set_ldt(struct lwp *lp, char *args, int *res) *res = uap->start; crit_exit(); - kmem_free(kernel_map, (vm_offset_t)descs, descs_size); + kmem_free(&kernel_map, (vm_offset_t)descs, descs_size); return (0); } diff --git a/sys/platform/pc32/i386/trap.c b/sys/platform/pc32/i386/trap.c index 95f2742996..2e1311d57d 100644 --- a/sys/platform/pc32/i386/trap.c +++ b/sys/platform/pc32/i386/trap.c @@ -36,7 +36,7 @@ * * from: @(#)trap.c 7.4 (Berkeley) 5/13/91 * $FreeBSD: src/sys/i386/i386/trap.c,v 1.147.2.11 2003/02/27 19:09:59 luoqi Exp $ - * $DragonFly: src/sys/platform/pc32/i386/trap.c,v 1.88 2006/12/28 18:29:04 dillon Exp $ + * $DragonFly: src/sys/platform/pc32/i386/trap.c,v 1.89 2006/12/28 21:24:02 dillon Exp $ */ /* @@ -976,7 +976,7 @@ trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva) * always have pte pages mapped, we just 
have to fault * the page. */ - rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL); + rv = vm_fault(&kernel_map, va, ftype, VM_FAULT_NORMAL); } if (rv == KERN_SUCCESS) @@ -1029,7 +1029,7 @@ trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva) if (usermode) goto nogo; - map = kernel_map; + map = &kernel_map; } else { /* * This is a fault on non-kernel virtual memory. @@ -1050,7 +1050,7 @@ trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva) else ftype = VM_PROT_READ; - if (map != kernel_map) { + if (map != &kernel_map) { /* * Keep swapout from messing with us during this * critical time. diff --git a/sys/platform/pc32/i386/vm_machdep.c b/sys/platform/pc32/i386/vm_machdep.c index 5557293c65..4659dc6822 100644 --- a/sys/platform/pc32/i386/vm_machdep.c +++ b/sys/platform/pc32/i386/vm_machdep.c @@ -39,7 +39,7 @@ * from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91 * Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$ * $FreeBSD: src/sys/i386/i386/vm_machdep.c,v 1.132.2.9 2003/01/25 19:02:23 dillon Exp $ - * $DragonFly: src/sys/platform/pc32/i386/vm_machdep.c,v 1.49 2006/12/23 00:27:03 swildner Exp $ + * $DragonFly: src/sys/platform/pc32/i386/vm_machdep.c,v 1.50 2006/12/28 21:24:02 dillon Exp $ */ #include "use_npx.h" @@ -257,7 +257,7 @@ cpu_proc_exit(void) pcb->pcb_ext = NULL; td->td_switch(td); crit_exit(); - kmem_free(kernel_map, (vm_offset_t)ext, ctob(IOPAGES + 1)); + kmem_free(&kernel_map, (vm_offset_t)ext, ctob(IOPAGES + 1)); } user_ldt_free(pcb); if (pcb->pcb_flags & PCB_DBREGS) { diff --git a/sys/vfs/procfs/procfs_mem.c b/sys/vfs/procfs/procfs_mem.c index 4ab6fb798b..70819b7e73 100644 --- a/sys/vfs/procfs/procfs_mem.c +++ b/sys/vfs/procfs/procfs_mem.c @@ -38,7 +38,7 @@ * @(#)procfs_mem.c 8.5 (Berkeley) 6/15/94 * * $FreeBSD: src/sys/miscfs/procfs/procfs_mem.c,v 1.46.2.3 2002/01/22 17:22:59 nectar Exp $ - * $DragonFly: src/sys/vfs/procfs/procfs_mem.c,v 1.11 2004/10/12 19:29:31 dillon Exp $ + * $DragonFly: src/sys/vfs/procfs/procfs_mem.c,v 1.12 
2006/12/28 21:24:02 dillon Exp $ */ /* @@ -96,7 +96,7 @@ procfs_rwmem(struct proc *curp, struct proc *p, struct uio *uio) writing = uio->uio_rw == UIO_WRITE; reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) : VM_PROT_READ; - kva = kmem_alloc_pageable(kernel_map, PAGE_SIZE); + kva = kmem_alloc_pageable(&kernel_map, PAGE_SIZE); /* * Only map in one page at a time. We don't have to, but it @@ -214,7 +214,7 @@ again: crit_exit(); } while (error == 0 && uio->uio_resid > 0); - kmem_free(kernel_map, kva, PAGE_SIZE); + kmem_free(&kernel_map, kva, PAGE_SIZE); vmspace_free(vm); return (error); } diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c index 8fbc518e46..d839273175 100644 --- a/sys/vm/vm_contig.c +++ b/sys/vm/vm_contig.c @@ -64,7 +64,7 @@ * SUCH DAMAGE. * * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91 - * $DragonFly: src/sys/vm/vm_contig.c,v 1.20 2006/12/28 18:29:08 dillon Exp $ + * $DragonFly: src/sys/vm/vm_contig.c,v 1.21 2006/12/28 21:24:02 dillon Exp $ */ /* @@ -381,7 +381,7 @@ vm_contig_pg_free(int start, u_long size) * * Map previously allocated (vm_contig_pg_alloc) range of pages from * vm_page_array[] into the KVA. Once mapped, the pages are part of - * the Kernel, and are to free'ed with kmem_free(kernel_map, addr, size). + * the Kernel, and are to free'ed with kmem_free(&kernel_map, addr, size). */ vm_offset_t vm_contig_pg_kmap(int start, u_long size, vm_map_t map, int flags) @@ -415,9 +415,13 @@ vm_contig_pg_kmap(int start, u_long size, vm_map_t map, int flags) crit_exit(); return (0); } + + /* + * kernel_object maps 1:1 to kernel_map. 
+ */ vm_object_reference(&kernel_object); vm_map_insert(map, &count, - &kernel_object, addr - KvaStart, + &kernel_object, addr, addr, addr + size, VM_MAPTYPE_NORMAL, VM_PROT_ALL, VM_PROT_ALL, @@ -428,8 +432,7 @@ vm_contig_pg_kmap(int start, u_long size, vm_map_t map, int flags) tmp_addr = addr; for (i = start; i < (start + size / PAGE_SIZE); i++) { vm_page_t m = &pga[i]; - vm_page_insert(m, &kernel_object, - OFF_TO_IDX(tmp_addr - KvaStart)); + vm_page_insert(m, &kernel_object, OFF_TO_IDX(tmp_addr)); if ((flags & M_ZERO) && !(m->flags & PG_ZERO)) pmap_zero_page(VM_PAGE_TO_PHYS(m)); m->flags = 0; @@ -452,7 +455,7 @@ contigmalloc( unsigned long boundary) { return contigmalloc_map(size, type, flags, low, high, alignment, - boundary, kernel_map); + boundary, &kernel_map); } void * @@ -485,7 +488,7 @@ contigmalloc_map( void contigfree(void *addr, unsigned long size, struct malloc_type *type) { - kmem_free(kernel_map, (vm_offset_t)addr, size); + kmem_free(&kernel_map, (vm_offset_t)addr, size); } vm_offset_t @@ -496,5 +499,5 @@ vm_page_alloc_contig( vm_offset_t alignment) { return ((vm_offset_t)contigmalloc_map(size, M_DEVBUF, M_NOWAIT, low, - high, alignment, 0ul, kernel_map)); + high, alignment, 0ul, &kernel_map)); } diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h index acaef574c8..76748b8199 100644 --- a/sys/vm/vm_extern.h +++ b/sys/vm/vm_extern.h @@ -32,7 +32,7 @@ * * @(#)vm_extern.h 8.2 (Berkeley) 1/12/94 * $FreeBSD: src/sys/vm/vm_extern.h,v 1.46.2.3 2003/01/13 22:51:17 dillon Exp $ - * $DragonFly: src/sys/vm/vm_extern.h,v 1.20 2006/12/28 18:29:08 dillon Exp $ + * $DragonFly: src/sys/vm/vm_extern.h,v 1.21 2006/12/28 21:24:02 dillon Exp $ */ #ifndef _VM_VM_EXTERN_H_ @@ -83,7 +83,7 @@ vm_offset_t kmem_alloc_wait (vm_map_t, vm_size_t); void kmem_free (vm_map_t, vm_offset_t, vm_size_t); void kmem_free_wakeup (vm_map_t, vm_offset_t, vm_size_t); void kmem_init (vm_offset_t, vm_offset_t); -vm_map_t kmem_suballoc (vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t); +void 
kmem_suballoc (vm_map_t, vm_map_t, vm_offset_t *, vm_offset_t *, vm_size_t); void munmapfd (struct proc *, int); int swaponvp (struct thread *, struct vnode *, u_long); void swapout_procs (int); diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c index 66c55fe0ba..97765ae13e 100644 --- a/sys/vm/vm_fault.c +++ b/sys/vm/vm_fault.c @@ -67,7 +67,7 @@ * rights to redistribute these changes. * * $FreeBSD: src/sys/vm/vm_fault.c,v 1.108.2.8 2002/02/26 05:49:27 silby Exp $ - * $DragonFly: src/sys/vm/vm_fault.c,v 1.32 2006/12/28 18:29:08 dillon Exp $ + * $DragonFly: src/sys/vm/vm_fault.c,v 1.33 2006/12/28 21:24:02 dillon Exp $ */ /* @@ -791,7 +791,7 @@ readrest: * around having the machine panic on a kernel space * fault w/ I/O error. */ - if (((fs->map != kernel_map) && (rv == VM_PAGER_ERROR)) || + if (((fs->map != &kernel_map) && (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) { vm_page_free(fs->m); fs->m = NULL; diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c index b818203888..229b20095e 100644 --- a/sys/vm/vm_glue.c +++ b/sys/vm/vm_glue.c @@ -60,7 +60,7 @@ * rights to redistribute these changes. * * $FreeBSD: src/sys/vm/vm_glue.c,v 1.94.2.4 2003/01/13 22:51:17 dillon Exp $ - * $DragonFly: src/sys/vm/vm_glue.c,v 1.46 2006/12/23 00:41:31 swildner Exp $ + * $DragonFly: src/sys/vm/vm_glue.c,v 1.47 2006/12/28 21:24:02 dillon Exp $ */ #include "opt_vm.h" @@ -142,16 +142,16 @@ kernacc(c_caddr_t addr, int len, int rw) /* * Nominal kernel memory access - check access via kernel_map. 
*/ - if ((vm_offset_t)addr + len > kernel_map->max_offset || + if ((vm_offset_t)addr + len > kernel_map.max_offset || (vm_offset_t)addr + len < (vm_offset_t)addr) { return (FALSE); } prot = rw; saddr = trunc_page((vm_offset_t)addr); eaddr = round_page((vm_offset_t)addr + len); - vm_map_lock_read(kernel_map); - rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot); - vm_map_unlock_read(kernel_map); + vm_map_lock_read(&kernel_map); + rv = vm_map_check_protection(&kernel_map, saddr, eaddr, prot); + vm_map_unlock_read(&kernel_map); return (rv == TRUE); } diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c index 66404cd1b1..0ce7d5b6a0 100644 --- a/sys/vm/vm_kern.c +++ b/sys/vm/vm_kern.c @@ -62,7 +62,7 @@ * rights to redistribute these changes. * * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $ - * $DragonFly: src/sys/vm/vm_kern.c,v 1.25 2006/12/28 18:29:08 dillon Exp $ + * $DragonFly: src/sys/vm/vm_kern.c,v 1.26 2006/12/28 21:24:02 dillon Exp $ */ /* @@ -85,10 +85,10 @@ #include #include -vm_map_t kernel_map=0; -vm_map_t exec_map=0; -vm_map_t clean_map=0; -vm_map_t buffer_map=0; +struct vm_map kernel_map; +struct vm_map exec_map; +struct vm_map clean_map; +struct vm_map buffer_map; /* * kmem_alloc_pageable: @@ -149,7 +149,6 @@ vm_offset_t kmem_alloc3(vm_map_t map, vm_size_t size, int kmflags) { vm_offset_t addr; - vm_offset_t offset; vm_offset_t i; int count; @@ -177,10 +176,9 @@ kmem_alloc3(vm_map_t map, vm_size_t size, int kmflags) vm_map_entry_release(count); return (0); } - offset = addr - KvaStart; vm_object_reference(&kernel_object); vm_map_insert(map, &count, - &kernel_object, offset, addr, addr + size, + &kernel_object, addr, addr, addr + size, VM_MAPTYPE_NORMAL, VM_PROT_ALL, VM_PROT_ALL, 0); @@ -211,7 +209,7 @@ kmem_alloc3(vm_map_t map, vm_size_t size, int kmflags) for (i = 0; i < size; i += PAGE_SIZE) { vm_page_t mem; - mem = vm_page_grab(&kernel_object, OFF_TO_IDX(offset + i), + mem = vm_page_grab(&kernel_object, OFF_TO_IDX(addr 
+ i), VM_ALLOC_ZERO | VM_ALLOC_NORMAL | VM_ALLOC_RETRY); if ((mem->flags & PG_ZERO) == 0) vm_page_zero_fill(mem); @@ -247,22 +245,21 @@ kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size) /* * kmem_suballoc: * - * Allocates a map to manage a subrange - * of the kernel virtual address space. - * - * Arguments are as follows: + * Used to break a system map into smaller maps, usually to reduce + * contention and to provide large KVA spaces for subsystems like the + * buffer cache. * * parent Map to take range from + * result * size Size of range to find * min, max Returned endpoints of map * pageable Can the region be paged */ -vm_map_t -kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max, - vm_size_t size) +void +kmem_suballoc(vm_map_t parent, vm_map_t result, + vm_offset_t *min, vm_offset_t *max, vm_size_t size) { int ret; - vm_map_t result; size = round_page(size); @@ -279,12 +276,9 @@ kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max, } *max = *min + size; pmap_reference(vm_map_pmap(parent)); - result = vm_map_create(vm_map_pmap(parent), *min, *max); - if (result == NULL) - panic("kmem_suballoc: cannot create submap"); + vm_map_init(result, *min, *max, vm_map_pmap(parent)); if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS) panic("kmem_suballoc: unable to change range to submap"); - return (result); } /* @@ -356,10 +350,13 @@ kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size) /* * kmem_init: * - * Create the kernel map; insert a mapping covering kernel text, - * data, bss, and all space allocated thus far (`boostrap' data). - * That is, the area (0,start) and (end,KvaEnd) must be marked - * as allocated. + * Create the kernel_map and insert mappings to cover areas already + * allocated or reserved thus far. That is, the area (KvaStart,start) + * and (end,KvaEnd) must be marked as allocated. 
+ * + * We could use a min_offset of 0 instead of KvaStart, but since the + * min_offset is not used for any calculations other than a bounds check + * it does not affect readability. KvaStart is more appropriate. * * Depend on the zalloc bootstrap cache to get our vm_map_entry_t. */ @@ -369,22 +366,25 @@ kmem_init(vm_offset_t start, vm_offset_t end) vm_map_t m; int count; - m = vm_map_create(kernel_pmap, KvaStart, KvaEnd); + m = vm_map_create(&kernel_map, kernel_pmap, KvaStart, KvaEnd); vm_map_lock(m); /* N.B.: cannot use kgdb to debug, starting with this assignment ... */ - kernel_map = m; - kernel_map->system_map = 1; + m->system_map = 1; count = vm_map_entry_reserve(MAP_RESERVE_COUNT); - vm_map_insert(m, &count, NULL, (vm_offset_t) 0, - KvaStart, start, - VM_MAPTYPE_NORMAL, - VM_PROT_ALL, VM_PROT_ALL, - 0); - vm_map_insert(m, &count, NULL, (vm_offset_t) 0, - end, KvaEnd, - VM_MAPTYPE_NORMAL, - VM_PROT_ALL, VM_PROT_ALL, - 0); + if (KvaStart != start) { + vm_map_insert(m, &count, NULL, (vm_offset_t) 0, + KvaStart, start, + VM_MAPTYPE_NORMAL, + VM_PROT_ALL, VM_PROT_ALL, + 0); + } + if (KvaEnd != end) { + vm_map_insert(m, &count, NULL, (vm_offset_t) 0, + end, KvaEnd, + VM_MAPTYPE_NORMAL, + VM_PROT_ALL, VM_PROT_ALL, + 0); + } /* ... and ending with the completion of the above `insert' */ vm_map_unlock(m); vm_map_entry_release(count); diff --git a/sys/vm/vm_kern.h b/sys/vm/vm_kern.h index 08b15482df..a24e0bf574 100644 --- a/sys/vm/vm_kern.h +++ b/sys/vm/vm_kern.h @@ -62,7 +62,7 @@ * rights to redistribute these changes. * * $FreeBSD: src/sys/vm/vm_kern.h,v 1.22 2000/02/16 21:11:31 dillon Exp $ - * $DragonFly: src/sys/vm/vm_kern.h,v 1.10 2006/05/20 02:42:15 dillon Exp $ + * $DragonFly: src/sys/vm/vm_kern.h,v 1.11 2006/12/28 21:24:02 dillon Exp $ */ #ifndef _VM_VM_KERN_H_ @@ -82,10 +82,10 @@ #define KM_KRESERVE 0x0002 /* Kernel memory management definitions. 
*/ -extern vm_map_t buffer_map; -extern vm_map_t kernel_map; -extern vm_map_t clean_map; -extern vm_map_t exec_map; +extern struct vm_map buffer_map; +extern struct vm_map kernel_map; +extern struct vm_map clean_map; +extern struct vm_map exec_map; extern u_int vm_kmem_size; extern vm_offset_t kernel_vm_end; diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c index 5164a400c9..704c540202 100644 --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -62,7 +62,7 @@ * rights to redistribute these changes. * * $FreeBSD: src/sys/vm/vm_map.c,v 1.187.2.19 2003/05/27 00:47:02 alc Exp $ - * $DragonFly: src/sys/vm/vm_map.c,v 1.53 2006/12/28 18:29:08 dillon Exp $ + * $DragonFly: src/sys/vm/vm_map.c,v 1.54 2006/12/28 21:24:02 dillon Exp $ */ /* @@ -197,7 +197,7 @@ vmspace_alloc(vm_offset_t min, vm_offset_t max) vm = zalloc(vmspace_zone); bzero(&vm->vm_startcopy, (char *)&vm->vm_endcopy - (char *)&vm->vm_startcopy); - vm_map_init(&vm->vm_map, min, max); + vm_map_init(&vm->vm_map, min, max, NULL); pmap_pinit(vmspace_pmap(vm)); vm->vm_map.pmap = vmspace_pmap(vm); /* XXX */ vm->vm_refcnt = 1; @@ -327,13 +327,11 @@ vmspace_swap_count(struct vmspace *vmspace) * the given lower and upper address bounds. */ vm_map_t -vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max) +vm_map_create(vm_map_t result, pmap_t pmap, vm_offset_t min, vm_offset_t max) { - vm_map_t result; - - result = zalloc(mapzone); - vm_map_init(result, min, max); - result->pmap = pmap; + if (result == NULL) + result = zalloc(mapzone); + vm_map_init(result, min, max, pmap); return (result); } @@ -343,7 +341,7 @@ vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max) * The pmap is set elsewhere. 
*/ void -vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max) +vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max, pmap_t pmap) { map->header.next = map->header.prev = &map->header; RB_INIT(&map->rb_root); @@ -353,6 +351,7 @@ vm_map_init(struct vm_map *map, vm_offset_t min, vm_offset_t max) map->infork = 0; map->min_offset = min; map->max_offset = max; + map->pmap = pmap; map->first_free = &map->header; map->hint = &map->header; map->timestamp = 0; @@ -959,7 +958,7 @@ retry: break; } map->hint = entry; - if (map == kernel_map) { + if (map == &kernel_map) { vm_offset_t ksize; if ((ksize = round_page(start + length)) > kernel_vm_end) { pmap_growkernel(ksize); diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h index 7ac2590b23..86eed80a9c 100644 --- a/sys/vm/vm_map.h +++ b/sys/vm/vm_map.h @@ -62,7 +62,7 @@ * rights to redistribute these changes. * * $FreeBSD: src/sys/vm/vm_map.h,v 1.54.2.5 2003/01/13 22:51:17 dillon Exp $ - * $DragonFly: src/sys/vm/vm_map.h,v 1.27 2006/12/23 00:41:31 swildner Exp $ + * $DragonFly: src/sys/vm/vm_map.h,v 1.28 2006/12/28 21:24:02 dillon Exp $ */ /* @@ -430,7 +430,7 @@ int vm_map_entry_reserve(int); int vm_map_entry_kreserve(int); void vm_map_entry_release(int); void vm_map_entry_krelease(int); -vm_map_t vm_map_create (struct pmap *, vm_offset_t, vm_offset_t); +vm_map_t vm_map_create (vm_map_t, struct pmap *, vm_offset_t, vm_offset_t); int vm_map_delete (vm_map_t, vm_offset_t, vm_offset_t, int *); int vm_map_find (vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t, @@ -440,7 +440,7 @@ int vm_map_find (vm_map_t, vm_object_t, vm_ooffset_t, int); int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_offset_t, vm_offset_t *); int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t); -void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t); +void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t, pmap_t); int vm_map_insert (vm_map_t, int *, vm_object_t, vm_ooffset_t, vm_offset_t, 
vm_offset_t, vm_maptype_t, diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c index ec891d33b0..0a301495c9 100644 --- a/sys/vm/vm_object.c +++ b/sys/vm/vm_object.c @@ -62,7 +62,7 @@ * rights to redistribute these changes. * * $FreeBSD: src/sys/vm/vm_object.c,v 1.171.2.8 2003/05/26 19:17:56 alc Exp $ - * $DragonFly: src/sys/vm/vm_object.c,v 1.28 2006/12/28 18:29:08 dillon Exp $ + * $DragonFly: src/sys/vm/vm_object.c,v 1.29 2006/12/28 21:24:02 dillon Exp $ */ /* @@ -191,7 +191,7 @@ vm_object_init(void) { TAILQ_INIT(&vm_object_list); - _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(KvaSize), + _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(KvaEnd), &kernel_object); obj_zone = &obj_zone_store; @@ -1769,11 +1769,11 @@ vm_object_in_map(vm_object_t object) allproc_scan(vm_object_in_map_callback, &info); if (info.rv) return 1; - if( _vm_object_in_map( kernel_map, object, 0)) + if( _vm_object_in_map(&kernel_map, object, 0)) return 1; - if( _vm_object_in_map( pager_map, object, 0)) + if( _vm_object_in_map(&pager_map, object, 0)) return 1; - if( _vm_object_in_map( buffer_map, object, 0)) + if( _vm_object_in_map(&buffer_map, object, 0)) return 1; return 0; } diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c index d0bfb11aaf..6c0a015fc3 100644 --- a/sys/vm/vm_pager.c +++ b/sys/vm/vm_pager.c @@ -62,7 +62,7 @@ * rights to redistribute these changes. * * $FreeBSD: src/sys/vm/vm_pager.c,v 1.54.2.2 2001/11/18 07:11:00 dillon Exp $ - * $DragonFly: src/sys/vm/vm_pager.c,v 1.22 2006/05/03 20:44:49 dillon Exp $ + * $DragonFly: src/sys/vm/vm_pager.c,v 1.23 2006/12/28 21:24:02 dillon Exp $ */ /* @@ -176,7 +176,8 @@ int npagers = sizeof(pagertab) / sizeof(pagertab[0]); #define PAGER_MAP_SIZE (8 * 1024 * 1024) int pager_map_size = PAGER_MAP_SIZE; -vm_map_t pager_map; +struct vm_map pager_map; + static int bswneeded; static vm_offset_t swapbkva; /* swap buffers kva */ static TAILQ_HEAD(swqueue, buf) bswlist; @@ -208,7 +209,7 @@ vm_pager_bufferinit(void) /* * Reserve KVM space for pbuf data. 
*/ - swapbkva = kmem_alloc_pageable(pager_map, nswbuf * MAXPHYS); + swapbkva = kmem_alloc_pageable(&pager_map, nswbuf * MAXPHYS); if (!swapbkva) panic("Not enough pager_map VM space for physical buffers"); diff --git a/sys/vm/vm_pager.h b/sys/vm/vm_pager.h index 148b00ed5d..e216876ad6 100644 --- a/sys/vm/vm_pager.h +++ b/sys/vm/vm_pager.h @@ -37,7 +37,7 @@ * * @(#)vm_pager.h 8.4 (Berkeley) 1/12/94 * $FreeBSD: src/sys/vm/vm_pager.h,v 1.24.2.2 2002/12/31 09:34:51 dillon Exp $ - * $DragonFly: src/sys/vm/vm_pager.h,v 1.8 2006/05/20 02:42:15 dillon Exp $ + * $DragonFly: src/sys/vm/vm_pager.h,v 1.9 2006/12/28 21:24:02 dillon Exp $ */ /* @@ -102,7 +102,7 @@ MALLOC_DECLARE(M_VMPGDATA); struct vnode; -extern vm_map_t pager_map; +extern struct vm_map pager_map; extern int pager_map_size; extern struct pagerops *pagertab[]; diff --git a/sys/vm/vm_zone.c b/sys/vm/vm_zone.c index 68bf4e4a5e..b57cc1d16a 100644 --- a/sys/vm/vm_zone.c +++ b/sys/vm/vm_zone.c @@ -12,7 +12,7 @@ * John S. Dyson. * * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $ - * $DragonFly: src/sys/vm/vm_zone.c,v 1.22 2006/12/20 18:14:44 dillon Exp $ + * $DragonFly: src/sys/vm/vm_zone.c,v 1.23 2006/12/28 21:24:02 dillon Exp $ */ #include @@ -179,7 +179,7 @@ zinitna(vm_zone_t z, vm_object_t obj, char *name, int size, totsize = round_page(z->zsize * nentries); zone_kmem_kvaspace += totsize; - z->zkva = kmem_alloc_pageable(kernel_map, totsize); + z->zkva = kmem_alloc_pageable(&kernel_map, totsize); if (z->zkva == 0) { zlist = z->znext; return 0; @@ -353,7 +353,7 @@ zget(vm_zone_t z) */ nbytes = z->zalloc * PAGE_SIZE; - item = (void *)kmem_alloc3(kernel_map, nbytes, KM_KRESERVE); + item = (void *)kmem_alloc3(&kernel_map, nbytes, KM_KRESERVE); /* note: z might be modified due to blocking */ if (item != NULL) { @@ -369,7 +369,7 @@ zget(vm_zone_t z) */ nbytes = z->zalloc * PAGE_SIZE; - item = (void *)kmem_alloc3(kernel_map, nbytes, 0); + item = (void *)kmem_alloc3(&kernel_map, nbytes, 0); /* 
note: z might be modified due to blocking */ if (item != NULL) {