From 1eeaf6b2bb3621f01159723feaba3aa2c5d933fd Mon Sep 17 00:00:00 2001 From: Aaron LI Date: Thu, 20 May 2021 22:40:00 +0800 Subject: [PATCH] vm: Change 'kernel_map' global to type 'struct vm_map *' Change the global variable 'kernel_map' from type 'struct vm_map' to a pointer to this struct. This simplifies the code a bit, since all users previously had to take its address. This change also aligns with NetBSD, where 'kernel_map' is likewise a pointer, which helps the porting of NVMM. No functional changes. --- sys/ddb/db_break.c | 8 ++++---- sys/dev/drm/drm_scatter.c | 4 ++-- sys/dev/drm/linux_dma.c | 2 +- sys/dev/drm/linux_vmalloc.c | 4 ++-- sys/dev/video/bktr/bktr_core.c | 8 ++++---- sys/kern/imgact_gzip.c | 6 +++--- sys/kern/kern_slaballoc.c | 22 ++++++++++---------- sys/kern/kern_synch.c | 2 +- sys/kern/kern_timeout.c | 4 ++-- sys/kern/link_elf_obj.c | 8 ++++---- sys/kern/lwkt_thread.c | 15 +++++++------- sys/kern/sys_pipe.c | 6 +++--- sys/kern/sys_process.c | 12 +++++------ sys/kern/vfs_bio.c | 2 +- sys/libkern/arc4random.c | 2 +- sys/net/netisr.c | 2 +- sys/platform/pc64/x86_64/machdep.c | 4 ++-- sys/platform/pc64/x86_64/mp_machdep.c | 8 ++++---- sys/platform/pc64/x86_64/pmap.c | 26 ++++++++++++------------ sys/platform/pc64/x86_64/trap.c | 4 ++-- sys/platform/vkernel64/platform/pmap.c | 22 ++++++++++---------- sys/platform/vkernel64/x86_64/autoconf.c | 8 ++++---- sys/platform/vkernel64/x86_64/mp.c | 4 ++-- sys/platform/vkernel64/x86_64/trap.c | 4 ++-- sys/vfs/procfs/procfs_mem.c | 4 ++-- sys/vm/vm_contig.c | 10 ++++----- sys/vm/vm_fault.c | 8 ++++---- sys/vm/vm_glue.c | 4 ++-- sys/vm/vm_kern.c | 12 ++++++----- sys/vm/vm_kern.h | 2 +- sys/vm/vm_map.c | 4 ++-- sys/vm/vm_object.c | 2 +- sys/vm/vm_page.c | 2 +- sys/vm/vm_pager.c | 2 +- sys/vm/vm_zone.c | 8 ++++---- 35 files changed, 124 insertions(+), 121 deletions(-) diff --git a/sys/ddb/db_break.c b/sys/ddb/db_break.c index e910a4f5be..27d8379d98 100644 --- a/sys/ddb/db_break.c +++ b/sys/ddb/db_break.c @@ -295,8 +295,8 @@ boolean_t db_map_equal(vm_map_t map1, vm_map_t map2) { return ((map1 == map2) || - ((map1 == NULL) && (map2 == &kernel_map)) || - ((map1 == &kernel_map) && (map2 == NULL))); + ((map1 == NULL) && (map2 == kernel_map)) || + ((map1 == kernel_map) && (map2 == NULL))); } boolean_t @@ -306,7 +306,7 @@ db_map_current(vm_map_t map) thread_t thread; return ((map == NULL) || - (map == &kernel_map) || + (map == kernel_map) || (((thread = current_thread()) != NULL) && (map == thread->task->map))); #else @@ -317,5 +317,5 @@ db_map_current(vm_map_t map) vm_map_t db_map_addr(vm_offset_t addr) { - return &kernel_map; + return kernel_map; } diff --git a/sys/dev/drm/drm_scatter.c b/sys/dev/drm/drm_scatter.c index 87dc5766b7..f376d0e0c5 100644 --- a/sys/dev/drm/drm_scatter.c +++ b/sys/dev/drm/drm_scatter.c @@ -41,7 +41,7 @@ static void drm_sg_cleanup(struct drm_sg_mem * entry) return; if (entry->vaddr != 0) - kmem_free(&kernel_map, entry->vaddr, IDX_TO_OFF(entry->pages)); + kmem_free(kernel_map, entry->vaddr, IDX_TO_OFF(entry->pages)); kfree(entry->busaddr); kfree(entry); @@ -81,7 +81,7 @@ int drm_legacy_sg_alloc(struct drm_device *dev, void *data, entry->busaddr = kmalloc(entry->pages * sizeof(*entry->busaddr), M_DRM, M_WAITOK | M_ZERO); - entry->vaddr = kmem_alloc_attr(&kernel_map, size, + entry->vaddr = kmem_alloc_attr(kernel_map, size, VM_SUBSYS_DRM_SCAT, M_WAITOK | M_ZERO, 0, BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING); diff --git a/sys/dev/drm/linux_dma.c b/sys/dev/drm/linux_dma.c index dee880db4b..9254ee2ade 100644 ---
a/sys/dev/drm/linux_dma.c +++ b/sys/dev/drm/linux_dma.c @@ -56,5 +56,5 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle) { - kmem_free(&kernel_map, (vm_offset_t)cpu_addr, size); + kmem_free(kernel_map, (vm_offset_t)cpu_addr, size); } diff --git a/sys/dev/drm/linux_vmalloc.c b/sys/dev/drm/linux_vmalloc.c index 41b7c9717a..e07f8ea771 100644 --- a/sys/dev/drm/linux_vmalloc.c +++ b/sys/dev/drm/linux_vmalloc.c @@ -53,7 +53,7 @@ vmap(struct page **pages, unsigned int count, vmp = kmalloc(sizeof(struct vmap), M_DRM, M_WAITOK | M_ZERO); size = count * PAGE_SIZE; - off = kmem_alloc_nofault(&kernel_map, size, + off = kmem_alloc_nofault(kernel_map, size, VM_SUBSYS_DRM_VMAP, PAGE_SIZE); if (off == 0) return (NULL); @@ -79,7 +79,7 @@ vunmap(const void *addr) size = vmp->npages * PAGE_SIZE; pmap_qremove((vm_offset_t)addr, vmp->npages); - kmem_free(&kernel_map, (vm_offset_t)addr, size); + kmem_free(kernel_map, (vm_offset_t)addr, size); goto found; } } diff --git a/sys/dev/video/bktr/bktr_core.c b/sys/dev/video/bktr/bktr_core.c index 9c44088c52..f5a77e4741 100644 --- a/sys/dev/video/bktr/bktr_core.c +++ b/sys/dev/video/bktr/bktr_core.c @@ -1664,10 +1664,10 @@ video_ioctl( bktr_ptr_t bktr, int unit, ioctl_cmd_t cmd, caddr_t arg, struct thr if ((int) temp > bktr->alloc_pages && bktr->video.addr == 0) { - buf = get_bktr_mem(unit, temp*PAGE_SIZE); - if (buf != 0) { - kmem_free(&kernel_map, bktr->bigbuf, - (bktr->alloc_pages * PAGE_SIZE)); + buf = get_bktr_mem(unit, temp*PAGE_SIZE); + if (buf != 0) { + kmem_free(kernel_map, bktr->bigbuf, + (bktr->alloc_pages * PAGE_SIZE)); bktr->bigbuf = buf; bktr->alloc_pages = temp; diff --git a/sys/kern/imgact_gzip.c b/sys/kern/imgact_gzip.c index 997b0bb0c0..752dad176d 100644 --- a/sys/kern/imgact_gzip.c +++ b/sys/kern/imgact_gzip.c @@ -131,7 +131,7 @@ exec_gzip_imgact(struct image_params *imgp) } if (igz.inbuf) { - error2 = vm_map_remove(&kernel_map, (vm_offset_t)igz.inbuf, + error2 = vm_map_remove(kernel_map, (vm_offset_t)igz.inbuf, (vm_offset_t)igz.inbuf + PAGE_SIZE); } if (igz.error || error || error2) { @@ -287,7 +287,7 @@ NextByte(void *vp) return igz->inbuf[(igz->idx++) - igz->offset]; } if (igz->inbuf) { - error = vm_map_remove(&kernel_map, (vm_offset_t)igz->inbuf, + error = vm_map_remove(kernel_map, (vm_offset_t)igz->inbuf, (vm_offset_t)igz->inbuf + PAGE_SIZE); if (error) { igz->where = __LINE__; @@ -297,7 +297,7 @@ NextByte(void *vp) } igz->offset = igz->idx & ~PAGE_MASK; - error = vm_mmap(&kernel_map, /* map */ + error = vm_mmap(kernel_map, /* map */ (vm_offset_t *) & igz->inbuf, /* address */ PAGE_SIZE, /* size */ VM_PROT_READ, /* protection */ diff --git a/sys/kern/kern_slaballoc.c b/sys/kern/kern_slaballoc.c index b66836faba..1329d88c8c 100644 --- a/sys/kern/kern_slaballoc.c +++ b/sys/kern/kern_slaballoc.c @@ -1684,13 +1684,13 @@ kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) thread_t td; size = round_page(size); - addr = vm_map_min(&kernel_map); + addr = vm_map_min(kernel_map); count = vm_map_entry_reserve(MAP_RESERVE_COUNT); crit_enter(); - vm_map_lock(&kernel_map); - if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) { - vm_map_unlock(&kernel_map); + vm_map_lock(kernel_map); + if (vm_map_findspace(kernel_map, addr, size, align, 0, &addr)) { + vm_map_unlock(kernel_map); if ((flags & M_NULLOK) == 0) panic("kmem_slab_alloc(): kernel_map ran out of space!"); vm_map_entry_release(count); @@ -1703,7 +1703,7 @@ kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) */ 
vm_object_hold(&kernel_object); vm_object_reference_locked(&kernel_object); - vm_map_insert(&kernel_map, &count, + vm_map_insert(kernel_map, &count, &kernel_object, NULL, addr, NULL, addr, addr + size, @@ -1711,8 +1711,8 @@ kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) VM_SUBSYS_KMALLOC, VM_PROT_ALL, VM_PROT_ALL, 0); vm_object_drop(&kernel_object); - vm_map_set_wired_quick(&kernel_map, addr, size, &count); - vm_map_unlock(&kernel_map); + vm_map_set_wired_quick(kernel_map, addr, size, &count); + vm_map_unlock(kernel_map); td = curthread; @@ -1784,9 +1784,9 @@ kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) /* page should already be busy */ vm_page_free(m); } - vm_map_lock(&kernel_map); - vm_map_delete(&kernel_map, addr, addr + size, &count); - vm_map_unlock(&kernel_map); + vm_map_lock(kernel_map); + vm_map_delete(kernel_map, addr, addr + size, &count); + vm_map_unlock(kernel_map); vm_object_drop(&kernel_object); vm_map_entry_release(count); @@ -1840,6 +1840,6 @@ void kmem_slab_free(void *ptr, vm_size_t size) { crit_enter(); - vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size); + vm_map_remove(kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size); crit_exit(); } diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index eb350831d4..9e9f657529 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -1546,7 +1546,7 @@ sleep_gdinit(globaldata_t gd) n = TCHASHSHIFT(slpque_tablesize) + 1; hash_size = sizeof(struct tslpque) * n; - gd->gd_tsleep_hash = (void *)kmem_alloc3(&kernel_map, hash_size, + gd->gd_tsleep_hash = (void *)kmem_alloc3(kernel_map, hash_size, VM_SUBSYS_GD, KM_CPU(gd->gd_cpuid)); memset(gd->gd_tsleep_hash, 0, hash_size); diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c index 5baa5ac530..847004bb5d 100644 --- a/sys/kern/kern_timeout.c +++ b/sys/kern/kern_timeout.c @@ -397,14 +397,14 @@ swi_softclock_setup(void *arg) softclock_pcpu_t sc; int wheel_sz; - sc = (void *)kmem_alloc3(&kernel_map, sizeof(*sc), + sc = (void *)kmem_alloc3(kernel_map, sizeof(*sc), VM_SUBSYS_GD, KM_CPU(cpu)); memset(sc, 0, sizeof(*sc)); TAILQ_INIT(&sc->freelist); softclock_pcpu_ary[cpu] = sc; wheel_sz = sizeof(*sc->callwheel) * cwheelsize; - sc->callwheel = (void *)kmem_alloc3(&kernel_map, wheel_sz, + sc->callwheel = (void *)kmem_alloc3(kernel_map, wheel_sz, VM_SUBSYS_GD, KM_CPU(cpu)); memset(sc->callwheel, 0, wheel_sz); for (i = 0; i < cwheelsize; ++i) { diff --git a/sys/kern/link_elf_obj.c b/sys/kern/link_elf_obj.c index 69bfe48f35..6e3558f290 100644 --- a/sys/kern/link_elf_obj.c +++ b/sys/kern/link_elf_obj.c @@ -645,7 +645,7 @@ link_elf_obj_load_file(const char *filename, linker_file_t * result) } vm_object_hold(ef->object); vm_object_reference_locked(ef->object); - ef->address = (caddr_t) vm_map_min(&kernel_map); + ef->address = (caddr_t) vm_map_min(kernel_map); ef->bytes = 0; /* @@ -660,7 +660,7 @@ link_elf_obj_load_file(const char *filename, linker_file_t * result) vm_object_drop(ef->object); #else mapbase = KERNBASE; - error = vm_map_find(&kernel_map, ef->object, NULL, + error = vm_map_find(kernel_map, ef->object, NULL, 0, &mapbase, round_page(mapsize), PAGE_SIZE, TRUE, VM_MAPTYPE_NORMAL, VM_SUBSYS_IMGACT, @@ -672,7 +672,7 @@ link_elf_obj_load_file(const char *filename, linker_file_t * result) goto out; } /* Wire the pages */ - error = vm_map_wire(&kernel_map, mapbase, + error = vm_map_wire(kernel_map, mapbase, mapbase + round_page(mapsize), 0); #endif if (error != KERN_SUCCESS) { @@ -900,7 +900,7 @@ 
link_elf_obj_unload_file(linker_file_t file) #if defined(__x86_64__) && defined(_KERNEL_VIRTUAL) vkernel_module_memory_free((vm_offset_t)ef->address, ef->bytes); #else - vm_map_remove(&kernel_map, (vm_offset_t) ef->address, + vm_map_remove(kernel_map, (vm_offset_t) ef->address, (vm_offset_t) ef->address + (ef->object->size << PAGE_SHIFT)); #endif diff --git a/sys/kern/lwkt_thread.c b/sys/kern/lwkt_thread.c index 8a67435b96..57f851e2ba 100644 --- a/sys/kern/lwkt_thread.c +++ b/sys/kern/lwkt_thread.c @@ -266,7 +266,7 @@ _lwkt_thread_dtor(void *obj, void *privdata) KASSERT((td->td_flags & TDF_ALLOCATED_STACK) && td->td_kstack && td->td_kstack_size > 0, ("_lwkt_thread_dtor: corrupted stack")); - kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size); + kmem_free(kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size); td->td_kstack = NULL; td->td_flags = 0; } @@ -383,16 +383,17 @@ lwkt_alloc_thread(struct thread *td, int stksize, int cpu, int flags) */ if ((stack = td->td_kstack) != NULL && td->td_kstack_size != stksize) { if (flags & TDF_ALLOCATED_STACK) { - kmem_free(&kernel_map, (vm_offset_t)stack, td->td_kstack_size); + kmem_free(kernel_map, (vm_offset_t)stack, td->td_kstack_size); stack = NULL; } } if (stack == NULL) { - if (cpu < 0) - stack = (void *)kmem_alloc_stack(&kernel_map, stksize, 0); - else - stack = (void *)kmem_alloc_stack(&kernel_map, stksize, + if (cpu < 0) { + stack = (void *)kmem_alloc_stack(kernel_map, stksize, 0); + } else { + stack = (void *)kmem_alloc_stack(kernel_map, stksize, KM_CPU(cpu)); + } flags |= TDF_ALLOCATED_STACK; } if (cpu < 0) { @@ -515,7 +516,7 @@ lwkt_free_thread(thread_t td) /* client-allocated struct with internally allocated stack */ KASSERT(td->td_kstack && td->td_kstack_size > 0, ("lwkt_free_thread: corrupted stack")); - kmem_free(&kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size); + kmem_free(kernel_map, (vm_offset_t)td->td_kstack, td->td_kstack_size); td->td_kstack = NULL; td->td_kstack_size = 0; } diff --git a/sys/kern/sys_pipe.c b/sys/kern/sys_pipe.c index 49610af671..5cd39de7c1 100644 --- a/sys/kern/sys_pipe.c +++ b/sys/kern/sys_pipe.c @@ -374,9 +374,9 @@ pipespace(struct pipe *pipe, struct pipebuf *pb, size_t size) */ if (object == NULL || object->size != npages) { object = vm_object_allocate(OBJT_DEFAULT, npages); - buffer = (caddr_t)vm_map_min(&kernel_map); + buffer = (caddr_t)vm_map_min(kernel_map); - error = vm_map_find(&kernel_map, object, NULL, + error = vm_map_find(kernel_map, object, NULL, 0, (vm_offset_t *)&buffer, size, PAGE_SIZE, TRUE, VM_MAPTYPE_NORMAL, VM_SUBSYS_PIPE, @@ -1182,7 +1182,7 @@ static void pipe_free_kmem(struct pipebuf *pb) { if (pb->buffer != NULL) { - kmem_free(&kernel_map, (vm_offset_t)pb->buffer, pb->size); + kmem_free(kernel_map, (vm_offset_t)pb->buffer, pb->size); pb->buffer = NULL; pb->object = NULL; } diff --git a/sys/kern/sys_process.c b/sys/kern/sys_process.c index aed04724c9..c35e83e615 100644 --- a/sys/kern/sys_process.c +++ b/sys/kern/sys_process.c @@ -92,7 +92,7 @@ pread (struct proc *procp, unsigned int addr, unsigned int *retval) vm_map_lookup_done (tmap, out_entry, 0); /* Find space in kernel_map for the page we're interested in */ - rv = vm_map_find (&kernel_map, object, NULL, + rv = vm_map_find (kernel_map, object, NULL, IDX_TO_OFF(pindex), &kva, PAGE_SIZE, PAGE_SIZE, FALSE, VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC, @@ -101,13 +101,13 @@ pread (struct proc *procp, unsigned int addr, unsigned int *retval) if (!rv) { vm_object_reference XXX (object); - rv = vm_map_wire 
(&kernel_map, kva, kva + PAGE_SIZE, 0); + rv = vm_map_wire (kernel_map, kva, kva + PAGE_SIZE, 0); if (!rv) { *retval = 0; bcopy ((caddr_t)kva + page_offset, retval, sizeof *retval); } - vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE); + vm_map_remove (kernel_map, kva, kva + PAGE_SIZE); } return rv; @@ -186,7 +186,7 @@ pwrite (struct proc *procp, unsigned int addr, unsigned int datum) return EFAULT; /* Find space in kernel_map for the page we're interested in */ - rv = vm_map_find (&kernel_map, object, NULL, + rv = vm_map_find (kernel_map, object, NULL, IDX_TO_OFF(pindex), &kva, PAGE_SIZE, PAGE_SIZE, FALSE, VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC, @@ -194,11 +194,11 @@ pwrite (struct proc *procp, unsigned int addr, unsigned int datum) if (!rv) { vm_object_reference XXX (object); - rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0); + rv = vm_map_wire (kernel_map, kva, kva + PAGE_SIZE, 0); if (!rv) { bcopy (&datum, (caddr_t)kva + page_offset, sizeof datum); } - vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE); + vm_map_remove (kernel_map, kva, kva + PAGE_SIZE); } if (fix_prot) diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c index e1795d5ed3..f6a78fd506 100644 --- a/sys/kern/vfs_bio.c +++ b/sys/kern/vfs_bio.c @@ -698,7 +698,7 @@ bufinit(void *dummy __unused) * from buf_daemon. */ - bogus_offset = kmem_alloc_pageable(&kernel_map, PAGE_SIZE, + bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE, VM_SUBSYS_BOGUS); vm_object_hold(&kernel_object); bogus_page = vm_page_alloc(&kernel_object, diff --git a/sys/libkern/arc4random.c b/sys/libkern/arc4random.c index 5234e455c7..40c12e3cda 100644 --- a/sys/libkern/arc4random.c +++ b/sys/libkern/arc4random.c @@ -174,7 +174,7 @@ arc4_init_pcpu(int cpuid) KASSERT(arc4_data_pcpu[cpuid] == NULL, ("arc4 was initialized on cpu%d", cpuid)); - d = (void *)kmem_alloc3(&kernel_map, sizeof(*d), VM_SUBSYS_GD, + d = (void *)kmem_alloc3(kernel_map, sizeof(*d), VM_SUBSYS_GD, KM_CPU(cpuid)); memset(d, 0, sizeof(*d)); diff --git a/sys/net/netisr.c b/sys/net/netisr.c index d1c12f1e19..a2ed295ee3 100644 --- a/sys/net/netisr.c +++ b/sys/net/netisr.c @@ -217,7 +217,7 @@ netisr_init(void) for (i = 0; i < ncpus; ++i) { struct netisr_data *nd; - nd = (void *)kmem_alloc3(&kernel_map, sizeof(*nd), + nd = (void *)kmem_alloc3(kernel_map, sizeof(*nd), VM_SUBSYS_GD, KM_CPU(i)); memset(nd, 0, sizeof(*nd)); TAILQ_INIT(&nd->netrulist); diff --git a/sys/platform/pc64/x86_64/machdep.c b/sys/platform/pc64/x86_64/machdep.c index 86fd535fa3..0b02d445e1 100644 --- a/sys/platform/pc64/x86_64/machdep.c +++ b/sys/platform/pc64/x86_64/machdep.c @@ -476,7 +476,7 @@ again: */ if (firstaddr == 0) { size = (vm_size_t)(v - firstaddr); - firstaddr = kmem_alloc(&kernel_map, round_page(size), + firstaddr = kmem_alloc(kernel_map, round_page(size), VM_SUBSYS_BUF); if (firstaddr == 0) panic("startup: no room for tables"); @@ -495,7 +495,7 @@ again: if ((vm_size_t)(v - firstaddr) != size) panic("startup: table size inconsistency"); - kmem_suballoc(&kernel_map, &clean_map, &clean_sva, &clean_eva, + kmem_suballoc(kernel_map, &clean_map, &clean_sva, &clean_eva, ((vm_offset_t)(nbuf + 16) * MAXBSIZE) + ((nswbuf_mem + nswbuf_kva) * MAXPHYS) + pager_map_size); kmem_suballoc(&clean_map, &buffer_map, &buffer_sva, &buffer_eva, diff --git a/sys/platform/pc64/x86_64/mp_machdep.c b/sys/platform/pc64/x86_64/mp_machdep.c index 421974a02d..456dc5e93e 100644 --- a/sys/platform/pc64/x86_64/mp_machdep.c +++ b/sys/platform/pc64/x86_64/mp_machdep.c @@ -467,7 +467,7 @@ start_all_aps(u_int boot_addr) /* This is a bit 
verbose, it will go away soon. */ pssize = sizeof(struct privatespace); - ps = (void *)kmem_alloc3(&kernel_map, pssize, VM_SUBSYS_GD, + ps = (void *)kmem_alloc3(kernel_map, pssize, VM_SUBSYS_GD, KM_CPU(x)); CPU_prvspace[x] = ps; #if 0 @@ -481,7 +481,7 @@ start_all_aps(u_int boot_addr) mi_gdinit(&gd->mi, x); cpu_gdinit(gd, x); ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1); - gd->mi.gd_ipiq = (void *)kmem_alloc3(&kernel_map, ipiq_size, + gd->mi.gd_ipiq = (void *)kmem_alloc3(kernel_map, ipiq_size, VM_SUBSYS_IPIQ, KM_CPU(x)); bzero(gd->mi.gd_ipiq, ipiq_size); @@ -541,7 +541,7 @@ start_all_aps(u_int boot_addr) gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid); ipiq_size = sizeof(struct lwkt_ipiq) * ncpus; - mycpu->gd_ipiq = (void *)kmem_alloc3(&kernel_map, ipiq_size, + mycpu->gd_ipiq = (void *)kmem_alloc3(kernel_map, ipiq_size, VM_SUBSYS_IPIQ, KM_CPU(0)); bzero(mycpu->gd_ipiq, ipiq_size); @@ -1642,7 +1642,7 @@ mp_bsp_simple_setup(void) gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid); ipiq_size = sizeof(struct lwkt_ipiq) * ncpus; - mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size, + mycpu->gd_ipiq = (void *)kmem_alloc(kernel_map, ipiq_size, VM_SUBSYS_IPIQ); bzero(mycpu->gd_ipiq, ipiq_size); diff --git a/sys/platform/pc64/x86_64/pmap.c b/sys/platform/pc64/x86_64/pmap.c index 57b0aef0bd..95d37204f3 100644 --- a/sys/platform/pc64/x86_64/pmap.c +++ b/sys/platform/pc64/x86_64/pmap.c @@ -1416,7 +1416,7 @@ pmap_init(void) if (initial_pvs < MINPV) initial_pvs = MINPV; pvzone = &pvzone_store; - pvinit = (void *)kmem_alloc(&kernel_map, + pvinit = (void *)kmem_alloc(kernel_map, initial_pvs * sizeof (struct pv_entry), VM_SUBSYS_PVENTRY); zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), @@ -2351,7 +2351,7 @@ pmap_pinit(struct pmap *pmap) */ if (pmap->pm_pml4 == NULL) { pmap->pm_pml4 = - (pml4_entry_t *)kmem_alloc_pageable(&kernel_map, + (pml4_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE * 2, VM_SUBSYS_PML4); pmap->pm_pml4_iso = (void *)((char *)pmap->pm_pml4 + PAGE_SIZE); @@ -2478,7 +2478,7 @@ pmap_puninit(pmap_t pmap) } if (pmap->pm_pml4) { KKASSERT(pmap->pm_pml4 != (void *)(PTOV_OFFSET + KPML4phys)); - kmem_free(&kernel_map, + kmem_free(kernel_map, (vm_offset_t)pmap->pm_pml4, PAGE_SIZE * 2); pmap->pm_pml4 = NULL; pmap->pm_pml4_iso = NULL; @@ -3230,8 +3230,8 @@ pmap_growkernel(vm_offset_t kstart, vm_offset_t kend) break; kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(vm_offset_t)(PAGE_SIZE * NPTEPG - 1); - if (kernel_vm_end - 1 >= vm_map_max(&kernel_map)) { - kernel_vm_end = vm_map_max(&kernel_map); + if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { + kernel_vm_end = vm_map_max(kernel_map); break; } } @@ -3254,8 +3254,8 @@ pmap_growkernel(vm_offset_t kstart, vm_offset_t kend) kstart = rounddown2(kstart, (vm_offset_t)(PAGE_SIZE * NPTEPG)); kend = roundup2(kend, (vm_offset_t)(PAGE_SIZE * NPTEPG)); - if (kend - 1 >= vm_map_max(&kernel_map)) - kend = vm_map_max(&kernel_map); + if (kend - 1 >= vm_map_max(kernel_map)) + kend = vm_map_max(kernel_map); while (kstart < kend) { pt = pmap_pt(&kernel_pmap, kstart); @@ -3293,8 +3293,8 @@ pmap_growkernel(vm_offset_t kstart, vm_offset_t kend) if ((*pt & kernel_pmap.pmap_bits[PG_V_IDX]) != 0) { kstart = (kstart + PAGE_SIZE * NPTEPG) & ~(vm_offset_t)(PAGE_SIZE * NPTEPG - 1); - if (kstart - 1 >= vm_map_max(&kernel_map)) { - kstart = vm_map_max(&kernel_map); + if (kstart - 1 >= vm_map_max(kernel_map)) { + kstart = vm_map_max(kernel_map); break; } continue; @@ -3324,8 +3324,8 @@ pmap_growkernel(vm_offset_t kstart, vm_offset_t kend) 
kstart = (kstart + PAGE_SIZE * NPTEPG) & ~(vm_offset_t)(PAGE_SIZE * NPTEPG - 1); - if (kstart - 1 >= vm_map_max(&kernel_map)) { - kstart = vm_map_max(&kernel_map); + if (kstart - 1 >= vm_map_max(kernel_map)) { + kstart = vm_map_max(kernel_map); break; } } @@ -5955,7 +5955,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode) offset = pa & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); - va = kmem_alloc_nofault(&kernel_map, size, VM_SUBSYS_MAPDEV, PAGE_SIZE); + va = kmem_alloc_nofault(kernel_map, size, VM_SUBSYS_MAPDEV, PAGE_SIZE); if (va == 0) panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); @@ -5985,7 +5985,7 @@ pmap_unmapdev(vm_offset_t va, vm_size_t size) offset = va & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); pmap_qremove(va, size >> PAGE_SHIFT); - kmem_free(&kernel_map, base, size); + kmem_free(kernel_map, base, size); } /* diff --git a/sys/platform/pc64/x86_64/trap.c b/sys/platform/pc64/x86_64/trap.c index 6e0973904a..1decc6b74d 100644 --- a/sys/platform/pc64/x86_64/trap.c +++ b/sys/platform/pc64/x86_64/trap.c @@ -874,7 +874,7 @@ trap_pfault(struct trapframe *frame, int usermode) goto nogo; } - map = &kernel_map; + map = kernel_map; } else { /* * This is a fault on non-kernel virtual memory. @@ -940,7 +940,7 @@ trap_pfault(struct trapframe *frame, int usermode) lwkt_tokref_t stop = td->td_toks_stop; - if (map != &kernel_map) { + if (map != kernel_map) { /* * Keep swapout from messing with us during this * critical time. diff --git a/sys/platform/vkernel64/platform/pmap.c b/sys/platform/vkernel64/platform/pmap.c index 9a29271b4f..7682f901c0 100644 --- a/sys/platform/vkernel64/platform/pmap.c +++ b/sys/platform/vkernel64/platform/pmap.c @@ -662,7 +662,7 @@ pmap_init(void) initial_pvs = MINPV; pvzone = &pvzone_store; pvinit = (struct pv_entry *) - kmem_alloc(&kernel_map, + kmem_alloc(kernel_map, initial_pvs * sizeof (struct pv_entry), VM_SUBSYS_PVENTRY); zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit, @@ -1237,7 +1237,7 @@ pmap_pinit(struct pmap *pmap) */ if (pmap->pm_pml4 == NULL) { pmap->pm_pml4 = (pml4_entry_t *) - kmem_alloc_pageable(&kernel_map, PAGE_SIZE, + kmem_alloc_pageable(kernel_map, PAGE_SIZE, VM_SUBSYS_PML4); } @@ -1299,7 +1299,7 @@ pmap_puninit(pmap_t pmap) KKASSERT(pmap->pm_stats.wired_count == 0); } if (pmap->pm_pml4) { - kmem_free(&kernel_map, (vm_offset_t)pmap->pm_pml4, PAGE_SIZE); + kmem_free(kernel_map, (vm_offset_t)pmap->pm_pml4, PAGE_SIZE); pmap->pm_pml4 = NULL; } if (pmap->pm_pteobj) { @@ -1634,15 +1634,15 @@ pmap_growkernel(vm_offset_t kstart, vm_offset_t kend) rounddown2(kernel_vm_end + PAGE_SIZE * NPTEPG, PAGE_SIZE * NPTEPG); nkpt++; - if (kernel_vm_end - 1 >= vm_map_max(&kernel_map)) { - kernel_vm_end = vm_map_max(&kernel_map); + if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { + kernel_vm_end = vm_map_max(kernel_map); break; } } } addr = roundup2(addr, PAGE_SIZE * NPTEPG); - if (addr - 1 >= vm_map_max(&kernel_map)) - addr = vm_map_max(&kernel_map); + if (addr - 1 >= vm_map_max(kernel_map)) + addr = vm_map_max(kernel_map); while (kernel_vm_end < addr) { pde = pmap_pde(&kernel_pmap, kernel_vm_end); if (pde == NULL) { @@ -1669,8 +1669,8 @@ pmap_growkernel(vm_offset_t kstart, vm_offset_t kend) kernel_vm_end = rounddown2(kernel_vm_end + PAGE_SIZE * NPTEPG, PAGE_SIZE * NPTEPG); - if (kernel_vm_end - 1 >= vm_map_max(&kernel_map)) { - kernel_vm_end = vm_map_max(&kernel_map); + if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { + kernel_vm_end = vm_map_max(kernel_map); break; } continue; @@ -1699,8 +1699,8 @@ 
pmap_growkernel(vm_offset_t kstart, vm_offset_t kend) kernel_vm_end = rounddown2(kernel_vm_end + PAGE_SIZE * NPTEPG, PAGE_SIZE * NPTEPG); - if (kernel_vm_end - 1 >= vm_map_max(&kernel_map)) { - kernel_vm_end = vm_map_max(&kernel_map); + if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) { + kernel_vm_end = vm_map_max(kernel_map); break; } } diff --git a/sys/platform/vkernel64/x86_64/autoconf.c b/sys/platform/vkernel64/x86_64/autoconf.c index 20f7cc555b..fa12a5c989 100644 --- a/sys/platform/vkernel64/x86_64/autoconf.c +++ b/sys/platform/vkernel64/x86_64/autoconf.c @@ -172,17 +172,17 @@ cpu_startup(void *dummy) /* * Allocate memory for the buffer cache */ - buf = (void *)kmem_alloc(&kernel_map, + buf = (void *)kmem_alloc(kernel_map, nbuf * sizeof(struct buf), VM_SUBSYS_BUF); - swbuf_mem = (void *)kmem_alloc(&kernel_map, + swbuf_mem = (void *)kmem_alloc(kernel_map, nswbuf_mem * sizeof(struct buf), VM_SUBSYS_BUF); - swbuf_kva = (void *)kmem_alloc(&kernel_map, + swbuf_kva = (void *)kmem_alloc(kernel_map, nswbuf_kva * sizeof(struct buf), VM_SUBSYS_BUF); - kmem_suballoc(&kernel_map, &clean_map, &clean_sva, &clean_eva, + kmem_suballoc(kernel_map, &clean_map, &clean_sva, &clean_eva, (nbuf * MAXBSIZE * 2) + (nswbuf_mem + nswbuf_kva) * MAXPHYS + pager_map_size); diff --git a/sys/platform/vkernel64/x86_64/mp.c b/sys/platform/vkernel64/x86_64/mp.c index 4462424932..b25b292c2c 100644 --- a/sys/platform/vkernel64/x86_64/mp.c +++ b/sys/platform/vkernel64/x86_64/mp.c @@ -173,7 +173,7 @@ mp_start(void) * cpu0 initialization */ ipiq_size = sizeof(struct lwkt_ipiq) * ncpus; - mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size, + mycpu->gd_ipiq = (void *)kmem_alloc(kernel_map, ipiq_size, VM_SUBSYS_IPIQ); bzero(mycpu->gd_ipiq, ipiq_size); @@ -442,7 +442,7 @@ start_all_aps(u_int boot_addr) #endif ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1); - gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size, + gd->mi.gd_ipiq = (void *)kmem_alloc(kernel_map, ipiq_size, VM_SUBSYS_IPIQ); bzero(gd->mi.gd_ipiq, ipiq_size); diff --git a/sys/platform/vkernel64/x86_64/trap.c b/sys/platform/vkernel64/x86_64/trap.c index b20ebe9fb4..9b7da3a193 100644 --- a/sys/platform/vkernel64/x86_64/trap.c +++ b/sys/platform/vkernel64/x86_64/trap.c @@ -797,7 +797,7 @@ trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva) /* * This is a fault on kernel virtual memory. */ - map = &kernel_map; + map = kernel_map; } else { /* * This is a fault on non-kernel virtual memory. @@ -820,7 +820,7 @@ trap_pfault(struct trapframe *frame, int usermode, vm_offset_t eva) else ftype = VM_PROT_READ; - if (map != &kernel_map) { + if (map != kernel_map) { /* * Keep swapout from messing with us during this * critical time. diff --git a/sys/vfs/procfs/procfs_mem.c b/sys/vfs/procfs/procfs_mem.c index 7c4c10dd86..1a16874143 100644 --- a/sys/vfs/procfs/procfs_mem.c +++ b/sys/vfs/procfs/procfs_mem.c @@ -100,7 +100,7 @@ procfs_rwmem(struct proc *curp, struct proc *p, struct uio *uio) if (writing) reqprot |= VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE; - kva = kmem_alloc_pageable(&kernel_map, PAGE_SIZE, VM_SUBSYS_PROC); + kva = kmem_alloc_pageable(kernel_map, PAGE_SIZE, VM_SUBSYS_PROC); /* * Only map in one page at a time. 
We don't have to, but it @@ -171,7 +171,7 @@ procfs_rwmem(struct proc *curp, struct proc *p, struct uio *uio) } while (error == 0 && uio->uio_resid > 0); vmspace_drop(vm); - kmem_free(&kernel_map, kva, PAGE_SIZE); + kmem_free(kernel_map, kva, PAGE_SIZE); return (error); } diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c index 2ab8f6d061..ef99603df4 100644 --- a/sys/vm/vm_contig.c +++ b/sys/vm/vm_contig.c @@ -504,7 +504,7 @@ vm_contig_pg_free(vm_pindex_t start, u_long size) * * Map previously allocated (vm_contig_pg_alloc) range of pages from * vm_page_array[] into the KVA. Once mapped, the pages are part of - * the Kernel, and are to free'ed with kmem_free(&kernel_map, addr, size). + * the Kernel, and are to free'ed with kmem_free(kernel_map, addr, size). * * No requirements. */ @@ -519,7 +519,7 @@ vm_contig_pg_kmap(vm_pindex_t start, u_long size, vm_map_t map, int flags) if (size == 0) panic("vm_contig_pg_kmap: size must not be 0"); size = round_page(size); - addr = kmem_alloc_pageable(&kernel_map, size, VM_SUBSYS_CONTIG); + addr = kmem_alloc_pageable(kernel_map, size, VM_SUBSYS_CONTIG); if (addr) { pa = VM_PAGE_TO_PHYS(&pga[start]); for (offset = 0; offset < size; offset += PAGE_SIZE) @@ -545,7 +545,7 @@ contigmalloc( unsigned long boundary) { return contigmalloc_map(size, type, flags, low, high, alignment, - boundary, &kernel_map); + boundary, kernel_map); } /* @@ -591,7 +591,7 @@ contigfree(void *addr, unsigned long size, struct malloc_type *type) pa = pmap_kextract((vm_offset_t)addr); pmap_qremove((vm_offset_t)addr, size / PAGE_SIZE); - kmem_free(&kernel_map, (vm_offset_t)addr, size); + kmem_free(kernel_map, (vm_offset_t)addr, size); m = PHYS_TO_VM_PAGE(pa); vm_page_free_contig(m, size); @@ -605,5 +605,5 @@ kmem_alloc_contig(vm_offset_t size, vm_paddr_t low, vm_paddr_t high, vm_offset_t alignment) { return ((vm_offset_t)contigmalloc_map(size, M_DEVBUF, M_NOWAIT, low, - high, alignment, 0ul, &kernel_map)); + high, alignment, 0ul, kernel_map)); } diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c index 21457063e8..72ff646cce 100644 --- a/sys/vm/vm_fault.c +++ b/sys/vm/vm_fault.c @@ -461,7 +461,7 @@ RetryFault: (fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE) { if (result == KERN_INVALID_ADDRESS && growstack && - map != &kernel_map && curproc != NULL) { + map != kernel_map && curproc != NULL) { result = vm_map_growstack(map, vaddr); if (result == KERN_SUCCESS) { growstack = 0; @@ -802,7 +802,7 @@ done2: p = td->td_proc; if ((fault_flags & VM_FAULT_USERMODE) && lp && p->p_limit && map->pmap && vm_pageout_memuse_mode >= 1 && - map != &kernel_map) { + map != kernel_map) { vm_pindex_t limit; vm_pindex_t size; @@ -1101,7 +1101,7 @@ RetryFault: (fs.fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE) { if (result == KERN_INVALID_ADDRESS && growstack && - map != &kernel_map && curproc != NULL) { + map != kernel_map && curproc != NULL) { result = vm_map_growstack(map, vaddr); if (result == KERN_SUCCESS) { growstack = 0; @@ -1976,7 +1976,7 @@ readrest: * around having the machine panic on a kernel space * fault w/ I/O error. */ - if (((fs->map != &kernel_map) && + if (((fs->map != kernel_map) && (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) { if (fs->m) { /* from just above */ diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c index f05f9b46e5..4696c1a2c5 100644 --- a/sys/vm/vm_glue.c +++ b/sys/vm/vm_glue.c @@ -125,14 +125,14 @@ kernacc(c_caddr_t addr, int len, int rw) /* * Nominal kernel memory access - check access via kernel_map. 
*/ - if ((vm_offset_t)addr + len > vm_map_max(&kernel_map) || + if ((vm_offset_t)addr + len > vm_map_max(kernel_map) || (vm_offset_t)addr + len < (vm_offset_t)addr) { return (FALSE); } prot = rw; saddr = trunc_page((vm_offset_t)addr); eaddr = round_page((vm_offset_t)addr + len); - rv = vm_map_check_protection(&kernel_map, saddr, eaddr, prot, FALSE); + rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot, FALSE); return (rv == TRUE); } diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c index 35b6c9c1f7..b45b1cd0cd 100644 --- a/sys/vm/vm_kern.c +++ b/sys/vm/vm_kern.c @@ -84,7 +84,9 @@ #include #include -struct vm_map kernel_map; +static struct vm_map kernel_map_store; +struct vm_map *kernel_map = &kernel_map_store; + struct vm_map clean_map; struct vm_map buffer_map; @@ -110,8 +112,8 @@ kmem_alloc_swapbacked(kmem_anon_desc_t *kp, vm_size_t size, vm_subsys_t id) npages = size / PAGE_SIZE; if (kp->map == NULL) - kp->map = &kernel_map; - kp->data = vm_map_min(&kernel_map); + kp->map = kernel_map; + kp->data = vm_map_min(kernel_map); kp->size = size; kp->object = vm_object_allocate(OBJT_DEFAULT, npages); @@ -426,7 +428,7 @@ kmem_alloc_attr(vm_map_t map, vm_size_t size, vm_subsys_t id, vm_map_entry_release(count); return (0); } - offset = addr - vm_map_min(&kernel_map); + offset = addr - vm_map_min(kernel_map); vm_object_hold(&kernel_object); vm_object_reference_locked(&kernel_object); vm_map_insert(map, &count, @@ -493,7 +495,7 @@ kmem_init(void) vm_map_t m; int count; - m = &kernel_map; + m = kernel_map; vm_map_init(m, KvaStart, KvaEnd, &kernel_pmap); vm_map_lock(m); /* N.B.: cannot use kgdb to debug, starting with this assignment ... */ diff --git a/sys/vm/vm_kern.h b/sys/vm/vm_kern.h index 11a3c973aa..c38d1f2f66 100644 --- a/sys/vm/vm_kern.h +++ b/sys/vm/vm_kern.h @@ -100,7 +100,7 @@ typedef struct kmem_anon_desc kmem_anon_desc_t; /* Kernel memory management definitions. */ extern struct vm_map buffer_map; -extern struct vm_map kernel_map; +extern struct vm_map *kernel_map; extern struct vm_map clean_map; extern u_int vm_kmem_size; diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c index b3bf826925..050987c490 100644 --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -1559,7 +1559,7 @@ vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length, * fill 128G worth of page tables!). Therefore we must not * retry. */ - if (map == &kernel_map) { + if (map == kernel_map) { vm_offset_t kstop; kstop = round_page(start + length); @@ -4506,7 +4506,7 @@ RetryLookup: * to improve concurrent fault performance. This is only * applicable to userspace. 
*/ - if (map != &kernel_map && + if (map != kernel_map && entry->maptype == VM_MAPTYPE_NORMAL && ((entry->ba.start ^ entry->ba.end) & ~MAP_ENTRY_PARTITION_MASK) && diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c index a6d2205f1e..2227cb7287 100644 --- a/sys/vm/vm_object.c +++ b/sys/vm/vm_object.c @@ -1795,7 +1795,7 @@ vm_object_in_map(vm_object_t object) allproc_scan(vm_object_in_map_callback, &info, 0); if (info.rv) return 1; - if( _vm_object_in_map(&kernel_map, object, 0)) + if( _vm_object_in_map(kernel_map, object, 0)) return 1; if( _vm_object_in_map(&pager_map, object, 0)) return 1; diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c index 18e39c42ec..e86fd3ea6e 100644 --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -857,7 +857,7 @@ vm_page_startup_finish(void *dummy __unused) /* * hash table for vm_page_lookup_quick() */ - mp = (void *)kmem_alloc3(&kernel_map, + mp = (void *)kmem_alloc3(kernel_map, (vm_page_hash_size + VM_PAGE_HASH_SET) * sizeof(*vm_page_hash), VM_SUBSYS_VMPGHASH, KM_CPU(0)); diff --git a/sys/vm/vm_pager.c b/sys/vm/vm_pager.c index b44da7c523..632730f2cc 100644 --- a/sys/vm/vm_pager.c +++ b/sys/vm/vm_pager.c @@ -314,7 +314,7 @@ vm_pager_bufferinit(void *dummy __unused) * systems. */ nswbuf_raw = nbuf * 2; - swbuf_raw = (void *)kmem_alloc3(&kernel_map, + swbuf_raw = (void *)kmem_alloc3(kernel_map, round_page(nswbuf_raw * sizeof(struct buf)), VM_SUBSYS_BUFDATA, KM_NOTLBSYNC); diff --git a/sys/vm/vm_zone.c b/sys/vm/vm_zone.c index c5f571b7b8..19db284f8d 100644 --- a/sys/vm/vm_zone.c +++ b/sys/vm/vm_zone.c @@ -328,7 +328,7 @@ zinitna(vm_zone_t z, char *name, size_t size, long nentries, uint32_t flags) totsize = round_page((size_t)z->zsize * nentries); atomic_add_long(&zone_kmem_kvaspace, totsize); - z->zkva = kmem_alloc_pageable(&kernel_map, totsize, + z->zkva = kmem_alloc_pageable(kernel_map, totsize, VM_SUBSYS_ZALLOC); if (z->zkva == 0) { LIST_REMOVE(z, zlink); @@ -483,7 +483,7 @@ zdestroy(vm_zone_t z) */ KKASSERT((z->zflags & ZONE_INTERRUPT) == 0); for (i = 0; i < z->zkmcur; i++) { - kmem_free(&kernel_map, z->zkmvec[i], + kmem_free(kernel_map, z->zkmvec[i], (size_t)z->zalloc * PAGE_SIZE); atomic_subtract_long(&zone_kern_pages, z->zalloc); } @@ -629,7 +629,7 @@ zget(vm_zone_t z, int *tryagainp) nbytes = (size_t)z->zalloc * PAGE_SIZE; z->zpagecount += z->zalloc; /* Track total memory use */ - item = (void *)kmem_alloc3(&kernel_map, nbytes, + item = (void *)kmem_alloc3(kernel_map, nbytes, VM_SUBSYS_ZALLOC, KM_KRESERVE); /* note: z might be modified due to blocking */ @@ -647,7 +647,7 @@ zget(vm_zone_t z, int *tryagainp) nbytes = (size_t)z->zalloc * PAGE_SIZE; z->zpagecount += z->zalloc; /* Track total memory use */ - item = (void *)kmem_alloc3(&kernel_map, nbytes, + item = (void *)kmem_alloc3(kernel_map, nbytes, VM_SUBSYS_ZALLOC, 0); /* note: z might be modified due to blocking */ -- 2.41.0
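The heart of this change is the new definition in sys/vm/vm_kern.c, where the map object becomes a file-private static and only a pointer to it is exported. Below is a minimal, self-contained sketch of that idiom, for readers porting out-of-tree code. The kernel_map/kernel_map_store pattern comes straight from the patch; the struct fields, the simplified vm_map_min() accessor, and the main() driver are illustrative stand-ins, not kernel code.

    #include <stdio.h>

    struct vm_map {
            unsigned long min_addr;         /* illustrative stand-in fields */
            unsigned long max_addr;
    };

    /*
     * Before the patch, "struct vm_map kernel_map;" was exported directly,
     * so every consumer had to write &kernel_map.  After the patch the
     * object itself is file-private and only the pointer is exported,
     * matching NetBSD and letting call sites pass kernel_map as-is.
     */
    static struct vm_map kernel_map_store;
    struct vm_map *kernel_map = &kernel_map_store;

    static unsigned long
    vm_map_min(const struct vm_map *map)
    {
            return map->min_addr;
    }

    int
    main(void)
    {
            kernel_map->min_addr = 0x1000;  /* stand-in for KvaStart */

            /* Call sites use the pointer directly, no '&' needed. */
            printf("min = %#lx\n", vm_map_min(kernel_map));
            return 0;
    }

One consequence worth noting: comparisons such as "map == kernel_map" (see the trap.c and vm_fault.c hunks) now compare against the exported pointer value rather than a taken address, and any unported code that still writes &kernel_map yields a 'struct vm_map **', so it fails type-checking at build time instead of silently misbehaving.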