kernel - Tag vm_map_entry structure, slight optimization to zalloc, misc.
author Matthew Dillon <dillon@apollo.backplane.com>
Sat, 17 Dec 2016 03:39:46 +0000 (19:39 -0800)
committer Matthew Dillon <dillon@apollo.backplane.com>
Sat, 17 Dec 2016 03:39:46 +0000 (19:39 -0800)
* Tag the vm_map_entry structure, allowing debugging programs to
  break-down how KMEM is being used more easily.

  This requires an additional argument to vm_map_find() and most
  kmem_alloc*() functions.

* Remove the page chunking parameter to zinit() and zinitna().  It was
  only being used degeneratively.  Increase the chunking from one page
  to four pages, which will reduce the amount of vm_map_entry spam in
  the kernel_map.

* Use atomic ops when adjusting zone_kern_pages.

38 files changed:
sys/dev/disk/dm/snapshot/dm_target_snapshot.c
sys/dev/drm/drm_scatter.c
sys/dev/drm/i915/i915_gem.c
sys/dev/drm/include/linux/vmalloc.h
sys/dev/drm/ttm/ttm_bo_util.c
sys/kern/imgact_aout.c
sys/kern/imgact_elf.c
sys/kern/imgact_gzip.c
sys/kern/init_main.c
sys/kern/kern_slaballoc.c
sys/kern/link_elf_obj.c
sys/kern/sys_pipe.c
sys/kern/sys_process.c
sys/kern/sysv_shm.c
sys/kern/vfs_bio.c
sys/netbt/bt_proto.c
sys/netbt/rfcomm_session.c
sys/platform/pc64/x86_64/efirt.c
sys/platform/pc64/x86_64/machdep.c
sys/platform/pc64/x86_64/mp_machdep.c
sys/platform/pc64/x86_64/pmap.c
sys/platform/vkernel64/platform/pmap.c
sys/platform/vkernel64/x86_64/autoconf.c
sys/platform/vkernel64/x86_64/mp.c
sys/vfs/hammer2/hammer2_bulkfree.c
sys/vfs/procfs/procfs_mem.c
sys/vm/swap_pager.c
sys/vm/vm_contig.c
sys/vm/vm_extern.h
sys/vm/vm_kern.c
sys/vm/vm_map.c
sys/vm/vm_map.h
sys/vm/vm_mmap.c
sys/vm/vm_object.c
sys/vm/vm_pager.c
sys/vm/vm_unix.c
sys/vm/vm_zone.c
sys/vm/vm_zone.h

index 62387a7..7e5d806 100644 (file)
@@ -125,8 +125,9 @@ dm_target_snapshot_init(dm_table_entry_t *table_en, int argc, char **argv)
        if ((dmp_snap = dm_pdev_insert(argv[0])) == NULL)
                return ENOENT;
 
-       if ((tsc = kmem_alloc(sizeof(dm_target_snapshot_config_t), KM_NOSLEEP))
-           == NULL)
+       tsc = kmem_alloc(sizeof(dm_target_snapshot_config_t),
+                        VM_SUBSYS_DM, KM_NOSLEEP);
+       if (tsc == NULL)
                return 1;
 
        tsc->tsc_persistent_dev = 0;
@@ -271,8 +272,9 @@ dm_target_snapshot_orig_init(dm_table_entry_t *table_en, int argc, char **argv)
        if ((dmp_real = dm_pdev_insert(argv[0])) == NULL)
                return ENOENT;
 
-       if ((tsoc = kmem_alloc(sizeof(dm_target_snapshot_origin_config_t), KM_NOSLEEP))
-           == NULL)
+       tsoc = kmem_alloc(sizeof(dm_target_snapshot_origin_config_t),
+                         VM_SUBSYS_DM, KM_NOSLEEP);
+       if (tsoc == NULL)
                return 1;
 
        tsoc->tsoc_real_dev = dmp_real;
@@ -308,7 +310,8 @@ dm_target_snapshot_orig_table(void *target_config)
 
        kprintf("real_dev name %s\n", tsoc->tsoc_real_dev->name);
 
-       if ((params = kmem_alloc(prm_len, KM_NOSLEEP)) == NULL)
+       params = kmem_alloc(prm_len, VM_SUBSYS_DM, KM_NOSLEEP);
+       if (params == NULL)
                return NULL;
 
        kprintf("%s\n", tsoc->tsoc_real_dev->name);
index d63500f..5ba653b 100644 (file)
@@ -74,8 +74,10 @@ int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
        entry->busaddr = kmalloc(entry->pages * sizeof(*entry->busaddr),
            M_DRM, M_WAITOK | M_ZERO);
 
-       entry->vaddr = kmem_alloc_attr(&kernel_map, size, M_WAITOK | M_ZERO,
-           0, BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING);
+       entry->vaddr = kmem_alloc_attr(&kernel_map, size,
+                                      VM_SUBSYS_DRM_SCAT, M_WAITOK | M_ZERO,
+                                      0, BUS_SPACE_MAXADDR_32BIT,
+                                      VM_MEMATTR_WRITE_COMBINING);
        if (entry->vaddr == 0) {
                drm_sg_cleanup(entry);
                return (-ENOMEM);
index a5f3c5b..1bc7154 100644 (file)
@@ -1745,7 +1745,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                         args->offset, &addr, args->size,
                         256 * 1024, /* align */
                         TRUE, /* fitit */
-                        VM_MAPTYPE_NORMAL, /* maptype */
+                        VM_MAPTYPE_NORMAL, VM_SUBSYS_DRM_GEM,
                         VM_PROT_READ | VM_PROT_WRITE, /* prot */
                         VM_PROT_READ | VM_PROT_WRITE, /* max */
                         MAP_SHARED /* cow */);
index 36ac021..b753929 100644 (file)
@@ -37,7 +37,8 @@ vmap(struct vm_page **pages, unsigned int count,
        size_t size;
 
        size = count * PAGE_SIZE;
-       off = kmem_alloc_nofault(&kernel_map, size, PAGE_SIZE);
+       off = kmem_alloc_nofault(&kernel_map, size,
+                                VM_SUBSYS_DRM_VMAP, PAGE_SIZE);
        if (off == 0)
                return (NULL);
 
index 6f5ca05..8075d24 100644 (file)
@@ -536,8 +536,11 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
                        ttm_io_prot(mem->placement);
                map->bo_kmap_type = ttm_bo_map_vmap;
                map->num_pages = num_pages;
-               map->virtual = (void *)kmem_alloc_nofault(&kernel_map,
-                   num_pages * PAGE_SIZE, PAGE_SIZE);
+               map->virtual =
+                       (void *)kmem_alloc_nofault(&kernel_map,
+                                                  num_pages * PAGE_SIZE,
+                                                  VM_SUBSYS_DRM_TTM,
+                                                  PAGE_SIZE);
                if (map->virtual != NULL) {
                        for (i = 0; i < num_pages; i++) {
                                /* XXXKIB hack */
index d27009f..faca67a 100644 (file)
@@ -188,6 +188,7 @@ exec_aout_imgact(struct image_params *imgp)
                file_offset,
                virtual_offset, text_end,
                VM_MAPTYPE_NORMAL,
+               VM_SUBSYS_IMGACT,
                VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_ALL,
                MAP_COPY_ON_WRITE | MAP_PREFAULT | MAP_PREFAULT_RELOCK);
 
@@ -205,6 +206,7 @@ exec_aout_imgact(struct image_params *imgp)
                        file_offset + a_out->a_text,
                        text_end, data_end,
                        VM_MAPTYPE_NORMAL,
+                       VM_SUBSYS_IMGACT,
                        VM_PROT_ALL, VM_PROT_ALL,
                        MAP_COPY_ON_WRITE | MAP_PREFAULT | MAP_PREFAULT_RELOCK);
                if (error) {
@@ -220,6 +222,7 @@ exec_aout_imgact(struct image_params *imgp)
                error = vm_map_insert(map, &count, NULL, NULL,
                        0, data_end, data_end + bss_size,
                        VM_MAPTYPE_NORMAL,
+                       VM_SUBSYS_IMGACT,
                        VM_PROT_ALL, VM_PROT_ALL,
                        0);
                if (error) {
index 67b1153..aeee27d 100644 (file)
@@ -319,8 +319,8 @@ __elfN(load_section)(struct proc *p, struct vmspace *vmspace, struct vnode *vp,
                                      map_addr,         /* virtual start */
                                      map_addr + map_len,/* virtual end */
                                      VM_MAPTYPE_NORMAL,
-                                     prot, VM_PROT_ALL,
-                                     cow);
+                                     VM_SUBSYS_IMGACT,
+                                     prot, VM_PROT_ALL, cow);
                vm_map_unlock(&vmspace->vm_map);
                vm_map_entry_release(count);
 
@@ -361,8 +361,8 @@ __elfN(load_section)(struct proc *p, struct vmspace *vmspace, struct vnode *vp,
                                        map_addr,
                                        map_addr + map_len,
                                        VM_MAPTYPE_NORMAL,
-                                       VM_PROT_ALL, VM_PROT_ALL,
-                                       0);
+                                       VM_SUBSYS_IMGACT,
+                                       VM_PROT_ALL, VM_PROT_ALL, 0);
                vm_map_unlock(&vmspace->vm_map);
                vm_map_entry_release(count);
                if (rv != KERN_SUCCESS) {
index 5096495..f93104e 100644 (file)
@@ -249,7 +249,8 @@ do_aout_hdr(struct imgact_gzip * gz)
                error = vm_map_find(&vmspace->vm_map,
                                    NULL, NULL,
                                    0, &vmaddr, gz->bss_size, PAGE_SIZE,
-                                   FALSE, VM_MAPTYPE_NORMAL,
+                                   FALSE,
+                                   VM_MAPTYPE_NORMAL, VM_SUBSYS_IMGACT,
                                    VM_PROT_ALL, VM_PROT_ALL, 0);
                if (error) {
                        gz->where = __LINE__;
index 79f7411..4ed9335 100644 (file)
@@ -588,8 +588,8 @@ start_init(void *dummy, struct trapframe *frame)
        addr = trunc_page(USRSTACK - PAGE_SIZE);
        error = vm_map_find(&p->p_vmspace->vm_map, NULL, NULL,
                            0, &addr, PAGE_SIZE,
-                           PAGE_SIZE,
-                           FALSE, VM_MAPTYPE_NORMAL,
+                           PAGE_SIZE, FALSE,
+                           VM_MAPTYPE_NORMAL, VM_SUBSYS_INIT,
                            VM_PROT_ALL, VM_PROT_ALL, 0);
        if (error)
                panic("init: couldn't allocate argument space");
index 1b04d15..96e8ef8 100644 (file)
@@ -1538,8 +1538,8 @@ kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
                  &kernel_object, NULL,
                  addr, addr, addr + size,
                  VM_MAPTYPE_NORMAL,
-                 VM_PROT_ALL, VM_PROT_ALL,
-                 0);
+                 VM_SUBSYS_KMALLOC,
+                 VM_PROT_ALL, VM_PROT_ALL, 0);
     vm_object_drop(&kernel_object);
     vm_map_set_wired_quick(&kernel_map, addr, size, &count);
     vm_map_unlock(&kernel_map);
index 9668310..69bfe48 100644 (file)
@@ -662,8 +662,8 @@ link_elf_obj_load_file(const char *filename, linker_file_t * result)
        mapbase = KERNBASE;
        error = vm_map_find(&kernel_map, ef->object, NULL,
                            0, &mapbase, round_page(mapsize),
-                           PAGE_SIZE,
-                           TRUE, VM_MAPTYPE_NORMAL,
+                           PAGE_SIZE, TRUE,
+                           VM_MAPTYPE_NORMAL, VM_SUBSYS_IMGACT,
                            VM_PROT_ALL, VM_PROT_ALL, FALSE);
        vm_object_drop(ef->object);
        if (error) {
index d197d04..03a22d5 100644 (file)
@@ -346,8 +346,8 @@ pipespace(struct pipe *cpipe, int size)
 
                error = vm_map_find(&kernel_map, object, NULL,
                                    0, (vm_offset_t *)&buffer, size,
-                                   PAGE_SIZE,
-                                   1, VM_MAPTYPE_NORMAL,
+                                   PAGE_SIZE, TRUE,
+                                   VM_MAPTYPE_NORMAL, VM_SUBSYS_PIPE,
                                    VM_PROT_ALL, VM_PROT_ALL, 0);
 
                if (error != KERN_SUCCESS) {
index dccbeff..f8d03a4 100644 (file)
@@ -87,8 +87,8 @@ pread (struct proc *procp, unsigned int addr, unsigned int *retval) {
        /* Find space in kernel_map for the page we're interested in */
        rv = vm_map_find (&kernel_map, object, NULL,
                          IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
-                         PAGE_SIZE,
-                         0, VM_MAPTYPE_NORMAL,
+                         PAGE_SIZE, FALSE,
+                         VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
                          VM_PROT_ALL, VM_PROT_ALL, 0);
 
        if (!rv) {
@@ -173,8 +173,8 @@ pwrite (struct proc *procp, unsigned int addr, unsigned int datum) {
        /* Find space in kernel_map for the page we're interested in */
        rv = vm_map_find (&kernel_map, object, NULL,
                          IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
-                         PAGE_SIZE,
-                         0, VM_MAPTYPE_NORMAL,
+                         PAGE_SIZE, FALSE,
+                         VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
                          VM_PROT_ALL, VM_PROT_ALL, 0);
        if (!rv) {
                vm_object_reference XXX (object);
index bff0495..f13bdb9 100644 (file)
@@ -337,7 +337,7 @@ again:
                         0, &attach_va, size,
                         align,
                         ((flags & MAP_FIXED) ? 0 : 1), 
-                        VM_MAPTYPE_NORMAL,
+                        VM_MAPTYPE_NORMAL, VM_SUBSYS_SHMEM,
                         prot, prot, 0);
        vm_object_drop(shm_handle->shm_object);
        if (rv != KERN_SUCCESS) {
index 1ead0d3..a60ccba 100644 (file)
@@ -727,7 +727,8 @@ bufinit(void *dummy __unused)
         * from buf_daemon.
         */
 
-       bogus_offset = kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
+       bogus_offset = kmem_alloc_pageable(&kernel_map, PAGE_SIZE,
+                                          VM_SUBSYS_BOGUS);
        vm_object_hold(&kernel_object);
        bogus_page = vm_page_alloc(&kernel_object,
                                   (bogus_offset >> PAGE_SHIFT),
index 0a1ca9b..986ffa2 100644 (file)
@@ -145,15 +145,16 @@ static void
 netbt_init(void)
 {
        l2cap_pdu_pool = zinit("l2cap_pdu", sizeof(struct l2cap_pdu), 1,
-           ZONE_DESTROYABLE, 1);
+                              ZONE_DESTROYABLE);
        if (l2cap_pdu_pool == NULL)
                goto fail;
        l2cap_req_pool = zinit("l2cap_req", sizeof(struct l2cap_req), 1,
-           ZONE_DESTROYABLE, 1);
+                              ZONE_DESTROYABLE);
        if (l2cap_req_pool == NULL)
                goto fail;
        rfcomm_credit_pool = zinit("rfcomm_credit",
-           sizeof(struct rfcomm_credit), 1, ZONE_DESTROYABLE, 1);
+                                  sizeof(struct rfcomm_credit), 1,
+                                  ZONE_DESTROYABLE);
        if (rfcomm_credit_pool == NULL)
                goto fail;
        return;
index 47c81ce..01d97fe 100644 (file)
@@ -159,7 +159,8 @@ void
 rfcomm_init(void)
 {
        rfcomm_credit_pool = zinit("rfcomm_credit",
-           sizeof(struct rfcomm_credit), 0, 0, 0);
+                                  sizeof(struct rfcomm_credit),
+                                  0, 0);
 }
 
 /*
index e00b4a8..46f6d10 100644 (file)
@@ -220,6 +220,7 @@ efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
        result = vm_map_insert(&efi_vmspace->vm_map, &count, efi_obj, NULL,
                              0, 0, VM_MAX_USER_ADDRESS,
                              VM_MAPTYPE_NORMAL,
+                             VM_SUBSYS_EFI,
                              VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
                              VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
                              0);
index 85c6816..6a2f8ac 100644 (file)
@@ -455,7 +455,8 @@ again:
         */
        if (firstaddr == 0) {
                size = (vm_size_t)(v - firstaddr);
-               firstaddr = kmem_alloc(&kernel_map, round_page(size));
+               firstaddr = kmem_alloc(&kernel_map, round_page(size),
+                                      VM_SUBSYS_BUF);
                if (firstaddr == 0)
                        panic("startup: no room for tables");
                goto again;
index 476b989..ec8f013 100644 (file)
@@ -449,7 +449,7 @@ start_all_aps(u_int boot_addr)
                /* This is a bit verbose, it will go away soon.  */
 
                pssize = sizeof(struct privatespace);
-               ps = (void *)kmem_alloc(&kernel_map, pssize);
+               ps = (void *)kmem_alloc(&kernel_map, pssize, VM_SUBSYS_GD);
                CPU_prvspace[x] = ps;
 #if 0
                kprintf("ps %d %p %d\n", x, ps, pssize);
@@ -462,7 +462,8 @@ start_all_aps(u_int boot_addr)
                mi_gdinit(&gd->mi, x);
                cpu_gdinit(gd, x);
                ipiq_size = sizeof(struct lwkt_ipiq) * (naps + 1);
-               gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size);
+               gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
+                                                   VM_SUBSYS_IPIQ);
                bzero(gd->mi.gd_ipiq, ipiq_size);
 
                gd->gd_acpi_id = CPUID_TO_ACPIID(gd->mi.gd_cpuid);
@@ -520,7 +521,8 @@ start_all_aps(u_int boot_addr)
        gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid);
 
        ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
-       mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size);
+       mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
+                                           VM_SUBSYS_IPIQ);
        bzero(mycpu->gd_ipiq, ipiq_size);
 
        /* restore the warmstart vector */
@@ -1557,7 +1559,8 @@ mp_bsp_simple_setup(void)
        gd->gd_acpi_id = CPUID_TO_ACPIID(mycpu->gd_cpuid);
 
        ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
-       mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size);
+       mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
+                                           VM_SUBSYS_IPIQ);
        bzero(mycpu->gd_ipiq, ipiq_size);
 
        pmap_set_opt();
index 67c5429..7b0c9cc 100644 (file)
@@ -1107,7 +1107,8 @@ pmap_init(void)
                initial_pvs = MINPV;
        pvzone = &pvzone_store;
        pvinit = (void *)kmem_alloc(&kernel_map,
-                                   initial_pvs * sizeof (struct pv_entry));
+                                   initial_pvs * sizeof (struct pv_entry),
+                                   VM_SUBSYS_PVENTRY);
        zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry),
                  pvinit, initial_pvs);
 
@@ -1140,7 +1141,7 @@ pmap_init2(void)
        if (entry_max <= 0)
                entry_max = 1;
 
-       zinitna(pvzone, &pvzone_obj, NULL, 0, entry_max, ZONE_INTERRUPT, 1);
+       zinitna(pvzone, &pvzone_obj, NULL, 0, entry_max, ZONE_INTERRUPT);
 }
 
 /*
@@ -1759,7 +1760,9 @@ pmap_pinit(struct pmap *pmap)
         */
        if (pmap->pm_pml4 == NULL) {
                pmap->pm_pml4 =
-                   (pml4_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
+                   (pml4_entry_t *)kmem_alloc_pageable(&kernel_map,
+                                                       PAGE_SIZE,
+                                                       VM_SUBSYS_PML4);
        }
 
        /*
@@ -5029,7 +5032,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
        offset = pa & PAGE_MASK;
        size = roundup(offset + size, PAGE_SIZE);
 
-       va = kmem_alloc_nofault(&kernel_map, size, PAGE_SIZE);
+       va = kmem_alloc_nofault(&kernel_map, size, VM_SUBSYS_MAPDEV, PAGE_SIZE);
        if (va == 0)
                panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 
index 3151887..f2df3b6 100644 (file)
@@ -648,8 +648,10 @@ pmap_init(void)
        if (initial_pvs < MINPV)
                initial_pvs = MINPV;
        pvzone = &pvzone_store;
-       pvinit = (struct pv_entry *) kmem_alloc(&kernel_map,
-               initial_pvs * sizeof (struct pv_entry));
+       pvinit = (struct pv_entry *)
+               kmem_alloc(&kernel_map,
+                          initial_pvs * sizeof (struct pv_entry),
+                          VM_SUBSYS_PVENTRY);
        zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit,
                initial_pvs);
 
@@ -673,7 +675,7 @@ pmap_init2(void)
        pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
        TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
        pv_entry_high_water = 9 * (pv_entry_max / 10);
-       zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
+       zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT);
 }
 
 
@@ -1212,8 +1214,9 @@ pmap_pinit(struct pmap *pmap)
         * page directory table.
         */
        if (pmap->pm_pml4 == NULL) {
-               pmap->pm_pml4 =
-                   (pml4_entry_t *)kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
+               pmap->pm_pml4 = (pml4_entry_t *)
+                       kmem_alloc_pageable(&kernel_map, PAGE_SIZE,
+                                           VM_SUBSYS_PML4);
        }
 
        /*
index 17b5088..a7ef51b 100644 (file)
@@ -168,9 +168,15 @@ cpu_startup(void *dummy)
        /*
         * Allocate memory for the buffer cache
         */
-       buf = (void *)kmem_alloc(&kernel_map, nbuf * sizeof(struct buf));
-       swbuf_mem = (void *)kmem_alloc(&kernel_map, nswbuf_mem * sizeof(struct buf));
-       swbuf_kva = (void *)kmem_alloc(&kernel_map, nswbuf_kva * sizeof(struct buf));
+       buf = (void *)kmem_alloc(&kernel_map,
+                                nbuf * sizeof(struct buf),
+                                VM_SUBSYS_BUF);
+       swbuf_mem = (void *)kmem_alloc(&kernel_map,
+                                      nswbuf_mem * sizeof(struct buf),
+                                      VM_SUBSYS_BUF);
+       swbuf_kva = (void *)kmem_alloc(&kernel_map,
+                                      nswbuf_kva * sizeof(struct buf),
+                                      VM_SUBSYS_BUF);
 
 #ifdef DIRECTIO
         ffs_rawread_setup();
index 36aa4d6..4487834 100644 (file)
@@ -173,7 +173,8 @@ mp_start(void)
         * cpu0 initialization
         */
        ipiq_size = sizeof(struct lwkt_ipiq) * ncpus;
-       mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size);
+       mycpu->gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
+                                           VM_SUBSYS_IPIQ);
        bzero(mycpu->gd_ipiq, ipiq_size);
 
        /*
@@ -439,7 +440,8 @@ start_all_aps(u_int boot_addr)
 #endif
 
                ipiq_size = sizeof(struct lwkt_ipiq) * (mp_naps + 1);
-                gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size);
+                gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, ipiq_size,
+                                                   VM_SUBSYS_IPIQ);
                 bzero(gd->mi.gd_ipiq, ipiq_size);
 
                 /*
index d820121..07ba720 100644 (file)
@@ -307,7 +307,7 @@ hammer2_bulkfree_pass(hammer2_dev_t *hmp, hammer2_ioc_bulkfree_t *bfi)
        size = (bfi->size + HAMMER2_FREEMAP_LEVELN_PSIZE - 1) &
               ~(size_t)(HAMMER2_FREEMAP_LEVELN_PSIZE - 1);
        cbinfo.hmp = hmp;
-       cbinfo.bmap = kmem_alloc_swapbacked(&cbinfo.kp, size);
+       cbinfo.bmap = kmem_alloc_swapbacked(&cbinfo.kp, size, VM_SUBSYS_HAMMER);
        cbinfo.saved_mirror_tid = hmp->voldata.mirror_tid;
 
        cbinfo.dedup = kmalloc(sizeof(*cbinfo.dedup) * HAMMER2_DEDUP_HEUR_SIZE,
index 1b155a6..a1779f5 100644 (file)
@@ -104,7 +104,7 @@ procfs_rwmem(struct proc *curp, struct proc *p, struct uio *uio)
        if (writing)
                reqprot |= VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE;
 
-       kva = kmem_alloc_pageable(&kernel_map, PAGE_SIZE);
+       kva = kmem_alloc_pageable(&kernel_map, PAGE_SIZE, VM_SUBSYS_PROC);
 
        /*
         * Only map in one page at a time.  We don't have to, but it
index 1a78830..6e9bd74 100644 (file)
@@ -396,8 +396,7 @@ swap_pager_swap_init(void)
                        "SWAPMETA", 
                        sizeof(struct swblock), 
                        n,
-                       ZONE_INTERRUPT, 
-                       1);
+                       ZONE_INTERRUPT);
                if (swap_zone != NULL)
                        break;
                /*
index 1f54b57..a6b2989 100644 (file)
@@ -482,7 +482,7 @@ vm_contig_pg_kmap(int start, u_long size, vm_map_t map, int flags)
        if (size == 0)
                panic("vm_contig_pg_kmap: size must not be 0");
        size = round_page(size);
-       addr = kmem_alloc_pageable(&kernel_map, size);
+       addr = kmem_alloc_pageable(&kernel_map, size, VM_SUBSYS_CONTIG);
        if (addr) {
                pa = VM_PAGE_TO_PHYS(&pga[start]);
                for (offset = 0; offset < size; offset += PAGE_SIZE)
index d68bc95..7417054 100644 (file)
@@ -74,12 +74,12 @@ int swapon (struct proc *, void *, int *);
 
 int grow (struct proc *, size_t);
 int kernacc(c_caddr_t, int, int);
-vm_offset_t kmem_alloc3 (vm_map_t, vm_size_t, int flags);
-vm_offset_t kmem_alloc_nofault (vm_map_t, vm_size_t, vm_size_t);
-vm_offset_t kmem_alloc_pageable (vm_map_t, vm_size_t);
-vm_offset_t kmem_alloc_wait (vm_map_t, vm_size_t);
-vm_offset_t kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags,
-       vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
+vm_offset_t kmem_alloc3 (vm_map_t, vm_size_t, vm_subsys_t id, int flags);
+vm_offset_t kmem_alloc_nofault (vm_map_t, vm_size_t, vm_subsys_t id, vm_size_t);
+vm_offset_t kmem_alloc_pageable (vm_map_t, vm_size_t, vm_subsys_t id);
+vm_offset_t kmem_alloc_wait (vm_map_t, vm_size_t, vm_subsys_t id);
+vm_offset_t kmem_alloc_attr(vm_map_t map, vm_size_t size, vm_subsys_t id,
+       int flags, vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
 void kmem_free (vm_map_t, vm_offset_t, vm_size_t);
 void kmem_free_wakeup (vm_map_t, vm_offset_t, vm_size_t);
 void kmem_init (void);
@@ -114,7 +114,8 @@ void vmspace_ref (struct vmspace *);
 void vmspace_rel (struct vmspace *);
 void vmspace_relexit (struct vmspace *);
 void vmspace_exitfree (struct proc *);
-void *kmem_alloc_swapbacked(kmem_anon_desc_t *kp, vm_size_t size);
+void *kmem_alloc_swapbacked(kmem_anon_desc_t *kp, vm_size_t size,
+                       vm_subsys_t id);
 void kmem_free_swapbacked(kmem_anon_desc_t *kp);
 
 struct vmspace *vmspace_fork (struct vmspace *);
@@ -127,16 +128,16 @@ void vm_object_print (/* db_expr_t */ long, boolean_t, /* db_expr_t */ long,
 
 static __inline
 vm_offset_t
-kmem_alloc (vm_map_t map, vm_size_t size)
+kmem_alloc (vm_map_t map, vm_size_t size, vm_subsys_t id)
 {
-       return(kmem_alloc3(map, size, 0));
+       return(kmem_alloc3(map, size, id, 0));
 }
 
 static __inline
 vm_offset_t
 kmem_alloc_stack (vm_map_t map, vm_size_t size)
 {
-       return(kmem_alloc3(map, size, KM_STACK));
+       return(kmem_alloc3(map, size, VM_SUBSYS_STACK, KM_STACK));
 }
 
 #endif                         /* _KERNEL */
index e764360..bca5c6b 100644 (file)
@@ -92,7 +92,7 @@ struct vm_map buffer_map;
  * Allocate pageable swap-backed anonymous memory
  */
 void *
-kmem_alloc_swapbacked(kmem_anon_desc_t *kp, vm_size_t size)
+kmem_alloc_swapbacked(kmem_anon_desc_t *kp, vm_size_t size, vm_subsys_t id)
 {
        int error;
        vm_pindex_t npages;
@@ -108,8 +108,8 @@ kmem_alloc_swapbacked(kmem_anon_desc_t *kp, vm_size_t size)
 
        error = vm_map_find(kp->map, kp->object, NULL, 0,
                            &kp->data, size,
-                           PAGE_SIZE,
-                           1, VM_MAPTYPE_NORMAL,
+                           PAGE_SIZE, TRUE,
+                           VM_MAPTYPE_NORMAL, id,
                            VM_PROT_ALL, VM_PROT_ALL, 0);
        if (error) {
                kprintf("kmem_alloc_swapbacked: %zd bytes failed %d\n",
@@ -148,7 +148,7 @@ kmem_free_swapbacked(kmem_anon_desc_t *kp)
  * No requirements.
  */
 vm_offset_t
-kmem_alloc_pageable(vm_map_t map, vm_size_t size)
+kmem_alloc_pageable(vm_map_t map, vm_size_t size, vm_subsys_t id)
 {
        vm_offset_t addr;
        int result;
@@ -157,8 +157,8 @@ kmem_alloc_pageable(vm_map_t map, vm_size_t size)
        addr = vm_map_min(map);
        result = vm_map_find(map, NULL, NULL,
                             (vm_offset_t) 0, &addr, size,
-                            PAGE_SIZE,
-                            TRUE, VM_MAPTYPE_NORMAL,
+                            PAGE_SIZE, TRUE,
+                            VM_MAPTYPE_NORMAL, id,
                             VM_PROT_ALL, VM_PROT_ALL, 0);
        if (result != KERN_SUCCESS)
                return (0);
@@ -171,7 +171,8 @@ kmem_alloc_pageable(vm_map_t map, vm_size_t size)
  * No requirements.
  */
 vm_offset_t
-kmem_alloc_nofault(vm_map_t map, vm_size_t size, vm_size_t align)
+kmem_alloc_nofault(vm_map_t map, vm_size_t size, vm_subsys_t id,
+                  vm_size_t align)
 {
        vm_offset_t addr;
        int result;
@@ -180,8 +181,8 @@ kmem_alloc_nofault(vm_map_t map, vm_size_t size, vm_size_t align)
        addr = vm_map_min(map);
        result = vm_map_find(map, NULL, NULL,
                             (vm_offset_t) 0, &addr, size,
-                            align,
-                            TRUE, VM_MAPTYPE_NORMAL,
+                            align, TRUE,
+                            VM_MAPTYPE_NORMAL, id,
                             VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
        if (result != KERN_SUCCESS)
                return (0);
@@ -194,7 +195,7 @@ kmem_alloc_nofault(vm_map_t map, vm_size_t size, vm_size_t align)
  * No requirements.
  */
 vm_offset_t
-kmem_alloc3(vm_map_t map, vm_size_t size, int kmflags)
+kmem_alloc3(vm_map_t map, vm_size_t size, vm_subsys_t id, int kmflags)
 {
        vm_offset_t addr;
        vm_offset_t gstart;
@@ -239,7 +240,7 @@ kmem_alloc3(vm_map_t map, vm_size_t size, int kmflags)
        vm_map_insert(map, &count,
                      &kernel_object, NULL,
                      addr, addr, addr + size,
-                     VM_MAPTYPE_NORMAL,
+                     VM_MAPTYPE_NORMAL, id,
                      VM_PROT_ALL, VM_PROT_ALL, cow);
        vm_object_drop(&kernel_object);
 
@@ -328,8 +329,8 @@ kmem_suballoc(vm_map_t parent, vm_map_t result,
        *min = (vm_offset_t) vm_map_min(parent);
        ret = vm_map_find(parent, NULL, NULL,
                          (vm_offset_t) 0, min, size,
-                         PAGE_SIZE,
-                         TRUE, VM_MAPTYPE_UNSPECIFIED,
+                         PAGE_SIZE, TRUE,
+                         VM_MAPTYPE_UNSPECIFIED, VM_SUBSYS_SYSMAP,
                          VM_PROT_ALL, VM_PROT_ALL, 0);
        if (ret != KERN_SUCCESS) {
                kprintf("kmem_suballoc: bad status return of %d.\n", ret);
@@ -349,7 +350,7 @@ kmem_suballoc(vm_map_t parent, vm_map_t result,
  * No requirements.
  */
 vm_offset_t
-kmem_alloc_wait(vm_map_t map, vm_size_t size)
+kmem_alloc_wait(vm_map_t map, vm_size_t size, vm_subsys_t id)
 {
        vm_offset_t addr;
        int count;
@@ -380,9 +381,8 @@ kmem_alloc_wait(vm_map_t map, vm_size_t size)
        vm_map_insert(map, &count,
                      NULL, NULL,
                      (vm_offset_t) 0, addr, addr + size,
-                     VM_MAPTYPE_NORMAL,
-                     VM_PROT_ALL, VM_PROT_ALL,
-                     0);
+                     VM_MAPTYPE_NORMAL, id,
+                     VM_PROT_ALL, VM_PROT_ALL, 0);
        vm_map_unlock(map);
        vm_map_entry_release(count);
 
@@ -398,7 +398,8 @@ kmem_alloc_wait(vm_map_t map, vm_size_t size)
  *  given flags, then the pages are zeroed before they are mapped.
  */
 vm_offset_t
-kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
+kmem_alloc_attr(vm_map_t map, vm_size_t size, vm_subsys_t id,
+               int flags, vm_paddr_t low,
                vm_paddr_t high, vm_memattr_t memattr)
 {
        vm_offset_t addr, i, offset;
@@ -420,7 +421,7 @@ kmem_alloc_attr(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
        vm_map_insert(map, &count,
                      &kernel_object, NULL,
                      offset, addr, addr + size,
-                     VM_MAPTYPE_NORMAL,
+                     VM_MAPTYPE_NORMAL, id,
                      VM_PROT_ALL, VM_PROT_ALL, 0);
        vm_map_unlock(map);
        vm_map_entry_release(count);
@@ -490,7 +491,7 @@ kmem_init(void)
                        vm_map_insert(m, &count,
                                      NULL, NULL,
                                      (vm_offset_t) 0, addr, virtual2_start,
-                                     VM_MAPTYPE_NORMAL,
+                                     VM_MAPTYPE_NORMAL, VM_SUBSYS_RESERVED,
                                      VM_PROT_ALL, VM_PROT_ALL, 0);
                }
                addr = virtual2_end;
@@ -499,7 +500,7 @@ kmem_init(void)
                vm_map_insert(m, &count,
                              NULL, NULL,
                              (vm_offset_t) 0, addr, virtual_start,
-                             VM_MAPTYPE_NORMAL,
+                             VM_MAPTYPE_NORMAL, VM_SUBSYS_RESERVED,
                              VM_PROT_ALL, VM_PROT_ALL, 0);
        }
        addr = virtual_end;
@@ -507,7 +508,7 @@ kmem_init(void)
                vm_map_insert(m, &count,
                              NULL, NULL,
                              (vm_offset_t) 0, addr, KvaEnd,
-                             VM_MAPTYPE_NORMAL,
+                             VM_MAPTYPE_NORMAL, VM_SUBSYS_RESERVED,
                              VM_PROT_ALL, VM_PROT_ALL, 0);
        }
        /* ... and ending with the completion of the above `insert' */
index 09fc299..05ee2ab 100644 (file)
@@ -198,8 +198,8 @@ vm_init2(void)
                                                vmspace_ctor, vmspace_dtor,
                                                NULL);
        zinitna(mapentzone, &mapentobj, NULL, 0, 0, 
-               ZONE_USE_RESERVE | ZONE_SPECIAL, 1);
-       zinitna(mapzone, &mapobj, NULL, 0, 0, 0, 1);
+               ZONE_USE_RESERVE | ZONE_SPECIAL);
+       zinitna(mapzone, &mapobj, NULL, 0, 0, 0);
        pmap_init2();
        vm_object_init2();
 }
@@ -986,7 +986,7 @@ vm_map_lookup_entry(vm_map_t map, vm_offset_t address, vm_map_entry_t *entry)
 int
 vm_map_insert(vm_map_t map, int *countp, void *map_object, void *map_aux,
              vm_ooffset_t offset, vm_offset_t start, vm_offset_t end,
-             vm_maptype_t maptype,
+             vm_maptype_t maptype, vm_subsys_t id,
              vm_prot_t prot, vm_prot_t max, int cow)
 {
        vm_map_entry_t new_entry;
@@ -1070,6 +1070,7 @@ vm_map_insert(vm_map_t map, int *countp, void *map_object, void *map_aux,
                 (prev_entry->eflags == protoeflags) &&
                 (prev_entry->end == start) &&
                 (prev_entry->wired_count == 0) &&
+                (prev_entry->id == id) &&
                 prev_entry->maptype == maptype &&
                 maptype == VM_MAPTYPE_NORMAL &&
                 ((prev_entry->object.vm_object == NULL) ||
@@ -1125,6 +1126,7 @@ vm_map_insert(vm_map_t map, int *countp, void *map_object, void *map_aux,
        new_entry = vm_map_entry_create(map, countp);
        new_entry->start = start;
        new_entry->end = end;
+       new_entry->id = id;
 
        new_entry->maptype = maptype;
        new_entry->eflags = protoeflags;
@@ -1337,11 +1339,9 @@ vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
 int
 vm_map_find(vm_map_t map, void *map_object, void *map_aux,
            vm_ooffset_t offset, vm_offset_t *addr,
-           vm_size_t length, vm_size_t align,
-           boolean_t fitit,
-           vm_maptype_t maptype,
-           vm_prot_t prot, vm_prot_t max,
-           int cow)
+           vm_size_t length, vm_size_t align, boolean_t fitit,
+           vm_maptype_t maptype, vm_subsys_t id,
+           vm_prot_t prot, vm_prot_t max, int cow)
 {
        vm_offset_t start;
        vm_object_t object;
@@ -1371,7 +1371,7 @@ vm_map_find(vm_map_t map, void *map_object, void *map_aux,
        }
        result = vm_map_insert(map, &count, map_object, map_aux,
                               offset, start, start + length,
-                              maptype, prot, max, cow);
+                              maptype, id, prot, max, cow);
        if (object)
                vm_object_drop(object);
        vm_map_unlock(map);
@@ -1419,6 +1419,7 @@ vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
                     (prev->protection == entry->protection) &&
                     (prev->max_protection == entry->max_protection) &&
                     (prev->inheritance == entry->inheritance) &&
+                    (prev->id == entry->id) &&
                     (prev->wired_count == entry->wired_count)) {
                        if (map->first_free == prev)
                                map->first_free = entry;
@@ -1445,6 +1446,7 @@ vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry, int *countp)
                    (next->protection == entry->protection) &&
                    (next->max_protection == entry->max_protection) &&
                    (next->inheritance == entry->inheritance) &&
+                   (next->id == entry->id) &&
                    (next->wired_count == entry->wired_count)) {
                        if (map->first_free == next)
                                map->first_free = entry;
@@ -3677,7 +3679,7 @@ vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
                           0, addrbos + max_ssize - init_ssize,
                           addrbos + max_ssize,
                           VM_MAPTYPE_NORMAL,
-                          prot, max, cow);
+                          VM_SUBSYS_STACK, prot, max, cow);
 
        /* Now set the avail_ssize amount */
        if (rv == KERN_SUCCESS) {
@@ -3830,7 +3832,7 @@ Retry:
        rv = vm_map_insert(map, &count, NULL, NULL,
                           0, addr, stack_entry->start,
                           VM_MAPTYPE_NORMAL,
-                          VM_PROT_ALL, VM_PROT_ALL, 0);
+                          VM_SUBSYS_STACK, VM_PROT_ALL, VM_PROT_ALL, 0);
 
        /* Adjust the available stack space by the amount we grew. */
        if (rv == KERN_SUCCESS) {
index 107cf47..366fce0 100644 (file)
@@ -131,6 +131,45 @@ union vm_map_aux {
        void    *map_aux;
 };
 
+/*
+ * vm_map_entry identifiers, used as a debugging aid
+ */
+typedef enum {
+       VM_SUBSYS_UNKNOWN,
+       VM_SUBSYS_KMALLOC,
+       VM_SUBSYS_STACK,
+       VM_SUBSYS_IMGACT,
+       VM_SUBSYS_EFI,
+       VM_SUBSYS_RESERVED,
+       VM_SUBSYS_INIT,
+       VM_SUBSYS_PIPE,
+       VM_SUBSYS_PROC,
+       VM_SUBSYS_SHMEM,
+       VM_SUBSYS_SYSMAP,
+       VM_SUBSYS_MMAP,
+       VM_SUBSYS_BRK,
+       VM_SUBSYS_BOGUS,
+       VM_SUBSYS_BUF,
+       VM_SUBSYS_BUFDATA,
+       VM_SUBSYS_GD,
+       VM_SUBSYS_IPIQ,
+       VM_SUBSYS_PVENTRY,
+       VM_SUBSYS_PML4,
+       VM_SUBSYS_MAPDEV,
+       VM_SUBSYS_ZALLOC,
+
+       VM_SUBSYS_DM,
+       VM_SUBSYS_CONTIG,
+       VM_SUBSYS_DRM,
+       VM_SUBSYS_DRM_GEM,
+       VM_SUBSYS_DRM_SCAT,
+       VM_SUBSYS_DRM_VMAP,
+       VM_SUBSYS_DRM_TTM,
+       VM_SUBSYS_HAMMER,
+
+       VM_SUBSYS_LIMIT         /* end of list */
+} vm_subsys_t;
+
 /*
  *     Address map entries consist of start and end addresses,
  *     a VM object (or sharing map) and offset into that object,
@@ -158,6 +197,7 @@ struct vm_map_entry {
        vm_prot_t max_protection;       /* maximum protection */
        vm_inherit_t inheritance;       /* inheritance */
        int wired_count;                /* can be paged if = 0 */
+       vm_subsys_t id;                 /* subsystem id */
 };
 
 #define MAP_ENTRY_NOSYNC               0x0001
@@ -535,8 +575,8 @@ vm_map_t vm_map_create (vm_map_t, struct pmap *, vm_offset_t, vm_offset_t);
 int vm_map_delete (vm_map_t, vm_offset_t, vm_offset_t, int *);
 int vm_map_find (vm_map_t, void *, void *,
                 vm_ooffset_t, vm_offset_t *, vm_size_t,
-                vm_size_t,
-                boolean_t, vm_maptype_t,
+                vm_size_t, boolean_t,
+                vm_maptype_t, vm_subsys_t id,
                 vm_prot_t, vm_prot_t, int);
 int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_size_t,
                      int, vm_offset_t *);
@@ -545,7 +585,7 @@ int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
 void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t, pmap_t);
 int vm_map_insert (vm_map_t, int *, void *, void *,
                   vm_ooffset_t, vm_offset_t, vm_offset_t,
-                  vm_maptype_t,
+                  vm_maptype_t, vm_subsys_t id,
                   vm_prot_t, vm_prot_t, int);
 int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
     vm_pindex_t *, vm_prot_t *, boolean_t *);
index 31bd309..2dbe6c3 100644 (file)
@@ -1423,8 +1423,8 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
        if (uksmap) {
                rv = vm_map_find(map, uksmap, vp->v_rdev,
                                 foff, addr, size,
-                                align,
-                                fitit, VM_MAPTYPE_UKSMAP,
+                                align, fitit,
+                                VM_MAPTYPE_UKSMAP, VM_SUBSYS_MMAP,
                                 prot, maxprot, docow);
        } else if (flags & MAP_STACK) {
                rv = vm_map_stack(map, *addr, size, flags,
@@ -1432,14 +1432,14 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
        } else if (flags & MAP_VPAGETABLE) {
                rv = vm_map_find(map, object, NULL,
                                 foff, addr, size,
-                                align,
-                                fitit, VM_MAPTYPE_VPAGETABLE,
+                                align, fitit,
+                                VM_MAPTYPE_VPAGETABLE, VM_SUBSYS_MMAP,
                                 prot, maxprot, docow);
        } else {
                rv = vm_map_find(map, object, NULL,
                                 foff, addr, size,
-                                align,
-                                fitit, VM_MAPTYPE_NORMAL,
+                                align, fitit,
+                                VM_MAPTYPE_NORMAL, VM_SUBSYS_MMAP,
                                 prot, maxprot, docow);
        }
 
index faeca71..3497544 100644 (file)
@@ -437,7 +437,7 @@ vm_object_init(void)
 void
 vm_object_init2(void)
 {
-       zinitna(obj_zone, NULL, NULL, 0, 0, ZONE_PANICFAIL, 1);
+       zinitna(obj_zone, NULL, NULL, 0, 0, ZONE_PANICFAIL);
 }
 
 /*
index f435b7e..4b823ca 100644 (file)
@@ -232,10 +232,12 @@ vm_pager_bufferinit(void *dummy __unused)
        /*
         * Reserve KVM space for pbuf data.
         */
-       swapbkva_mem = kmem_alloc_pageable(&pager_map, nswbuf_mem * MAXPHYS);
+       swapbkva_mem = kmem_alloc_pageable(&pager_map, nswbuf_mem * MAXPHYS,
+                                          VM_SUBSYS_BUFDATA);
        if (!swapbkva_mem)
                panic("Not enough pager_map VM space for physical buffers");
-       swapbkva_kva = kmem_alloc_pageable(&pager_map, nswbuf_kva * MAXPHYS);
+       swapbkva_kva = kmem_alloc_pageable(&pager_map, nswbuf_kva * MAXPHYS,
+                                          VM_SUBSYS_BUFDATA);
        if (!swapbkva_kva)
                panic("Not enough pager_map VM space for physical buffers");
 
@@ -315,6 +317,7 @@ vm_pager_bufferinit(void *dummy __unused)
        nswbuf_raw = nbuf * 2;
        swbuf_raw = (void *)kmem_alloc3(&kernel_map,
                                round_page(nswbuf_raw * sizeof(struct buf)),
+                               VM_SUBSYS_BUFDATA,
                                KM_NOTLBSYNC);
        smp_invltlb();
        bp = swbuf_raw;
index a24e707..9c2fe51 100644 (file)
@@ -114,8 +114,8 @@ sys_obreak(struct obreak_args *uap)
                }
                rv = vm_map_find(&vm->vm_map, NULL, NULL,
                                 0, &old, diff,
-                                PAGE_SIZE,
-                                FALSE, VM_MAPTYPE_NORMAL,
+                                PAGE_SIZE, FALSE,
+                                VM_MAPTYPE_NORMAL, VM_SUBSYS_BRK,
                                 VM_PROT_ALL, VM_PROT_ALL, 0);
                if (rv != KERN_SUCCESS) {
                        error = ENOMEM;
index 5ae2744..a28e448 100644 (file)
@@ -244,7 +244,7 @@ static long zone_kmem_kvaspace;
  */
 int
 zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
-       int nentries, int flags, int zalloc)
+       int nentries, int flags)
 {
        size_t totsize;
 
@@ -291,7 +291,8 @@ zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
                totsize = round_page((size_t)z->zsize * nentries);
                atomic_add_long(&zone_kmem_kvaspace, totsize);
 
-               z->zkva = kmem_alloc_pageable(&kernel_map, totsize);
+               z->zkva = kmem_alloc_pageable(&kernel_map, totsize,
+                                             VM_SUBSYS_ZALLOC);
                if (z->zkva == 0) {
                        LIST_REMOVE(z, zlink);
                        return 0;
@@ -320,10 +321,11 @@ zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
                z->zfreemin = PAGE_SIZE / z->zsize;
 
        z->zpagecount = 0;
-       if (zalloc)
-               z->zalloc = zalloc;
-       else
-               z->zalloc = 1;
+
+       /*
+        * Reduce kernel_map spam by allocating in chunks of 4 pages.
+        */
+       z->zalloc = 4;
 
        /*
         * Populate the interrrupt zone at creation time rather than
@@ -349,7 +351,7 @@ zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
  * No requirements.
  */
 vm_zone_t
-zinit(char *name, int size, int nentries, int flags, int zalloc)
+zinit(char *name, int size, int nentries, int flags)
 {
        vm_zone_t z;
 
@@ -359,7 +361,7 @@ zinit(char *name, int size, int nentries, int flags, int zalloc)
 
        z->zflags = 0;
        if (zinitna(z, NULL, name, size, nentries,
-                   flags & ~ZONE_DESTROYABLE, zalloc) == 0) {
+                   flags & ~ZONE_DESTROYABLE) == 0) {
                kfree(z, M_ZONE);
                return NULL;
        }
@@ -467,7 +469,7 @@ zdestroy(vm_zone_t z)
                vm_object_drop(z->zobj);
                atomic_subtract_int(&zone_kmem_pages, z->zpagecount);
        } else {
-               for (i=0; i < z->zkmcur; i++) {
+               for (i = 0; i < z->zkmcur; i++) {
                        kmem_free(&kernel_map, z->zkmvec[i],
                                  (size_t)z->zalloc * PAGE_SIZE);
                        atomic_subtract_int(&zone_kern_pages, z->zalloc);
@@ -564,11 +566,12 @@ zget(vm_zone_t z)
                 */
                nbytes = (size_t)z->zalloc * PAGE_SIZE;
 
-               item = (void *)kmem_alloc3(&kernel_map, nbytes, KM_KRESERVE);
+               item = (void *)kmem_alloc3(&kernel_map, nbytes,
+                                          VM_SUBSYS_ZALLOC, KM_KRESERVE);
 
                /* note: z might be modified due to blocking */
                if (item != NULL) {
-                       zone_kern_pages += z->zalloc;   /* not MP-safe XXX */
+                       atomic_add_int(&zone_kern_pages, z->zalloc);
                        bzero(item, nbytes);
                } else {
                        nbytes = 0;
@@ -580,11 +583,12 @@ zget(vm_zone_t z)
                 */
                nbytes = (size_t)z->zalloc * PAGE_SIZE;
 
-               item = (void *)kmem_alloc3(&kernel_map, nbytes, 0);
+               item = (void *)kmem_alloc3(&kernel_map, nbytes,
+                                          VM_SUBSYS_ZALLOC, 0);
 
                /* note: z might be modified due to blocking */
                if (item != NULL) {
-                       zone_kern_pages += z->zalloc;   /* not MP-safe XXX */
+                       atomic_add_int(&zone_kern_pages, z->zalloc);
                        bzero(item, nbytes);
 
                        if (z->zflags & ZONE_DESTROYABLE) {
index ccad79e..2f9db99 100644 (file)
@@ -66,10 +66,9 @@ typedef struct vm_zone {
 
 
 void           zerror (int) __dead2;
-vm_zone_t      zinit (char *name, int size, int nentries, int flags,
-                          int zalloc);
+vm_zone_t      zinit (char *name, int size, int nentries, int flags);
 int            zinitna (vm_zone_t z, struct vm_object *obj, char *name,
-                            int size, int nentries, int flags, int zalloc);
+                            int size, int nentries, int flags);
 void *         zalloc (vm_zone_t z);
 void           zfree (vm_zone_t z, void *item);
 void           zbootinit (vm_zone_t z, char *name, int size, void *item,