kernel - pmap (i386) - Reduce kmem use for foreign pmap mapping
author: Matthew Dillon <dillon@apollo.backplane.com>
Thu, 4 Mar 2010 01:53:37 +0000 (17:53 -0800)
committer: Matthew Dillon <dillon@apollo.backplane.com>
Thu, 4 Mar 2010 01:53:37 +0000 (17:53 -0800)
* We've been having problems running out of KVA on i386 systems due to
  numerous reasons.  KVA use by the kernel is just too tight.

* Reserve space for foreign pmap page table mappings on a cpu-by-cpu basis
  instead of for SMP_MAXCPU.  This reduces KVM use from 68MB to (ncpu*4MB).
  Use the APT entry for cpu0 and use kmem_alloc_nofault() for the APs.

  This frees up 52MB of KVA which doesn't sound like a lot but actually
  is.

* Add an alignment argument to kmem_alloc_nofault() and vm_map_find().

* vm_map_findspace() already had an alignment argument, but adjust the
  value passed to be at least PAGE_SIZE (this has no operational effect
  but is more correct).

25 files changed:
sys/bus/pci/i386/pci_cfgreg.c
sys/bus/pci/x86_64/pci_cfgreg.c
sys/emulation/linux/i386/imgact_linux.c
sys/emulation/linux/linux_misc.c
sys/kern/imgact_gzip.c
sys/kern/init_main.c
sys/kern/kern_msfbuf.c
sys/kern/kern_sfbuf.c
sys/kern/link_elf.c
sys/kern/link_elf_obj.c
sys/kern/sys_pipe.c
sys/kern/sys_process.c
sys/kern/sysv_shm.c
sys/platform/pc32/i386/mp_machdep.c
sys/platform/pc32/i386/pmap.c
sys/platform/pc32/include/pmap.h
sys/platform/pc64/x86_64/pmap.c
sys/platform/vkernel/platform/pmap.c
sys/vm/vm_contig.c
sys/vm/vm_extern.h
sys/vm/vm_kern.c
sys/vm/vm_map.c
sys/vm/vm_map.h
sys/vm/vm_mmap.c
sys/vm/vm_unix.c

index 9eb9da1..94aad48 100644 (file)
@@ -554,7 +554,8 @@ pcie_cfgregopen(uint64_t base, uint8_t minbus, uint8_t maxbus)
                if (pcie_array == NULL)
                        return (0);
 
-               va = kmem_alloc_nofault(&kernel_map, PCIE_CACHE * PAGE_SIZE);
+               va = kmem_alloc_nofault(&kernel_map, PCIE_CACHE * PAGE_SIZE,
+                                       PAGE_SIZE);
                if (va == 0) {
                        kfree(pcie_array, M_DEVBUF);
                        return (0);
index cdc0419..77d24c5 100644 (file)
@@ -564,7 +564,8 @@ pcie_cfgregopen(uint64_t base, uint8_t minbus, uint8_t maxbus)
                if (pcie_array == NULL)
                        return (0);
 
-               va = kmem_alloc_nofault(&kernel_map, PCIE_CACHE * PAGE_SIZE);
+               va = kmem_alloc_nofault(&kernel_map, PCIE_CACHE * PAGE_SIZE,
+                                       PAGE_SIZE);
                if (va == 0) {
                        kfree(pcie_array, M_DEVBUF);
                        return (0);
index 6e9195a..df908db 100644 (file)
@@ -128,8 +128,9 @@ exec_linux_imgact(struct image_params *imgp)
         */
        vmaddr = virtual_offset;
        error = vm_map_find(&vmspace->vm_map, NULL, 0, &vmaddr,
-                           a_out->a_text + a_out->a_data + bss_size, FALSE,
-                           VM_MAPTYPE_NORMAL,
+                           a_out->a_text + a_out->a_data + bss_size,
+                           PAGE_SIZE,
+                           FALSE, VM_MAPTYPE_NORMAL,
                            VM_PROT_ALL, VM_PROT_ALL, 0);
        if (error)
            return error;
@@ -199,8 +200,8 @@ exec_linux_imgact(struct image_params *imgp)
        if (bss_size != 0) {
            vmaddr = virtual_offset + a_out->a_text + a_out->a_data;
            error = vm_map_find(&vmspace->vm_map, NULL, 0, &vmaddr, 
-                               bss_size, FALSE,
-                               VM_MAPTYPE_NORMAL,
+                               bss_size, PAGE_SIZE,
+                               FALSE, VM_MAPTYPE_NORMAL,
                                VM_PROT_ALL, VM_PROT_ALL,
                                0);
            if (error)
index 5c34294..799abc3 100644 (file)
@@ -405,8 +405,8 @@ sys_linux_uselib(struct linux_uselib_args *args)
                /* get anon user mapping, read+write+execute */
                error = vm_map_find(&p->p_vmspace->vm_map, NULL, 0,
                                    &vmaddr, a_out->a_text + a_out->a_data,
-                                   FALSE,
-                                   VM_MAPTYPE_NORMAL,
+                                   PAGE_SIZE,
+                                   FALSE, VM_MAPTYPE_NORMAL,
                                    VM_PROT_ALL, VM_PROT_ALL,
                                    0);
                if (error)
@@ -462,8 +462,8 @@ sys_linux_uselib(struct linux_uselib_args *args)
                /* allocate some 'anon' space */
                error = vm_map_find(&p->p_vmspace->vm_map, NULL, 0,
                                    &vmaddr, bss_size,
-                                   FALSE,
-                                   VM_MAPTYPE_NORMAL,
+                                   PAGE_SIZE,
+                                   FALSE, VM_MAPTYPE_NORMAL,
                                    VM_PROT_ALL, VM_PROT_ALL,
                                    0);
                if (error)
index b4ecf84..17625ab 100644 (file)
@@ -248,9 +248,8 @@ do_aout_hdr(struct imgact_gzip * gz)
                        gz->a_out.a_data;
                error = vm_map_find(&vmspace->vm_map,
                                    NULL, 0,
-                                   &vmaddr, gz->bss_size,
-                                   FALSE,
-                                   VM_MAPTYPE_NORMAL,
+                                   &vmaddr, gz->bss_size, PAGE_SIZE,
+                                   FALSE, VM_MAPTYPE_NORMAL,
                                    VM_PROT_ALL, VM_PROT_ALL,
                                    0);
                if (error) {
index 569bde2..52b864f 100644 (file)
@@ -528,9 +528,9 @@ start_init(void *dummy, struct trapframe *frame)
         * Need just enough stack to hold the faked-up "execve()" arguments.
         */
        addr = trunc_page(USRSTACK - PAGE_SIZE);
-       error = vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &addr, PAGE_SIZE,
-                           FALSE, 
-                           VM_MAPTYPE_NORMAL,
+       error = vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &addr,
+                           PAGE_SIZE, PAGE_SIZE,
+                           FALSE, VM_MAPTYPE_NORMAL,
                            VM_PROT_ALL, VM_PROT_ALL,
                            0);
        if (error)
index 4f26358..ac07d61 100644 (file)
@@ -119,7 +119,8 @@ msf_buf_init(void *__dummy)
        TAILQ_INIT(&msf_buf_freelist);
 
        msf_base = kmem_alloc_nofault(&kernel_map,
-                                     msf_buf_count * XIO_INTERNAL_SIZE);
+                                     msf_buf_count * XIO_INTERNAL_SIZE,
+                                     PAGE_SIZE);
 
        msf_bufs = kmalloc(msf_buf_count * sizeof(struct msf_buf), M_MSFBUF,
                        M_WAITOK|M_ZERO);
index 487a091..8e439d5 100644 (file)
@@ -88,7 +88,8 @@ sf_buf_init(void *arg)
 
        sf_buf_hashtable = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
        TAILQ_INIT(&sf_buf_freelist);
-       sf_base = kmem_alloc_nofault(&kernel_map, nsfbufs * PAGE_SIZE);
+       sf_base = kmem_alloc_nofault(&kernel_map, nsfbufs * PAGE_SIZE,
+                                   PAGE_SIZE);
        sf_bufs = kmalloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
                            M_WAITOK | M_ZERO);
        for (i = 0; i < nsfbufs; i++) {
index 5dce43b..9029368 100644 (file)
@@ -561,9 +561,9 @@ link_elf_load_file(const char* filename, linker_file_t* result)
     vm_object_reference(ef->object);
     ef->address = (caddr_t)vm_map_min(&kernel_map);
     error = vm_map_find(&kernel_map, ef->object, 0,
-                       (vm_offset_t *)&ef->address, mapsize,
-                       1,
-                       VM_MAPTYPE_NORMAL,
+                       (vm_offset_t *)&ef->address,
+                       mapsize, PAGE_SIZE,
+                       1, VM_MAPTYPE_NORMAL,
                        VM_PROT_ALL, VM_PROT_ALL,
                        0);
     if (error) {
index 81d99ee..2d6da67 100644 (file)
@@ -662,7 +662,8 @@ link_elf_obj_load_file(const char *filename, linker_file_t * result)
         */
        mapbase = KERNBASE;
        error = vm_map_find(&kernel_map, ef->object, 0, &mapbase,
-                           round_page(mapsize), TRUE, VM_MAPTYPE_NORMAL,
+                           round_page(mapsize), PAGE_SIZE,
+                           TRUE, VM_MAPTYPE_NORMAL,
                            VM_PROT_ALL, VM_PROT_ALL, FALSE);
        if (error) {
                vm_object_deallocate(ef->object);
index 9203d13..13c0c69 100644 (file)
@@ -353,9 +353,9 @@ pipespace(struct pipe *cpipe, int size)
                buffer = (caddr_t)vm_map_min(&kernel_map);
 
                error = vm_map_find(&kernel_map, object, 0,
-                                   (vm_offset_t *)&buffer, size,
-                                   1,
-                                   VM_MAPTYPE_NORMAL,
+                                   (vm_offset_t *)&buffer,
+                                   size, PAGE_SIZE,
+                                   1, VM_MAPTYPE_NORMAL,
                                    VM_PROT_ALL, VM_PROT_ALL,
                                    0);
 
index 7f74d31..10ba639 100644 (file)
@@ -87,9 +87,9 @@ pread (struct proc *procp, unsigned int addr, unsigned int *retval) {
 
        /* Find space in kernel_map for the page we're interested in */
        rv = vm_map_find (&kernel_map, object, IDX_TO_OFF(pindex),
-                         &kva, PAGE_SIZE, 
-                         0, 
-                         VM_MAPTYPE_NORMAL,
+                         &kva,
+                         PAGE_SIZE, PAGE_SIZE,
+                         0, VM_MAPTYPE_NORMAL,
                          VM_PROT_ALL, VM_PROT_ALL,
                          0);
 
@@ -177,9 +177,9 @@ pwrite (struct proc *procp, unsigned int addr, unsigned int datum) {
 
        /* Find space in kernel_map for the page we're interested in */
        rv = vm_map_find (&kernel_map, object, IDX_TO_OFF(pindex),
-                         &kva, PAGE_SIZE,
-                         0,
-                         VM_MAPTYPE_NORMAL,
+                         &kva,
+                         PAGE_SIZE, PAGE_SIZE,
+                         0, VM_MAPTYPE_NORMAL,
                          VM_PROT_ALL, VM_PROT_ALL,
                          0);
        if (!rv) {
index a462399..8aff6e8 100644 (file)
@@ -329,7 +329,8 @@ again:
        vm_object_reference(shm_handle->shm_object);
        rv = vm_map_find(&p->p_vmspace->vm_map, 
                         shm_handle->shm_object, 0,
-                        &attach_va, size,
+                        &attach_va,
+                        size, PAGE_SIZE,
                         ((flags & MAP_FIXED) ? 0 : 1), 
                         VM_MAPTYPE_NORMAL,
                         prot, prot,
index f0b1501..9078ecd 100644 (file)
@@ -2278,12 +2278,18 @@ start_all_aps(u_int boot_addr)
                gd->gd_CMAP2 = &SMPpt[pg + 1];
                gd->gd_CMAP3 = &SMPpt[pg + 2];
                gd->gd_PMAP1 = &SMPpt[pg + 3];
-               gd->gd_GDMAP1 = &PTD[KGDTDI+x];
                gd->gd_CADDR1 = ps->CPAGE1;
                gd->gd_CADDR2 = ps->CPAGE2;
                gd->gd_CADDR3 = ps->CPAGE3;
                gd->gd_PADDR1 = (unsigned *)ps->PPAGE1;
-               gd->gd_GDADDR1= (unsigned *)VADDR(KGDTDI+x, 0);
+
+               /*
+                * Per-cpu pmap for get_ptbase().
+                */
+               gd->gd_GDADDR1= (unsigned *)
+                       kmem_alloc_nofault(&kernel_map, SEG_SIZE, SEG_SIZE);
+               gd->gd_GDMAP1 = &PTD[(vm_offset_t)gd->gd_GDADDR1 >> PDRSHIFT];
+
                gd->mi.gd_ipiq = (void *)kmem_alloc(&kernel_map, sizeof(lwkt_ipiq) * (mp_naps + 1));
                bzero(gd->mi.gd_ipiq, sizeof(lwkt_ipiq) * (mp_naps + 1));
 
@@ -2351,7 +2357,6 @@ start_all_aps(u_int boot_addr)
        return ncpus - 1;
 }
 
-
 /*
  * load the 1st level AP boot code into base memory.
  */
index 0b483e9..d8927fb 100644 (file)
@@ -474,12 +474,12 @@ pmap_bootstrap(vm_paddr_t firstaddr, vm_paddr_t loadaddr)
        gd->gd_CMAP2 = &SMPpt[pg + 1];
        gd->gd_CMAP3 = &SMPpt[pg + 2];
        gd->gd_PMAP1 = &SMPpt[pg + 3];
-       gd->gd_GDMAP1 = &PTD[KGDTDI];
+       gd->gd_GDMAP1 = &PTD[APTDPTDI];
        gd->gd_CADDR1 = CPU_prvspace[0].CPAGE1;
        gd->gd_CADDR2 = CPU_prvspace[0].CPAGE2;
        gd->gd_CADDR3 = CPU_prvspace[0].CPAGE3;
        gd->gd_PADDR1 = (unsigned *)CPU_prvspace[0].PPAGE1;
-       gd->gd_GDADDR1= (unsigned *)VADDR(KGDTDI, 0);
+       gd->gd_GDADDR1= (unsigned *)VADDR(APTDPTDI, 0);
 
        cpu_invltlb();
 }
@@ -1282,7 +1282,7 @@ pmap_release_free_page(struct pmap *pmap, vm_page_t p)
         */
        if (p->pindex == PTDPTDI) {
                bzero(pde + KPTDI, nkpt * PTESIZE);
-               bzero(pde + KGDTDI, (NPDEPG - KGDTDI) * PTESIZE);
+               bzero(pde + MPPTDI, (NPDEPG - MPPTDI) * PTESIZE);
                vm_page_flag_set(p, PG_ZERO);
                vm_page_wakeup(p);
        } else {
@@ -3291,7 +3291,7 @@ pmap_mapdev(vm_paddr_t pa, vm_size_t size)
        offset = pa & PAGE_MASK;
        size = roundup(offset + size, PAGE_SIZE);
 
-       va = kmem_alloc_nofault(&kernel_map, size);
+       va = kmem_alloc_nofault(&kernel_map, size, PAGE_SIZE);
        if (!va)
                panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 
index 7973fcd..fdf6385 100644 (file)
 #define        NKPT            30                      /* starting general kptds */
 #endif
 
-#define NKGDPDE                SMP_MAXCPU              /* 16 typical */
-
 #ifndef NKPDE
-#define NKPDE  (KVA_PAGES - NKGDPDE - 2)       /* max general kptds */
+#define NKPDE  (KVA_PAGES - 2) /* max general kptds */
 #endif
-#if NKPDE > KVA_PAGES - NKGDPDE - 2
-#error "Maximum NKPDE is KVA_PAGES - NKGDPDE - 2"
+#if NKPDE > KVA_PAGES - 2
+#error "Maximum NKPDE is KVA_PAGES - 2"
 #endif
 
 /*
  *               of special PTDs.
  *
  *     +---------------+ End of kernel memory
- *     |   APTDPTDI    | currently unused alt page table map
+ *     |   APTDPTDI    | alt page table map for cpu 0
  *     +---------------+
  *     |    MPPTDI     | globaldata array
  *     +---------------+
  *     |               |
- *     |               | per-cpu page table self-maps
- *     |KGDTDI[NKGDPDE]|
- *     +---------------+
- *     |               |
  *     |               |
  *     |               |
  *     |               | general kernel page table pages
  * directory itself and any indexes >= KPTDI will correspond to the
  * common kernel page directory pages since all pmaps map the same ones.
  *
- * We no longer use APTmap or APTDpde (corresponding to APTDPTDI).  This
- * was a global page table map for accessing pmaps other then the current
- * pmap.  Instead we now implement an alternative pmap for EACH cpu
- * use the ptds at KGDTDI.
+ * APTmap / APTDpde are now used by cpu 0 as its alternative page table
+ * mapping via gd_GDMAP1 and GD_GDADDR1.  The remaining cpus allocate
+ * their own dynamically.
  *
  * Even though the maps are per-cpu the PTD entries are stored in the
  * individual pmaps and obviously not replicated so each process pmap
  */
 #define        APTDPTDI        (NPDEPG-1)      /* alt ptd entry that points to APTD */
 #define MPPTDI         (APTDPTDI-1)    /* globaldata array ptd entry */
-#define KGDTDI         (MPPTDI-NKGDPDE) /* per-cpu page table mappings */
-#define        KPTDI           (KGDTDI-NKPDE)  /* start of kernel virtual pde's */
+#define        KPTDI           (MPPTDI-NKPDE)  /* start of kernel virtual pde's */
 #define        PTDPTDI         (KPTDI-1)       /* ptd entry that points to ptd! */
 #define        UMAXPTDI        (PTDPTDI-1)     /* ptd entry for user space end */
 
index 6a24eb1..cd343db 100644 (file)
@@ -3597,7 +3597,7 @@ pmap_mapdev(vm_paddr_t pa, vm_size_t size)
        offset = pa & PAGE_MASK;
        size = roundup(offset + size, PAGE_SIZE);
 
-       va = kmem_alloc_nofault(&kernel_map, size);
+       va = kmem_alloc_nofault(&kernel_map, size, PAGE_SIZE);
        if (va == 0)
                panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 
@@ -3624,7 +3624,7 @@ pmap_mapdev_uncacheable(vm_paddr_t pa, vm_size_t size)
        offset = pa & PAGE_MASK;
        size = roundup(offset + size, PAGE_SIZE);
 
-       va = kmem_alloc_nofault(&kernel_map, size);
+       va = kmem_alloc_nofault(&kernel_map, size, PAGE_SIZE);
        if (va == 0)
                panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 
index 0c8594f..1ef6682 100644 (file)
@@ -2843,7 +2843,7 @@ pmap_mapdev(vm_paddr_t pa, vm_size_t size)
        offset = pa & PAGE_MASK;
        size = roundup(offset + size, PAGE_SIZE);
 
-       va = kmem_alloc_nofault(&kernel_map, size);
+       va = kmem_alloc_nofault(&kernel_map, size, PAGE_SIZE);
        if (!va)
                panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 
index 4de57af..056dce9 100644 (file)
@@ -403,7 +403,7 @@ vm_contig_pg_kmap(int start, u_long size, vm_map_t map, int flags)
         */
        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
        vm_map_lock(map);
-       if (vm_map_findspace(map, vm_map_min(map), size, 1, 0, &addr) !=
+       if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE, 0, &addr) !=
            KERN_SUCCESS) {
                /*
                 * XXX We almost never run out of kernel virtual
index 5bcb989..1ea62be 100644 (file)
@@ -76,7 +76,7 @@ int swapon (struct proc *, void *, int *);
 int grow (struct proc *, size_t);
 int kernacc(c_caddr_t, int, int);
 vm_offset_t kmem_alloc3 (vm_map_t, vm_size_t, int flags);
-vm_offset_t kmem_alloc_nofault (vm_map_t, vm_size_t);
+vm_offset_t kmem_alloc_nofault (vm_map_t, vm_size_t, vm_size_t);
 vm_offset_t kmem_alloc_pageable (vm_map_t, vm_size_t);
 vm_offset_t kmem_alloc_wait (vm_map_t, vm_size_t);
 void kmem_free (vm_map_t, vm_offset_t, vm_size_t);
index 792eef5..817861c 100644 (file)
@@ -106,9 +106,8 @@ kmem_alloc_pageable(vm_map_t map, vm_size_t size)
        size = round_page(size);
        addr = vm_map_min(map);
        result = vm_map_find(map, NULL, (vm_offset_t) 0,
-                            &addr, size,
-                            TRUE, 
-                            VM_MAPTYPE_NORMAL,
+                            &addr, size, PAGE_SIZE,
+                            TRUE, VM_MAPTYPE_NORMAL,
                             VM_PROT_ALL, VM_PROT_ALL,
                             0);
        if (result != KERN_SUCCESS) {
@@ -123,7 +122,7 @@ kmem_alloc_pageable(vm_map_t map, vm_size_t size)
  *     Same as kmem_alloc_pageable, except that it create a nofault entry.
  */
 vm_offset_t
-kmem_alloc_nofault(vm_map_t map, vm_size_t size)
+kmem_alloc_nofault(vm_map_t map, vm_size_t size, vm_size_t align)
 {
        vm_offset_t addr;
        int result;
@@ -131,9 +130,8 @@ kmem_alloc_nofault(vm_map_t map, vm_size_t size)
        size = round_page(size);
        addr = vm_map_min(map);
        result = vm_map_find(map, NULL, (vm_offset_t) 0,
-                            &addr, size,
-                            TRUE,
-                            VM_MAPTYPE_NORMAL,
+                            &addr, size, align,
+                            TRUE, VM_MAPTYPE_NORMAL,
                             VM_PROT_ALL, VM_PROT_ALL,
                             MAP_NOFAULT);
        if (result != KERN_SUCCESS) {
@@ -169,7 +167,7 @@ kmem_alloc3(vm_map_t map, vm_size_t size, int kmflags)
         * offset within the kernel map.
         */
        vm_map_lock(map);
-       if (vm_map_findspace(map, vm_map_min(map), size, 1, 0, &addr)) {
+       if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE, 0, &addr)) {
                vm_map_unlock(map);
                if (kmflags & KM_KRESERVE)
                        vm_map_entry_krelease(count);
@@ -266,9 +264,8 @@ kmem_suballoc(vm_map_t parent, vm_map_t result,
 
        *min = (vm_offset_t) vm_map_min(parent);
        ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
-                         min, size,
-                         TRUE,
-                         VM_MAPTYPE_UNSPECIFIED,
+                         min, size, PAGE_SIZE,
+                         TRUE, VM_MAPTYPE_UNSPECIFIED,
                          VM_PROT_ALL, VM_PROT_ALL,
                          0);
        if (ret != KERN_SUCCESS) {
@@ -307,8 +304,8 @@ kmem_alloc_wait(vm_map_t map, vm_size_t size)
                 * to lock out sleepers/wakers.
                 */
                vm_map_lock(map);
-               if (vm_map_findspace(map, vm_map_min(map), size,
-                                    1, 0, &addr) == 0) {
+               if (vm_map_findspace(map, vm_map_min(map),
+                                    size, PAGE_SIZE, 0, &addr) == 0) {
                        break;
                }
                /* no space now; see if we can ever get space */
index c7e13d9..f1290f8 100644 (file)
@@ -989,7 +989,7 @@ vm_map_insert(vm_map_t map, int *countp,
  */
 int
 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length,
-                vm_offset_t align, int flags, vm_offset_t *addr)
+                vm_size_t align, int flags, vm_offset_t *addr)
 {
        vm_map_entry_t entry, next;
        vm_offset_t end;
@@ -1100,7 +1100,7 @@ retry:
  */
 int
 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
-           vm_offset_t *addr,  vm_size_t length,
+           vm_offset_t *addr,  vm_size_t length, vm_size_t align,
            boolean_t fitit,
            vm_maptype_t maptype,
            vm_prot_t prot, vm_prot_t max,
@@ -1115,7 +1115,7 @@ vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
        vm_map_lock(map);
        if (fitit) {
-               if (vm_map_findspace(map, start, length, 1, 0, addr)) {
+               if (vm_map_findspace(map, start, length, align, 0, addr)) {
                        vm_map_unlock(map);
                        vm_map_entry_release(count);
                        return (KERN_NO_SPACE);
index e33e98c..4188233 100644 (file)
@@ -440,12 +440,11 @@ void vm_map_entry_krelease(int);
 vm_map_t vm_map_create (vm_map_t, struct pmap *, vm_offset_t, vm_offset_t);
 int vm_map_delete (vm_map_t, vm_offset_t, vm_offset_t, int *);
 int vm_map_find (vm_map_t, vm_object_t, vm_ooffset_t,
-                vm_offset_t *, vm_size_t, 
-                boolean_t, 
-                vm_maptype_t,
+                vm_offset_t *, vm_size_t, vm_size_t,
+                boolean_t, vm_maptype_t,
                 vm_prot_t, vm_prot_t, 
                 int);
-int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_offset_t,
+int vm_map_findspace (vm_map_t, vm_offset_t, vm_size_t, vm_size_t,
                      int, vm_offset_t *);
 int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
 void vm_map_init (struct vm_map *, vm_offset_t, vm_offset_t, pmap_t);
index 15e25c8..fd05e4d 100644 (file)
@@ -1257,11 +1257,13 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
                rv = vm_map_stack(map, *addr, size, flags,
                                  prot, maxprot, docow);
        } else if (flags & MAP_VPAGETABLE) {
-               rv = vm_map_find(map, object, foff, addr, size, fitit,
-                                VM_MAPTYPE_VPAGETABLE, prot, maxprot, docow);
+               rv = vm_map_find(map, object, foff, addr, size, PAGE_SIZE,
+                                fitit, VM_MAPTYPE_VPAGETABLE,
+                                prot, maxprot, docow);
        } else {
-               rv = vm_map_find(map, object, foff, addr, size, fitit,
-                                VM_MAPTYPE_NORMAL, prot, maxprot, docow);
+               rv = vm_map_find(map, object, foff, addr, size, PAGE_SIZE,
+                                fitit, VM_MAPTYPE_NORMAL,
+                                prot, maxprot, docow);
        }
 
        if (rv != KERN_SUCCESS) {
index b9745a1..808b4a5 100644 (file)
@@ -111,10 +111,9 @@ sys_obreak(struct obreak_args *uap)
                        error = ENOMEM;
                        goto done;
                }
-               rv = vm_map_find(&vm->vm_map, NULL, 0,
-                                &old, diff,
-                                FALSE,
-                                VM_MAPTYPE_NORMAL,
+               rv = vm_map_find(&vm->vm_map, NULL, 0, &old,
+                                diff, PAGE_SIZE,
+                                FALSE, VM_MAPTYPE_NORMAL,
                                 VM_PROT_ALL, VM_PROT_ALL,
                                 0);
                if (rv != KERN_SUCCESS) {