From 7fa7744ba7130d1ba1a3fb602ee16882632e380e Mon Sep 17 00:00:00 2001
From: Hiten Pandya
Date: Wed, 15 Oct 2003 16:48:04 +0000
Subject: [PATCH] Second contigmalloc() cleanup:

* Move the contigmalloc/vm_contig_pg API into its own file, vm_contig.c.

* Give contigmalloc1() a more sensible name to reflect its purpose:
  contigmalloc_map().
---
 sys/conf/files       |   3 +-
 sys/kern/uipc_mbuf.c |   4 +-
 sys/vm/vm_contig.c   | 406 +++++++++++++++++++++++++++++++++++++++++++
 sys/vm/vm_kern.h     |   4 +-
 sys/vm/vm_page.c     | 316 +--------------------------------
 5 files changed, 413 insertions(+), 320 deletions(-)
 create mode 100644 sys/vm/vm_contig.c

diff --git a/sys/conf/files b/sys/conf/files
index 448f4c41cc..71e2413cc9 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1,5 +1,5 @@
 # $FreeBSD: src/sys/conf/files,v 1.340.2.137 2003/06/04 17:10:30 sam Exp $
-# $DragonFly: src/sys/conf/files,v 1.17 2003/10/13 18:01:23 dillon Exp $
+# $DragonFly: src/sys/conf/files,v 1.18 2003/10/15 16:48:03 hmp Exp $
 #
 # The long compile-with and dependency lines are required because of
 # limitations in config: backslash-newline doesn't work in strings, and
@@ -1303,6 +1303,7 @@ vm/default_pager.c	standard
 vm/device_pager.c	standard
 vm/phys_pager.c	standard
 vm/swap_pager.c	standard
+vm/vm_contig.c	standard
 vm/vm_fault.c	standard
 vm/vm_glue.c	standard
 vm/vm_init.c	standard
diff --git a/sys/kern/uipc_mbuf.c b/sys/kern/uipc_mbuf.c
index 2a23006674..b2a900186d 100644
--- a/sys/kern/uipc_mbuf.c
+++ b/sys/kern/uipc_mbuf.c
@@ -32,7 +32,7 @@
  *
  * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
  * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
- * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.12 2003/08/26 21:09:02 rob Exp $
+ * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.13 2003/10/15 16:48:03 hmp Exp $
  */
 
 #include "opt_param.h"
@@ -354,7 +354,7 @@ m_clalloc(ncl, how)
 			mbstat.m_wait++;
 			p = 0;
 		} else {
-			p = contigmalloc1(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
+			p = contigmalloc_map(MCLBYTES * ncl, M_DEVBUF, M_WAITOK, 0ul,
 			    ~0ul, PAGE_SIZE, 0, mb_map);
 		}
 #else
diff --git a/sys/vm/vm_contig.c b/sys/vm/vm_contig.c
new file mode 100644
index 0000000000..21adeb8708
--- /dev/null
+++ b/sys/vm/vm_contig.c
@@ -0,0 +1,406 @@
+/*
+ * Copyright (c) 2003 Hiten Pandya .
+ * All rights reserved.
+ *
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * The Mach Operating System project at Carnegie-Mellon University.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
+ * $DragonFly: src/sys/vm/vm_contig.c,v 1.1 2003/10/15 16:48:04 hmp Exp $
+ */
+
+/*
+ * Copyright (c) 1987, 1990 Carnegie-Mellon University.
+ * All rights reserved.
+ *
+ * Authors: Avadis Tevanian, Jr., Michael Wayne Young
+ *
+ * Permission to use, copy, modify and distribute this software and
+ * its documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
+ * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
+ *  School of Computer Science
+ *  Carnegie Mellon University
+ *  Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ */
+
+/*
+ * Contiguous memory allocation API.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/lock.h>
+#include <sys/vmmeter.h>
+#include <sys/vnode.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_page2.h>
+
+/*
+ * vm_contig_pg_clean:
+ *
+ * Do a thorough cleanup of the specified 'queue', which can be either
+ * PQ_ACTIVE or PQ_INACTIVE, by doing a walkthrough.  If the page is not
+ * marked dirty, it is shoved into the page cache, provided no one has
+ * currently acquired it, otherwise localized action per object type
+ * is taken for cleanup:
+ *
+ *	In the OBJT_VNODE case, the whole page range is cleaned up
+ *	using the vm_object_page_clean() routine, by specifying a
+ *	start and end of '0'.
+ *
+ *	Otherwise if the object is of any other type, the generic
+ *	pageout (daemon) flush routine is invoked.
+ */
+static int
+vm_contig_pg_clean(int queue)
+{
+	vm_object_t object;
+	vm_page_t m, m_tmp, next;
+
+	for (m = TAILQ_FIRST(&vm_page_queues[queue].pl); m != NULL; m = next) {
+		KASSERT(m->queue == queue,
+		    ("vm_contig_clean: page %p's queue is not %d", m, queue));
+
+		next = TAILQ_NEXT(m, pageq);
+
+		if (vm_page_sleep_busy(m, TRUE, "vpctw0"))
+			return (TRUE);
+
+		vm_page_test_dirty(m);
+		if (m->dirty) {
+			object = m->object;
+			if (object->type == OBJT_VNODE) {
+				vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY,
+				    curthread);
+				vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
+				VOP_UNLOCK(object->handle, 0, curthread);
+				return (TRUE);
+			} else if (object->type == OBJT_SWAP ||
+				   object->type == OBJT_DEFAULT) {
+				m_tmp = m;
+				vm_pageout_flush(&m_tmp, 1, 0);
+				return (TRUE);
+			}
+		}
+
+		if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
+			vm_page_cache(m);
+	}
+
+	return (FALSE);
+}
+
+/*
+ * vm_contig_pg_alloc:
+ *
+ * Allocate contiguous pages from the VM.  This function does not
+ * map the allocated pages into the kernel map, otherwise it is
+ * impossible to make large allocations (i.e. >2G).
+ *
+ * Malloc()'s data structures have been used for collection of
+ * statistics and for allocations of less than a page.
+ */
+int
+vm_contig_pg_alloc(
+	unsigned long size,
+	unsigned long low,
+	unsigned long high,
+	unsigned long alignment,
+	unsigned long boundary)
+{
+	int i, s, start, pass;
+	vm_offset_t phys;
+	vm_page_t pga = vm_page_array;
+
+	size = round_page(size);
+	if (size == 0)
+		panic("vm_contig_pg_alloc: size must not be 0");
+	if ((alignment & (alignment - 1)) != 0)
+		panic("vm_contig_pg_alloc: alignment must be a power of 2");
+	if ((boundary & (boundary - 1)) != 0)
+		panic("vm_contig_pg_alloc: boundary must be a power of 2");
+
+	start = 0;
+	for (pass = 0; pass <= 1; pass++) {
+		s = splvm();
+again:
+		/*
+		 * Find first page in array that is free, within range, aligned, and
+		 * such that the boundary won't be crossed.
+		 */
+		for (i = start; i < vmstats.v_page_count; i++) {
+			int pqtype;
+			phys = VM_PAGE_TO_PHYS(&pga[i]);
+			pqtype = pga[i].queue - pga[i].pc;
+			if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
+			    (phys >= low) && (phys < high) &&
+			    ((phys & (alignment - 1)) == 0) &&
+			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
+				break;
+		}
+
+		/*
+		 * If we cannot find the page in the given range, or we have
+		 * crossed the boundary, call the vm_contig_pg_clean() function
+		 * to flush out the queues and return the pages to their
+		 * normal state.
+		 */
+		if ((i == vmstats.v_page_count) ||
+		    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
+
+again1:
+			if (vm_contig_pg_clean(PQ_INACTIVE))
+				goto again1;
+			if (vm_contig_pg_clean(PQ_ACTIVE))
+				goto again1;
+
+			splx(s);
+			continue;	/* next pass */
+		}
+		start = i;
+
+		/*
+		 * Check successive pages for contiguous and free.
+		 */
+		for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
+			int pqtype;
+			pqtype = pga[i].queue - pga[i].pc;
+			if ((VM_PAGE_TO_PHYS(&pga[i]) !=
+			    (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
+			    ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
+				start++;
+				goto again;
+			}
+		}
+
+		for (i = start; i < (start + size / PAGE_SIZE); i++) {
+			int pqtype;
+			vm_page_t m = &pga[i];
+
+			pqtype = m->queue - m->pc;
+			if (pqtype == PQ_CACHE) {
+				vm_page_busy(m);
+				vm_page_free(m);
+			}
+			vm_page_unqueue_nowakeup(m);
+			m->valid = VM_PAGE_BITS_ALL;
+			if (m->flags & PG_ZERO)
+				vm_page_zero_count--;
+			m->flags = 0;
+			KASSERT(m->dirty == 0,
+			    ("vm_contig_pg_alloc: page %p was dirty", m));
+			m->wire_count = 0;
+			m->busy = 0;
+			m->object = NULL;
+		}
+
+		/*
+		 * Our job is done, return the index of the first page in
+		 * vm_page_array.
+		 */
+
+		splx(s);
+		return (start);	/* aka &pga[start] */
+	}
+
+	/*
+	 * Failed.
+	 */
+	splx(s);
+	return (-1);
+}
+
+/*
+ * vm_contig_pg_free:
+ *
+ * Remove pages previously allocated by vm_contig_pg_alloc, and
+ * assume all references to the pages have been removed, and that
+ * it is OK to add them back to the free list.
+ */
+void
+vm_contig_pg_free(int start, u_long size)
+{
+	vm_page_t pga = vm_page_array;
+	int i;
+
+	size = round_page(size);
+	if (size == 0)
+		panic("vm_contig_pg_free: size must not be 0");
+
+	for (i = start; i < (start + size / PAGE_SIZE); i++) {
+		vm_page_free(&pga[i]);
+	}
+}
+
+/*
+ * vm_contig_pg_kmap:
+ *
+ * Map a previously allocated (vm_contig_pg_alloc) range of pages from
+ * vm_page_array[] into the KVA.  Once mapped, the pages are part of
+ * the kernel, and are to be freed with kmem_free(kernel_map, addr, size).
+ */
+vm_offset_t
+vm_contig_pg_kmap(int start, u_long size, vm_map_t map)
+{
+	vm_offset_t addr, tmp_addr;
+	vm_page_t pga = vm_page_array;
+	int i, s, count;
+
+	size = round_page(size);
+	if (size == 0)
+		panic("vm_contig_pg_kmap: size must not be 0");
+
+	s = splvm();	/* XXX: is this really needed? */
+
+	/*
+	 * We've found a contiguous chunk that meets our requirements.
+	 * Allocate KVM, and assign phys pages and return a kernel VM
+	 * pointer.
+	 */
+	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
+	vm_map_lock(map);
+	if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr) !=
+	    KERN_SUCCESS) {
+		/*
+		 * XXX We almost never run out of kernel virtual
+		 * space, so we don't make the allocated memory
+		 * above available.
+		 */
+		vm_map_unlock(map);
+		vm_map_entry_release(count);
+		splx(s);
+		return (0);
+	}
+	vm_object_reference(kernel_object);
+	vm_map_insert(map, &count,
+	    kernel_object, addr - VM_MIN_KERNEL_ADDRESS,
+	    addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
+	vm_map_unlock(map);
+	vm_map_entry_release(count);
+
+	tmp_addr = addr;
+	for (i = start; i < (start + size / PAGE_SIZE); i++) {
+		vm_page_t m = &pga[i];
+		vm_page_insert(m, kernel_object,
+		    OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
+		tmp_addr += PAGE_SIZE;
+	}
+	vm_map_wire(map, addr, addr + size, FALSE);
+
+	splx(s);
+	return (addr);
+}
+
+void *
+contigmalloc(
+	unsigned long size,	/* should be size_t here and for malloc() */
+	struct malloc_type *type,
+	int flags,
+	unsigned long low,
+	unsigned long high,
+	unsigned long alignment,
+	unsigned long boundary)
+{
+	return contigmalloc_map(size, type, flags, low, high, alignment,
+	    boundary, kernel_map);
+}
+
+void *
+contigmalloc_map(
+	unsigned long size,	/* should be size_t here and for malloc() */
+	struct malloc_type *type,
+	int flags,
+	unsigned long low,
+	unsigned long high,
+	unsigned long alignment,
+	unsigned long boundary,
+	vm_map_t map)
+{
+	int index;
+	void *rv;
+
+	index = vm_contig_pg_alloc(size, low, high, alignment, boundary);
+	if (index < 0) {
+		printf("contigmalloc_map: failed in index < 0 case!");
+		return NULL;
+	}
+
+	rv = (void *) vm_contig_pg_kmap(index, size, map);
+	if (!rv)
+		vm_contig_pg_free(index, size);
+
+	return rv;
+}
+
+void
+contigfree(void *addr, unsigned long size, struct malloc_type *type)
+{
+	kmem_free(kernel_map, (vm_offset_t)addr, size);
+}
+
+vm_offset_t
+vm_page_alloc_contig(
+	vm_offset_t size,
+	vm_offset_t low,
+	vm_offset_t high,
+	vm_offset_t alignment)
+{
+	return ((vm_offset_t)contigmalloc_map(size, M_DEVBUF, M_NOWAIT, low,
+	    high, alignment, 0ul, kernel_map));
+}
diff --git a/sys/vm/vm_kern.h b/sys/vm/vm_kern.h
index 57108c6fbf..6ace1c82c4 100644
--- a/sys/vm/vm_kern.h
+++ b/sys/vm/vm_kern.h
@@ -62,7 +62,7 @@
  * rights to redistribute these changes.
  *
  * $FreeBSD: src/sys/vm/vm_kern.h,v 1.22 2000/02/16 21:11:31 dillon Exp $
- * $DragonFly: src/sys/vm/vm_kern.h,v 1.4 2003/09/26 19:23:34 dillon Exp $
+ * $DragonFly: src/sys/vm/vm_kern.h,v 1.5 2003/10/15 16:48:04 hmp Exp $
  */
 
 #ifndef _VM_VM_KERN_H_
@@ -83,7 +83,7 @@ extern u_int vm_kmem_size;
 extern vm_offset_t kernel_vm_end;
 /* XXX - elsewhere? */
 struct malloc_type;
-extern void *contigmalloc1(u_long, struct malloc_type *, int, u_long, u_long,
+extern void *contigmalloc_map(u_long, struct malloc_type *, int, u_long, u_long,
 	u_long, u_long, vm_map_t);
 
 #endif	/* _VM_VM_KERN_H_ */
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 7faf6021a0..5cdee09382 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -35,7 +35,7 @@
  *
  * from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
  * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $
- * $DragonFly: src/sys/vm/vm_page.c,v 1.13 2003/10/15 16:03:04 hmp Exp $
+ * $DragonFly: src/sys/vm/vm_page.c,v 1.14 2003/10/15 16:48:04 hmp Exp $
  */
 
 /*
@@ -1702,320 +1702,6 @@ vm_page_test_dirty(vm_page_t m)
 	}
 }
 
-/*
- * vm_contig_pg_clean:
- *
- * Do a thorough cleanup of the specified 'queue', which can be either
- * PQ_ACTIVE or PQ_INACTIVE by doing a walkthrough.  If the page is not
- * marked dirty, it is shoved into the page cache, provided no one has
- * currently aqcuired it, otherwise localized action per object type
- * is taken for cleanup:
- *
- *	In the OBJT_VNODE case, the whole page range is cleaned up
- *	using the vm_object_page_clean() routine, by specyfing a
- *	start and end of '0'.
- *
- *	Otherwise if the object is of any other type, the generic
- *	pageout (daemon) flush routine is invoked.
- */
-static int
-vm_contig_pg_clean(int queue)
-{
-	vm_object_t object;
-	vm_page_t m, m_tmp, next;
-
-	for (m = TAILQ_FIRST(&vm_page_queues[queue].pl); m != NULL; m = next) {
-		KASSERT(m->queue == queue,
-		    ("vm_contig_clean: page %p's queue is not %d", m, queue));
-
-		next = TAILQ_NEXT(m, pageq);
-
-		if (vm_page_sleep_busy(m, TRUE, "vpctw0"))
-			return (TRUE);
-
-		vm_page_test_dirty(m);
-		if (m->dirty) {
-			object = m->object;
-			if (object->type == OBJT_VNODE) {
-				vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY,
-				    curthread);
-				vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
-				VOP_UNLOCK(object->handle, 0, curthread);
-				return (TRUE);
-			} else if (object->type == OBJT_SWAP ||
-				   object->type == OBJT_DEFAULT) {
-				m_tmp = m;
-				vm_pageout_flush(&m_tmp, 1, 0);
-				return (TRUE);
-			}
-		}
-
-		if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
-			vm_page_cache(m);
-	}
-
-	return (FALSE);
-}
-
-/*
- * vm_contig_pg_alloc:
- *
- * Allocate contiguous pages from the VM.  This function does not
- * map the allocated pages into the kernel map, otherwise it is
- * impossible to make large allocations (i.e. >2G).
- *
- * Malloc()'s data structures have been used for collection of
- * statistics and for allocations of less than a page.
- *
- */
-int
-vm_contig_pg_alloc(
-	unsigned long size,
-	unsigned long low,
-	unsigned long high,
-	unsigned long alignment,
-	unsigned long boundary)
-{
-	int i, s, start, pass;
-	vm_offset_t phys;
-	vm_page_t pga = vm_page_array;
-
-	size = round_page(size);
-	if (size == 0)
-		panic("vm_contig_pg_alloc: size must not be 0");
-	if ((alignment & (alignment - 1)) != 0)
-		panic("vm_contig_pg_alloc: alignment must be a power of 2");
-	if ((boundary & (boundary - 1)) != 0)
-		panic("vm_contig_pg_alloc: boundary must be a power of 2");
-
-	start = 0;
-	for (pass = 0; pass <= 1; pass++) {
-		s = splvm();
-again:
-		/*
-		 * Find first page in array that is free, within range, aligned, and
-		 * such that the boundary won't be crossed.
-		 */
-		for (i = start; i < vmstats.v_page_count; i++) {
-			int pqtype;
-			phys = VM_PAGE_TO_PHYS(&pga[i]);
-			pqtype = pga[i].queue - pga[i].pc;
-			if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
-			    (phys >= low) && (phys < high) &&
-			    ((phys & (alignment - 1)) == 0) &&
-			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
-				break;
-		}
-
-		/*
-		 * If we cannot find the page in the given range, or we have
-		 * crossed the boundary, call the vm_contig_pg_clean() function
-		 * for flushing out the queues, and returning it back to
-		 * normal state.
-		 */
-		if ((i == vmstats.v_page_count) ||
-		    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
-
-again1:
-			if (vm_contig_pg_clean(PQ_INACTIVE))
-				goto again1;
-			if (vm_contig_pg_clean(PQ_ACTIVE))
-				goto again1;
-
-			splx(s);
-			continue;	/* next pass */
-		}
-		start = i;
-
-		/*
-		 * Check successive pages for contiguous and free.
-		 */
-		for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
-			int pqtype;
-			pqtype = pga[i].queue - pga[i].pc;
-			if ((VM_PAGE_TO_PHYS(&pga[i]) !=
-			    (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
-			    ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
-				start++;
-				goto again;
-			}
-		}
-
-		for (i = start; i < (start + size / PAGE_SIZE); i++) {
-			int pqtype;
-			vm_page_t m = &pga[i];
-
-			pqtype = m->queue - m->pc;
-			if (pqtype == PQ_CACHE) {
-				vm_page_busy(m);
-				vm_page_free(m);
-			}
-			vm_page_unqueue_nowakeup(m);
-			m->valid = VM_PAGE_BITS_ALL;
-			if (m->flags & PG_ZERO)
-				vm_page_zero_count--;
-			m->flags = 0;
-			KASSERT(m->dirty == 0,
-			    ("vm_contig_pg_alloc: page %p was dirty", m));
-			m->wire_count = 0;
-			m->busy = 0;
-			m->object = NULL;
-		}
-
-		/*
-		 * Our job is done, return the index page of vm_page_array.
-		 */
-
-		splx(s);
-		return (start);	/* aka &pga[start] */
-	}
-
-	/*
-	 * Failed.
-	 */
-	splx(s);
-	return (-1);
-}
-
-/*
- * vm_contig_pg_free:
- *
- * Remove pages previously allocated by vm_contig_pg_alloc, and
- * assume all references to the pages have been removed, and that
- * it is OK to add them back to the free list.
- */
-void
-vm_contig_pg_free(int start, u_long size)
-{
-	vm_page_t pga = vm_page_array;
-	int i;
-
-	size = round_page(size);
-	if (size == 0)
-		panic("vm_contig_pg_free: size must not be 0");
-
-	for (i = start; i < (start + size / PAGE_SIZE); i++) {
-		vm_page_free(&pga[i]);
-	}
-}
-
-/*
- * vm_contig_pg_kmap:
- *
- * Map previously allocated (vm_contig_pg_alloc) range of pages from
- * vm_page_array[] into the KVA.  Once mapped, the pages are part of
- * the Kernel, and are to free'ed with kmem_free(kernel_map, addr, size).
- */
-vm_offset_t
-vm_contig_pg_kmap(int start, u_long size, vm_map_t map)
-{
-	vm_offset_t addr, tmp_addr;
-	vm_page_t pga = vm_page_array;
-	int i, s, count;
-
-	size = round_page(size);
-	if (size == 0)
-		panic("vm_contig_pg_kmap: size must not be 0");
-
-	s = splvm();	/* XXX: is this really needed? */
-
-	/*
-	 * We've found a contiguous chunk that meets our requirements.
-	 * Allocate KVM, and assign phys pages and return a kernel VM
-	 * pointer.
-	 */
-	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
-	vm_map_lock(map);
-	if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr) !=
-	    KERN_SUCCESS) {
-		/*
-		 * XXX We almost never run out of kernel virtual
-		 * space, so we don't make the allocated memory
-		 * above available.
-		 */
-		vm_map_unlock(map);
-		vm_map_entry_release(count);
-		splx(s);
-		return (0);
-	}
-	vm_object_reference(kernel_object);
-	vm_map_insert(map, &count,
-	    kernel_object, addr - VM_MIN_KERNEL_ADDRESS,
-	    addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
-	vm_map_unlock(map);
-	vm_map_entry_release(count);
-
-	tmp_addr = addr;
-	for (i = start; i < (start + size / PAGE_SIZE); i++) {
-		vm_page_t m = &pga[i];
-		vm_page_insert(m, kernel_object,
-		    OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
-		tmp_addr += PAGE_SIZE;
-	}
-	vm_map_wire(map, addr, addr + size, FALSE);
-
-	splx(s);
-	return (addr);
-}
-
-void *
-contigmalloc(
-	unsigned long size,	/* should be size_t here and for malloc() */
-	struct malloc_type *type,
-	int flags,
-	unsigned long low,
-	unsigned long high,
-	unsigned long alignment,
-	unsigned long boundary)
-{
-	return contigmalloc1(size, type, flags, low, high, alignment, boundary,
-	    kernel_map);
-}
-
-void *
-contigmalloc1(
-	unsigned long size,	/* should be size_t here and for malloc() */
-	struct malloc_type *type,
-	int flags,
-	unsigned long low,
-	unsigned long high,
-	unsigned long alignment,
-	unsigned long boundary,
-	vm_map_t map)
-{
-	int index;
-	void *rv;
-
-	index = vm_contig_pg_alloc(size, low, high, alignment, boundary);
-	if (index < 0) {
-		printf("Contigmalloc1 failed in index < 0 case!");
-		return NULL;
-	}
-
-	rv = (void *) vm_contig_pg_kmap(index, size, map);
-	if (!rv)
-		vm_contig_pg_free(index, size);
-
-	return rv;
-}
-
-void
-contigfree(void *addr, unsigned long size, struct malloc_type *type)
-{
-	kmem_free(kernel_map, (vm_offset_t)addr, size);
-}
-
-vm_offset_t
-vm_page_alloc_contig(
-	vm_offset_t size,
-	vm_offset_t low,
-	vm_offset_t high,
-	vm_offset_t alignment)
-{
-	return ((vm_offset_t)contigmalloc1(size, M_DEVBUF, M_NOWAIT, low, high,
-	    alignment, 0ul, kernel_map));
-}
-
 #include "opt_ddb.h"
 #ifdef DDB
 #include <ddb/ddb.h>
-- 
2.41.0
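
P.S. A hypothetical usage sketch, not part of the commit: the public entry
points keep their old signatures after this shuffle, so only direct
contigmalloc1() callers need the rename to contigmalloc_map(), as the
uipc_mbuf.c hunk shows.  The foo_* names, the buffer size, and the address
limits below are made up for illustration, and the header assumed to carry
the contigmalloc()/contigfree() prototypes is vm/vm_extern.h:

	/*
	 * Hypothetical driver attach/detach pair: allocate a 64KB DMA
	 * buffer from physical memory below 16MB, page-aligned, and
	 * not crossing a 64KB boundary.
	 */
	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/errno.h>
	#include <sys/malloc.h>
	#include <vm/vm.h>
	#include <vm/vm_extern.h>	/* assumed home of contigmalloc() */

	static void *foo_dmabuf;

	static int
	foo_attach(void)
	{
		foo_dmabuf = contigmalloc(64 * 1024, M_DEVBUF, M_NOWAIT,
		    0ul,		/* low: no lower physical bound */
		    1ul << 24,		/* high: stay below 16MB */
		    PAGE_SIZE,		/* alignment: must be a power of 2 */
		    64 * 1024);		/* boundary: do not cross 64KB */
		if (foo_dmabuf == NULL)
			return (ENOMEM);
		return (0);
	}

	static void
	foo_detach(void)
	{
		/* Pairs with contigmalloc(); releases the KVA via kernel_map. */
		contigfree(foo_dmabuf, 64 * 1024, M_DEVBUF);
	}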
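
P.P.S. The vm_map_t argument is what distinguishes contigmalloc_map() from
plain contigmalloc(): it lets the caller map the contiguous pages into a
submap instead of kernel_map, which is exactly what m_clalloc() does with
mb_map in the hunk above.  A sketch of that pattern follows; the wrapper
function is hypothetical, while the constants and mb_map come from the hunk
(the prototype is exported via vm/vm_kern.h, per this patch):

	#include <sys/param.h>
	#include <sys/malloc.h>
	#include <sys/mbuf.h>		/* MCLBYTES */
	#include <vm/vm.h>
	#include <vm/vm_kern.h>		/* contigmalloc_map(), mb_map */

	/*
	 * Grab 'ncl' clusters worth of physically contiguous pages and
	 * map them into the mbuf submap rather than kernel_map.
	 * Returns NULL if either the page hunt or the mapping failed.
	 */
	static caddr_t
	example_cluster_grab(int ncl)
	{
		return (contigmalloc_map(MCLBYTES * ncl, M_DEVBUF, M_WAITOK,
		    0ul,		/* low: any physical address */
		    ~0ul,		/* high: no upper limit */
		    PAGE_SIZE,		/* alignment */
		    0,			/* no boundary restriction */
		    mb_map));		/* target submap */
	}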