2 * Copyright (c) 2003 Hiten Pandya <hmp@backplane.com>.
5 * Copyright (c) 1991 Regents of the University of California.
8 * This code is derived from software contributed to Berkeley by
9 * The Mach Operating System project at Carnegie-Mellon University.
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by the University of
22 * California, Berkeley and its contributors.
23 * 4. Neither the name of the University nor the names of its contributors
24 * may be used to endorse or promote products derived from this software
25 * without specific prior written permission.
27 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
28 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
35 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
36 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
40 * $DragonFly: src/sys/vm/vm_contig.c,v 1.1 2003/10/15 16:48:04 hmp Exp $
44 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
45 * All rights reserved.
47 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
49 * Permission to use, copy, modify and distribute this software and
50 * its documentation is hereby granted, provided that both the copyright
51 * notice and this permission notice appear in all copies of the
52 * software, derivative works or modified versions, and any portions
53 * thereof, and that both notices appear in supporting documentation.
55 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
56 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
57 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
59 * Carnegie Mellon requests users of this software to return to
61 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
62 * School of Computer Science
63 * Carnegie Mellon University
64 * Pittsburgh PA 15213-3890
66 * any improvements or extensions that they make and grant Carnegie the
67 * rights to redistribute these changes.
71 * Contiguous memory allocation API.
74 #include <sys/param.h>
75 #include <sys/systm.h>
76 #include <sys/malloc.h>
79 #include <sys/vmmeter.h>
80 #include <sys/vnode.h>
83 #include <vm/vm_param.h>
84 #include <vm/vm_kern.h>
86 #include <vm/vm_map.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_page.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_pager.h>
91 #include <vm/vm_extern.h>
92 #include <vm/vm_page2.h>
97 * Do a thorough cleanup of the specified 'queue', which can be either
98 * PQ_ACTIVE or PQ_INACTIVE by doing a walkthrough. If the page is not
99 * marked dirty, it is shoved into the page cache, provided no one has
100 * currently acquired it, otherwise localized action per object type
101 * is taken for cleanup:
103 * In the OBJT_VNODE case, the whole page range is cleaned up
104 * using the vm_object_page_clean() routine, by specifying a
105 * start and end of '0'.
107 * Otherwise if the object is of any other type, the generic
108 * pageout (daemon) flush routine is invoked.
/*
 * Launder the named page queue so a subsequent contiguous allocation has a
 * better chance of succeeding.
 *
 * Parameters:
 *	queue	page queue index; asserted to match each page's m->queue.
 *		Callers in this file pass PQ_INACTIVE or PQ_ACTIVE.
 *
 * NOTE(review): this view of the file is missing lines (the function's
 * storage class/return type, the declaration of 'object', loop braces and
 * the loop tail) -- comments below describe only the visible code.
 */
111 vm_contig_pg_clean(int queue)
114 vm_page_t m, m_tmp, next;
/* Walk the queue; 'next' is latched before any operation can requeue 'm'. */
116 for (m = TAILQ_FIRST(&vm_page_queues[queue].pl); m != NULL; m = next) {
117 KASSERT(m->queue == queue,
118 ("vm_contig_clean: page %p's queue is not %d", m, queue));
120 next = TAILQ_NEXT(m, pageq);
/* Skip pages that are busy; sleeps under the "vpctw0" wmesg if so. */
122 if (vm_page_sleep_busy(m, TRUE, "vpctw0"))
/* Refresh m->dirty from the pmap before deciding how to clean. */
125 vm_page_test_dirty(m);
/*
 * Vnode-backed object: clean the entire object (start/end of 0)
 * synchronously with the vnode lock held.
 * NOTE(review): 'object' is declared on a line elided from this chunk.
 */
128 if (object->type == OBJT_VNODE) {
129 vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY,
131 vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
132 VOP_UNLOCK(object->handle, 0, curthread);
/* Swap/default objects: push this single page through the pageout path. */
134 } else if (object->type == OBJT_SWAP ||
135 object->type == OBJT_DEFAULT) {
137 vm_pageout_flush(&m_tmp, 1, 0);
/* Clean, unbusied, unheld pages can be moved to the cache (tail elided). */
142 if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
150 * vm_contig_pg_alloc:
152 * Allocate contiguous pages from the VM. This function does not
153 * map the allocated pages into the kernel map, otherwise it is
154 * impossible to make large allocations (i.e. >2G).
156 * Malloc()'s data structures have been used for collection of
157 * statistics and for allocations of less than a page.
/*
 * vm_contig_pg_alloc (tail of signature -- the leading parameters, e.g.
 * size/low/high, are on lines elided from this chunk):
 *
 *	alignment	required physical alignment; must be a power of 2.
 *	boundary	pages may not cross this physical boundary; must be a
 *			power of 2 (0 disables per the callers' usage --
 *			TODO confirm against full source).
 *
 * Returns the starting index into vm_page_array on success (see the final
 * return below); the failure return path is on elided lines.
 */
165 unsigned long alignment,
166 unsigned long boundary)
168 int i, s, start, pass;
170 vm_page_t pga = vm_page_array;
/* Round the request up to a whole number of pages; zero size is a bug. */
172 size = round_page(size);
174 panic("vm_contig_pg_alloc: size must not be 0");
/* (x & (x - 1)) == 0 iff x is a power of two (or zero). */
175 if ((alignment & (alignment - 1)) != 0)
176 panic("vm_contig_pg_alloc: alignment must be a power of 2");
177 if ((boundary & (boundary - 1)) != 0)
178 panic("vm_contig_pg_alloc: boundary must be a power of 2");
/* Two passes: after the first failure the queues are laundered and retried. */
181 for (pass = 0; pass <= 1; pass++) {
185 * Find first page in array that is free, within range, aligned, and
186 * such that the boundary won't be crossed.
188 for (i = start; i < vmstats.v_page_count; i++) {
190 phys = VM_PAGE_TO_PHYS(&pga[i]);
/* queue - pc yields the base queue type (PQ_FREE/PQ_CACHE/...). */
191 pqtype = pga[i].queue - pga[i].pc;
192 if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
193 (phys >= low) && (phys < high) &&
194 ((phys & (alignment - 1)) == 0) &&
/* XOR of first/last byte addresses masked by ~ (boundary-1): nonzero
 * bits above the boundary mean the run would cross it. */
195 (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
200 * If we cannot find the page in the given range, or we have
201 * crossed the boundary, call the vm_contig_pg_clean() function
202 * for flushing out the queues, and returning it back to
205 if ((i == vmstats.v_page_count) ||
206 ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
/* Launder inactive first, then active, before retrying the scan. */
209 if (vm_contig_pg_clean(PQ_INACTIVE))
211 if (vm_contig_pg_clean(PQ_ACTIVE))
215 continue; /* next pass */
220 * Check successive pages for contiguous and free.
222 for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
224 pqtype = pga[i].queue - pga[i].pc;
/* Run is broken if pages are not physically adjacent or not free/cached. */
225 if ((VM_PAGE_TO_PHYS(&pga[i]) !=
226 (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
227 ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
/* Claim the run: pull each page off its queue and mark it fully valid. */
233 for (i = start; i < (start + size / PAGE_SIZE); i++) {
235 vm_page_t m = &pga[i];
237 pqtype = m->queue - m->pc;
/* Cached pages need extra handling (lines elided from this chunk). */
238 if (pqtype == PQ_CACHE) {
242 vm_page_unqueue_nowakeup(m);
243 m->valid = VM_PAGE_BITS_ALL;
/* Page leaves the pre-zeroed pool; keep the zero-page count honest. */
244 if (m->flags & PG_ZERO)
245 vm_page_zero_count--;
247 KASSERT(m->dirty == 0,
248 ("vm_contig_pg_alloc: page %p was dirty", m));
255 * Our job is done, return the index page of vm_page_array.
259 return (start); /* aka &pga[start] */
272 * Remove pages previously allocated by vm_contig_pg_alloc, and
273 * assume all references to the pages have been removed, and that
274 * it is OK to add them back to the free list.
/*
 * Return a run previously obtained from vm_contig_pg_alloc() to the free
 * lists.  The caller guarantees no references to the pages remain.
 *
 * Parameters:
 *	start	index of the first page in vm_page_array.
 *	size	byte length of the run; rounded up to whole pages below.
 *
 * NOTE(review): braces and the declaration of 'i' are on elided lines.
 */
277 vm_contig_pg_free(int start, u_long size)
279 vm_page_t pga = vm_page_array;
282 size = round_page(size);
284 panic("vm_contig_pg_free: size must not be 0");
/* Free each page of the run individually. */
286 for (i = start; i < (start + size / PAGE_SIZE); i++) {
287 vm_page_free(&pga[i]);
294 * Map previously allocated (vm_contig_pg_alloc) range of pages from
295 * vm_page_array[] into the KVA. Once mapped, the pages are part of
296 * the Kernel, and are to be freed with kmem_free(kernel_map, addr, size).
/*
 * Map a run previously obtained from vm_contig_pg_alloc() into kernel
 * virtual address space via 'map'.
 *
 * Parameters:
 *	start	index of the first page in vm_page_array.
 *	size	byte length; rounded up to whole pages below.
 *	map	the vm_map to allocate KVA from (callers pass kernel_map).
 *
 * Returns the mapped kernel VA on success -- TODO confirm; the return
 * statements and several locals ('s', 'count', 'i', tmp_addr init) are on
 * lines elided from this chunk.
 */
299 vm_contig_pg_kmap(int start, u_long size, vm_map_t map)
301 vm_offset_t addr, tmp_addr;
302 vm_page_t pga = vm_page_array;
305 size = round_page(size);
307 panic("vm_contig_pg_kmap: size must not be 0");
309 s = splvm(); /* XXX: is this really needed? */
312 * We've found a contiguous chunk that meets our requirements.
313 * Allocate KVM, and assign phys pages and return a kernel VM
/* Pre-reserve map entries so the insert below cannot block on allocation. */
316 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
318 if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr) !=
321 * XXX We almost never run out of kernel virtual
322 * space, so we don't make the allocated memory
/* Failure path: give the reserved map entries back. */
326 vm_map_entry_release(count);
/* Take a reference for the mapping; insert the KVA range backed by
 * kernel_object at the offset corresponding to 'addr'. */
330 vm_object_reference(kernel_object);
331 vm_map_insert(map, &count,
332 kernel_object, addr - VM_MIN_KERNEL_ADDRESS,
333 addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
335 vm_map_entry_release(count);
/* Enter each physical page into kernel_object at successive offsets. */
338 for (i = start; i < (start + size / PAGE_SIZE); i++) {
339 vm_page_t m = &pga[i];
340 vm_page_insert(m, kernel_object,
341 OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
342 tmp_addr += PAGE_SIZE;
/* Wire the range so it is faulted in and cannot be paged out. */
344 vm_map_wire(map, addr, addr + size, FALSE);
/*
 * contigmalloc (tail of signature -- the function name line and the
 * flags/low/high parameters are on lines elided from this chunk):
 * public entry point; simply forwards to contigmalloc_map() against the
 * kernel_map.
 */
352 unsigned long size, /* should be size_t here and for malloc() */
353 struct malloc_type *type,
357 unsigned long alignment,
358 unsigned long boundary)
360 return contigmalloc_map(size, type, flags, low, high, alignment,
361 boundary, kernel_map);
/*
 * contigmalloc_map (tail of signature -- the function name line and the
 * flags/low/high/map parameters are on lines elided from this chunk):
 * allocate a contiguous run of pages, then map it into 'map'.  If the
 * mapping step fails the pages are released again (see the free below).
 *
 * Returns a kernel VA as void * on success -- TODO confirm; the return
 * statements are on elided lines.
 */
366 unsigned long size, /* should be size_t here and for malloc() */
367 struct malloc_type *type,
371 unsigned long alignment,
372 unsigned long boundary,
/* First grab the physical run... */
378 index = vm_contig_pg_alloc(size, low, high, alignment, boundary);
380 printf("contigmalloc_map: failed in index < 0 case!");
/* ...then map it into KVA; on mapping failure, undo the page allocation. */
384 rv = (void *) vm_contig_pg_kmap(index, size, map);
386 vm_contig_pg_free(index, size);
/*
 * Release memory obtained from contigmalloc().  The KVA mapping (and the
 * wired pages behind it) are torn down through kmem_free() on kernel_map.
 * 'type' is accepted for API symmetry with contigmalloc(); the visible
 * code does not use it.
 */
392 contigfree(void *addr, unsigned long size, struct malloc_type *type)
394 kmem_free(kernel_map, (vm_offset_t)addr, size);
/*
 * Legacy/compat wrapper: allocate contiguous wired kernel memory from the
 * M_DEVBUF pool without sleeping (M_NOWAIT) and with no boundary
 * restriction (0ul).  Returns the kernel VA cast to vm_offset_t.
 * NOTE(review): the size/low/high parameter lines are elided from this
 * chunk.
 */
398 vm_page_alloc_contig(
402 vm_offset_t alignment)
404 return ((vm_offset_t)contigmalloc_map(size, M_DEVBUF, M_NOWAIT, low,
405 high, alignment, 0ul, kernel_map));