 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Hiten Pandya <hmp@backplane.com>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 * $DragonFly: src/sys/vm/vm_contig.c,v 1.21 2006/12/28 21:24:02 dillon Exp $
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * Contiguous memory allocation API.
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>
 * vm_contig_pg_clean:
 *
 * Do a thorough cleanup of the specified 'queue', which can be either
 * PQ_ACTIVE or PQ_INACTIVE, by walking the queue.  If a page is not
 * marked dirty, it is shoved into the page cache, provided no one has
 * currently acquired it; otherwise localized action per object type
 * is taken for cleanup:
 *
 *	In the OBJT_VNODE case, the whole page range is cleaned up
 *	using the vm_object_page_clean() routine, by specifying a
 *	start and end of '0'.
 *
 *	Otherwise, if the object is of any other type, the generic
 *	pageout (daemon) flush routine is invoked.
 *
 * The caller must hold vm_token.
vm_contig_pg_clean(int queue)
        vm_page_t m, m_tmp, next;

        ASSERT_LWKT_TOKEN_HELD(&vm_token);

        for (m = TAILQ_FIRST(&vm_page_queues[queue].pl); m != NULL; m = next) {
                KASSERT(m->queue == queue,
                        ("vm_contig_clean: page %p's queue is not %d",
                         m, queue));
                next = TAILQ_NEXT(m, pageq);

                if (m->flags & PG_MARKER)
                        continue;

                if (vm_page_sleep_busy(m, TRUE, "vpctw0"))

                vm_page_test_dirty(m);
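                /*
                 * vm_page_test_dirty() re-tests the pmap modified bit and
                 * updates m->dirty, so the checks below operate on the
                 * page's current dirty state.
                 */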
                if (object->type == OBJT_VNODE) {
                        vn_lock(object->handle, LK_EXCLUSIVE|LK_RETRY);
                        vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
                        vn_unlock(((struct vnode *)object->handle));
                } else if (object->type == OBJT_SWAP ||
                           object->type == OBJT_DEFAULT) {
                        vm_pageout_flush(&m_tmp, 1, 0);

                KKASSERT(m->busy == 0);
                if (m->dirty == 0 && m->hold_count == 0) {
 * vm_contig_pg_flush:
 *
 * Attempt to flush (count) pages from the given page queue.  This may or
 * may not succeed.  Take up to <count> passes and delay 1/20 of a second
 * between each pass if we still fail.
 *
 * The caller must hold vm_token.

vm_contig_pg_flush(int queue, int count)
                if (!vm_contig_pg_clean(queue))
 * vm_contig_pg_alloc:
 *
 * Allocate contiguous pages from the VM.  This function does not
 * map the allocated pages into the kernel map, otherwise it is
 * impossible to make large allocations (i.e. >2G).
 *
 * Malloc()'s data structures have been used for collection of
 * statistics and for allocations of less than a page.
 *
 * The caller must hold vm_token.
vm_contig_pg_alloc(unsigned long size, vm_paddr_t low, vm_paddr_t high,
                   unsigned long alignment, unsigned long boundary, int mflags)
        vm_page_t pga = vm_page_array;

        size = round_page(size);
        if (size == 0)
                panic("vm_contig_pg_alloc: size must not be 0");
        if ((alignment & (alignment - 1)) != 0)
                panic("vm_contig_pg_alloc: alignment must be a power of 2");
        if ((boundary & (boundary - 1)) != 0)
                panic("vm_contig_pg_alloc: boundary must be a power of 2");
         * Three passes (0, 1, 2).  Each pass scans the VM page list for
         * free or cached pages.  After each pass if the entire scan failed
         * we attempt to flush inactive pages and reset the start index back
         * to 0.  For passes 1 and 2 we also attempt to flush active pages.

        for (pass = 0; pass < 3; pass++) {
                 * Find first page in array that is free, within range,
                 * aligned, and such that the boundary won't be crossed.
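                 *
                 * The boundary test below relies on 'boundary' being 0 or a
                 * power of 2 (0 means no boundary restriction): the run
                 * [phys, phys + size - 1] stays inside one boundary-sized
                 * block exactly when its two end addresses agree in all bits
                 * above the boundary mask, i.e. when
                 * ((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0.
                 * For example, phys = 0x1f000, size = 0x2000, boundary =
                 * 0x10000 is rejected because the run ends at 0x20fff, on
                 * the far side of the 64KB boundary at 0x20000.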
                for (i = start; i < vmstats.v_page_count; i++) {
                        m = &pga[i];
                        phys = VM_PAGE_TO_PHYS(m);
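                        /*
                         * PQ_FREE and PQ_CACHE are arrays of queues, one per
                         * page color; m->pc holds the page's color, so
                         * subtracting it from m->queue recovers the base
                         * queue type tested below.
                         */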
                        pqtype = m->queue - m->pc;
                        if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
                            (phys >= low) && (phys < high) &&
                            ((phys & (alignment - 1)) == 0) &&
                            (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0) &&
                            m->busy == 0 && m->wire_count == 0 &&
                            m->hold_count == 0 && (m->flags & PG_BUSY) == 0
                 * If we cannot find the page in the given range, or we have
                 * crossed the boundary, call the vm_contig_pg_clean()
                 * function to flush out the queues and return them to a
                 * normal state.

                if ((i == vmstats.v_page_count) ||
                    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {

                         * Best effort flush of all inactive pages.
                         * This is quite quick, for now stall all
                         * callers, even if they've specified M_NOWAIT.

                        vm_contig_pg_flush(PQ_INACTIVE,
                                           vmstats.v_inactive_count);

                        crit_exit();    /* give interrupts a chance */
                         * Best effort flush of active pages.
                         *
                         * This is very, very slow.
                         * Only do this if the caller has agreed to M_WAITOK.
                         *
                         * If enough pages are flushed, we may succeed on the
                         * next (final) pass; if not, the caller
                         * (contigmalloc()) will fail in the index < 0 case.

                        if (pass > 0 && (mflags & M_WAITOK)) {
                                vm_contig_pg_flush(PQ_ACTIVE,
                                                   vmstats.v_active_count);
                        }

                         * We're already too high in the address space
                         * to succeed, reset to 0 for the next iteration.

                        start = 0;
                        crit_exit();    /* give interrupts a chance */

                        continue;       /* next pass */
                 * Check that the successive pages are contiguous and free.
                 *
                 * (still in critical section)

                for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
                        m = &pga[i];
                        pqtype = m->queue - m->pc;
                        if ((VM_PAGE_TO_PHYS(&m[0]) !=
                            (VM_PAGE_TO_PHYS(&m[-1]) + PAGE_SIZE)) ||
                            ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE)) ||
                            m->busy || m->wire_count ||
                            m->hold_count || (m->flags & PG_BUSY)
                 * (still in critical section)

                for (i = start; i < (start + size / PAGE_SIZE); i++) {
                        m = &pga[i];
                        pqtype = m->queue - m->pc;
                        if (pqtype == PQ_CACHE) {
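                                /*
                                 * A cached page still belongs to an object;
                                 * it must be freed back to the free queue
                                 * before it can be claimed for the run.
                                 */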
                        KKASSERT(m->object == NULL);
                        vm_page_unqueue_nowakeup(m);
                        m->valid = VM_PAGE_BITS_ALL;
                        if (m->flags & PG_ZERO)
                                vm_page_zero_count--;
                        KASSERT(m->dirty == 0,
                                ("vm_contig_pg_alloc: page %p was dirty", m));
                         * Clear all flags except PG_ZERO and PG_WANTED.  This
                         * also clears PG_BUSY.

                        vm_page_flag_clear(m, ~(PG_ZERO|PG_WANTED));

                 * Our job is done, return the index into vm_page_array.

                return (start); /* aka &pga[start] */
 * Remove pages previously allocated by vm_contig_pg_alloc().  It is assumed
 * that all references to the pages have been removed and that it is OK to
 * add them back to the free list.
 *
 * Caller must ensure no races on the page range in question.
 * No other requirements.

vm_contig_pg_free(int start, u_long size)
        vm_page_t pga = vm_page_array;

        size = round_page(size);
        if (size == 0)
                panic("vm_contig_pg_free: size must not be 0");

        lwkt_gettoken(&vm_token);
        for (i = start; i < (start + size / PAGE_SIZE); i++) {

        lwkt_reltoken(&vm_token);
 * Map a previously allocated (vm_contig_pg_alloc) range of pages from
 * vm_page_array[] into the KVA.  Once mapped, the pages are part of
 * the kernel, and are to be freed with kmem_free(&kernel_map, addr, size).

vm_contig_pg_kmap(int start, u_long size, vm_map_t map, int flags)
        vm_offset_t addr, tmp_addr;
        vm_page_t pga = vm_page_array;

        size = round_page(size);
        if (size == 0)
                panic("vm_contig_pg_kmap: size must not be 0");
        lwkt_gettoken(&vm_token);

         * We've found a contiguous chunk that meets our requirements.
         * Allocate KVM, and assign phys pages and return a kernel VM
         * pointer.

        count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
        if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE, 0, &addr) !=

                 * XXX We almost never run out of kernel virtual
                 * space, so we don't make the allocated memory
                 * above available.

                vm_map_entry_release(count);
                lwkt_reltoken(&vm_token);
         * kernel_object maps 1:1 to kernel_map.

        vm_object_reference(&kernel_object);
        vm_map_insert(map, &count,
                      &kernel_object, addr,

                      VM_PROT_ALL, VM_PROT_ALL,

        vm_map_entry_release(count);

        for (i = start; i < (start + size / PAGE_SIZE); i++) {
                vm_page_t m = &pga[i];
                vm_page_insert(m, &kernel_object, OFF_TO_IDX(tmp_addr));
                if ((flags & M_ZERO) && !(m->flags & PG_ZERO))
                        pmap_zero_page(VM_PAGE_TO_PHYS(m));

                tmp_addr += PAGE_SIZE;

        vm_map_wire(map, addr, addr + size, 0);

        lwkt_reltoken(&vm_token);
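
/*
 * Usage sketch: one plausible way a caller could drive the low-level
 * vm_contig_pg_alloc()/vm_contig_pg_kmap()/vm_contig_pg_free() trio by hand,
 * roughly what contigmalloc_map() below does.  The function name and the
 * parameter choices (below 4G, page aligned, no boundary restriction) are
 * illustrative assumptions, not part of the API.  Kept under #if 0 so it is
 * not compiled.
 */
#if 0
static void *
example_contig_alloc(unsigned long bytes)
{
        int index;
        void *kva = NULL;

        lwkt_gettoken(&vm_token);
        index = vm_contig_pg_alloc(bytes, 0, 0xffffffffULL, PAGE_SIZE, 0,
                                   M_WAITOK);
        if (index >= 0) {
                /* Map the run into kernel_map; a NULL return means failure. */
                kva = (void *)vm_contig_pg_kmap(index, bytes, &kernel_map,
                                                M_WAITOK);
                if (kva == NULL)
                        vm_contig_pg_free(index, bytes);
        }
        lwkt_reltoken(&vm_token);
        return (kva);
}
#endif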
contigmalloc(
        unsigned long size,     /* should be size_t here and for malloc() */
        struct malloc_type *type,
        int flags,
        vm_paddr_t low,
        vm_paddr_t high,
        unsigned long alignment,
        unsigned long boundary)
{
        return contigmalloc_map(size, type, flags, low, high, alignment,
                                boundary, &kernel_map);
}
contigmalloc_map(
        unsigned long size,     /* should be size_t here and for malloc() */
        struct malloc_type *type,
        int flags,
        vm_paddr_t low,
        vm_paddr_t high,
        unsigned long alignment,
        unsigned long boundary,
        vm_map_t map)
{
        int index;
        void *rv;

        lwkt_gettoken(&vm_token);
        index = vm_contig_pg_alloc(size, low, high, alignment, boundary, flags);
        if (index < 0) {
                kprintf("contigmalloc_map: failed size %lu low=%llx "
                        "high=%llx align=%lu boundary=%lu flags=%08x\n",
                        size, (long long)low, (long long)high,
                        alignment, boundary, flags);
                lwkt_reltoken(&vm_token);
                return (NULL);
        }

        rv = (void *)vm_contig_pg_kmap(index, size, map, flags);
        if (rv == NULL)
                vm_contig_pg_free(index, size);
        lwkt_reltoken(&vm_token);

        return (rv);
}
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{
        kmem_free(&kernel_map, (vm_offset_t)addr, size);
}
vm_page_alloc_contig(
        vm_offset_t size,
        vm_paddr_t low,
        vm_paddr_t high,
        vm_offset_t alignment)
{
        return ((vm_offset_t)contigmalloc_map(size, M_DEVBUF, M_NOWAIT, low,
                                high, alignment, 0ul, &kernel_map));
}
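
/*
 * Usage sketch: a typical driver-style use of the public contigmalloc() /
 * contigfree() API above, allocating a physically contiguous, page-aligned
 * buffer below 4G with no boundary restriction.  The function name and the
 * specific parameters are illustrative assumptions only.  Kept under #if 0
 * so it is not compiled.
 */
#if 0
static void
example_dma_buffer(void)
{
        void *buf;

        buf = contigmalloc(65536, M_DEVBUF, M_WAITOK, 0, 0xffffffffULL,
                           PAGE_SIZE, 0);
        if (buf == NULL)
                return;

        /* ... hand vtophys(buf) to the device and perform the I/O ... */

        contigfree(buf, 65536, M_DEVBUF);
}
#endif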