/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $
 * $DragonFly: src/sys/vm/vm_kern.c,v 1.5 2003/07/26 22:10:02 rob Exp $
 */
/*
 *	Kernel memory management.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t clean_map=0;
vm_map_t buffer_map=0;
vm_map_t mb_map=0;
int mb_map_full=0;
/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */
vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
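/*
 * Usage sketch (illustrative only, not part of this file): a caller
 * wanting pageable kernel VA checks for the 0 return on failure;
 * `nbytes' below is a hypothetical variable:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_pageable(kernel_map, nbytes);
 *	if (va == 0)
 *		return (ENOMEM);
 */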
/*
 *	kmem_alloc_nofault:
 *
 *	Same as kmem_alloc_pageable, except that it creates a nofault entry.
 */
vm_offset_t
kmem_alloc_nofault(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
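/*
 * Usage sketch (illustrative only): because the entry is MAP_NOFAULT,
 * the fault path will not demand-populate the range; the caller is
 * expected to install backing pages itself (e.g. with pmap_qenter()),
 * so a stray access to an unpopulated page is caught rather than
 * silently satisfied:
 *
 *	vm_offset_t va = kmem_alloc_nofault(kernel_map, nbytes);
 */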
/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages. Assume that no
	 * region of the kernel object is referenced more than once.
	 *
	 * Locate sufficient space in the map. This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_pageable. This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 * kernel_object. 2) The kmsg zone is empty, and so we are
	 * kmem_allocing a new page for it. 3) vm_map_pageable calls vm_fault;
	 * there is no page, but there is a pager, so we call
	 * pager_data_request. But the kmsg zone is empty, so we must
	 * kmem_alloc. 4) goto 1 5) Even if the kmsg zone is not empty: when
	 * we get the data back from the pager, it will be (very stale)
	 * non-zero data. kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out. vm_map_pageable will wire the pages.
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
		    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */
	(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);

	return (addr);
}
/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
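/*
 * Usage sketch (illustrative only): wired allocations from kmem_alloc()
 * are released with kmem_free() using the same map and size; `nbytes'
 * is hypothetical:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc(kernel_map, nbytes);
 *	if (va == 0)
 *		return (ENOMEM);
 *	...
 *	kmem_free(kernel_map, va, nbytes);
 */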
/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	vm_map_t parent;
	vm_offset_t *min, *max;
	vm_size_t size;
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
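/*
 * Usage sketch (illustrative only, size hypothetical): this is how a
 * dedicated submap such as exec_map is typically carved out of
 * kernel_map during machine-dependent startup:
 *
 *	vm_offset_t minaddr, maxaddr;
 *
 *	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    16 * PAGE_SIZE);
 */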
/*
 *	kmem_malloc:
 *
 *	Allocate wired-down memory in the kernel's address map for the higher
 *	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 *	kmem_alloc() because we may need to allocate memory at interrupt
 *	level where we cannot block (canwait == FALSE).
 *
 *	This routine has its own private kernel submap (kmem_map) and object
 *	(kmem_object).  This, combined with the fact that only malloc uses
 *	this routine, ensures that we will never block in map or object waits.
 *
 *	Note that this still only works in a uni-processor environment and
 *	when called at splhigh().
 *
 *	We don't worry about expanding the map (adding entries) since entries
 *	for wired maps are statically allocated.
 *
 *	NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	vm_map_t map;
	vm_size_t size;
	int flags;
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;

	if (map != kmem_map && map != mb_map)
		panic("kmem_malloc: map != {kmem,mb}_map");

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map. This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map == mb_map) {
			mb_map_full = TRUE;
			printf("Out of mbuf clusters - adjust NMBCLUSTERS or increase maxusers!\n");
			return (0);
		}
		if ((flags & M_NOWAIT) == 0)
			panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
			    (long)size, (long)map->size);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		/*
		 * Note: if M_NOWAIT specified alone, allocate from
		 * interrupt-safe queues only (just the free list).  If
		 * M_USE_RESERVE is also specified, we can also
		 * allocate from the cache.  Neither of the latter two
		 * flags may be specified from an interrupt since interrupts
		 * are not allowed to mess with the cache queue.
		 */
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
		    ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) ?
		    VM_ALLOC_INTERRUPT : VM_ALLOC_SYSTEM);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				goto retry;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete before the pages have been freed or
			 * unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
				    OFF_TO_IDX(offset + i));
				vm_page_free(m);
			}
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return (0);
		}
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Mark map entry as non-pageable. Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	vm_map_simplify_entry(map, entry);

	/*
	 * Loop thru pages, entering them in the pmap. (We cannot add them to
	 * the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		vm_page_wire(m);
		vm_page_wakeup(m);
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
	}
	vm_map_unlock(map);

	return (addr);
}
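/*
 * Usage sketch (illustrative only): kern_malloc.c is the intended (and
 * only) caller; conceptually it refills an allocation bucket like
 * this, with `nbytes' hypothetical:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_malloc(kmem_map, nbytes, M_NOWAIT);
 *	if (va == 0)
 *		return (NULL);		(bucket refill failed)
 */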
/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, 0, "kmaw", 0);
	}
	vm_map_insert(map, NULL, (vm_offset_t) 0,
	    addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}
/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	wakeup(map);
	vm_map_unlock(map);
}
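/*
 * Usage sketch (illustrative only): the pair is used on space-limited
 * submaps such as exec_map, where callers queue up for VA and each
 * free wakes the sleepers; `nbytes' is hypothetical:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_wait(exec_map, nbytes);
 *	...
 *	kmem_free_wakeup(exec_map, va, nbytes);
 */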
/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	kernel_map->system_map = 1;
	(void) vm_map_insert(m, NULL, (vm_offset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
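/*
 * Usage sketch (illustrative only): machine-dependent startup code
 * calls this once, early in boot:
 *
 *	kmem_init(virtual_avail, virtual_end);
 *
 * where `virtual_avail' is the first kernel VA still unused after the
 * bootstrap allocations and `virtual_end' is the top of the kernel
 * address space.
 */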