Rename printf -> kprintf in sys/ and add some defines where necessary
[dragonfly.git] / sys / vm / vm_kern.c
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $
 * $DragonFly: src/sys/vm/vm_kern.c,v 1.24 2006/12/23 00:41:31 swildner Exp $
 */

/*
 * Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t exec_map=0;
vm_map_t clean_map=0;
vm_map_t buffer_map=0;

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */
vm_offset_t
kmem_alloc_pageable(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			     &addr, size,
			     TRUE,
			     VM_MAPTYPE_NORMAL,
			     VM_PROT_ALL, VM_PROT_ALL,
			     0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
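
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * caller pairs kmem_alloc_pageable() with kmem_free() on kernel_map or a
 * submap.  The helper name below is hypothetical and the block is not
 * compiled.
 */
#if 0
static void
example_pageable_scratch(vm_size_t bytes)
{
	vm_size_t size = round_page(bytes);
	vm_offset_t va;

	va = kmem_alloc_pageable(kernel_map, size);
	if (va == 0)
		return;			/* no space in the map */
	/* Pages are faulted in on first touch; nothing is wired here. */
	kmem_free(kernel_map, va, size);
}
#endif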

/*
 *	kmem_alloc_nofault:
 *
 *	Same as kmem_alloc_pageable, except that it creates a nofault entry.
 */
vm_offset_t
kmem_alloc_nofault(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			     &addr, size,
			     TRUE,
			     VM_MAPTYPE_NORMAL,
			     VM_PROT_ALL, VM_PROT_ALL,
			     MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc3(vm_map_t map, vm_size_t size, int kmflags)
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;
	int count;

	size = round_page(size);

	if (kmflags & KM_KRESERVE)
		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
	else
		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	/*
	 * Use the kernel object for wired-down kernel pages. Assume that no
	 * region of the kernel object is referenced more than once.
	 *
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr)) {
		vm_map_unlock(map);
		if (kmflags & KM_KRESERVE)
			vm_map_entry_krelease(count);
		else
			vm_map_entry_release(count);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, &count,
		      kernel_object, offset, addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      0);
	vm_map_unlock(map);
	if (kmflags & KM_KRESERVE)
		vm_map_entry_krelease(count);
	else
		vm_map_entry_release(count);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 * kernel_object. 2) The kmsg zone is empty, and so we are
	 * kmem_allocing a new page for it. 3) vm_map_wire calls vm_fault;
	 * there is no page, but there is a pager, so we call
	 * pager_data_request.  But the kmsg zone is empty, so we must
	 * kmem_alloc. 4) goto 1 5) Even if the kmsg zone is not empty: when
	 * we get the data back from the pager, it will be (very stale)
	 * non-zero data.  kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_wire will wire the pages.
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
				   VM_ALLOC_ZERO | VM_ALLOC_NORMAL |
				   VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */
	vm_map_wire(map, (vm_offset_t) addr, addr + size, kmflags);

	return (addr);
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
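
/*
 * Example (illustrative sketch, not part of the original file): wired kernel
 * memory obtained from kmem_alloc3() is returned with kmem_free().  Passing
 * KM_KRESERVE instead of 0 draws on the reserved map entries, as the
 * low-memory paths above do.  The helper name is hypothetical and the block
 * is not compiled.
 */
#if 0
static void
example_wired_scratch(vm_size_t bytes)
{
	vm_size_t size = round_page(bytes);
	vm_offset_t va;

	va = kmem_alloc3(kernel_map, size, 0);	/* or KM_KRESERVE */
	if (va == 0)
		return;			/* no space in kernel_map */
	/* The region is zero-filled and wired; use it, then release it. */
	kmem_free(kernel_map, va, size);
}
#endif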

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	size		Size of range to find
 *	min, max	Returned endpoints of map
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
	      vm_size_t size)
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
			  min, size,
			  TRUE,
			  VM_MAPTYPE_UNSPECIFIED,
			  VM_PROT_ALL, VM_PROT_ALL,
			  0);
	if (ret != KERN_SUCCESS) {
		kprintf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
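
/*
 * Example (illustrative sketch, not part of the original file): the rough
 * shape of boot-time code that carves a special-purpose submap (such as
 * exec_map above) out of kernel_map.  The size is an arbitrary placeholder
 * and the block is not compiled.
 */
#if 0
static void
example_make_submap(void)
{
	vm_offset_t minaddr, maxaddr;
	vm_map_t submap;

	submap = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
			       16 * PAGE_SIZE);
	/* Allocations can now be made from [minaddr, maxaddr) via submap. */
}
#endif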

/*
 *	kmem_malloc:
 *
 *	Allocate wired-down memory in the kernel's address map for the higher
 *	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 *	kmem_alloc() because we may need to allocate memory at interrupt
 *	level where we cannot block (canwait == FALSE).
 *
 *	We don't worry about expanding the map (adding entries) since entries
 *	for wired maps are statically allocated.
 *
 *	NOTE: Please see kmem_slab_alloc() for a better explanation of the
 *	M_* flags.
 */
vm_offset_t
kmem_malloc(vm_map_t map, vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;
	int count, vmflags, wanted_reserve;
	thread_t td;

	if (map != kernel_map)
		panic("kmem_malloc: map != kernel_map");

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.  If we are unable to allocate space
	 * and M_NULLOK is not set, we panic.
	 */
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr)) {
		vm_map_unlock(map);
		vm_map_entry_release(count);
		if ((flags & M_NULLOK) == 0) {
			panic("kmem_malloc(%ld): kernel_map too small: "
			      "%ld total allocated",
			      (long)size, (long)map->size);
		}
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, &count,
		      kmem_object, offset, addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      0);

	td = curthread;
	wanted_reserve = 0;

	vmflags = VM_ALLOC_SYSTEM;	/* XXX M_USE_RESERVE? */
	if ((flags & (M_WAITOK|M_RNOWAIT)) == 0)
		panic("kmem_malloc: bad flags %08x (%p)\n",
		      flags, ((int **)&map)[-1]);
	if (flags & M_USE_INTERRUPT_RESERVE)
		vmflags |= VM_ALLOC_INTERRUPT;

	for (i = 0; i < size; i += PAGE_SIZE) {
		/*
		 * Only allocate PQ_CACHE pages for M_WAITOK requests and
		 * then only if we are not preempting.
		 */
		if (flags & M_WAITOK) {
			if (td->td_preempted) {
				vmflags &= ~VM_ALLOC_NORMAL;
				wanted_reserve = 1;
			} else {
				vmflags |= VM_ALLOC_NORMAL;
				wanted_reserve = 0;
			}
		}

		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), vmflags);

		/*
		 * Ran out of space, free everything up and return.  Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 *
		 * If M_WAITOK is set we can yield or block.
		 */
		if (m == NULL) {
			if (flags & M_WAITOK) {
				if (wanted_reserve) {
					vm_map_unlock(map);
					lwkt_yield();
					vm_map_lock(map);
				} else {
					vm_map_unlock(map);
					vm_wait();
					vm_map_lock(map);
				}
				i -= PAGE_SIZE;	/* retry */
				continue;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete before the pages have been freed or
			 * unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
						   OFF_TO_IDX(offset + i));
				vm_page_free(m);
			}
			vm_map_delete(map, addr, addr + size, &count);
			vm_map_unlock(map);
			vm_map_entry_release(count);
			return (0);
		}
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Mark map entry as non-pageable.  Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	vm_map_simplify_entry(map, entry, &count);

	/*
	 * Loop thru pages, entering them in the pmap. (We cannot add them to
	 * the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		vm_page_wire(m);
		vm_page_wakeup(m);
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
	}
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (addr);
}
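
/*
 * Example (illustrative sketch, not part of the original file): callers of
 * kmem_malloc() must pass exactly one of M_WAITOK or M_RNOWAIT; adding
 * M_NULLOK turns the "kernel_map too small" panic into a 0 return.  The
 * helper name is hypothetical and the block is not compiled.
 */
#if 0
static vm_offset_t
example_kmem_malloc(vm_size_t bytes, int can_block)
{
	vm_offset_t va;

	if (can_block) {
		/* May yield or sleep in vm_wait() until pages are available. */
		va = kmem_malloc(kernel_map, round_page(bytes), M_WAITOK);
	} else {
		/* Never blocks; 0 is returned instead of panicking. */
		va = kmem_malloc(kernel_map, round_page(bytes),
				 M_RNOWAIT | M_NULLOK);
	}
	return (va);
}
#endif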

/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int count;

	size = round_page(size);

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_entry_release(count);
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, 0, "kmaw", 0);
	}
	vm_map_insert(map, &count,
		      NULL, (vm_offset_t) 0,
		      addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      0);
	vm_map_unlock(map);
	vm_map_entry_release(count);
	return (addr);
}

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	vm_map_delete(map, trunc_page(addr), round_page(addr + size), &count);
	wakeup(map);
	vm_map_unlock(map);
	vm_map_entry_release(count);
}
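
/*
 * Example (illustrative sketch, not part of the original file): these two
 * routines pair up on a submap; one caller sleeps in kmem_alloc_wait() until
 * another returns space via kmem_free_wakeup().  The submap argument and
 * size are placeholders, and the block is not compiled.
 */
#if 0
static void
example_submap_cycle(vm_map_t submap)
{
	vm_size_t size = 4 * PAGE_SIZE;
	vm_offset_t va;

	va = kmem_alloc_wait(submap, size);	/* may block for space */
	if (va == 0)
		return;		/* request larger than the whole submap */
	/* ... copy transient data into [va, va + size) and consume it ... */
	kmem_free_wakeup(submap, va, size);	/* wakes any waiters */
}
#endif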

/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 *
 *	Depend on the zalloc bootstrap cache to get our vm_map_entry_t.
 */
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
	vm_map_t m;
	int count;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	kernel_map->system_map = 1;
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
		      VM_MIN_KERNEL_ADDRESS, start,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
	vm_map_entry_release(count);
}