 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.3 2003/08/28 17:24:38 dillon Exp $
 * This module implements a slab allocator drop-in replacement for the
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone. Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones. ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory. The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section. When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation. In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 * XXX Balancing is needed between cpus. Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 * Alloc Size	Chunking	Number of zones
 *
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *
 * (if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 * Allocations >= ZALLOC_ZONE_LIMIT go directly to kmem.
 * API REQUIREMENTS AND SIDE EFFECTS
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
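 *
 *    For illustration only (a hypothetical caller, not code from this file),
 *    these rules mean that
 *
 *	p = malloc(0, M_TEMP, M_WAITOK);    (returns ZERO_LENGTH_PTR, not NULL)
 *	q = malloc(256, M_TEMP, M_WAITOK);  (result is 256-byte aligned)
 *
 *    must both behave as they did with the FreeBSD-4.x allocator, and either
 *    pointer may later be passed to realloc() or free().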
#if defined(USE_SLAB_ALLOCATOR)
#if !defined(NO_KMEM_MAP)
#error "NO_KMEM_MAP must be defined when USE_SLAB_ALLOCATOR is defined"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/vmmeter.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <machine/cpu.h>
#include <sys/thread2.h>
#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))
 * Fixed globals (not per-cpu)
static int ZonePageCount;
static int ZonePageLimit;
static struct malloc_type *kmemstatistics;
static struct kmemusage *kmemusage;
static int32_t weirdary[16];
static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);
 * Misc constants. Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH	2		/* threshold number of zones */
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
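/*
 * Explanatory note on IN_SAME_PAGE_MASK: the mask keeps the page-frame bits
 * of a pointer plus its low (sub-MIN_CHUNK_SIZE) bits.  Free chunks are
 * always MIN_CHUNK_SIZE aligned, so two chunk pointers compare equal under
 * the mask exactly when they lie within the same page, which is how the
 * c_Next linkage is sanity-checked in malloc() below.
 */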
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)
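/*
 * Note on ZERO_LENGTH_PTR: zero-length allocations return this sentinel
 * rather than NULL.  Presumably (void *)-8 was chosen because it is
 * MIN_CHUNK_SIZE aligned yet points at the very top of the address space,
 * so a stray dereference should fault while the value stays distinguishable
 * from an allocation failure.
 */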
 * Misc global malloc buckets
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
 * Initialize the slab memory allocator. We have to choose a zone size based
 * on available physical memory. We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K. The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
static void kmeminit(void *dummy);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)
kmeminit(void *dummy)
limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
usesize = (int)(limsize / 1024);	/* convert to KB */
ZoneSize = ZALLOC_MIN_ZONE_SIZE;
while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
ZoneMask = ZoneSize - 1;
ZonePageLimit = PAGE_SIZE * 4;
ZonePageCount = ZoneSize / PAGE_SIZE;
npg = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE;
kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage), PAGE_SIZE, M_ZERO);
for (i = 0; i < arysize(weirdary); ++i)
weirdary[i] = WEIRD_ADDR;
printf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
 * Initialize a malloc type tracking structure. NOTE! counters and such
 * need to be made per-cpu (maybe with a MAXCPU array).
malloc_init(void *data)
struct malloc_type *type = data;
if (type->ks_magic != M_MAGIC)
panic("malloc type lacks magic");
if (type->ks_limit != 0)
if (vmstats.v_page_count == 0)
panic("malloc_init not allowed before vm init");
limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
type->ks_limit = limsize / 10;
type->ks_next = kmemstatistics;
kmemstatistics = type;
malloc_uninit(void *data)
struct malloc_type *type = data;
struct malloc_type *t;
if (type->ks_magic != M_MAGIC)
panic("malloc type lacks magic");
if (vmstats.v_page_count == 0)
panic("malloc_uninit not allowed before vm init");
if (type->ks_limit == 0)
panic("malloc_uninit on uninitialized type");
if (type->ks_memuse != 0) {
printf("malloc_uninit: %ld bytes of '%s' still allocated\n",
type->ks_memuse, type->ks_shortdesc);
if (type == kmemstatistics) {
kmemstatistics = type->ks_next;
for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
if (t->ks_next == type) {
t->ks_next = type->ks_next;
type->ks_next = NULL;
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
zoneindex(unsigned long *bytes)
unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
*bytes = n = (n + 7) & ~7;
return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
*bytes = n = (n + 15) & ~15;
*bytes = n = (n + 31) & ~31;
*bytes = n = (n + 63) & ~63;
*bytes = n = (n + 127) & ~127;
return(n / 128 + 31);
*bytes = n = (n + 255) & ~255;
return(n / 256 + 39);
*bytes = n = (n + 511) & ~511;
return(n / 512 + 47);
#if ZALLOC_ZONE_LIMIT > 8192
*bytes = n = (n + 1023) & ~1023;
return(n / 1024 + 55);
#if ZALLOC_ZONE_LIMIT > 16384
*bytes = n = (n + 2047) & ~2047;
return(n / 2048 + 63);
panic("Unexpected byte count %d", n);
 * malloc()	(SLAB ALLOCATOR)
 * Allocate memory via the slab allocator. If the request is too large,
 * or if it is page-aligned beyond a certain size, we fall back to the
 * KMEM subsystem. A SLAB tracking descriptor must be specified, use
 * &SlabMisc if you don't care.
 * M_NOWAIT	- return NULL instead of blocking.
 * M_ZERO	- zero the returned memory.
 * M_USE_RESERVE	- allocate out of the system reserve if necessary
malloc(unsigned long size, struct malloc_type *type, int flags)
slgd = &mycpu->gd_slab;
 * XXX silly to have this in the critical path.
if (type->ks_limit == 0) {
if (type->ks_limit == 0)
 * Handle the case where the limit is reached. Panic if we can't return
 * NULL. XXX the original malloc code looped, but this tended to
 * simply deadlock the computer.
while (type->ks_memuse >= type->ks_limit) {
if (flags & (M_NOWAIT|M_NULLOK))
panic("%s: malloc limit exceeded", type->ks_shortdesc);
 * Handle the degenerate size == 0 case. Yes, this does happen.
 * Return a special pointer. This is to maintain compatibility with
 * the original malloc implementation. Certain devices, such as the
 * adaptec driver, not only allocate 0 bytes, they check for NULL and
 * also realloc() later on. Joy.
return(ZERO_LENGTH_PTR);
 * Handle large allocations directly. There should not be very many of
 * these so performance is not a big issue.
 * Guarantee page alignment for allocations in multiples of PAGE_SIZE
if (size >= ZALLOC_ZONE_LIMIT || (size & PAGE_MASK) == 0) {
struct kmemusage *kup;
size = round_page(size);
chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
kup->ku_pagecnt = size / PAGE_SIZE;
 * Attempt to allocate out of an existing zone. First try the free list,
 * then allocate out of unallocated space. If we find a good zone move
 * it to the head of the list so later allocations find it quickly
 * (we might have thousands of zones in the list).
 * Note: zoneindex() will panic if size is too large.
zi = zoneindex(&size);
KKASSERT(zi < NZONES);
if ((z = slgd->ZoneAry[zi]) != NULL) {
KKASSERT(z->z_NFree > 0);
 * Remove us from the ZoneAry[] when we become empty
if (--z->z_NFree == 0) {
slgd->ZoneAry[zi] = z->z_Next;
 * Locate a chunk in a free page. This attempts to localize
 * reallocations into earlier pages without us having to sort
 * the chunk list. A chunk may still overlap a page boundary.
while (z->z_FirstFreePg < ZonePageCount) {
if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
 * Diagnostic: c_Next is not total garbage.
KKASSERT(chunk->c_Next == NULL ||
((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
((intptr_t)chunk & IN_SAME_PAGE_MASK));
if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount);
if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount);
z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
 * No chunks are available but NFree said we had some memory, so
 * it must be available in the never-before-used-memory area
 * governed by UIndex. The consequences are very serious if our zone
 * got corrupted so we use an explicit panic rather than a KASSERT.
if (z->z_UIndex + 1 != z->z_NMax)
z->z_UIndex = z->z_UIndex + 1;
if (z->z_UIndex == z->z_UEndIndex)
panic("slaballoc: corrupted zone");
chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
if ((z->z_Flags & SLZF_UNOTZEROD) == 0)
 * If all zones are exhausted we need to allocate a new zone for this
 * index. Use M_ZERO to take advantage of pre-zero'd pages. Also see
 * UAlloc use above in regards to M_ZERO. Note that when we are reusing
 * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
 * we do not pre-zero it because we do not want to mess up the L1 cache.
 * At least one subsystem, the tty code (see CROUND) expects power-of-2
 * allocations to be power-of-2 aligned. We maintain compatibility by
 * adjusting the base offset below.
if ((z = slgd->FreeZones) != NULL) {
slgd->FreeZones = z->z_Next;
bzero(z, sizeof(SLZone));
z->z_Flags |= SLZF_UNOTZEROD;
z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
 * Otherwise just 8-byte align the data.
if ((size | (size - 1)) + 1 == (size << 1))
off = (sizeof(SLZone) + size - 1) & ~(size - 1);
off = (sizeof(SLZone) + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
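/*
 * How the power-of-2 test above works: when size is a power of 2,
 * size | (size - 1) sets every bit below the top bit, so adding 1 yields
 * exactly size << 1.  For any other size the two sides differ.
 * E.g. size = 64: 64 | 63 = 127, +1 = 128 == 64 << 1   (power of 2)
 *      size = 96: 96 | 95 = 127, +1 = 128 != 192       (not a power of 2)
 */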
z->z_Magic = ZALLOC_SLAB_MAGIC;
z->z_NMax = (ZoneSize - off) / size;
z->z_NFree = z->z_NMax - 1;
z->z_BasePtr = (char *)z + off;
z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
z->z_ChunkSize = size;
z->z_FirstFreePg = ZonePageCount;
z->z_Cpu = mycpu->gd_cpuid;
chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
z->z_Next = slgd->ZoneAry[zi];
slgd->ZoneAry[zi] = z;
if ((z->z_Flags & SLZF_UNOTZEROD) == 0)
flags &= ~M_ZERO;	/* already zero'd */
 * Slide the base index for initial allocations out of the next
 * zone we create so we do not over-weight the lower part of the
 * cpu memory caches.
slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
& (ZALLOC_MAX_ZONE_SIZE - 1);
type->ks_memuse += size;
realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
return(malloc(size, type, flags));
 * Handle oversized allocations. XXX we really should require that a
 * size be passed to free() instead of this nonsense.
struct kmemusage *kup;
if (kup->ku_pagecnt) {
osize = kup->ku_pagecnt << PAGE_SHIFT;
if (osize == round_page(size))
if ((nptr = malloc(size, type, flags)) == NULL)
bcopy(ptr, nptr, min(size, osize));
 * Get the original allocation's zone. If the new request winds up
 * using the same chunk size we do not have to do anything.
z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
if (z->z_ChunkSize == size)
 * Allocate memory for the new request size. Note that zoneindex has
 * already adjusted the request size to the appropriate chunk size, which
 * should optimize our bcopy(). Then copy and return the new pointer.
if ((nptr = malloc(size, type, flags)) == NULL)
bcopy(ptr, nptr, min(size, z->z_ChunkSize));
 * free() (SLAB ALLOCATOR)
 * Free the specified chunk of memory. The byte count is not strictly
 * required but if DIAGNOSTIC is set we use it as a sanity check.
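 *
 *	free_remote() below is the IPI callback used when a chunk is freed
 *	on a cpu that does not own its zone: free() stores the malloc_type
 *	pointer in the first word of the chunk before queueing the IPI, so
 *	the owning cpu can recover it and re-enter free() locally.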
free_remote(void *ptr)
free(ptr, *(struct malloc_type **)ptr);
free(void *ptr, struct malloc_type *type)
slgd = &mycpu->gd_slab;
 * Handle special 0-byte allocations
if (ptr == ZERO_LENGTH_PTR)
 * Handle oversized allocations. XXX we really should require that a
 * size be passed to free() instead of this nonsense.
struct kmemusage *kup;
if (kup->ku_pagecnt) {
size = kup->ku_pagecnt << PAGE_SHIFT;
type->ks_memuse -= size;
KKASSERT(sizeof(weirdary) <= size);
bcopy(weirdary, ptr, sizeof(weirdary));
kmem_slab_free(ptr, size);	/* may block */
 * Zone case. Figure out the zone based on the fact that it is
 * ZoneSize aligned.
z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
 * If we do not own the zone then forward the request to the
 * cpu that does. The freeing code does not need the byte count
 * unless DIAGNOSTIC is set.
if (z->z_Cpu != mycpu->gd_cpuid) {
*(struct malloc_type **)ptr = type;
lwkt_send_ipiq(z->z_Cpu, free_remote, ptr);
if (type->ks_magic != M_MAGIC)
panic("free: malloc type lacks magic");
pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
 * Diagnostic: attempt to detect a double-free (not perfect).
if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
panic("Double free at %p", chunk);
 * Put weird data into the memory to detect modifications after freeing,
 * illegal pointer use after freeing (we should fault on the odd address),
 * and so forth. XXX needs more work, see the old malloc code.
if (z->z_ChunkSize < sizeof(weirdary))
bcopy(weirdary, chunk, z->z_ChunkSize);
bcopy(weirdary, chunk, sizeof(weirdary));
 * Add this free non-zero'd chunk to a linked list for reuse, adjust
 * the z_FirstFreePg index if necessary.
if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
panic("BADFREE %p\n", chunk);
chunk->c_Next = z->z_PageAry[pgno];
z->z_PageAry[pgno] = chunk;
if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
if (z->z_FirstFreePg > pgno)
z->z_FirstFreePg = pgno;
 * Bump the number of free chunks. If it becomes non-zero the zone
 * must be added back onto the appropriate list.
if (z->z_NFree++ == 0) {
z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
slgd->ZoneAry[z->z_ZoneIndex] = z;
type->ks_memuse -= z->z_ChunkSize;
 * If the zone becomes totally free, and there are other zones we
 * can allocate from, move this zone to the FreeZones list. Implement
 * hysteresis on the FreeZones list to improve performance.
 * XXX try not to block on the kernel_map lock.
if (z->z_NFree == z->z_NMax &&
(z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
if (slgd->NFreeZones == ZONE_RELS_THRESH &&
lockstatus(&kernel_map->lock, NULL) == 0) {
z->z_Next = slgd->FreeZones->z_Next;
oz = slgd->FreeZones;
kmem_slab_free(oz, ZoneSize);	/* may block */
z->z_Next = slgd->FreeZones;
 * Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 * specified alignment. M_* flags are expected in the flags field.
 * Alignment must be a multiple of PAGE_SIZE.
 * NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 * but when we move zalloc() over to use this function as its backend
 * we will have to switch to kreserve/krelease and call reserve(0)
 * after the new space is made available.
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
vm_map_t map = kernel_map;
size = round_page(size);
addr = vm_map_min(map);
 * Reserve properly aligned space from kernel_map
count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
if (vm_map_findspace(map, vm_map_min(map), size, align, &addr)) {
if ((flags & (M_NOWAIT|M_NULLOK)) == 0)
panic("kmem_slab_alloc(): kernel_map ran out of space!");
vm_map_entry_release(count);
offset = addr - VM_MIN_KERNEL_ADDRESS;
vm_object_reference(kernel_object);
vm_map_insert(map, &count,
kernel_object, offset, addr, addr + size,
VM_PROT_ALL, VM_PROT_ALL, 0);
 * Allocate the pages. Do not mess with the PG_ZERO flag yet.
for (i = 0; i < size; i += PAGE_SIZE) {
vm_pindex_t idx = OFF_TO_IDX(offset + i);
int zero = (flags & M_ZERO) ? VM_ALLOC_ZERO : 0;
if ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
m = vm_page_alloc(kernel_object, idx, VM_ALLOC_INTERRUPT|zero);
m = vm_page_alloc(kernel_object, idx, VM_ALLOC_SYSTEM|zero);
if ((flags & M_NOWAIT) == 0) {
i -= PAGE_SIZE;	/* retry */
m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
vm_map_delete(map, addr, addr + size, &count);
vm_map_entry_release(count);
 * Mark the map entry as non-pageable using a routine that allows us to
 * populate the underlying pages.
vm_map_set_wired_quick(map, addr, size, &count);
 * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
for (i = 0; i < size; i += PAGE_SIZE) {
m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
m->valid = VM_PAGE_BITS_ALL;
pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
bzero((char *)addr + i, PAGE_SIZE);
vm_page_flag_clear(m, PG_ZERO);
vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
vm_map_entry_release(count);
return((void *)addr);
kmem_slab_free(void *ptr, vm_size_t size)
vm_map_remove(kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);