/*
 * KERN_SLABALLOC.C	- Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.55 2008/10/22 01:42:17 dillon Exp $
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 * Alloc Size	Chunking	Number of zones
 * 0-127	8		16
 * 128-255	16		8
 * 256-511	32		8
 * 512-1023	64		8
 * 1024-2047	128		8
 * 2048-4095	256		8
 * 4096-8191	512		8
 * 8192-16383	1024		8
 * 16384-32767	2048		8
 *
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 * Allocations >= ZoneLimit go directly to kmem.
 *
 *			API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */
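/*
 * Illustrative example of the alignment guarantee above: a kmalloc() of
 * 256 bytes is both rounded to a 256-byte chunk and placed on a 256-byte
 * boundary within its zone, so drivers that mask low-order pointer bits
 * (e.g. the tty CROUND usage noted further below) keep working.
 */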
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>
#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))

#define MEMORY_STRING	"ptr=%p type=%p size=%d flags=%04x"
#define MEMORY_ARG_SIZE	(sizeof(void *) * 2 + sizeof(unsigned long) +	\
			 sizeof(int))

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin", 0);
KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin", 0);
KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end", 0);

#define logmemory(name, ptr, type, size, flags)	\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)	\
	KTR_LOG(memory_ ## name)
/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static uintptr_t ZoneMask;
static int ZoneBigAlloc;		/* in KB */
static int ZoneGenAlloc;		/* in KB */
struct malloc_type *kmemstatistics;	/* exported to vmstat */
static struct kmemusage *kmemusage;
static int32_t weirdary[16];
static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);
#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#else
#define chunk_mark_allocated(z, chunk)
#define chunk_mark_free(z, chunk)
#endif
/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH	2		/* threshold number of zones */
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)
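/*
 * Illustrative note: ZERO_LENGTH_PTR is a deliberately bogus kernel
 * address.  kmalloc() returns it for size == 0 requests so the result is
 * non-NULL, kfree() checks for it explicitly, and code that wrongly
 * dereferences it is expected to fault rather than corrupt memory.
 */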
/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

static void *ZeroPage;

SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL);
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int  use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
	   &use_malloc_pattern, 0, "");

SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");
static void
kmeminit(void *dummy)
{
    size_t limsize;
    size_t npg;
    int usesize;
    int i;

    limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
        limsize = KvaSize;

    usesize = (int)(limsize / 1024);	/* convert to KB */

    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
        ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
        ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ~(uintptr_t)(ZoneSize - 1);
    ZonePageCount = ZoneSize / PAGE_SIZE;
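    /*
     * Illustrative note: usesize is the physical memory limit expressed
     * in KB while ZoneSize is in bytes, so the doubling loop above stops
     * the zone size at roughly 1/1024th of available memory (clamped to
     * the ZALLOC_MIN/MAX_ZONE_SIZE bounds).  ZoneMask later lets kfree()
     * and krealloc() recover a chunk's SLZone header with
     * (SLZone *)((uintptr_t)ptr & ZoneMask).
     */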
    npg = KvaSize / PAGE_SIZE;
    kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage),
                                PAGE_SIZE, M_WAITOK|M_ZERO);

    for (i = 0; i < arysize(weirdary); ++i)
        weirdary[i] = WEIRD_ADDR;

    ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);

    if (bootverbose)
        kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}
/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    size_t limsize;

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (type->ks_limit != 0)
        return;

    if (vmstats.v_page_count == 0)
        panic("malloc_init not allowed before vm init");

    limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
        limsize = KvaSize;
    type->ks_limit = limsize / 10;

    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}
void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
#ifdef INVARIANTS
    int i;
    long ttl;
#endif

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
        panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
        panic("malloc_uninit on uninitialized type");

#ifdef SMP
    /* Make sure that all pending kfree()s are finished. */
    lwkt_synchronize_ipiqs("muninit");
#endif

#ifdef INVARIANTS
    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
        ttl += type->ks_memuse[i];
    if (ttl) {
        kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
            ttl, type->ks_shortdesc, i);
    }
#endif
    if (type == kmemstatistics) {
        kmemstatistics = type->ks_next;
    } else {
        for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
            if (t->ks_next == type) {
                t->ks_next = type->ks_next;
                break;
            }
        }
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
}
/*
 * Increase the kmalloc pool limit for the specified pool.  No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
    if (type->ks_limit == 0)
        malloc_init(type);
    if (bytes == 0)
        bytes = KvaSize;
    if (type->ks_limit < bytes)
        type->ks_limit = bytes;
}
/*
 * Dynamically create a malloc pool.  This function is a NOP if *typep is
 * already assigned.
 */
void
kmalloc_create(struct malloc_type **typep, const char *descr)
{
    struct malloc_type *type;

    if (*typep == NULL) {
        type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
        type->ks_magic = M_MAGIC;
        type->ks_shortdesc = descr;
        malloc_init(type);
        *typep = type;
    }
}
/*
 * Destroy a dynamically created malloc pool.  This function is a NOP if
 * the pool has already been destroyed.
 */
void
kmalloc_destroy(struct malloc_type **typep)
{
    if (*typep != NULL) {
        malloc_uninit(*typep);
        kfree(*typep, M_TEMP);
        *typep = NULL;
    }
}
/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes)
{
    unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */

    if (n < 128) {
        *bytes = n = (n + 7) & ~7;
        return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
        *bytes = n = (n + 15) & ~15;
        return(n / 16 + 7);
    }
    if (n < 8192) {
        if (n < 512) {
            *bytes = n = (n + 31) & ~31;
            return(n / 32 + 15);
        }
        if (n < 1024) {
            *bytes = n = (n + 63) & ~63;
            return(n / 64 + 23);
        }
        if (n < 2048) {
            *bytes = n = (n + 127) & ~127;
            return(n / 128 + 31);
        }
        if (n < 4096) {
            *bytes = n = (n + 255) & ~255;
            return(n / 256 + 39);
        }
        *bytes = n = (n + 511) & ~511;
        return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
        *bytes = n = (n + 1023) & ~1023;
        return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
        *bytes = n = (n + 2047) & ~2047;
        return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}
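/*
 * Worked example (illustrative): a 100-byte request is rounded up to 104
 * bytes ((100 + 7) & ~7) and maps to zone index 104/8 - 1 == 12, so every
 * request between 97 and 104 bytes shares the same 104-byte chunk zone.
 */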
/*
 * kmalloc()	(SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE
 *			- allow the freelist to be exhausted
 */
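/*
 * Typical call pattern (illustrative):
 *
 *	buf = kmalloc(len, M_TEMP, M_WAITOK | M_ZERO);
 *	...
 *	kfree(buf, M_TEMP);
 *
 * A request made with M_NULLOK (e.g. M_RNOWAIT | M_NULLOK) may return
 * NULL and the caller must check for it.
 */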
void *
kmalloc(unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    SLChunk *chunk;
    SLChunk *bchunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int zi;
    int i;

    logmemory_quick(malloc_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
        crit_enter();
        if (type->ks_limit == 0)
            malloc_init(type);
        crit_exit();
    }

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  The original malloc code looped, but this tended to
     * simply deadlock the computer.
     *
     * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
     * to determine if a more complete limit check should be done.  The
     * actual memory use is tracked via ks_memuse[cpu].
     */
    while (type->ks_loosememuse >= type->ks_limit) {
        long ttl;

        for (i = ttl = 0; i < ncpus; ++i)
            ttl += type->ks_memuse[i];
        type->ks_loosememuse = ttl;	/* not MP synchronized */
        if ((ssize_t)ttl < 0)		/* deal with occasional race */
            ttl = 0;
        if (ttl >= type->ks_limit) {
            if (flags & M_NULLOK) {
                logmemory(malloc_end, NULL, type, size, flags);
                return(NULL);
            }
            panic("%s: malloc limit exceeded", type->ks_shortdesc);
        }
    }
    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0) {
        logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
        return(ZERO_LENGTH_PTR);
    }
    /*
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
     *
     * NOTE: ku_pagecnt must be cleared before we free the slab or we
     *	     might race another cpu allocating the kva and setting
     *	     ku_pagecnt.
     */
    while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if (slgd->NFreeZones > ZONE_RELS_THRESH) {	/* crit sect race */
            struct kmemusage *kup;

            z = slgd->FreeZones;
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            kup = btokup(z);
            kup->ku_pagecnt = 0;
            kmem_slab_free(z, ZoneSize);	/* may block */
            atomic_add_int(&ZoneGenAlloc, -(int)ZoneSize / 1024);
        }
        crit_exit();
    }
    /*
     * XXX handle oversized frees that were queued from kfree().
     */
    while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if ((z = slgd->FreeOvZones) != NULL) {
            vm_size_t tsize;

            KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
            slgd->FreeOvZones = z->z_Next;
            tsize = z->z_ChunkSize;
            kmem_slab_free(z, tsize);	/* may block */
            atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024);
        }
        crit_exit();
    }
    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * The backend allocator is pretty nasty on an SMP system.  Use the
     * slab allocator for one and two page-sized chunks even though we lose
     * some efficiency.  XXX maybe fix mmio and the elf loader instead.
     */
    if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
        struct kmemusage *kup;

        size = round_page(size);
        chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
        if (chunk == NULL) {
            logmemory(malloc_end, NULL, type, size, flags);
            return(NULL);
        }
        atomic_add_int(&ZoneBigAlloc, (int)size / 1024);
        flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
        flags |= M_PASSIVE_ZERO;
        kup = btokup(chunk);
        kup->ku_pagecnt = size / PAGE_SIZE;
        crit_enter();
        goto done;
    }
    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    KKASSERT(zi < NZONES);
    crit_enter();
    if ((z = slgd->ZoneAry[zi]) != NULL) {
        /*
         * Locate a chunk - we have to have at least one.  If this is the
         * last chunk go ahead and do the work to retrieve chunks freed
         * from remote cpus, and if the zone is still empty move it off
         * the ZoneAry.
         */
        if (--z->z_NFree <= 0) {
            KKASSERT(z->z_NFree == 0);

            /*
             * WARNING! This code competes with other cpus.  It is ok
             * for us to not drain RChunks here but we might as well, and
             * it is ok if more accumulate after we're done.
             *
             * Set RSignal before pulling rchunks off, indicating that we
             * will be moving ourselves off of the ZoneAry.  Remote ends will
             * read RSignal before putting rchunks on thus interlocking
             * their IPI signaling.
             */
            if (z->z_RChunks == NULL)
                atomic_swap_int(&z->z_RSignal, 1);

            while ((bchunk = z->z_RChunks) != NULL) {
                if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
                    *z->z_LChunksp = bchunk;
                    while (bchunk) {
                        chunk_mark_free(z, bchunk);
                        z->z_LChunksp = &bchunk->c_Next;
                        bchunk = bchunk->c_Next;
                        ++z->z_NFree;
                    }
                    break;
                }
            }

            /*
             * Remove from the zone list if no free chunks remain.
             */
            if (z->z_NFree == 0) {
                slgd->ZoneAry[zi] = z->z_Next;
                z->z_Next = NULL;
            }
        }
        /*
         * Fast path, we have chunks available in z_LChunks.
         */
        chunk = z->z_LChunks;
        if (chunk) {
            chunk_mark_allocated(z, chunk);
            z->z_LChunks = chunk->c_Next;
            if (z->z_LChunks == NULL)
                z->z_LChunksp = &z->z_LChunks;
            goto done;
        }
        /*
         * No chunks are available in LChunks, the free chunk MUST be
         * in the never-before-used memory area, controlled by UIndex.
         *
         * The consequences are very serious if our zone got corrupted so
         * we use an explicit panic rather than a KASSERT.
         */
        if (z->z_UIndex + 1 != z->z_NMax)
            ++z->z_UIndex;
        else
            z->z_UIndex = 0;

        if (z->z_UIndex == z->z_UEndIndex)
            panic("slaballoc: corrupted zone");

        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;
            flags |= M_PASSIVE_ZERO;
        }
        chunk_mark_allocated(z, chunk);
        goto done;
    }
    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zero'd pages.  Also see
     * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
     * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
     * we do not pre-zero it because we do not want to mess up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
        int off;
        struct kmemusage *kup;

        if ((z = slgd->FreeZones) != NULL) {
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            bzero(z, sizeof(SLZone));
            z->z_Flags |= SLZF_UNOTZEROD;
        } else {
            z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
            if (z == NULL)
                goto fail;
            atomic_add_int(&ZoneGenAlloc, (int)ZoneSize / 1024);
        }
        /*
         * How big is the base structure?
         */
#if defined(INVARIANTS)
        /*
         * Make room for z_Bitmap.  An exact calculation is somewhat more
         * complicated so don't make an exact calculation.
         */
        off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
        bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
        off = sizeof(SLZone);
#endif

        /*
         * Guarantee power-of-2 alignment for power-of-2-sized chunks.
         * Otherwise just 8-byte align the data.
         */
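        /*
         * Illustrative aside: for a power-of-2 size such as 256,
         * (size | (size - 1)) + 1 == (256 | 255) + 1 == 512 == size << 1,
         * so the test below selects size-aligned placement; any other
         * size fails the identity and only gets MIN_CHUNK_SIZE (8 byte)
         * alignment.
         */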
        if ((size | (size - 1)) + 1 == (size << 1))
            off = (off + size - 1) & ~(size - 1);
        else
            off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
        z->z_Magic = ZALLOC_SLAB_MAGIC;
        z->z_ZoneIndex = zi;
        z->z_NMax = (ZoneSize - off) / size;
        z->z_NFree = z->z_NMax - 1;
        z->z_BasePtr = (char *)z + off;
        z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
        z->z_ChunkSize = size;
        z->z_CpuGd = gd;
        z->z_Cpu = gd->gd_cpuid;
        z->z_LChunksp = &z->z_LChunks;
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        z->z_Next = slgd->ZoneAry[zi];
        slgd->ZoneAry[zi] = z;
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;	/* already zero'd */
            flags |= M_PASSIVE_ZERO;
        }
        kup = btokup(z);
        kup->ku_pagecnt = -(z->z_Cpu + 1);	/* -1 to -(N+1) */
        chunk_mark_allocated(z, chunk);

        /*
         * Slide the base index for initial allocations out of the next
         * zone we create so we do not over-weight the lower part of the
         * cpu memory caches.
         */
        slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
                          & (ZALLOC_MAX_ZONE_SIZE - 1);
    }
done:
    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;	/* not MP synchronized */
    crit_exit();

    if (flags & M_ZERO)
        bzero(chunk, size);
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
        if (use_malloc_pattern) {
            for (i = 0; i < size; i += sizeof(int)) {
                *(int *)((char *)chunk + i) = -1;
            }
        }
        chunk->c_Next = (void *)-1;	/* avoid accidental double-free check */
    }
    logmemory(malloc_end, chunk, type, size, flags);
    return(chunk);

fail:
    crit_exit();
    logmemory(malloc_end, NULL, type, size, flags);
    return(NULL);
}
/*
 * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.
 */
void *
krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{
    struct kmemusage *kup;
    unsigned long osize;
    SLZone *z;
    void *nptr;

    KKASSERT((flags & M_ZERO) == 0);	/* not supported */

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
        return(kmalloc(size, type, flags));
    if (size == 0) {
        kfree(ptr, type);
        return(NULL);
    }
    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     */
    kup = btokup(ptr);
    if (kup->ku_pagecnt > 0) {
        osize = kup->ku_pagecnt << PAGE_SHIFT;
        if (osize == round_page(size))
            return(ptr);
        if ((nptr = kmalloc(size, type, flags)) == NULL)
            return(NULL);
        bcopy(ptr, nptr, min(size, osize));
        kfree(ptr, type);
        return(nptr);
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ZoneMask);
    kup = btokup(z);
    KKASSERT(kup->ku_pagecnt < 0);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     *
     * Resizing a non-power-of-2 allocation to a power-of-2 size does not
     * necessarily align the result.
     *
     * We can only zoneindex (to align size to the chunk size) if the new
     * size is not too large.
     */
    if (size < ZoneLimit) {
        zoneindex(&size);
        if (z->z_ChunkSize == size)
            return(ptr);
    }
    if ((nptr = kmalloc(size, type, flags)) == NULL)
        return(NULL);
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
    kfree(ptr, type);
    return(nptr);
}
/*
 * Return the kmalloc limit for this type, in bytes.
 */
long
kmalloc_limit(struct malloc_type *type)
{
    if (type->ks_limit == 0) {
        crit_enter();
        if (type->ks_limit == 0)
            malloc_init(type);
        crit_exit();
    }
    return(type->ks_limit);
}
/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
char *
kstrdup(const char *str, struct malloc_type *type)
{
    int zlen;	/* length inclusive of terminating NUL */
    char *nstr;

    if (str == NULL)
        return(NULL);
    zlen = strlen(str) + 1;
    nstr = kmalloc(zlen, type, M_WAITOK);
    bcopy(str, nstr, zlen);
    return(nstr);
}
/*
 * Notify our cpu that a remote cpu has freed some chunks in a zone that
 * we own.  Due to MP races we might no longer own the zone, use the
 * kmemusage array to check.
 */
static void
kfree_remote(void *ptr)
{
    struct kmemusage *kup;
    SLGlobalData *slgd;
    SLChunk *bchunk;
    SLZone *z;
    int nfree;

    /*
     * Do not dereference (z) until we validate that its storage is
     * still owned by this cpu.
     */
    slgd = &mycpu->gd_slab;
    z = ptr;
    kup = btokup(z);

    if (kup->ku_pagecnt == -((int)mycpuid + 1)) {	/* -1 to -(N+1) */
        logmemory(free_rem_beg, z, NULL, 0, 0);
        KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
        KKASSERT(z->z_Cpu == mycpu->gd_cpuid);
        nfree = z->z_NFree;

        /*
         * Indicate that we will no longer be off of the ZoneAry by
         * clearing RSignal.
         */
        if (z->z_RChunks)
            z->z_RSignal = 0;

        /*
         * Atomically extract the bchunks list and then process it back
         * into the lchunks list.  We want to append our bchunks to the
         * lchunks list and not prepend since we likely do not have
         * cache mastership of the related data (not that it helps since
         * we are using c_Next).
         */
        while ((bchunk = z->z_RChunks) != NULL) {
            if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
                *z->z_LChunksp = bchunk;
                while (bchunk) {
                    chunk_mark_free(z, bchunk);
                    z->z_LChunksp = &bchunk->c_Next;
                    bchunk = bchunk->c_Next;
                    ++z->z_NFree;
                }
                break;
            }
        }

        if (z->z_NFree && nfree == 0) {
            z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
            slgd->ZoneAry[z->z_ZoneIndex] = z;
        }

        /*
         * If the zone becomes totally free, and there are other zones we
         * can allocate from, move this zone to the FreeZones list.  Since
         * this code can be called from an IPI callback, do *NOT* try to mess
         * with kernel_map here.  Hysteresis will be performed at malloc() time.
         */
        if (z->z_NFree == z->z_NMax &&
            (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
        ) {
            struct kmemusage *kup;
            SLZone **pz;

            for (pz = &slgd->ZoneAry[z->z_ZoneIndex];
                 z != *pz;
                 pz = &(*pz)->z_Next) {
                ;
            }
            *pz = z->z_Next;
            z->z_Next = slgd->FreeZones;
            slgd->FreeZones = z;
            ++slgd->NFreeZones;
            kup = btokup(z);
            kup->ku_pagecnt = 0;
        }
        logmemory(free_rem_end, z, bchunk, 0, 0);
    }
}
/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * MPSAFE
 */
void
kfree(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLChunk *bchunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    struct kmemusage *kup;
    unsigned long size;
    int rsignal;

    logmemory_quick(free_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    if (ptr == NULL)
        panic("trying to free NULL pointer");

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR) {
        logmemory(free_zero, ptr, type, -1, 0);
        logmemory_quick(free_end);
        return;
    }

    /*
     * Panic on bad malloc type
     */
    if (type->ks_magic != M_MAGIC)
        panic("free: malloc type lacks magic");
    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    kup = btokup(ptr);
    if (kup->ku_pagecnt > 0) {
        size = kup->ku_pagecnt << PAGE_SHIFT;
        kup->ku_pagecnt = 0;
        KKASSERT(sizeof(weirdary) <= size);
        bcopy(weirdary, ptr, sizeof(weirdary));

        /*
         * NOTE: For oversized allocations we do not record the
         *	 originating cpu.  It gets freed on the cpu calling
         *	 kfree().  The statistics are in aggregate.
         *
         * note: XXX we have still inherited the interrupts-can't-block
         *	 assumption.  An interrupt thread does not bump
         *	 gd_intr_nesting_level so check TDF_INTTHREAD.  This is
         *	 primarily until we can fix softupdate's assumptions about free().
         */
        crit_enter();
        --type->ks_inuse[gd->gd_cpuid];
        type->ks_memuse[gd->gd_cpuid] -= size;
        if (mycpu->gd_intr_nesting_level ||
            (gd->gd_curthread->td_flags & TDF_INTTHREAD))
        {
            logmemory(free_ovsz_delayed, ptr, type, size, 0);
            z = (SLZone *)ptr;
            z->z_Magic = ZALLOC_OVSZ_MAGIC;
            z->z_Next = slgd->FreeOvZones;
            z->z_ChunkSize = size;
            slgd->FreeOvZones = z;
            crit_exit();
        } else {
            crit_exit();
            logmemory(free_ovsz, ptr, type, size, 0);
            kmem_slab_free(ptr, size);	/* may block */
            atomic_add_int(&ZoneBigAlloc, -(int)size / 1024);
        }
        logmemory_quick(free_end);
        return;
    }
    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
     */
    z = (SLZone *)((uintptr_t)ptr & ZoneMask);
    kup = btokup(z);
    KKASSERT(kup->ku_pagecnt < 0);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * If we do not own the zone then use atomic ops to free to the
     * remote cpu linked list and notify the target zone using a
     * passive IPI message.
     *
     * The target zone cannot be deallocated while we own a chunk of it,
     * so the zone header's storage is stable until the very moment
     * we adjust z_RChunks.  After that we cannot safely dereference (z).
     *
     * (no critical section needed)
     */
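    /*
     * Illustrative summary of the remote-free handshake: the freeing cpu
     * reads z_RSignal, atomically pushes the chunk onto z_RChunks with a
     * cmpset loop, and only sends the passive IPI (kfree_remote) on a
     * NULL to non-NULL transition when the owning cpu has asked to be
     * signalled, keeping IPI traffic close to zero.
     */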
    if (z->z_CpuGd != gd) {
#ifdef SMP
        /*
         * Making these adjustments now allows us to avoid passing (type)
         * to the remote cpu.  Note that ks_inuse/ks_memuse is being
         * adjusted on OUR cpu, not the zone cpu, but it should all still
         * sum up properly and cancel out.
         */
        --type->ks_inuse[gd->gd_cpuid];
        type->ks_memuse[gd->gd_cpuid] -= z->z_ChunkSize;

        /*
         * WARNING! This code competes with other cpus.  Once we
         *	    successfully link the chunk to RChunks the remote
         *	    cpu can rip z's storage out from under us.
         */
        rsignal = z->z_RSignal;
        chunk = ptr;
        for (;;) {
            bchunk = z->z_RChunks;
            chunk->c_Next = bchunk;
            if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
                break;
        }
        /* z cannot be dereferenced now */

        /*
         * We have to signal the remote cpu if our actions will cause
         * the remote zone to be placed back on ZoneAry so it can
         * move the zone back on.
         *
         * We only need to deal with NULL->non-NULL RChunk transitions
         * and only if z_RSignal is set.  We interlock by reading rsignal
         * before adding our chunk to RChunks.  This should result in
         * virtually no IPI traffic.
         *
         * We can use a passive IPI to reduce overhead even further.
         */
        if (bchunk == NULL && rsignal) {
            logmemory(free_request, ptr, type, z->z_ChunkSize, 0);
            lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
        }
#else
        panic("Corrupt SLZone");
#endif
        logmemory_quick(free_end);
        return;
    }
    /*
     * kfree locally
     */
    logmemory(free_chunk, ptr, type, z->z_ChunkSize, 0);

    crit_enter();
    chunk = ptr;
    chunk_mark_free(z, chunk);

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
    if (z->z_ChunkSize < sizeof(weirdary))
        bcopy(weirdary, chunk, z->z_ChunkSize);
    else
        bcopy(weirdary, chunk, sizeof(weirdary));

    /*
     * Add this free non-zero'd chunk to a linked list for reuse.  Add
     * to the front of the linked list so it is more likely to be
     * reallocated, since it is already in our L1 cache.
     */
    if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
        panic("BADFREE %p", chunk);
    chunk->c_Next = z->z_LChunks;
    z->z_LChunks = chunk;
    if (chunk->c_Next == NULL)
        z->z_LChunksp = &chunk->c_Next;
    if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
        panic("BADFREE2");

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
        z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
        slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_NFree == z->z_NMax &&
        (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
    ) {
        SLZone **pz;
        struct kmemusage *kup;

        for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
            ;
        *pz = z->z_Next;
        z->z_Next = slgd->FreeZones;
        slgd->FreeZones = z;
        ++slgd->NFreeZones;
        kup = btokup(z);
        kup->ku_pagecnt = 0;
    }
    logmemory_quick(free_end);
    crit_exit();
}
#if defined(INVARIANTS)

/*
 * Helper routines for sanity checks
 */
static void
chunk_mark_allocated(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
    KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
	    ("memory chunk %p bit index %d is illegal", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) == 0,
	    ("memory chunk %p is already allocated!", chunk));
    *bitptr |= 1 << bitdex;
}

static void
chunk_mark_free(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
    KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
	    ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) != 0,
	    ("memory chunk %p is already free!", chunk));
    *bitptr &= ~(1 << bitdex);
}

#endif
/*
 * kmem_slab_alloc()
 *
 *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *	specified alignment.  M_* flags are expected in the flags field.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *	but when we move zalloc() over to use this function as its backend
 *	we will have to switch to kreserve/krelease and call reserve(0)
 *	after the new space is made available.
 *
 *	Interrupt code which has preempted other code is not allowed to
 *	use PQ_CACHE pages.  However, if an interrupt thread is run
 *	non-preemptively or blocks and then runs non-preemptively, then
 *	it is free to use PQ_CACHE pages.
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
    vm_size_t i;
    vm_offset_t addr;
    int count, vmflags, base_vmflags;
    thread_t td;

    size = round_page(size);
    addr = vm_map_min(&kernel_map);

    /*
     * Reserve properly aligned space from kernel_map.  RNOWAIT allocations
     * cannot block.
     */
    if (flags & M_RNOWAIT) {
        if (lwkt_trytoken(&vm_token) == 0)
            return(NULL);
    } else {
        lwkt_gettoken(&vm_token);
    }
    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

    vm_map_lock(&kernel_map);
    if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
        vm_map_unlock(&kernel_map);
        if ((flags & M_NULLOK) == 0)
            panic("kmem_slab_alloc(): kernel_map ran out of space!");
        vm_map_entry_release(count);
        lwkt_reltoken(&vm_token);
        return(NULL);
    }

    /*
     * kernel_object maps 1:1 to kernel_map.
     */
    vm_object_reference(&kernel_object);
    vm_map_insert(&kernel_map, &count,
                  &kernel_object, addr, addr, addr + size,
                  VM_MAPTYPE_NORMAL,
                  VM_PROT_ALL, VM_PROT_ALL,
                  0);

    td = curthread;

    base_vmflags = 0;
    if (flags & M_ZERO)
        base_vmflags |= VM_ALLOC_ZERO;
    if (flags & M_USE_RESERVE)
        base_vmflags |= VM_ALLOC_SYSTEM;
    if (flags & M_USE_INTERRUPT_RESERVE)
        base_vmflags |= VM_ALLOC_INTERRUPT;
    if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) {
        panic("kmem_slab_alloc: bad flags %08x (%p)",
              flags, ((int **)&size)[-1]);
    }
    /*
     * Allocate the pages.  Do not mess with the PG_ZERO flag yet.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
        vm_page_t m;

        /*
         * VM_ALLOC_NORMAL can only be set if we are not preempting.
         *
         * VM_ALLOC_SYSTEM is automatically set if we are preempting and
         * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
         * implied in this case), though I'm not sure if we really need to
         * do that.
         */
        vmflags = base_vmflags;
        if (flags & M_WAITOK) {
            if (td->td_preempted)
                vmflags |= VM_ALLOC_SYSTEM;
            else
                vmflags |= VM_ALLOC_NORMAL;
        }

        m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);

        /*
         * If the allocation failed we either return NULL or we retry.
         *
         * If M_WAITOK is specified we wait for more memory and retry.
         * If M_WAITOK is specified from a preemption we yield instead of
         * wait.  Livelock will not occur because the interrupt thread
         * will not be preempting anyone the second time around after the
         * yield.
         */
        if (m == NULL) {
            if (flags & M_WAITOK) {
                if (td->td_preempted) {
                    vm_map_unlock(&kernel_map);
                    lwkt_switch();
                    vm_map_lock(&kernel_map);
                } else {
                    vm_map_unlock(&kernel_map);
                    vm_wait(0);
                    vm_map_lock(&kernel_map);
                }
                i -= PAGE_SIZE;	/* retry */
                continue;
            }

            /*
             * We were unable to recover, cleanup and return NULL
             *
             * (vm_token already held)
             */
            while (i != 0) {
                i -= PAGE_SIZE;
                m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
                /* page should already be busy */
                vm_page_free(m);
            }
            vm_map_delete(&kernel_map, addr, addr + size, &count);
            vm_map_unlock(&kernel_map);
            vm_map_entry_release(count);
            lwkt_reltoken(&vm_token);
            return(NULL);
        }
    }
    /*
     * Success!
     *
     * Mark the map entry as non-pageable using a routine that allows us to
     * populate the underlying pages.
     *
     * The pages were busied by the allocations above.
     */
    vm_map_set_wired_quick(&kernel_map, addr, size, &count);

    /*
     * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
     */
    lwkt_gettoken(&vm_token);
    for (i = 0; i < size; i += PAGE_SIZE) {
        vm_page_t m;

        m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
        m->valid = VM_PAGE_BITS_ALL;
        /* page should already be busy */
        vm_page_wire(m);
        vm_page_wakeup(m);
        pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
        if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
            bzero((char *)addr + i, PAGE_SIZE);
        vm_page_flag_clear(m, PG_ZERO);
        KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
        vm_page_flag_set(m, PG_REFERENCED);
    }
    lwkt_reltoken(&vm_token);
    vm_map_unlock(&kernel_map);
    vm_map_entry_release(count);
    lwkt_reltoken(&vm_token);
    return((void *)addr);
}
/*
 * kmem_slab_free()
 */
static void
kmem_slab_free(void *ptr, vm_size_t size)
{
    crit_enter();
    lwkt_gettoken(&vm_token);
    vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
    lwkt_reltoken(&vm_token);
    crit_exit();
}