2 * NMALLOC.C - New Malloc (ported from kernel slab allocator)
4 * Copyright (c) 2003,2004,2009,2010 The DragonFly Project. All rights reserved.
6 * This code is derived from software contributed to The DragonFly Project
7 * by Matthew Dillon <dillon@backplane.com> and by
8 * Venkatesh Srinivas <me@endeavour.zapto.org>.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * $Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $
40 * This module implements a slab allocator drop-in replacement for the libc malloc().
43 * A slab allocator reserves a ZONE for each chunk size, then lays the
44 * chunks out in an array within the zone. Allocation and deallocation
45 * is nearly instantaneous, and overhead losses are limited to a fixed worst-case amount per zone.
48 * The slab allocator does not have to pre-initialize the list of
49 * free chunks for each zone, and the underlying VM will not be
50 * touched at all beyond the zone header until an actual allocation needs it.
53 * Slab management and locking are done on a per-zone basis.
55 * Alloc Size Chunking Number of zones
66 * Allocations >= ZoneLimit (16K) go directly to mmap and a hash table
67 * is used to locate them on free(). One- and two-page allocations use the
68 * zone mechanism to avoid excessive mmap()/munmap() calls.
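 *
 *	For example (illustrative, assuming 4 KiB pages): a 100 byte malloc()
 *	is rounded to a 104 byte chunk and served from a zone; an 8192 byte
 *	(two page) request still uses a zone; a 20000 byte request bypasses
 *	the zones, is mmap()ed directly, and is entered into the bigalloc
 *	hash table so free() can find it again.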
70 * API FEATURES AND SIDE EFFECTS
72 * + power-of-2 sized allocations up to a page will be power-of-2 aligned.
73 * Above that power-of-2 sized allocations are page-aligned. Non
74 * power-of-2 sized allocations are aligned the same as the chunk
75 * size for their zone.
76 * + malloc(0) returns a special non-NULL value
77 * + ability to allocate arbitrarily large chunks of memory
78 * + realloc will reuse the passed pointer if possible, within the
79 * limitations of the zone chunking.
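 *
 *	A minimal sketch of the documented behavior (illustrative only):
 *
 *		void *p = malloc(0);	/* special non-NULL pointer */
 *		void *q = malloc(256);	/* power-of-2: 256-byte aligned */
 *		q = realloc(q, 300);	/* 300 rounds to a larger chunk, so
 *					 * a new pointer may be returned */
 *		free(p);		/* the special pointer is accepted */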
81 * Multithreaded enhancements for small allocations introduced August 2010.
82 * These are in the spirit of 'libumem'. See:
83 * Bonwick, J.; Adams, J. (2001). "Magazines and Vmem: Extending the
84 * slab allocator to many CPUs and arbitrary resources". In Proc. 2001
85 * USENIX Technical Conference. USENIX Association.
89 * The value of the environment variable MALLOC_OPTIONS is a character string
90 * containing various flags to tune nmalloc.
92 * 'U' / ['u'] Generate / do not generate utrace entries for ktrace(1)
93 * This will generate utrace events for all malloc,
94 * realloc, and free calls. There are tools that can
95 * interpret these logs: mtrplay replays an allocation
96 * pattern and mtrgraph graphs the heap structure.
97 * 'Z' / ['z'] Zero out / do not zero all allocations.
98 * Each new byte of memory allocated by malloc, realloc, or
99 * reallocf will be initialized to 0. This is intended for
100 * debugging and will affect performance negatively.
101 * 'H' / ['h'] Pass a hint (via madvise) to the kernel about pages unused
102 * by the allocation functions.
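 *
 *	For example, starting a program with MALLOC_OPTIONS=ZU (illustrative
 *	only) both zeroes every new allocation and emits utrace records,
 *	while MALLOC_OPTIONS=h explicitly disables the madvise hinting.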
105 /* cc -shared -fPIC -g -O -I/usr/src/lib/libc/include -o nmalloc.so nmalloc.c */
107 #include "libc_private.h"
109 #include <sys/param.h>
110 #include <sys/types.h>
111 #include <sys/mman.h>
112 #include <sys/queue.h>
114 #include <sys/ktrace.h>
126 #include "spinlock.h"
127 #include "un-namespace.h"
130 * Linked list of large allocations
132 typedef struct bigalloc {
133 struct bigalloc *next; /* hash link */
134 void *base; /* base pointer */
135 u_long bytes; /* bytes allocated */
139 * Note that any allocations which are exact multiples of PAGE_SIZE, or
140 * which are >= ZALLOC_ZONE_LIMIT, will fall through to the kmem subsystem.
142 #define ZALLOC_ZONE_LIMIT (16 * 1024) /* max slab-managed alloc */
143 #define ZALLOC_MIN_ZONE_SIZE (32 * 1024) /* minimum zone size */
144 #define ZALLOC_MAX_ZONE_SIZE (128 * 1024) /* maximum zone size */
145 #define ZALLOC_ZONE_SIZE (64 * 1024)
146 #define ZALLOC_SLAB_MAGIC 0x736c6162 /* magic sanity */
147 #define ZALLOC_SLAB_SLIDE 20 /* L1-cache skip */
149 #if ZALLOC_ZONE_LIMIT == 16384
151 #elif ZALLOC_ZONE_LIMIT == 32768
154 #error "I couldn't figure out NZONES"
158 * Chunk structure for free elements
160 typedef struct slchunk {
161 struct slchunk *c_Next;
165 * The IN-BAND zone header is placed at the beginning of each zone.
169 typedef struct slzone {
170 int32_t z_Magic; /* magic number for sanity check */
171 int z_NFree; /* total free chunks / ualloc space */
172 struct slzone *z_Next; /* ZoneAry[] link if z_NFree non-zero */
173 int z_NMax; /* maximum free chunks */
174 char *z_BasePtr; /* pointer to start of chunk array */
175 int z_UIndex; /* current initial allocation index */
176 int z_UEndIndex; /* last (first) allocation index */
177 int z_ChunkSize; /* chunk size for validation */
178 int z_FirstFreePg; /* chunk list on a page-by-page basis */
181 struct slchunk *z_PageAry[ZALLOC_ZONE_SIZE / PAGE_SIZE];
182 #if defined(INVARIANTS)
183 __uint32_t z_Bitmap[]; /* bitmap of free chunks / sanity */
187 typedef struct slglobaldata {
189 slzone_t ZoneAry[NZONES];/* linked list of zones NFree > 0 */
193 #define SLZF_UNOTZEROD 0x0001
195 #define FASTSLABREALLOC 0x02
198 * Misc constants. Note that allocations that are exact multiples of
199 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
200 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
202 #define MIN_CHUNK_SIZE 8 /* in bytes */
203 #define MIN_CHUNK_MASK (MIN_CHUNK_SIZE - 1)
204 #define IN_SAME_PAGE_MASK (~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
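/*
 * Illustrative note (assuming 4 KiB pages): IN_SAME_PAGE_MASK keeps a
 * pointer's page-frame bits plus its low 3 bits, so two properly aligned
 * chunks compare equal under the mask only when they sit in the same page:
 *
 *	(0x800123458 & IN_SAME_PAGE_MASK) == (0x800123460 & IN_SAME_PAGE_MASK)
 *
 * while a garbage or stale c_Next pointer from another page will not match.
 */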
207 * The WEIRD_ADDR is used as known text to copy into free objects to
208 * try to create deterministic failure cases if the data is accessed after it is freed.
211 * WARNING: A limited number of spinlocks are available, BIGXSIZE should
212 * not be larger than 64.
214 #define WEIRD_ADDR 0xdeadc0de
215 #define MAX_COPY sizeof(weirdary)
216 #define ZERO_LENGTH_PTR ((void *)&malloc_dummy_pointer)
218 #define BIGHSHIFT 10 /* bigalloc hash table */
219 #define BIGHSIZE (1 << BIGHSHIFT)
220 #define BIGHMASK (BIGHSIZE - 1)
221 #define BIGXSIZE (BIGHSIZE / 16) /* bigalloc lock table */
222 #define BIGXMASK (BIGXSIZE - 1)
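/*
 * Rough sketch of how the bigalloc tables are used (the real code is in
 * bigalloc_lock() and friends below):
 *
 *	hv    = _bigalloc_hash(ptr);
 *	chain = &bigalloc_array[hv & BIGHMASK];	/* 1024 hash chains */
 *	lock  = &bigspin_array[hv & BIGXMASK];	/* 64 spinlocks, ~16 chains each */
 *
 * so locating a large allocation costs one spinlock acquisition plus a short
 * linked-list walk.
 */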
224 #define SAFLAG_ZERO 0x0001
225 #define SAFLAG_PASSIVE 0x0002
231 #define arysize(ary) (sizeof(ary)/sizeof((ary)[0]))
233 #define MASSERT(exp) do { if (__predict_false(!(exp))) \
234 _mpanic("assertion: %s in %s", \
242 #define M_MAX_ROUNDS 64
243 #define M_ZONE_ROUNDS 64
244 #define M_LOW_ROUNDS 32
245 #define M_INIT_ROUNDS 8
246 #define M_BURST_FACTOR 8
247 #define M_BURST_NSCALE 2
249 #define M_BURST 0x0001
250 #define M_BURST_EARLY 0x0002
253 SLIST_ENTRY(magazine) nextmagazine;
256 int capacity; /* Max rounds in this magazine */
257 int rounds; /* Current number of free rounds */
258 int burst_factor; /* Number of blocks to prefill with */
259 int low_factor; /* Free till low_factor from full mag */
260 void *objects[M_MAX_ROUNDS];
263 SLIST_HEAD(magazinelist, magazine);
265 static spinlock_t zone_mag_lock;
266 static struct magazine zone_magazine = {
267 .flags = M_BURST | M_BURST_EARLY,
268 .capacity = M_ZONE_ROUNDS,
270 .burst_factor = M_BURST_FACTOR,
271 .low_factor = M_LOW_ROUNDS
274 #define MAGAZINE_FULL(mp) ((mp)->rounds == (mp)->capacity)
275 #define MAGAZINE_NOTFULL(mp) ((mp)->rounds < (mp)->capacity)
276 #define MAGAZINE_EMPTY(mp) ((mp)->rounds == 0)
277 #define MAGAZINE_NOTEMPTY(mp) ((mp)->rounds != 0)
279 /* Each thread will have a pair of magazines per size-class (NZONES).
280 * The loaded magazine supports immediate allocations; the previous
281 * magazine is either full or empty and can be swapped in as needed. */
282 typedef struct magazine_pair {
283 struct magazine *loaded;
284 struct magazine *prev;
287 /* A depot is a collection of magazines for a single zone. */
288 typedef struct magazine_depot {
289 struct magazinelist full;
290 struct magazinelist empty;
294 typedef struct thr_mags {
295 magazine_pair mags[NZONES];
296 struct magazine *newmag;
301 * With this attribute set, do not require a function call for accessing
302 * this variable when the code is compiled -fPIC. Empty for libc_rtld
306 #define TLS_ATTRIBUTE
308 #define TLS_ATTRIBUTE __attribute__ ((tls_model ("initial-exec")))
311 static int mtmagazine_free_live;
312 static __thread thr_mags thread_mags TLS_ATTRIBUTE;
313 static pthread_key_t thread_mags_key;
314 static pthread_once_t thread_mags_once = PTHREAD_ONCE_INIT;
315 static magazine_depot depots[NZONES];
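/*
 * Summary sketch of the small-allocation caching layers; the authoritative
 * logic is in mtmagazine_alloc() and mtmagazine_free() below:
 *
 *	thread_mags.mags[zi].loaded	per-thread fast path, no locking
 *	thread_mags.mags[zi].prev	swapped with 'loaded' when useful
 *	depots[zi].full / .empty	shared lists, protected by a depot lock
 *	SLGlobalData.ZoneAry[zi]	the slab zones themselves
 *
 * A free that the per-thread magazines cannot absorb pushes a full magazine
 * into the depot; an allocation that drains them pulls one back out.
 */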
318 * Fixed globals (not per-cpu)
320 static const int ZoneSize = ZALLOC_ZONE_SIZE;
321 static const int ZoneLimit = ZALLOC_ZONE_LIMIT;
322 static const int ZonePageCount = ZALLOC_ZONE_SIZE / PAGE_SIZE;
323 static const int ZoneMask = ZALLOC_ZONE_SIZE - 1;
325 static int opt_madvise = 0;
326 static int opt_utrace = 0;
327 static int g_malloc_flags = 0;
328 static struct slglobaldata SLGlobalData;
329 static bigalloc_t bigalloc_array[BIGHSIZE];
330 static spinlock_t bigspin_array[BIGXSIZE];
331 static int malloc_panic;
332 static int malloc_dummy_pointer;
334 static const int32_t weirdary[16] = {
335 WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
336 WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
337 WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
338 WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR
341 static void *_slaballoc(size_t size, int flags);
342 static void *_slabrealloc(void *ptr, size_t size);
343 static void _slabfree(void *ptr, int, bigalloc_t *);
344 static void *_vmem_alloc(size_t bytes, size_t align, int flags);
345 static void _vmem_free(void *ptr, size_t bytes);
346 static void *magazine_alloc(struct magazine *, int *);
347 static int magazine_free(struct magazine *, void *);
348 static void *mtmagazine_alloc(int zi);
349 static int mtmagazine_free(int zi, void *);
350 static void mtmagazine_init(void);
351 static void mtmagazine_destructor(void *);
352 static slzone_t zone_alloc(int flags);
353 static void zone_free(void *z);
354 static void _mpanic(const char *ctl, ...) __printflike(1, 2);
355 static void malloc_init(void) __constructor(101);
356 #if defined(INVARIANTS)
357 static void chunk_mark_allocated(slzone_t z, void *chunk);
358 static void chunk_mark_free(slzone_t z, void *chunk);
361 struct nmalloc_utrace {
367 #define UTRACE(a, b, c) \
369 struct nmalloc_utrace ut = { \
374 utrace(&ut, sizeof(ut)); \
379 * If enabled, any memory allocated without SAFLAG_ZERO is initialized to -1.
381 static int use_malloc_pattern;
387 const char *p = NULL;
389 if (issetugid() == 0)
390 p = getenv("MALLOC_OPTIONS");
392 for (; p != NULL && *p != '\0'; p++) {
394 case 'u': opt_utrace = 0; break;
395 case 'U': opt_utrace = 1; break;
396 case 'h': opt_madvise = 0; break;
397 case 'H': opt_madvise = 1; break;
398 case 'z': g_malloc_flags = 0; break;
399 case 'Z': g_malloc_flags = SAFLAG_ZERO; break;
405 UTRACE((void *) -1, 0, NULL);
409 * We have to install a handler for nmalloc thread teardowns when
410 * the thread is created. We cannot delay this because destructors in
411 * sophisticated userland programs can call malloc() for the first time
412 * during their thread exit.
414 * This routine is called directly from pthreads.
417 _nmalloc_thr_init(void)
422 * Disallow mtmagazine operations until the mtmagazine is initialized.
428 if (mtmagazine_free_live == 0) {
429 mtmagazine_free_live = 1;
430 pthread_once(&thread_mags_once, mtmagazine_init);
432 pthread_setspecific(thread_mags_key, tp);
440 slgd_lock(slglobaldata_t slgd)
443 _SPINLOCK(&slgd->Spinlock);
447 slgd_unlock(slglobaldata_t slgd)
450 _SPINUNLOCK(&slgd->Spinlock);
454 depot_lock(magazine_depot *dp)
457 _SPINLOCK(&dp->lock);
461 depot_unlock(magazine_depot *dp)
464 _SPINUNLOCK(&dp->lock);
468 zone_magazine_lock(void)
471 _SPINLOCK(&zone_mag_lock);
475 zone_magazine_unlock(void)
478 _SPINUNLOCK(&zone_mag_lock);
482 swap_mags(magazine_pair *mp)
484 struct magazine *tmp;
486 mp->loaded = mp->prev;
491 * bigalloc hashing and locking support.
493 * Return an unmasked hash code for the passed pointer.
496 _bigalloc_hash(void *ptr)
500 hv = ((int)(intptr_t)ptr >> PAGE_SHIFT) ^
501 ((int)(intptr_t)ptr >> (PAGE_SHIFT + BIGHSHIFT));
507 * Lock the hash chain and return a pointer to its base for the specified address.
510 static __inline bigalloc_t *
511 bigalloc_lock(void *ptr)
513 int hv = _bigalloc_hash(ptr);
516 bigp = &bigalloc_array[hv & BIGHMASK];
518 _SPINLOCK(&bigspin_array[hv & BIGXMASK]);
523 * Lock the hash chain and return a pointer to its base for the specified address.
526 * BUT, if the hash chain is empty, just return NULL and do not bother taking the lock.
529 static __inline bigalloc_t *
530 bigalloc_check_and_lock(void *ptr)
532 int hv = _bigalloc_hash(ptr);
535 bigp = &bigalloc_array[hv & BIGHMASK];
539 _SPINLOCK(&bigspin_array[hv & BIGXMASK]);
545 bigalloc_unlock(void *ptr)
550 hv = _bigalloc_hash(ptr);
551 _SPINUNLOCK(&bigspin_array[hv & BIGXMASK]);
556 * Calculate the zone index for the allocation request size and set the
557 * allocation request size to that particular zone's chunk size.
560 zoneindex(size_t *bytes, size_t *chunking)
562 size_t n = (unsigned int)*bytes; /* unsigned for shift opt */
564 *bytes = n = (n + 7) & ~7;
566 return(n / 8 - 1); /* 8 byte chunks, 16 zones */
569 *bytes = n = (n + 15) & ~15;
575 *bytes = n = (n + 31) & ~31;
580 *bytes = n = (n + 63) & ~63;
585 *bytes = n = (n + 127) & ~127;
587 return(n / 128 + 31);
590 *bytes = n = (n + 255) & ~255;
592 return(n / 256 + 39);
594 *bytes = n = (n + 511) & ~511;
596 return(n / 512 + 47);
598 #if ZALLOC_ZONE_LIMIT > 8192
600 *bytes = n = (n + 1023) & ~1023;
602 return(n / 1024 + 55);
605 #if ZALLOC_ZONE_LIMIT > 16384
607 *bytes = n = (n + 2047) & ~2047;
609 return(n / 2048 + 63);
612 _mpanic("Unexpected byte count %zu", n);
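/*
 * Worked examples for zoneindex() (illustrative): a request for 100 bytes is
 * rounded up to 104 (8-byte chunking) and maps to zone 104/8 - 1 = 12; a
 * request for 1500 bytes is rounded up to 1536 (128-byte chunking) and maps
 * to zone 1536/128 + 31 = 43. The adjusted *bytes and *chunking are what the
 * callers use to size and align the chunk.
 */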
617 * malloc() - call internal slab allocator
624 ptr = _slaballoc(size, 0);
628 UTRACE(0, size, ptr);
632 #define MUL_NO_OVERFLOW (1UL << (sizeof(size_t) * 4))
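/*
 * MUL_NO_OVERFLOW is 2^(half the bit width of size_t), i.e. 2^32 on LP64.
 * If both operands of number * size are below that bound the product cannot
 * overflow a size_t, so calloc() only pays for the SIZE_MAX / number division
 * when one operand is large (the same check popularized by OpenBSD's
 * reallocarray()).
 */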
635 * calloc() - call internal slab allocator
638 calloc(size_t number, size_t size)
642 if ((number >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
643 number > 0 && SIZE_MAX / number < size) {
648 ptr = _slaballoc(number * size, SAFLAG_ZERO);
652 UTRACE(0, number * size, ptr);
657 * realloc() (SLAB ALLOCATOR)
659 * We do not attempt to optimize this routine beyond reusing the same
660 * pointer if the new size fits within the chunking of the old pointer's zone.
664 realloc(void *ptr, size_t size)
667 ret = _slabrealloc(ptr, size);
671 UTRACE(ptr, size, ret);
678 * Allocate (size) bytes with an alignment of (alignment), where (alignment)
679 * is a power of 2 >= sizeof(void *).
681 * The slab allocator will allocate on power-of-2 boundaries up to
682 * at least PAGE_SIZE. We use the zoneindex mechanic to find a
683 * zone matching the requirements, and _vmem_alloc() otherwise.
686 posix_memalign(void **memptr, size_t alignment, size_t size)
694 * OpenGroup spec issue 6 checks
696 if ((alignment | (alignment - 1)) + 1 != (alignment << 1)) {
700 if (alignment < sizeof(void *)) {
706 * Our zone mechanism guarantees same-sized alignment for any
707 * power-of-2 allocation. If size is a power-of-2 and reasonable
708 * we can just call _slaballoc() and be done. We round size up
709 * to the nearest alignment boundary to improve our odds of
710 * it becoming a power-of-2 if it wasn't before.
712 if (size <= alignment)
715 size = (size + alignment - 1) & ~(size_t)(alignment - 1);
716 if (size < PAGE_SIZE && (size | (size - 1)) + 1 == (size << 1)) {
717 *memptr = _slaballoc(size, 0);
718 return(*memptr ? 0 : ENOMEM);
722 * Otherwise locate a zone with a chunking that matches
723 * the requested alignment, within reason. Consider two cases:
725 * (1) A 1K allocation on a 32-byte alignment. The first zoneindex
726 * we find will be the best fit because the chunking will be
727 * greater or equal to the alignment.
729 * (2) A 513 allocation on a 256-byte alignment. In this case
730 * the first zoneindex we find will be for 576 byte allocations
731 * with a chunking of 64, which is not sufficient. To fix this
732 * we simply find the nearest power-of-2 >= size and use the
733 * same side-effect of _slaballoc() which guarantees
734 * same-alignment on a power-of-2 allocation.
736 if (size < PAGE_SIZE) {
737 zi = zoneindex(&size, &chunking);
738 if (chunking >= alignment) {
739 *memptr = _slaballoc(size, 0);
740 return(*memptr ? 0 : ENOMEM);
746 while (alignment < size)
748 *memptr = _slaballoc(alignment, 0);
749 return(*memptr ? 0 : ENOMEM);
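/*
 * Worked example of case (2), assuming the elided loop body doubles
 * 'alignment': posix_memalign(&p, 256, 513) sees zoneindex() round the size
 * to 576 with only 64-byte chunking; 64 < 256, so alignment is doubled
 * (256 -> 512 -> 1024) until it is a power-of-2 >= the size and the request
 * is satisfied by _slaballoc(1024, 0), which is 1024-byte aligned.
 */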
753 * If the slab allocator cannot handle it, use _vmem_alloc().
755 * Alignment must be adjusted up to at least PAGE_SIZE in this case.
757 if (alignment < PAGE_SIZE)
758 alignment = PAGE_SIZE;
759 if (size < alignment)
761 size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
762 *memptr = _vmem_alloc(size, alignment, 0);
766 big = _slaballoc(sizeof(struct bigalloc), 0);
768 _vmem_free(*memptr, size);
772 bigp = bigalloc_lock(*memptr);
777 bigalloc_unlock(*memptr);
783 * free() (SLAB ALLOCATOR) - do the obvious
789 _slabfree(ptr, 0, NULL);
793 * _slaballoc() (SLAB ALLOCATOR)
795 * Allocate memory via the slab allocator. If the request is too large,
796 * or if it is page-aligned beyond a certain size, we fall back to the
800 _slaballoc(size_t size, int flags)
814 * Handle the degenerate size == 0 case. Yes, this does happen.
815 * Return a special pointer. This is to maintain compatibility with
816 * the original malloc implementation. Certain devices, such as the
817 * adaptec driver, not only allocate 0 bytes, they check for NULL and
818 * also realloc() later on. Joy.
821 return(ZERO_LENGTH_PTR);
823 /* Capture global flags */
824 flags |= g_malloc_flags;
827 * Handle large allocations directly. There should not be very many
828 * of these so performance is not a big issue.
830 * The backend allocator is pretty nasty on an SMP system. Use the
831 * slab allocator for one and two page-sized chunks even though we
832 * lose some efficiency.
834 if (size >= ZoneLimit ||
835 ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
840 * Page-align and cache-color in case of virtually indexed
841 * physically tagged L1 caches (aka SandyBridge). No sweat
842 * otherwise, so just do it.
844 size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
845 if ((size & 8191) == 0)
848 chunk = _vmem_alloc(size, PAGE_SIZE, flags);
852 big = _slaballoc(sizeof(struct bigalloc), 0);
854 _vmem_free(chunk, size);
857 bigp = bigalloc_lock(chunk);
862 bigalloc_unlock(chunk);
867 /* Compute allocation zone; zoneindex will panic on excessive sizes */
868 zi = zoneindex(&size, &chunking);
869 MASSERT(zi < NZONES);
871 obj = mtmagazine_alloc(zi);
873 if (flags & SAFLAG_ZERO)
878 slgd = &SLGlobalData;
882 * Attempt to allocate out of an existing zone. If all zones are
883 * exhausted pull one off the free list or allocate a new one.
885 if ((z = slgd->ZoneAry[zi]) == NULL) {
886 z = zone_alloc(flags);
891 * How big is the base structure?
893 #if defined(INVARIANTS)
895 * Make room for z_Bitmap. An exact calculation is
896 * somewhat more complicated, so don't make an exact calculation.
899 off = offsetof(struct slzone,
900 z_Bitmap[(ZoneSize / size + 31) / 32]);
901 bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
903 off = sizeof(struct slzone);
907 * Align the storage in the zone based on the chunking.
909 * Guarantee power-of-2 alignment for power-of-2-sized
910 * chunks. Otherwise align based on the chunking size
911 * (typically 8 or 16 bytes for small allocations).
913 * NOTE: Allocations >= ZoneLimit are governed by the
914 * bigalloc code and typically only guarantee page-alignment.
916 * Set initial conditions for UIndex near the zone header
917 * to reduce unnecessary page faults, vs semi-randomization
918 * to improve L1 cache saturation.
920 if ((size | (size - 1)) + 1 == (size << 1))
921 off = roundup2(off, size);
923 off = roundup2(off, chunking);
924 z->z_Magic = ZALLOC_SLAB_MAGIC;
926 z->z_NMax = (ZoneSize - off) / size;
927 z->z_NFree = z->z_NMax;
928 z->z_BasePtr = (char *)z + off;
929 z->z_UIndex = z->z_UEndIndex = 0;
930 z->z_ChunkSize = size;
931 z->z_FirstFreePg = ZonePageCount;
932 z->z_Next = slgd->ZoneAry[zi];
933 slgd->ZoneAry[zi] = z;
934 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
935 flags &= ~SAFLAG_ZERO; /* already zero'd */
936 flags |= SAFLAG_PASSIVE;
940 * Slide the base index for initial allocations out of the
941 * next zone we create so we do not over-weight the lower
942 * part of the cpu memory caches.
944 slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
945 & (ZALLOC_MAX_ZONE_SIZE - 1);
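/*
 * Rough worked example of the setup above (ignoring the INVARIANTS bitmap,
 * and assuming the zone header fits in one chunk): for the 1024-byte chunk
 * zone, 'off' rounds up to 1024, so z_BasePtr starts 1 KiB into the 64 KiB
 * zone and z_NMax = (65536 - 1024) / 1024 = 63 chunks, each naturally
 * 1024-byte aligned because the zone itself is ZoneSize-aligned.
 */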
949 * Ok, we have a zone from which at least one chunk is available.
951 * Remove us from the ZoneAry[] when we become empty
953 MASSERT(z->z_NFree > 0);
955 if (--z->z_NFree == 0) {
956 slgd->ZoneAry[zi] = z->z_Next;
961 * Locate a chunk in a free page. This attempts to localize
962 * reallocations into earlier pages without us having to sort
963 * the chunk list. A chunk may still overlap a page boundary.
965 while (z->z_FirstFreePg < ZonePageCount) {
966 if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
969 * Diagnostic: c_Next is not total garbage.
971 MASSERT(chunk->c_Next == NULL ||
972 ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
973 ((intptr_t)chunk & IN_SAME_PAGE_MASK));
976 chunk_mark_allocated(z, chunk);
978 MASSERT((uintptr_t)chunk & ZoneMask);
979 z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
986 * No chunks are available but NFree said we had some memory,
987 * so it must be available in the never-before-used-memory
988 * area governed by UIndex. The consequences are very
989 * serious if our zone got corrupted so we use an explicit
990 * panic rather than a KASSERT.
992 chunk = (slchunk_t)(z->z_BasePtr + z->z_UIndex * size);
994 if (++z->z_UIndex == z->z_NMax)
996 if (z->z_UIndex == z->z_UEndIndex) {
998 _mpanic("slaballoc: corrupted zone");
1001 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
1002 flags &= ~SAFLAG_ZERO;
1003 flags |= SAFLAG_PASSIVE;
1005 #if defined(INVARIANTS)
1006 chunk_mark_allocated(z, chunk);
1011 if (flags & SAFLAG_ZERO) {
1014 } else if ((flags & (SAFLAG_ZERO|SAFLAG_PASSIVE)) == 0) {
1015 if (use_malloc_pattern) {
1016 for (i = 0; i < size; i += sizeof(int)) {
1017 *(int *)((char *)chunk + i) = -1;
1020 /* avoid accidental double-free check */
1021 chunk->c_Next = (void *)-1;
1031 * Reallocate memory within the chunk
1034 _slabrealloc(void *ptr, size_t size)
1041 if (ptr == NULL || ptr == ZERO_LENGTH_PTR) {
1042 return(_slaballoc(size, 0));
1047 return(ZERO_LENGTH_PTR);
1051 * Handle oversized allocations.
1053 if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
1057 while ((big = *bigp) != NULL) {
1058 if (big->base == ptr) {
1059 size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
1060 bigbytes = big->bytes;
1061 if (bigbytes == size) {
1062 bigalloc_unlock(ptr);
1066 bigalloc_unlock(ptr);
1067 if ((nptr = _slaballoc(size, 0)) == NULL) {
1069 bigp = bigalloc_lock(ptr);
1072 bigalloc_unlock(ptr);
1075 if (size > bigbytes)
1077 bcopy(ptr, nptr, size);
1078 _slabfree(ptr, FASTSLABREALLOC, &big);
1083 bigalloc_unlock(ptr);
1087 * Get the original allocation's zone. If the new request winds
1088 * up using the same chunk size we do not have to do anything.
1090 * NOTE: We don't have to lock the globaldata here, the fields we
1091 * access here will not change at least as long as we have control
1092 * over the allocation.
1094 z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
1095 MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1098 * Use zoneindex() to chunk-align the new size, as long as the
1099 * new size is not too large.
1101 if (size < ZoneLimit) {
1102 zoneindex(&size, &chunking);
1103 if (z->z_ChunkSize == size) {
1109 * Allocate memory for the new request size and copy as appropriate.
1111 if ((nptr = _slaballoc(size, 0)) != NULL) {
1112 if (size > z->z_ChunkSize)
1113 size = z->z_ChunkSize;
1114 bcopy(ptr, nptr, size);
1115 _slabfree(ptr, 0, NULL);
1122 * free (SLAB ALLOCATOR)
1124 * Free a memory block previously allocated by malloc. Note that we do not
1125 * attempt to update ks_loosememuse as MP races could prevent us from
1126 * checking memory limits in malloc.
1129 * FASTSLABREALLOC Fast call from realloc, *rbigp already
1135 _slabfree(void *ptr, int flags, bigalloc_t *rbigp)
1141 slglobaldata_t slgd;
1146 /* Fast realloc path for big allocations */
1147 if (flags & FASTSLABREALLOC) {
1149 goto fastslabrealloc;
1153 * Handle NULL frees and special 0-byte allocations
1157 if (ptr == ZERO_LENGTH_PTR)
1161 * Handle oversized allocations.
1163 if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
1164 while ((big = *bigp) != NULL) {
1165 if (big->base == ptr) {
1167 bigalloc_unlock(ptr);
1170 _slabfree(big, 0, NULL);
1172 MASSERT(sizeof(weirdary) <= size);
1173 bcopy(weirdary, ptr, sizeof(weirdary));
1175 _vmem_free(ptr, size);
1180 bigalloc_unlock(ptr);
1184 * Zone case. Figure out the zone based on the fact that it is ZoneSize-aligned.
1187 z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
1188 MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1190 size = z->z_ChunkSize;
1191 zi = z->z_ZoneIndex;
1193 if (g_malloc_flags & SAFLAG_ZERO)
1196 if (mtmagazine_free(zi, ptr) == 0)
1199 pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
1201 slgd = &SLGlobalData;
1206 * Attempt to detect a double-free. To reduce overhead we only check
1207 * if there appears to be a link pointer at the base of the data.
1209 if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
1212 for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
1214 _mpanic("Double free at %p", chunk);
1217 chunk_mark_free(z, chunk);
1221 * Put weird data into the memory to detect modifications after
1222 * freeing, illegal pointer use after freeing (we should fault on
1223 * the odd address), and so forth.
1226 if (z->z_ChunkSize < sizeof(weirdary))
1227 bcopy(weirdary, chunk, z->z_ChunkSize);
1229 bcopy(weirdary, chunk, sizeof(weirdary));
1233 * Add this free non-zero'd chunk to a linked list for reuse, and adjust z_FirstFreePg if necessary.
1236 chunk->c_Next = z->z_PageAry[pgno];
1237 z->z_PageAry[pgno] = chunk;
1238 if (z->z_FirstFreePg > pgno)
1239 z->z_FirstFreePg = pgno;
1242 * Bump the number of free chunks. If it becomes non-zero the zone
1243 * must be added back onto the appropriate list.
1245 if (z->z_NFree++ == 0) {
1246 z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
1247 slgd->ZoneAry[z->z_ZoneIndex] = z;
1251 * If the zone becomes totally free then release it.
1253 if (z->z_NFree == z->z_NMax) {
1256 pz = &slgd->ZoneAry[z->z_ZoneIndex];
1258 pz = &(*pz)->z_Next;
1263 /* slgd lock released */
1269 #if defined(INVARIANTS)
1271 * Helper routines for sanity checks
1275 chunk_mark_allocated(slzone_t z, void *chunk)
1277 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1280 MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
1281 bitptr = &z->z_Bitmap[bitdex >> 5];
1283 MASSERT((*bitptr & (1 << bitdex)) == 0);
1284 *bitptr |= 1 << bitdex;
1289 chunk_mark_free(slzone_t z, void *chunk)
1291 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1294 MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
1295 bitptr = &z->z_Bitmap[bitdex >> 5];
1297 MASSERT((*bitptr & (1 << bitdex)) != 0);
1298 *bitptr &= ~(1 << bitdex);
1304 * Allocate an object from the passed magazine. NULL is returned and *burst
1305 * is adjusted if the magazine is empty.
1307 static __inline void *
1308 magazine_alloc(struct magazine *mp, int *burst)
1314 if (MAGAZINE_NOTEMPTY(mp)) {
1315 obj = mp->objects[--mp->rounds];
1320 * Return burst factor to caller along with NULL
1322 if ((mp->flags & M_BURST) && (burst != NULL)) {
1323 *burst = mp->burst_factor;
1325 /* Reduce burst factor by NSCALE; if it hits 1, disable BURST */
1326 if ((mp->flags & M_BURST) && (mp->flags & M_BURST_EARLY) &&
1328 mp->burst_factor -= M_BURST_NSCALE;
1329 if (mp->burst_factor <= 1) {
1330 mp->burst_factor = 1;
1331 mp->flags &= ~(M_BURST);
1332 mp->flags &= ~(M_BURST_EARLY);
1339 magazine_free(struct magazine *mp, void *p)
1341 if (mp != NULL && MAGAZINE_NOTFULL(mp)) {
1342 mp->objects[mp->rounds++] = p;
1350 mtmagazine_alloc(int zi)
1353 struct magazine *mp, *emptymag;
1358 * Do not try to access per-thread magazines while the mtmagazine
1359 * is being initialized or destroyed.
1366 * Primary per-thread allocation loop
1370 * If the loaded magazine has rounds, allocate and return
1372 mp = tp->mags[zi].loaded;
1373 obj = magazine_alloc(mp, NULL);
1378 * If the prev magazine is full, swap with the loaded
1379 * magazine and retry.
1381 mp = tp->mags[zi].prev;
1382 if (mp && MAGAZINE_FULL(mp)) {
1383 MASSERT(mp->rounds != 0);
1384 swap_mags(&tp->mags[zi]); /* prev now empty */
1389 * Try to get a full magazine from the depot. Cycle
1390 * through depot(full)->loaded->prev->depot(empty).
1391 * Retry if a full magazine was available from the depot.
1393 * Return NULL (caller will fall through) if no magazines
1394 * can be found anywhere.
1398 emptymag = tp->mags[zi].prev;
1400 SLIST_INSERT_HEAD(&d->empty, emptymag, nextmagazine);
1401 tp->mags[zi].prev = tp->mags[zi].loaded;
1402 mp = SLIST_FIRST(&d->full); /* loaded magazine */
1403 tp->mags[zi].loaded = mp;
1405 SLIST_REMOVE_HEAD(&d->full, nextmagazine);
1406 MASSERT(MAGAZINE_NOTEMPTY(mp));
1418 mtmagazine_free(int zi, void *ptr)
1421 struct magazine *mp, *loadedmag;
1426 * Do not try to access per-thread magazines while the mtmagazine
1427 * is being initialized or destroyed.
1434 * Primary per-thread freeing loop
1438 * Make sure a new magazine is available in case we have
1439 * to use it. Staging the newmag allows us to avoid
1440 * some locking/reentrancy complexity.
1442 * Temporarily disable the per-thread caches for this
1443 * allocation to avoid reentrancy and/or to avoid a
1444 * stack overflow if the [zi] happens to be the same that
1445 * would be used to allocate the new magazine.
1447 if (tp->newmag == NULL) {
1449 tp->newmag = _slaballoc(sizeof(struct magazine),
1452 if (tp->newmag == NULL) {
1459 * If the loaded magazine has space, free directly to it
1461 rc = magazine_free(tp->mags[zi].loaded, ptr);
1466 * If the prev magazine is empty, swap with the loaded
1467 * magazine and retry.
1469 mp = tp->mags[zi].prev;
1470 if (mp && MAGAZINE_EMPTY(mp)) {
1471 MASSERT(mp->rounds == 0);
1472 swap_mags(&tp->mags[zi]); /* prev now full */
1477 * Try to get an empty magazine from the depot. Cycle
1478 * through depot(empty)->loaded->prev->depot(full).
1479 * Retry if an empty magazine was available from the depot.
1484 if ((loadedmag = tp->mags[zi].prev) != NULL)
1485 SLIST_INSERT_HEAD(&d->full, loadedmag, nextmagazine);
1486 tp->mags[zi].prev = tp->mags[zi].loaded;
1487 mp = SLIST_FIRST(&d->empty);
1489 tp->mags[zi].loaded = mp;
1490 SLIST_REMOVE_HEAD(&d->empty, nextmagazine);
1491 MASSERT(MAGAZINE_NOTFULL(mp));
1495 mp->capacity = M_MAX_ROUNDS;
1498 tp->mags[zi].loaded = mp;
1507 mtmagazine_init(void)
1511 error = pthread_key_create(&thread_mags_key, mtmagazine_destructor);
1517 * This function is only used by the thread exit destructor
1520 mtmagazine_drain(struct magazine *mp)
1524 while (MAGAZINE_NOTEMPTY(mp)) {
1525 obj = magazine_alloc(mp, NULL);
1526 _slabfree(obj, 0, NULL);
1531 * mtmagazine_destructor()
1533 * When a thread exits, we reclaim all its resources; all its magazines are
1534 * drained and the structures are freed.
1536 * WARNING! The destructor can be called multiple times if the larger user
1537 * program has its own destructors that run after ours and
1538 * allocate or free memory.
1541 mtmagazine_destructor(void *thrp)
1543 thr_mags *tp = thrp;
1544 struct magazine *mp;
1548 * Prevent further use of mtmagazines while we are destructing
1549 * them, as well as for any destructors which are run after us
1550 * prior to the thread actually being destroyed.
1554 for (i = 0; i < NZONES; i++) {
1555 mp = tp->mags[i].loaded;
1556 tp->mags[i].loaded = NULL;
1558 if (MAGAZINE_NOTEMPTY(mp))
1559 mtmagazine_drain(mp);
1560 _slabfree(mp, 0, NULL);
1563 mp = tp->mags[i].prev;
1564 tp->mags[i].prev = NULL;
1566 if (MAGAZINE_NOTEMPTY(mp))
1567 mtmagazine_drain(mp);
1568 _slabfree(mp, 0, NULL);
1575 _slabfree(mp, 0, NULL);
1582 * Attempt to allocate a zone from the zone magazine; the zone magazine has
1583 * M_BURST_EARLY enabled, so honor the burst request from the magazine.
1586 zone_alloc(int flags)
1588 slglobaldata_t slgd = &SLGlobalData;
1593 zone_magazine_lock();
1596 z = magazine_alloc(&zone_magazine, &burst);
1597 if (z == NULL && burst == 1) {
1598 zone_magazine_unlock();
1599 z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
1600 } else if (z == NULL) {
1601 z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
1603 for (i = 1; i < burst; i++) {
1604 j = magazine_free(&zone_magazine,
1605 (char *) z + (ZoneSize * i));
1609 zone_magazine_unlock();
1611 z->z_Flags |= SLZF_UNOTZEROD;
1612 zone_magazine_unlock();
1621 * Release a zone and unlock the slgd lock.
1626 slglobaldata_t slgd = &SLGlobalData;
1627 void *excess[M_ZONE_ROUNDS - M_LOW_ROUNDS] = {};
1630 zone_magazine_lock();
1633 bzero(z, sizeof(struct slzone));
1636 madvise(z, ZoneSize, MADV_FREE);
1638 i = magazine_free(&zone_magazine, z);
1641 * If we failed to free, collect excess zones; release the zone
1642 * magazine lock, and then free to the system via _vmem_free. Re-enable
1643 * BURST mode for the magazine.
1646 j = zone_magazine.rounds - zone_magazine.low_factor;
1647 for (i = 0; i < j; i++) {
1648 excess[i] = magazine_alloc(&zone_magazine, NULL);
1649 MASSERT(excess[i] != NULL);
1652 zone_magazine_unlock();
1654 for (i = 0; i < j; i++)
1655 _vmem_free(excess[i], ZoneSize);
1657 _vmem_free(z, ZoneSize);
1659 zone_magazine_unlock();
1666 * Directly map memory in PAGE_SIZE'd chunks with the specified
1669 * Alignment must be a multiple of PAGE_SIZE.
1671 * Size must be >= alignment.
1674 _vmem_alloc(size_t size, size_t align, int flags)
1681 * Map anonymous private memory.
1683 addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
1684 MAP_PRIVATE|MAP_ANON, -1, 0);
1685 if (addr == MAP_FAILED)
1689 * Check alignment. The misaligned offset is also the excess
1690 * amount. If misaligned unmap the excess so we have a chance of
1691 * mapping at the next alignment point and recursively try again.
1693 * BBBBBBBBBBB BBBBBBBBBBB BBBBBBBBBBB block alignment
1694 * aaaaaaaaa aaaaaaaaaaa aa mis-aligned allocation
1695 * xxxxxxxxx final excess calculation
1696 * ^ returned address
1698 excess = (uintptr_t)addr & (align - 1);
1701 excess = align - excess;
1704 munmap(save + excess, size - excess);
1705 addr = _vmem_alloc(size, align, flags);
1706 munmap(save, excess);
1708 return((void *)addr);
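/*
 * Worked example of the realignment above (illustrative): for a 64 KiB
 * request with 64 KiB alignment, suppose mmap() returns an address ending in
 * 0x9000. Then excess becomes 0x10000 - 0x9000 = 0x7000; everything from the
 * first aligned boundary (addr + 0x7000) to the end of the mapping is
 * unmapped, the allocation is retried (usually landing in the hole just
 * freed), and finally the 0x7000-byte misaligned head is unmapped as well.
 */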
1714 * Free a chunk of memory allocated with _vmem_alloc()
1717 _vmem_free(void *ptr, size_t size)
1723 * Panic on fatal conditions
1726 _mpanic(const char *ctl, ...)
1730 if (malloc_panic == 0) {
1733 vfprintf(stderr, ctl, va);
1734 fprintf(stderr, "\n");