/*
 * NMALLOC.C	- New Malloc (ported from kernel slab allocator)
 *
 * Copyright (c) 2003,2004,2009,2010 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com> and by
 * Venkatesh Srinivas <me@endeavour.zapto.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $
 */
/*
 * This module implements a slab allocator drop-in replacement for the
 * libc malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * are nearly instantaneous, and overhead losses are limited to a fixed
 * worst-case amount.
 *
 * The slab allocator does not have to pre-initialize the list of
 * free chunks for each zone, and the underlying VM will not be
 * touched at all beyond the zone header until an actual allocation
 * needs it.
 *
 * Slab management and locking are done on a per-zone basis.
 *
 *	Alloc Size	Chunking	Number of zones
 *	----------	--------	---------------
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *
 * Allocations >= ZoneLimit (16K) go directly to mmap and a hash table
 * is used to locate them for free().  One- and two-page allocations use
 * the zone mechanism to avoid excessive mmap()/munmap() calls.
 *
 * API FEATURES AND SIDE EFFECTS
 *
 *	+ power-of-2 sized allocations up to a page will be power-of-2
 *	  aligned.  Above that, power-of-2 sized allocations are page-aligned.
 *	  Non power-of-2 sized allocations are aligned the same as the chunk
 *	  size for their zone.
 *	+ malloc(0) returns a special non-NULL value (see the example below)
 *	+ ability to allocate arbitrarily large chunks of memory
 *	+ realloc will reuse the passed pointer if possible, within the
 *	  limitations of the zone chunking.
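 *
 * Example (illustrative, not part of the original comment):
 *
 *	void *a = malloc(4096);	- power-of-2 size, so a is 4096-byte aligned
 *	void *b = malloc(100);	- rounded up to its zone's 104-byte chunk
 *	void *c = malloc(0);	- special non-NULL pointer; free(c) and
 *				  realloc(c, n) behave normally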
 *
 * Multithreaded enhancements for small allocations introduced August 2010.
 * These are in the spirit of 'libumem'.  See:
 *	Bonwick, J.; Adams, J. (2001). "Magazines and Vmem: Extending the
 *	slab allocator to many CPUs and arbitrary resources".  In Proc. 2001
 *	USENIX Technical Conference.  USENIX Association.
 *
 * The value of the environment variable MALLOC_OPTIONS is a character string
 * containing various flags to tune nmalloc.
 *
 *	'U' / ['u']	Generate / do not generate utrace entries for
 *			ktrace(1).  This will generate utrace events for all
 *			malloc, realloc, and free calls.  There are tools
 *			(mtrplay) to replay an allocation pattern and to graph
 *			heap structure (mtrgraph) from these logs.
 *	'Z' / ['z']	Zero out / do not zero all allocations.
 *			Each new byte of memory allocated by malloc, realloc, or
 *			reallocf will be initialized to 0.  This is intended for
 *			debugging and will affect performance negatively.
 *	'H' / ['h']	Pass a hint to the kernel about pages unused by the
 *			allocation functions.
 */
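/*
 * Usage example (illustrative, not part of the original source): enable
 * allocation zeroing and utrace logging for one run:
 *
 *	$ env MALLOC_OPTIONS=ZU ktrace ./prog
 *
 * The option string is scanned left to right, so a later flag overrides
 * an earlier one ("Zz" nets out to zeroing disabled).
 */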
/* cc -shared -fPIC -g -O -I/usr/src/lib/libc/include -o nmalloc.so nmalloc.c */

#include "libc_private.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/uio.h>
#include <sys/ktrace.h>
#include <errno.h>
#include <pthread.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>

#include "spinlock.h"
#include "un-namespace.h"
static char rcsid[] = "$Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $";
/*
 * Linked list of large allocations
 */
typedef struct bigalloc {
	struct bigalloc	*next;	/* hash link */
	void		*base;	/* base pointer */
	u_long		bytes;	/* bytes allocated */
} *bigalloc_t;
/*
 * Note that any allocations which are exact multiples of PAGE_SIZE, or
 * which are >= ZALLOC_ZONE_LIMIT, will fall through to the kmem subsystem.
 */
#define ZALLOC_ZONE_LIMIT	(16 * 1024)	/* max slab-managed alloc */
#define ZALLOC_MIN_ZONE_SIZE	(32 * 1024)	/* minimum zone size */
#define ZALLOC_MAX_ZONE_SIZE	(128 * 1024)	/* maximum zone size */
#define ZALLOC_ZONE_SIZE	(64 * 1024)
#define ZALLOC_SLAB_MAGIC	0x736c6162	/* magic sanity */
#define ZALLOC_SLAB_SLIDE	20		/* L1-cache skip */
#if ZALLOC_ZONE_LIMIT == 16384
#define NZONES			72
#elif ZALLOC_ZONE_LIMIT == 32768
#define NZONES			80
#else
#error "I couldn't figure out NZONES"
#endif
/*
 * Chunk structure for free elements
 */
typedef struct slchunk {
	struct slchunk	*c_Next;
} *slchunk_t;
/*
 * The IN-BAND zone header is placed at the beginning of each zone.
 */
typedef struct slzone {
	int32_t		z_Magic;	/* magic number for sanity check */
	int		z_NFree;	/* total free chunks / ualloc space */
	struct slzone	*z_Next;	/* ZoneAry[] link if z_NFree non-zero */
	int		z_NMax;		/* maximum free chunks */
	char		*z_BasePtr;	/* pointer to start of chunk array */
	int		z_UIndex;	/* current initial allocation index */
	int		z_UEndIndex;	/* last (first) allocation index */
	int		z_ChunkSize;	/* chunk size for validation */
	int		z_FirstFreePg;	/* chunk list on a page-by-page basis */
	int		z_ZoneIndex;	/* back-index into ZoneAry[] */
	int		z_Flags;	/* SLZF_* flags */
	struct slchunk	*z_PageAry[ZALLOC_ZONE_SIZE / PAGE_SIZE];
#if defined(INVARIANTS)
	__uint32_t	z_Bitmap[];	/* bitmap of free chunks / sanity */
#endif
} *slzone_t;
typedef struct slglobaldata {
	spinlock_t	Spinlock;
	slzone_t	ZoneAry[NZONES];/* linked list of zones NFree > 0 */
	int		JunkIndex;	/* base-index slide for new zones */
} *slglobaldata_t;
#define SLZF_UNOTZEROD		0x0001

#define MAG_NORECURSE		0x01
#define FASTSLABREALLOC		0x02
/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
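/*
 * Worked example (illustrative, not part of the original source): with
 * 4K pages IN_SAME_PAGE_MASK is ~(intptr_t)0xfff | 0x7.  Because chunks
 * are at least 8-byte aligned, two chunk pointers compare equal under
 * this mask iff they lie in the same page; _slaballoc() uses this to
 * sanity-check a free chunk's c_Next link.
 */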
/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * it has been freed.
 *
 * WARNING: A limited number of spinlocks are available, BIGXSIZE should
 * not be larger than 64.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)&malloc_dummy_pointer)
#define BIGHSHIFT	10			/* bigalloc hash table */
#define BIGHSIZE	(1 << BIGHSHIFT)
#define BIGHMASK	(BIGHSIZE - 1)
#define BIGXSIZE	(BIGHSIZE / 16)		/* bigalloc lock table */
#define BIGXMASK	(BIGXSIZE - 1)

#define SAFLAG_ZERO	0x0001
#define SAFLAG_PASSIVE	0x0002
#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))

#define MASSERT(exp)	do { if (__predict_false(!(exp)))	\
				_mpanic("assertion: %s in %s",	\
					#exp, __func__);	\
			} while(0)
#define M_MAX_ROUNDS	64
#define M_ZONE_ROUNDS	64
#define M_LOW_ROUNDS	32
#define M_INIT_ROUNDS	8
#define M_BURST_FACTOR	8
#define M_BURST_NSCALE	2

#define M_BURST		0x0001
#define M_BURST_EARLY	0x0002
struct magazine {
	SLIST_ENTRY(magazine) nextmagazine;

	int		flags;
	int		capacity;	/* Max rounds in this magazine */
	int		rounds;		/* Current number of free rounds */
	int		burst_factor;	/* Number of blocks to prefill with */
	int		low_factor;	/* Free till low_factor from full mag */
	void		*objects[M_MAX_ROUNDS];
};

SLIST_HEAD(magazinelist, magazine);
static spinlock_t zone_mag_lock;
static struct magazine zone_magazine = {
	.flags = M_BURST | M_BURST_EARLY,
	.capacity = M_ZONE_ROUNDS,
	.rounds = 0,
	.burst_factor = M_BURST_FACTOR,
	.low_factor = M_LOW_ROUNDS
};
#define MAGAZINE_FULL(mp)	((mp)->rounds == (mp)->capacity)
#define MAGAZINE_NOTFULL(mp)	((mp)->rounds < (mp)->capacity)
#define MAGAZINE_EMPTY(mp)	((mp)->rounds == 0)
#define MAGAZINE_NOTEMPTY(mp)	((mp)->rounds != 0)
/*
 * Each thread will have a pair of magazines per size-class (NZONES).
 * The loaded magazine will support immediate allocations; the previous
 * magazine will either be full or empty and can be swapped in as needed.
 */
typedef struct magazine_pair {
	struct magazine	*loaded;
	struct magazine	*prev;
} magazine_pair;
/* A depot is a collection of magazines for a single zone. */
typedef struct magazine_depot {
	struct magazinelist full;
	struct magazinelist empty;
	pthread_spinlock_t lock;
} magazine_depot;
typedef struct thr_mags {
	magazine_pair	mags[NZONES];
	int		init;	/* nonzero once the TSD destructor is armed */
} thr_mags;
/*
 * With this attribute set, do not require a function call for accessing
 * this variable when the code is compiled -fPIC.
 */
#define TLS_ATTRIBUTE	__attribute__ ((tls_model ("initial-exec")))

static int mtmagazine_free_live = 0;
static __thread thr_mags thread_mags TLS_ATTRIBUTE;
static pthread_key_t thread_mags_key;
static pthread_once_t thread_mags_once = PTHREAD_ONCE_INIT;
static magazine_depot depots[NZONES];
/*
 * Fixed globals (not per-cpu)
 */
static const int ZoneSize = ZALLOC_ZONE_SIZE;
static const int ZoneLimit = ZALLOC_ZONE_LIMIT;
static const int ZonePageCount = ZALLOC_ZONE_SIZE / PAGE_SIZE;
static const int ZoneMask = ZALLOC_ZONE_SIZE - 1;
static int opt_madvise = 0;
static int opt_utrace = 0;
static int malloc_started = 0;
static int g_malloc_flags = 0;
static spinlock_t malloc_init_lock;
static struct slglobaldata SLGlobalData;
static bigalloc_t bigalloc_array[BIGHSIZE];
static spinlock_t bigspin_array[BIGXSIZE];
static int malloc_panic;
static int malloc_dummy_pointer;
static const int32_t weirdary[16] = {
	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
	WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR
};
static void *_slaballoc(size_t size, int flags);
static void *_slabrealloc(void *ptr, size_t size);
static void _slabfree(void *ptr, int, bigalloc_t *);
static void *_vmem_alloc(size_t bytes, size_t align, int flags);
static void _vmem_free(void *ptr, size_t bytes);
static void *magazine_alloc(struct magazine *, int *);
static int magazine_free(struct magazine *, void *);
static void *mtmagazine_alloc(int zi);
static int mtmagazine_free(int zi, void *);
static void mtmagazine_init(void);
static void mtmagazine_destructor(void *);
static slzone_t zone_alloc(int flags);
static void zone_free(void *z);
static void _mpanic(const char *ctl, ...);
static void malloc_init(void);
#if defined(INVARIANTS)
static void chunk_mark_allocated(slzone_t z, void *chunk);
static void chunk_mark_free(slzone_t z, void *chunk);
#endif
struct nmalloc_utrace {
	void *p;
	size_t s;
	void *r;
};

#define UTRACE(a, b, c)						\
	if (opt_utrace) {					\
		struct nmalloc_utrace ut = {			\
			.p = (a),				\
			.s = (b),				\
			.r = (c)				\
		};						\
		utrace(&ut, sizeof(ut));			\
	}
/*
 * If enabled, any memory allocated without M_ZERO is initialized to -1.
 */
static int use_malloc_pattern;
static void
malloc_init(void)
{
	const char *p = NULL;

	_SPINLOCK(&malloc_init_lock);
	if (malloc_started) {
		_SPINUNLOCK(&malloc_init_lock);
		return;
	}

	if (issetugid() == 0)
		p = getenv("MALLOC_OPTIONS");

	for (; p != NULL && *p != '\0'; p++) {
		switch(*p) {
		case 'u':	opt_utrace = 0; break;
		case 'U':	opt_utrace = 1; break;
		case 'h':	opt_madvise = 0; break;
		case 'H':	opt_madvise = 1; break;
		case 'z':	g_malloc_flags = 0; break;
		case 'Z':	g_malloc_flags = SAFLAG_ZERO; break;
		default:
			break;
		}
	}

	malloc_started = 1;
	_SPINUNLOCK(&malloc_init_lock);
	UTRACE((void *) -1, 0, NULL);
}
static __inline void
slgd_lock(slglobaldata_t slgd)
{
	if (__isthreaded)
		_SPINLOCK(&slgd->Spinlock);
}

static __inline void
slgd_unlock(slglobaldata_t slgd)
{
	if (__isthreaded)
		_SPINUNLOCK(&slgd->Spinlock);
}

static __inline void
depot_lock(magazine_depot *dp)
{
	if (__isthreaded)
		pthread_spin_lock(&dp->lock);
}

static __inline void
depot_unlock(magazine_depot *dp)
{
	if (__isthreaded)
		pthread_spin_unlock(&dp->lock);
}

static __inline void
zone_magazine_lock(void)
{
	if (__isthreaded)
		_SPINLOCK(&zone_mag_lock);
}

static __inline void
zone_magazine_unlock(void)
{
	if (__isthreaded)
		_SPINUNLOCK(&zone_mag_lock);
}

static __inline void
swap_mags(magazine_pair *mp)
{
	struct magazine *tmp;

	tmp = mp->loaded;
	mp->loaded = mp->prev;
	mp->prev = tmp;
}
/*
 * bigalloc hashing and locking support.
 *
 * Return an unmasked hash code for the passed pointer.
 */
static __inline int
_bigalloc_hash(void *ptr)
{
	int hv;

	hv = ((int)(intptr_t)ptr >> PAGE_SHIFT) ^
	     ((int)(intptr_t)ptr >> (PAGE_SHIFT + BIGHSHIFT));

	return(hv);
}
/*
 * Lock the hash chain and return a pointer to its base for the specified
 * address.
 */
static __inline bigalloc_t *
bigalloc_lock(void *ptr)
{
	int hv = _bigalloc_hash(ptr);
	bigalloc_t *bigp;

	bigp = &bigalloc_array[hv & BIGHMASK];
	_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
	return(bigp);
}
/*
 * Lock the hash chain and return a pointer to its base for the specified
 * address.
 *
 * BUT, if the hash chain is empty, just return NULL and do not bother
 * locking the spinlock.
 */
static __inline bigalloc_t *
bigalloc_check_and_lock(void *ptr)
{
	int hv = _bigalloc_hash(ptr);
	bigalloc_t *bigp;

	bigp = &bigalloc_array[hv & BIGHMASK];
	if (*bigp == NULL)
		return(NULL);
	_SPINLOCK(&bigspin_array[hv & BIGXMASK]);
	return(bigp);
}
static __inline void
bigalloc_unlock(void *ptr)
{
	int hv;

	hv = _bigalloc_hash(ptr);
	_SPINUNLOCK(&bigspin_array[hv & BIGXMASK]);
}
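/*
 * Illustrative note (not part of the original source): the hash folds a
 * pointer's page number with the same page number shifted down another
 * BIGHSHIFT bits; callers then mask it twice.  hv & BIGHMASK selects one
 * of 1024 hash chains, while hv & BIGXMASK selects one of only 64
 * spinlocks, so every 16th chain shares a lock.
 */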
/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(size_t *bytes, size_t *chunking)
{
	size_t n = (unsigned int)*bytes;	/* unsigned for shift opt */

	if (n < 128) {
		*bytes = n = (n + 7) & ~7;
		*chunking = 8;
		return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
	}
	if (n < 256) {
		*bytes = n = (n + 15) & ~15;
		*chunking = 16;
		return(n / 16 + 7);
	}
	if (n < 8192) {
		if (n < 512) {
			*bytes = n = (n + 31) & ~31;
			*chunking = 32;
			return(n / 32 + 15);
		}
		if (n < 1024) {
			*bytes = n = (n + 63) & ~63;
			*chunking = 64;
			return(n / 64 + 23);
		}
		if (n < 2048) {
			*bytes = n = (n + 127) & ~127;
			*chunking = 128;
			return(n / 128 + 31);
		}
		if (n < 4096) {
			*bytes = n = (n + 255) & ~255;
			*chunking = 256;
			return(n / 256 + 39);
		}
		*bytes = n = (n + 511) & ~511;
		*chunking = 512;
		return(n / 512 + 47);
	}
#if ZALLOC_ZONE_LIMIT > 8192
	if (n < 16384) {
		*bytes = n = (n + 1023) & ~1023;
		*chunking = 1024;
		return(n / 1024 + 55);
	}
#endif
#if ZALLOC_ZONE_LIMIT > 16384
	if (n < 32768) {
		*bytes = n = (n + 2047) & ~2047;
		*chunking = 2048;
		return(n / 2048 + 63);
	}
#endif
	_mpanic("Unexpected byte count %zu", n);
	return(0);
}
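/*
 * Worked example (illustrative, based on the index arithmetic above): a
 * request for 100 bytes rounds to (100 + 7) & ~7 == 104, *chunking
 * becomes 8, and the zone index is 104/8 - 1 == 12.  A request for 1000
 * bytes rounds to 1024, *chunking becomes 64, and the index is
 * 1024/64 + 23 == 39.
 */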
/*
 * malloc() - call internal slab allocator
 */
void *
malloc(size_t size)
{
	void *ptr;

	ptr = _slaballoc(size, 0);
	if (ptr == NULL)
		errno = ENOMEM;
	UTRACE(0, size, ptr);
	return(ptr);
}
/*
 * calloc() - call internal slab allocator
 */
void *
calloc(size_t number, size_t size)
{
	void *ptr;

	/* Guard against number * size overflowing size_t */
	if (number != 0 && SIZE_MAX / number < size) {
		errno = ENOMEM;
		return(NULL);
	}

	ptr = _slaballoc(number * size, SAFLAG_ZERO);
	if (ptr == NULL)
		errno = ENOMEM;
	UTRACE(0, number * size, ptr);
	return(ptr);
}
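/*
 * Overflow example (illustrative, not part of the original source): on
 * a 32-bit system calloc(0x10000, 0x10001) would wrap to a 0x10000-byte
 * request without the guard above; SIZE_MAX / 0x10000 == 0xffff is less
 * than 0x10001, so the call fails with ENOMEM instead.
 */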
/*
 * realloc() (SLAB ALLOCATOR)
 *
 * We do not attempt to optimize this routine beyond reusing the same
 * pointer if the new size fits within the chunking of the old pointer's
 * zone.
 */
void *
realloc(void *ptr, size_t size)
{
	void *ret;

	ret = _slabrealloc(ptr, size);
	if (ret == NULL)
		errno = ENOMEM;
	UTRACE(ptr, size, ret);
	return(ret);
}
/*
 * posix_memalign()
 *
 * Allocate (size) bytes with an alignment of (alignment), where (alignment)
 * is a power of 2 >= sizeof(void *).
 *
 * The slab allocator will allocate on power-of-2 boundaries up to
 * at least PAGE_SIZE.  We use the zoneindex mechanism to find a
 * zone matching the requirements, and _vmem_alloc() otherwise.
 */
int
posix_memalign(void **memptr, size_t alignment, size_t size)
{
	bigalloc_t *bigp;
	bigalloc_t big;
	size_t chunking;
	int zi;

	/*
	 * OpenGroup spec issue 6 checks
	 */
	if ((alignment | (alignment - 1)) + 1 != (alignment << 1)) {
		*memptr = NULL;
		return(EINVAL);
	}
	if (alignment < sizeof(void *)) {
		*memptr = NULL;
		return(EINVAL);
	}

	/*
	 * Our zone mechanism guarantees same-sized alignment for any
	 * power-of-2 allocation.  If size is a power-of-2 and reasonable
	 * we can just call _slaballoc() and be done.  We round size up
	 * to the nearest alignment boundary to improve our odds of
	 * it becoming a power-of-2 if it wasn't before.
	 */
	if (size <= alignment)
		size = alignment;
	else
		size = (size + alignment - 1) & ~(size_t)(alignment - 1);
	if (size < PAGE_SIZE && (size | (size - 1)) + 1 == (size << 1)) {
		*memptr = _slaballoc(size, 0);
		return(*memptr ? 0 : ENOMEM);
	}
	/*
	 * Otherwise locate a zone with a chunking that matches
	 * the requested alignment, within reason.  Consider two cases:
	 *
	 * (1) A 1K allocation on a 32-byte alignment.  The first zoneindex
	 *     we find will be the best fit because the chunking will be
	 *     greater or equal to the alignment.
	 *
	 * (2) A 513-byte allocation on a 256-byte alignment.  In this case
	 *     the first zoneindex we find will be for 576-byte allocations
	 *     with a chunking of 64, which is not sufficient.  To fix this
	 *     we simply find the nearest power-of-2 >= size and use the
	 *     same side-effect of _slaballoc() which guarantees
	 *     same-alignment on a power-of-2 allocation.
	 */
	if (size < PAGE_SIZE) {
		zi = zoneindex(&size, &chunking);
		if (chunking >= alignment) {
			*memptr = _slaballoc(size, 0);
			return(*memptr ? 0 : ENOMEM);
		}
		while (alignment < size)
			alignment <<= 1;
		*memptr = _slaballoc(alignment, 0);
		return(*memptr ? 0 : ENOMEM);
	}
	/*
	 * If the slab allocator cannot handle it, use vmem_alloc().
	 *
	 * Alignment must be adjusted up to at least PAGE_SIZE in this case.
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;
	if (size < alignment)
		size = alignment;
	size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
	*memptr = _vmem_alloc(size, alignment, 0);
	if (*memptr == NULL)
		return(ENOMEM);

	big = _slaballoc(sizeof(struct bigalloc), 0);
	if (big == NULL) {
		_vmem_free(*memptr, size);
		*memptr = NULL;
		return(ENOMEM);
	}
	bigp = bigalloc_lock(*memptr);
	big->base = *memptr;
	big->bytes = size;
	big->next = *bigp;
	*bigp = big;
	bigalloc_unlock(*memptr);

	return(0);
}
/*
 * free() (SLAB ALLOCATOR) - do the obvious
 */
void
free(void *ptr)
{
	UTRACE(ptr, 0, 0);
	_slabfree(ptr, 0, NULL);
}
/*
 * _slaballoc() (SLAB ALLOCATOR)
 *
 * Allocate memory via the slab allocator.  If the request is too large,
 * or if it is page-aligned beyond a certain size, we fall back to the
 * kmem subsystem.
 */
static void *
_slaballoc(size_t size, int flags)
{
	slzone_t z;
	slchunk_t chunk;
	slglobaldata_t slgd;
	size_t chunking;
	size_t off;
	size_t i;
	int zi;
	void *obj;

	if (!malloc_started)
		malloc_init();

	/*
	 * Handle the degenerate size == 0 case.  Yes, this does happen.
	 * Return a special pointer.  This is to maintain compatibility with
	 * the original malloc implementation.  Certain devices, such as the
	 * adaptec driver, not only allocate 0 bytes, they check for NULL and
	 * also realloc() later on.  Joy.
	 */
	if (size == 0)
		return(ZERO_LENGTH_PTR);
	/* Capture global flags */
	flags |= g_malloc_flags;

	/*
	 * Handle large allocations directly.  There should not be very many
	 * of these so performance is not a big issue.
	 *
	 * The backend allocator is pretty nasty on an SMP system.  Use the
	 * slab allocator for one and two page-sized chunks even though we
	 * lose some efficiency.
	 */
	if (size >= ZoneLimit ||
	    ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
		bigalloc_t big;
		bigalloc_t *bigp;

		size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
		chunk = _vmem_alloc(size, PAGE_SIZE, flags);
		if (chunk == NULL)
			return(NULL);

		big = _slaballoc(sizeof(struct bigalloc), 0);
		if (big == NULL) {
			_vmem_free(chunk, size);
			return(NULL);
		}
		bigp = bigalloc_lock(chunk);
		big->base = chunk;
		big->bytes = size;
		big->next = *bigp;
		*bigp = big;
		bigalloc_unlock(chunk);

		return(chunk);
	}
	/* Compute allocation zone; zoneindex will panic on excessive sizes */
	zi = zoneindex(&size, &chunking);
	MASSERT(zi < NZONES);

	obj = mtmagazine_alloc(zi);
	if (obj != NULL) {
		if (flags & SAFLAG_ZERO)
			bzero(obj, size);
		return(obj);
	}

	slgd = &SLGlobalData;
	slgd_lock(slgd);
	/*
	 * Attempt to allocate out of an existing zone.  If all zones are
	 * exhausted pull one off the free list or allocate a new one.
	 */
	if ((z = slgd->ZoneAry[zi]) == NULL) {
		z = zone_alloc(flags);
		if (z == NULL)
			goto fail;

		/*
		 * How big is the base structure?
		 */
#if defined(INVARIANTS)
		/*
		 * Make room for z_Bitmap.  An exact calculation is
		 * somewhat more complicated so don't make an exact
		 * calculation.
		 */
		off = offsetof(struct slzone,
				z_Bitmap[(ZoneSize / size + 31) / 32]);
		bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
		off = sizeof(struct slzone);
#endif

		/*
		 * Align the storage in the zone based on the chunking.
		 *
		 * Guarantee power-of-2 alignment for power-of-2-sized
		 * chunks.  Otherwise align based on the chunking size
		 * (typically 8 or 16 bytes for small allocations).
		 *
		 * NOTE: Allocations >= ZoneLimit are governed by the
		 * bigalloc code and typically only guarantee page-alignment.
		 *
		 * Set initial conditions for UIndex near the zone header
		 * to reduce unnecessary page faults, vs semi-randomization
		 * to improve L1 cache saturation.
		 */
		if ((size | (size - 1)) + 1 == (size << 1))
			off = (off + size - 1) & ~(size - 1);
		else
			off = (off + chunking - 1) & ~(chunking - 1);
		z->z_Magic = ZALLOC_SLAB_MAGIC;
		z->z_ZoneIndex = zi;
		z->z_NMax = (ZoneSize - off) / size;
		z->z_NFree = z->z_NMax;
		z->z_BasePtr = (char *)z + off;
		z->z_UIndex = z->z_UEndIndex = 0;
		z->z_ChunkSize = size;
		z->z_FirstFreePg = ZonePageCount;
		z->z_Next = slgd->ZoneAry[zi];
		slgd->ZoneAry[zi] = z;
		if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
			flags &= ~SAFLAG_ZERO;	/* already zero'd */
			flags |= SAFLAG_PASSIVE;
		}

		/*
		 * Slide the base index for initial allocations out of the
		 * next zone we create so we do not over-weight the lower
		 * part of the cpu memory caches.
		 */
		slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
					& (ZALLOC_MAX_ZONE_SIZE - 1);
	}
	/*
	 * Ok, we have a zone from which at least one chunk is available.
	 *
	 * Remove us from the ZoneAry[] when we become empty
	 */
	MASSERT(z->z_NFree > 0);

	if (--z->z_NFree == 0) {
		slgd->ZoneAry[zi] = z->z_Next;
		z->z_Next = NULL;
	}

	/*
	 * Locate a chunk in a free page.  This attempts to localize
	 * reallocations into earlier pages without us having to sort
	 * the chunk list.  A chunk may still overlap a page boundary.
	 */
	while (z->z_FirstFreePg < ZonePageCount) {
		if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
			/*
			 * Diagnostic: c_Next is not total garbage.
			 */
			MASSERT(chunk->c_Next == NULL ||
			    ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
			    ((intptr_t)chunk & IN_SAME_PAGE_MASK));
#if defined(INVARIANTS)
			chunk_mark_allocated(z, chunk);
#endif
			MASSERT((uintptr_t)chunk & ZoneMask);
			z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
			goto done;
		}
		++z->z_FirstFreePg;
	}
	/*
	 * No chunks are available but NFree said we had some memory,
	 * so it must be available in the never-before-used-memory
	 * area governed by UIndex.  The consequences are very
	 * serious if our zone got corrupted so we use an explicit
	 * panic rather than a KASSERT.
	 */
	chunk = (slchunk_t)(z->z_BasePtr + z->z_UIndex * size);

	if (++z->z_UIndex == z->z_NMax)
		z->z_UIndex = 0;
	if (z->z_UIndex == z->z_UEndIndex) {
		if (z->z_NFree != 0)
			_mpanic("slaballoc: corrupted zone");
	}

	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
		flags &= ~SAFLAG_ZERO;
		flags |= SAFLAG_PASSIVE;
	}
#if defined(INVARIANTS)
	chunk_mark_allocated(z, chunk);
#endif
done:
	slgd_unlock(slgd);

	if (flags & SAFLAG_ZERO) {
		bzero(chunk, size);
	} else if ((flags & (SAFLAG_ZERO|SAFLAG_PASSIVE)) == 0) {
		if (use_malloc_pattern) {
			for (i = 0; i < size; i += sizeof(int)) {
				*(int *)((char *)chunk + i) = -1;
			}
		}
		/* avoid accidental double-free check */
		chunk->c_Next = (void *)-1;
	}
	return(chunk);

fail:
	slgd_unlock(slgd);
	return(NULL);
}
/*
 * Reallocate memory within the chunk
 */
static void *
_slabrealloc(void *ptr, size_t size)
{
	bigalloc_t *bigp;
	void *nptr;
	slzone_t z;
	size_t chunking;

	if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
		return(_slaballoc(size, 0));

	if (size == 0) {
		free(ptr);
		return(ZERO_LENGTH_PTR);
	}
	/*
	 * Handle oversized allocations.
	 */
	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
		bigalloc_t big;
		size_t bigbytes;

		while ((big = *bigp) != NULL) {
			if (big->base == ptr) {
				size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
				bigbytes = big->bytes;
				if (bigbytes == size) {
					bigalloc_unlock(ptr);
					return(ptr);
				}
				*bigp = big->next;
				bigalloc_unlock(ptr);
				if ((nptr = _slaballoc(size, 0)) == NULL) {
					/* Relink the block on failure */
					bigp = bigalloc_lock(ptr);
					big->next = *bigp;
					*bigp = big;
					bigalloc_unlock(ptr);
					return(NULL);
				}
				if (size > bigbytes)
					size = bigbytes;
				bcopy(ptr, nptr, size);
				_slabfree(ptr, FASTSLABREALLOC, &big);
				return(nptr);
			}
			bigp = &big->next;
		}
		bigalloc_unlock(ptr);
	}
	/*
	 * Get the original allocation's zone.  If the new request winds
	 * up using the same chunk size we do not have to do anything.
	 *
	 * NOTE: We don't have to lock the globaldata here, the fields we
	 * access here will not change at least as long as we have control
	 * over the allocation.
	 */
	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	/*
	 * Use zoneindex() to chunk-align the new size, as long as the
	 * new size is not too large.
	 */
	if (size < ZoneLimit) {
		zoneindex(&size, &chunking);
		if (z->z_ChunkSize == size)
			return(ptr);
	}
	/*
	 * Allocate memory for the new request size and copy as appropriate.
	 */
	if ((nptr = _slaballoc(size, 0)) != NULL) {
		if (size > z->z_ChunkSize)
			size = z->z_ChunkSize;
		bcopy(ptr, nptr, size);
		_slabfree(ptr, 0, NULL);
	}

	return(nptr);
}
/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * Flags:
 *	MAG_NORECURSE		Skip magazine layer
 *	FASTSLABREALLOC		Fast call from realloc
 */
static void
_slabfree(void *ptr, int flags, bigalloc_t *rbigp)
{
	slzone_t z;
	slchunk_t chunk;
	bigalloc_t big;
	bigalloc_t *bigp;
	slglobaldata_t slgd;
	size_t size;
	int zi;
	int pgno;

	/* Fast realloc path for big allocations */
	if (flags & FASTSLABREALLOC) {
		big = *rbigp;
		goto fastslabrealloc;
	}

	/*
	 * Handle NULL frees and special 0-byte allocations
	 */
	if (ptr == NULL)
		return;
	if (ptr == ZERO_LENGTH_PTR)
		return;

	/* Ensure that a destructor is in-place for thread-exit */
	if (mtmagazine_free_live == 0) {
		mtmagazine_free_live = 1;
		pthread_once(&thread_mags_once, &mtmagazine_init);
	}
	/*
	 * Handle oversized allocations.
	 */
	if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
		while ((big = *bigp) != NULL) {
			if (big->base == ptr) {
				if ((flags & FASTSLABREALLOC) == 0) {
					*bigp = big->next;
					bigalloc_unlock(ptr);
				}
fastslabrealloc:
				size = big->bytes;
				_slabfree(big, 0, NULL);
				MASSERT(sizeof(weirdary) <= size);
				bcopy(weirdary, ptr, sizeof(weirdary));
				_vmem_free(ptr, size);
				return;
			}
			bigp = &big->next;
		}
		bigalloc_unlock(ptr);
	}
	/*
	 * Zone case.  Figure out the zone based on the fact that it is
	 * ZoneSize aligned.
	 */
	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
	MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

	size = z->z_ChunkSize;
	zi = z->z_ZoneIndex;

	if (g_malloc_flags & SAFLAG_ZERO)
		bzero(ptr, size);

	if (((flags & MAG_NORECURSE) == 0) &&
	    (mtmagazine_free(zi, ptr) == 0))
		return;
	pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
	chunk = ptr;
	slgd = &SLGlobalData;
	slgd_lock(slgd);

#if defined(INVARIANTS)
	/*
	 * Attempt to detect a double-free.  To reduce overhead we only check
	 * if there appears to be a link pointer at the base of the data.
	 */
	if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
		slchunk_t scan;

		for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
			if (scan == chunk)
				_mpanic("Double free at %p", chunk);
		}
	}
	chunk_mark_free(z, chunk);
#endif
	/*
	 * Put weird data into the memory to detect modifications after
	 * freeing, illegal pointer use after freeing (we should fault on
	 * the odd address), and so forth.
	 */
#if defined(INVARIANTS)
	if (z->z_ChunkSize < sizeof(weirdary))
		bcopy(weirdary, chunk, z->z_ChunkSize);
	else
		bcopy(weirdary, chunk, sizeof(weirdary));
#endif
	/*
	 * Add this free non-zero'd chunk to a linked list for reuse, adjust
	 * z_FirstFreePg.
	 */
	chunk->c_Next = z->z_PageAry[pgno];
	z->z_PageAry[pgno] = chunk;
	if (z->z_FirstFreePg > pgno)
		z->z_FirstFreePg = pgno;
	/*
	 * Bump the number of free chunks.  If it becomes non-zero the zone
	 * must be added back onto the appropriate list.
	 */
	if (z->z_NFree++ == 0) {
		z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
		slgd->ZoneAry[z->z_ZoneIndex] = z;
	}
	/*
	 * If the zone becomes totally free then release it.
	 */
	if (z->z_NFree == z->z_NMax) {
		slzone_t *pz;

		pz = &slgd->ZoneAry[z->z_ZoneIndex];
		while (z != *pz)
			pz = &(*pz)->z_Next;
		*pz = z->z_Next;
		z->z_Magic = -1;
		z->z_Next = NULL;
		zone_free(z);	/* releases the slgd lock */
		return;
	}

	slgd_unlock(slgd);
}
#if defined(INVARIANTS)
/*
 * Helper routines for sanity checks
 */
static void
chunk_mark_allocated(slzone_t z, void *chunk)
{
	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
	__uint32_t *bitptr;

	MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
	bitptr = &z->z_Bitmap[bitdex >> 5];
	bitdex &= 31;
	MASSERT((*bitptr & (1 << bitdex)) == 0);
	*bitptr |= 1 << bitdex;
}

static void
chunk_mark_free(slzone_t z, void *chunk)
{
	int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
	__uint32_t *bitptr;

	MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
	bitptr = &z->z_Bitmap[bitdex >> 5];
	bitdex &= 31;
	MASSERT((*bitptr & (1 << bitdex)) != 0);
	*bitptr &= ~(1 << bitdex);
}
#endif
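/*
 * Worked example (illustrative, not part of the original source): in a
 * zone with 16-byte chunks, the chunk at offset 800 from z_BasePtr has
 * bitdex 800/16 == 50, so its state lives in z_Bitmap[50 >> 5] ==
 * z_Bitmap[1] at bit position 50 & 31 == 18.
 */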
static __inline void *
magazine_alloc(struct magazine *mp, int *burst)
{
	void *obj = NULL;

	if (mp != NULL && MAGAZINE_NOTEMPTY(mp)) {
		obj = mp->objects[--mp->rounds];
		return(obj);
	}
	if (mp == NULL)		/* guard the flags accesses below */
		return(NULL);

	/* Return burst factor to caller */
	if ((mp->flags & M_BURST) && (burst != NULL)) {
		*burst = mp->burst_factor;
	}

	/* Reduce burst factor by NSCALE; if it hits 1, disable BURST */
	if ((mp->flags & M_BURST) && (mp->flags & M_BURST_EARLY) &&
	    (burst != NULL)) {
		mp->burst_factor -= M_BURST_NSCALE;
		if (mp->burst_factor <= 1) {
			mp->burst_factor = 1;
			mp->flags &= ~(M_BURST);
			mp->flags &= ~(M_BURST_EARLY);
		}
	}

	return(obj);
}
static __inline int
magazine_free(struct magazine *mp, void *p)
{
	if (mp != NULL && MAGAZINE_NOTFULL(mp)) {
		mp->objects[mp->rounds++] = p;
		return(0);
	}

	return(-1);
}
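/*
 * Usage sketch (illustrative, not part of the original source): a
 * magazine is a fixed-capacity LIFO stack of object pointers;
 * magazine_free() pushes and magazine_alloc() pops.
 */
#if 0
static void
magazine_example(void)
{
	struct magazine m;
	void *obj = (void *)0x1000;	/* hypothetical object pointer */

	bzero(&m, sizeof(m));
	m.capacity = M_MAX_ROUNDS;

	magazine_free(&m, obj);		/* push: returns 0, m.rounds == 1 */
	magazine_alloc(&m, NULL);	/* pop: returns obj, m.rounds == 0 */
	magazine_alloc(&m, NULL);	/* empty: returns NULL */
}
#endif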
static void *
mtmagazine_alloc(int zi)
{
	thr_mags *tp;
	struct magazine *mp, *emptymag;
	magazine_depot *d;
	void *obj = NULL;

	tp = &thread_mags;

	for (;;) {
		/* If the loaded magazine has rounds, allocate and return */
		if (((mp = tp->mags[zi].loaded) != NULL) &&
		    MAGAZINE_NOTEMPTY(mp)) {
			obj = magazine_alloc(mp, NULL);
			break;
		}

		/* If the prev magazine is full, swap with loaded and retry */
		if (((mp = tp->mags[zi].prev) != NULL) &&
		    MAGAZINE_FULL(mp)) {
			swap_mags(&tp->mags[zi]);
			continue;
		}

		/* Lock the depot and check if it has any full magazines; if
		 * so we return the prev to the empty list, move loaded to
		 * prev, load a full magazine, and retry */
		d = &depots[zi];
		depot_lock(d);

		if (!SLIST_EMPTY(&d->full)) {
			emptymag = tp->mags[zi].prev;
			tp->mags[zi].prev = tp->mags[zi].loaded;
			tp->mags[zi].loaded = SLIST_FIRST(&d->full);
			SLIST_REMOVE_HEAD(&d->full, nextmagazine);

			/* Return emptymag to the depot */
			if (emptymag != NULL)
				SLIST_INSERT_HEAD(&d->empty, emptymag,
						  nextmagazine);
			depot_unlock(d);
			continue;
		}

		depot_unlock(d);
		break;
	}

	return(obj);
}
static int
mtmagazine_free(int zi, void *ptr)
{
	thr_mags *tp;
	struct magazine *mp, *loadedmag, *newmag;
	magazine_depot *d;
	int rc = -1;

	tp = &thread_mags;

	/* Make sure the per-thread destructor is armed for thread-exit */
	if (tp->init == 0) {
		pthread_setspecific(thread_mags_key, tp);
		tp->init = 1;
	}

	for (;;) {
		/* If the loaded magazine has space, free directly to it */
		if (((mp = tp->mags[zi].loaded) != NULL) &&
		    MAGAZINE_NOTFULL(mp)) {
			rc = magazine_free(mp, ptr);
			break;
		}

		/* If the prev magazine is empty, swap with loaded and retry */
		if (((mp = tp->mags[zi].prev) != NULL) &&
		    MAGAZINE_EMPTY(mp)) {
			swap_mags(&tp->mags[zi]);
			continue;
		}

		/* Lock the depot; if there are any empty magazines, move the
		 * prev to the depot's full list, move loaded to prev,
		 * and move a new empty magazine to loaded, and retry. */
		d = &depots[zi];
		depot_lock(d);

		if (!SLIST_EMPTY(&d->empty)) {
			loadedmag = tp->mags[zi].prev;
			tp->mags[zi].prev = tp->mags[zi].loaded;
			tp->mags[zi].loaded = SLIST_FIRST(&d->empty);
			SLIST_REMOVE_HEAD(&d->empty, nextmagazine);

			/* Return loadedmag to the depot */
			if (loadedmag != NULL)
				SLIST_INSERT_HEAD(&d->full, loadedmag,
						  nextmagazine);
			depot_unlock(d);
			continue;
		}

		/* Allocate an empty magazine, add it to the depot, retry */
		newmag = _slaballoc(sizeof(struct magazine), SAFLAG_ZERO);
		if (newmag != NULL) {
			newmag->capacity = M_MAX_ROUNDS;
			newmag->rounds = 0;
			SLIST_INSERT_HEAD(&d->empty, newmag, nextmagazine);
			depot_unlock(d);
			continue;
		}

		depot_unlock(d);
		break;
	}

	return(rc);
}
static void
mtmagazine_init(void)
{
	int i;

	i = pthread_key_create(&thread_mags_key, &mtmagazine_destructor);
	if (i != 0)
		abort();
}
static void
mtmagazine_drain(struct magazine *mp)
{
	void *obj;

	while (MAGAZINE_NOTEMPTY(mp)) {
		obj = magazine_alloc(mp, NULL);
		_slabfree(obj, MAG_NORECURSE, NULL);
	}
}
/*
 * mtmagazine_destructor()
 *
 * When a thread exits, we reclaim all its resources; all its magazines are
 * drained and the structures are freed.
 */
static void
mtmagazine_destructor(void *thrp)
{
	thr_mags *tp = thrp;
	struct magazine *mp;
	int i;

	for (i = 0; i < NZONES; i++) {
		mp = tp->mags[i].loaded;
		if (mp != NULL && MAGAZINE_NOTEMPTY(mp))
			mtmagazine_drain(mp);
		_slabfree(mp, MAG_NORECURSE, NULL);

		mp = tp->mags[i].prev;
		if (mp != NULL && MAGAZINE_NOTEMPTY(mp))
			mtmagazine_drain(mp);
		_slabfree(mp, MAG_NORECURSE, NULL);
	}
}
/*
 * zone_alloc()
 *
 * Attempt to allocate a zone from the zone magazine; the zone magazine has
 * M_BURST_EARLY enabled, so honor the burst request from the magazine.
 */
static slzone_t
zone_alloc(int flags)
{
	slglobaldata_t slgd = &SLGlobalData;
	int burst = 1;
	int i, j;
	slzone_t z;

	zone_magazine_lock();
	slgd_unlock(slgd);

	z = magazine_alloc(&zone_magazine, &burst);
	if (z == NULL) {
		if (burst == 1)
			zone_magazine_unlock();

		z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);

		/* Stash the extra zones from the burst in the magazine */
		if (z != NULL) {
			for (i = 1; i < burst; i++) {
				j = magazine_free(&zone_magazine,
						  (char *)z + (ZoneSize * i));
				MASSERT(j == 0);
			}
		}

		if (burst != 1)
			zone_magazine_unlock();
	} else {
		z->z_Flags |= SLZF_UNOTZEROD;
		zone_magazine_unlock();
	}

	slgd_lock(slgd);
	return(z);
}
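/*
 * Illustration (not part of the original source): with the initial
 * burst factor of M_BURST_FACTOR (8), the first zone_alloc() that
 * misses the zone magazine mmap()s 8 * ZoneSize (512KB) in one call and
 * stashes seven of the zones as free rounds; M_BURST_EARLY then decays
 * the factor by M_BURST_NSCALE (2) on each subsequent miss until it
 * reaches 1 and bursting turns off.
 */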
/*
 * zone_free()
 *
 * Releases the slgd lock prior to unmap, if unmapping is necessary.
 */
static void
zone_free(void *z)
{
	slglobaldata_t slgd = &SLGlobalData;
	void *excess[M_ZONE_ROUNDS - M_LOW_ROUNDS] = {};
	int i, j;

	zone_magazine_lock();
	slgd_unlock(slgd);

	bzero(z, sizeof(struct slzone));

	if (opt_madvise)
		madvise(z, ZoneSize, MADV_FREE);

	i = magazine_free(&zone_magazine, z);

	/*
	 * If we failed to free, collect excess magazines; release the zone
	 * magazine lock, and then free to the system via _vmem_free.  Re-enable
	 * BURST mode for the magazine.
	 */
	if (i == -1) {
		j = zone_magazine.rounds - zone_magazine.low_factor;
		for (i = 0; i < j; i++) {
			excess[i] = magazine_alloc(&zone_magazine, NULL);
			MASSERT(excess[i] != NULL);
		}
		zone_magazine.flags |= M_BURST | M_BURST_EARLY;
		zone_magazine.burst_factor = M_BURST_FACTOR;
		zone_magazine_unlock();

		for (i = 0; i < j; i++)
			_vmem_free(excess[i], ZoneSize);

		_vmem_free(z, ZoneSize);
	} else {
		zone_magazine_unlock();
	}
}
/*
 * _vmem_alloc()
 *
 * Directly map memory in PAGE_SIZE'd chunks with the specified
 * alignment.
 *
 * Alignment must be a multiple of PAGE_SIZE.
 *
 * Size must be >= alignment.
 */
static void *
_vmem_alloc(size_t size, size_t align, int flags)
{
	char *addr;
	char *save;
	size_t excess;

	/*
	 * Map anonymous private memory.
	 */
	addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
		    MAP_PRIVATE|MAP_ANON, -1, 0);
	if (addr == MAP_FAILED)
		return(NULL);

	/*
	 * Check alignment.  The misaligned offset is also the excess
	 * amount.  If misaligned unmap the excess so we have a chance of
	 * mapping at the next alignment point and recursively try again.
	 *
	 * BBBBBBBBBBB BBBBBBBBBBB BBBBBBBBBBB	block alignment
	 *   aaaaaaaaa aaaaaaaaaaa aa		mis-aligned allocation
	 *   xxxxxxxxx				final excess calculation
	 *   ^ returned address
	 */
	excess = (uintptr_t)addr & (align - 1);
	if (excess) {
		excess = align - excess;
		save = addr;

		munmap(save + excess, size - excess);
		addr = _vmem_alloc(size, align, flags);
		munmap(save, excess);
	}
	return((void *)addr);
}
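/*
 * Worked example (illustrative, not part of the original source):
 * suppose align == 0x10000 and mmap() returns 0x800703000.  Then
 * excess == 0x800703000 & 0xffff == 0x3000, adjusted to
 * 0x10000 - 0x3000 == 0xd000.  The tail mapping starting at the aligned
 * address 0x800710000 is unmapped, the allocation is retried (now
 * likely landing on that freshly freed boundary), and the 0xd000-byte
 * prefix is unmapped last.
 */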
/*
 * _vmem_free()
 *
 * Free a chunk of memory allocated with _vmem_alloc()
 */
static void
_vmem_free(void *ptr, size_t size)
{
	munmap(ptr, size);
}
/*
 * Panic on fatal conditions
 */
static void
_mpanic(const char *ctl, ...)
{
	va_list va;

	if (malloc_panic == 0) {
		malloc_panic = 1;
		va_start(va, ctl);
		vfprintf(stderr, ctl, va);
		fprintf(stderr, "\n");
		fflush(stderr);
		va_end(va);
	}
	abort();
}