2 * NMALLOC.C - New Malloc (ported from kernel slab allocator)
4 * Copyright (c) 2003,2004,2009,2010 The DragonFly Project. All rights reserved.
6 * This code is derived from software contributed to The DragonFly Project
7 * by Matthew Dillon <dillon@backplane.com> and by
8 * Venkatesh Srinivas <me@endeavour.zapto.org>.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * $Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $
40 * This module implements a slab allocator drop-in replacement for the
43 * A slab allocator reserves a ZONE for each chunk size, then lays the
44 * chunks out in an array within the zone. Allocation and deallocation
45 * are nearly instantaneous, and overhead losses are limited to a fixed
48 * The slab allocator does not have to pre-initialize the list of
49 * free chunks for each zone, and the underlying VM will not be
50 * touched at all beyond the zone header until an actual allocation
53 * Slab management and locking is done on a per-zone basis.
55 * Alloc Size Chunking Number of zones
66 * Allocations >= ZoneLimit (16K) go directly to mmap(), and a hash table
67 * is used to locate them on free(). One- and two-page allocations use the
68 * zone mechanism to avoid excessive mmap()/munmap() calls.
70 * API FEATURES AND SIDE EFFECTS
72 * + power-of-2 sized allocations up to a page will be power-of-2 aligned.
73 * Above that, power-of-2 sized allocations are page-aligned. Non
74 * power-of-2 sized allocations are aligned the same as the chunk
75 * size for their zone (a short usage sketch follows this header).
76 * + malloc(0) returns a special non-NULL value
77 * + ability to allocate arbitrarily large chunks of memory
78 * + realloc will reuse the passed pointer if possible, within the
79 * limitations of the zone chunking.
81 * Multithreaded enhancements for small allocations introduced August 2010.
82 * These are in the spirit of 'libumem'. See:
83 * Bonwick, J.; Adams, J. (2001). "Magazines and Vmem: Extending the
84 * slab allocator to many CPUs and arbitrary resources". In Proc. 2001
85 * USENIX Technical Conference. USENIX Association.
89 * The value of the environment variable MALLOC_OPTIONS is a character string
90 * containing various flags to tune nmalloc.
92 * 'U' / ['u'] Generate / do not generate utrace entries for ktrace(1)
93 * This will generate utrace events for all malloc,
94 * realloc, and free calls. There are tools to replay an
95 * allocation pattern (mtrplay) or to graph heap structure
96 * (mtrgraph) from these logs.
97 * 'Z' / ['z'] Zero out / do not zero all allocations.
98 * Each new byte of memory allocated by malloc, realloc, or
99 * reallocf will be initialized to 0. This is intended for
100 * debugging and will affect performance negatively.
101 * 'H' / ['h'] Pass a hint to the kernel about pages unused by the
102 * allocation functions.
105 /* cc -shared -fPIC -g -O -I/usr/src/lib/libc/include -o nmalloc.so nmalloc.c */
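
/*
 * Usage sketch (not part of the original source): illustrates the API
 * side effects documented in the header above.  The alignment and the
 * non-NULL malloc(0) result follow from that comment; this is
 * illustrative only and assumes <assert.h> for the checks.  Run with
 * MALLOC_OPTIONS=ZU in the environment to combine zeroed allocations
 * with utrace logging.
 */
#if 0
static void
example_api_features(void)
{
	void *p = malloc(0);	/* special non-NULL pointer */
	void *q = malloc(256);	/* power-of-2 size <= page: 256-byte aligned */

	assert(p != NULL);
	assert(((uintptr_t)q & 255) == 0);
	free(q);
	free(p);
}
#endif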
107 #include "libc_private.h"
109 #include <sys/param.h>
110 #include <sys/types.h>
111 #include <sys/mman.h>
112 #include <sys/queue.h>
114 #include <sys/ktrace.h>
126 #include "spinlock.h"
127 #include "un-namespace.h"
129 static char rcsid[] = "$Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $";
132 * Linked list of large allocations
134 typedef struct bigalloc {
135 struct bigalloc *next; /* hash link */
136 void *base; /* base pointer */
137 u_long bytes; /* bytes allocated */
141 * Note that allocations which are exact multiples of PAGE_SIZE (larger than
142 * two pages), or which are >= ZALLOC_ZONE_LIMIT, fall through to the mmap backend.
144 #define ZALLOC_ZONE_LIMIT (16 * 1024) /* max slab-managed alloc */
145 #define ZALLOC_MIN_ZONE_SIZE (32 * 1024) /* minimum zone size */
146 #define ZALLOC_MAX_ZONE_SIZE (128 * 1024) /* maximum zone size */
147 #define ZALLOC_ZONE_SIZE (64 * 1024)
148 #define ZALLOC_SLAB_MAGIC 0x736c6162 /* magic sanity */
149 #define ZALLOC_SLAB_SLIDE 20 /* L1-cache skip */
151 #if ZALLOC_ZONE_LIMIT == 16384
153 #elif ZALLOC_ZONE_LIMIT == 32768
156 #error "I couldn't figure out NZONES"
160 * Chunk structure for free elements
162 typedef struct slchunk {
163 struct slchunk *c_Next;
167 * The IN-BAND zone header is placed at the beginning of each zone.
171 typedef struct slzone {
172 int32_t z_Magic; /* magic number for sanity check */
173 int z_NFree; /* total free chunks / ualloc space */
174 struct slzone *z_Next; /* ZoneAry[] link if z_NFree non-zero */
175 int z_NMax; /* maximum free chunks */
176 char *z_BasePtr; /* pointer to start of chunk array */
177 int z_UIndex; /* current initial allocation index */
178 int z_UEndIndex; /* last (first) allocation index */
179 int z_ChunkSize; /* chunk size for validation */
180 int z_FirstFreePg; /* chunk list on a page-by-page basis */
183 struct slchunk *z_PageAry[ZALLOC_ZONE_SIZE / PAGE_SIZE];
184 #if defined(INVARIANTS)
185 __uint32_t z_Bitmap[]; /* bitmap of free chunks / sanity */
189 typedef struct slglobaldata {
191 slzone_t ZoneAry[NZONES];/* linked list of zones NFree > 0 */
195 #define SLZF_UNOTZEROD 0x0001
197 #define FASTSLABREALLOC 0x02
200 * Misc constants. Note that allocations that are exact multiples of
201 * PAGE_SIZE (larger than two pages), or exceed the zone limit, go to the mmap backend.
202 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
204 #define MIN_CHUNK_SIZE 8 /* in bytes */
205 #define MIN_CHUNK_MASK (MIN_CHUNK_SIZE - 1)
206 #define IN_SAME_PAGE_MASK (~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
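
/*
 * Illustrative only (hypothetical addresses): two chunk pointers agree
 * under IN_SAME_PAGE_MASK when they lie in the same page and are both
 * MIN_CHUNK_SIZE aligned, which is how the per-page free lists are
 * sanity-checked.
 */
#if 0
static void
example_same_page_check(void)
{
	intptr_t a = 0x801234008;	/* hypothetical chunk address */
	intptr_t b = 0x801234f10;	/* another chunk, same page   */

	/* evaluates to 1: same page, both 8-byte aligned */
	int same = ((a & IN_SAME_PAGE_MASK) == (b & IN_SAME_PAGE_MASK));
	(void)same;
}
#endif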
209 * The WEIRD_ADDR is used as known text to copy into free objects to
210 * try to create deterministic failure cases if the data is accessed after
213 * WARNING: A limited number of spinlocks are available, BIGXSIZE should
214 * not be larger than 64.
216 #define WEIRD_ADDR 0xdeadc0de
217 #define MAX_COPY sizeof(weirdary)
218 #define ZERO_LENGTH_PTR ((void *)&malloc_dummy_pointer)
220 #define BIGHSHIFT 10 /* bigalloc hash table */
221 #define BIGHSIZE (1 << BIGHSHIFT)
222 #define BIGHMASK (BIGHSIZE - 1)
223 #define BIGXSIZE (BIGHSIZE / 16) /* bigalloc lock table */
224 #define BIGXMASK (BIGXSIZE - 1)
226 #define SAFLAG_ZERO 0x0001
227 #define SAFLAG_PASSIVE 0x0002
233 #define arysize(ary) (sizeof(ary)/sizeof((ary)[0]))
235 #define MASSERT(exp) do { if (__predict_false(!(exp))) \
236 _mpanic("assertion: %s in %s", \
244 #define M_MAX_ROUNDS 64
245 #define M_ZONE_ROUNDS 64
246 #define M_LOW_ROUNDS 32
247 #define M_INIT_ROUNDS 8
248 #define M_BURST_FACTOR 8
249 #define M_BURST_NSCALE 2
251 #define M_BURST 0x0001
252 #define M_BURST_EARLY 0x0002
255 SLIST_ENTRY(magazine) nextmagazine;
258 int capacity; /* Max rounds in this magazine */
259 int rounds; /* Current number of free rounds */
260 int burst_factor; /* Number of blocks to prefill with */
261 int low_factor; /* Drain down to this many rounds when full */
262 void *objects[M_MAX_ROUNDS];
265 SLIST_HEAD(magazinelist, magazine);
267 static spinlock_t zone_mag_lock;
268 static struct magazine zone_magazine = {
269 .flags = M_BURST | M_BURST_EARLY,
270 .capacity = M_ZONE_ROUNDS,
272 .burst_factor = M_BURST_FACTOR,
273 .low_factor = M_LOW_ROUNDS
276 #define MAGAZINE_FULL(mp) (mp->rounds == mp->capacity)
277 #define MAGAZINE_NOTFULL(mp) (mp->rounds < mp->capacity)
278 #define MAGAZINE_EMPTY(mp) (mp->rounds == 0)
279 #define MAGAZINE_NOTEMPTY(mp) (mp->rounds != 0)
281 /* Each thread has a pair of magazines per size-class (NZONES).
282 * The loaded magazine supports immediate allocations; the previous
283 * magazine is either full or empty and can be swapped in as needed. */
284 typedef struct magazine_pair {
285 struct magazine *loaded;
286 struct magazine *prev;
289 /* A depot is a collection of magazines for a single zone. */
290 typedef struct magazine_depot {
291 struct magazinelist full;
292 struct magazinelist empty;
293 pthread_spinlock_t lock;
296 typedef struct thr_mags {
297 magazine_pair mags[NZONES];
298 struct magazine *newmag;
302 /* With this attribute set, no function call is required to access this
303 * thread-local variable when the code is compiled with -fPIC. */
304 #define TLS_ATTRIBUTE __attribute__ ((tls_model ("initial-exec")))
306 static int mtmagazine_free_live;
307 static __thread thr_mags thread_mags TLS_ATTRIBUTE;
308 static pthread_key_t thread_mags_key;
309 static pthread_once_t thread_mags_once = PTHREAD_ONCE_INIT;
310 static magazine_depot depots[NZONES];
313 * Fixed globals (not per-cpu)
315 static const int ZoneSize = ZALLOC_ZONE_SIZE;
316 static const int ZoneLimit = ZALLOC_ZONE_LIMIT;
317 static const int ZonePageCount = ZALLOC_ZONE_SIZE / PAGE_SIZE;
318 static const int ZoneMask = ZALLOC_ZONE_SIZE - 1;
320 static int opt_madvise = 0;
321 static int opt_utrace = 0;
322 static int malloc_started = 0;
323 static int g_malloc_flags = 0;
324 static spinlock_t malloc_init_lock;
325 static struct slglobaldata SLGlobalData;
326 static bigalloc_t bigalloc_array[BIGHSIZE];
327 static spinlock_t bigspin_array[BIGXSIZE];
328 static int malloc_panic;
329 static int malloc_dummy_pointer;
331 static const int32_t weirdary[16] = {
332 WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
333 WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
334 WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
335 WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR
338 static void *_slaballoc(size_t size, int flags);
339 static void *_slabrealloc(void *ptr, size_t size);
340 static void _slabfree(void *ptr, int, bigalloc_t *);
341 static void *_vmem_alloc(size_t bytes, size_t align, int flags);
342 static void _vmem_free(void *ptr, size_t bytes);
343 static void *magazine_alloc(struct magazine *, int *);
344 static int magazine_free(struct magazine *, void *);
345 static void *mtmagazine_alloc(int zi);
346 static int mtmagazine_free(int zi, void *);
347 static void mtmagazine_init(void);
348 static void mtmagazine_destructor(void *);
349 static slzone_t zone_alloc(int flags);
350 static void zone_free(void *z);
351 static void _mpanic(const char *ctl, ...);
352 static void malloc_init(void);
353 #if defined(INVARIANTS)
354 static void chunk_mark_allocated(slzone_t z, void *chunk);
355 static void chunk_mark_free(slzone_t z, void *chunk);
358 struct nmalloc_utrace {
364 #define UTRACE(a, b, c) \
366 struct nmalloc_utrace ut = { \
371 utrace(&ut, sizeof(ut)); \
376 * If enabled, any memory allocated without SAFLAG_ZERO is initialized to -1.
378 static int use_malloc_pattern;
384 const char *p = NULL;
387 _SPINLOCK(&malloc_init_lock);
388 if (malloc_started) {
389 _SPINUNLOCK(&malloc_init_lock);
394 if (issetugid() == 0)
395 p = getenv("MALLOC_OPTIONS");
397 for (; p != NULL && *p != '\0'; p++) {
399 case 'u': opt_utrace = 0; break;
400 case 'U': opt_utrace = 1; break;
401 case 'h': opt_madvise = 0; break;
402 case 'H': opt_madvise = 1; break;
403 case 'z': g_malloc_flags = 0; break;
404 case 'Z': g_malloc_flags = SAFLAG_ZERO; break;
413 _SPINUNLOCK(&malloc_init_lock);
415 UTRACE((void *) -1, 0, NULL);
419 * We have to install a handler for nmalloc thread teardowns when
420 * the thread is created. We cannot delay this because destructors in
421 * sophisticated userland programs can call malloc() for the first time
422 * during their thread exit.
424 * This routine is called directly from pthreads.
427 _nmalloc_thr_init(void)
432 * Disallow mtmagazine operations until the mtmagazine is
438 pthread_setspecific(thread_mags_key, tp);
439 if (mtmagazine_free_live == 0) {
440 mtmagazine_free_live = 1;
441 pthread_once(&thread_mags_once, mtmagazine_init);
450 slgd_lock(slglobaldata_t slgd)
453 _SPINLOCK(&slgd->Spinlock);
457 slgd_unlock(slglobaldata_t slgd)
460 _SPINUNLOCK(&slgd->Spinlock);
464 depot_lock(magazine_depot *dp)
467 pthread_spin_lock(&dp->lock);
471 depot_unlock(magazine_depot *dp)
474 pthread_spin_unlock(&dp->lock);
478 zone_magazine_lock(void)
481 _SPINLOCK(&zone_mag_lock);
485 zone_magazine_unlock(void)
488 _SPINUNLOCK(&zone_mag_lock);
492 swap_mags(magazine_pair *mp)
494 struct magazine *tmp;
496 mp->loaded = mp->prev;
501 * bigalloc hashing and locking support.
503 * Return an unmasked hash code for the passed pointer.
506 _bigalloc_hash(void *ptr)
510 hv = ((int)(intptr_t)ptr >> PAGE_SHIFT) ^
511 ((int)(intptr_t)ptr >> (PAGE_SHIFT + BIGHSHIFT));
517 * Lock the hash chain and return a pointer to its base for the specified
520 static __inline bigalloc_t *
521 bigalloc_lock(void *ptr)
523 int hv = _bigalloc_hash(ptr);
526 bigp = &bigalloc_array[hv & BIGHMASK];
528 _SPINLOCK(&bigspin_array[hv & BIGXMASK]);
533 * Lock the hash chain and return a pointer to its base for the specified
536 * BUT, if the hash chain is empty, just return NULL and do not bother
539 static __inline bigalloc_t *
540 bigalloc_check_and_lock(void *ptr)
542 int hv = _bigalloc_hash(ptr);
545 bigp = &bigalloc_array[hv & BIGHMASK];
549 _SPINLOCK(&bigspin_array[hv & BIGXMASK]);
555 bigalloc_unlock(void *ptr)
560 hv = _bigalloc_hash(ptr);
561 _SPINUNLOCK(&bigspin_array[hv & BIGXMASK]);
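
/*
 * Illustrative only (hypothetical pointer): the unmasked hash value is
 * used twice, once to pick one of the BIGHSIZE (1024) hash chains and
 * once to pick one of the BIGXSIZE (64) spinlocks protecting them.
 */
#if 0
static void
example_bigalloc_hash(void)
{
	void *ptr = (void *)0x7f1234567000;	/* hypothetical */
	int hv = _bigalloc_hash(ptr);
	bigalloc_t *chain = &bigalloc_array[hv & BIGHMASK];
	spinlock_t *lock  = &bigspin_array[hv & BIGXMASK];
	(void)chain;
	(void)lock;
}
#endif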
566 * Calculate the zone index for the allocation request size and set the
567 * allocation request size to that particular zone's chunk size.
570 zoneindex(size_t *bytes, size_t *chunking)
572 size_t n = (unsigned int)*bytes; /* unsigned for shift opt */
574 *bytes = n = (n + 7) & ~7;
576 return(n / 8 - 1); /* 8 byte chunks, 16 zones */
579 *bytes = n = (n + 15) & ~15;
585 *bytes = n = (n + 31) & ~31;
590 *bytes = n = (n + 63) & ~63;
595 *bytes = n = (n + 127) & ~127;
597 return(n / 128 + 31);
600 *bytes = n = (n + 255) & ~255;
602 return(n / 256 + 39);
604 *bytes = n = (n + 511) & ~511;
606 return(n / 512 + 47);
608 #if ZALLOC_ZONE_LIMIT > 8192
610 *bytes = n = (n + 1023) & ~1023;
612 return(n / 1024 + 55);
615 #if ZALLOC_ZONE_LIMIT > 16384
617 *bytes = n = (n + 2047) & ~2047;
619 return(n / 2048 + 63);
622 _mpanic("Unexpected byte count %d", n);
627 * malloc() - call internal slab allocator
634 ptr = _slaballoc(size, 0);
638 UTRACE(0, size, ptr);
643 * calloc() - call internal slab allocator
646 calloc(size_t number, size_t size)
650 ptr = _slaballoc(number * size, SAFLAG_ZERO);
654 UTRACE(0, number * size, ptr);
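
/*
 * Note (not part of the original source): the number * size product
 * above is computed without an overflow check.  A defensive caller
 * might guard the request itself; a minimal sketch, assuming
 * <stdint.h> for SIZE_MAX:
 */
#if 0
static void *
example_checked_calloc(size_t number, size_t size)
{
	if (size != 0 && number > SIZE_MAX / size)
		return (NULL);			/* product would wrap */
	return (calloc(number, size));
}
#endif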
659 * realloc() (SLAB ALLOCATOR)
661 * We do not attempt to optimize this routine beyond reusing the same
662 * pointer if the new size fits within the chunking of the old pointer's
666 realloc(void *ptr, size_t size)
669 ret = _slabrealloc(ptr, size);
673 UTRACE(ptr, size, ret);
680 * Allocate (size) bytes with an alignment of (alignment), where (alignment)
681 * is a power of 2 >= sizeof(void *).
683 * The slab allocator will allocate on power-of-2 boundaries up to
684 * at least PAGE_SIZE. We use the zoneindex mechanic to find a
685 * zone matching the requirements, and _vmem_alloc() otherwise.
688 posix_memalign(void **memptr, size_t alignment, size_t size)
696 * OpenGroup spec issue 6 checks
698 if ((alignment | (alignment - 1)) + 1 != (alignment << 1)) {
702 if (alignment < sizeof(void *)) {
708 * Our zone mechanism guarantees same-sized alignment for any
709 * power-of-2 allocation. If size is a power-of-2 and reasonable
710 * we can just call _slaballoc() and be done. We round size up
711 * to the nearest alignment boundary to improve our odds of
712 * it becoming a power-of-2 if it wasn't before.
714 if (size <= alignment)
717 size = (size + alignment - 1) & ~(size_t)(alignment - 1);
718 if (size < PAGE_SIZE && (size | (size - 1)) + 1 == (size << 1)) {
719 *memptr = _slaballoc(size, 0);
720 return(*memptr ? 0 : ENOMEM);
724 * Otherwise locate a zone with a chunking that matches
725 * the requested alignment, within reason. Consider two cases:
727 * (1) A 1K allocation on a 32-byte alignment. The first zoneindex
728 * we find will be the best fit because the chunking will be
729 * greater or equal to the alignment.
731 * (2) A 513-byte allocation on a 256-byte alignment. In this case
732 * the first zoneindex we find will be for 576-byte allocations
733 * with a chunking of 64, which is not sufficient. To fix this
734 * we simply find the nearest power-of-2 >= size and rely on the
735 * _slaballoc() side effect that guarantees same-sized alignment
736 * for power-of-2 allocations (see the sketch after this function).
738 if (size < PAGE_SIZE) {
739 zi = zoneindex(&size, &chunking);
740 if (chunking >= alignment) {
741 *memptr = _slaballoc(size, 0);
742 return(*memptr ? 0 : ENOMEM);
748 while (alignment < size)
750 *memptr = _slaballoc(alignment, 0);
751 return(*memptr ? 0 : ENOMEM);
755 * If the slab allocator cannot handle it, use _vmem_alloc().
757 * Alignment must be adjusted up to at least PAGE_SIZE in this case.
759 if (alignment < PAGE_SIZE)
760 alignment = PAGE_SIZE;
761 if (size < alignment)
763 size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
764 *memptr = _vmem_alloc(size, alignment, 0);
768 big = _slaballoc(sizeof(struct bigalloc), 0);
770 _vmem_free(*memptr, size);
774 bigp = bigalloc_lock(*memptr);
779 bigalloc_unlock(*memptr);
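
/*
 * Usage sketch (not part of the original source): case (2) from the
 * comment above.  A 513-byte request with 256-byte alignment is first
 * rounded to 768, then promoted to the nearest covering power of 2
 * (1024), which _slaballoc() aligns to its own size, satisfying the
 * 256-byte requirement.
 */
#if 0
static void
example_posix_memalign(void)
{
	void *p;

	if (posix_memalign(&p, 256, 513) == 0) {
		/* p is at least 256-byte (in fact 1024-byte) aligned */
		free(p);
	}
}
#endif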
785 * free() (SLAB ALLOCATOR) - do the obvious
791 _slabfree(ptr, 0, NULL);
795 * _slaballoc() (SLAB ALLOCATOR)
797 * Allocate memory via the slab allocator. If the request is too large,
798 * or if it is page-aligned beyond a certain size, we fall back to the
802 _slaballoc(size_t size, int flags)
819 * Handle the degenerate size == 0 case. Yes, this does happen.
820 * Return a special pointer. This is to maintain compatibility with
821 * the original malloc implementation. Certain devices, such as the
822 * adaptec driver, not only allocate 0 bytes, they check for NULL and
823 * also realloc() later on. Joy.
826 return(ZERO_LENGTH_PTR);
828 /* Capture global flags */
829 flags |= g_malloc_flags;
832 * Handle large allocations directly. There should not be very many
833 * of these so performance is not a big issue.
835 * The backend allocator is pretty nasty on a SMP system. Use the
836 * slab allocator for one and two page-sized chunks even though we
837 * lose some efficiency.
839 if (size >= ZoneLimit ||
840 ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
844 size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
845 chunk = _vmem_alloc(size, PAGE_SIZE, flags);
849 big = _slaballoc(sizeof(struct bigalloc), 0);
851 _vmem_free(chunk, size);
854 bigp = bigalloc_lock(chunk);
859 bigalloc_unlock(chunk);
864 /* Compute allocation zone; zoneindex will panic on excessive sizes */
865 zi = zoneindex(&size, &chunking);
866 MASSERT(zi < NZONES);
868 obj = mtmagazine_alloc(zi);
870 if (flags & SAFLAG_ZERO)
875 slgd = &SLGlobalData;
879 * Attempt to allocate out of an existing zone. If all zones are
880 * exhausted pull one off the free list or allocate a new one.
882 if ((z = slgd->ZoneAry[zi]) == NULL) {
883 z = zone_alloc(flags);
888 * How big is the base structure?
890 #if defined(INVARIANTS)
892 * Make room for z_Bitmap. An exact calculation is
893 * somewhat more complicated so don't make an exact
896 off = offsetof(struct slzone,
897 z_Bitmap[(ZoneSize / size + 31) / 32]);
898 bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
900 off = sizeof(struct slzone);
904 * Align the storage in the zone based on the chunking.
906 * Guarantee power-of-2 alignment for power-of-2-sized
907 * chunks. Otherwise align based on the chunking size
908 * (typically 8 or 16 bytes for small allocations).
910 * NOTE: Allocations >= ZoneLimit are governed by the
911 * bigalloc code and typically only guarantee page-alignment.
913 * Set initial conditions for UIndex near the zone header
914 * to reduce unnecessary page faults, vs semi-randomization
915 * to improve L1 cache saturation.
917 if ((size | (size - 1)) + 1 == (size << 1))
918 off = (off + size - 1) & ~(size - 1);
920 off = (off + chunking - 1) & ~(chunking - 1);
921 z->z_Magic = ZALLOC_SLAB_MAGIC;
923 z->z_NMax = (ZoneSize - off) / size;
924 z->z_NFree = z->z_NMax;
925 z->z_BasePtr = (char *)z + off;
926 z->z_UIndex = z->z_UEndIndex = 0;
927 z->z_ChunkSize = size;
928 z->z_FirstFreePg = ZonePageCount;
929 z->z_Next = slgd->ZoneAry[zi];
930 slgd->ZoneAry[zi] = z;
931 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
932 flags &= ~SAFLAG_ZERO; /* already zero'd */
933 flags |= SAFLAG_PASSIVE;
937 * Slide the base index for initial allocations out of the
938 * next zone we create so we do not over-weight the lower
939 * part of the cpu memory caches.
941 slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
942 & (ZALLOC_MAX_ZONE_SIZE - 1);
946 * Ok, we have a zone from which at least one chunk is available.
948 * Remove us from the ZoneAry[] when we become empty
950 MASSERT(z->z_NFree > 0);
952 if (--z->z_NFree == 0) {
953 slgd->ZoneAry[zi] = z->z_Next;
958 * Locate a chunk in a free page. This attempts to localize
959 * reallocations into earlier pages without us having to sort
960 * the chunk list. A chunk may still overlap a page boundary.
962 while (z->z_FirstFreePg < ZonePageCount) {
963 if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
966 * Diagnostic: c_Next is not total garbage.
968 MASSERT(chunk->c_Next == NULL ||
969 ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
970 ((intptr_t)chunk & IN_SAME_PAGE_MASK));
973 chunk_mark_allocated(z, chunk);
975 MASSERT((uintptr_t)chunk & ZoneMask);
976 z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
983 * No chunks are available but NFree said we had some memory,
984 * so it must be available in the never-before-used-memory
985 * area governed by UIndex. The consequences are very
986 * serious if our zone got corrupted so we use an explicit
987 * panic rather than an assertion.
989 chunk = (slchunk_t)(z->z_BasePtr + z->z_UIndex * size);
991 if (++z->z_UIndex == z->z_NMax)
993 if (z->z_UIndex == z->z_UEndIndex) {
995 _mpanic("slaballoc: corrupted zone");
998 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
999 flags &= ~SAFLAG_ZERO;
1000 flags |= SAFLAG_PASSIVE;
1002 #if defined(INVARIANTS)
1003 chunk_mark_allocated(z, chunk);
1008 if (flags & SAFLAG_ZERO) {
1011 } else if ((flags & (SAFLAG_ZERO|SAFLAG_PASSIVE)) == 0) {
1012 if (use_malloc_pattern) {
1013 for (i = 0; i < size; i += sizeof(int)) {
1014 *(int *)((char *)chunk + i) = -1;
1017 /* avoid accidental double-free check */
1018 chunk->c_Next = (void *)-1;
1028 * Reallocate memory within the chunk
1031 _slabrealloc(void *ptr, size_t size)
1038 if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
1039 return(_slaballoc(size, 0));
1043 return(ZERO_LENGTH_PTR);
1047 * Handle oversized allocations.
1049 if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
1053 while ((big = *bigp) != NULL) {
1054 if (big->base == ptr) {
1055 size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
1056 bigbytes = big->bytes;
1057 if (bigbytes == size) {
1058 bigalloc_unlock(ptr);
1062 bigalloc_unlock(ptr);
1063 if ((nptr = _slaballoc(size, 0)) == NULL) {
1065 bigp = bigalloc_lock(ptr);
1068 bigalloc_unlock(ptr);
1071 if (size > bigbytes)
1073 bcopy(ptr, nptr, size);
1074 _slabfree(ptr, FASTSLABREALLOC, &big);
1079 bigalloc_unlock(ptr);
1083 * Get the original allocation's zone. If the new request winds
1084 * up using the same chunk size we do not have to do anything.
1086 * NOTE: We don't have to lock the globaldata here, the fields we
1087 * access here will not change at least as long as we have control
1088 * over the allocation.
1090 z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
1091 MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1094 * Use zoneindex() to chunk-align the new size, as long as the
1095 * new size is not too large.
1097 if (size < ZoneLimit) {
1098 zoneindex(&size, &chunking);
1099 if (z->z_ChunkSize == size)
1104 * Allocate memory for the new request size and copy as appropriate.
1106 if ((nptr = _slaballoc(size, 0)) != NULL) {
1107 if (size > z->z_ChunkSize)
1108 size = z->z_ChunkSize;
1109 bcopy(ptr, nptr, size);
1110 _slabfree(ptr, 0, NULL);
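
/*
 * Illustrative only: the chunk-reuse behavior described above as seen
 * through the public API.  A 100-byte allocation comes from the
 * 104-byte chunk zone, so growing it to 104 bytes returns the same
 * pointer, while growing it to 200 bytes moves it to another zone.
 */
#if 0
static void
example_realloc_reuse(void)
{
	void *p = malloc(100);
	void *q = realloc(p, 104);	/* same chunk size: q == p    */
	void *r = realloc(q, 200);	/* different zone: r may move */

	free(r);
}
#endif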
1117 * free (SLAB ALLOCATOR)
1119 * Free a memory block previously allocated by malloc. Note that we do not
1120 * attempt to update ks_loosememuse as MP races could prevent us from
1121 * checking memory limits in malloc.
1124 * FASTSLABREALLOC Fast call from realloc, *rbigp already
1130 _slabfree(void *ptr, int flags, bigalloc_t *rbigp)
1136 slglobaldata_t slgd;
1141 /* Fast realloc path for big allocations */
1142 if (flags & FASTSLABREALLOC) {
1144 goto fastslabrealloc;
1148 * Handle NULL frees and special 0-byte allocations
1152 if (ptr == ZERO_LENGTH_PTR)
1156 * Handle oversized allocations.
1158 if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
1159 while ((big = *bigp) != NULL) {
1160 if (big->base == ptr) {
1162 bigalloc_unlock(ptr);
1165 _slabfree(big, 0, NULL);
1167 MASSERT(sizeof(weirdary) <= size);
1168 bcopy(weirdary, ptr, sizeof(weirdary));
1170 _vmem_free(ptr, size);
1175 bigalloc_unlock(ptr);
1179 * Zone case. Figure out the zone based on the fact that it is
1182 z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
1183 MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1185 size = z->z_ChunkSize;
1186 zi = z->z_ZoneIndex;
1188 if (g_malloc_flags & SAFLAG_ZERO)
1191 if (mtmagazine_free(zi, ptr) == 0)
1194 pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
1196 slgd = &SLGlobalData;
1201 * Attempt to detect a double-free. To reduce overhead we only check
1202 * if there appears to be a link pointer at the base of the data.
1204 if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
1207 for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
1209 _mpanic("Double free at %p", chunk);
1212 chunk_mark_free(z, chunk);
1216 * Put weird data into the memory to detect modifications after
1217 * freeing, illegal pointer use after freeing (we should fault on
1218 * the odd address), and so forth.
1221 if (z->z_ChunkSize < sizeof(weirdary))
1222 bcopy(weirdary, chunk, z->z_ChunkSize);
1224 bcopy(weirdary, chunk, sizeof(weirdary));
1228 * Add this free non-zero'd chunk to a linked list for reuse, adjust
1231 chunk->c_Next = z->z_PageAry[pgno];
1232 z->z_PageAry[pgno] = chunk;
1233 if (z->z_FirstFreePg > pgno)
1234 z->z_FirstFreePg = pgno;
1237 * Bump the number of free chunks. If it becomes non-zero the zone
1238 * must be added back onto the appropriate list.
1240 if (z->z_NFree++ == 0) {
1241 z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
1242 slgd->ZoneAry[z->z_ZoneIndex] = z;
1246 * If the zone becomes totally free then release it.
1248 if (z->z_NFree == z->z_NMax) {
1251 pz = &slgd->ZoneAry[z->z_ZoneIndex];
1253 pz = &(*pz)->z_Next;
1258 /* slgd lock released */
1264 #if defined(INVARIANTS)
1266 * Helper routines for sanity checks
1270 chunk_mark_allocated(slzone_t z, void *chunk)
1272 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1275 MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
1276 bitptr = &z->z_Bitmap[bitdex >> 5];
1277 bitdex &= 31;
1278 MASSERT((*bitptr & (1 << bitdex)) == 0);
1279 *bitptr |= 1 << bitdex;
1284 chunk_mark_free(slzone_t z, void *chunk)
1286 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1289 MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
1290 bitptr = &z->z_Bitmap[bitdex >> 5];
1291 bitdex &= 31;
1292 MASSERT((*bitptr & (1 << bitdex)) != 0);
1293 *bitptr &= ~(1 << bitdex);
1299 * Pop an object from the magazine and return it. NULL is returned and
1300 * *burst is adjusted if the magazine is empty.
1302 static __inline void *
1303 magazine_alloc(struct magazine *mp, int *burst)
1309 if (MAGAZINE_NOTEMPTY(mp)) {
1310 obj = mp->objects[--mp->rounds];
1315 * Return burst factor to caller along with NULL
1317 if ((mp->flags & M_BURST) && (burst != NULL)) {
1318 *burst = mp->burst_factor;
1320 /* Reduce burst factor by NSCALE; if it hits 1, disable BURST */
1321 if ((mp->flags & M_BURST) && (mp->flags & M_BURST_EARLY) &&
1323 mp->burst_factor -= M_BURST_NSCALE;
1324 if (mp->burst_factor <= 1) {
1325 mp->burst_factor = 1;
1326 mp->flags &= ~(M_BURST);
1327 mp->flags &= ~(M_BURST_EARLY);
1334 magazine_free(struct magazine *mp, void *p)
1336 if (mp != NULL && MAGAZINE_NOTFULL(mp)) {
1337 mp->objects[mp->rounds++] = p;
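
/*
 * Illustrative only: a magazine is a small LIFO stack of object
 * pointers.  magazine_free() pushes, magazine_alloc() pops; the
 * object value here is hypothetical.
 */
#if 0
static void
example_magazine(void)
{
	struct magazine m = { .capacity = M_MAX_ROUNDS };
	void *stash = (void *)0x1000;		/* hypothetical object */
	void *obj;

	magazine_free(&m, stash);		/* push: rounds becomes 1 */
	obj = magazine_alloc(&m, NULL);		/* pop: returns stash     */
	(void)obj;
}
#endif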
1345 mtmagazine_alloc(int zi)
1348 struct magazine *mp, *emptymag;
1353 * Do not try to access per-thread magazines while the mtmagazine
1354 * is being initialized or destroyed.
1361 * Primary per-thread allocation loop
1365 * If the loaded magazine has rounds, allocate and return
1367 mp = tp->mags[zi].loaded;
1368 obj = magazine_alloc(mp, NULL);
1373 * If the prev magazine is full, swap with the loaded
1374 * magazine and retry.
1376 mp = tp->mags[zi].prev;
1377 if (mp && MAGAZINE_FULL(mp)) {
1378 MASSERT(mp->rounds != 0);
1379 swap_mags(&tp->mags[zi]); /* prev now empty */
1384 * Try to get a full magazine from the depot. Cycle
1385 * through depot(full)->loaded->prev->depot(empty).
1386 * Retry if a full magazine was available from the depot.
1388 * Return NULL (caller will fall through) if no magazines
1389 * can be found anywhere.
1393 emptymag = tp->mags[zi].prev;
1395 SLIST_INSERT_HEAD(&d->empty, emptymag, nextmagazine);
1396 tp->mags[zi].prev = tp->mags[zi].loaded;
1397 mp = SLIST_FIRST(&d->full); /* loaded magazine */
1398 tp->mags[zi].loaded = mp;
1400 SLIST_REMOVE_HEAD(&d->full, nextmagazine);
1401 MASSERT(MAGAZINE_NOTEMPTY(mp));
1413 mtmagazine_free(int zi, void *ptr)
1416 struct magazine *mp, *loadedmag;
1421 * Do not try to access per-thread magazines while the mtmagazine
1422 * is being initialized or destroyed.
1429 * Primary per-thread freeing loop
1433 * Make sure a new magazine is available in case we have
1434 * to use it. Staging the newmag allows us to avoid
1435 * some locking/reentrancy complexity.
1437 * Temporarily disable the per-thread caches for this
1438 * allocation to avoid reentrancy and/or to avoid a
1439 * stack overflow if the [zi] happens to be the same that
1440 * would be used to allocate the new magazine.
1442 if (tp->newmag == NULL) {
1444 tp->newmag = _slaballoc(sizeof(struct magazine),
1447 if (tp->newmag == NULL) {
1454 * If the loaded magazine has space, free directly to it
1456 rc = magazine_free(tp->mags[zi].loaded, ptr);
1461 * If the prev magazine is empty, swap with the loaded
1462 * magazine and retry.
1464 mp = tp->mags[zi].prev;
1465 if (mp && MAGAZINE_EMPTY(mp)) {
1466 MASSERT(mp->rounds == 0);
1467 swap_mags(&tp->mags[zi]); /* prev now full */
1472 * Try to get an empty magazine from the depot. Cycle
1473 * through depot(empty)->loaded->prev->depot(full).
1474 * Retry if an empty magazine was available from the depot.
1479 if ((loadedmag = tp->mags[zi].prev) != NULL)
1480 SLIST_INSERT_HEAD(&d->full, loadedmag, nextmagazine);
1481 tp->mags[zi].prev = tp->mags[zi].loaded;
1482 mp = SLIST_FIRST(&d->empty);
1484 tp->mags[zi].loaded = mp;
1485 SLIST_REMOVE_HEAD(&d->empty, nextmagazine);
1486 MASSERT(MAGAZINE_NOTFULL(mp));
1490 mp->capacity = M_MAX_ROUNDS;
1493 tp->mags[zi].loaded = mp;
1502 mtmagazine_init(void)
1506 error = pthread_key_create(&thread_mags_key, mtmagazine_destructor);
1512 * This function is only used by the thread exit destructor
1515 mtmagazine_drain(struct magazine *mp)
1519 while (MAGAZINE_NOTEMPTY(mp)) {
1520 obj = magazine_alloc(mp, NULL);
1521 _slabfree(obj, 0, NULL);
1526 * mtmagazine_destructor()
1528 * When a thread exits, we reclaim all its resources; all its magazines are
1529 * drained and the structures are freed.
1531 * WARNING! The destructor can be called multiple times if the larger user
1532 * program has its own destructors which run after ours and which
1533 * allocate or free memory.
1536 mtmagazine_destructor(void *thrp)
1538 thr_mags *tp = thrp;
1539 struct magazine *mp;
1543 * Prevent further use of mtmagazines while we are destructing
1544 * them, as well as for any destructors which are run after us
1545 * prior to the thread actually being destroyed.
1549 for (i = 0; i < NZONES; i++) {
1550 mp = tp->mags[i].loaded;
1551 tp->mags[i].loaded = NULL;
1553 if (MAGAZINE_NOTEMPTY(mp))
1554 mtmagazine_drain(mp);
1555 _slabfree(mp, 0, NULL);
1558 mp = tp->mags[i].prev;
1559 tp->mags[i].prev = NULL;
1561 if (MAGAZINE_NOTEMPTY(mp))
1562 mtmagazine_drain(mp);
1563 _slabfree(mp, 0, NULL);
1570 _slabfree(mp, 0, NULL);
1577 * Attempt to allocate a zone from the zone magazine; the zone magazine has
1578 * M_BURST_EARLY enabled, so honor the burst request from the magazine.
1581 zone_alloc(int flags)
1583 slglobaldata_t slgd = &SLGlobalData;
1588 zone_magazine_lock();
1591 z = magazine_alloc(&zone_magazine, &burst);
1592 if (z == NULL && burst == 1) {
1593 zone_magazine_unlock();
1594 z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
1595 } else if (z == NULL) {
1596 z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
1598 for (i = 1; i < burst; i++) {
1599 j = magazine_free(&zone_magazine,
1600 (char *) z + (ZoneSize * i));
1604 zone_magazine_unlock();
1606 z->z_Flags |= SLZF_UNOTZEROD;
1607 zone_magazine_unlock();
1616 * Release a zone and unlock the slgd lock.
1621 slglobaldata_t slgd = &SLGlobalData;
1622 void *excess[M_ZONE_ROUNDS - M_LOW_ROUNDS] = {};
1625 zone_magazine_lock();
1628 bzero(z, sizeof(struct slzone));
1631 madvise(z, ZoneSize, MADV_FREE);
1633 i = magazine_free(&zone_magazine, z);
1636 * If we failed to free, collect the excess zones cached in the zone
1637 * magazine; release the zone magazine lock, and then free them to the
1638 * system via _vmem_free(). Re-enable BURST mode for the magazine.
1641 j = zone_magazine.rounds - zone_magazine.low_factor;
1642 for (i = 0; i < j; i++) {
1643 excess[i] = magazine_alloc(&zone_magazine, NULL);
1644 MASSERT(excess[i] != NULL);
1647 zone_magazine_unlock();
1649 for (i = 0; i < j; i++)
1650 _vmem_free(excess[i], ZoneSize);
1652 _vmem_free(z, ZoneSize);
1654 zone_magazine_unlock();
1661 * Directly map memory in PAGE_SIZE'd chunks with the specified
1664 * Alignment must be a multiple of PAGE_SIZE.
1666 * Size must be >= alignment.
1669 _vmem_alloc(size_t size, size_t align, int flags)
1676 * Map anonymous private memory.
1678 addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
1679 MAP_PRIVATE|MAP_ANON, -1, 0);
1680 if (addr == MAP_FAILED)
1684 * Check alignment. The misaligned offset is also the excess
1685 * amount. If misaligned, unmap the excess so we have a chance of
1686 * mapping at the next alignment point and recursively try again.
1688 * BBBBBBBBBBB BBBBBBBBBBB BBBBBBBBBBB block alignment
1689 * aaaaaaaaa aaaaaaaaaaa aa mis-aligned allocation
1690 * xxxxxxxxx final excess calculation
1691 * ^ returned address
1693 excess = (uintptr_t)addr & (align - 1);
1696 excess = align - excess;
1699 munmap(save + excess, size - excess);
1700 addr = _vmem_alloc(size, align, flags);
1701 munmap(save, excess);
1703 return((void *)addr);
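
/*
 * Worked example (hypothetical addresses, not part of the original
 * source): with align = 64KB, an mmap() result of 0x7f0000012000 has a
 * misaligned offset of 0x2000.  The excess becomes 0xe000, so
 * everything from addr + 0xe000 (the next alignment boundary) is
 * unmapped, the allocation is retried and can now land on that
 * boundary, and the 0xe000-byte leading stub is unmapped afterwards.
 */
#if 0
static void
example_vmem_align(void)
{
	uintptr_t addr   = 0x7f0000012000;	/* hypothetical mmap() result */
	size_t	  align  = 64 * 1024;
	size_t	  excess = addr & (align - 1);	/* 0x2000 */
	size_t	  stub   = align - excess;	/* 0xe000 */

	(void)stub;
}
#endif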
1709 * Free a chunk of memory allocated with _vmem_alloc()
1712 _vmem_free(void *ptr, size_t size)
1718 * Panic on fatal conditions
1721 _mpanic(const char *ctl, ...)
1725 if (malloc_panic == 0) {
1728 vfprintf(stderr, ctl, va);
1729 fprintf(stderr, "\n");