2 * NMALLOC.C - New Malloc (ported from kernel slab allocator)
4 * Copyright (c) 2003,2004,2009,2010 The DragonFly Project. All rights reserved.
6 * This code is derived from software contributed to The DragonFly Project
7 * by Matthew Dillon <dillon@backplane.com> and by
8 * Venkatesh Srinivas <me@endeavour.zapto.org>.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * $Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $
40 * This module implements a slab allocator drop-in replacement for the
43 * A slab allocator reserves a ZONE for each chunk size, then lays the
44 * chunks out in an array within the zone. Allocation and deallocation
45 * are nearly instantaneous, and overhead losses are limited to a fixed
48 * The slab allocator does not have to pre-initialize the list of
49 * free chunks for each zone, and the underlying VM will not be
50 * touched at all beyond the zone header until an actual allocation
53 * Slab management and locking is done on a per-zone basis.
55 * Alloc Size Chunking Number of zones
66 * Allocations >= ZoneLimit (16K) go directly to mmap and a hash table
67 * is used to locate them for free(). One- and two-page allocations use the
68 * zone mechanic to avoid excessive mmap()/munmap() calls.
70 * API FEATURES AND SIDE EFFECTS
72 * + power-of-2 sized allocations up to a page will be power-of-2 aligned.
73 * Above that, power-of-2 sized allocations are page-aligned. Non-
74 * power-of-2 sized allocations are aligned the same as the chunk
75 * size for their zone.
76 * + malloc(0) returns a special non-NULL value
77 * + ability to allocate arbitrarily large chunks of memory
78 * + realloc will reuse the passed pointer if possible, within the
79 * limitations of the zone chunking.
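 *
 *   Illustrative example of the chunking limitation (the 513 -> 576
 *   rounding is described under posix_memalign() below): a 513-byte
 *   allocation lands in a zone with 576-byte chunks, so realloc()ing it
 *   to any size from 513 to 576 bytes simply returns the same pointer,
 *   while any size outside that range allocates a new chunk and copies.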
81 * Multithreaded enhancements for small allocations introduced August 2010.
82 * These are in the spirit of 'libumem'. See:
83 * Bonwick, J.; Adams, J. (2001). "Magazines and Vmem: Extending the
84 * slab allocator to many CPUs and arbitrary resources". In Proc. 2001
85 * USENIX Technical Conference. USENIX Association.
87 * Oversized allocations employ the BIGCACHE mechanic whereby large
88 * allocations may be handed significantly larger buffers, allowing them
89 * to avoid mmap/munmap operations even through significant realloc()s.
90 * The excess space is only trimmed if too many large allocations have been
91 * given this treatment.
95 * The value of the environment variable MALLOC_OPTIONS is a character string
96 * containing various flags to tune nmalloc.
98 * 'U' / ['u'] Generate / do not generate utrace entries for ktrace(1)
99 * This will generate utrace events for all malloc,
100 * realloc, and free calls. There are tools (mtrplay) to
101 * replay an allocation pattern or to graph heap structure
102 * (mtrgraph) which can interpret these logs.
103 * 'Z' / ['z'] Zero out / do not zero all allocations.
104 * Each new byte of memory allocated by malloc, realloc, or
105 * reallocf will be initialized to 0. This is intended for
106 * debugging and will affect performance negatively.
107 * 'H' / ['h'] Pass a hint to the kernel about pages unused by the
108 * allocation functions.
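 *
 * For example, running "MALLOC_OPTIONS=ZU ./prog" both zeroes every
 * allocation and emits utrace records, while lowercase "zu" restores the
 * defaults. The string is scanned left to right, so later flags override
 * earlier ones, and it is ignored entirely for setugid programs.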
111 /* cc -shared -fPIC -g -O -I/usr/src/lib/libc/include -o nmalloc.so nmalloc.c */
113 #include "libc_private.h"
115 #include <sys/param.h>
116 #include <sys/types.h>
117 #include <sys/mman.h>
118 #include <sys/queue.h>
120 #include <sys/ktrace.h>
131 #include <machine/atomic.h>
133 #include "spinlock.h"
134 #include "un-namespace.h"
138 * Linked list of large allocations
140 typedef struct bigalloc {
141 struct bigalloc *next; /* hash link */
142 void *base; /* base pointer */
143 u_long active; /* bytes active */
144 u_long bytes; /* bytes allocated */
148 * Note that any allocations which are exact multiples of PAGE_SIZE, or
149 * which are >= ZALLOC_ZONE_LIMIT, will fall through to the kmem subsystem.
151 #define ZALLOC_ZONE_LIMIT (16 * 1024) /* max slab-managed alloc */
152 #define ZALLOC_MIN_ZONE_SIZE (32 * 1024) /* minimum zone size */
153 #define ZALLOC_MAX_ZONE_SIZE (128 * 1024) /* maximum zone size */
154 #define ZALLOC_ZONE_SIZE (64 * 1024)
155 #define ZALLOC_SLAB_MAGIC 0x736c6162 /* magic sanity */
156 #define ZALLOC_SLAB_SLIDE 20 /* L1-cache skip */
158 #if ZALLOC_ZONE_LIMIT == 16384
160 #elif ZALLOC_ZONE_LIMIT == 32768
163 #error "I couldn't figure out NZONES"
167 * Chunk structure for free elements
169 typedef struct slchunk {
170 struct slchunk *c_Next;
174 * The IN-BAND zone header is placed at the beginning of each zone.
178 typedef struct slzone {
179 int32_t z_Magic; /* magic number for sanity check */
180 int z_NFree; /* total free chunks / ualloc space */
181 struct slzone *z_Next; /* ZoneAry[] link if z_NFree non-zero */
182 int z_NMax; /* maximum free chunks */
183 char *z_BasePtr; /* pointer to start of chunk array */
184 int z_UIndex; /* current initial allocation index */
185 int z_UEndIndex; /* last (first) allocation index */
186 int z_ChunkSize; /* chunk size for validation */
187 int z_FirstFreePg; /* chunk list on a page-by-page basis */
190 struct slchunk *z_PageAry[ZALLOC_ZONE_SIZE / PAGE_SIZE];
191 #if defined(INVARIANTS)
192 __uint32_t z_Bitmap[]; /* bitmap of free chunks / sanity */
196 typedef struct slglobaldata {
198 slzone_t ZoneAry[NZONES];/* linked list of zones NFree > 0 */
202 #define SLZF_UNOTZEROD 0x0001
204 #define FASTSLABREALLOC 0x02
207 * Misc constants. Note that allocations that are exact multiples of
208 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
209 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
211 #define MIN_CHUNK_SIZE 8 /* in bytes */
212 #define MIN_CHUNK_MASK (MIN_CHUNK_SIZE - 1)
213 #define IN_SAME_PAGE_MASK (~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
216 * The WEIRD_ADDR is used as known text to copy into free objects to
217 * try to create deterministic failure cases if the data is accessed after
220 * WARNING: A limited number of spinlocks are available, BIGXSIZE should
221 * not be larger than 64.
223 #define WEIRD_ADDR 0xdeadc0de
224 #define MAX_COPY sizeof(weirdary)
225 #define ZERO_LENGTH_PTR ((void *)&malloc_dummy_pointer)
227 #define BIGHSHIFT 10 /* bigalloc hash table */
228 #define BIGHSIZE (1 << BIGHSHIFT)
229 #define BIGHMASK (BIGHSIZE - 1)
230 #define BIGXSIZE (BIGHSIZE / 16) /* bigalloc lock table */
231 #define BIGXMASK (BIGXSIZE - 1)
234 * BIGCACHE caches oversized allocations. Note that a linear search is
235 * performed, so do not make the cache too large.
237 * BIGCACHE will garbage-collect excess space when the excess exceeds the
238 * specified value. A relatively large number should be used here because
239 * garbage collection is expensive.
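 *
 * Illustrative example: a freed 512KB block (under BIGCACHE_LIMIT) may be
 * parked here and later handed to a 300KB oversized request; the unused
 * 212KB tail is tracked in excess_alloc, and only once the total excess
 * passes BIGCACHE_EXCESS does handle_excess_big() munmap() the idle tails.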
242 #define BIGCACHE_MASK (BIGCACHE - 1)
243 #define BIGCACHE_LIMIT (1024 * 1024) /* size limit */
244 #define BIGCACHE_EXCESS (16 * 1024 * 1024) /* garbage collect */
246 #define SAFLAG_ZERO 0x0001
247 #define SAFLAG_PASSIVE 0x0002
253 #define arysize(ary) (sizeof(ary)/sizeof((ary)[0]))
255 #define MASSERT(exp) do { if (__predict_false(!(exp))) \
256 _mpanic("assertion: %s in %s", \
264 #define M_MAX_ROUNDS 64
265 #define M_ZONE_ROUNDS 64
266 #define M_LOW_ROUNDS 32
267 #define M_INIT_ROUNDS 8
268 #define M_BURST_FACTOR 8
269 #define M_BURST_NSCALE 2
271 #define M_BURST 0x0001
272 #define M_BURST_EARLY 0x0002
275 SLIST_ENTRY(magazine) nextmagazine;
278 int capacity; /* Max rounds in this magazine */
279 int rounds; /* Current number of free rounds */
280 int burst_factor; /* Number of blocks to prefill with */
281 int low_factor; /* Free till low_factor from full mag */
282 void *objects[M_MAX_ROUNDS];
285 SLIST_HEAD(magazinelist, magazine);
287 static spinlock_t zone_mag_lock;
288 static struct magazine zone_magazine = {
289 .flags = M_BURST | M_BURST_EARLY,
290 .capacity = M_ZONE_ROUNDS,
292 .burst_factor = M_BURST_FACTOR,
293 .low_factor = M_LOW_ROUNDS
296 #define MAGAZINE_FULL(mp) (mp->rounds == mp->capacity)
297 #define MAGAZINE_NOTFULL(mp) (mp->rounds < mp->capacity)
298 #define MAGAZINE_EMPTY(mp) (mp->rounds == 0)
299 #define MAGAZINE_NOTEMPTY(mp) (mp->rounds != 0)
302 * Each thread will have a pair of magazines per size-class (NZONES).
303 * The loaded magazine will support immediate allocations; the previous
304 * magazine will either be full or empty and can be swapped as needed.
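 *
 * (Illustrative: a free that finds the loaded magazine full and the prev
 * magazine empty simply swaps the two and retries, so the common case
 * takes no lock; only when both are full does the thread visit the
 * shared depot.)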
306 typedef struct magazine_pair {
307 struct magazine *loaded;
308 struct magazine *prev;
311 /* A depot is a collection of magazines for a single zone. */
312 typedef struct magazine_depot {
313 struct magazinelist full;
314 struct magazinelist empty;
318 typedef struct thr_mags {
319 magazine_pair mags[NZONES];
320 struct magazine *newmag;
325 * With this attribute set, no function call is required to access
326 * this variable when the code is compiled -fPIC. Empty for libc_rtld
330 #define TLS_ATTRIBUTE
332 #define TLS_ATTRIBUTE __attribute__ ((tls_model ("initial-exec")))
335 static int mtmagazine_free_live;
336 static __thread thr_mags thread_mags TLS_ATTRIBUTE;
337 static pthread_key_t thread_mags_key;
338 static pthread_once_t thread_mags_once = PTHREAD_ONCE_INIT;
339 static magazine_depot depots[NZONES];
342 * Fixed globals (not per-cpu)
344 static const int ZoneSize = ZALLOC_ZONE_SIZE;
345 static const int ZoneLimit = ZALLOC_ZONE_LIMIT;
346 static const int ZonePageCount = ZALLOC_ZONE_SIZE / PAGE_SIZE;
347 static const int ZoneMask = ZALLOC_ZONE_SIZE - 1;
349 static int opt_madvise = 0;
350 static int opt_utrace = 0;
351 static int g_malloc_flags = 0;
352 static struct slglobaldata SLGlobalData;
353 static bigalloc_t bigalloc_array[BIGHSIZE];
354 static spinlock_t bigspin_array[BIGXSIZE];
355 static volatile void *bigcache_array[BIGCACHE]; /* atomic swap */
356 static volatile size_t bigcache_size_array[BIGCACHE]; /* SMP races ok */
357 static volatile int bigcache_index; /* SMP races ok */
358 static int malloc_panic;
359 static int malloc_dummy_pointer;
360 static size_t excess_alloc; /* excess big allocs */
362 static const int32_t weirdary[16] = {
363 WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
364 WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
365 WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
366 WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR
369 static void *_slaballoc(size_t size, int flags);
370 static void *_slabrealloc(void *ptr, size_t size);
371 static void _slabfree(void *ptr, int, bigalloc_t *);
372 static void *_vmem_alloc(size_t bytes, size_t align, int flags);
373 static void _vmem_free(void *ptr, size_t bytes);
374 static void *magazine_alloc(struct magazine *, int *);
375 static int magazine_free(struct magazine *, void *);
376 static void *mtmagazine_alloc(int zi);
377 static int mtmagazine_free(int zi, void *);
378 static void mtmagazine_init(void);
379 static void mtmagazine_destructor(void *);
380 static slzone_t zone_alloc(int flags);
381 static void zone_free(void *z);
382 static void _mpanic(const char *ctl, ...) __printflike(1, 2);
383 static void malloc_init(void) __constructor(101);
384 #if defined(INVARIANTS)
385 static void chunk_mark_allocated(slzone_t z, void *chunk);
386 static void chunk_mark_free(slzone_t z, void *chunk);
389 struct nmalloc_utrace {
395 #define UTRACE(a, b, c) \
397 struct nmalloc_utrace ut = { \
402 utrace(&ut, sizeof(ut)); \
407 * If enabled, any memory allocated without M_ZERO is initialized to -1.
409 static int use_malloc_pattern;
415 const char *p = NULL;
417 if (issetugid() == 0)
418 p = getenv("MALLOC_OPTIONS");
420 for (; p != NULL && *p != '\0'; p++) {
422 case 'u': opt_utrace = 0; break;
423 case 'U': opt_utrace = 1; break;
424 case 'h': opt_madvise = 0; break;
425 case 'H': opt_madvise = 1; break;
426 case 'z': g_malloc_flags = 0; break;
427 case 'Z': g_malloc_flags = SAFLAG_ZERO; break;
433 UTRACE((void *) -1, 0, NULL);
437 * We have to install a handler for nmalloc thread teardowns when
438 * the thread is created. We cannot delay this because destructors in
439 * sophisticated userland programs can call malloc() for the first time
440 * during their thread exit.
442 * This routine is called directly from pthreads.
445 _nmalloc_thr_init(void)
450 * Disallow mtmagazine operations until the mtmagazine is
456 if (mtmagazine_free_live == 0) {
457 mtmagazine_free_live = 1;
458 pthread_once(&thread_mags_once, mtmagazine_init);
460 pthread_setspecific(thread_mags_key, tp);
468 slgd_lock(slglobaldata_t slgd)
471 _SPINLOCK(&slgd->Spinlock);
475 slgd_unlock(slglobaldata_t slgd)
478 _SPINUNLOCK(&slgd->Spinlock);
482 depot_lock(magazine_depot *dp)
485 _SPINLOCK(&dp->lock);
489 depot_unlock(magazine_depot *dp)
492 _SPINUNLOCK(&dp->lock);
496 zone_magazine_lock(void)
499 _SPINLOCK(&zone_mag_lock);
503 zone_magazine_unlock(void)
506 _SPINUNLOCK(&zone_mag_lock);
510 swap_mags(magazine_pair *mp)
512 struct magazine *tmp;
514 mp->loaded = mp->prev;
519 * bigalloc hashing and locking support.
521 * Return an unmasked hash code for the passed pointer.
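 *
 * (The xor folds the upper bits of the page number into the low bits so
 * that widely separated mappings still spread across the BIGHSIZE hash
 * chains; callers mask the result with BIGHMASK to pick the chain and
 * with BIGXMASK to pick the spinlock protecting it.)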
524 _bigalloc_hash(void *ptr)
528 hv = ((int)(intptr_t)ptr >> PAGE_SHIFT) ^
529 ((int)(intptr_t)ptr >> (PAGE_SHIFT + BIGHSHIFT));
535 * Lock the hash chain and return a pointer to its base for the specified
538 static __inline bigalloc_t *
539 bigalloc_lock(void *ptr)
541 int hv = _bigalloc_hash(ptr);
544 bigp = &bigalloc_array[hv & BIGHMASK];
546 _SPINLOCK(&bigspin_array[hv & BIGXMASK]);
551 * Lock the hash chain and return a pointer to its base for the specified
554 * BUT, if the hash chain is empty, just return NULL and do not bother
557 static __inline bigalloc_t *
558 bigalloc_check_and_lock(void *ptr)
560 int hv = _bigalloc_hash(ptr);
563 bigp = &bigalloc_array[hv & BIGHMASK];
567 _SPINLOCK(&bigspin_array[hv & BIGXMASK]);
573 bigalloc_unlock(void *ptr)
578 hv = _bigalloc_hash(ptr);
579 _SPINUNLOCK(&bigspin_array[hv & BIGXMASK]);
584 * Find a bigcache entry that might work for the allocation. SMP races are
585 * ok here except for the swap (that is, it is ok if bigcache_size_array[i]
586 * is wrong or if a NULL or too-small big is returned).
588 * Generally speaking it is ok to find a large entry even if the bytes
589 * requested are relatively small (but still oversized), because we really
590 * don't know *what* the application is going to do with the buffer.
594 bigcache_find_alloc(size_t bytes)
596 bigalloc_t big = NULL;
600 for (i = 0; i < BIGCACHE; ++i) {
601 test = bigcache_size_array[i];
603 bigcache_size_array[i] = 0;
604 big = atomic_swap_ptr(&bigcache_array[i], NULL);
612 * Free a bigcache entry, possibly returning one that the caller really must
613 * free. This is used to cache recent oversized memory blocks. Only
614 * big blocks smaller than BIGCACHE_LIMIT will be cached this way, so try
615 * to collect the biggest ones we can that are under the limit.
619 bigcache_find_free(bigalloc_t big)
625 b = ++bigcache_index;
626 for (i = 0; i < BIGCACHE; ++i) {
627 j = (b + i) & BIGCACHE_MASK;
628 if (bigcache_size_array[j] < big->bytes) {
629 bigcache_size_array[j] = big->bytes;
630 big = atomic_swap_ptr(&bigcache_array[j], big);
639 handle_excess_big(void)
645 if (excess_alloc <= BIGCACHE_EXCESS)
648 for (i = 0; i < BIGHSIZE; ++i) {
649 bigp = &bigalloc_array[i];
653 _SPINLOCK(&bigspin_array[i & BIGXMASK]);
654 for (big = *bigp; big; big = big->next) {
655 if (big->active < big->bytes) {
656 MASSERT((big->active & PAGE_MASK) == 0);
657 MASSERT((big->bytes & PAGE_MASK) == 0);
658 munmap((char *)big->base + big->active,
659 big->bytes - big->active);
660 atomic_add_long(&excess_alloc,
661 big->active - big->bytes);
662 big->bytes = big->active;
666 _SPINUNLOCK(&bigspin_array[i & BIGXMASK]);
671 * Calculate the zone index for the allocation request size and set the
672 * allocation request size to that particular zone's chunk size.
675 zoneindex(size_t *bytes, size_t *chunking)
677 size_t n = (unsigned int)*bytes; /* unsigned for shift opt */
680 * This used to be 8-byte chunks and 16 zones for n < 128.
681 * However some instructions may require 16-byte alignment
682 * (aka SIMD) and programs might not request an aligned size
683 * (aka GCC-7), so change this as follows:
685 * 0-15 bytes 8-byte alignment in two zones (0-1)
686 * 16-127 bytes 16-byte alignment in eight zones (3-10)
687 * zone index 2 and 11-15 are currently unused.
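 *
 * Worked example: a 100-byte request is rounded up to 112 and, per the
 * return below, lands in zone 112/16 + 2 = 9; a 513-byte request is
 * rounded up to 576 in a 64-byte-chunked zone.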
690 *bytes = n = (n + 7) & ~7;
692 return(n / 8 - 1); /* 8 byte chunks, 2 zones */
693 /* zones 0,1, zone 2 is unused */
696 *bytes = n = (n + 15) & ~15;
698 return(n / 16 + 2); /* 16 byte chunks, 8 zones */
699 /* zones 3-10, zones 11-15 unused */
702 *bytes = n = (n + 15) & ~15;
708 *bytes = n = (n + 31) & ~31;
713 *bytes = n = (n + 63) & ~63;
718 *bytes = n = (n + 127) & ~127;
720 return(n / 128 + 31);
723 *bytes = n = (n + 255) & ~255;
725 return(n / 256 + 39);
727 *bytes = n = (n + 511) & ~511;
729 return(n / 512 + 47);
731 #if ZALLOC_ZONE_LIMIT > 8192
733 *bytes = n = (n + 1023) & ~1023;
735 return(n / 1024 + 55);
738 #if ZALLOC_ZONE_LIMIT > 16384
740 *bytes = n = (n + 2047) & ~2047;
742 return(n / 2048 + 63);
745 _mpanic("Unexpected byte count %zu", n);
750 * malloc() - call internal slab allocator
757 ptr = _slaballoc(size, 0);
761 UTRACE(0, size, ptr);
765 #define MUL_NO_OVERFLOW (1UL << (sizeof(size_t) * 4))
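
/*
 * MUL_NO_OVERFLOW is 2^32 on LP64 (2^16 on ILP32): if both calloc()
 * operands are below it their product cannot overflow a size_t, so the
 * relatively expensive division test below only runs when at least one
 * operand is large.
 */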
768 * calloc() - call internal slab allocator
771 calloc(size_t number, size_t size)
775 if ((number >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
776 number > 0 && SIZE_MAX / number < size) {
781 ptr = _slaballoc(number * size, SAFLAG_ZERO);
785 UTRACE(0, number * size, ptr);
790 * realloc() (SLAB ALLOCATOR)
792 * We do not attempt to optimize this routine beyond reusing the same
793 * pointer if the new size fits within the chunking of the old pointer's
797 realloc(void *ptr, size_t size)
800 ret = _slabrealloc(ptr, size);
804 UTRACE(ptr, size, ret);
811 * Allocate (size) bytes with an alignment of (alignment), where (alignment)
812 * is a power of 2 >= sizeof(void *).
814 * The slab allocator will allocate on power-of-2 boundaries up to
815 * at least PAGE_SIZE. We use the zoneindex mechanic to find a
816 * zone matching the requirements, and _vmem_alloc() otherwise.
819 posix_memalign(void **memptr, size_t alignment, size_t size)
827 * OpenGroup spec issue 6 checks
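 *
 * The first test below is a power-of-2 check: for a power of 2 such as
 * 8, (8|7)+1 == 16 == 8<<1, whereas for 12, (12|11)+1 == 16 but
 * 12<<1 == 24, so non-power-of-2 alignments are rejected.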
829 if ((alignment | (alignment - 1)) + 1 != (alignment << 1)) {
833 if (alignment < sizeof(void *)) {
839 * Our zone mechanism guarantees same-sized alignment for any
840 * power-of-2 allocation. If size is a power-of-2 and reasonable
841 * we can just call _slaballoc() and be done. We round size up
842 * to the nearest alignment boundary to improve our odds of
843 * it becoming a power-of-2 if it wasn't before.
845 if (size <= alignment)
848 size = (size + alignment - 1) & ~(size_t)(alignment - 1);
849 if (size < PAGE_SIZE && (size | (size - 1)) + 1 == (size << 1)) {
850 *memptr = _slaballoc(size, 0);
851 return(*memptr ? 0 : ENOMEM);
855 * Otherwise locate a zone with a chunking that matches
856 * the requested alignment, within reason. Consider two cases:
858 * (1) A 1K allocation on a 32-byte alignment. The first zoneindex
859 * we find will be the best fit because the chunking will be
860 * greater or equal to the alignment.
862 * (2) A 513-byte allocation on a 256-byte alignment. In this case
863 * the first zoneindex we find will be for 576 byte allocations
864 * with a chunking of 64, which is not sufficient. To fix this
865 * we simply find the nearest power-of-2 >= size and use the
866 * same side-effect of _slaballoc() which guarantees
867 * same-alignment on a power-of-2 allocation.
869 if (size < PAGE_SIZE) {
870 zi = zoneindex(&size, &chunking);
871 if (chunking >= alignment) {
872 *memptr = _slaballoc(size, 0);
873 return(*memptr ? 0 : ENOMEM);
879 while (alignment < size)
881 *memptr = _slaballoc(alignment, 0);
882 return(*memptr ? 0 : ENOMEM);
886 * If the slab allocator cannot handle it, use _vmem_alloc().
888 * Alignment must be adjusted up to at least PAGE_SIZE in this case.
890 if (alignment < PAGE_SIZE)
891 alignment = PAGE_SIZE;
892 if (size < alignment)
894 size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
895 *memptr = _vmem_alloc(size, alignment, 0);
899 big = _slaballoc(sizeof(struct bigalloc), 0);
901 _vmem_free(*memptr, size);
905 bigp = bigalloc_lock(*memptr);
908 big->bytes = size; /* no excess */
911 bigalloc_unlock(*memptr);
917 * free() (SLAB ALLOCATOR) - do the obvious
923 _slabfree(ptr, 0, NULL);
927 * _slaballoc() (SLAB ALLOCATOR)
929 * Allocate memory via the slab allocator. If the request is too large,
930 * or if it is page-aligned beyond a certain size, we fall back to the
934 _slaballoc(size_t size, int flags)
948 * Handle the degenerate size == 0 case. Yes, this does happen.
949 * Return a special pointer. This is to maintain compatibility with
950 * the original malloc implementation. Certain devices, such as the
951 * adaptec driver, not only allocate 0 bytes, they check for NULL and
952 * also realloc() later on. Joy.
955 return(ZERO_LENGTH_PTR);
957 /* Capture global flags */
958 flags |= g_malloc_flags;
961 * Handle large allocations directly. There should not be very many
962 * of these so performance is not a big issue.
964 * The backend allocator is pretty nasty on an SMP system. Use the
965 * slab allocator for one and two page-sized chunks even though we
966 * lose some efficiency.
968 if (size >= ZoneLimit ||
969 ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
974 * Page-align and cache-color in case of virtually indexed
975 * physically tagged L1 caches (aka SandyBridge). No sweat
976 * otherwise, so just do it.
978 * (don't count as excess).
980 size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
981 if ((size & (PAGE_SIZE * 2 - 1)) == 0)
985 * Try to reuse a cached big block to avoid mmap'ing. If it
986 * turns out not to fit our requirements we throw it away
987 * and allocate normally.
990 if (size <= BIGCACHE_LIMIT) {
991 big = bigcache_find_alloc(size);
992 if (big && big->bytes < size) {
993 _slabfree(big->base, FASTSLABREALLOC, &big);
999 if (flags & SAFLAG_ZERO)
1002 chunk = _vmem_alloc(size, PAGE_SIZE, flags);
1006 big = _slaballoc(sizeof(struct bigalloc), 0);
1008 _vmem_free(chunk, size);
1016 bigp = bigalloc_lock(chunk);
1017 if (big->active < big->bytes) {
1018 atomic_add_long(&excess_alloc,
1019 big->bytes - big->active);
1023 bigalloc_unlock(chunk);
1024 handle_excess_big();
1029 /* Compute allocation zone; zoneindex will panic on excessive sizes */
1030 zi = zoneindex(&size, &chunking);
1031 MASSERT(zi < NZONES);
1033 obj = mtmagazine_alloc(zi);
1035 if (flags & SAFLAG_ZERO)
1040 slgd = &SLGlobalData;
1044 * Attempt to allocate out of an existing zone. If all zones are
1045 * exhausted pull one off the free list or allocate a new one.
1047 if ((z = slgd->ZoneAry[zi]) == NULL) {
1048 z = zone_alloc(flags);
1053 * How big is the base structure?
1055 #if defined(INVARIANTS)
1057 * Make room for z_Bitmap. An exact calculation is
1058 * somewhat more complicated so don't make an exact
1061 off = offsetof(struct slzone,
1062 z_Bitmap[(ZoneSize / size + 31) / 32]);
1063 bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
1065 off = sizeof(struct slzone);
1069 * Align the storage in the zone based on the chunking.
1071 * Guarantee power-of-2 alignment for power-of-2-sized
1072 * chunks. Otherwise align based on the chunking size
1073 * (typically 8 or 16 bytes for small allocations).
1075 * NOTE: Allocations >= ZoneLimit are governed by the
1076 * bigalloc code and typically only guarantee page-alignment.
1078 * Set initial conditions for UIndex near the zone header
1079 * to reduce unnecessary page faults, vs semi-randomization
1080 * to improve L1 cache saturation.
1082 if ((size | (size - 1)) + 1 == (size << 1))
1083 off = roundup2(off, size);
1085 off = roundup2(off, chunking);
1086 z->z_Magic = ZALLOC_SLAB_MAGIC;
1087 z->z_ZoneIndex = zi;
1088 z->z_NMax = (ZoneSize - off) / size;
1089 z->z_NFree = z->z_NMax;
1090 z->z_BasePtr = (char *)z + off;
1091 z->z_UIndex = z->z_UEndIndex = 0;
1092 z->z_ChunkSize = size;
1093 z->z_FirstFreePg = ZonePageCount;
1094 z->z_Next = slgd->ZoneAry[zi];
1095 slgd->ZoneAry[zi] = z;
1096 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
1097 flags &= ~SAFLAG_ZERO; /* already zero'd */
1098 flags |= SAFLAG_PASSIVE;
1102 * Slide the base index for initial allocations out of the
1103 * next zone we create so we do not over-weight the lower
1104 * part of the cpu memory caches.
1106 slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
1107 & (ZALLOC_MAX_ZONE_SIZE - 1);
1111 * Ok, we have a zone from which at least one chunk is available.
1113 * Remove us from the ZoneAry[] when we become empty
1115 MASSERT(z->z_NFree > 0);
1117 if (--z->z_NFree == 0) {
1118 slgd->ZoneAry[zi] = z->z_Next;
1123 * Locate a chunk in a free page. This attempts to localize
1124 * reallocations into earlier pages without us having to sort
1125 * the chunk list. A chunk may still overlap a page boundary.
1127 while (z->z_FirstFreePg < ZonePageCount) {
1128 if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
1131 * Diagnostic: c_Next is not total garbage.
1133 MASSERT(chunk->c_Next == NULL ||
1134 ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
1135 ((intptr_t)chunk & IN_SAME_PAGE_MASK));
1138 chunk_mark_allocated(z, chunk);
1140 MASSERT((uintptr_t)chunk & ZoneMask);
1141 z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
1148 * No chunks are available but NFree said we had some memory,
1149 * so it must be available in the never-before-used-memory
1150 * area governed by UIndex. The consequences are very
1151 * serious if our zone got corrupted so we use an explicit
1152 * panic rather than a KASSERT.
1154 chunk = (slchunk_t)(z->z_BasePtr + z->z_UIndex * size);
1156 if (++z->z_UIndex == z->z_NMax)
1158 if (z->z_UIndex == z->z_UEndIndex) {
1159 if (z->z_NFree != 0)
1160 _mpanic("slaballoc: corrupted zone");
1163 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
1164 flags &= ~SAFLAG_ZERO;
1165 flags |= SAFLAG_PASSIVE;
1167 #if defined(INVARIANTS)
1168 chunk_mark_allocated(z, chunk);
1173 if (flags & SAFLAG_ZERO) {
1176 } else if ((flags & (SAFLAG_ZERO|SAFLAG_PASSIVE)) == 0) {
1177 if (use_malloc_pattern) {
1178 for (i = 0; i < size; i += sizeof(int)) {
1179 *(int *)((char *)chunk + i) = -1;
1182 /* avoid accidental double-free check */
1183 chunk->c_Next = (void *)-1;
1193 * Reallocate memory within the chunk
1196 _slabrealloc(void *ptr, size_t size)
1203 if (ptr == NULL || ptr == ZERO_LENGTH_PTR) {
1204 return(_slaballoc(size, 0));
1209 return(ZERO_LENGTH_PTR);
1213 * Handle oversized allocations.
1215 if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
1219 while ((big = *bigp) != NULL) {
1220 if (big->base == ptr) {
1221 size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
1222 bigbytes = big->bytes;
1225 * If it already fits determine if it makes
1226 * sense to shrink/reallocate. Try to optimize
1227 * programs which stupidly make incremental
1228 * reallocations larger or smaller by scaling
1229 * the allocation. Also deal with potential
1232 if (size >= (bigbytes >> 1) &&
1234 if (big->active != size) {
1235 atomic_add_long(&excess_alloc,
1240 bigalloc_unlock(ptr);
1245 * For large reallocations, allocate more space
1246 * than we need to try to avoid excessive
1247 * reallocations later on.
1249 chunking = size + (size >> 3);
1250 chunking = (chunking + PAGE_MASK) &
1254 * Try to allocate adjacently in case the
1255 * program is idiotically realloc()ing a
1256 * huge memory block just slightly bigger.
1257 * (llvm's llc tends to do this a lot).
1259 * (MAP_TRYFIXED forces mmap to fail if there
1260 * is already something at the address).
1262 if (chunking > bigbytes) {
1264 int errno_save = errno;
1266 addr = mmap((char *)ptr + bigbytes,
1267 chunking - bigbytes,
1268 PROT_READ|PROT_WRITE,
1269 MAP_PRIVATE|MAP_ANON|
1273 if (addr == (char *)ptr + bigbytes) {
1274 atomic_add_long(&excess_alloc,
1279 big->bytes = chunking;
1281 bigalloc_unlock(ptr);
1285 MASSERT((void *)addr == MAP_FAILED);
1289 * Failed, unlink big and allocate fresh.
1290 * (note that we have to leave (big) intact
1291 * in case the slaballoc fails).
1294 bigalloc_unlock(ptr);
1295 if ((nptr = _slaballoc(size, 0)) == NULL) {
1297 bigp = bigalloc_lock(ptr);
1300 bigalloc_unlock(ptr);
1303 if (size > bigbytes)
1305 bcopy(ptr, nptr, size);
1306 atomic_add_long(&excess_alloc, big->active -
1308 _slabfree(ptr, FASTSLABREALLOC, &big);
1314 bigalloc_unlock(ptr);
1315 handle_excess_big();
1319 * Get the original allocation's zone. If the new request winds
1320 * up using the same chunk size we do not have to do anything.
1322 * NOTE: We don't have to lock the globaldata here, the fields we
1323 * access here will not change at least as long as we have control
1324 * over the allocation.
1326 z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
1327 MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1330 * Use zoneindex() to chunk-align the new size, as long as the
1331 * new size is not too large.
1333 if (size < ZoneLimit) {
1334 zoneindex(&size, &chunking);
1335 if (z->z_ChunkSize == size) {
1341 * Allocate memory for the new request size and copy as appropriate.
1343 if ((nptr = _slaballoc(size, 0)) != NULL) {
1344 if (size > z->z_ChunkSize)
1345 size = z->z_ChunkSize;
1346 bcopy(ptr, nptr, size);
1347 _slabfree(ptr, 0, NULL);
1354 * free (SLAB ALLOCATOR)
1356 * Free a memory block previously allocated by malloc. Note that we do not
1357 * attempt to update ks_loosememuse as MP races could prevent us from
1358 * checking memory limits in malloc.
1361 * FASTSLABREALLOC Fast call from realloc, *rbigp already
1367 _slabfree(void *ptr, int flags, bigalloc_t *rbigp)
1373 slglobaldata_t slgd;
1378 /* Fast realloc path for big allocations */
1379 if (flags & FASTSLABREALLOC) {
1381 goto fastslabrealloc;
1385 * Handle NULL frees and special 0-byte allocations
1389 if (ptr == ZERO_LENGTH_PTR)
1393 * Handle oversized allocations.
1395 if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
1396 while ((big = *bigp) != NULL) {
1397 if (big->base == ptr) {
1399 atomic_add_long(&excess_alloc, big->active -
1401 bigalloc_unlock(ptr);
1404 * Try to stash the block we are freeing,
1405 * potentially receiving another block in
1406 * return which must be freed.
1409 if (big->bytes <= BIGCACHE_LIMIT) {
1410 big = bigcache_find_free(big);
1414 ptr = big->base; /* reload */
1416 _slabfree(big, 0, NULL);
1418 MASSERT(sizeof(weirdary) <= size);
1419 bcopy(weirdary, ptr, sizeof(weirdary));
1421 _vmem_free(ptr, size);
1426 bigalloc_unlock(ptr);
1427 handle_excess_big();
1431 * Zone case. Figure out the zone based on the fact that it is
1434 z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
1435 MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1437 size = z->z_ChunkSize;
1438 zi = z->z_ZoneIndex;
1440 if (g_malloc_flags & SAFLAG_ZERO)
1443 if (mtmagazine_free(zi, ptr) == 0)
1446 pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
1448 slgd = &SLGlobalData;
1453 * Attempt to detect a double-free. To reduce overhead we only check
1454 * if there appears to be a link pointer at the base of the data.
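 *
 * (If the first word of the chunk points into the same page, walk that
 * page's free list; finding the chunk already on it means a double
 * free.)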
1456 if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
1459 for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
1461 _mpanic("Double free at %p", chunk);
1464 chunk_mark_free(z, chunk);
1468 * Put weird data into the memory to detect modifications after
1469 * freeing, illegal pointer use after freeing (we should fault on
1470 * the odd address), and so forth.
1473 if (z->z_ChunkSize < sizeof(weirdary))
1474 bcopy(weirdary, chunk, z->z_ChunkSize);
1476 bcopy(weirdary, chunk, sizeof(weirdary));
1480 * Add this free non-zero'd chunk to a linked list for reuse, adjust
1483 chunk->c_Next = z->z_PageAry[pgno];
1484 z->z_PageAry[pgno] = chunk;
1485 if (z->z_FirstFreePg > pgno)
1486 z->z_FirstFreePg = pgno;
1489 * Bump the number of free chunks. If it becomes non-zero the zone
1490 * must be added back onto the appropriate list.
1492 if (z->z_NFree++ == 0) {
1493 z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
1494 slgd->ZoneAry[z->z_ZoneIndex] = z;
1498 * If the zone becomes totally free then release it.
1500 if (z->z_NFree == z->z_NMax) {
1503 pz = &slgd->ZoneAry[z->z_ZoneIndex];
1505 pz = &(*pz)->z_Next;
1510 /* slgd lock released */
1516 #if defined(INVARIANTS)
1518 * Helper routines for sanity checks
1522 chunk_mark_allocated(slzone_t z, void *chunk)
1524 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1527 MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
1528 bitptr = &z->z_Bitmap[bitdex >> 5];
1530 MASSERT((*bitptr & (1 << bitdex)) == 0);
1531 *bitptr |= 1 << bitdex;
1536 chunk_mark_free(slzone_t z, void *chunk)
1538 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1541 MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
1542 bitptr = &z->z_Bitmap[bitdex >> 5];
1544 MASSERT((*bitptr & (1 << bitdex)) != 0);
1545 *bitptr &= ~(1 << bitdex);
1551 * Allocate an object from the magazine. NULL is returned and *burst is adjusted
1552 * if the magazine is empty.
1554 static __inline void *
1555 magazine_alloc(struct magazine *mp, int *burst)
1561 if (MAGAZINE_NOTEMPTY(mp)) {
1562 obj = mp->objects[--mp->rounds];
1567 * Return burst factor to caller along with NULL
1569 if ((mp->flags & M_BURST) && (burst != NULL)) {
1570 *burst = mp->burst_factor;
1572 /* Reduce burst factor by NSCALE; if it hits 1, disable BURST */
1573 if ((mp->flags & M_BURST) && (mp->flags & M_BURST_EARLY) &&
1575 mp->burst_factor -= M_BURST_NSCALE;
1576 if (mp->burst_factor <= 1) {
1577 mp->burst_factor = 1;
1578 mp->flags &= ~(M_BURST);
1579 mp->flags &= ~(M_BURST_EARLY);
1586 magazine_free(struct magazine *mp, void *p)
1588 if (mp != NULL && MAGAZINE_NOTFULL(mp)) {
1589 mp->objects[mp->rounds++] = p;
1597 mtmagazine_alloc(int zi)
1600 struct magazine *mp, *emptymag;
1605 * Do not try to access per-thread magazines while the mtmagazine
1606 * is being initialized or destroyed.
1613 * Primary per-thread allocation loop
1617 * If the loaded magazine has rounds, allocate and return
1619 mp = tp->mags[zi].loaded;
1620 obj = magazine_alloc(mp, NULL);
1625 * If the prev magazine is full, swap with the loaded
1626 * magazine and retry.
1628 mp = tp->mags[zi].prev;
1629 if (mp && MAGAZINE_FULL(mp)) {
1630 MASSERT(mp->rounds != 0);
1631 swap_mags(&tp->mags[zi]); /* prev now empty */
1636 * Try to get a full magazine from the depot. Cycle
1637 * through depot(full)->loaded->prev->depot(empty).
1638 * Retry if a full magazine was available from the depot.
1640 * Return NULL (caller will fall through) if no magazines
1641 * can be found anywhere.
1645 emptymag = tp->mags[zi].prev;
1647 SLIST_INSERT_HEAD(&d->empty, emptymag, nextmagazine);
1648 tp->mags[zi].prev = tp->mags[zi].loaded;
1649 mp = SLIST_FIRST(&d->full); /* loaded magazine */
1650 tp->mags[zi].loaded = mp;
1652 SLIST_REMOVE_HEAD(&d->full, nextmagazine);
1653 MASSERT(MAGAZINE_NOTEMPTY(mp));
1665 mtmagazine_free(int zi, void *ptr)
1668 struct magazine *mp, *loadedmag;
1673 * Do not try to access per-thread magazines while the mtmagazine
1674 * is being initialized or destroyed.
1681 * Primary per-thread freeing loop
1685 * Make sure a new magazine is available in case we have
1686 * to use it. Staging the newmag allows us to avoid
1687 * some locking/reentrancy complexity.
1689 * Temporarily disable the per-thread caches for this
1690 * allocation to avoid reentrancy and/or to avoid a
1691 * stack overflow if the [zi] happens to be the same that
1692 * would be used to allocate the new magazine.
1694 if (tp->newmag == NULL) {
1696 tp->newmag = _slaballoc(sizeof(struct magazine),
1699 if (tp->newmag == NULL) {
1706 * If the loaded magazine has space, free directly to it
1708 rc = magazine_free(tp->mags[zi].loaded, ptr);
1713 * If the prev magazine is empty, swap with the loaded
1714 * magazine and retry.
1716 mp = tp->mags[zi].prev;
1717 if (mp && MAGAZINE_EMPTY(mp)) {
1718 MASSERT(mp->rounds == 0);
1719 swap_mags(&tp->mags[zi]); /* prev now full */
1724 * Try to get an empty magazine from the depot. Cycle
1725 * through depot(empty)->loaded->prev->depot(full).
1726 * Retry if an empty magazine was available from the depot.
1731 if ((loadedmag = tp->mags[zi].prev) != NULL)
1732 SLIST_INSERT_HEAD(&d->full, loadedmag, nextmagazine);
1733 tp->mags[zi].prev = tp->mags[zi].loaded;
1734 mp = SLIST_FIRST(&d->empty);
1736 tp->mags[zi].loaded = mp;
1737 SLIST_REMOVE_HEAD(&d->empty, nextmagazine);
1738 MASSERT(MAGAZINE_NOTFULL(mp));
1742 mp->capacity = M_MAX_ROUNDS;
1745 tp->mags[zi].loaded = mp;
1754 mtmagazine_init(void)
1758 error = pthread_key_create(&thread_mags_key, mtmagazine_destructor);
1764 * This function is only used by the thread exit destructor
1767 mtmagazine_drain(struct magazine *mp)
1771 while (MAGAZINE_NOTEMPTY(mp)) {
1772 obj = magazine_alloc(mp, NULL);
1773 _slabfree(obj, 0, NULL);
1778 * mtmagazine_destructor()
1780 * When a thread exits, we reclaim all its resources; all its magazines are
1781 * drained and the structures are freed.
1783 * WARNING! The destructor can be called multiple times if the larger user
1784 * program has its own destructors which run after ours and which
1785 * allocate or free memory.
1788 mtmagazine_destructor(void *thrp)
1790 thr_mags *tp = thrp;
1791 struct magazine *mp;
1795 * Prevent further use of mtmagazines while we are destructing
1796 * them, as well as for any destructors which are run after us
1797 * prior to the thread actually being destroyed.
1801 for (i = 0; i < NZONES; i++) {
1802 mp = tp->mags[i].loaded;
1803 tp->mags[i].loaded = NULL;
1805 if (MAGAZINE_NOTEMPTY(mp))
1806 mtmagazine_drain(mp);
1807 _slabfree(mp, 0, NULL);
1810 mp = tp->mags[i].prev;
1811 tp->mags[i].prev = NULL;
1813 if (MAGAZINE_NOTEMPTY(mp))
1814 mtmagazine_drain(mp);
1815 _slabfree(mp, 0, NULL);
1822 _slabfree(mp, 0, NULL);
1829 * Attempt to allocate a zone from the zone magazine; the zone magazine has
1830 * M_BURST_EARLY enabled, so honor the burst request from the magazine.
1833 zone_alloc(int flags)
1835 slglobaldata_t slgd = &SLGlobalData;
1840 zone_magazine_lock();
1843 z = magazine_alloc(&zone_magazine, &burst);
1844 if (z == NULL && burst == 1) {
1845 zone_magazine_unlock();
1846 z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
1847 } else if (z == NULL) {
1848 z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
1850 for (i = 1; i < burst; i++) {
1851 j = magazine_free(&zone_magazine,
1852 (char *) z + (ZoneSize * i));
1856 zone_magazine_unlock();
1858 z->z_Flags |= SLZF_UNOTZEROD;
1859 zone_magazine_unlock();
1868 * Release a zone and unlock the slgd lock.
1873 slglobaldata_t slgd = &SLGlobalData;
1874 void *excess[M_ZONE_ROUNDS - M_LOW_ROUNDS] = {};
1877 zone_magazine_lock();
1880 bzero(z, sizeof(struct slzone));
1883 madvise(z, ZoneSize, MADV_FREE);
1885 i = magazine_free(&zone_magazine, z);
1888 * If we failed to free, collect excess magazines; release the zone
1889 * magazine lock, and then free to the system via _vmem_free. Re-enable
1890 * BURST mode for the magazine.
1893 j = zone_magazine.rounds - zone_magazine.low_factor;
1894 for (i = 0; i < j; i++) {
1895 excess[i] = magazine_alloc(&zone_magazine, NULL);
1896 MASSERT(excess[i] != NULL);
1899 zone_magazine_unlock();
1901 for (i = 0; i < j; i++)
1902 _vmem_free(excess[i], ZoneSize);
1904 _vmem_free(z, ZoneSize);
1906 zone_magazine_unlock();
1913 * Directly map memory in PAGE_SIZE'd chunks with the specified
1916 * Alignment must be a multiple of PAGE_SIZE.
1918 * Size must be >= alignment.
1921 _vmem_alloc(size_t size, size_t align, int flags)
1928 * Map anonymous private memory.
1930 addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
1931 MAP_PRIVATE|MAP_ANON, -1, 0);
1932 if (addr == MAP_FAILED)
1936 * Check alignment. The misaligned offset is also the excess
1937 * amount. If misaligned, unmap the excess so we have a chance of
1938 * mapping at the next alignment point and recursively try again.
1940 * BBBBBBBBBBB BBBBBBBBBBB BBBBBBBBBBB block alignment
1941 * aaaaaaaaa aaaaaaaaaaa aa mis-aligned allocation
1942 * xxxxxxxxx final excess calculation
1943 * ^ returned address
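 *
 * Worked example: with align = 64KB and mmap() returning an address
 * 0x3000 bytes past a 64KB boundary, excess becomes 0xD000; the tail
 * from the next boundary onward is unmapped, the allocation is retried
 * recursively (hopefully landing on that boundary), and the 0xD000-byte
 * leader is then unmapped.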
1945 excess = (uintptr_t)addr & (align - 1);
1948 excess = align - excess;
1951 munmap(save + excess, size - excess);
1952 addr = _vmem_alloc(size, align, flags);
1953 munmap(save, excess);
1955 return((void *)addr);
1961 * Free a chunk of memory allocated with _vmem_alloc()
1964 _vmem_free(void *ptr, size_t size)
1970 * Panic on fatal conditions
1973 _mpanic(const char *ctl, ...)
1977 if (malloc_panic == 0) {
1980 vfprintf(stderr, ctl, va);
1981 fprintf(stderr, "\n");