libc - Fix livelock in nmalloc
[dragonfly.git] / lib / libc / stdlib / nmalloc.c
1/*
2 * NMALLOC.C - New Malloc (ported from kernel slab allocator)
3 *
4 * Copyright (c) 2003,2004,2009,2010 The DragonFly Project. All rights reserved.
5 *
6 * This code is derived from software contributed to The DragonFly Project
7 * by Matthew Dillon <dillon@backplane.com> and by
8 * Venkatesh Srinivas <me@endeavour.zapto.org>.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 *
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
19 * distribution.
20 * 3. Neither the name of The DragonFly Project nor the names of its
21 * contributors may be used to endorse or promote products derived
22 * from this software without specific, prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
26 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
27 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
28 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
34 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * $Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 vsrinivas Exp $
38 */
39/*
40 * This module implements a slab allocator drop-in replacement for the
41 * libc malloc().
42 *
43 * A slab allocator reserves a ZONE for each chunk size, then lays the
44 * chunks out in an array within the zone. Allocation and deallocation
45 * is nearly instantaneous, and overhead losses are limited to a fixed
46 * worst-case amount.
47 *
48 * The slab allocator does not have to pre-initialize the list of
49 * free chunks for each zone, and the underlying VM will not be
50 * touched at all beyond the zone header until an actual allocation
51 * needs it.
52 *
53 * Slab management and locking is done on a per-zone basis.
54 *
55 * Alloc Size Chunking Number of zones
56 * 0-127 8 16
57 * 128-255 16 8
58 * 256-511 32 8
59 * 512-1023 64 8
60 * 1024-2047 128 8
61 * 2048-4095 256 8
62 * 4096-8191 512 8
63 * 8192-16383 1024 8
64 * 16384-32767 2048 8
65 *
66 * Allocations >= ZoneLimit (16K) go directly to mmap and a hash table
67 * is used to locate them on free(). One- and two-page allocations use the
68 * zone mechanic to avoid excessive mmap()/munmap() calls.
69 *
70 * API FEATURES AND SIDE EFFECTS
71 *
72 * + power-of-2 sized allocations up to a page will be power-of-2 aligned.
73 * Above that power-of-2 sized allocations are page-aligned. Non
74 * power-of-2 sized allocations are aligned the same as the chunk
75 * size for their zone.
76 * + malloc(0) returns a special non-NULL value
77 * + ability to allocate arbitrarily large chunks of memory
78 * + realloc will reuse the passed pointer if possible, within the
79 * limitations of the zone chunking.
80 *
81 * Multithreaded enhancements for small allocations introduced August 2010.
82 * These are in the spirit of 'libumem'. See:
83 * Bonwick, J.; Adams, J. (2001). "Magazines and Vmem: Extending the
84 * slab allocator to many CPUs and arbitrary resources". In Proc. 2001
85 * USENIX Technical Conference. USENIX Association.
86 *
87 * TUNING
88 *
89 * The value of the environment variable MALLOC_OPTIONS is a character string
90 * containing various flags to tune nmalloc.
91 *
92 * 'U' / ['u'] Generate / do not generate utrace entries for ktrace(1)
93 * This will generate utrace events for all malloc,
94 * realloc, and free calls. There are tools (mtrplay) to
95 * replay an allocation pattern or to graph heap structure
96 * (mtrgraph) which can interpret these logs.
97 * 'Z' / ['z'] Zero out / do not zero all allocations.
98 * Each new byte of memory allocated by malloc, realloc, or
99 * reallocf will be initialized to 0. This is intended for
100 * debugging and will affect performance negatively.
101 * 'H' / ['h'] Pass a hint to the kernel about pages unused by the
102 * allocation functions.
103 */
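
/*
 * Illustrative usage sketch, not part of the original source: the tuning
 * flags above are normally supplied through the environment before the
 * program starts, e.g. "MALLOC_OPTIONS=ZU ./prog" to zero all allocations
 * and emit utrace records, or "MALLOC_OPTIONS=zu" to turn both off. The
 * disabled snippet below shows the in-process equivalent and assumes it
 * runs before the first call into malloc(); the function name is made up.
 */
#if 0
static void
nmalloc_tuning_example(void)
{
	/* Equivalent to running the program with MALLOC_OPTIONS=ZU */
	setenv("MALLOC_OPTIONS", "ZU", 1);	/* 'Z' zero allocations, 'U' utrace */
}
#endif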
104
105/* cc -shared -fPIC -g -O -I/usr/src/lib/libc/include -o nmalloc.so nmalloc.c */
106
107#include "libc_private.h"
108
109#include <sys/param.h>
110#include <sys/types.h>
111#include <sys/mman.h>
112#include <sys/queue.h>
113#include <sys/uio.h>
114#include <sys/ktrace.h>
115#include <stdio.h>
116#include <stdint.h>
117#include <stdlib.h>
118#include <stdarg.h>
119#include <stddef.h>
120#include <unistd.h>
121#include <string.h>
122#include <fcntl.h>
123#include <errno.h>
124#include <pthread.h>
125
126#include "spinlock.h"
127#include "un-namespace.h"
128
129static char rcsid[] = "$Id: nmalloc.c,v 1.37 2010/07/23 08:20:35 sv5679 Exp $";
130
131/*
132 * Linked list of large allocations
133 */
134typedef struct bigalloc {
135 struct bigalloc *next; /* hash link */
136 void *base; /* base pointer */
137 u_long bytes; /* bytes allocated */
138} *bigalloc_t;
139
140/*
141 * Note that any allocations which are exact multiples of PAGE_SIZE, or
142 * which are >= ZALLOC_ZONE_LIMIT, will fall through to the kmem subsystem.
143 */
144#define ZALLOC_ZONE_LIMIT (16 * 1024) /* max slab-managed alloc */
145#define ZALLOC_MIN_ZONE_SIZE (32 * 1024) /* minimum zone size */
146#define ZALLOC_MAX_ZONE_SIZE (128 * 1024) /* maximum zone size */
147#define ZALLOC_ZONE_SIZE (64 * 1024)
148#define ZALLOC_SLAB_MAGIC 0x736c6162 /* magic sanity */
149#define ZALLOC_SLAB_SLIDE 20 /* L1-cache skip */
150
151#if ZALLOC_ZONE_LIMIT == 16384
152#define NZONES 72
153#elif ZALLOC_ZONE_LIMIT == 32768
154#define NZONES 80
155#else
156#error "I couldn't figure out NZONES"
157#endif
158
159/*
160 * Chunk structure for free elements
161 */
162typedef struct slchunk {
163 struct slchunk *c_Next;
164} *slchunk_t;
165
166/*
167 * The IN-BAND zone header is placed at the beginning of each zone.
168 */
169struct slglobaldata;
170
171typedef struct slzone {
172 int32_t z_Magic; /* magic number for sanity check */
173 int z_NFree; /* total free chunks / ualloc space */
174 struct slzone *z_Next; /* ZoneAry[] link if z_NFree non-zero */
175 int z_NMax; /* maximum free chunks */
176 char *z_BasePtr; /* pointer to start of chunk array */
177 int z_UIndex; /* current initial allocation index */
178 int z_UEndIndex; /* last (first) allocation index */
179 int z_ChunkSize; /* chunk size for validation */
180 int z_FirstFreePg; /* chunk list on a page-by-page basis */
181 int z_ZoneIndex;
182 int z_Flags;
183 struct slchunk *z_PageAry[ZALLOC_ZONE_SIZE / PAGE_SIZE];
184#if defined(INVARIANTS)
185 __uint32_t z_Bitmap[]; /* bitmap of free chunks / sanity */
186#endif
187} *slzone_t;
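
/*
 * Illustrative sketch, not part of the original source: because zones are
 * allocated ZoneSize-aligned (64KB here), _slabfree() and _slabrealloc()
 * below recover a chunk's in-band zone header by simply masking the chunk
 * pointer. The pointer value and the function name are hypothetical; the
 * 0xffff mask assumes ZALLOC_ZONE_SIZE == 64 * 1024.
 */
#if 0
static void
ptr_to_zone_example(void)
{
	void *ptr = (void *)(uintptr_t)0x80123abc;	/* hypothetical chunk */
	slzone_t z;

	z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)0xffff);
	/* z == (slzone_t)0x80120000; z->z_ChunkSize gives the chunk size */
}
#endif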
188
189typedef struct slglobaldata {
190 spinlock_t Spinlock;
191 slzone_t ZoneAry[NZONES];/* linked list of zones NFree > 0 */
192 int JunkIndex;
193} *slglobaldata_t;
194
195#define SLZF_UNOTZEROD 0x0001
196
197#define FASTSLABREALLOC 0x02
198
199/*
200 * Misc constants. Note that allocations that are exact multiples of
201 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
202 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
203 */
204#define MIN_CHUNK_SIZE 8 /* in bytes */
205#define MIN_CHUNK_MASK (MIN_CHUNK_SIZE - 1)
206#define IN_SAME_PAGE_MASK (~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
207
208/*
209 * The WEIRD_ADDR is used as known text to copy into free objects to
210 * try to create deterministic failure cases if the data is accessed after
211 * free.
212 *
213 * WARNING: A limited number of spinlocks are available, BIGXSIZE should
214 * not be larger than 64.
215 */
216#define WEIRD_ADDR 0xdeadc0de
217#define MAX_COPY sizeof(weirdary)
218#define ZERO_LENGTH_PTR ((void *)&malloc_dummy_pointer)
219
220#define BIGHSHIFT 10 /* bigalloc hash table */
221#define BIGHSIZE (1 << BIGHSHIFT)
222#define BIGHMASK (BIGHSIZE - 1)
223#define BIGXSIZE (BIGHSIZE / 16) /* bigalloc lock table */
224#define BIGXMASK (BIGXSIZE - 1)
225
226#define SAFLAG_ZERO 0x0001
227#define SAFLAG_PASSIVE 0x0002
228
229/*
230 * Thread control
231 */
232
233#define arysize(ary) (sizeof(ary)/sizeof((ary)[0]))
234
235#define MASSERT(exp) do { if (__predict_false(!(exp))) \
236 _mpanic("assertion: %s in %s", \
237 #exp, __func__); \
238 } while (0)
239
240/*
241 * Magazines
242 */
243
244#define M_MAX_ROUNDS 64
245#define M_ZONE_ROUNDS 64
246#define M_LOW_ROUNDS 32
247#define M_INIT_ROUNDS 8
248#define M_BURST_FACTOR 8
249#define M_BURST_NSCALE 2
250
251#define M_BURST 0x0001
252#define M_BURST_EARLY 0x0002
253
254struct magazine {
255 SLIST_ENTRY(magazine) nextmagazine;
256
257 int flags;
258 int capacity; /* Max rounds in this magazine */
259 int rounds; /* Current number of free rounds */
260 int burst_factor; /* Number of blocks to prefill with */
261 int low_factor; /* Free till low_factor from full mag */
262 void *objects[M_MAX_ROUNDS];
263};
264
265SLIST_HEAD(magazinelist, magazine);
266
267static spinlock_t zone_mag_lock;
268static struct magazine zone_magazine = {
269 .flags = M_BURST | M_BURST_EARLY,
270 .capacity = M_ZONE_ROUNDS,
271 .rounds = 0,
272 .burst_factor = M_BURST_FACTOR,
273 .low_factor = M_LOW_ROUNDS
274};
275
276#define MAGAZINE_FULL(mp) (mp->rounds == mp->capacity)
277#define MAGAZINE_NOTFULL(mp) (mp->rounds < mp->capacity)
278#define MAGAZINE_EMPTY(mp) (mp->rounds == 0)
279#define MAGAZINE_NOTEMPTY(mp) (mp->rounds != 0)
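
/*
 * Illustrative sketch, not part of the original source: a magazine is a
 * simple LIFO stack of object pointers. magazine_free() (defined below)
 * pushes while there is room and magazine_alloc() pops the most recently
 * freed object. The function name and local variables are hypothetical.
 */
#if 0
static void
magazine_lifo_example(void)
{
	struct magazine m = { .capacity = M_MAX_ROUNDS, .rounds = 0 };
	int a, b;
	void *p1, *p2;

	magazine_free(&m, &a);			/* rounds 0 -> 1 */
	magazine_free(&m, &b);			/* rounds 1 -> 2 */
	p1 = magazine_alloc(&m, NULL);		/* pops &b */
	p2 = magazine_alloc(&m, NULL);		/* pops &a */
}
#endif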
280
281/* Each thread will have a pair of magazines per size-class (NZONES).
282 * The loaded magazine will support immediate allocations; the previous
283 * magazine will either be full or empty and can be swapped as needed. */
284typedef struct magazine_pair {
285 struct magazine *loaded;
286 struct magazine *prev;
287} magazine_pair;
288
289/* A depot is a collection of magazines for a single zone. */
290typedef struct magazine_depot {
291 struct magazinelist full;
292 struct magazinelist empty;
293 pthread_spinlock_t lock;
294} magazine_depot;
295
296typedef struct thr_mags {
297 magazine_pair mags[NZONES];
298 int init;
299} thr_mags;
300
301/* With this attribute set, do not require a function call for accessing
302 * this variable when the code is compiled with -fPIC */
303#define TLS_ATTRIBUTE __attribute__ ((tls_model ("initial-exec")));
304
305static int mtmagazine_free_live;
306static __thread thr_mags thread_mags TLS_ATTRIBUTE;
307static pthread_key_t thread_mags_key;
308static pthread_once_t thread_mags_once = PTHREAD_ONCE_INIT;
309static magazine_depot depots[NZONES];
310
311/*
312 * Fixed globals (not per-cpu)
313 */
314static const int ZoneSize = ZALLOC_ZONE_SIZE;
315static const int ZoneLimit = ZALLOC_ZONE_LIMIT;
316static const int ZonePageCount = ZALLOC_ZONE_SIZE / PAGE_SIZE;
317static const int ZoneMask = ZALLOC_ZONE_SIZE - 1;
318
319static int opt_madvise = 0;
320static int opt_utrace = 0;
321static int malloc_started = 0;
322static int g_malloc_flags = 0;
323static spinlock_t malloc_init_lock;
324static struct slglobaldata SLGlobalData;
325static bigalloc_t bigalloc_array[BIGHSIZE];
326static spinlock_t bigspin_array[BIGXSIZE];
327static int malloc_panic;
328static int malloc_dummy_pointer;
329
330static const int32_t weirdary[16] = {
331 WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
332 WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
333 WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR,
334 WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR, WEIRD_ADDR
335};
336
337static void *_slaballoc(size_t size, int flags);
338static void *_slabrealloc(void *ptr, size_t size);
339static void _slabfree(void *ptr, int, bigalloc_t *);
340static void *_vmem_alloc(size_t bytes, size_t align, int flags);
341static void _vmem_free(void *ptr, size_t bytes);
342static void *magazine_alloc(struct magazine *, int *);
343static int magazine_free(struct magazine *, void *);
344static void *mtmagazine_alloc(int zi);
345static int mtmagazine_free(int zi, void *);
346static void mtmagazine_init(void);
347static void mtmagazine_destructor(void *);
348static slzone_t zone_alloc(int flags);
349static void zone_free(void *z);
350static void _mpanic(const char *ctl, ...);
351static void malloc_init(void);
352#if defined(INVARIANTS)
353static void chunk_mark_allocated(slzone_t z, void *chunk);
354static void chunk_mark_free(slzone_t z, void *chunk);
355#endif
356
357struct nmalloc_utrace {
358 void *p;
359 size_t s;
360 void *r;
361};
362
363#define UTRACE(a, b, c) \
364 if (opt_utrace) { \
365 struct nmalloc_utrace ut = { \
366 .p = (a), \
367 .s = (b), \
368 .r = (c) \
369 }; \
370 utrace(&ut, sizeof(ut)); \
371 }
372
373#ifdef INVARIANTS
374/*
375 * If enabled any memory allocated without M_ZERO is initialized to -1.
376 */
377static int use_malloc_pattern;
378#endif
379
380static void
381malloc_init(void)
382{
383 const char *p = NULL;
384
385 if (__isthreaded) {
386 _SPINLOCK(&malloc_init_lock);
387 if (malloc_started) {
388 _SPINUNLOCK(&malloc_init_lock);
389 return;
390 }
391 }
392
393 if (issetugid() == 0)
394 p = getenv("MALLOC_OPTIONS");
395
396 for (; p != NULL && *p != '\0'; p++) {
397 switch(*p) {
398 case 'u': opt_utrace = 0; break;
399 case 'U': opt_utrace = 1; break;
400 case 'h': opt_madvise = 0; break;
401 case 'H': opt_madvise = 1; break;
402 case 'z': g_malloc_flags = 0; break;
403 case 'Z': g_malloc_flags = SAFLAG_ZERO; break;
404 default:
405 break;
406 }
407 }
408
409 malloc_started = 1;
410
411 if (__isthreaded)
412 _SPINUNLOCK(&malloc_init_lock);
413
414 UTRACE((void *) -1, 0, NULL);
415}
416
417/*
418 * We have to install a handler for nmalloc thread teardowns when
419 * the thread is created. We cannot delay this because destructors in
420 * sophisticated userland programs can call malloc() for the first time
421 * during their thread exit.
422 *
423 * This routine is called directly from pthreads.
424 */
425void
426_nmalloc_thr_init(void)
427{
428 thr_mags *tp;
429
430 /*
431 * Disallow mtmagazine operations until the mtmagazine is
432 * initialized.
433 */
434 tp = &thread_mags;
435 tp->init = -1;
436
437 pthread_setspecific(thread_mags_key, tp);
438 if (mtmagazine_free_live == 0) {
439 mtmagazine_free_live = 1;
440 pthread_once(&thread_mags_once, mtmagazine_init);
441 }
442 tp->init = 1;
443}
444
445/*
446 * Thread locks.
447 */
448static __inline void
449slgd_lock(slglobaldata_t slgd)
450{
451 if (__isthreaded)
452 _SPINLOCK(&slgd->Spinlock);
453}
454
455static __inline void
456slgd_unlock(slglobaldata_t slgd)
457{
458 if (__isthreaded)
459 _SPINUNLOCK(&slgd->Spinlock);
460}
461
462static __inline void
463depot_lock(magazine_depot *dp)
464{
465 if (__isthreaded)
466 pthread_spin_lock(&dp->lock);
467}
468
469static __inline void
470depot_unlock(magazine_depot *dp)
471{
472 if (__isthreaded)
473 pthread_spin_unlock(&dp->lock);
474}
475
476static __inline void
477zone_magazine_lock(void)
478{
479 if (__isthreaded)
480 _SPINLOCK(&zone_mag_lock);
481}
482
483static __inline void
484zone_magazine_unlock(void)
485{
486 if (__isthreaded)
487 _SPINUNLOCK(&zone_mag_lock);
488}
489
490static __inline void
491swap_mags(magazine_pair *mp)
492{
493 struct magazine *tmp;
494 tmp = mp->loaded;
495 mp->loaded = mp->prev;
496 mp->prev = tmp;
497}
498
499/*
500 * bigalloc hashing and locking support.
501 *
502 * Return an unmasked hash code for the passed pointer.
503 */
504static __inline int
505_bigalloc_hash(void *ptr)
506{
507 int hv;
508
509 hv = ((int)(intptr_t)ptr >> PAGE_SHIFT) ^
510 ((int)(intptr_t)ptr >> (PAGE_SHIFT + BIGHSHIFT));
511
512 return(hv);
513}
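
/*
 * Worked example (illustrative, not part of the original source, assuming
 * 4KB pages so PAGE_SHIFT == 12): for ptr == 0x00803000,
 *	hv = (0x00803000 >> 12) ^ (0x00803000 >> 22) = 0x803 ^ 0x2 = 0x801
 *	hash chain:  hv & BIGHMASK = 0x001	(bigalloc_array index)
 *	lock:        hv & BIGXMASK = 0x01	(bigspin_array index)
 * so nearby big allocations spread over the chains while sharing one of
 * the BIGXSIZE spinlocks.
 */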
514
515/*
516 * Lock the hash chain and return a pointer to its base for the specified
517 * address.
518 */
519static __inline bigalloc_t *
520bigalloc_lock(void *ptr)
521{
522 int hv = _bigalloc_hash(ptr);
523 bigalloc_t *bigp;
524
525 bigp = &bigalloc_array[hv & BIGHMASK];
526 if (__isthreaded)
527 _SPINLOCK(&bigspin_array[hv & BIGXMASK]);
528 return(bigp);
529}
530
531/*
532 * Lock the hash chain and return a pointer to its base for the specified
533 * address.
534 *
535 * BUT, if the hash chain is empty, just return NULL and do not bother
536 * to lock anything.
537 */
538static __inline bigalloc_t *
539bigalloc_check_and_lock(void *ptr)
540{
541 int hv = _bigalloc_hash(ptr);
542 bigalloc_t *bigp;
543
544 bigp = &bigalloc_array[hv & BIGHMASK];
545 if (*bigp == NULL)
546 return(NULL);
547 if (__isthreaded) {
548 _SPINLOCK(&bigspin_array[hv & BIGXMASK]);
549 }
550 return(bigp);
551}
552
553static __inline void
554bigalloc_unlock(void *ptr)
555{
556 int hv;
557
558 if (__isthreaded) {
559 hv = _bigalloc_hash(ptr);
560 _SPINUNLOCK(&bigspin_array[hv & BIGXMASK]);
561 }
562}
563
564/*
565 * Calculate the zone index for the allocation request size and set the
566 * allocation request size to that particular zone's chunk size.
567 */
568static __inline int
569zoneindex(size_t *bytes, size_t *chunking)
570{
571 size_t n = (unsigned int)*bytes; /* unsigned for shift opt */
572 if (n < 128) {
573 *bytes = n = (n + 7) & ~7;
574 *chunking = 8;
575 return(n / 8 - 1); /* 8 byte chunks, 16 zones */
576 }
577 if (n < 256) {
578 *bytes = n = (n + 15) & ~15;
579 *chunking = 16;
580 return(n / 16 + 7);
581 }
582 if (n < 8192) {
583 if (n < 512) {
584 *bytes = n = (n + 31) & ~31;
585 *chunking = 32;
586 return(n / 32 + 15);
587 }
588 if (n < 1024) {
589 *bytes = n = (n + 63) & ~63;
590 *chunking = 64;
591 return(n / 64 + 23);
592 }
593 if (n < 2048) {
594 *bytes = n = (n + 127) & ~127;
595 *chunking = 128;
596 return(n / 128 + 31);
597 }
598 if (n < 4096) {
599 *bytes = n = (n + 255) & ~255;
600 *chunking = 256;
601 return(n / 256 + 39);
602 }
603 *bytes = n = (n + 511) & ~511;
604 *chunking = 512;
605 return(n / 512 + 47);
606 }
607#if ZALLOC_ZONE_LIMIT > 8192
608 if (n < 16384) {
609 *bytes = n = (n + 1023) & ~1023;
610 *chunking = 1024;
611 return(n / 1024 + 55);
612 }
613#endif
614#if ZALLOC_ZONE_LIMIT > 16384
615 if (n < 32768) {
616 *bytes = n = (n + 2047) & ~2047;
617 *chunking = 2048;
618 return(n / 2048 + 63);
619 }
620#endif
621 _mpanic("Unexpected byte count %zu", n);
622 return(0);
623}
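
/*
 * Illustrative sketch, not part of the original source: what zoneindex()
 * does to a couple of request sizes, following the chunking table in the
 * header comment. The function name is hypothetical; the values follow
 * directly from the code above.
 */
#if 0
static void
zoneindex_example(void)
{
	size_t bytes, chunking;
	int zi;

	bytes = 100;
	zi = zoneindex(&bytes, &chunking); /* bytes -> 104, chunking 8,  zi 12 */

	bytes = 513;
	zi = zoneindex(&bytes, &chunking); /* bytes -> 576, chunking 64, zi 32 */
}
#endif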
624
625/*
626 * malloc() - call internal slab allocator
627 */
628void *
629malloc(size_t size)
630{
631 void *ptr;
632
633 ptr = _slaballoc(size, 0);
634 if (ptr == NULL)
635 errno = ENOMEM;
636 else
637 UTRACE(0, size, ptr);
638 return(ptr);
639}
640
641/*
642 * calloc() - call internal slab allocator
643 */
644void *
645calloc(size_t number, size_t size)
646{
647 void *ptr;
648
649 ptr = _slaballoc(number * size, SAFLAG_ZERO);
650 if (ptr == NULL)
651 errno = ENOMEM;
652 else
653 UTRACE(0, number * size, ptr);
654 return(ptr);
655}
656
657/*
658 * realloc() (SLAB ALLOCATOR)
659 *
660 * We do not attempt to optimize this routine beyond reusing the same
661 * pointer if the new size fits within the chunking of the old pointer's
662 * zone.
663 */
664void *
665realloc(void *ptr, size_t size)
666{
0bb7d8c8
VS
667 void *ret;
668 ret = _slabrealloc(ptr, size);
669 if (ret == NULL)
670 errno = ENOMEM;
671 else
672 UTRACE(ptr, size, ret);
673 return(ret);
674}
675
676/*
677 * posix_memalign()
678 *
679 * Allocate (size) bytes with an alignment of (alignment), where (alignment)
680 * is a power of 2 >= sizeof(void *).
681 *
682 * The slab allocator will allocate on power-of-2 boundaries up to
683 * at least PAGE_SIZE. We use the zoneindex mechanic to find a
684 * zone matching the requirements, and _vmem_alloc() otherwise.
685 */
686int
687posix_memalign(void **memptr, size_t alignment, size_t size)
688{
689 bigalloc_t *bigp;
690 bigalloc_t big;
691 size_t chunking;
692 int zi;
693
694 /*
695 * OpenGroup spec issue 6 checks
696 */
697 if ((alignment | (alignment - 1)) + 1 != (alignment << 1)) {
698 *memptr = NULL;
699 return(EINVAL);
700 }
701 if (alignment < sizeof(void *)) {
702 *memptr = NULL;
703 return(EINVAL);
704 }
705
706 /*
707 * Our zone mechanism guarantees same-sized alignment for any
708 * power-of-2 allocation. If size is a power-of-2 and reasonable
709 * we can just call _slaballoc() and be done. We round size up
710 * to the nearest alignment boundary to improve our odds of
711 * it becoming a power-of-2 if it wasn't before.
712 */
713 if (size <= alignment)
714 size = alignment;
715 else
716 size = (size + alignment - 1) & ~(size_t)(alignment - 1);
717 if (size < PAGE_SIZE && (size | (size - 1)) + 1 == (size << 1)) {
718 *memptr = _slaballoc(size, 0);
719 return(*memptr ? 0 : ENOMEM);
720 }
721
722 /*
723 * Otherwise locate a zone with a chunking that matches
724 * the requested alignment, within reason. Consider two cases:
725 *
726 * (1) A 1K allocation on a 32-byte alignment. The first zoneindex
727 * we find will be the best fit because the chunking will be
728 * greater or equal to the alignment.
729 *
730 * (2) A 513-byte allocation on a 256-byte alignment. In this case
731 * the first zoneindex we find will be for 576 byte allocations
732 * with a chunking of 64, which is not sufficient. To fix this
733 * we simply find the nearest power-of-2 >= size and use the
734 * same side-effect of _slaballoc() which guarantees
735 * same-alignment on a power-of-2 allocation.
736 */
737 if (size < PAGE_SIZE) {
738 zi = zoneindex(&size, &chunking);
739 if (chunking >= alignment) {
740 *memptr = _slaballoc(size, 0);
741 return(*memptr ? 0 : ENOMEM);
742 }
743 if (size >= 1024)
744 alignment = 1024;
745 if (size >= 16384)
746 alignment = 16384;
747 while (alignment < size)
748 alignment <<= 1;
749 *memptr = _slaballoc(alignment, 0);
750 return(*memptr ? 0 : ENOMEM);
751 }
752
753 /*
754 * If the slab allocator cannot handle it use vmem_alloc().
755 *
756 * Alignment must be adjusted up to at least PAGE_SIZE in this case.
757 */
758 if (alignment < PAGE_SIZE)
759 alignment = PAGE_SIZE;
760 if (size < alignment)
761 size = alignment;
762 size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
763 *memptr = _vmem_alloc(size, alignment, 0);
764 if (*memptr == NULL)
765 return(ENOMEM);
766
767 big = _slaballoc(sizeof(struct bigalloc), 0);
768 if (big == NULL) {
769 _vmem_free(*memptr, size);
770 *memptr = NULL;
771 return(ENOMEM);
772 }
773 bigp = bigalloc_lock(*memptr);
774 big->base = *memptr;
775 big->bytes = size;
776 big->next = *bigp;
777 *bigp = big;
778 bigalloc_unlock(*memptr);
779
780 return(0);
781}
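
/*
 * Illustrative usage sketch, not part of the original source, matching
 * case (2) in the comment above: a 513-byte request at 256-byte alignment
 * is rounded up and ends up served as a power-of-2 (1024-byte) chunk whose
 * natural alignment satisfies the request. The function name is made up.
 */
#if 0
static void
posix_memalign_example(void)
{
	void *p;

	if (posix_memalign(&p, 256, 513) == 0) {
		/* ((uintptr_t)p & 255) == 0; backed by a 1024-byte chunk */
		free(p);
	}
}
#endif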
782
783/*
784 * free() (SLAB ALLOCATOR) - do the obvious
785 */
786void
787free(void *ptr)
788{
789 UTRACE(ptr, 0, 0);
790 _slabfree(ptr, 0, NULL);
791}
792
793/*
794 * _slaballoc() (SLAB ALLOCATOR)
795 *
796 * Allocate memory via the slab allocator. If the request is too large,
797 * or if it page-aligned beyond a certain size, we fall back to the
798 * KMEM subsystem
799 */
800static void *
801_slaballoc(size_t size, int flags)
802{
803 slzone_t z;
804 slchunk_t chunk;
805 slglobaldata_t slgd;
806 size_t chunking;
807 int zi;
808#ifdef INVARIANTS
809 int i;
810#endif
811 int off;
812 void *obj;
813
814 if (!malloc_started)
815 malloc_init();
816
817 /*
818 * Handle the degenerate size == 0 case. Yes, this does happen.
819 * Return a special pointer. This is to maintain compatibility with
820 * the original malloc implementation. Certain devices, such as the
821 * adaptec driver, not only allocate 0 bytes, they check for NULL and
822 * also realloc() later on. Joy.
823 */
824 if (size == 0)
825 return(ZERO_LENGTH_PTR);
826
827 /* Capture global flags */
828 flags |= g_malloc_flags;
829
830 /*
831 * Handle large allocations directly. There should not be very many
832 * of these so performance is not a big issue.
833 *
834 * The backend allocator is pretty nasty on a SMP system. Use the
835 * slab allocator for one and two page-sized chunks even though we
836 * lose some efficiency.
837 */
838 if (size >= ZoneLimit ||
839 ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
840 bigalloc_t big;
841 bigalloc_t *bigp;
842
843 size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
844 chunk = _vmem_alloc(size, PAGE_SIZE, flags);
845 if (chunk == NULL)
846 return(NULL);
847
848 big = _slaballoc(sizeof(struct bigalloc), 0);
849 if (big == NULL) {
850 _vmem_free(chunk, size);
851 return(NULL);
852 }
853 bigp = bigalloc_lock(chunk);
854 big->base = chunk;
855 big->bytes = size;
856 big->next = *bigp;
857 *bigp = big;
858 bigalloc_unlock(chunk);
859
860 return(chunk);
861 }
862
863 /* Compute allocation zone; zoneindex will panic on excessive sizes */
864 zi = zoneindex(&size, &chunking);
865 MASSERT(zi < NZONES);
866
867 obj = mtmagazine_alloc(zi);
868 if (obj != NULL) {
869 if (flags & SAFLAG_ZERO)
870 bzero(obj, size);
871 return (obj);
872 }
873
874 slgd = &SLGlobalData;
875 slgd_lock(slgd);
876
877 /*
878 * Attempt to allocate out of an existing zone. If all zones are
879 * exhausted pull one off the free list or allocate a new one.
880 */
881 if ((z = slgd->ZoneAry[zi]) == NULL) {
882 z = zone_alloc(flags);
883 if (z == NULL)
884 goto fail;
885
886 /*
887 * How big is the base structure?
888 */
889#if defined(INVARIANTS)
890 /*
891 * Make room for z_Bitmap. An exact calculation is
892 * somewhat more complicated so don't make an exact
893 * calculation.
894 */
895 off = offsetof(struct slzone,
896 z_Bitmap[(ZoneSize / size + 31) / 32]);
897 bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
898#else
899 off = sizeof(struct slzone);
900#endif
901
902 /*
903 * Align the storage in the zone based on the chunking.
904 *
905 * Guarantee power-of-2 alignment for power-of-2-sized
906 * chunks. Otherwise align based on the chunking size
907 * (typically 8 or 16 bytes for small allocations).
908 *
909 * NOTE: Allocations >= ZoneLimit are governed by the
910 * bigalloc code and typically only guarantee page-alignment.
911 *
912 * Set initial conditions for UIndex near the zone header
913 * to reduce unnecessary page faults, vs semi-randomization
914 * to improve L1 cache saturation.
915 */
916 if ((size | (size - 1)) + 1 == (size << 1))
917 off = (off + size - 1) & ~(size - 1);
918 else
919 off = (off + chunking - 1) & ~(chunking - 1);
920 z->z_Magic = ZALLOC_SLAB_MAGIC;
921 z->z_ZoneIndex = zi;
922 z->z_NMax = (ZoneSize - off) / size;
923 z->z_NFree = z->z_NMax;
924 z->z_BasePtr = (char *)z + off;
925 z->z_UIndex = z->z_UEndIndex = 0;
926 z->z_ChunkSize = size;
927 z->z_FirstFreePg = ZonePageCount;
928 z->z_Next = slgd->ZoneAry[zi];
929 slgd->ZoneAry[zi] = z;
930 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
931 flags &= ~SAFLAG_ZERO; /* already zero'd */
932 flags |= SAFLAG_PASSIVE;
933 }
934
935 /*
936 * Slide the base index for initial allocations out of the
937 * next zone we create so we do not over-weight the lower
938 * part of the cpu memory caches.
939 */
940 slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
941 & (ZALLOC_MAX_ZONE_SIZE - 1);
942 }
943
944 /*
945 * Ok, we have a zone from which at least one chunk is available.
946 *
947 * Remove us from the ZoneAry[] when we become empty
948 */
949 MASSERT(z->z_NFree > 0);
950
951 if (--z->z_NFree == 0) {
952 slgd->ZoneAry[zi] = z->z_Next;
953 z->z_Next = NULL;
954 }
955
956 /*
957 * Locate a chunk in a free page. This attempts to localize
958 * reallocations into earlier pages without us having to sort
959 * the chunk list. A chunk may still overlap a page boundary.
960 */
961 while (z->z_FirstFreePg < ZonePageCount) {
962 if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
963#ifdef DIAGNOSTIC
964 /*
965 * Diagnostic: c_Next is not total garbage.
966 */
967 MASSERT(chunk->c_Next == NULL ||
968 ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
969 ((intptr_t)chunk & IN_SAME_PAGE_MASK));
970#endif
971#ifdef INVARIANTS
972 chunk_mark_allocated(z, chunk);
973#endif
974 MASSERT((uintptr_t)chunk & ZoneMask);
975 z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
976 goto done;
977 }
978 ++z->z_FirstFreePg;
979 }
980
981 /*
982 * No chunks are available but NFree said we had some memory,
983 * so it must be available in the never-before-used-memory
984 * area governed by UIndex. The consequences are very
985 * serious if our zone got corrupted so we use an explicit
986 * panic rather than a KASSERT.
987 */
988 chunk = (slchunk_t)(z->z_BasePtr + z->z_UIndex * size);
989
990 if (++z->z_UIndex == z->z_NMax)
991 z->z_UIndex = 0;
992 if (z->z_UIndex == z->z_UEndIndex) {
993 if (z->z_NFree != 0)
994 _mpanic("slaballoc: corrupted zone");
995 }
996
997 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
998 flags &= ~SAFLAG_ZERO;
999 flags |= SAFLAG_PASSIVE;
1000 }
1001#if defined(INVARIANTS)
1002 chunk_mark_allocated(z, chunk);
1003#endif
1004
1005done:
1006 slgd_unlock(slgd);
1007 if (flags & SAFLAG_ZERO) {
1008 bzero(chunk, size);
1009#ifdef INVARIANTS
1010 } else if ((flags & (SAFLAG_ZERO|SAFLAG_PASSIVE)) == 0) {
1011 if (use_malloc_pattern) {
1012 for (i = 0; i < size; i += sizeof(int)) {
1013 *(int *)((char *)chunk + i) = -1;
1014 }
1015 }
1016 /* avoid accidental double-free check */
1017 chunk->c_Next = (void *)-1;
1018#endif
1019 }
1020 return(chunk);
1021fail:
1022 slgd_unlock(slgd);
1023 return(NULL);
1024}
1025
1026/*
1027 * Reallocate memory within the chunk
1028 */
1029static void *
1030_slabrealloc(void *ptr, size_t size)
1031{
1032 bigalloc_t *bigp;
1033 void *nptr;
1034 slzone_t z;
1035 size_t chunking;
1036
1037 if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
1038 return(_slaballoc(size, 0));
1039
1040 if (size == 0) {
1041 free(ptr);
1042 return(ZERO_LENGTH_PTR);
1043 }
1044
1045 /*
1046 * Handle oversized allocations.
1047 */
1048 if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
1049 bigalloc_t big;
1050 size_t bigbytes;
1051
1052 while ((big = *bigp) != NULL) {
1053 if (big->base == ptr) {
1054 size = (size + PAGE_MASK) & ~(size_t)PAGE_MASK;
1055 bigbytes = big->bytes;
1056 if (bigbytes == size) {
1057 bigalloc_unlock(ptr);
1058 return(ptr);
1059 }
1060 *bigp = big->next;
1061 bigalloc_unlock(ptr);
1062 if ((nptr = _slaballoc(size, 0)) == NULL) {
1063 /* Relink block */
1064 bigp = bigalloc_lock(ptr);
1065 big->next = *bigp;
1066 *bigp = big;
1067 bigalloc_unlock(ptr);
1068 return(NULL);
1069 }
1070 if (size > bigbytes)
1071 size = bigbytes;
1072 bcopy(ptr, nptr, size);
1073 _slabfree(ptr, FASTSLABREALLOC, &big);
1074 return(nptr);
1075 }
1076 bigp = &big->next;
1077 }
1078 bigalloc_unlock(ptr);
1079 }
1080
1081 /*
1082 * Get the original allocation's zone. If the new request winds
1083 * up using the same chunk size we do not have to do anything.
1084 *
1085 * NOTE: We don't have to lock the globaldata here, the fields we
1086 * access here will not change at least as long as we have control
1087 * over the allocation.
1088 */
1089 z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
1090 MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1091
1092 /*
1093 * Use zoneindex() to chunk-align the new size, as long as the
1094 * new size is not too large.
1095 */
1096 if (size < ZoneLimit) {
1097 zoneindex(&size, &chunking);
1098 if (z->z_ChunkSize == size)
1099 return(ptr);
1100 }
1101
1102 /*
1103 * Allocate memory for the new request size and copy as appropriate.
1104 */
1105 if ((nptr = _slaballoc(size, 0)) != NULL) {
1106 if (size > z->z_ChunkSize)
1107 size = z->z_ChunkSize;
1108 bcopy(ptr, nptr, size);
1109 _slabfree(ptr, 0, NULL);
1110 }
1111
1112 return(nptr);
1113}
1114
1115/*
1116 * free (SLAB ALLOCATOR)
1117 *
1118 * Free a memory block previously allocated by malloc. Note that we do not
1119 * attempt to update ks_loosememuse as MP races could prevent us from
1120 * checking memory limits in malloc.
1121 *
1122 * flags:
1123 * FASTSLABREALLOC Fast call from realloc, *rbigp already
1124 * unlinked.
1125 *
1126 * MPSAFE
1127 */
1128static void
1129_slabfree(void *ptr, int flags, bigalloc_t *rbigp)
1130{
1131 slzone_t z;
1132 slchunk_t chunk;
1133 bigalloc_t big;
1134 bigalloc_t *bigp;
1135 slglobaldata_t slgd;
1136 size_t size;
1137 int zi;
1138 int pgno;
1139
1140 /* Fast realloc path for big allocations */
1141 if (flags & FASTSLABREALLOC) {
1142 big = *rbigp;
1143 goto fastslabrealloc;
1144 }
1145
1146 /*
1147 * Handle NULL frees and special 0-byte allocations
1148 */
1149 if (ptr == NULL)
1150 return;
1151 if (ptr == ZERO_LENGTH_PTR)
1152 return;
1153
1154 /*
1155 * Handle oversized allocations.
1156 */
1157 if ((bigp = bigalloc_check_and_lock(ptr)) != NULL) {
1158 while ((big = *bigp) != NULL) {
1159 if (big->base == ptr) {
1160 *bigp = big->next;
1161 bigalloc_unlock(ptr);
1162fastslabrealloc:
1163 size = big->bytes;
1164 _slabfree(big, 0, NULL);
1165#ifdef INVARIANTS
1166 MASSERT(sizeof(weirdary) <= size);
1167 bcopy(weirdary, ptr, sizeof(weirdary));
1168#endif
1169 _vmem_free(ptr, size);
1170 return;
1171 }
1172 bigp = &big->next;
1173 }
1174 bigalloc_unlock(ptr);
1175 }
1176
1177 /*
1178 * Zone case. Figure out the zone based on the fact that it is
1179 * ZoneSize aligned.
1180 */
1181 z = (slzone_t)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
1182 MASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1183
1184 size = z->z_ChunkSize;
1185 zi = z->z_ZoneIndex;
1186
1187 if (g_malloc_flags & SAFLAG_ZERO)
1188 bzero(ptr, size);
1189
1190 if (mtmagazine_free(zi, ptr) == 0)
1191 return;
1192
1193 pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
1194 chunk = ptr;
1195 slgd = &SLGlobalData;
1196 slgd_lock(slgd);
1197
1198#ifdef INVARIANTS
1199 /*
1200 * Attempt to detect a double-free. To reduce overhead we only check
1201 * if there appears to be link pointer at the base of the data.
1202 */
1203 if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
1204 slchunk_t scan;
1205
1206 for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
1207 if (scan == chunk)
1208 _mpanic("Double free at %p", chunk);
1209 }
1210 }
1211 chunk_mark_free(z, chunk);
1212#endif
1213
1214 /*
1215 * Put weird data into the memory to detect modifications after
1216 * freeing, illegal pointer use after freeing (we should fault on
1217 * the odd address), and so forth.
1218 */
1219#ifdef INVARIANTS
1220 if (z->z_ChunkSize < sizeof(weirdary))
1221 bcopy(weirdary, chunk, z->z_ChunkSize);
1222 else
1223 bcopy(weirdary, chunk, sizeof(weirdary));
1224#endif
1225
1226 /*
1227 * Add this free non-zero'd chunk to a linked list for reuse, adjust
1228 * z_FirstFreePg.
1229 */
1230 chunk->c_Next = z->z_PageAry[pgno];
1231 z->z_PageAry[pgno] = chunk;
1232 if (z->z_FirstFreePg > pgno)
1233 z->z_FirstFreePg = pgno;
1234
1235 /*
1236 * Bump the number of free chunks. If it becomes non-zero the zone
1237 * must be added back onto the appropriate list.
1238 */
1239 if (z->z_NFree++ == 0) {
1240 z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
1241 slgd->ZoneAry[z->z_ZoneIndex] = z;
1242 }
1243
1244 /*
1245 * If the zone becomes totally free then release it.
1246 */
1247 if (z->z_NFree == z->z_NMax) {
1248 slzone_t *pz;
1249
1250 pz = &slgd->ZoneAry[z->z_ZoneIndex];
1251 while (z != *pz)
1252 pz = &(*pz)->z_Next;
1253 *pz = z->z_Next;
1254 z->z_Magic = -1;
1255 z->z_Next = NULL;
1256 zone_free(z);
1257 /* slgd lock released */
1258 return;
1259 }
1260 slgd_unlock(slgd);
1261}
1262
1263#if defined(INVARIANTS)
1264/*
1265 * Helper routines for sanity checks
1266 */
1267static
1268void
1269chunk_mark_allocated(slzone_t z, void *chunk)
1270{
1271 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1272 __uint32_t *bitptr;
1273
1274 MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
1275 bitptr = &z->z_Bitmap[bitdex >> 5];
1276 bitdex &= 31;
1277 MASSERT((*bitptr & (1 << bitdex)) == 0);
1278 *bitptr |= 1 << bitdex;
1279}
1280
1281static
1282void
1283chunk_mark_free(slzone_t z, void *chunk)
1284{
1285 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1286 __uint32_t *bitptr;
1287
1288 MASSERT(bitdex >= 0 && bitdex < z->z_NMax);
1289 bitptr = &z->z_Bitmap[bitdex >> 5];
1290 bitdex &= 31;
1291 MASSERT((*bitptr & (1 << bitdex)) != 0);
1292 *bitptr &= ~(1 << bitdex);
1293}
1294
1295#endif
1296
1297/*
1298 * Allocate and return a magazine. NULL is returned and *burst is adjusted
1299 * if the magazine is empty.
1300 */
1301static __inline void *
1302magazine_alloc(struct magazine *mp, int *burst)
1303{
1304 void *obj;
1305
1306 if (MAGAZINE_NOTEMPTY(mp)) {
1307 obj = mp->objects[--mp->rounds];
1308 return(obj);
1309 }
1310
1311 /*
1312 * Return burst factor to caller along with NULL
1313 */
1314 if ((mp->flags & M_BURST) && (burst != NULL)) {
1315 *burst = mp->burst_factor;
1316 }
1317 /* Reduce burst factor by NSCALE; if it hits 1, disable BURST */
1318 if ((mp->flags & M_BURST) && (mp->flags & M_BURST_EARLY) &&
1319 (burst != NULL)) {
1320 mp->burst_factor -= M_BURST_NSCALE;
1321 if (mp->burst_factor <= 1) {
1322 mp->burst_factor = 1;
1323 mp->flags &= ~(M_BURST);
1324 mp->flags &= ~(M_BURST_EARLY);
1325 }
1326 }
1327 return (NULL);
1328}
1329
1330static __inline int
1331magazine_free(struct magazine *mp, void *p)
1332{
1333 if (mp != NULL && MAGAZINE_NOTFULL(mp)) {
1334 mp->objects[mp->rounds++] = p;
1335 return 0;
1336 }
1337
1338 return -1;
1339}
1340
1341static void *
1342mtmagazine_alloc(int zi)
1343{
1344 thr_mags *tp;
1345 struct magazine *mp, *emptymag;
1346 magazine_depot *d;
1347 void *obj = NULL;
1348
1349 /*
1350 * Do not try to access per-thread magazines while the mtmagazine
1351 * is being initialized or destroyed.
1352 */
1353 tp = &thread_mags;
1354 if (tp->init < 0)
1355 return(NULL);
1356
1357 /*
1358 * Primary per-thread allocation loop
1359 */
1360 for (;;) {
1361 /* If the loaded magazine has rounds, allocate and return */
1362 if (((mp = tp->mags[zi].loaded) != NULL) &&
1363 MAGAZINE_NOTEMPTY(mp)) {
1364 obj = magazine_alloc(mp, NULL);
1365 break;
1366 }
1367
1368 /* If the prev magazine is full, swap with loaded and retry */
1369 if (((mp = tp->mags[zi].prev) != NULL) &&
1370 MAGAZINE_FULL(mp)) {
1371 swap_mags(&tp->mags[zi]);
1372 continue;
1373 }
1374
1375 /*
1376 * Lock the depot and check if it has any full magazines; if
1377 * so we return the prev to the emptymag list, move loaded
1378 * to prev load a full magazine, and retry
1379 */
1380 d = &depots[zi];
1381 depot_lock(d);
1382
1383 if (!SLIST_EMPTY(&d->full)) {
1384 emptymag = tp->mags[zi].prev;
1385 tp->mags[zi].prev = tp->mags[zi].loaded;
1386 tp->mags[zi].loaded = SLIST_FIRST(&d->full);
1387 SLIST_REMOVE_HEAD(&d->full, nextmagazine);
1388
1389 /* Return emptymag to the depot */
1390 if (emptymag != NULL) {
1391 SLIST_INSERT_HEAD(&d->empty, emptymag,
1392 nextmagazine);
1393 }
1394 depot_unlock(d);
1395 continue;
1396 }
1397 depot_unlock(d);
1398 break;
1399 }
1400
1401 return (obj);
1402}
1403
1404static int
1405mtmagazine_free(int zi, void *ptr)
1406{
1407 thr_mags *tp;
1408 struct magazine *mp, *loadedmag, *newmag;
1409 magazine_depot *d;
1410 int rc = -1;
1411
1412 /*
1413 * Do not try to access per-thread magazines while the mtmagazine
1414 * is being initialized or destroyed.
1415 */
1416 tp = &thread_mags;
1417 if (tp->init < 0)
1418 return(-1);
1419
1420 /*
1421 * Primary per-thread freeing loop
1422 */
1423 for (;;) {
1424 /* If the loaded magazine has space, free directly to it */
1425 if (((mp = tp->mags[zi].loaded) != NULL) &&
1426 MAGAZINE_NOTFULL(mp)) {
1427 rc = magazine_free(mp, ptr);
1428 break;
1429 }
1430
1431 /* If the prev magazine is empty, swap with loaded and retry */
1432 if (((mp = tp->mags[zi].prev) != NULL) &&
1433 MAGAZINE_EMPTY(mp)) {
1434 swap_mags(&tp->mags[zi]);
1435 continue;
1436 }
1437
1438 /* Lock the depot; if there are any empty magazines, move the
1439 * prev to the depot's fullmag list, move loaded to previous,
1440 * and move a new emptymag to loaded, and retry. */
1441
1442 d = &depots[zi];
1443 depot_lock(d);
1444
1445 if (!SLIST_EMPTY(&d->empty)) {
1446 loadedmag = tp->mags[zi].prev;
1447 tp->mags[zi].prev = tp->mags[zi].loaded;
1448 tp->mags[zi].loaded = SLIST_FIRST(&d->empty);
1449 SLIST_REMOVE_HEAD(&d->empty, nextmagazine);
1450
1451 /* Return loadedmag to the depot */
1452 if (loadedmag != NULL) {
1453 SLIST_INSERT_HEAD(&d->full, loadedmag,
1454 nextmagazine);
1455 }
1456 depot_unlock(d);
1457 continue;
1458 }
1459
1460 /*
1461 * Allocate an empty magazine, add it to the depot, retry
1462 */
1463 depot_unlock(d);
1464 newmag = _slaballoc(sizeof(struct magazine), SAFLAG_ZERO);
1465 if (newmag != NULL) {
1466 newmag->capacity = M_MAX_ROUNDS;
1467 newmag->rounds = 0;
1468
1469 depot_lock(d);
1470 SLIST_INSERT_HEAD(&d->empty, newmag, nextmagazine);
1471 depot_unlock(d);
1472 continue;
1473 }
1474 rc = -1;
1475 break;
1476 }
1477
1478 return rc;
1479}
1480
1481static void
1482mtmagazine_init(void)
1483{
1484 int error;
1485
1486 error = pthread_key_create(&thread_mags_key, mtmagazine_destructor);
1487 if (error)
1488 abort();
1489}
1490
1491/*
1492 * This function is only used by the thread exit destructor
1493 */
1494static void
1495mtmagazine_drain(struct magazine *mp)
1496{
1497 void *obj;
1498
1499 while (MAGAZINE_NOTEMPTY(mp)) {
1500 obj = magazine_alloc(mp, NULL);
1501 _slabfree(obj, 0, NULL);
1502 }
1503}
1504
1505/*
1506 * mtmagazine_destructor()
1507 *
1508 * When a thread exits, we reclaim all its resources; all its magazines are
1509 * drained and the structures are freed.
1510 *
1511 * WARNING! The destructor can be called multiple times if the larger user
1512 * program has its own destructors which run after ours which
1513 * allocate or free memory.
1514 */
1515static void
1516mtmagazine_destructor(void *thrp)
1517{
1518 thr_mags *tp = thrp;
1519 struct magazine *mp;
1520 int i;
1521
1522 /*
1523 * Prevent further use of mtmagazines while we are destructing
1524 * them, as well as for any destructors which are run after us
1525 * prior to the thread actually being destroyed.
1526 */
1527 tp->init = -1;
1528
1529 for (i = 0; i < NZONES; i++) {
1530 mp = tp->mags[i].loaded;
1531 tp->mags[i].loaded = NULL;
1532 if (mp != NULL && MAGAZINE_NOTEMPTY(mp))
1533 mtmagazine_drain(mp);
1534 _slabfree(mp, 0, NULL);
1535
1536 mp = tp->mags[i].prev;
1537 tp->mags[i].prev = NULL;
1538 if (mp != NULL && MAGAZINE_NOTEMPTY(mp))
1539 mtmagazine_drain(mp);
1540 _slabfree(mp, 0, NULL);
1541 }
1542}
1543
1544/*
1545 * zone_alloc()
1546 *
1547 * Attempt to allocate a zone from the zone magazine; the zone magazine has
1548 * M_BURST_EARLY enabled, so honor the burst request from the magazine.
1549 */
1550static slzone_t
1551zone_alloc(int flags)
1552{
1553 slglobaldata_t slgd = &SLGlobalData;
1554 int burst = 1;
1555 int i, j;
1556 slzone_t z;
1557
1558 zone_magazine_lock();
1559 slgd_unlock(slgd);
1560
1561 z = magazine_alloc(&zone_magazine, &burst);
1562 if (z == NULL && burst == 1) {
1563 zone_magazine_unlock();
1564 z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
1565 } else if (z == NULL) {
1566 z = _vmem_alloc(ZoneSize * burst, ZoneSize, flags);
1567 if (z) {
1568 for (i = 1; i < burst; i++) {
1569 j = magazine_free(&zone_magazine,
1570 (char *) z + (ZoneSize * i));
1571 MASSERT(j == 0);
1572 }
1573 }
1574 zone_magazine_unlock();
1575 } else {
1576 z->z_Flags |= SLZF_UNOTZEROD;
1577 zone_magazine_unlock();
1578 }
1579 slgd_lock(slgd);
1580 return z;
1581}
1582
1583/*
1584 * zone_free()
1585 *
1586 * Release a zone and unlock the slgd lock.
1587 */
1588static void
1589zone_free(void *z)
1590{
1591 slglobaldata_t slgd = &SLGlobalData;
1592 void *excess[M_ZONE_ROUNDS - M_LOW_ROUNDS] = {};
1593 int i, j;
1594
1595 zone_magazine_lock();
1596 slgd_unlock(slgd);
1597
1598 bzero(z, sizeof(struct slzone));
1599
1600 if (opt_madvise)
1601 madvise(z, ZoneSize, MADV_FREE);
1602
1603 i = magazine_free(&zone_magazine, z);
1604
1605 /*
1606 * If we failed to free, collect excess magazines; release the zone
1607 * magazine lock, and then free to the system via _vmem_free. Re-enable
1608 * BURST mode for the magazine.
1609 */
1610 if (i == -1) {
1611 j = zone_magazine.rounds - zone_magazine.low_factor;
1612 for (i = 0; i < j; i++) {
1613 excess[i] = magazine_alloc(&zone_magazine, NULL);
1614 MASSERT(excess[i] != NULL);
1615 }
1616
1617 zone_magazine_unlock();
1618
1619 for (i = 0; i < j; i++)
1620 _vmem_free(excess[i], ZoneSize);
1621
1622 _vmem_free(z, ZoneSize);
1623 } else {
1624 zone_magazine_unlock();
1625 }
1626}
1627
1628/*
1629 * _vmem_alloc()
1630 *
1631 * Directly map memory in PAGE_SIZE'd chunks with the specified
1632 * alignment.
1633 *
1634 * Alignment must be a multiple of PAGE_SIZE.
1635 *
1636 * Size must be >= alignment.
1637 */
1638static void *
1639_vmem_alloc(size_t size, size_t align, int flags)
1640{
1641 char *addr;
1642 char *save;
1643 size_t excess;
1644
1645 /*
1646 * Map anonymous private memory.
1647 */
1648 addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
1649 MAP_PRIVATE|MAP_ANON, -1, 0);
1650 if (addr == MAP_FAILED)
1651 return(NULL);
1652
1653 /*
1654 * Check alignment. The misaligned offset is also the excess
1655 * amount. If misaligned unmap the excess so we have a chance of
1656 * mapping at the next alignment point and recursively try again.
1657 *
1658 * BBBBBBBBBBB BBBBBBBBBBB BBBBBBBBBBB block alignment
1659 * aaaaaaaaa aaaaaaaaaaa aa mis-aligned allocation
1660 * xxxxxxxxx final excess calculation
1661 * ^ returned address
82949828
MD
1662 */
1663 excess = (uintptr_t)addr & (align - 1);
1664
1665 if (excess) {
1666 excess = align - excess;
1667 save = addr;
1668
1669 munmap(save + excess, size - excess);
1670 addr = _vmem_alloc(size, align, flags);
1671 munmap(save, excess);
1672 }
1673 return((void *)addr);
1674}
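
/*
 * Worked example (illustrative, not part of the original source): suppose
 * a 64KB request with 64KB alignment and the first mmap() returns
 * 0x200009000.
 *	excess = 0x200009000 & 0xffff = 0x9000  ->  align - excess = 0x7000
 * The tail [0x200010000, 0x200019000) is unmapped, _vmem_alloc() recurses
 * (with a good chance of now being handed the aligned hole that was just
 * opened), and finally the 0x7000-byte head at 0x200009000 is released.
 */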
1675
1676/*
1677 * _vmem_free()
1678 *
1679 * Free a chunk of memory allocated with _vmem_alloc()
1680 */
1681static void
1682_vmem_free(void *ptr, size_t size)
1683{
1684 munmap(ptr, size);
1685}
1686
1687/*
1688 * Panic on fatal conditions
1689 */
1690static void
1691_mpanic(const char *ctl, ...)
1692{
1693 va_list va;
1694
1695 if (malloc_panic == 0) {
1696 malloc_panic = 1;
1697 va_start(va, ctl);
1698 vfprintf(stderr, ctl, va);
1699 fprintf(stderr, "\n");
1700 fflush(stderr);
1701 va_end(va);
1702 }
1703 abort();
1704}