2 * ----------------------------------------------------------------------------
3 * "THE BEER-WARE LICENSE" (Revision 42):
4 * <phk@FreeBSD.ORG> wrote this file. As long as you retain this notice you
5 * can do whatever you want with this stuff. If we meet some day, and you think
6 * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
7 * ----------------------------------------------------------------------------
9 * $FreeBSD: src/lib/libc/stdlib/malloc.c,v 1.49.2.4 2001/12/29 08:10:14 knu Exp $
10 * $DragonFly: src/lib/libc/stdlib/malloc.c,v 1.6 2005/01/31 22:29:42 dillon Exp $
15 * Defining EXTRA_SANITY will enable extra checks which are related
16 * to internal conditions and consistency in malloc.c. This has a
17 * noticeable runtime performance hit, and generally will not do you
18 * any good unless you fiddle with the internals of malloc or want
19 * to catch random pointer corruption as early as possible.
21 #ifndef MALLOC_EXTRA_SANITY
22 #undef MALLOC_EXTRA_SANITY
26 * What to use for Junk. This is the byte value we use to fill with
27 * when the 'J' option is enabled.
29 #define SOME_JUNK 0xd0 /* as in "Duh" :-) */
32 * The basic parameters you can tweak.
34 * malloc_pageshift pagesize = 1 << malloc_pageshift
35 * It's probably best if this is the native
36 * page size, but it doesn't have to be.
38 * malloc_minsize minimum size of an allocation in bytes.
39 * If this is too small it's too much work
40 * to manage them. This is also the smallest
41 * unit of alignment used for the storage
42 * returned by malloc/realloc.
46 #include "namespace.h"
47 #if defined(__FreeBSD__) || defined(__DragonFly__)
48 # if defined(__i386__)
49 # define malloc_pageshift 12U
50 # define malloc_minsize 16U
52 # if defined(__alpha__)
53 # define malloc_pageshift 13U
54 # define malloc_minsize 16U
56 # if !defined(__NETBSD_SYSCALLS)
60 * Make malloc/free/realloc thread-safe in libc for use with
63 # include "libc_private.h"
64 # include "spinlock.h"
65 static spinlock_t thread_lock = _SPINLOCK_INITIALIZER;
66 # define THREAD_LOCK() if (__isthreaded) _SPINLOCK(&thread_lock);
67 # define THREAD_UNLOCK() if (__isthreaded) _SPINUNLOCK(&thread_lock);
68 #endif /* __FreeBSD__ || __DragonFly__ */
70 #if defined(__sparc__) && defined(sun)
71 # define malloc_pageshift 12U
72 # define malloc_minsize 16U
75 # define MMAP_FD fdzero
76 # define INIT_MMAP() \
77 { if ((fdzero = _open(_PATH_DEVZERO, O_RDWR, 0000)) == -1) \
78 wrterror("open of /dev/zero"); }
79 # define MADV_FREE MADV_DONTNEED
80 #endif /* __sparc__ */
82 /* Insert your combination here... */
83 #if defined(__FOOCPU__) && defined(__BAROS__)
84 # define malloc_pageshift 12U
85 # define malloc_minsize 16U
86 #endif /* __FOOCPU__ && __BAROS__ */
90 * No user serviceable parts behind this point.
92 #include <sys/types.h>
102 #include "un-namespace.h"
105 * This structure describes a page worth of chunks.
109 struct pginfo *next; /* next on the free list */
110 void *page; /* Pointer to the page */
111 u_short size; /* size of this page's chunks */
112 u_short shift; /* How far to shift for this size chunks */
113 u_short free; /* How many free chunks */
114 u_short total; /* How many chunks on this page in total */
/* bits[] is variable-length in practice: malloc_make_chunks() allocates
 * offsetof(struct pginfo, bits[0]) plus one word per MALLOC_BITS chunks,
 * so the declared [1] is only a placeholder for the trailing bitmap. */
115 u_int bits[1]; /* Which chunks are free (1 == free) */
119 * This structure describes a number of free pages. Entries form a
 * doubly-linked list anchored at free_list, kept in address order
 * (see free_pages(), which inserts/merges runs by address).
123 struct pgfree *next; /* next run of free pages */
124 struct pgfree *prev; /* prev run of free pages */
125 void *page; /* pointer to first free page of the run */
126 void *end; /* pointer to first byte past the run */
127 size_t size; /* number of bytes free */
131 * How many bits per u_int in the bitmap.
132 * Change only if not 8 bits/byte
134 #define MALLOC_BITS (8*sizeof(u_int))
137 * Magic values to put in the page_directory
/* page_dir[] entries numerically below MALLOC_MAGIC are sentinel markers
 * describing whole pages; entries >= MALLOC_MAGIC are real struct pginfo
 * pointers for chunk pages (ifree() dispatches on this comparison). */
139 #define MALLOC_NOT_MINE ((struct pginfo*) 0)
140 #define MALLOC_FREE ((struct pginfo*) 1)
141 #define MALLOC_FIRST ((struct pginfo*) 2)
142 #define MALLOC_FOLLOW ((struct pginfo*) 3)
143 #define MALLOC_MAGIC ((struct pginfo*) 4)
/* Defaults for platforms that didn't set these above. */
145 #ifndef malloc_pageshift
146 #define malloc_pageshift 12U
149 #ifndef malloc_minsize
150 #define malloc_minsize 16U
153 #if !defined(malloc_pagesize)
154 #define malloc_pagesize (1UL<<malloc_pageshift)
157 #if ((1<<malloc_pageshift) != malloc_pagesize)
158 #error "(1<<malloc_pageshift) != malloc_pagesize"
/* Anything larger than half a page is allocated as whole pages. */
161 #ifndef malloc_maxsize
162 #define malloc_maxsize ((malloc_pagesize)>>1)
165 /* A mask for the offset inside a page. */
166 #define malloc_pagemask ((malloc_pagesize)-1)
/* Round up to a page boundary / map a pointer to its page_dir index. */
168 #define pageround(foo) (((foo) + (malloc_pagemask))&(~(malloc_pagemask)))
169 #define ptr2index(foo) (((u_long)(foo) >> malloc_pageshift)-malloc_origo)
/* No-op locking for single-threaded platforms without the spinlock above. */
172 #define THREAD_LOCK()
175 #ifndef THREAD_UNLOCK
176 #define THREAD_UNLOCK()
187 /* Set when initialization has been done */
188 static unsigned malloc_started;
190 /* Recursion flag for public interface. */
191 static int malloc_active;
193 /* Number of free pages we cache (in pages until init, in bytes after) */
194 static unsigned malloc_cache = 16;
196 /* The offset from pagenumber to index into the page directory */
197 static u_long malloc_origo;
199 /* The last index in the page directory we care about */
200 static u_long last_index;
202 /* Pointer to page directory. Allocated "as if with" malloc */
203 static struct pginfo **page_dir;
205 /* How many slots in the page directory */
206 static unsigned malloc_ninfo;
208 /* Free pages line up here */
209 static struct pgfree free_list;
211 /* Abort(), user doesn't handle problems. ('A' option) */
212 static int malloc_abort;
214 /* Are we trying to die ? */
217 /* always realloc ? ('R' option) */
218 static int malloc_realloc;
220 /* pass the kernel a hint on free pages ? ('H' option; see madvise call) */
221 static int malloc_hint = 0;
223 /* xmalloc behaviour ? ('X' option: abort instead of returning NULL) */
224 static int malloc_xmalloc;
226 /* sysv behaviour for malloc(0) ? ('V' option) */
227 static int malloc_sysv;
/* zero-fill allocations ? ('Z' option; imalloc() memsets result to 0) */
230 static int malloc_zero;
/* fill allocations/frees with SOME_JUNK ? ('J' option) */
233 static int malloc_junk;
/* emit utrace(2) records ? ('U' option; see UTRACE macro) */
238 static int malloc_utrace;
240 struct ut { void *p; size_t s; void *r; };
242 void utrace (struct ut *, int);
244 #define UTRACE(a, b, c) \
246 {struct ut u; u.p=a; u.s = b; u.r=c; utrace(&u, sizeof u);}
247 #else /* !HAS_UTRACE */
248 #define UTRACE(a,b,c)
249 #endif /* HAS_UTRACE */
/* NOTE(review): appears to track the top of the sbrk-based arena —
 * free_pages() compares it against sbrk(0) before returning memory. */
252 static void *malloc_brk;
254 /* one location cache for free-list holders */
255 static struct pgfree *px;
257 /* compile-time options */
258 char *malloc_options;
260 /* Name of the current public function (for warning/error messages) */
261 static char *malloc_func;
265 mmap(0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
269 * Necessary function declarations
271 static int extend_pgdir(u_long index);
272 static void *imalloc(size_t size);
273 static void ifree(void *ptr);
274 static void *irealloc(void *ptr, size_t size);
276 extern char *__progname;
/* Body of wrterror(): writes "<progname><func> error: <msg>" to stderr
 * using raw _write(2) (no stdio — stdio might call malloc recursively).
 * NOTE(review): the signature and the abort path are on elided lines. */
281 char *q = " error: ";
282 _write(STDERR_FILENO, __progname, strlen(__progname));
283 _write(STDERR_FILENO, malloc_func, strlen(malloc_func));
284 _write(STDERR_FILENO, q, strlen(q));
285 _write(STDERR_FILENO, p, strlen(p));
/* Body of wrtwarning(): same raw-write message format as wrterror but
 * tagged " warning: "; non-fatal unless malloc_abort promotes it
 * (promotion logic, if any, is on elided lines). */
293 char *q = " warning: ";
296 _write(STDERR_FILENO, __progname, strlen(__progname));
297 _write(STDERR_FILENO, malloc_func, strlen(malloc_func));
298 _write(STDERR_FILENO, q, strlen(q));
299 _write(STDERR_FILENO, p, strlen(p));
303 * Allocate a number of pages from the OS by extending the brk:
 * start at the current (page-rounded) break, compute the new tail,
 * and grow the page directory if the new last_index outruns it.
306 map_pages(size_t pages)
308 caddr_t result, tail;
310 result = (caddr_t)pageround((u_long)sbrk(0));
311 tail = result + (pages << malloc_pageshift);
315 wrterror("(ES): map_pages fails\n");
316 #endif /* EXTRA_SANITY */
/* Highest page index now backing the arena. */
320 last_index = ptr2index(tail) - 1;
/* Make sure the page directory can describe the new pages. */
323 if ((last_index+1) >= malloc_ninfo && !extend_pgdir(last_index))
330 * Extend page directory: mmap a fresh, larger directory, copy the old
 * contents across, and swap the pointers. Returns nonzero on success
 * (failure return is on an elided line).
333 extend_pgdir(u_long index)
335 struct pginfo **new, **old;
338 /* Make it this many pages */
339 i = index * sizeof *page_dir;
340 i /= malloc_pagesize;
343 /* remember the old mapping size */
344 oldlen = malloc_ninfo * sizeof *page_dir;
347 * NOTE: we allocate new pages and copy the directory rather than tempt
348 * fate by trying to "grow" the region.. There is nothing to prevent
349 * us from accidently re-mapping space that's been allocated by our caller
350 * via dlopen() or other mmap().
352 * The copy problem is not too bad, as there is 4K of page index per
353 * 4MB of malloc arena.
355 * We can totally avoid the copy if we open a file descriptor to associate
356 * the anon mappings with. Then, when we remap the pages at the new
357 * address, the old pages will be "magically" remapped.. But this means
358 * keeping open a "secret" file descriptor.....
362 new = (struct pginfo**) MMAP(i * malloc_pagesize);
363 if (new == (struct pginfo **)-1)
366 /* Copy the old stuff */
367 memcpy(new, page_dir,
368 malloc_ninfo * sizeof *page_dir);
370 /* register the new size */
371 malloc_ninfo = i * malloc_pagesize / sizeof *page_dir;
373 /* swap the pointers */
377 /* Now free the old stuff */
383 * Initialize the world: parse options (from /etc/malloc.conf symlink
 * and the MALLOC_OPTIONS environment variable), allocate the initial
 * page directory, and prime the free-list holder cache.
396 #endif /* EXTRA_SANITY */
/* Gather up to three option strings; one source is the target of the
 * /etc/malloc.conf symlink, another is MALLOC_OPTIONS (others elided). */
398 for (i = 0; i < 3; i++) {
401 j = readlink("/etc/malloc.conf", b, sizeof b - 1);
408 p = getenv("MALLOC_OPTIONS");
/* Option letters: lowercase clears a flag, uppercase sets it;
 * '<'/'>' halve/double the page cache size. */
412 for (; p && *p; p++) {
414 case '>': malloc_cache <<= 1; break;
415 case '<': malloc_cache >>= 1; break;
416 case 'a': malloc_abort = 0; break;
417 case 'A': malloc_abort = 1; break;
418 case 'h': malloc_hint = 0; break;
419 case 'H': malloc_hint = 1; break;
420 case 'r': malloc_realloc = 0; break;
421 case 'R': malloc_realloc = 1; break;
422 case 'j': malloc_junk = 0; break;
423 case 'J': malloc_junk = 1; break;
425 case 'u': malloc_utrace = 0; break;
426 case 'U': malloc_utrace = 1; break;
428 case 'v': malloc_sysv = 0; break;
429 case 'V': malloc_sysv = 1; break;
430 case 'x': malloc_xmalloc = 0; break;
431 case 'X': malloc_xmalloc = 1; break;
432 case 'z': malloc_zero = 0; break;
433 case 'Z': malloc_zero = 1; break;
437 wrtwarning("unknown char in MALLOC_OPTIONS\n");
447 * We want junk in the entire allocation, and zero only in the part
448 * the user asked for.
454 * If we run with junk (or implicitly from above: zero), we want to
455 * force realloc() to get new storage, so we can DTRT with it.
460 /* Allocate one page for the page directory */
461 page_dir = (struct pginfo **) MMAP(malloc_pagesize)
463 if (page_dir == (struct pginfo **) -1)
464 wrterror("mmap(2) failed, check limits\n");
467 * We need a maximum of malloc_pageshift buckets, steal these from the
468 * front of the page_directory;
470 malloc_origo = ((u_long)pageround((u_long)sbrk(0))) >> malloc_pageshift;
471 malloc_origo -= malloc_pageshift;
473 malloc_ninfo = malloc_pagesize / sizeof *page_dir;
475 /* Recalculate the cache size in bytes, and make sure it's nonzero */
480 malloc_cache <<= malloc_pageshift;
483 * This is a nice hack from Kaleb Keithly (kaleb@x.org).
484 * We can sbrk(2) further back when we keep this on a low address.
486 px = (struct pgfree *) imalloc (sizeof *px);
488 /* Been here, done that */
493 * Allocate a number of complete pages: satisfy from the free-page
 * cache when an exact or larger run exists, otherwise extend the
 * arena (allocation path partially elided). Marks the run in
 * page_dir as MALLOC_FIRST + MALLOC_FOLLOWs.
496 malloc_pages(size_t size)
498 void *p, *delay_free = 0;
503 size = pageround(size);
507 /* Look for free pages before asking for more */
508 for(pf = free_list.next; pf; pf = pf->next) {
/* (ES) consistency checks on every free-list entry. */
511 if (pf->size & malloc_pagemask)
512 wrterror("(ES): junk length entry on free_list\n");
514 wrterror("(ES): zero length entry on free_list\n");
515 if (pf->page == pf->end)
516 wrterror("(ES): zero entry on free_list\n");
517 if (pf->page > pf->end)
518 wrterror("(ES): sick entry on free_list\n");
519 if ((void*)pf->page >= (void*)sbrk(0))
520 wrterror("(ES): entry on free_list past brk\n");
521 if (page_dir[ptr2index(pf->page)] != MALLOC_FREE)
522 wrterror("(ES): non-free first page on free-list\n");
523 if (page_dir[ptr2index(pf->end)-1] != MALLOC_FREE)
524 wrterror("(ES): non-free last page on free-list\n");
525 #endif /* EXTRA_SANITY */
/* Exact fit: unlink the whole entry from the list. */
530 if (pf->size == size) {
533 pf->next->prev = pf->prev;
534 pf->prev->next = pf->next;
/* Larger run: carve our pages off the front of the entry. */
540 pf->page = (char *)pf->page + size;
546 if (p && page_dir[ptr2index(p)] != MALLOC_FREE)
547 wrterror("(ES): allocated non-free page on free-list\n");
548 #endif /* EXTRA_SANITY */
/* From here on, size is a page count, not a byte count. */
550 size >>= malloc_pageshift;
558 index = ptr2index(p);
559 page_dir[index] = MALLOC_FIRST;
561 page_dir[index+i] = MALLOC_FOLLOW;
/* 'J' option: poison the fresh allocation. */
564 memset(p, SOME_JUNK, size << malloc_pageshift);
578 * Allocate a page of fragments: grab one page, build its pginfo
 * header (inline on the page itself for large chunks, via imalloc
 * otherwise), mark every chunk free in the bitmap, and hook the page
 * into the bucket list for this chunk size. Returns nonzero on success.
581 static __inline__ int
582 malloc_make_chunks(int bits)
588 /* Allocate a new bucket */
589 pp = malloc_pages(malloc_pagesize);
593 /* Find length of admin structure */
594 l = offsetof(struct pginfo, bits[0]);
595 l += sizeof bp->bits[0] *
596 (((malloc_pagesize >> bits)+MALLOC_BITS-1) / MALLOC_BITS);
598 /* Don't waste more than two chunks on this */
599 if ((1<<(bits)) <= l+l) {
/* Header lives at the start of the page itself. */
600 bp = (struct pginfo *)pp;
602 bp = (struct pginfo *)imalloc(l);
609 bp->size = (1<<bits);
611 bp->total = bp->free = malloc_pagesize >> bits;
614 /* set all valid bits in the bitmap */
618 /* Do a bunch at a time */
619 for(;k-i >= MALLOC_BITS; i += MALLOC_BITS)
620 bp->bits[i / MALLOC_BITS] = ~0;
623 bp->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
625 if (bp == bp->page) {
626 /* Mark the ones we stole for ourselves */
628 bp->bits[i/MALLOC_BITS] &= ~(1<<(i%MALLOC_BITS));
/* Register the page and push it on the per-size bucket list
 * (the first malloc_pageshift page_dir slots double as bucket heads). */
637 page_dir[ptr2index(pp)] = bp;
639 bp->next = page_dir[bits];
648 * Allocate a fragment (sub-page chunk): round the size up to the
 * bucket's power of two, take the first free bit from the first
 * non-full page in that bucket, and return the chunk's address.
651 malloc_bytes(size_t size)
659 /* Don't bother with anything less than this */
660 if (size < malloc_minsize)
661 size = malloc_minsize;
663 /* Find the right bucket */
669 /* If it's empty, make a page more of that size chunks */
670 if (!page_dir[j] && !malloc_make_chunks(j))
675 /* Find first word of bitmap which isn't empty */
676 for (lp = bp->bits; !*lp; lp++)
679 /* Find that bit, and tweak it */
688 /* If there are no more free, remove from free-list */
690 page_dir[j] = bp->next;
694 /* Adjust to the real offset of that chunk */
695 k += (lp-bp->bits)*MALLOC_BITS;
/* 'J' option: poison the chunk before handing it out. */
699 memset((u_char*)bp->page + k, SOME_JUNK, bp->size);
701 return (u_char *)bp->page + k;
705 * Allocate a piece of memory: dispatch to the chunk allocator for
 * requests up to half a page, the page allocator otherwise.
/* Guard against size_t wraparound from the page rounding below. */
715 if ((size + malloc_pagesize) < size) /* Check for overflow */
717 else if (size <= malloc_maxsize)
718 result = malloc_bytes(size);
720 result = malloc_pages(size);
/* 'Z' option: the caller-visible bytes are zeroed (junk fill, if any,
 * happened over the whole chunk inside the allocators). */
722 if (malloc_zero && result)
723 memset(result, 0, size);
729 * Change the size of an allocation. Validates the pointer against the
 * page directory, discovers the old size (page run or chunk), returns
 * the pointer unchanged when the existing storage still fits well
 * enough, otherwise allocates new storage and copies the lesser size.
732 irealloc(void *ptr, size_t size)
742 index = ptr2index(ptr);
/* Pointers mapping below the bucket-head slots cannot be ours. */
744 if (index < malloc_pageshift) {
745 wrtwarning("junk pointer, too low to make sense\n");
749 if (index > last_index) {
750 wrtwarning("junk pointer, too high to make sense\n");
754 mp = &page_dir[index];
756 if (*mp == MALLOC_FIRST) { /* Page allocation */
758 /* Check the pointer */
759 if ((u_long)ptr & malloc_pagemask) {
760 wrtwarning("modified (page-) pointer\n");
764 /* Find the size in bytes */
765 for (osize = malloc_pagesize; *++mp == MALLOC_FOLLOW;)
766 osize += malloc_pagesize;
768 if (!malloc_realloc && /* unless we have to, */
769 size <= osize && /* .. or are too small, */
770 size > (osize - malloc_pagesize)) { /* .. or can free a page, */
771 return ptr; /* don't do anything. */
774 } else if (*mp >= MALLOC_MAGIC) { /* Chunk allocation */
776 /* Check the pointer for sane values */
777 if (((u_long)ptr & ((*mp)->size-1))) {
778 wrtwarning("modified (chunk-) pointer\n");
782 /* Find the chunk index in the page */
783 i = ((u_long)ptr & malloc_pagemask) >> (*mp)->shift;
785 /* Verify that it isn't a free chunk already */
786 if ((*mp)->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
787 wrtwarning("chunk is already free\n");
793 if (!malloc_realloc && /* Unless we have to, */
794 size < osize && /* ..or are too small, */
795 (size > osize/2 || /* ..or could use a smaller size, */
796 osize == malloc_minsize)) { /* ..(if there is one) */
797 return ptr; /* ..Don't do anything */
801 wrtwarning("pointer to wrong page\n");
808 /* copy the lesser of the two sizes, and free the old one */
811 else if (osize < size)
812 memcpy(p, ptr, osize);
814 memcpy(p, ptr, size);
821 * Free a sequence of pages: mark the run MALLOC_FREE in page_dir,
 * optionally junk-fill and madvise it away, then merge it into the
 * address-ordered free list, coalescing with neighbours. Finally,
 * if the freed run sits at the top of the brk, give the excess
 * beyond the cache size back to the OS.
824 static __inline__ void
825 free_pages(void *ptr, u_long index, struct pginfo *info)
828 struct pgfree *pf, *pt=0;
832 if (info == MALLOC_FREE) {
833 wrtwarning("page is already free\n");
837 if (info != MALLOC_FIRST) {
838 wrtwarning("pointer to wrong page\n");
842 if ((u_long)ptr & malloc_pagemask) {
843 wrtwarning("modified (page-) pointer\n");
847 /* Count how many pages and mark them free at the same time */
848 page_dir[index] = MALLOC_FREE;
849 for (i = 1; page_dir[index+i] == MALLOC_FOLLOW; i++)
850 page_dir[index + i] = MALLOC_FREE;
852 l = i << malloc_pageshift;
/* 'J' option: poison freed memory. */
855 memset(ptr, SOME_JUNK, l);
/* 'H' option: tell the kernel we no longer need the contents. */
858 madvise(ptr, l, MADV_FREE);
860 tail = (char *)ptr+l;
862 /* add to free-list */
/* Refill the one-entry holder cache; px is consumed below. */
864 px = imalloc(sizeof *pt); /* This cannot fail... */
868 if (!free_list.next) {
870 /* Nothing on free list, put this at head */
871 px->next = free_list.next;
872 px->prev = &free_list;
879 /* Find the right spot, leave pf pointing to the modified entry. */
880 tail = (char *)ptr+l;
882 for(pf = free_list.next; pf->end < ptr && pf->next; pf = pf->next)
883 ; /* Race ahead here */
885 if (pf->page > tail) {
886 /* Insert before entry */
893 } else if (pf->end == ptr ) {
894 /* Append to the previous entry */
895 pf->end = (char *)pf->end + l;
897 if (pf->next && pf->end == pf->next->page ) {
898 /* And collapse the next too. */
901 pf->size += pt->size;
906 } else if (pf->page == tail) {
907 /* Prepend to entry */
910 } else if (!pf->next) {
911 /* Append at tail of chain */
918 wrterror("freelist is destroyed\n");
922 /* Return something to OS ? */
923 if (!pf->next && /* If we're the last one, */
924 pf->size > malloc_cache && /* ..and the cache is full, */
925 pf->end == malloc_brk && /* ..and none behind us, */
926 malloc_brk == sbrk(0)) { /* ..and it's OK to do... */
929 * Keep the cache intact. Notice that the '>' above guarantees that
930 * the pf will always have at least one page afterwards.
932 pf->end = (char *)pf->page + malloc_cache;
933 pf->size = malloc_cache;
936 malloc_brk = pf->end;
938 index = ptr2index(pf->end);
939 last_index = index - 1;
/* Mark the released pages as no longer ours.
 * NOTE(review): with last_index = index - 1 this loop body looks
 * unreachable as visible here; the controlling bounds may be set
 * differently on elided lines — confirm against the full source. */
941 for(i=index;i <= last_index;)
942 page_dir[i++] = MALLOC_NOT_MINE;
944 /* XXX: We could realloc/shrink the pagedir here I guess. */
951 * Free a chunk, and possibly the page it's on, if the page becomes empty.
 * Sets the chunk's bit in the page's bitmap, maintains the per-size
 * bucket list (re-inserting a page that was full, unlinking one that
 * became empty), and releases an empty page back to the page allocator.
954 static __inline__ void
955 free_bytes(void *ptr, u_long index, struct pginfo *info)
961 /* Find the chunk number on the page */
962 i = ((u_long)ptr & malloc_pagemask) >> info->shift;
/* Pointer must be chunk-aligned. */
964 if (((u_long)ptr & (info->size-1))) {
965 wrtwarning("modified (chunk-) pointer\n");
969 if (info->bits[i/MALLOC_BITS] & (1<<(i%MALLOC_BITS))) {
970 wrtwarning("chunk is already free\n");
/* 'J' option: poison the freed chunk. */
975 memset(ptr, SOME_JUNK, info->size);
977 info->bits[i/MALLOC_BITS] |= 1<<(i%MALLOC_BITS);
/* Bucket heads live in the first page_dir slots, indexed by shift. */
980 mp = page_dir + info->shift;
982 if (info->free == 1) {
984 /* Page became non-full */
986 mp = page_dir + info->shift;
987 /* Insert in address order */
988 while (*mp && (*mp)->next && (*mp)->next->page < info->page)
/* Only a completely free page is taken off the bucket list. */
995 if (info->free != info->total)
998 /* Find & remove this page in the queue */
999 while (*mp != info) {
1000 mp = &((*mp)->next);
1003 wrterror("(ES): Not on queue\n");
1004 #endif /* EXTRA_SANITY */
1008 /* Free the page & the info structure if need be */
1009 page_dir[ptr2index(info->page)] = MALLOC_FIRST;
1010 vp = info->page; /* Order is important ! */
/* If the pginfo header was allocated separately (not on the page
 * itself), it must be freed too. */
1011 if(vp != (void*)info)
/* Body of ifree(): validate the pointer against the page directory and
 * dispatch to free_pages() for page runs (sentinel entries below
 * MALLOC_MAGIC) or free_bytes() for chunk pages (real pginfo pointers). */
1019 struct pginfo *info;
1026 if (!malloc_started) {
1027 wrtwarning("malloc() has never been called\n");
1031 /* If we're already sinking, don't make matters any worse. */
1035 index = ptr2index(ptr);
/* Pointers mapping below the bucket-head slots cannot be ours. */
1037 if (index < malloc_pageshift) {
1038 wrtwarning("junk pointer, too low to make sense\n");
1042 if (index > last_index) {
1043 wrtwarning("junk pointer, too high to make sense\n");
1047 info = page_dir[index];
1049 if (info < MALLOC_MAGIC)
1050 free_pages(ptr, index, info);
1052 free_bytes(ptr, index, info);
1057 * These are the public exported interface routines. Each wrapper sets
 * malloc_func for diagnostics, guards against recursive entry via
 * malloc_active, and delegates to the internal i*() routine
 * (locking/unlocking and the actual call are on elided lines).
1067 malloc_func = " in malloc():";
1068 if (malloc_active++) {
1069 wrtwarning("recursive call\n");
1074 if (!malloc_started)
/* 'V' option: SysV semantics — malloc(0) returns NULL. */
1076 if (malloc_sysv && !size)
/* 'X' option: treat allocation failure as fatal. */
1083 if (malloc_xmalloc && !r)
1084 wrterror("out of memory\n");
/* Public free() wrapper: same recursion guard as malloc() above;
 * the delegation to ifree() is on elided lines. */
1092 malloc_func = " in free():";
1093 if (malloc_active++) {
1094 wrtwarning("recursive call\n");
/* Public realloc() wrapper: recursion guard, lazy initialization,
 * SysV realloc(ptr, 0) handling, then delegation to irealloc(). */
1108 realloc(void *ptr, size_t size)
1114 malloc_func = " in realloc():";
1115 if (malloc_active++) {
1116 wrtwarning("recursive call\n");
/* A non-NULL ptr before any malloc() cannot be one of ours. */
1121 if (ptr && !malloc_started) {
1122 wrtwarning("malloc() has never been called\n");
1125 if (!malloc_started)
/* 'V' option: realloc(ptr, 0) frees and returns NULL (free path elided). */
1127 if (malloc_sysv && !size) {
1134 r = irealloc(ptr, size);
1137 UTRACE(ptr, size, r);
/* 'X' option: treat failure as fatal. */
1140 if (malloc_xmalloc && err)
1141 wrterror("out of memory\n");