/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/kern_malloc.c,v 1.64.2.5 2002/03/16 02:19:51 archie Exp $
 * $DragonFly: src/sys/kern/Attic/kern_malloc.c,v 1.11 2003/08/26 21:09:02 rob Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif
/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
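/*
 * For example, with the default REALLOC_FRACTION of 1 and a block that
 * currently occupies a 1024-byte bucket, a realloc() down to 512 bytes
 * or less moves the data into a smaller allocation, while a request in
 * the 513-1024 byte range simply reuses the existing block.
 */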
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

static void kmeminit (void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");
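/*
 * Illustrative sketch (not part of this file): a subsystem that wants its
 * own accounting would declare a type of its own (M_FOO here is a
 * hypothetical name) and pass it to malloc()/free(), so the per-type
 * statistics and limits maintained below apply to it:
 *
 *	MALLOC_DECLARE(M_FOO);				(in a header)
 *	MALLOC_DEFINE(M_FOO, "foo", "foo structures");	(in one .c file)
 *
 *	p = malloc(sizeof(*p), M_FOO, M_WAITOK);
 *	...
 *	free(p, M_FOO);
 */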
static struct malloc_type *kmemstatistics;
static struct kmembuckets bucket[MINBUCKET + 16];
static struct kmemusage *kmemusage;
#if defined(NO_KMEM_MAP)
static const char *kmembase = (char *)VM_MIN_KERNEL_ADDRESS;
static const char *kmemlimit = (char *)VM_MAX_KERNEL_ADDRESS;
#else
static char *kmembase;
static char *kmemlimit;
#endif

#if !defined(NO_KMEM_MAP)
u_int vm_kmem_size;
#endif
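/*
 * bucket[] holds one freelist per power-of-two size class, kmemusage[]
 * keeps per-page bookkeeping (bucket index, page count, free-element
 * count) for every page in the malloc arena, and kmembase/kmemlimit
 * bound the virtual addresses that free() and realloc() will accept.
 */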
#ifdef INVARIANTS
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
static long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	64

/*
 * Normally the first word of the structure is used to hold the list
 * pointer for free objects. However, when running with diagnostics,
 * we use the third and fourth fields, so as to catch modifications
 * in the most commonly trashed first two words.
 */
struct freelist {
	long	spare0;
	struct malloc_type *type;
	long	spare1;
	caddr_t	next;
};
#else /* !INVARIANTS */
struct freelist {
	caddr_t	next;
};
#endif /* INVARIANTS */
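/*
 * With INVARIANTS, every element on a freelist is filled with WEIRD_ADDR
 * and freep->type remembers the type it was freed as.  When malloc()
 * later hands the element out and finds a word that is no longer
 * WEIRD_ADDR, something wrote through a stale pointer after the free;
 * the saved type is printed as the likely culprit.
 */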
/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
malloc(size, type, flags)
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long indx, npg, allocsize;
	caddr_t va, cp, savedlist;
#ifdef INVARIANTS
	long *end, *lp;
	int copysize;
	const char *savedtype;
#endif
	struct malloc_type *ksp = type;

#if defined(INVARIANTS)
	if (mycpu->gd_intr_nesting_level)
		printf("WARNING: malloc() called from FASTint or ipiq, from %p\n",
		    ((int **)&size)[-1]);
	if (flags == M_WAITOK) {
		KASSERT(mycpu->gd_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
	}
#endif
	/*
	 * Must be at splmem() prior to initializing segment to handle
	 * potential initialization race.
	 */
	if (type->ks_limit == 0)
		malloc_init(type);

	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, 0, type->ks_shortdesc, 0);
	}
	ksp->ks_size |= 1 << indx;
#ifdef INVARIANTS
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
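	/*
	 * BUCKETINDX(size) picks the smallest power-of-two bucket that fits
	 * the request; for example, assuming a 16-byte minimum allocation,
	 * a 100-byte request is served out of the 128-byte bucket.
	 */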
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = roundup(size, PAGE_SIZE);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);
#if defined(NO_KMEM_MAP)
		va = (caddr_t) kmem_malloc(kernel_map,
			    (vm_size_t)ctob(npg), flags);
#else
		va = (caddr_t) kmem_malloc(kmem_map,
			    (vm_size_t)ctob(npg), flags);
#endif
		if (va == NULL)
			return ((void *) NULL);
		kbp->kb_total += kbp->kb_elmpercl;
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
			ksp->ks_memuse += allocsize;
			goto out;
		}
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
		kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef INVARIANTS
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (long *)&cp[copysize];
			for (lp = (long *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* INVARIANTS */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
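	/*
	 * The loop above carves the fresh pages into equal-sized elements
	 * from the end of the run back toward its start, threading each
	 * element onto the one below it, so the whole run ends up linked
	 * onto kbp->kb_next in a single pass.
	 */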
	va = kbp->kb_next;
	if (va == NULL) {
		if (flags & M_NOWAIT) {
			return ((void *) NULL);
		}
		panic("malloc: out of memory");
	}
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef INVARIANTS
	freep = (struct freelist *)va;
	savedtype = (const char *) freep->type->ks_shortdesc;
#if BYTE_ORDER == BIG_ENDIAN
	freep->type = (struct malloc_type *)(WEIRD_ADDR >> 16);
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
	freep->type = (struct malloc_type *)WEIRD_ADDR;
#endif
	if ((intptr_t)(void *)&freep->next & 0x2)
		freep->next = (caddr_t)((WEIRD_ADDR >> 16)|(WEIRD_ADDR << 16));
	else
		freep->next = (caddr_t)WEIRD_ADDR;
	end = (long *)&va[copysize];
	for (lp = (long *)va; lp < end; lp++) {
		if (*lp == WEIRD_ADDR)
			continue;
		printf("%s %ld of object %p size %lu %s %s (0x%lx != 0x%lx)\n",
			"Data modified on freelist: word",
			(long)(lp - (long *)va), (void *)va, size,
			"previous type", savedtype, *lp, (u_long)WEIRD_ADDR);
		break;
	}
	freep->spare0 = 0;
#endif /* INVARIANTS */
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
	/* XXX: Do idle pre-zeroing. */
	if (va != NULL && (flags & M_ZERO))
		bzero(va, size);
	return ((void *) va);
}
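/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 *
 *	struct foo *fp;
 *
 *	fp = malloc(sizeof(*fp), M_TEMP, M_WAITOK | M_ZERO);
 *	...
 *	free(fp, M_TEMP);
 *
 * An M_NOWAIT caller must check for a NULL return instead, since in that
 * case the routine neither sleeps for free pages nor waits for the
 * per-type limit to drop.
 */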
/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(addr, type)
	void *addr;
	struct malloc_type *type;
{
	struct kmembuckets *kbp;
	struct kmemusage *kup;
	struct freelist *freep;
	long size;
#ifdef INVARIANTS
	struct freelist *fp;
	long *end, *lp, alloc, copysize;
#endif
	struct malloc_type *ksp = type;
	if (type->ks_limit == 0)
		panic("freeing with unknown type (%s)", type->ks_shortdesc);

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	KASSERT(kmembase <= (char *)addr && (char *)addr < kmemlimit,
	    ("free: address %p out of range", (void *)addr));
	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
#ifdef INVARIANTS
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((uintptr_t)(void *)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    (void *)addr, size, type->ks_shortdesc, alloc);
#endif /* INVARIANTS */
	if (size > MAXALLOCSAVE) {
#if defined(NO_KMEM_MAP)
		kmem_free(kernel_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#else
		kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt));
#endif
		size = kup->ku_pagecnt << PAGE_SHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
		return;
	}
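	/*
	 * Allocations larger than MAXALLOCSAVE were taken straight from the
	 * VM map in malloc(), so they are returned directly to kmem_free()
	 * above instead of being cached on a bucket freelist; the wakeup
	 * unblocks any malloc() sleeping on this type's limit.
	 */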
	freep = (struct freelist *)addr;
#ifdef INVARIANTS
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		fp = (struct freelist *)kbp->kb_next;
		while (fp) {
			if (fp->spare0 != WEIRD_ADDR)
				panic("free: free item %p modified", fp);
			else if (addr == (caddr_t)fp)
				panic("free: multiple freed item %p", addr);
			fp = (struct freelist *)fp->next;
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free. Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (long *)&((caddr_t)addr)[copysize];
	for (lp = (long *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* INVARIANTS */
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#ifdef OLD_MALLOC_MEMORY_POLICY
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
#else
	/*
	 * Return memory to the head of the queue for quick reuse. This
	 * can improve performance by improving the probability of the
	 * item being in the cache when it is reused.
	 */
	if (kbp->kb_next == NULL) {
		kbp->kb_next = addr;
		kbp->kb_last = addr;
		freep->next = NULL;
	} else {
		freep->next = kbp->kb_next;
		kbp->kb_next = addr;
	}
#endif
}
/*
 * realloc: change the size of a memory block
 */
void *
realloc(addr, size, type, flags)
	void *addr;
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	struct kmemusage *kup;
	unsigned long alloc;
	void *newaddr;

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, type, flags));

	KASSERT(kmembase <= (char *)addr && (char *)addr < kmemlimit,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	kup = btokup(addr);
	alloc = 1 << kup->ku_indx;
	if (alloc > MAXALLOCSAVE)
		alloc = kup->ku_pagecnt << PAGE_SHIFT;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, type, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, type);
	return (newaddr);
}
/*
 * reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(addr, size, type, flags)
	void *addr;
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	void *mem;

	if ((mem = realloc(addr, size, type, flags)) == NULL)
		free(addr, type);
	return (mem);
}
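/*
 * Illustrative sketch (hypothetical caller): growing a buffer with
 * reallocf() avoids leaking the old block when the allocation fails,
 * because reallocf() frees it on failure:
 *
 *	buf = reallocf(buf, newsize, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *
 * With plain realloc() a NULL return leaves the original block intact
 * and the caller remains responsible for it.
 */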
/*
 * Initialize the kernel memory allocator
 */
static void
kmeminit(dummy)
	void *dummy;
{
	long indx;
	u_long npg;
	u_long mem_size;

#if ((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
#error "kmeminit: MAXALLOCSAVE not power of 2"
#endif
#if (MAXALLOCSAVE > MINALLOCSIZE * 32768)
#error "kmeminit: MAXALLOCSAVE too big"
#endif
#if (MAXALLOCSAVE < PAGE_SIZE)
#error "kmeminit: MAXALLOCSAVE too small"
#endif
	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.
	 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
	 * a VM_KMEM_SIZE of 12MB is a fair compromise. The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available, and on an X86 with a total KVA space of 256MB,
	 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
	mem_size = vmstats.v_page_count * PAGE_SIZE;

#if !defined(NO_KMEM_MAP)
	vm_kmem_size = VM_KMEM_SIZE;
#if defined(VM_KMEM_SIZE_SCALE)
	if ((mem_size / VM_KMEM_SIZE_SCALE) > vm_kmem_size)
		vm_kmem_size = mem_size / VM_KMEM_SIZE_SCALE;
#endif

#if defined(VM_KMEM_SIZE_MAX)
	if (vm_kmem_size >= VM_KMEM_SIZE_MAX)
		vm_kmem_size = VM_KMEM_SIZE_MAX;
#endif

	/* Allow final override from the kernel environment */
	TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane. Be careful to not overflow the 32bit
	 * ints while doing the check.
	 */
	if ((vm_kmem_size / 2) > (vmstats.v_page_count * PAGE_SIZE))
		vm_kmem_size = 2 * vmstats.v_page_count * PAGE_SIZE;

	npg = (nmbufs * MSIZE + nmbclusters * MCLBYTES + vm_kmem_size)
		/ PAGE_SIZE;

	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE));
	kmem_map->system_map = 1;
#else
	npg = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE;
	kmemusage = (struct kmemusage *) kmem_alloc(kernel_map,
		(vm_size_t)(npg * sizeof(struct kmemusage)));
#endif

	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
}
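/*
 * Worked example of the auto-tuning above, assuming the X86 defaults the
 * comment mentions (VM_KMEM_SIZE of 12MB, VM_KMEM_SIZE_SCALE of 4): a
 * 256MB machine gets vm_kmem_size = 256MB / 4 = 64MB, comfortably under
 * an 80MB VM_KMEM_SIZE_MAX, while a 32MB machine keeps the 12MB floor
 * since 32MB / 4 = 8MB is smaller.  Either value may still be overridden
 * by the kern.vm.kmem.size tunable and is finally clamped to twice the
 * physical memory.
 */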
void
malloc_init(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;
#if defined(NO_KMEM_MAP)
	uintptr_t limsize;
#endif

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (type->ks_limit != 0)
		return;

	if (vmstats.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	/*
	 * The default limits for each malloc region is 1/10 of available
	 * memory or 1/10 of our KVA space, whichever is lower.
	 */
#if defined(NO_KMEM_MAP)
	limsize = (uintptr_t)vmstats.v_page_count * PAGE_SIZE;
	if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
		limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
	type->ks_limit = limsize / 10;
#else
	type->ks_limit = vm_kmem_size / 2;
#endif

	type->ks_next = kmemstatistics;
	kmemstatistics = type;
}
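/*
 * For example, under NO_KMEM_MAP on a machine with 128MB of managed
 * pages (and more KVA than that), limsize is 128MB and each type's
 * ks_limit comes out to roughly 12.8MB; in the kmem_map configuration
 * the limit is instead half of the auto-tuned vm_kmem_size.
 */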
void
malloc_uninit(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;
	struct malloc_type *t;
#ifdef INVARIANTS
	struct kmembuckets *kbp;
	struct freelist *freep;
	long indx;
#endif

	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (vmstats.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type->ks_limit == 0)
		panic("malloc_uninit on uninitialized type");

#ifdef INVARIANTS
	/* Scan every bucket and disown any free elements still marked with this type */
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		kbp = bucket + indx;
		freep = (struct freelist *)kbp->kb_next;
		while (freep) {
			if (freep->type == type)
				freep->type = M_FREE;
			freep = (struct freelist *)freep->next;
		}
	}
	if (type->ks_memuse != 0)
		printf("malloc_uninit: %ld bytes of '%s' still allocated\n",
		    type->ks_memuse, type->ks_shortdesc);
#endif

	/* Unlink the type from the kmemstatistics list */
	if (type == kmemstatistics) {
		kmemstatistics = type->ks_next;
	} else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
	type->ks_next = NULL;
	type->ks_limit = 0;
}
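/*
 * Lifecycle note (summary, not new mechanism): MALLOC_DEFINE() creates the
 * statistics record for a type; it is registered through malloc_init(),
 * normally from the SYSINIT hooks that MALLOC_DEFINE() arranges, or lazily
 * by malloc() itself when it finds ks_limit == 0.  malloc_uninit() is the
 * reverse path, run for instance when a module that defined its own type
 * is unloaded.
 */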