/*
 * KERN_SLABALLOC.C	- Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.26 2005/03/28 14:27:37 joerg Exp $
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *
 * API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */
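
/*
 * Worked example of the chunking table above (illustrative sketch, not
 * part of the allocator): a 100-byte request falls in the 0-127 band,
 * so it is rounded up to the next multiple of 8 (104) and served from
 * the zone dedicated to 104-byte chunks.  In code terms, using the
 * zoneindex() helper defined below:
 *
 *	unsigned long size = 100;
 *	int zi = zoneindex(&size);	(size becomes 104, zi == 12)
 *
 * Requests of ZoneLimit bytes or more never touch a zone at all and go
 * straight to the kmem_slab_alloc() backend.
 */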

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>

#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static int ZoneMask;
static struct malloc_type *kmemstatistics;
static struct kmemusage *kmemusage;
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH	2		/* threshold number of zones */
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
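
/*
 * Example of the IN_SAME_PAGE_MASK sanity check (illustrative, assuming
 * 4K pages): the mask keeps the page-frame bits plus the low alignment
 * bits, so two properly aligned chunks within one page compare equal
 * under it while a stray pointer does not:
 *
 *	(0x1008 & IN_SAME_PAGE_MASK) == 0x1000
 *	(0x1ff8 & IN_SAME_PAGE_MASK) == 0x1000	same page, aligned: match
 *	(0x2008 & IN_SAME_PAGE_MASK) == 0x2000	different page: mismatch
 *
 * This is how malloc() below validates that a chunk's c_Next pointer has
 * not been corrupted.
 */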

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)
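
/*
 * Sketch of the zero-length convention (hypothetical caller): malloc(0)
 * must return something distinguishable from NULL, and free() must
 * accept it back:
 *
 *	void *p = malloc(0, M_TEMP, M_WAITOK);	p == ZERO_LENGTH_PTR
 *	free(p, M_TEMP);			recognized, no-op
 *
 * ZERO_LENGTH_PTR is an intentionally invalid address near the top of
 * the address space, chosen so that any actual dereference should fault.
 */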

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static void
kmeminit(void *dummy)
{
    vm_poff_t limsize;
    int usesize;
    int i;
    vm_pindex_t npg;

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
        limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;

    usesize = (int)(limsize / 1024);	/* convert to KB */

    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
        ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
        ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ZoneSize - 1;
    ZonePageCount = ZoneSize / PAGE_SIZE;

    npg = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE;
    kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage),
                                PAGE_SIZE, M_WAITOK|M_ZERO);

    for (i = 0; i < arysize(weirdary); ++i)
        weirdary[i] = WEIRD_ADDR;

    if (bootverbose)
        printf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}
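
/*
 * Worked example of the sizing above (illustrative, using the typical
 * 32K/128K bounds from slaballoc.h): with 256MB of physical memory,
 * usesize = 262144 (KB), so ZoneSize doubles from 32K up to its 128K
 * cap, ZoneLimit = 128K / 4 is then clipped to ZALLOC_ZONE_LIMIT, and
 * with 4K pages ZonePageCount = 128K / 4K = 32.
 */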

/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    vm_poff_t limsize;

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (type->ks_limit != 0)
        return;

    if (vmstats.v_page_count == 0)
        panic("malloc_init not allowed before vm init");

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
        limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
    type->ks_limit = limsize / 10;

    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}

void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
#ifdef INVARIANTS
    int i;
    long ttl;
#endif

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
        panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
        panic("malloc_uninit on uninitialized type");

#ifdef INVARIANTS
    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
        ttl += type->ks_memuse[i];
    if (ttl) {
        printf("malloc_uninit: %ld bytes of '%s' still allocated\n",
            ttl, type->ks_shortdesc);
    }
#endif
    if (type == kmemstatistics) {
        kmemstatistics = type->ks_next;
    } else {
        for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
            if (t->ks_next == type) {
                t->ks_next = type->ks_next;
                break;
            }
        }
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes)
{
    unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
    if (n < 128) {
        *bytes = n = (n + 7) & ~7;
        return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
        *bytes = n = (n + 15) & ~15;
        return(n / 16 + 7);
    }
    if (n < 8192) {
        if (n < 512) {
            *bytes = n = (n + 31) & ~31;
            return(n / 32 + 15);
        }
        if (n < 1024) {
            *bytes = n = (n + 63) & ~63;
            return(n / 64 + 23);
        }
        if (n < 2048) {
            *bytes = n = (n + 127) & ~127;
            return(n / 128 + 31);
        }
        if (n < 4096) {
            *bytes = n = (n + 255) & ~255;
            return(n / 256 + 39);
        }
        *bytes = n = (n + 511) & ~511;
        return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
        *bytes = n = (n + 1023) & ~1023;
        return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
        *bytes = n = (n + 2047) & ~2047;
        return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}
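
/*
 * Worked examples for zoneindex() (illustrative): the function both
 * returns the zone index and rounds the request up to the chunk size
 * through its *bytes argument.
 *
 *	size = 100;  zi = zoneindex(&size);	size == 104,  zi == 12
 *	size = 200;  zi = zoneindex(&size);	size == 208,  zi == 20
 *	size = 1500; zi = zoneindex(&size);	size == 1536, zi == 43
 *
 * These agree with the Alloc Size / Chunking table in the header comment.
 */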

/*
 * malloc()	(SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 */
void *
malloc(unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int zi;

    gd = mycpu;
    slgd = &gd->gd_slab;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
        crit_enter();
        if (type->ks_limit == 0)
            malloc_init(type);
        crit_exit();
    }
    ++type->ks_calls;

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  XXX the original malloc code looped, but this tended to
     * simply deadlock the computer.
     */
    while (type->ks_loosememuse >= type->ks_limit) {
        int i;
        long ttl;

        for (i = ttl = 0; i < ncpus; ++i)
            ttl += type->ks_memuse[i];
        type->ks_loosememuse = ttl;
        if (ttl >= type->ks_limit) {
            if (flags & (M_RNOWAIT|M_NULLOK))
                return(NULL);
            panic("%s: malloc limit exceeded", type->ks_shortdesc);
        }
    }

    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0)
        return(ZERO_LENGTH_PTR);

    /*
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
     */
    while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if (slgd->NFreeZones > ZONE_RELS_THRESH) {	/* crit sect race */
            z = slgd->FreeZones;
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            kmem_slab_free(z, ZoneSize);	/* may block */
        }
        crit_exit();
    }
    /*
     * XXX handle oversized frees that were queued from free().
     */
    while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if ((z = slgd->FreeOvZones) != NULL) {
            KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
            slgd->FreeOvZones = z->z_Next;
            kmem_slab_free(z, z->z_ChunkSize);	/* may block */
        }
        crit_exit();
    }

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * Guarantee page alignment for allocations in multiples of PAGE_SIZE.
     */
    if (size >= ZoneLimit || (size & PAGE_MASK) == 0) {
        struct kmemusage *kup;

        size = round_page(size);
        chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
        if (chunk == NULL)
            return(NULL);
        flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
        flags |= M_PASSIVE_ZERO;
        kup = btokup(chunk);
        kup->ku_pagecnt = size / PAGE_SIZE;
        kup->ku_cpu = gd->gd_cpuid;
        crit_enter();
        goto done;
    }

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    KKASSERT(zi < NZONES);
    crit_enter();
    if ((z = slgd->ZoneAry[zi]) != NULL) {
        KKASSERT(z->z_NFree > 0);

        /*
         * Remove us from the ZoneAry[] when we become empty
         */
        if (--z->z_NFree == 0) {
            slgd->ZoneAry[zi] = z->z_Next;
            z->z_Next = NULL;
        }

        /*
         * Locate a chunk in a free page.  This attempts to localize
         * reallocations into earlier pages without us having to sort
         * the chunk list.  A chunk may still overlap a page boundary.
         */
        while (z->z_FirstFreePg < ZonePageCount) {
            if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
#ifdef DIAGNOSTIC
                /*
                 * Diagnostic: c_Next is not total garbage.
                 */
                KKASSERT(chunk->c_Next == NULL ||
                    ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
                    ((intptr_t)chunk & IN_SAME_PAGE_MASK));
#endif
#ifdef INVARIANTS
                if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
                    panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount);
                if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
                    panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount);
#endif
                z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
                goto done;
            }
            ++z->z_FirstFreePg;
        }

        /*
         * No chunks are available but NFree said we had some memory, so
         * it must be available in the never-before-used-memory area
         * governed by UIndex.  The consequences are very serious if our zone
         * got corrupted so we use an explicit panic rather than a KASSERT.
         */
        if (z->z_UIndex + 1 != z->z_NMax)
            z->z_UIndex = z->z_UIndex + 1;
        else
            z->z_UIndex = 0;
        if (z->z_UIndex == z->z_UEndIndex)
            panic("slaballoc: corrupted zone");
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;
            flags |= M_PASSIVE_ZERO;
        }
        goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
     * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
     * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
     * we do not pre-zero it because we do not want to mess up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
        int off;

        if ((z = slgd->FreeZones) != NULL) {
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            bzero(z, sizeof(SLZone));
            z->z_Flags |= SLZF_UNOTZEROD;
        } else {
            z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
            if (z == NULL)
                goto fail;
        }

        /*
         * Guarantee power-of-2 alignment for power-of-2-sized chunks.
         * Otherwise just 8-byte align the data.
         */
        if ((size | (size - 1)) + 1 == (size << 1))
            off = (sizeof(SLZone) + size - 1) & ~(size - 1);
        else
            off = (sizeof(SLZone) + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
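        /*
         * (Worked check of the power-of-2 test above, for illustration:
         * for size 64, (64|63)+1 == 128 == 64<<1, so the test fires and
         * chunks are aligned to a 64-byte boundary; for size 96,
         * (96|95)+1 == 128 != 192, so only the default 8-byte alignment
         * is applied.)
         */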
        z->z_Magic = ZALLOC_SLAB_MAGIC;
        z->z_ZoneIndex = zi;
        z->z_NMax = (ZoneSize - off) / size;
        z->z_NFree = z->z_NMax - 1;
        z->z_BasePtr = (char *)z + off;
        z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
        z->z_ChunkSize = size;
        z->z_FirstFreePg = ZonePageCount;
        z->z_CpuGd = gd;
        z->z_Cpu = gd->gd_cpuid;
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        z->z_Next = slgd->ZoneAry[zi];
        slgd->ZoneAry[zi] = z;
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;	/* already zero'd */
            flags |= M_PASSIVE_ZERO;
        }

        /*
         * Slide the base index for initial allocations out of the next
         * zone we create so we do not over-weight the lower part of the
         * cpu memory caches.
         */
        slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
                            & (ZALLOC_MAX_ZONE_SIZE - 1);
    }
done:
    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;
    crit_exit();
    if (flags & M_ZERO)
        bzero(chunk, size);
#ifdef INVARIANTS
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0)
        chunk->c_Next = (void *)-1;	/* avoid accidental double-free check */
#endif
    return(chunk);
fail:
    crit_exit();
    return(NULL);
}
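
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * the flags select the blocking behavior documented above.
 *
 *	void *buf;
 *
 *	buf = malloc(300, M_TEMP, M_WAITOK | M_ZERO);	320-byte chunk,
 *							zeroed, never NULL
 *	free(buf, M_TEMP);
 *
 *	buf = malloc(8192, M_TEMP, M_RNOWAIT);	PAGE_SIZE multiple, served
 *						by kmem, may return NULL
 *	if (buf != NULL)
 *		free(buf, M_TEMP);
 */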

void *
realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    void *nptr;
    unsigned long osize;

    KKASSERT((flags & M_ZERO) == 0);	/* not supported */

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
        return(malloc(size, type, flags));
    if (size == 0) {
        free(ptr, type);
        return(NULL);
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     */
    {
        struct kmemusage *kup;

        kup = btokup(ptr);
        if (kup->ku_pagecnt) {
            osize = kup->ku_pagecnt << PAGE_SHIFT;
            if (osize == round_page(size))
                return(ptr);
            if ((nptr = malloc(size, type, flags)) == NULL)
                return(NULL);
            bcopy(ptr, nptr, min(size, osize));
            free(ptr, type);
            return(nptr);
        }
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    zoneindex(&size);
    if (z->z_ChunkSize == size)
        return(ptr);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     */
    if ((nptr = malloc(size, type, flags)) == NULL)
        return(NULL);
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
    free(ptr, type);
    return(nptr);
}
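
/*
 * Example of the same-chunk short cut above (illustrative): growing a
 * 100-byte allocation to 104 bytes is free, because both sizes round to
 * the same 104-byte chunk and realloc() returns the original pointer.
 * Growing it to 200 bytes moves to a 208-byte chunk: the old chunk's
 * 104 bytes are copied and the original is freed.
 */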

char *
strdup(const char *str, struct malloc_type *type)
{
    int zlen;	/* length inclusive of terminating NUL */
    char *nstr;

    if (str == NULL)
        return(NULL);
    zlen = strlen(str) + 1;
    nstr = malloc(zlen, type, M_WAITOK);
    bcopy(str, nstr, zlen);
    return(nstr);
}

#ifdef SMP
/*
 * free()	(SLAB ALLOCATOR)
 *
 *	Free the specified chunk of memory.
 */
static
void
free_remote(void *ptr)
{
    free(ptr, *(struct malloc_type **)ptr);
}

#endif

void
free(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int pgno;

    gd = mycpu;
    slgd = &gd->gd_slab;

    if (ptr == NULL)
        panic("trying to free NULL pointer");

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR)
        return;

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    {
        struct kmemusage *kup;
        unsigned long size;

        kup = btokup(ptr);
        if (kup->ku_pagecnt) {
            size = kup->ku_pagecnt << PAGE_SHIFT;
            kup->ku_pagecnt = 0;
#ifdef INVARIANTS
            KKASSERT(sizeof(weirdary) <= size);
            bcopy(weirdary, ptr, sizeof(weirdary));
#endif
            /*
             * note: we always adjust our cpu's slot, not the originating
             * cpu (kup->ku_cpu).  The statistics are in aggregate.
             *
             * note: XXX we have still inherited the interrupts-can't-block
             * assumption.  An interrupt thread does not bump
             * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
             * primarily until we can fix softupdate's assumptions about free().
             */
            crit_enter();
            --type->ks_inuse[gd->gd_cpuid];
            type->ks_memuse[gd->gd_cpuid] -= size;
            if (mycpu->gd_intr_nesting_level ||
                (gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
                z = (SLZone *)ptr;
                z->z_Magic = ZALLOC_OVSZ_MAGIC;
                z->z_Next = slgd->FreeOvZones;
                z->z_ChunkSize = size;
                slgd->FreeOvZones = z;
                crit_exit();
            } else {
                crit_exit();
                kmem_slab_free(ptr, size);	/* may block */
            }
            return;
        }
    }

    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * If we do not own the zone then forward the request to the
     * cpu that does.
     */
    if (z->z_CpuGd != gd) {
        *(struct malloc_type **)ptr = type;
#ifdef SMP
        lwkt_send_ipiq(z->z_CpuGd, free_remote, ptr);
#else
        panic("Corrupt SLZone");
#endif
        return;
    }

    if (type->ks_magic != M_MAGIC)
        panic("free: malloc type lacks magic");

    crit_enter();
    pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
    chunk = ptr;

#ifdef INVARIANTS
    /*
     * Attempt to detect a double-free.  To reduce overhead we only check
     * if there appears to be a link pointer at the base of the data.
     */
    if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
        SLChunk *scan;
        for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
            if (scan == chunk)
                panic("Double free at %p", chunk);
        }
    }
#endif

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
#ifdef INVARIANTS
    if (z->z_ChunkSize < sizeof(weirdary))
        bcopy(weirdary, chunk, z->z_ChunkSize);
    else
        bcopy(weirdary, chunk, sizeof(weirdary));
#endif

    /*
     * Add this free non-zero'd chunk to a linked list for reuse, adjust
     * z_FirstFreePg.
     */
#ifdef INVARIANTS
    if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
        panic("BADFREE %p", chunk);
#endif
    chunk->c_Next = z->z_PageAry[pgno];
    z->z_PageAry[pgno] = chunk;
#ifdef INVARIANTS
    if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
        panic("BADFREE2");
#endif
    if (z->z_FirstFreePg > pgno)
        z->z_FirstFreePg = pgno;

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
        z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
        slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_NFree == z->z_NMax &&
        (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
    ) {
        SLZone **pz;

        for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
            ;
        *pz = z->z_Next;
        z->z_Magic = -1;
        z->z_Next = slgd->FreeZones;
        slgd->FreeZones = z;
        ++slgd->NFreeZones;
    }
    crit_exit();
}
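
/*
 * Sketch of the cross-cpu path above (illustrative): if cpu 1 frees a
 * pointer whose zone is owned by cpu 0, free() stashes the malloc type
 * in the chunk itself and forwards the chunk via an asynchronous IPI,
 * so the list manipulation always happens on the owning cpu:
 *
 *	*(struct malloc_type **)ptr = type;
 *	lwkt_send_ipiq(z->z_CpuGd, free_remote, ptr);
 *
 * free_remote() then simply re-invokes free() on the owning cpu, where
 * the z_CpuGd test succeeds and the normal zone path runs.
 */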

/*
 * kmem_slab_alloc()
 *
 *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *	specified alignment.  M_* flags are expected in the flags field.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *	but when we move zalloc() over to use this function as its backend
 *	we will have to switch to kreserve/krelease and call reserve(0)
 *	after the new space is made available.
 *
 *	Interrupt code which has preempted other code is not allowed to
 *	use PQ_CACHE pages.  However, if an interrupt thread is run
 *	non-preemptively or blocks and then runs non-preemptively, then
 *	it is free to use PQ_CACHE pages.
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
    vm_size_t i;
    vm_offset_t addr;
    vm_offset_t offset;
    int count;
    thread_t td;
    vm_map_t map = kernel_map;

    size = round_page(size);
    addr = vm_map_min(map);

    /*
     * Reserve properly aligned space from kernel_map
     */
    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
    crit_enter();
    vm_map_lock(map);
    if (vm_map_findspace(map, vm_map_min(map), size, align, &addr)) {
        vm_map_unlock(map);
        if ((flags & (M_RNOWAIT|M_NULLOK)) == 0)
            panic("kmem_slab_alloc(): kernel_map ran out of space!");
        crit_exit();
        vm_map_entry_release(count);
        return(NULL);
    }
    offset = addr - VM_MIN_KERNEL_ADDRESS;
    vm_object_reference(kernel_object);
    vm_map_insert(map, &count,
                    kernel_object, offset, addr, addr + size,
                    VM_PROT_ALL, VM_PROT_ALL, 0);

    td = curthread;

    /*
     * Allocate the pages.  Do not mess with the PG_ZERO flag yet.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
        vm_page_t m;
        vm_pindex_t idx = OFF_TO_IDX(offset + i);
        int vmflags = 0;

        if (flags & M_ZERO)
            vmflags |= VM_ALLOC_ZERO;
        if (flags & M_USE_RESERVE)
            vmflags |= VM_ALLOC_SYSTEM;
        if (flags & M_USE_INTERRUPT_RESERVE)
            vmflags |= VM_ALLOC_INTERRUPT;
        if ((flags & (M_RNOWAIT|M_WAITOK)) == 0)
            panic("kmem_slab_alloc: bad flags %08x (%p)", flags, ((int **)&size)[-1]);

        /*
         * VM_ALLOC_NORMAL can only be set if we are not preempting.
         *
         * VM_ALLOC_SYSTEM is automatically set if we are preempting and
         * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
         * implied in this case), though I'm not sure if we really need to
         * do that.
         */
        if (flags & M_WAITOK) {
            if (td->td_preempted) {
                vmflags |= VM_ALLOC_SYSTEM;
            } else {
                vmflags |= VM_ALLOC_NORMAL;
            }
        }

        m = vm_page_alloc(kernel_object, idx, vmflags);

        /*
         * If the allocation failed we either return NULL or we retry.
         *
         * If M_WAITOK is specified we wait for more memory and retry.
         * If M_WAITOK is specified from a preemption we yield instead of
         * wait.  Livelock will not occur because the interrupt thread
         * will not be preempting anyone the second time around after the
         * yield.
         */
        if (m == NULL) {
            if (flags & M_WAITOK) {
                if (td->td_preempted) {
                    vm_map_unlock(map);
                    lwkt_yield();
                    vm_map_lock(map);
                } else {
                    vm_map_unlock(map);
                    vm_wait();
                    vm_map_lock(map);
                }
                i -= PAGE_SIZE;	/* retry */
                continue;
            }

            /*
             * We were unable to recover, cleanup and return NULL
             */
            while (i != 0) {
                i -= PAGE_SIZE;
                m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
                vm_page_free(m);
            }
            vm_map_delete(map, addr, addr + size, &count);
            vm_map_unlock(map);
            crit_exit();
            vm_map_entry_release(count);
            return(NULL);
        }
    }

    /*
     * Success!
     *
     * Mark the map entry as non-pageable using a routine that allows us to
     * populate the underlying pages.
     */
    vm_map_set_wired_quick(map, addr, size, &count);
    crit_exit();

    /*
     * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
        vm_page_t m;

        m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
        m->valid = VM_PAGE_BITS_ALL;
        vm_page_wire(m);
        vm_page_wakeup(m);
        pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
        if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
            bzero((char *)addr + i, PAGE_SIZE);
        vm_page_flag_clear(m, PG_ZERO);
        vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
    }
    vm_map_unlock(map);
    vm_map_entry_release(count);
    return((void *)addr);
}
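
/*
 * Illustrative internal usage (sketch): this is how the zone code above
 * obtains its backing store.
 *
 *	z = kmem_slab_alloc(ZoneSize, ZoneSize, flags | M_ZERO);
 *
 * The result is ZoneSize-aligned and wired.  With M_WAITOK the routine
 * waits (or yields, when preempting) instead of failing, so NULL is only
 * returned for M_RNOWAIT/M_NULLOK style requests.
 */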

static void
kmem_slab_free(void *ptr, vm_size_t size)
{
    crit_enter();
    vm_map_remove(kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
    crit_exit();
}