/*
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.24 2004/07/29 08:50:09 dillon Exp $
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *  Alloc Size      Chunking        Number of zones
 *  0-127           8               16
 *  128-255         16              8
 *  256-511         32              8
 *  512-1023        64              8
 *  1024-2047       128             8
 *  2048-4095       256             8
 *  4096-8191       512             8
 *  8192-16383      1024            8
 *  16384-32767     2048            8
 *  (if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *  Allocations >= ZoneLimit go directly to kmem.
 *
 * API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */
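
/*
 * Illustrative usage sketch of the API implemented below (hypothetical
 * consumer code; "M_FOOBUF" and "struct foo" are made-up names used only
 * for this example):
 *
 *      MALLOC_DEFINE(M_FOOBUF, "foobuf", "example foo buffers");
 *
 *      struct foo *fp;
 *
 *      fp = malloc(sizeof(*fp), M_FOOBUF, M_WAITOK | M_ZERO);
 *      ...
 *      free(fp, M_FOOBUF);
 */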

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>

#define arysize(ary)    (sizeof(ary)/sizeof((ary)[0]))

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static int ZonePageLimit;
static int ZoneMask;
static struct malloc_type *kmemstatistics;
static struct kmemusage *kmemusage;
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE      8           /* in bytes */
#define MIN_CHUNK_MASK      (MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH    2           /* threshold number of zones */
#define IN_SAME_PAGE_MASK   (~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
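
/*
 * Note: IN_SAME_PAGE_MASK keeps the page-frame bits (~PAGE_MASK) plus the
 * low MIN_CHUNK_MASK bits of an address.  Comparing two chunk pointers
 * under this mask therefore checks both that they lie in the same page and
 * that they share the same sub-8-byte offset (normally 0 for aligned
 * chunks).  For example, with 4K pages on a 32-bit kernel the mask works
 * out to 0xfffff007.
 */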

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR      0xdeadc0de
#define MAX_COPY        sizeof(weirdary)
#define ZERO_LENGTH_PTR ((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static void
kmeminit(void *dummy)
{
    vm_poff_t limsize;
    int usesize;
    int i;
    vm_pindex_t npg;

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
        limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;

    usesize = (int)(limsize / 1024);    /* convert to KB */

    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
        ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
        ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ZoneSize - 1;
    ZonePageLimit = PAGE_SIZE * 4;
    ZonePageCount = ZoneSize / PAGE_SIZE;

    npg = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE;
    kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage), PAGE_SIZE, M_WAITOK|M_ZERO);

    for (i = 0; i < arysize(weirdary); ++i)
        weirdary[i] = WEIRD_ADDR;

    if (bootverbose)
        printf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}

/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    vm_poff_t limsize;

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (type->ks_limit != 0)
        return;

    if (vmstats.v_page_count == 0)
        panic("malloc_init not allowed before vm init");

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
        limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
    type->ks_limit = limsize / 10;

    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}

void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
#ifdef INVARIANTS
    int i;
    long ttl;
#endif

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
        panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
        panic("malloc_uninit on uninitialized type");

#ifdef INVARIANTS
    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another, individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
        ttl += type->ks_memuse[i];
    if (ttl) {
        printf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
            ttl, type->ks_shortdesc, i);
    }
#endif
    if (type == kmemstatistics) {
        kmemstatistics = type->ks_next;
    } else {
        for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
            if (t->ks_next == type) {
                t->ks_next = type->ks_next;
                break;
            }
        }
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes)
{
    unsigned int n = (unsigned int)*bytes;  /* unsigned for shift opt */
    if (n < 128) {
        *bytes = n = (n + 7) & ~7;
        return(n / 8 - 1);      /* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
        *bytes = n = (n + 15) & ~15;
        return(n / 16 + 7);
    }
    if (n < 8192) {
        if (n < 512) {
            *bytes = n = (n + 31) & ~31;
            return(n / 32 + 15);
        }
        if (n < 1024) {
            *bytes = n = (n + 63) & ~63;
            return(n / 64 + 23);
        }
        if (n < 2048) {
            *bytes = n = (n + 127) & ~127;
            return(n / 128 + 31);
        }
        if (n < 4096) {
            *bytes = n = (n + 255) & ~255;
            return(n / 256 + 39);
        }
        *bytes = n = (n + 511) & ~511;
        return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
        *bytes = n = (n + 1023) & ~1023;
        return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
        *bytes = n = (n + 2047) & ~2047;
        return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}
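
/*
 * Worked example of the mapping above (illustrative only): a 100 byte
 * request is rounded up to the 104 byte chunk size and maps to zone index
 * 104/8 - 1 = 12, while a 600 byte request is rounded up to 640 bytes and
 * maps to zone index 640/64 + 23 = 33.  The returned index selects the
 * per-cpu ZoneAry[] slot used by malloc() below.
 */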

/*
 * malloc()     (SLAB ALLOCATOR)
 *
 *      Allocate memory via the slab allocator.  If the request is too large,
 *      or if it is page-aligned beyond a certain size, we fall back to the
 *      KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *      &SlabMisc if you don't care.
 *
 *      M_RNOWAIT       - return NULL instead of blocking.
 *      M_ZERO          - zero the returned memory.
 *      M_USE_RESERVE   - allow greater drawdown of the free list
 *      M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 *
 *      M_FAILSAFE      - Failsafe allocation, when the allocation must
 *                        succeed, attempt to get out of any preemption
 *                        context and allocate from the cache, else block
 *                        (even though we might be blocking from an
 *                        interrupt), or panic.
 */
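
/*
 * Illustrative caller sketch for the flags above (hypothetical code, not
 * taken from any particular driver; "len", "p" and "q" are made-up names):
 *
 *      p = malloc(len, M_TEMP, M_RNOWAIT);
 *      if (p == NULL)
 *              return (ENOMEM);        (cannot block here, bail out)
 *
 *      q = malloc(len, M_TEMP, M_WAITOK | M_ZERO);
 *
 * An M_RNOWAIT caller must always be prepared for a NULL return; an
 * M_WAITOK caller normally is not (the limit-exceeded case panics instead,
 * unless M_NULLOK is also passed).
 */
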
void *
malloc(unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int zi;

    gd = mycpu;
    slgd = &gd->gd_slab;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
        crit_enter();
        if (type->ks_limit == 0)
            malloc_init(type);
        crit_exit();
    }
    ++type->ks_calls;

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  XXX the original malloc code looped, but this tended to
     * simply deadlock the computer.
     */
    while (type->ks_loosememuse >= type->ks_limit) {
        int i;
        long ttl;

        for (i = ttl = 0; i < ncpus; ++i)
            ttl += type->ks_memuse[i];
        type->ks_loosememuse = ttl;
        if (ttl >= type->ks_limit) {
            if (flags & (M_RNOWAIT|M_NULLOK))
                return(NULL);
            panic("%s: malloc limit exceeded", type->ks_shortdesc);
        }
    }

    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0)
        return(ZERO_LENGTH_PTR);

    /*
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
     */
    while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if (slgd->NFreeZones > ZONE_RELS_THRESH) {      /* crit sect race */
            z = slgd->FreeZones;
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            kmem_slab_free(z, ZoneSize);        /* may block */
        }
        crit_exit();
    }
    /*
     * XXX handle oversized frees that were queued from free().
     */
    while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if ((z = slgd->FreeOvZones) != NULL) {
            KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
            slgd->FreeOvZones = z->z_Next;
            kmem_slab_free(z, z->z_ChunkSize);  /* may block */
        }
        crit_exit();
    }

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * Guarantee page alignment for allocations in multiples of PAGE_SIZE
     */
    if (size >= ZoneLimit || (size & PAGE_MASK) == 0) {
        struct kmemusage *kup;

        size = round_page(size);
        chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
        if (chunk == NULL)
            return(NULL);
        flags &= ~M_ZERO;       /* result already zero'd if M_ZERO was set */
        flags |= M_PASSIVE_ZERO;
        kup = btokup(chunk);
        kup->ku_pagecnt = size / PAGE_SIZE;
        kup->ku_cpu = gd->gd_cpuid;
        crit_enter();
        goto done;
    }

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    KKASSERT(zi < NZONES);
    crit_enter();
    if ((z = slgd->ZoneAry[zi]) != NULL) {
        KKASSERT(z->z_NFree > 0);

        /*
         * Remove us from the ZoneAry[] when we become empty
         */
        if (--z->z_NFree == 0) {
            slgd->ZoneAry[zi] = z->z_Next;
            z->z_Next = NULL;
        }

        /*
         * Locate a chunk in a free page.  This attempts to localize
         * reallocations into earlier pages without us having to sort
         * the chunk list.  A chunk may still overlap a page boundary.
         */
        while (z->z_FirstFreePg < ZonePageCount) {
            if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
#ifdef DIAGNOSTIC
                /*
                 * Diagnostic: c_Next is not total garbage.
                 */
                KKASSERT(chunk->c_Next == NULL ||
                        ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
                        ((intptr_t)chunk & IN_SAME_PAGE_MASK));
#endif
#ifdef INVARIANTS
                if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
                    panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount);
                if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
                    panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount);
#endif
                z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
                goto done;
            }
            ++z->z_FirstFreePg;
        }

        /*
         * No chunks are available but NFree said we had some memory, so
         * it must be available in the never-before-used-memory area
         * governed by UIndex.  The consequences are very serious if our zone
         * got corrupted so we use an explicit panic rather than a KASSERT.
         */
        if (z->z_UIndex + 1 != z->z_NMax)
            z->z_UIndex = z->z_UIndex + 1;
        else
            z->z_UIndex = 0;
        if (z->z_UIndex == z->z_UEndIndex)
            panic("slaballoc: corrupted zone");
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;
            flags |= M_PASSIVE_ZERO;
        }
        goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zerod pages.  Also see
     * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
     * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
     * we do not pre-zero it because we do not want to mess up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
        int off;

        if ((z = slgd->FreeZones) != NULL) {
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            bzero(z, sizeof(SLZone));
            z->z_Flags |= SLZF_UNOTZEROD;
        } else {
            z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
            if (z == NULL)
                goto fail;
        }

        /*
         * Guarantee power-of-2 alignment for power-of-2-sized chunks.
         * Otherwise just 8-byte align the data.
         */
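        /*
         * (The test below exploits the identity that for a power of 2,
         * (size | (size - 1)) + 1 == size << 1: e.g. 64 gives (64|63)+1 ==
         * 128 == 64 << 1, while a non-power-of-2 such as 96 gives
         * (96|95)+1 == 128 != 192.)
         */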
        if ((size | (size - 1)) + 1 == (size << 1))
            off = (sizeof(SLZone) + size - 1) & ~(size - 1);
        else
            off = (sizeof(SLZone) + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
        z->z_Magic = ZALLOC_SLAB_MAGIC;
        z->z_ZoneIndex = zi;
        z->z_NMax = (ZoneSize - off) / size;
        z->z_NFree = z->z_NMax - 1;
        z->z_BasePtr = (char *)z + off;
        z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
        z->z_ChunkSize = size;
        z->z_FirstFreePg = ZonePageCount;
        z->z_CpuGd = gd;
        z->z_Cpu = gd->gd_cpuid;
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        z->z_Next = slgd->ZoneAry[zi];
        slgd->ZoneAry[zi] = z;
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;   /* already zero'd */
            flags |= M_PASSIVE_ZERO;
        }

        /*
         * Slide the base index for initial allocations out of the next
         * zone we create so we do not over-weight the lower part of the
         * cpu memory caches.
         */
        slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
                & (ZALLOC_MAX_ZONE_SIZE - 1);
    }
done:
    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;
    crit_exit();
    if (flags & M_ZERO)
        bzero(chunk, size);
#ifdef INVARIANTS
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0)
        chunk->c_Next = (void *)-1; /* avoid accidental double-free check */
#endif
    return(chunk);
fail:
    crit_exit();
    return(NULL);
}

void *
realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    void *nptr;
    unsigned long osize;

    KKASSERT((flags & M_ZERO) == 0);    /* not supported */

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
        return(malloc(size, type, flags));
    if (size == 0) {
        free(ptr, type);
        return(NULL);
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     */
    {
        struct kmemusage *kup;

        kup = btokup(ptr);
        if (kup->ku_pagecnt) {
            osize = kup->ku_pagecnt << PAGE_SHIFT;
            if (osize == round_page(size))
                return(ptr);
            if ((nptr = malloc(size, type, flags)) == NULL)
                return(NULL);
            bcopy(ptr, nptr, min(size, osize));
            free(ptr, type);
            return(nptr);
        }
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    zoneindex(&size);
    if (z->z_ChunkSize == size)
        return(ptr);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     */
    if ((nptr = malloc(size, type, flags)) == NULL)
        return(NULL);
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
    free(ptr, type);
    return(nptr);
}
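
/*
 * Illustrative note on the realloc() semantics above (hypothetical caller;
 * "buf" and "newlen" are made-up names): realloc(NULL, n, type, flags)
 * behaves like malloc(), and realloc(ptr, 0, type, flags) frees ptr and
 * returns NULL, so a buffer can be grown with a single call:
 *
 *      buf = realloc(buf, newlen, M_TEMP, M_WAITOK);
 *
 * If the rounded-up chunk size is unchanged the original pointer is
 * returned without a copy.
 */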

char *
strdup(const char *str, struct malloc_type *type)
{
    int zlen;   /* length inclusive of terminating NUL */
    char *nstr;

    if (str == NULL)
        return(NULL);
    zlen = strlen(str) + 1;
    nstr = malloc(zlen, type, M_WAITOK);
    bcopy(str, nstr, zlen);
    return(nstr);
}

#ifdef SMP
/*
 * free()       (SLAB ALLOCATOR)
 *
 *      Free the specified chunk of memory.
 */
static
void
free_remote(void *ptr)
{
    free(ptr, *(struct malloc_type **)ptr);
}

#endif

void
free(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int pgno;

    gd = mycpu;
    slgd = &gd->gd_slab;

    if (ptr == NULL)
        panic("trying to free NULL pointer");

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR)
        return;

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    {
        struct kmemusage *kup;
        unsigned long size;

        kup = btokup(ptr);
        if (kup->ku_pagecnt) {
            size = kup->ku_pagecnt << PAGE_SHIFT;
            kup->ku_pagecnt = 0;
#ifdef INVARIANTS
            KKASSERT(sizeof(weirdary) <= size);
            bcopy(weirdary, ptr, sizeof(weirdary));
#endif
            /*
             * note: we always adjust our cpu's slot, not the originating
             * cpu (kup->ku_cpuid).  The statistics are in aggregate.
             *
             * note: XXX we have still inherited the interrupts-can't-block
             * assumption.  An interrupt thread does not bump
             * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
             * primarily until we can fix softupdate's assumptions about free().
             */
            crit_enter();
            --type->ks_inuse[gd->gd_cpuid];
            type->ks_memuse[gd->gd_cpuid] -= size;
            if (mycpu->gd_intr_nesting_level || (gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
                z = (SLZone *)ptr;
                z->z_Magic = ZALLOC_OVSZ_MAGIC;
                z->z_Next = slgd->FreeOvZones;
                z->z_ChunkSize = size;
                slgd->FreeOvZones = z;
                crit_exit();
            } else {
                crit_exit();
                kmem_slab_free(ptr, size);      /* may block */
            }
            return;
        }
    }

    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * If we do not own the zone then forward the request to the
     * cpu that does.
     */
    if (z->z_CpuGd != gd) {
        *(struct malloc_type **)ptr = type;
#ifdef SMP
        lwkt_send_ipiq(z->z_CpuGd, free_remote, ptr);
#else
        panic("Corrupt SLZone");
#endif
        return;
    }

    if (type->ks_magic != M_MAGIC)
        panic("free: malloc type lacks magic");

    crit_enter();
    pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
    chunk = ptr;

#ifdef INVARIANTS
    /*
     * Attempt to detect a double-free.  To reduce overhead we only check
     * if there appears to be a link pointer at the base of the data.
     */
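    /*
     * (A free chunk's first word is its c_Next link, so if the word at the
     * base of this allocation already looks like a pointer into the same
     * page the chunk may already be on the per-page free list; the scan
     * below confirms or rules that out.)
     */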
    if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
        SLChunk *scan;
        for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
            if (scan == chunk)
                panic("Double free at %p", chunk);
        }
    }
#endif

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
#ifdef INVARIANTS
    if (z->z_ChunkSize < sizeof(weirdary))
        bcopy(weirdary, chunk, z->z_ChunkSize);
    else
        bcopy(weirdary, chunk, sizeof(weirdary));
#endif

    /*
     * Add this free non-zero'd chunk to a linked list for reuse, adjust
     * z_FirstFreePg.
     */
#ifdef INVARIANTS
    if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
        panic("BADFREE %p", chunk);
#endif
    chunk->c_Next = z->z_PageAry[pgno];
    z->z_PageAry[pgno] = chunk;
#ifdef INVARIANTS
    if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
        panic("BADFREE2");
#endif
    if (z->z_FirstFreePg > pgno)
        z->z_FirstFreePg = pgno;

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
        z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
        slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_NFree == z->z_NMax &&
        (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
    ) {
        SLZone **pz;

        for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
            ;
        *pz = z->z_Next;
        z->z_Magic = -1;
        z->z_Next = slgd->FreeZones;
        slgd->FreeZones = z;
        ++slgd->NFreeZones;
    }
    crit_exit();
}

/*
 * kmem_slab_alloc()
 *
 *      Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *      specified alignment.  M_* flags are expected in the flags field.
 *
 *      Alignment must be a multiple of PAGE_SIZE.
 *
 *      NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *      but when we move zalloc() over to use this function as its backend
 *      we will have to switch to kreserve/krelease and call reserve(0)
 *      after the new space is made available.
 *
 *      Interrupt code which has preempted other code is not allowed to
 *      mess with CACHE pages, but if M_FAILSAFE is set we can do a
 *      yield to become non-preempting and try again inclusive of
 *      cache pages.
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
    vm_size_t i;
    vm_offset_t addr;
    vm_offset_t offset;
    int count;
    thread_t td;
    vm_map_t map = kernel_map;

    size = round_page(size);
    addr = vm_map_min(map);

    /*
     * Reserve properly aligned space from kernel_map
     */
    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
    crit_enter();
    vm_map_lock(map);
    if (vm_map_findspace(map, vm_map_min(map), size, align, &addr)) {
        vm_map_unlock(map);
        if ((flags & (M_RNOWAIT|M_NULLOK)) == 0)
            panic("kmem_slab_alloc(): kernel_map ran out of space!");
        crit_exit();
        vm_map_entry_release(count);
        if ((flags & (M_FAILSAFE|M_NULLOK)) == M_FAILSAFE)
            panic("kmem_slab_alloc(): kernel_map ran out of space!");
        return(NULL);
    }
    offset = addr - VM_MIN_KERNEL_ADDRESS;
    vm_object_reference(kernel_object);
    vm_map_insert(map, &count,
                    kernel_object, offset, addr, addr + size,
                    VM_PROT_ALL, VM_PROT_ALL, 0);

    td = curthread;

    /*
     * Allocate the pages.  Do not mess with the PG_ZERO flag yet.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
        vm_page_t m;
        vm_pindex_t idx = OFF_TO_IDX(offset + i);
        int vmflags = 0;

        if (flags & M_ZERO)
            vmflags |= VM_ALLOC_ZERO;
        if (flags & M_USE_RESERVE)
            vmflags |= VM_ALLOC_SYSTEM;
        if (flags & M_USE_INTERRUPT_RESERVE)
            vmflags |= VM_ALLOC_INTERRUPT;
        if ((flags & (M_RNOWAIT|M_WAITOK)) == 0)
            panic("kmem_slab_alloc: bad flags %08x (%p)", flags, ((int **)&size)[-1]);

        /*
         * Never set VM_ALLOC_NORMAL during a preemption because this allows
         * allocation out of the VM page cache and could cause mainline kernel
         * code working on VM objects to get confused.
         */
        if (flags & (M_FAILSAFE|M_WAITOK)) {
            if (td->td_preempted) {
                vmflags |= VM_ALLOC_SYSTEM;
            } else {
                vmflags |= VM_ALLOC_NORMAL;
            }
        }

        m = vm_page_alloc(kernel_object, idx, vmflags);

        /*
         * If the allocation failed we either return NULL or we retry.
         *
         * If M_WAITOK or M_FAILSAFE is set we retry.  Note that M_WAITOK
         * (and M_FAILSAFE) can be specified from an interrupt.  M_FAILSAFE
         * generates a warning or a panic.
         *
         * If we are preempting a thread we yield instead of block.  Both
         * get us out from under a preemption but yielding will get the cpu
         * back more quickly.  Livelock does not occur because we will not
         * be preempting anyone the second time around.
         */
        if (m == NULL) {
            if (flags & (M_FAILSAFE|M_WAITOK)) {
                if (td->td_preempted) {
                    if (flags & M_FAILSAFE) {
                        printf("malloc: M_WAITOK from preemption would block"
                                " try failsafe yield/block\n");
                    }
                    vm_map_unlock(map);
                    lwkt_yield();
                    vm_map_lock(map);
                } else {
                    vm_map_unlock(map);
                    vm_wait();
                    vm_map_lock(map);
                }
                i -= PAGE_SIZE; /* retry */
                continue;
            }

            /*
             * We were unable to recover, cleanup and return NULL
             */
            while (i != 0) {
                i -= PAGE_SIZE;
                m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
                vm_page_free(m);
            }
            vm_map_delete(map, addr, addr + size, &count);
            vm_map_unlock(map);
            crit_exit();
            vm_map_entry_release(count);
            return(NULL);
        }
    }

    /*
     * Success!
     *
     * Mark the map entry as non-pageable using a routine that allows us to
     * populate the underlying pages.
     */
    vm_map_set_wired_quick(map, addr, size, &count);
    crit_exit();

    /*
     * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
        vm_page_t m;

        m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
        m->valid = VM_PAGE_BITS_ALL;
        vm_page_wire(m);
        vm_page_wakeup(m);
        pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
        if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
            bzero((char *)addr + i, PAGE_SIZE);
        vm_page_flag_clear(m, PG_ZERO);
        vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
    }
    vm_map_unlock(map);
    vm_map_entry_release(count);
    return((void *)addr);
}

static void
kmem_slab_free(void *ptr, vm_size_t size)
{
    crit_enter();
    vm_map_remove(kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
    crit_exit();
}