Modify ktr(4) to be typesafe
[dragonfly.git] / sys / kern / kern_slaballoc.c
a108bf71 1/*
2 * (MPSAFE)
3 *
5b287bba 4 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
8c10bfcf 5 *
ed2013d8 6 * Copyright (c) 2003,2004,2010 The DragonFly Project. All rights reserved.
7 *
8 * This code is derived from software contributed to The DragonFly Project
9 * by Matthew Dillon <dillon@backplane.com>
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
8c10bfcf 14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 * 3. Neither the name of The DragonFly Project nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific, prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
29 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
31 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
33 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
34 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
35 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
a108bf71 36 * SUCH DAMAGE.
37 *
38 * This module implements a slab allocator drop-in replacement for the
39 * kernel malloc().
40 *
41 * A slab allocator reserves a ZONE for each chunk size, then lays the
42 * chunks out in an array within the zone. Allocation and deallocation
43 * are nearly instantaneous, and fragmentation/overhead losses are limited
44 * to a fixed worst-case amount.
45 *
46 * The downside of this slab implementation is the zone size
47 * multiplied by the number of zones: ~80 zones * 128K = ~10MB of VM per cpu.
48 * In a kernel implementation all this memory will be physical so
49 * the zone size is adjusted downward on machines with less physical
50 * memory. The upside is that overhead is bounded... this is the *worst*
51 * case overhead.
52 *
53 * Slab management is done on a per-cpu basis and no locking or mutexes
54 * are required, only a critical section. When one cpu frees memory
55 * belonging to another cpu's slab manager an asynchronous IPI message
56 * will be queued to execute the operation. In addition, both the
57 * high level slab allocator and the low level zone allocator optimize
58 * M_ZERO requests, and the slab allocator does not have to pre-initialize
59 * the linked list of chunks.
60 *
61 * XXX Balancing is needed between cpus. Balance will be handled through
62 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
63 *
64 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
65 * the new zone should be restricted to M_USE_RESERVE requests only.
66 *
67 *	Alloc Size	Chunking	Number of zones
68 *	0-127		8		16
69 *	128-255		16		8
70 *	256-511		32		8
71 *	512-1023	64		8
72 *	1024-2047	128		8
73 *	2048-4095	256		8
74 *	4096-8191	512		8
75 *	8192-16383	1024		8
76 *	16384-32767	2048		8
77 * (if PAGE_SIZE is 4K the maximum zone allocation is 16383)
78 *
46a3f46d 79 * Allocations >= ZoneLimit go directly to kmem.
80 *
81 * API REQUIREMENTS AND SIDE EFFECTS
82 *
83 * To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
84 * have remained compatible with the following API requirements:
85 *
86 * + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
3d177b31 87 * + all power-of-2 sized allocations are power-of-2 aligned (twe)
88 * + malloc(0) is allowed and returns non-NULL (ahc driver)
89 * + ability to allocate arbitrarily large chunks of memory
90 */
91
92#include "opt_vm.h"
93
94#include <sys/param.h>
95#include <sys/systm.h>
96#include <sys/kernel.h>
97#include <sys/slaballoc.h>
98#include <sys/mbuf.h>
99#include <sys/vmmeter.h>
100#include <sys/lock.h>
101#include <sys/thread.h>
102#include <sys/globaldata.h>
d2182dc1 103#include <sys/sysctl.h>
f2b5daf9 104#include <sys/ktr.h>
105
106#include <vm/vm.h>
107#include <vm/vm_param.h>
108#include <vm/vm_kern.h>
109#include <vm/vm_extern.h>
110#include <vm/vm_object.h>
111#include <vm/pmap.h>
112#include <vm/vm_map.h>
113#include <vm/vm_page.h>
114#include <vm/vm_pageout.h>
115
116#include <machine/cpu.h>
117
118#include <sys/thread2.h>
119
120#define btokup(z) (&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)
121
122#define MEMORY_STRING "ptr=%p type=%p size=%lu flags=%04x"
123#define MEMORY_ARGS void *ptr, void *type, unsigned long size, int flags
124
125#if !defined(KTR_MEMORY)
126#define KTR_MEMORY KTR_ALL
127#endif
128KTR_INFO_MASTER(memory);
129KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin");
130KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
131KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
132KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
133KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
134KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
f2b5daf9 135#ifdef SMP
136KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
137KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
138KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
f2b5daf9 139#endif
140KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin");
141KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end");
142
143#define logmemory(name, ptr, type, size, flags) \
144 KTR_LOG(memory_ ## name, ptr, type, size, flags)
145#define logmemory_quick(name) \
146 KTR_LOG(memory_ ## name)
f2b5daf9 147
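/*
 * Illustrative sketch, not part of the original file: with the typesafe
 * ktr(4) macros above, every probe is declared with both a format string
 * and an argument prototype (MEMORY_STRING/MEMORY_ARGS), so the arguments
 * handed to KTR_LOG() via logmemory() can be checked against the declared
 * prototype.  The probe name "malloc_huge" below is invented; a real probe
 * would be added next to the existing ones.
 */
#if 0
KTR_INFO(KTR_MEMORY, memory, malloc_huge, 11, MEMORY_STRING, MEMORY_ARGS);

/* logged with arguments matching MEMORY_ARGS: ptr, type, size, flags */
logmemory(malloc_huge, ptr, type, (unsigned long)size, flags);
#endif
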
148/*
149 * Fixed globals (not per-cpu)
150 */
151static int ZoneSize;
46a3f46d 152static int ZoneLimit;
a108bf71 153static int ZonePageCount;
5fee07e6 154static uintptr_t ZoneMask;
155static int ZoneBigAlloc; /* in KB */
156static int ZoneGenAlloc; /* in KB */
460426e6 157struct malloc_type *kmemstatistics; /* exported to vmstat */
158static int32_t weirdary[16];
159
160static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
161static void kmem_slab_free(void *ptr, vm_size_t bytes);
5fee07e6 162
163#if defined(INVARIANTS)
164static void chunk_mark_allocated(SLZone *z, void *chunk);
165static void chunk_mark_free(SLZone *z, void *chunk);
166#else
167#define chunk_mark_allocated(z, chunk)
168#define chunk_mark_free(z, chunk)
10cc6608 169#endif
170
171/*
172 * Misc constants. Note that allocations that are exact multiples of
173 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
174 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
175 */
176#define MIN_CHUNK_SIZE 8 /* in bytes */
177#define MIN_CHUNK_MASK (MIN_CHUNK_SIZE - 1)
ad94a851 178#define ZONE_RELS_THRESH 32 /* threshold number of zones */
179#define IN_SAME_PAGE_MASK (~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
180
181/*
182 * The WEIRD_ADDR is used as known text to copy into free objects to
183 * try to create deterministic failure cases if the data is accessed after
184 * free.
185 */
186#define WEIRD_ADDR 0xdeadc0de
187#define MAX_COPY sizeof(weirdary)
188#define ZERO_LENGTH_PTR ((void *)-8)
189
190/*
191 * Misc global malloc buckets
192 */
193
194MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
195MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
196MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
197
198MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
199MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
200
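/*
 * Illustrative sketch, not part of the original file: how a typical
 * subsystem declares its own malloc bucket and allocates from it.  The
 * M_EXAMPLE bucket, example_softc structure and function names are
 * invented for illustration only.
 */
#if 0
MALLOC_DEFINE(M_EXAMPLE, "example", "example subsystem structures");

struct example_softc {
	int	unit;
	char	*buffer;
};

static struct example_softc *
example_attach(int unit)
{
	struct example_softc *sc;

	sc = kmalloc(sizeof(*sc), M_EXAMPLE, M_WAITOK | M_ZERO);
	sc->unit = unit;
	sc->buffer = kmalloc(4096, M_EXAMPLE, M_WAITOK);
	return (sc);
}

static void
example_detach(struct example_softc *sc)
{
	kfree(sc->buffer, M_EXAMPLE);
	kfree(sc, M_EXAMPLE);
}
#endif
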
201/*
202 * Initialize the slab memory allocator. We have to choose a zone size based
203 * on available physical memory. We choose a zone size which is approximately
204 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
205 * 128K. The zone size is limited to the bounds set in slaballoc.h
206 * (typically 32K min, 128K max).
207 */
208static void kmeminit(void *dummy);
209
210char *ZeroPage;
211
ba39e2e0 212SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL)
a108bf71 213
214#ifdef INVARIANTS
215/*
216 * If enabled any memory allocated without M_ZERO is initialized to -1.
217 */
218static int use_malloc_pattern;
219SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
220 &use_malloc_pattern, 0,
221 "Initialize memory to -1 if M_ZERO not specified");
222#endif
223
ad94a851 224static int ZoneRelsThresh = ZONE_RELS_THRESH;
225SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
226SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");
ad94a851 227SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, "");
665206ee 228
229/*
230 * Returns the kernel memory size limit for the purposes of initializing
231 * various subsystem caches. The smaller of available memory and the KVM
232 * memory space is returned.
233 *
234 * The size in megabytes is returned.
235 */
236size_t
237kmem_lim_size(void)
238{
239 size_t limsize;
240
241 limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
242 if (limsize > KvaSize)
243 limsize = KvaSize;
244 return (limsize / (1024 * 1024));
245}
246
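/*
 * Illustrative sketch, not part of the original file: a subsystem cache
 * can scale itself from kmem_lim_size(), which returns the smaller of
 * physical memory and KVM in megabytes.  The names below are invented.
 */
#if 0
static int example_cache_entries;

static void
example_cache_init(void)
{
	size_t limmb = kmem_lim_size();		/* megabytes */

	/* roughly one entry per 4MB of usable memory, with a cap */
	example_cache_entries = (int)(limmb / 4);
	if (example_cache_entries > 65536)
		example_cache_entries = 65536;
}
#endif
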
247static void
248kmeminit(void *dummy)
249{
7c457ac8 250 size_t limsize;
251 int usesize;
252 int i;
a108bf71 253
254 limsize = kmem_lim_size();
255 usesize = (int)(limsize * 1024); /* convert to KB */
a108bf71 256
257 /*
258 * If the machine has a large KVM space and more than 8G of ram,
259 * double the zone release threshold to reduce SMP invalidations.
260 * If more than 16G of ram, do it again.
261 *
262 * The BIOS eats a little ram so add some slop. We want 8G worth of
263 * memory sticks to trigger the first adjustment.
264 */
265 if (ZoneRelsThresh == ZONE_RELS_THRESH) {
266 if (limsize >= 7 * 1024)
267 ZoneRelsThresh *= 2;
268 if (limsize >= 15 * 1024)
269 ZoneRelsThresh *= 2;
270 }
a108bf71 271
272 /*
273 * Calculate the zone size. This typically calculates to
274 * ZALLOC_MAX_ZONE_SIZE
275 */
276 ZoneSize = ZALLOC_MIN_ZONE_SIZE;
277 while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
278 ZoneSize <<= 1;
279 ZoneLimit = ZoneSize / 4;
280 if (ZoneLimit > ZALLOC_ZONE_LIMIT)
281 ZoneLimit = ZALLOC_ZONE_LIMIT;
5fee07e6 282 ZoneMask = ~(uintptr_t)(ZoneSize - 1);
283 ZonePageCount = ZoneSize / PAGE_SIZE;
284
a3034532 285 for (i = 0; i < NELEM(weirdary); ++i)
286 weirdary[i] = WEIRD_ADDR;
287
288 ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);
289
a108bf71 290 if (bootverbose)
6ea70f76 291 kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
292}
293
294/*
bba6a44d 295 * Initialize a malloc type tracking structure.
296 */
297void
298malloc_init(void *data)
299{
300 struct malloc_type *type = data;
7c457ac8 301 size_t limsize;
302
303 if (type->ks_magic != M_MAGIC)
304 panic("malloc type lacks magic");
305
306 if (type->ks_limit != 0)
307 return;
308
309 if (vmstats.v_page_count == 0)
310 panic("malloc_init not allowed before vm init");
311
b12defdc 312 limsize = kmem_lim_size() * (1024 * 1024);
313 type->ks_limit = limsize / 10;
314
315 type->ks_next = kmemstatistics;
316 kmemstatistics = type;
317}
318
319void
320malloc_uninit(void *data)
321{
322 struct malloc_type *type = data;
323 struct malloc_type *t;
324#ifdef INVARIANTS
325 int i;
1d712609 326 long ttl;
bba6a44d 327#endif
328
329 if (type->ks_magic != M_MAGIC)
330 panic("malloc type lacks magic");
331
332 if (vmstats.v_page_count == 0)
333 panic("malloc_uninit not allowed before vm init");
334
335 if (type->ks_limit == 0)
336 panic("malloc_uninit on uninitialized type");
337
338#ifdef SMP
339 /* Make sure that all pending kfree()s are finished. */
340 lwkt_synchronize_ipiqs("muninit");
341#endif
342
a108bf71 343#ifdef INVARIANTS
344 /*
345 * memuse is only correct in aggregation. Due to memory being allocated
346 * on one cpu and freed on another, individual array entries may be
347 * negative or positive (canceling each other out).
348 */
349 for (i = ttl = 0; i < ncpus; ++i)
350 ttl += type->ks_memuse[i];
351 if (ttl) {
6ea70f76 352 kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
1d712609 353 ttl, type->ks_shortdesc, i);
354 }
355#endif
356 if (type == kmemstatistics) {
357 kmemstatistics = type->ks_next;
358 } else {
359 for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
360 if (t->ks_next == type) {
361 t->ks_next = type->ks_next;
362 break;
363 }
364 }
365 }
366 type->ks_next = NULL;
367 type->ks_limit = 0;
368}
369
370/*
371 * Increase the kmalloc pool limit for the specified pool. No changes
372 * are made if the pool would shrink.
373 */
374void
375kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
376{
377 if (type->ks_limit == 0)
378 malloc_init(type);
379 if (bytes == 0)
380 bytes = KvaSize;
381 if (type->ks_limit < bytes)
382 type->ks_limit = bytes;
383}
384
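/*
 * Illustrative sketch, not part of the original file: a subsystem that
 * legitimately needs more than the default 1/10-of-memory limit set by
 * malloc_init() can raise the limit for its bucket.  As the code below
 * shows, a size of 0 raises the limit to KvaSize.  M_EXAMPLE is an
 * invented bucket.
 */
#if 0
/* allow this bucket to grow to 512MB */
kmalloc_raise_limit(M_EXAMPLE, 512UL * 1024 * 1024);

/* or make it effectively unlimited */
kmalloc_raise_limit(M_EXAMPLE, 0);
#endif
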
385/*
386 * Dynamically create a malloc pool. This function is a NOP if *typep is
387 * already non-NULL.
388 */
389void
390kmalloc_create(struct malloc_type **typep, const char *descr)
391{
392 struct malloc_type *type;
393
394 if (*typep == NULL) {
395 type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
396 type->ks_magic = M_MAGIC;
397 type->ks_shortdesc = descr;
398 malloc_init(type);
399 *typep = type;
400 }
401}
402
403/*
404 * Destroy a dynamically created malloc pool. This function is a NOP if
405 * the pool has already been destroyed.
406 */
407void
408kmalloc_destroy(struct malloc_type **typep)
409{
410 if (*typep != NULL) {
411 malloc_uninit(*typep);
412 kfree(*typep, M_TEMP);
413 *typep = NULL;
414 }
415}
416
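/*
 * Illustrative sketch, not part of the original file: lifecycle of a
 * dynamically created pool, e.g. in a module that can be loaded and
 * unloaded.  The names are invented.
 */
#if 0
static struct malloc_type *M_DYNEXAMPLE;

static void
example_modload(void)
{
	kmalloc_create(&M_DYNEXAMPLE, "dynexample buffers");
}

static void
example_modunload(void)
{
	/* all allocations must have been kfree()d by this point */
	kmalloc_destroy(&M_DYNEXAMPLE);
}
#endif
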
417/*
418 * Calculate the zone index for the allocation request size and set the
419 * allocation request size to that particular zone's chunk size.
420 */
421static __inline int
422zoneindex(unsigned long *bytes)
423{
424 unsigned int n = (unsigned int)*bytes; /* unsigned for shift opt */
425 if (n < 128) {
426 *bytes = n = (n + 7) & ~7;
427 return(n / 8 - 1); /* 8 byte chunks, 16 zones */
428 }
429 if (n < 256) {
430 *bytes = n = (n + 15) & ~15;
431 return(n / 16 + 7);
432 }
433 if (n < 8192) {
434 if (n < 512) {
435 *bytes = n = (n + 31) & ~31;
436 return(n / 32 + 15);
437 }
438 if (n < 1024) {
439 *bytes = n = (n + 63) & ~63;
440 return(n / 64 + 23);
441 }
442 if (n < 2048) {
443 *bytes = n = (n + 127) & ~127;
444 return(n / 128 + 31);
445 }
446 if (n < 4096) {
447 *bytes = n = (n + 255) & ~255;
448 return(n / 256 + 39);
449 }
450 *bytes = n = (n + 511) & ~511;
451 return(n / 512 + 47);
452 }
453#if ZALLOC_ZONE_LIMIT > 8192
454 if (n < 16384) {
455 *bytes = n = (n + 1023) & ~1023;
456 return(n / 1024 + 55);
457 }
458#endif
459#if ZALLOC_ZONE_LIMIT > 16384
460 if (n < 32768) {
461 *bytes = n = (n + 2047) & ~2047;
462 return(n / 2048 + 63);
463 }
464#endif
465 panic("Unexpected byte count %d", n);
466 return(0);
467}
468
469#ifdef SLAB_DEBUG
470/*
471 * Used to debug memory corruption issues. Record up to (typically 32)
472 * allocation sources for this zone (for a particular chunk size).
473 */
474
475static void
476slab_record_source(SLZone *z, const char *file, int line)
477{
478 int i;
479 int b = line & (SLAB_DEBUG_ENTRIES - 1);
480
481 i = b;
482 do {
483 if (z->z_Sources[i].file == file && z->z_Sources[i].line == line)
484 return;
485 if (z->z_Sources[i].file == NULL)
486 break;
487 i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1);
488 } while (i != b);
489 z->z_Sources[i].file = file;
490 z->z_Sources[i].line = line;
491}
492
493#endif
494
a108bf71 495/*
5fee07e6 496 * kmalloc() (SLAB ALLOCATOR)
a108bf71
MD
497 *
498 * Allocate memory via the slab allocator. If the request is too large,
499 * or if it is page-aligned beyond a certain size, we fall back to the
500 * KMEM subsystem. A SLAB tracking descriptor must be specified; use
501 * &SlabMisc if you don't care.
502 *
8cb2bf45
JS
503 * M_RNOWAIT - don't block.
504 * M_NULLOK - return NULL instead of blocking.
a108bf71 505 * M_ZERO - zero the returned memory.
dc1fd4b3
MD
506 * M_USE_RESERVE - allow greater drawdown of the free list
507 * M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
5b287bba
MD
508 *
509 * MPSAFE
a108bf71 510 */
bbb201fd
MD
511
512#ifdef SLAB_DEBUG
513void *
514kmalloc_debug(unsigned long size, struct malloc_type *type, int flags,
515 const char *file, int line)
516#else
a108bf71 517void *
8aca2bd4 518kmalloc(unsigned long size, struct malloc_type *type, int flags)
bbb201fd 519#endif
a108bf71
MD
520{
521 SLZone *z;
522 SLChunk *chunk;
d8100bdc 523#ifdef SMP
5fee07e6 524 SLChunk *bchunk;
d8100bdc 525#endif
a108bf71 526 SLGlobalData *slgd;
bba6a44d 527 struct globaldata *gd;
a108bf71 528 int zi;
d2182dc1
MD
529#ifdef INVARIANTS
530 int i;
531#endif
a108bf71 532
b68ad50c 533 logmemory_quick(malloc_beg);
bba6a44d
MD
534 gd = mycpu;
535 slgd = &gd->gd_slab;
a108bf71
MD
536
537 /*
538 * XXX silly to have this in the critical path.
539 */
540 if (type->ks_limit == 0) {
541 crit_enter();
542 if (type->ks_limit == 0)
543 malloc_init(type);
544 crit_exit();
545 }
546 ++type->ks_calls;
547
548 /*
38e34349
MD
549 * Handle the case where the limit is reached. Panic if we can't return
550 * NULL. The original malloc code looped, but this tended to
a108bf71 551 * simply deadlock the computer.
38e34349
MD
552 *
553 * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
554 * to determine if a more complete limit check should be done. The
555 * actual memory use is tracked via ks_memuse[cpu].
a108bf71 556 */
bba6a44d
MD
557 while (type->ks_loosememuse >= type->ks_limit) {
558 int i;
559 long ttl;
560
561 for (i = ttl = 0; i < ncpus; ++i)
562 ttl += type->ks_memuse[i];
38e34349 563 type->ks_loosememuse = ttl; /* not MP synchronized */
564 if ((ssize_t)ttl < 0) /* deal with occasional race */
565 ttl = 0;
bba6a44d 566 if (ttl >= type->ks_limit) {
f2b5daf9 567 if (flags & M_NULLOK) {
5fee07e6 568 logmemory(malloc_end, NULL, type, size, flags);
bba6a44d 569 return(NULL);
f2b5daf9 570 }
bba6a44d
MD
571 panic("%s: malloc limit exceeded", type->ks_shortdesc);
572 }
a108bf71
MD
573 }
574
575 /*
576 * Handle the degenerate size == 0 case. Yes, this does happen.
577 * Return a special pointer. This is to maintain compatibility with
578 * the original malloc implementation. Certain devices, such as the
579 * adaptec driver, not only allocate 0 bytes, they check for NULL and
580 * also realloc() later on. Joy.
581 */
f2b5daf9 582 if (size == 0) {
5fee07e6 583 logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
a108bf71 584 return(ZERO_LENGTH_PTR);
f2b5daf9 585 }
a108bf71
MD
586
587 /*
a7cf0021
MD
588 * Handle hysteresis from prior frees here in malloc(). We cannot
589 * safely manipulate the kernel_map in free() due to free() possibly
590 * being called via an IPI message or from sensitive interrupt code.
5fee07e6
MD
591 *
592 * NOTE: ku_pagecnt must be cleared before we free the slab or we
593 * might race another cpu allocating the kva and setting
594 * ku_pagecnt.
a7cf0021 595 */
ad94a851 596 while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) {
46a3f46d 597 crit_enter();
ad94a851 598 if (slgd->NFreeZones > ZoneRelsThresh) { /* crit sect race */
722871d3 599 int *kup;
5fee07e6 600
46a3f46d
MD
601 z = slgd->FreeZones;
602 slgd->FreeZones = z->z_Next;
603 --slgd->NFreeZones;
5fee07e6 604 kup = btokup(z);
722871d3 605 *kup = 0;
46a3f46d 606 kmem_slab_free(z, ZoneSize); /* may block */
665206ee 607 atomic_add_int(&ZoneGenAlloc, -(int)ZoneSize / 1024);
46a3f46d
MD
608 }
609 crit_exit();
610 }
5fee07e6 611
46a3f46d 612 /*
5fee07e6 613 * XXX handle oversized frees that were queued from kfree().
46a3f46d 614 */
dc1fd4b3 615 while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
46a3f46d
MD
616 crit_enter();
617 if ((z = slgd->FreeOvZones) != NULL) {
7e83df33
MD
618 vm_size_t tsize;
619
46a3f46d
MD
620 KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
621 slgd->FreeOvZones = z->z_Next;
7e83df33
MD
622 tsize = z->z_ChunkSize;
623 kmem_slab_free(z, tsize); /* may block */
624 atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024);
46a3f46d
MD
625 }
626 crit_exit();
a7cf0021
MD
627 }
628
629 /*
a108bf71
MD
630 * Handle large allocations directly. There should not be very many of
631 * these so performance is not a big issue.
632 *
b543eeed
MD
633 * The backend allocator is pretty nasty on a SMP system. Use the
634 * slab allocator for one and two page-sized chunks even though we lose
635 * some efficiency. XXX maybe fix mmio and the elf loader instead.
a108bf71 636 */
b543eeed 637 if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
722871d3 638 int *kup;
a108bf71
MD
639
640 size = round_page(size);
641 chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
f2b5daf9 642 if (chunk == NULL) {
5fee07e6 643 logmemory(malloc_end, NULL, type, size, flags);
a108bf71 644 return(NULL);
f2b5daf9 645 }
665206ee 646 atomic_add_int(&ZoneBigAlloc, (int)size / 1024);
a108bf71 647 flags &= ~M_ZERO; /* result already zero'd if M_ZERO was set */
8f1d5415 648 flags |= M_PASSIVE_ZERO;
a108bf71 649 kup = btokup(chunk);
722871d3 650 *kup = size / PAGE_SIZE;
a108bf71
MD
651 crit_enter();
652 goto done;
653 }
654
655 /*
656 * Attempt to allocate out of an existing zone. First try the free list,
657 * then allocate out of unallocated space. If we find a good zone move
658 * it to the head of the list so later allocations find it quickly
659 * (we might have thousands of zones in the list).
660 *
661 * Note: zoneindex() will panic if size is too large.
662 */
663 zi = zoneindex(&size);
664 KKASSERT(zi < NZONES);
665 crit_enter();
a108bf71 666
5fee07e6 667 if ((z = slgd->ZoneAry[zi]) != NULL) {
a108bf71 668 /*
5fee07e6
MD
669 * Locate a chunk - we have to have at least one. If this is the
670 * last chunk go ahead and do the work to retrieve chunks freed
671 * from remote cpus, and if the zone is still empty move it off
672 * the ZoneAry.
a108bf71 673 */
5fee07e6
MD
674 if (--z->z_NFree <= 0) {
675 KKASSERT(z->z_NFree == 0);
676
677#ifdef SMP
678 /*
679 * WARNING! This code competes with other cpus. It is ok
680 * for us to not drain RChunks here but we might as well, and
681 * it is ok if more accumulate after we're done.
682 *
683 * Set RSignal before pulling rchunks off, indicating that we
684 * will be moving ourselves off of the ZoneAry. Remote ends will
685 * read RSignal before putting rchunks on thus interlocking
686 * their IPI signaling.
687 */
688 if (z->z_RChunks == NULL)
689 atomic_swap_int(&z->z_RSignal, 1);
690
691 while ((bchunk = z->z_RChunks) != NULL) {
692 cpu_ccfence();
693 if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
694 *z->z_LChunksp = bchunk;
695 while (bchunk) {
696 chunk_mark_free(z, bchunk);
697 z->z_LChunksp = &bchunk->c_Next;
698 bchunk = bchunk->c_Next;
699 ++z->z_NFree;
700 }
701 break;
702 }
703 }
704#endif
705 /*
706 * Remove from the zone list if no free chunks remain.
707 * Clear RSignal
708 */
709 if (z->z_NFree == 0) {
710 slgd->ZoneAry[zi] = z->z_Next;
711 z->z_Next = NULL;
712 } else {
713 z->z_RSignal = 0;
714 }
a108bf71
MD
715 }
716
717 /*
5fee07e6 718 * Fast path, we have chunks available in z_LChunks.
a108bf71 719 */
5fee07e6
MD
720 chunk = z->z_LChunks;
721 if (chunk) {
10cc6608 722 chunk_mark_allocated(z, chunk);
5fee07e6
MD
723 z->z_LChunks = chunk->c_Next;
724 if (z->z_LChunks == NULL)
725 z->z_LChunksp = &z->z_LChunks;
bbb201fd
MD
726#ifdef SLAB_DEBUG
727 slab_record_source(z, file, line);
728#endif
a108bf71 729 goto done;
a108bf71
MD
730 }
731
732 /*
5fee07e6
MD
733 * No chunks are available in LChunks, the free chunk MUST be
734 * in the never-before-used memory area, controlled by UIndex.
735 *
736 * The consequences are very serious if our zone got corrupted so
737 * we use an explicit panic rather than a KASSERT.
a108bf71 738 */
1c5ca4f3 739 if (z->z_UIndex + 1 != z->z_NMax)
5fee07e6 740 ++z->z_UIndex;
1c5ca4f3
MD
741 else
742 z->z_UIndex = 0;
5fee07e6 743
1c5ca4f3
MD
744 if (z->z_UIndex == z->z_UEndIndex)
745 panic("slaballoc: corrupted zone");
5fee07e6 746
1c5ca4f3 747 chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
8f1d5415 748 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
6ab8e1da 749 flags &= ~M_ZERO;
8f1d5415
MD
750 flags |= M_PASSIVE_ZERO;
751 }
10cc6608 752 chunk_mark_allocated(z, chunk);
bbb201fd
MD
753#ifdef SLAB_DEBUG
754 slab_record_source(z, file, line);
755#endif
a108bf71
MD
756 goto done;
757 }
758
759 /*
760 * If all zones are exhausted we need to allocate a new zone for this
761 * index. Use M_ZERO to take advantage of pre-zeroed pages. Also see
762 * UAlloc use above in regard to M_ZERO. Note that when we are reusing
763 * a zone from the FreeZones list, UAlloc'd data will not be zero'd, and
764 * we do not pre-zero it because we do not want to mess up the L1 cache.
a108bf71
MD
765 *
766 * At least one subsystem, the tty code (see CROUND) expects power-of-2
767 * allocations to be power-of-2 aligned. We maintain compatibility by
768 * adjusting the base offset below.
769 */
770 {
771 int off;
722871d3 772 int *kup;
a108bf71
MD
773
774 if ((z = slgd->FreeZones) != NULL) {
775 slgd->FreeZones = z->z_Next;
776 --slgd->NFreeZones;
777 bzero(z, sizeof(SLZone));
6ab8e1da 778 z->z_Flags |= SLZF_UNOTZEROD;
a108bf71
MD
779 } else {
780 z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
781 if (z == NULL)
782 goto fail;
665206ee 783 atomic_add_int(&ZoneGenAlloc, (int)ZoneSize / 1024);
a108bf71
MD
784 }
785
786 /*
10cc6608
MD
787 * How big is the base structure?
788 */
789#if defined(INVARIANTS)
790 /*
791 * Make room for z_Bitmap. An exact calculation is somewhat more
792 * complicated, so the bitmap size is conservatively over-estimated instead.
793 */
794 off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
795 bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
796#else
797 off = sizeof(SLZone);
798#endif
799
800 /*
a108bf71
MD
801 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
802 * Otherwise just 8-byte align the data.
803 */
804 if ((size | (size - 1)) + 1 == (size << 1))
10cc6608 805 off = (off + size - 1) & ~(size - 1);
a108bf71 806 else
10cc6608 807 off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
a108bf71
MD
808 z->z_Magic = ZALLOC_SLAB_MAGIC;
809 z->z_ZoneIndex = zi;
810 z->z_NMax = (ZoneSize - off) / size;
811 z->z_NFree = z->z_NMax - 1;
1c5ca4f3
MD
812 z->z_BasePtr = (char *)z + off;
813 z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
a108bf71 814 z->z_ChunkSize = size;
2db3b277 815 z->z_CpuGd = gd;
bba6a44d 816 z->z_Cpu = gd->gd_cpuid;
5fee07e6 817 z->z_LChunksp = &z->z_LChunks;
bbb201fd
MD
818#ifdef SLAB_DEBUG
819 bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources));
820 bzero(z->z_Sources, sizeof(z->z_Sources));
821#endif
1c5ca4f3 822 chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
a108bf71
MD
823 z->z_Next = slgd->ZoneAry[zi];
824 slgd->ZoneAry[zi] = z;
8f1d5415 825 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
6ab8e1da 826 flags &= ~M_ZERO; /* already zero'd */
8f1d5415
MD
827 flags |= M_PASSIVE_ZERO;
828 }
5fee07e6 829 kup = btokup(z);
722871d3 830 *kup = -(z->z_Cpu + 1); /* -1 to -(N+1) */
10cc6608 831 chunk_mark_allocated(z, chunk);
bbb201fd
MD
832#ifdef SLAB_DEBUG
833 slab_record_source(z, file, line);
834#endif
1c5ca4f3
MD
835
836 /*
837 * Slide the base index for initial allocations out of the next
838 * zone we create so we do not over-weight the lower part of the
839 * cpu memory caches.
840 */
841 slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
842 & (ZALLOC_MAX_ZONE_SIZE - 1);
a108bf71 843 }
5fee07e6 844
a108bf71 845done:
bba6a44d
MD
846 ++type->ks_inuse[gd->gd_cpuid];
847 type->ks_memuse[gd->gd_cpuid] += size;
38e34349 848 type->ks_loosememuse += size; /* not MP synchronized */
a108bf71 849 crit_exit();
5fee07e6 850
a108bf71
MD
851 if (flags & M_ZERO)
852 bzero(chunk, size);
bba6a44d 853#ifdef INVARIANTS
d2182dc1
MD
854 else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
855 if (use_malloc_pattern) {
856 for (i = 0; i < size; i += sizeof(int)) {
857 *(int *)((char *)chunk + i) = -1;
858 }
859 }
bba6a44d 860 chunk->c_Next = (void *)-1; /* avoid accidental double-free check */
d2182dc1 861 }
bba6a44d 862#endif
5fee07e6 863 logmemory(malloc_end, chunk, type, size, flags);
a108bf71
MD
864 return(chunk);
865fail:
866 crit_exit();
5fee07e6 867 logmemory(malloc_end, NULL, type, size, flags);
a108bf71
MD
868 return(NULL);
869}
870
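/*
 * Illustrative sketch, not part of the original file: the two common
 * calling styles for kmalloc().  M_WAITOK may block and is not expected
 * to return NULL, while M_RNOWAIT | M_NULLOK never blocks and must be
 * checked for failure.  M_EXAMPLE and example_frame are invented names.
 */
#if 0
struct example_frame { int payload; };

static void
example_alloc_styles(void)
{
	struct example_frame *f;

	/* blocking allocation, zeroed */
	f = kmalloc(sizeof(*f), M_EXAMPLE, M_WAITOK | M_ZERO);
	kfree(f, M_EXAMPLE);

	/* non-blocking attempt, e.g. from a fast path; may fail */
	f = kmalloc(sizeof(*f), M_EXAMPLE, M_RNOWAIT | M_NULLOK);
	if (f == NULL)
		return;
	kfree(f, M_EXAMPLE);
}
#endif
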
871/*
872 * kernel realloc. (SLAB ALLOCATOR) (MP SAFE)
873 *
874 * Generally speaking this routine is not called very often and we do
875 * not attempt to optimize it beyond reusing the same pointer if the
876 * new size fits within the chunking of the old pointer's zone.
877 */
bbb201fd
MD
878#ifdef SLAB_DEBUG
879void *
880krealloc_debug(void *ptr, unsigned long size,
881 struct malloc_type *type, int flags,
882 const char *file, int line)
883#else
a108bf71 884void *
8aca2bd4 885krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
bbb201fd 886#endif
a108bf71 887{
722871d3 888 unsigned long osize;
a108bf71
MD
889 SLZone *z;
890 void *nptr;
722871d3 891 int *kup;
a108bf71 892
eb7f3e3c
MD
893 KKASSERT((flags & M_ZERO) == 0); /* not supported */
894
a108bf71 895 if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
bbb201fd 896 return(kmalloc_debug(size, type, flags, file, line));
a108bf71 897 if (size == 0) {
efda3bd0 898 kfree(ptr, type);
a108bf71
MD
899 return(NULL);
900 }
901
902 /*
903 * Handle oversized allocations. XXX we really should require that a
904 * size be passed to free() instead of this nonsense.
905 */
5fee07e6 906 kup = btokup(ptr);
722871d3
MD
907 if (*kup > 0) {
908 osize = *kup << PAGE_SHIFT;
5fee07e6
MD
909 if (osize == round_page(size))
910 return(ptr);
bbb201fd 911 if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
5fee07e6
MD
912 return(NULL);
913 bcopy(ptr, nptr, min(size, osize));
914 kfree(ptr, type);
915 return(nptr);
a108bf71
MD
916 }
917
918 /*
919 * Get the original allocation's zone. If the new request winds up
920 * using the same chunk size we do not have to do anything.
921 */
5fee07e6
MD
922 z = (SLZone *)((uintptr_t)ptr & ZoneMask);
923 kup = btokup(z);
722871d3 924 KKASSERT(*kup < 0);
a108bf71
MD
925 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
926
a108bf71
MD
927 /*
928 * Allocate memory for the new request size. Note that zoneindex has
929 * already adjusted the request size to the appropriate chunk size, which
930 * should optimize our bcopy(). Then copy and return the new pointer.
1ea6580d
MD
931 *
932 * Resizing a non-power-of-2 allocation to a power-of-2 size does not
933 * necessarily align the result.
934 *
935 * We can only zoneindex (to align size to the chunk size) if the new
936 * size is not too large.
a108bf71 937 */
1ea6580d
MD
938 if (size < ZoneLimit) {
939 zoneindex(&size);
940 if (z->z_ChunkSize == size)
941 return(ptr);
942 }
bbb201fd 943 if ((nptr = kmalloc_debug(size, type, flags, file, line)) == NULL)
a108bf71
MD
944 return(NULL);
945 bcopy(ptr, nptr, min(size, z->z_ChunkSize));
efda3bd0 946 kfree(ptr, type);
a108bf71
MD
947 return(nptr);
948}
949
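/*
 * Illustrative sketch, not part of the original file: the usual
 * grow-a-buffer pattern with krealloc().  The pointer is reused whenever
 * the new size maps to the same chunk size, so doubling amortizes the
 * copies.  Names are invented; note that M_ZERO is not supported here.
 */
#if 0
static void *
example_grow(void *buf, unsigned long *lenp)
{
	*lenp *= 2;
	return (krealloc(buf, *lenp, M_EXAMPLE, M_WAITOK));
}
#endif
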
38e34349 950/*
45d2b1d8
MD
951 * Return the kmalloc limit for this type, in bytes.
952 */
953long
954kmalloc_limit(struct malloc_type *type)
955{
956 if (type->ks_limit == 0) {
957 crit_enter();
958 if (type->ks_limit == 0)
959 malloc_init(type);
960 crit_exit();
961 }
962 return(type->ks_limit);
963}
964
965/*
38e34349
MD
966 * Allocate a copy of the specified string.
967 *
968 * (MP SAFE) (MAY BLOCK)
969 */
bbb201fd
MD
970#ifdef SLAB_DEBUG
971char *
972kstrdup_debug(const char *str, struct malloc_type *type,
973 const char *file, int line)
974#else
1ac06773 975char *
59302080 976kstrdup(const char *str, struct malloc_type *type)
bbb201fd 977#endif
1ac06773
MD
978{
979 int zlen; /* length inclusive of terminating NUL */
980 char *nstr;
981
982 if (str == NULL)
983 return(NULL);
984 zlen = strlen(str) + 1;
bbb201fd 985 nstr = kmalloc_debug(zlen, type, M_WAITOK, file, line);
1ac06773
MD
986 bcopy(str, nstr, zlen);
987 return(nstr);
988}
989
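/*
 * Illustrative sketch, not part of the original file: kstrdup() copies a
 * NUL-terminated string into a fresh allocation charged to the given
 * type; the copy is later released with kfree() against the same type.
 * M_EXAMPLE is an invented bucket.
 */
#if 0
static void
example_kstrdup_usage(const char *ifname)
{
	char *name;

	name = kstrdup(ifname, M_EXAMPLE);
	/* ... use name ... */
	kfree(name, M_EXAMPLE);
}
#endif
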
1d712609 990#ifdef SMP
a108bf71 991/*
5fee07e6 992 * Notify our cpu that a remote cpu has freed some chunks in a zone that
df9daea8
MD
993 * we own. RCount will be bumped so the memory should be good, but validate
994 * that it really is.
a108bf71
MD
995 */
996static
997void
5fee07e6 998kfree_remote(void *ptr)
a108bf71 999{
5fee07e6
MD
1000 SLGlobalData *slgd;
1001 SLChunk *bchunk;
1002 SLZone *z;
1003 int nfree;
722871d3 1004 int *kup;
5fee07e6 1005
5fee07e6
MD
1006 slgd = &mycpu->gd_slab;
1007 z = ptr;
1008 kup = btokup(z);
df9daea8
MD
1009 KKASSERT(*kup == -((int)mycpuid + 1));
1010 KKASSERT(z->z_RCount > 0);
1011 atomic_subtract_int(&z->z_RCount, 1);
5fee07e6 1012
5bf48697 1013 logmemory(free_rem_beg, z, NULL, 0L, 0);
df9daea8
MD
1014 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1015 KKASSERT(z->z_Cpu == mycpu->gd_cpuid);
1016 nfree = z->z_NFree;
5fee07e6 1017
df9daea8
MD
1018 /*
1019 * Indicate that we will no longer be off of the ZoneAry by
1020 * clearing RSignal.
1021 */
1022 if (z->z_RChunks)
1023 z->z_RSignal = 0;
5fee07e6 1024
df9daea8
MD
1025 /*
1026 * Atomically extract the bchunks list and then process it back
1027 * into the lchunks list. We want to append our bchunks to the
1028 * lchunks list and not prepend since we likely do not have
1029 * cache mastership of the related data (not that it helps since
1030 * we are using c_Next).
1031 */
1032 while ((bchunk = z->z_RChunks) != NULL) {
1033 cpu_ccfence();
1034 if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
1035 *z->z_LChunksp = bchunk;
1036 while (bchunk) {
1037 chunk_mark_free(z, bchunk);
1038 z->z_LChunksp = &bchunk->c_Next;
1039 bchunk = bchunk->c_Next;
1040 ++z->z_NFree;
5fee07e6 1041 }
df9daea8 1042 break;
5fee07e6 1043 }
df9daea8
MD
1044 }
1045 if (z->z_NFree && nfree == 0) {
1046 z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
1047 slgd->ZoneAry[z->z_ZoneIndex] = z;
1048 }
5fee07e6 1049
df9daea8
MD
1050 /*
1051 * If the zone becomes totally free, and there are other zones we
1052 * can allocate from, move this zone to the FreeZones list. Since
1053 * this code can be called from an IPI callback, do *NOT* try to mess
1054 * with kernel_map here. Hysteresis will be performed at malloc() time.
1055 *
1056 * Do not move the zone if there is an IPI inflight, otherwise MP
1057 * races can result in our free_remote code accessing a destroyed
1058 * zone.
1059 */
1060 if (z->z_NFree == z->z_NMax &&
1061 (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z) &&
1062 z->z_RCount == 0
1063 ) {
1064 SLZone **pz;
1065 int *kup;
5fee07e6 1066
df9daea8
MD
1067 for (pz = &slgd->ZoneAry[z->z_ZoneIndex];
1068 z != *pz;
1069 pz = &(*pz)->z_Next) {
1070 ;
5fee07e6 1071 }
df9daea8
MD
1072 *pz = z->z_Next;
1073 z->z_Magic = -1;
1074 z->z_Next = slgd->FreeZones;
1075 slgd->FreeZones = z;
1076 ++slgd->NFreeZones;
1077 kup = btokup(z);
1078 *kup = 0;
5fee07e6 1079 }
5bf48697 1080 logmemory(free_rem_end, z, bchunk, 0L, 0);
a108bf71
MD
1081}
1082
1d712609
MD
1083#endif
1084
38e34349 1085/*
5b287bba 1086 * free (SLAB ALLOCATOR)
38e34349
MD
1087 *
1088 * Free a memory block previously allocated by malloc. Note that we do not
5fee07e6 1089 * attempt to update ks_loosememuse as MP races could prevent us from
38e34349 1090 * checking memory limits in malloc.
5b287bba
MD
1091 *
1092 * MPSAFE
38e34349 1093 */
a108bf71 1094void
8aca2bd4 1095kfree(void *ptr, struct malloc_type *type)
a108bf71
MD
1096{
1097 SLZone *z;
1098 SLChunk *chunk;
1099 SLGlobalData *slgd;
bba6a44d 1100 struct globaldata *gd;
722871d3 1101 int *kup;
5fee07e6 1102 unsigned long size;
d8100bdc
SW
1103#ifdef SMP
1104 SLChunk *bchunk;
5fee07e6 1105 int rsignal;
d8100bdc 1106#endif
a108bf71 1107
b68ad50c 1108 logmemory_quick(free_beg);
bba6a44d
MD
1109 gd = mycpu;
1110 slgd = &gd->gd_slab;
a108bf71 1111
d39911d9
JS
1112 if (ptr == NULL)
1113 panic("trying to free NULL pointer");
1114
a108bf71
MD
1115 /*
1116 * Handle special 0-byte allocations
1117 */
f2b5daf9 1118 if (ptr == ZERO_LENGTH_PTR) {
5bf48697 1119 logmemory(free_zero, ptr, type, -1UL, 0);
b68ad50c 1120 logmemory_quick(free_end);
a108bf71 1121 return;
f2b5daf9 1122 }
a108bf71
MD
1123
1124 /*
5fee07e6
MD
1125 * Panic on bad malloc type
1126 */
1127 if (type->ks_magic != M_MAGIC)
1128 panic("free: malloc type lacks magic");
1129
1130 /*
a108bf71
MD
1131 * Handle oversized allocations. XXX we really should require that a
1132 * size be passed to free() instead of this nonsense.
bba6a44d
MD
1133 *
1134 * This code is never called via an ipi.
a108bf71 1135 */
5fee07e6 1136 kup = btokup(ptr);
722871d3
MD
1137 if (*kup > 0) {
1138 size = *kup << PAGE_SHIFT;
1139 *kup = 0;
a108bf71 1140#ifdef INVARIANTS
5fee07e6
MD
1141 KKASSERT(sizeof(weirdary) <= size);
1142 bcopy(weirdary, ptr, sizeof(weirdary));
a108bf71 1143#endif
5fee07e6
MD
1144 /*
1145 * NOTE: For oversized allocations we do not record the
1146 * originating cpu. It gets freed on the cpu calling
1147 * kfree(). The statistics are in aggregate.
1148 *
1149 * note: XXX we have still inherited the interrupts-can't-block
1150 * assumption. An interrupt thread does not bump
1151 * gd_intr_nesting_level so check TDF_INTTHREAD. This is
1152 * primarily until we can fix softupdate's assumptions about free().
1153 */
1154 crit_enter();
1155 --type->ks_inuse[gd->gd_cpuid];
1156 type->ks_memuse[gd->gd_cpuid] -= size;
1157 if (mycpu->gd_intr_nesting_level ||
1158 (gd->gd_curthread->td_flags & TDF_INTTHREAD))
1159 {
1160 logmemory(free_ovsz_delayed, ptr, type, size, 0);
1161 z = (SLZone *)ptr;
1162 z->z_Magic = ZALLOC_OVSZ_MAGIC;
1163 z->z_Next = slgd->FreeOvZones;
1164 z->z_ChunkSize = size;
1165 slgd->FreeOvZones = z;
1166 crit_exit();
1167 } else {
1168 crit_exit();
1169 logmemory(free_ovsz, ptr, type, size, 0);
1170 kmem_slab_free(ptr, size); /* may block */
1171 atomic_add_int(&ZoneBigAlloc, -(int)size / 1024);
a108bf71 1172 }
5fee07e6
MD
1173 logmemory_quick(free_end);
1174 return;
a108bf71
MD
1175 }
1176
1177 /*
1178 * Zone case. Figure out the zone based on the fact that it is
1179 * ZoneSize aligned.
1180 */
5fee07e6
MD
1181 z = (SLZone *)((uintptr_t)ptr & ZoneMask);
1182 kup = btokup(z);
722871d3 1183 KKASSERT(*kup < 0);
a108bf71
MD
1184 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1185
1186 /*
5fee07e6
MD
1187 * If we do not own the zone then use atomic ops to free to the
1188 * remote cpu linked list and notify the target zone using a
1189 * passive message.
1190 *
1191 * The target zone cannot be deallocated while we own a chunk of it,
1192 * so the zone header's storage is stable until the very moment
1193 * we adjust z_RChunks. After that we cannot safely dereference (z).
1194 *
1195 * (no critical section needed)
a108bf71 1196 */
2db3b277 1197 if (z->z_CpuGd != gd) {
75c7ffea 1198#ifdef SMP
1199 /*
1200 * Making these adjustments now allows us to avoid passing (type)
1201 * to the remote cpu. Note that ks_inuse/ks_memuse is being
1202 * adjusted on OUR cpu, not the zone cpu, but it should all still
1203 * sum up properly and cancel out.
5fee07e6 1204 */
28135cc2
MD
1205 crit_enter();
1206 --type->ks_inuse[gd->gd_cpuid];
1207 type->ks_memuse[gd->gd_cpuid] -= z->z_ChunkSize;
1208 crit_exit();
5fee07e6
MD
1209
1210 /*
1211 * WARNING! This code competes with other cpus. Once we
1212 * successfully link the chunk to RChunks the remote
1213 * cpu can rip z's storage out from under us.
df9daea8
MD
1214 *
1215 * Bumping RCount prevents z's storage from getting
1216 * ripped out.
5fee07e6
MD
1217 */
1218 rsignal = z->z_RSignal;
1219 cpu_lfence();
df9daea8
MD
1220 if (rsignal)
1221 atomic_add_int(&z->z_RCount, 1);
5fee07e6
MD
1222
1223 chunk = ptr;
1224 for (;;) {
1225 bchunk = z->z_RChunks;
1226 cpu_ccfence();
1227 chunk->c_Next = bchunk;
1228 cpu_sfence();
1229
1230 if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
1231 break;
1232 }
5fee07e6
MD
1233
1234 /*
1235 * We have to signal the remote cpu if our actions will cause
1236 * the remote zone to be placed back on ZoneAry so it can
1237 * move the zone back on.
1238 *
1239 * We only need to deal with NULL->non-NULL RChunk transitions
1240 * and only if z_RSignal is set. We interlock by reading rsignal
1241 * before adding our chunk to RChunks. This should result in
1242 * virtually no IPI traffic.
1243 *
1244 * We can use a passive IPI to reduce overhead even further.
1245 */
1246 if (bchunk == NULL && rsignal) {
5bf48697 1247 logmemory(free_request, ptr, type, (unsigned long)z->z_ChunkSize, 0);
5fee07e6 1248 lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
df9daea8
MD
1249 /* z can get ripped out from under us from this point on */
1250 } else if (rsignal) {
1251 atomic_subtract_int(&z->z_RCount, 1);
1252 /* z can get ripped out from under us from this point on */
5fee07e6 1253 }
75c7ffea
MD
1254#else
1255 panic("Corrupt SLZone");
1256#endif
b68ad50c 1257 logmemory_quick(free_end);
a108bf71
MD
1258 return;
1259 }
1260
5fee07e6
MD
1261 /*
1262 * kfree locally
1263 */
5bf48697 1264 logmemory(free_chunk, ptr, type, (unsigned long)z->z_ChunkSize, 0);
f2b5daf9 1265
a108bf71 1266 crit_enter();
a108bf71 1267 chunk = ptr;
10cc6608 1268 chunk_mark_free(z, chunk);
a108bf71
MD
1269
1270 /*
1271 * Put weird data into the memory to detect modifications after freeing,
1272 * illegal pointer use after freeing (we should fault on the odd address),
1273 * and so forth. XXX needs more work, see the old malloc code.
1274 */
1275#ifdef INVARIANTS
1276 if (z->z_ChunkSize < sizeof(weirdary))
1277 bcopy(weirdary, chunk, z->z_ChunkSize);
1278 else
1279 bcopy(weirdary, chunk, sizeof(weirdary));
1280#endif
1281
1282 /*
5fee07e6
MD
1283 * Add this free non-zero'd chunk to a linked list for reuse. Add
1284 * to the front of the linked list so it is more likely to be
1285 * reallocated, since it is already in our L1 cache.
a108bf71 1286 */
6ab8e1da 1287#ifdef INVARIANTS
c439ad8f 1288 if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
fc92d4aa 1289 panic("BADFREE %p", chunk);
a108bf71 1290#endif
5fee07e6
MD
1291 chunk->c_Next = z->z_LChunks;
1292 z->z_LChunks = chunk;
1293 if (chunk->c_Next == NULL)
1294 z->z_LChunksp = &chunk->c_Next;
1295
6ab8e1da 1296#ifdef INVARIANTS
c439ad8f 1297 if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
a108bf71 1298 panic("BADFREE2");
6ab8e1da 1299#endif
a108bf71
MD
1300
1301 /*
1302 * Bump the number of free chunks. If it becomes non-zero the zone
1303 * must be added back onto the appropriate list.
1304 */
1305 if (z->z_NFree++ == 0) {
1306 z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
1307 slgd->ZoneAry[z->z_ZoneIndex] = z;
1308 }
1309
bba6a44d
MD
1310 --type->ks_inuse[z->z_Cpu];
1311 type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;
a108bf71
MD
1312
1313 /*
1314 * If the zone becomes totally free, and there are other zones we
a7cf0021
MD
1315 * can allocate from, move this zone to the FreeZones list. Since
1316 * this code can be called from an IPI callback, do *NOT* try to mess
1317 * with kernel_map here. Hysteresis will be performed at malloc() time.
a108bf71
MD
1318 */
1319 if (z->z_NFree == z->z_NMax &&
df9daea8
MD
1320 (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z) &&
1321 z->z_RCount == 0
a108bf71
MD
1322 ) {
1323 SLZone **pz;
722871d3 1324 int *kup;
a108bf71
MD
1325
1326 for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
1327 ;
1328 *pz = z->z_Next;
1329 z->z_Magic = -1;
a7cf0021
MD
1330 z->z_Next = slgd->FreeZones;
1331 slgd->FreeZones = z;
1332 ++slgd->NFreeZones;
5fee07e6 1333 kup = btokup(z);
722871d3 1334 *kup = 0;
a108bf71 1335 }
b68ad50c 1336 logmemory_quick(free_end);
a108bf71
MD
1337 crit_exit();
1338}
1339
10cc6608 1340#if defined(INVARIANTS)
5fee07e6 1341
10cc6608
MD
1342/*
1343 * Helper routines for sanity checks
1344 */
1345static
1346void
1347chunk_mark_allocated(SLZone *z, void *chunk)
1348{
1349 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1350 __uint32_t *bitptr;
1351
5fee07e6
MD
1352 KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
1353 KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
1354 ("memory chunk %p bit index %d is illegal", chunk, bitdex));
10cc6608
MD
1355 bitptr = &z->z_Bitmap[bitdex >> 5];
1356 bitdex &= 31;
5fee07e6
MD
1357 KASSERT((*bitptr & (1 << bitdex)) == 0,
1358 ("memory chunk %p is already allocated!", chunk));
10cc6608
MD
1359 *bitptr |= 1 << bitdex;
1360}
1361
1362static
1363void
1364chunk_mark_free(SLZone *z, void *chunk)
1365{
1366 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1367 __uint32_t *bitptr;
1368
5fee07e6
MD
1369 KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
1370 KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
1371 ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
10cc6608
MD
1372 bitptr = &z->z_Bitmap[bitdex >> 5];
1373 bitdex &= 31;
5fee07e6
MD
1374 KASSERT((*bitptr & (1 << bitdex)) != 0,
1375 ("memory chunk %p is already free!", chunk));
10cc6608
MD
1376 *bitptr &= ~(1 << bitdex);
1377}
1378
1379#endif
1380
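/*
 * Illustrative sketch, not part of the original file: what the bitmap
 * bookkeeping above catches on an INVARIANTS kernel.  Freeing the same
 * chunk twice (without an intervening reallocation) trips the "already
 * free" assertion in chunk_mark_free(); handing out a chunk whose bit is
 * still set trips the "already allocated" assertion.  M_EXAMPLE is an
 * invented bucket.
 */
#if 0
void *p = kmalloc(64, M_EXAMPLE, M_WAITOK);

kfree(p, M_EXAMPLE);
kfree(p, M_EXAMPLE);	/* second free panics via KASSERT */
#endif
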
a108bf71 1381/*
5b287bba 1382 * kmem_slab_alloc()
a108bf71
MD
1383 *
1384 * Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
1385 * specified alignment. M_* flags are expected in the flags field.
1386 *
1387 * Alignment must be a multiple of PAGE_SIZE.
1388 *
1389 * NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
1390 * but when we move zalloc() over to use this function as its backend
1391 * we will have to switch to kreserve/krelease and call reserve(0)
1392 * after the new space is made available.
dc1fd4b3
MD
1393 *
1394 * Interrupt code which has preempted other code is not allowed to
c397c465
MD
1395 * use PQ_CACHE pages. However, if an interrupt thread is run
1396 * non-preemptively or blocks and then runs non-preemptively, then
b12defdc 1397 * it is free to use PQ_CACHE pages. <--- may not apply any longer XXX
a108bf71
MD
1398 */
1399static void *
1400kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
1401{
1402 vm_size_t i;
1403 vm_offset_t addr;
1de1e800 1404 int count, vmflags, base_vmflags;
b12defdc
MD
1405 vm_page_t mbase = NULL;
1406 vm_page_t m;
dc1fd4b3 1407 thread_t td;
a108bf71
MD
1408
1409 size = round_page(size);
e4846942 1410 addr = vm_map_min(&kernel_map);
a108bf71 1411
a108bf71
MD
1412 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1413 crit_enter();
e4846942 1414 vm_map_lock(&kernel_map);
c809941b 1415 if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
e4846942 1416 vm_map_unlock(&kernel_map);
8cb2bf45 1417 if ((flags & M_NULLOK) == 0)
a108bf71 1418 panic("kmem_slab_alloc(): kernel_map ran out of space!");
a108bf71 1419 vm_map_entry_release(count);
2de4f77e 1420 crit_exit();
a108bf71
MD
1421 return(NULL);
1422 }
e4846942
MD
1423
1424 /*
1425 * kernel_object maps 1:1 to kernel_map.
1426 */
b12defdc
MD
1427 vm_object_hold(&kernel_object);
1428 vm_object_reference_locked(&kernel_object);
e4846942
MD
1429 vm_map_insert(&kernel_map, &count,
1430 &kernel_object, addr, addr, addr + size,
1b874851
MD
1431 VM_MAPTYPE_NORMAL,
1432 VM_PROT_ALL, VM_PROT_ALL,
1433 0);
b12defdc
MD
1434 vm_object_drop(&kernel_object);
1435 vm_map_set_wired_quick(&kernel_map, addr, size, &count);
1436 vm_map_unlock(&kernel_map);
a108bf71 1437
dc1fd4b3 1438 td = curthread;
dc1fd4b3 1439
1de1e800
JS
1440 base_vmflags = 0;
1441 if (flags & M_ZERO)
1442 base_vmflags |= VM_ALLOC_ZERO;
1443 if (flags & M_USE_RESERVE)
1444 base_vmflags |= VM_ALLOC_SYSTEM;
1445 if (flags & M_USE_INTERRUPT_RESERVE)
1446 base_vmflags |= VM_ALLOC_INTERRUPT;
77912481
MD
1447 if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) {
1448 panic("kmem_slab_alloc: bad flags %08x (%p)",
1449 flags, ((int **)&size)[-1]);
1450 }
1de1e800 1451
a108bf71 1452 /*
b12defdc
MD
1453 * Allocate the pages. Do not mess with the PG_ZERO flag or map
1454 * them yet. VM_ALLOC_NORMAL can only be set if we are not preempting.
1455 *
1456 * VM_ALLOC_SYSTEM is automatically set if we are preempting and
1457 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
1458 * implied in this case), though I'm not sure if we really need to
1459 * do that.
a108bf71 1460 */
b12defdc
MD
1461 vmflags = base_vmflags;
1462 if (flags & M_WAITOK) {
1463 if (td->td_preempted)
1464 vmflags |= VM_ALLOC_SYSTEM;
1465 else
1466 vmflags |= VM_ALLOC_NORMAL;
1467 }
a108bf71 1468
b12defdc
MD
1469 vm_object_hold(&kernel_object);
1470 for (i = 0; i < size; i += PAGE_SIZE) {
e4846942 1471 m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);
b12defdc
MD
1472 if (i == 0)
1473 mbase = m;
dc1fd4b3
MD
1474
1475 /*
1476 * If the allocation failed we either return NULL or we retry.
1477 *
c397c465
MD
1478 * If M_WAITOK is specified we wait for more memory and retry.
1479 * If M_WAITOK is specified from a preemption we yield instead of
1480 * wait. Livelock will not occur because the interrupt thread
1481 * will not be preempting anyone the second time around after the
1482 * yield.
dc1fd4b3 1483 */
a108bf71 1484 if (m == NULL) {
c397c465 1485 if (flags & M_WAITOK) {
fe1e98d0 1486 if (td->td_preempted) {
77912481 1487 lwkt_switch();
dc1fd4b3 1488 } else {
4ecf7cc9 1489 vm_wait(0);
dc1fd4b3 1490 }
a108bf71
MD
1491 i -= PAGE_SIZE; /* retry */
1492 continue;
1493 }
b12defdc
MD
1494 break;
1495 }
1496 }
dc1fd4b3 1497
b12defdc
MD
1498 /*
1499 * Check and deal with an allocation failure
1500 */
1501 if (i != size) {
1502 while (i != 0) {
1503 i -= PAGE_SIZE;
1504 m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
1505 /* page should already be busy */
1506 vm_page_free(m);
a108bf71 1507 }
b12defdc
MD
1508 vm_map_lock(&kernel_map);
1509 vm_map_delete(&kernel_map, addr, addr + size, &count);
1510 vm_map_unlock(&kernel_map);
1511 vm_object_drop(&kernel_object);
1512
1513 vm_map_entry_release(count);
1514 crit_exit();
1515 return(NULL);
a108bf71
MD
1516 }
1517
1518 /*
dc1fd4b3
MD
1519 * Success!
1520 *
b12defdc
MD
1521 * NOTE: The VM pages are still busied. mbase points to the first one
1522 * but we have to iterate via vm_page_next()
a108bf71 1523 */
b12defdc 1524 vm_object_drop(&kernel_object);
a108bf71
MD
1525 crit_exit();
1526
1527 /*
1528 * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
1529 */
b12defdc
MD
1530 m = mbase;
1531 i = 0;
a108bf71 1532
b12defdc
MD
1533 while (i < size) {
1534 /*
1535 * page should already be busy
1536 */
a108bf71
MD
1537 m->valid = VM_PAGE_BITS_ALL;
1538 vm_page_wire(m);
b12defdc 1539 pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL | VM_PROT_NOSYNC, 1);
a108bf71
MD
1540 if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
1541 bzero((char *)addr + i, PAGE_SIZE);
1542 vm_page_flag_clear(m, PG_ZERO);
17cde63e
MD
1543 KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
1544 vm_page_flag_set(m, PG_REFERENCED);
a491077e 1545 vm_page_wakeup(m);
b12defdc
MD
1546
1547 i += PAGE_SIZE;
1548 vm_object_hold(&kernel_object);
1549 m = vm_page_next(m);
1550 vm_object_drop(&kernel_object);
a108bf71 1551 }
b12defdc 1552 smp_invltlb();
a108bf71
MD
1553 vm_map_entry_release(count);
1554 return((void *)addr);
1555}
1556
38e34349 1557/*
5b287bba 1558 * kmem_slab_free()
38e34349 1559 */
a108bf71
MD
1560static void
1561kmem_slab_free(void *ptr, vm_size_t size)
1562{
1563 crit_enter();
e4846942 1564 vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
a108bf71
MD
1565 crit_exit();
1566}
1567