/*
 * (MPSAFE)
 *
 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004,2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.55 2008/10/22 01:42:17 dillon Exp $
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *
 *	API REQUIREMENTS AND SIDE EFFECTS
 *
 * To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 * have remained compatible with the following API requirements:
 *
 * + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 * + all power-of-2 sized allocations are power-of-2 aligned (twe)
 * + malloc(0) is allowed and returns non-NULL (ahc driver)
 * + ability to allocate arbitrarily large chunks of memory
 */
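
/*
 * Usage sketch (illustrative only, not part of the allocator itself): a
 * typical consumer of the kmalloc()/kfree() interface implemented below.
 * The bucket name M_EXAMPLE and structure foo are hypothetical and exist
 * only for this example.
 *
 *	MALLOC_DEFINE(M_EXAMPLE, "example", "example allocations");
 *
 *	struct foo *fp;
 *
 *	fp = kmalloc(sizeof(*fp), M_EXAMPLE, M_WAITOK | M_ZERO);
 *	...
 *	kfree(fp, M_EXAMPLE);
 */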

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>

#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))

#define MEMORY_STRING	"ptr=%p type=%p size=%d flags=%04x"
#define MEMORY_ARG_SIZE	(sizeof(void *) * 2 + sizeof(unsigned long) + \
			sizeof(int))

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc, 0, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_zero, 1, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 2, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 3, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 4, MEMORY_STRING, MEMORY_ARG_SIZE);
#ifdef SMP
KTR_INFO(KTR_MEMORY, memory, free_request, 5, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_remote, 6, MEMORY_STRING, MEMORY_ARG_SIZE);
#endif
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin", 0);
KTR_INFO(KTR_MEMORY, memory, free_beg, 0, "free begin", 0);
KTR_INFO(KTR_MEMORY, memory, free_end, 0, "free end", 0);

#define logmemory(name, ptr, type, size, flags)	\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)	\
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static int ZoneMask;
static int ZoneBigAlloc;		/* in KB */
static int ZoneGenAlloc;		/* in KB */
struct malloc_type *kmemstatistics;	/* exported to vmstat */
static struct kmemusage *kmemusage;
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);
#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#endif

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH	2		/* threshold number of zones */
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

char *ZeroPage;

SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL)

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
	    &use_malloc_pattern, 0, "");
#endif

SYSCTL_INT(_kern, OID_AUTO, zone_big_alloc, CTLFLAG_RD, &ZoneBigAlloc, 0, "");
SYSCTL_INT(_kern, OID_AUTO, zone_gen_alloc, CTLFLAG_RD, &ZoneGenAlloc, 0, "");

static void
kmeminit(void *dummy)
{
    size_t limsize;
    int usesize;
    int i;
    vm_offset_t npg;

    limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
        limsize = KvaSize;

    usesize = (int)(limsize / 1024);	/* convert to KB */

    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
        ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
        ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ZoneSize - 1;
    ZonePageCount = ZoneSize / PAGE_SIZE;

    npg = KvaSize / PAGE_SIZE;
    kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage),
				PAGE_SIZE, M_WAITOK|M_ZERO);

    for (i = 0; i < arysize(weirdary); ++i)
        weirdary[i] = WEIRD_ADDR;

    ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);

    if (bootverbose)
        kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}
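
/*
 * Worked example of the sizing arithmetic above (illustrative only; the
 * real ZALLOC_* bounds come from slaballoc.h).  If the loop settles on
 * ZoneSize = 128K and PAGE_SIZE is 4K:
 *
 *	ZoneLimit     = ZoneSize / 4 = 32K, then clamped to ZALLOC_ZONE_LIMIT
 *	ZoneMask      = 0x1ffff  (masks a chunk pointer back to its zone)
 *	ZonePageCount = 32       (pages per zone; bounds the per-page free lists)
 */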

/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    size_t limsize;

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (type->ks_limit != 0)
        return;

    if (vmstats.v_page_count == 0)
        panic("malloc_init not allowed before vm init");

    limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
        limsize = KvaSize;
    type->ks_limit = limsize / 10;

    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}

void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
#ifdef INVARIANTS
    int i;
    long ttl;
#endif

    if (type->ks_magic != M_MAGIC)
        panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
        panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
        panic("malloc_uninit on uninitialized type");

#ifdef SMP
    /* Make sure that all pending kfree()s are finished. */
    lwkt_synchronize_ipiqs("muninit");
#endif

#ifdef INVARIANTS
    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another, individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
        ttl += type->ks_memuse[i];
    if (ttl) {
        kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
            ttl, type->ks_shortdesc, i);
    }
#endif
    if (type == kmemstatistics) {
        kmemstatistics = type->ks_next;
    } else {
        for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
            if (t->ks_next == type) {
                t->ks_next = type->ks_next;
                break;
            }
        }
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
}

/*
 * Increase the kmalloc pool limit for the specified pool.  No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
    if (type->ks_limit == 0)
        malloc_init(type);
    if (bytes == 0)
        bytes = KvaSize;
    if (type->ks_limit < bytes)
        type->ks_limit = bytes;
}

/*
 * Dynamically create a malloc pool.  This function is a NOP if *typep is
 * already non-NULL.
 */
void
kmalloc_create(struct malloc_type **typep, const char *descr)
{
    struct malloc_type *type;

    if (*typep == NULL) {
        type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
        type->ks_magic = M_MAGIC;
        type->ks_shortdesc = descr;
        malloc_init(type);
        *typep = type;
    }
}

/*
 * Destroy a dynamically created malloc pool.  This function is a NOP if
 * the pool has already been destroyed.
 */
void
kmalloc_destroy(struct malloc_type **typep)
{
    if (*typep != NULL) {
        malloc_uninit(*typep);
        kfree(*typep, M_TEMP);
        *typep = NULL;
    }
}

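/*
 * Usage sketch for the dynamic pool interface above (illustrative only;
 * the pointer name examplepool is hypothetical):
 *
 *	static struct malloc_type *examplepool;
 *
 *	kmalloc_create(&examplepool, "example pool");
 *	p = kmalloc(len, examplepool, M_WAITOK);
 *	...
 *	kfree(p, examplepool);
 *	kmalloc_destroy(&examplepool);
 */
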
/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes)
{
    unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
    if (n < 128) {
        *bytes = n = (n + 7) & ~7;
        return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
        *bytes = n = (n + 15) & ~15;
        return(n / 16 + 7);
    }
    if (n < 8192) {
        if (n < 512) {
            *bytes = n = (n + 31) & ~31;
            return(n / 32 + 15);
        }
        if (n < 1024) {
            *bytes = n = (n + 63) & ~63;
            return(n / 64 + 23);
        }
        if (n < 2048) {
            *bytes = n = (n + 127) & ~127;
            return(n / 128 + 31);
        }
        if (n < 4096) {
            *bytes = n = (n + 255) & ~255;
            return(n / 256 + 39);
        }
        *bytes = n = (n + 511) & ~511;
        return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
        *bytes = n = (n + 1023) & ~1023;
        return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
        *bytes = n = (n + 2047) & ~2047;
        return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}

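/*
 * Worked examples for zoneindex() (arithmetic derived from the code above):
 *
 *	request  100 bytes -> rounded to  104 (8-byte chunking)   -> index 12
 *	request  300 bytes -> rounded to  320 (32-byte chunking)  -> index 25
 *	request 5000 bytes -> rounded to 5120 (512-byte chunking) -> index 57
 *
 * The caller's size is updated in place, so kmalloc() below charges the
 * rounded chunk size, not the raw request, against the pool.
 */
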
/*
 * malloc()	(SLAB ALLOCATOR)
 *
 * Allocate memory via the slab allocator.  If the request is too large,
 * or if it is page-aligned beyond a certain size, we fall back to the
 * KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 * &SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 *
 * MPSAFE
 */

void *
kmalloc(unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int zi;
#ifdef INVARIANTS
    int i;
#endif

    logmemory_quick(malloc_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
        crit_enter();
        if (type->ks_limit == 0)
            malloc_init(type);
        crit_exit();
    }
    ++type->ks_calls;

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  The original malloc code looped, but this tended to
     * simply deadlock the computer.
     *
     * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
     * to determine if a more complete limit check should be done.  The
     * actual memory use is tracked via ks_memuse[cpu].
     */
    while (type->ks_loosememuse >= type->ks_limit) {
        int i;
        long ttl;

        for (i = ttl = 0; i < ncpus; ++i)
            ttl += type->ks_memuse[i];
        type->ks_loosememuse = ttl;	/* not MP synchronized */
        if (ttl >= type->ks_limit) {
            if (flags & M_NULLOK) {
                logmemory(malloc, NULL, type, size, flags);
                return(NULL);
            }
            panic("%s: malloc limit exceeded", type->ks_shortdesc);
        }
    }

    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0) {
        logmemory(malloc, ZERO_LENGTH_PTR, type, size, flags);
        return(ZERO_LENGTH_PTR);
    }

    /*
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
     */
    while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if (slgd->NFreeZones > ZONE_RELS_THRESH) {	/* crit sect race */
            z = slgd->FreeZones;
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            kmem_slab_free(z, ZoneSize);	/* may block */
            atomic_add_int(&ZoneGenAlloc, -(int)ZoneSize / 1024);
        }
        crit_exit();
    }
    /*
     * XXX handle oversized frees that were queued from free().
     */
    while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
        crit_enter();
        if ((z = slgd->FreeOvZones) != NULL) {
            vm_size_t tsize;

            KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
            slgd->FreeOvZones = z->z_Next;
            tsize = z->z_ChunkSize;
            kmem_slab_free(z, tsize);	/* may block */
            atomic_add_int(&ZoneBigAlloc, -(int)tsize / 1024);
        }
        crit_exit();
    }

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * The backend allocator is pretty nasty on a SMP system.  Use the
     * slab allocator for one and two page-sized chunks even though we lose
     * some efficiency.  XXX maybe fix mmio and the elf loader instead.
     */
    if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
        struct kmemusage *kup;

        size = round_page(size);
        chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
        if (chunk == NULL) {
            logmemory(malloc, NULL, type, size, flags);
            return(NULL);
        }
        atomic_add_int(&ZoneBigAlloc, (int)size / 1024);
        flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
        flags |= M_PASSIVE_ZERO;
        kup = btokup(chunk);
        kup->ku_pagecnt = size / PAGE_SIZE;
        crit_enter();
        goto done;
    }

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    KKASSERT(zi < NZONES);
    crit_enter();
    if ((z = slgd->ZoneAry[zi]) != NULL) {
        KKASSERT(z->z_NFree > 0);

        /*
         * Remove us from the ZoneAry[] when we become empty
         */
        if (--z->z_NFree == 0) {
            slgd->ZoneAry[zi] = z->z_Next;
            z->z_Next = NULL;
        }

        /*
         * Locate a chunk in a free page.  This attempts to localize
         * reallocations into earlier pages without us having to sort
         * the chunk list.  A chunk may still overlap a page boundary.
         */
        while (z->z_FirstFreePg < ZonePageCount) {
            if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
#ifdef DIAGNOSTIC
                /*
                 * Diagnostic: c_Next is not total garbage.
                 */
                KKASSERT(chunk->c_Next == NULL ||
                        ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
                        ((intptr_t)chunk & IN_SAME_PAGE_MASK));
#endif
#ifdef INVARIANTS
                if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
                    panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount);
                if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
                    panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount);
                chunk_mark_allocated(z, chunk);
#endif
                z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
                goto done;
            }
            ++z->z_FirstFreePg;
        }

        /*
         * No chunks are available but NFree said we had some memory, so
         * it must be available in the never-before-used-memory area
         * governed by UIndex.  The consequences are very serious if our zone
         * got corrupted so we use an explicit panic rather than a KASSERT.
         */
        if (z->z_UIndex + 1 != z->z_NMax)
            z->z_UIndex = z->z_UIndex + 1;
        else
            z->z_UIndex = 0;
        if (z->z_UIndex == z->z_UEndIndex)
            panic("slaballoc: corrupted zone");
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;
            flags |= M_PASSIVE_ZERO;
        }
#if defined(INVARIANTS)
        chunk_mark_allocated(z, chunk);
#endif
        goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zerod pages.  Also see
     * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
     * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
     * we do not pre-zero it because we do not want to mess up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
        int off;

        if ((z = slgd->FreeZones) != NULL) {
            slgd->FreeZones = z->z_Next;
            --slgd->NFreeZones;
            bzero(z, sizeof(SLZone));
            z->z_Flags |= SLZF_UNOTZEROD;
        } else {
            z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
            if (z == NULL)
                goto fail;
            atomic_add_int(&ZoneGenAlloc, (int)ZoneSize / 1024);
        }

        /*
         * How big is the base structure?
         */
#if defined(INVARIANTS)
        /*
         * Make room for z_Bitmap.  An exact calculation is somewhat more
         * complicated so don't make an exact calculation.
         */
        off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
        bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
        off = sizeof(SLZone);
#endif

        /*
         * Guarantee power-of-2 alignment for power-of-2-sized chunks.
         * Otherwise just 8-byte align the data.
         */
        if ((size | (size - 1)) + 1 == (size << 1))
            off = (off + size - 1) & ~(size - 1);
        else
            off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
        z->z_Magic = ZALLOC_SLAB_MAGIC;
        z->z_ZoneIndex = zi;
        z->z_NMax = (ZoneSize - off) / size;
        z->z_NFree = z->z_NMax - 1;
        z->z_BasePtr = (char *)z + off;
        z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
        z->z_ChunkSize = size;
        z->z_FirstFreePg = ZonePageCount;
        z->z_CpuGd = gd;
        z->z_Cpu = gd->gd_cpuid;
        chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
        z->z_Next = slgd->ZoneAry[zi];
        slgd->ZoneAry[zi] = z;
        if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
            flags &= ~M_ZERO;	/* already zero'd */
            flags |= M_PASSIVE_ZERO;
        }
#if defined(INVARIANTS)
        chunk_mark_allocated(z, chunk);
#endif

        /*
         * Slide the base index for initial allocations out of the next
         * zone we create so we do not over-weight the lower part of the
         * cpu memory caches.
         */
        slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
                        & (ZALLOC_MAX_ZONE_SIZE - 1);
    }
done:
    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;	/* not MP synchronized */
    crit_exit();
    if (flags & M_ZERO)
        bzero(chunk, size);
#ifdef INVARIANTS
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
        if (use_malloc_pattern) {
            for (i = 0; i < size; i += sizeof(int)) {
                *(int *)((char *)chunk + i) = -1;
            }
        }
        chunk->c_Next = (void *)-1;	/* avoid accidental double-free check */
    }
#endif
    logmemory(malloc, chunk, type, size, flags);
    return(chunk);
fail:
    crit_exit();
    logmemory(malloc, NULL, type, size, flags);
    return(NULL);
}

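/*
 * Flag usage sketch for kmalloc() above (illustrative only; 'type' stands
 * for whichever malloc_type pool the caller owns):
 *
 *	p = kmalloc(size, type, M_WAITOK);		(may block)
 *	p = kmalloc(size, type, M_WAITOK | M_ZERO);	(may block, zeroed)
 *	p = kmalloc(size, type, M_RNOWAIT | M_NULLOK);	(never blocks, may
 *							 return NULL)
 *
 * At least one of M_WAITOK or M_RNOWAIT must be supplied; the backend
 * kmem_slab_alloc() panics if neither flag is set.
 */
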
/*
 * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.
 */
void *
krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    void *nptr;
    unsigned long osize;

    KKASSERT((flags & M_ZERO) == 0);	/* not supported */

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
        return(kmalloc(size, type, flags));
    if (size == 0) {
        kfree(ptr, type);
        return(NULL);
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     */
    {
        struct kmemusage *kup;

        kup = btokup(ptr);
        if (kup->ku_pagecnt) {
            osize = kup->ku_pagecnt << PAGE_SHIFT;
            if (osize == round_page(size))
                return(ptr);
            if ((nptr = kmalloc(size, type, flags)) == NULL)
                return(NULL);
            bcopy(ptr, nptr, min(size, osize));
            kfree(ptr, type);
            return(nptr);
        }
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     *
     * Resizing a non-power-of-2 allocation to a power-of-2 size does not
     * necessarily align the result.
     *
     * We can only zoneindex (to align size to the chunk size) if the new
     * size is not too large.
     */
    if (size < ZoneLimit) {
        zoneindex(&size);
        if (z->z_ChunkSize == size)
            return(ptr);
    }
    if ((nptr = kmalloc(size, type, flags)) == NULL)
        return(NULL);
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
    kfree(ptr, type);
    return(nptr);
}

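/*
 * Example of the reuse rule above (illustrative arithmetic only): growing
 * a 100-byte allocation to 103 bytes stays within the same 104-byte chunk
 * (see the zoneindex() examples earlier), so krealloc() returns the
 * original pointer.  Growing it to 200 bytes selects a different chunk
 * size (208), so a new block is allocated, the old contents are bcopy()'d
 * over and the old block is freed.
 */
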
/*
 * Return the kmalloc limit for this type, in bytes.
 */
long
kmalloc_limit(struct malloc_type *type)
{
    if (type->ks_limit == 0) {
        crit_enter();
        if (type->ks_limit == 0)
            malloc_init(type);
        crit_exit();
    }
    return(type->ks_limit);
}

/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
char *
kstrdup(const char *str, struct malloc_type *type)
{
    int zlen;	/* length inclusive of terminating NUL */
    char *nstr;

    if (str == NULL)
        return(NULL);
    zlen = strlen(str) + 1;
    nstr = kmalloc(zlen, type, M_WAITOK);
    bcopy(str, nstr, zlen);
    return(nstr);
}

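/*
 * Usage sketch (illustrative; M_TEMP is one of the shared buckets defined
 * near the top of this file):
 *
 *	char *copy = kstrdup(name, M_TEMP);
 *	...
 *	kfree(copy, M_TEMP);
 *
 * The copy must be released with kfree() against the same malloc_type used
 * to allocate it.
 */
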
#ifdef SMP
/*
 * free()	(SLAB ALLOCATOR)
 *
 * Free the specified chunk of memory.
 */
static
void
free_remote(void *ptr)
{
    logmemory(free_remote, ptr, *(struct malloc_type **)ptr, -1, 0);
    kfree(ptr, *(struct malloc_type **)ptr);
}

#endif

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * MPSAFE
 */
void
kfree(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int pgno;

    logmemory_quick(free_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    if (ptr == NULL)
        panic("trying to free NULL pointer");

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR) {
        logmemory(free_zero, ptr, type, -1, 0);
        logmemory_quick(free_end);
        return;
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    {
        struct kmemusage *kup;
        unsigned long size;

        kup = btokup(ptr);
        if (kup->ku_pagecnt) {
            size = kup->ku_pagecnt << PAGE_SHIFT;
            kup->ku_pagecnt = 0;
#ifdef INVARIANTS
            KKASSERT(sizeof(weirdary) <= size);
            bcopy(weirdary, ptr, sizeof(weirdary));
#endif
            /*
             * NOTE: For oversized allocations we do not record the
             *	     originating cpu.  It gets freed on the cpu calling
             *	     kfree().  The statistics are in aggregate.
             *
             * note: XXX we have still inherited the interrupts-can't-block
             * assumption.  An interrupt thread does not bump
             * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
             * primarily until we can fix softupdate's assumptions about free().
             */
            crit_enter();
            --type->ks_inuse[gd->gd_cpuid];
            type->ks_memuse[gd->gd_cpuid] -= size;
            if (mycpu->gd_intr_nesting_level ||
                (gd->gd_curthread->td_flags & TDF_INTTHREAD))
            {
                logmemory(free_ovsz_delayed, ptr, type, size, 0);
                z = (SLZone *)ptr;
                z->z_Magic = ZALLOC_OVSZ_MAGIC;
                z->z_Next = slgd->FreeOvZones;
                z->z_ChunkSize = size;
                slgd->FreeOvZones = z;
                crit_exit();
            } else {
                crit_exit();
                logmemory(free_ovsz, ptr, type, size, 0);
                kmem_slab_free(ptr, size);	/* may block */
                atomic_add_int(&ZoneBigAlloc, -(int)size / 1024);
            }
            logmemory_quick(free_end);
            return;
        }
    }

    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * If we do not own the zone then forward the request to the
     * cpu that does.  Since the timing is non-critical, a passive
     * message is sent.
     */
    if (z->z_CpuGd != gd) {
        *(struct malloc_type **)ptr = type;
#ifdef SMP
        logmemory(free_request, ptr, type, z->z_ChunkSize, 0);
        lwkt_send_ipiq_passive(z->z_CpuGd, free_remote, ptr);
#else
        panic("Corrupt SLZone");
#endif
        logmemory_quick(free_end);
        return;
    }

    logmemory(free_chunk, ptr, type, z->z_ChunkSize, 0);

    if (type->ks_magic != M_MAGIC)
        panic("free: malloc type lacks magic");

    crit_enter();
    pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
    chunk = ptr;

#ifdef INVARIANTS
    /*
     * Attempt to detect a double-free.  To reduce overhead we only check
     * if there appears to be a link pointer at the base of the data.
     */
    if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
        SLChunk *scan;
        for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
            if (scan == chunk)
                panic("Double free at %p", chunk);
        }
    }
    chunk_mark_free(z, chunk);
#endif

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
#ifdef INVARIANTS
    if (z->z_ChunkSize < sizeof(weirdary))
        bcopy(weirdary, chunk, z->z_ChunkSize);
    else
        bcopy(weirdary, chunk, sizeof(weirdary));
#endif

    /*
     * Add this free non-zero'd chunk to a linked list for reuse, adjust
     * z_FirstFreePg.
     */
#ifdef INVARIANTS
    if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
        panic("BADFREE %p", chunk);
#endif
    chunk->c_Next = z->z_PageAry[pgno];
    z->z_PageAry[pgno] = chunk;
#ifdef INVARIANTS
    if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
        panic("BADFREE2");
#endif
    if (z->z_FirstFreePg > pgno)
        z->z_FirstFreePg = pgno;

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
        z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
        slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_NFree == z->z_NMax &&
        (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
    ) {
        SLZone **pz;

        for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
            ;
        *pz = z->z_Next;
        z->z_Magic = -1;
        z->z_Next = slgd->FreeZones;
        slgd->FreeZones = z;
        ++slgd->NFreeZones;
    }
    logmemory_quick(free_end);
    crit_exit();
}

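/*
 * Example of the zone lookup used by kfree() and krealloc() above
 * (illustrative addresses, assuming ZoneSize = 128K so ZoneMask = 0x1ffff):
 *
 *	ptr                      = 0xffffffe012345678
 *	ptr & ~ZoneMask          = 0xffffffe012340000   (SLZone header)
 *	(ptr - z) >> PAGE_SHIFT  = page index within the zone (pgno)
 *
 * This works because zones are allocated by kmem_slab_alloc() with
 * ZoneSize alignment, so masking off the low bits of any chunk pointer
 * inside a zone recovers the zone header.
 */
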
#if defined(INVARIANTS)
/*
 * Helper routines for sanity checks
 */
static
void
chunk_mark_allocated(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KASSERT(bitdex >= 0 && bitdex < z->z_NMax, ("memory chunk %p bit index %d is illegal", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) == 0, ("memory chunk %p is already allocated!", chunk));
    *bitptr |= 1 << bitdex;
}

static
void
chunk_mark_free(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KASSERT(bitdex >= 0 && bitdex < z->z_NMax, ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) != 0, ("memory chunk %p is already free!", chunk));
    *bitptr &= ~(1 << bitdex);
}

#endif

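/*
 * Worked example of the bitmap indexing used by the helpers above
 * (illustrative): the chunk at index 37 (bitdex 37) lives in
 * z_Bitmap[37 >> 5] == z_Bitmap[1], bit 37 & 31 == 5, so its allocated
 * state is (z_Bitmap[1] >> 5) & 1.
 */
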
/*
 * kmem_slab_alloc()
 *
 * Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 * specified alignment.  M_* flags are expected in the flags field.
 *
 * Alignment must be a multiple of PAGE_SIZE.
 *
 * NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 * but when we move zalloc() over to use this function as its backend
 * we will have to switch to kreserve/krelease and call reserve(0)
 * after the new space is made available.
 *
 * Interrupt code which has preempted other code is not allowed to
 * use PQ_CACHE pages.  However, if an interrupt thread is run
 * non-preemptively or blocks and then runs non-preemptively, then
 * it is free to use PQ_CACHE pages.
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
    vm_size_t i;
    vm_offset_t addr;
    int count, vmflags, base_vmflags;
    thread_t td;

    size = round_page(size);
    addr = vm_map_min(&kernel_map);

    /*
     * Reserve properly aligned space from kernel_map.  RNOWAIT allocations
     * cannot block.
     */
    if (flags & M_RNOWAIT) {
        if (lwkt_trytoken(&vm_token) == 0)
            return(NULL);
    } else {
        lwkt_gettoken(&vm_token);
    }
    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
    crit_enter();
    vm_map_lock(&kernel_map);
    if (vm_map_findspace(&kernel_map, addr, size, align, 0, &addr)) {
        vm_map_unlock(&kernel_map);
        if ((flags & M_NULLOK) == 0)
            panic("kmem_slab_alloc(): kernel_map ran out of space!");
        crit_exit();
        vm_map_entry_release(count);
        lwkt_reltoken(&vm_token);
        return(NULL);
    }

    /*
     * kernel_object maps 1:1 to kernel_map.
     */
    vm_object_reference(&kernel_object);
    vm_map_insert(&kernel_map, &count,
                    &kernel_object, addr, addr, addr + size,
                    VM_MAPTYPE_NORMAL,
                    VM_PROT_ALL, VM_PROT_ALL,
                    0);

    td = curthread;

    base_vmflags = 0;
    if (flags & M_ZERO)
        base_vmflags |= VM_ALLOC_ZERO;
    if (flags & M_USE_RESERVE)
        base_vmflags |= VM_ALLOC_SYSTEM;
    if (flags & M_USE_INTERRUPT_RESERVE)
        base_vmflags |= VM_ALLOC_INTERRUPT;
    if ((flags & (M_RNOWAIT|M_WAITOK)) == 0)
        panic("kmem_slab_alloc: bad flags %08x (%p)", flags, ((int **)&size)[-1]);

    /*
     * Allocate the pages.  Do not mess with the PG_ZERO flag yet.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
        vm_page_t m;

        /*
         * VM_ALLOC_NORMAL can only be set if we are not preempting.
         *
         * VM_ALLOC_SYSTEM is automatically set if we are preempting and
         * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
         * implied in this case), though I'm not sure if we really need to
         * do that.
         */
        vmflags = base_vmflags;
        if (flags & M_WAITOK) {
            if (td->td_preempted)
                vmflags |= VM_ALLOC_SYSTEM;
            else
                vmflags |= VM_ALLOC_NORMAL;
        }

        m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);

        /*
         * If the allocation failed we either return NULL or we retry.
         *
         * If M_WAITOK is specified we wait for more memory and retry.
         * If M_WAITOK is specified from a preemption we yield instead of
         * wait.  Livelock will not occur because the interrupt thread
         * will not be preempting anyone the second time around after the
         * yield.
         */
        if (m == NULL) {
            if (flags & M_WAITOK) {
                if (td->td_preempted) {
                    vm_map_unlock(&kernel_map);
                    lwkt_yield();
                    vm_map_lock(&kernel_map);
                } else {
                    vm_map_unlock(&kernel_map);
                    vm_wait(0);
                    vm_map_lock(&kernel_map);
                }
                i -= PAGE_SIZE;	/* retry */
                continue;
            }

            /*
             * We were unable to recover, cleanup and return NULL
             */
            while (i != 0) {
                i -= PAGE_SIZE;
                m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
                /* page should already be busy */
                vm_page_free(m);
            }
            vm_map_delete(&kernel_map, addr, addr + size, &count);
            vm_map_unlock(&kernel_map);
            crit_exit();
            vm_map_entry_release(count);
            lwkt_reltoken(&vm_token);
            return(NULL);
        }
    }

    /*
     * Success!
     *
     * Mark the map entry as non-pageable using a routine that allows us to
     * populate the underlying pages.
     *
     * The pages were busied by the allocations above.
     */
    vm_map_set_wired_quick(&kernel_map, addr, size, &count);
    crit_exit();

    /*
     * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
        vm_page_t m;

        m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
        m->valid = VM_PAGE_BITS_ALL;
        /* page should already be busy */
        vm_page_wire(m);
        vm_page_wakeup(m);
        pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
        if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
            bzero((char *)addr + i, PAGE_SIZE);
        vm_page_flag_clear(m, PG_ZERO);
        KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
        vm_page_flag_set(m, PG_REFERENCED);
    }
    vm_map_unlock(&kernel_map);
    vm_map_entry_release(count);
    lwkt_reltoken(&vm_token);
    return((void *)addr);
}

/*
 * kmem_slab_free()
 */
static void
kmem_slab_free(void *ptr, vm_size_t size)
{
    crit_enter();
    lwkt_gettoken(&vm_token);
    vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
    lwkt_reltoken(&vm_token);
    crit_exit();
}