/*
 * KERN_SLABALLOC.C	- Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.39 2006/09/04 07:00:58 dillon Exp $
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *
 * API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>

#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))

#define MEMORY_STRING	"ptr=%p type=%p size=%d flags=%04x"
#define MEMORY_ARG_SIZE	(sizeof(void *) * 2 + sizeof(unsigned long) + \
			sizeof(int))

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc, 0, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_zero, 1, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 2, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 3, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 4, MEMORY_STRING, MEMORY_ARG_SIZE);
#ifdef SMP
KTR_INFO(KTR_MEMORY, memory, free_request, 5, MEMORY_STRING, MEMORY_ARG_SIZE);
KTR_INFO(KTR_MEMORY, memory, free_remote, 6, MEMORY_STRING, MEMORY_ARG_SIZE);
#endif
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin", 0);
KTR_INFO(KTR_MEMORY, memory, free_beg, 0, "free begin", 0);
KTR_INFO(KTR_MEMORY, memory, free_end, 0, "free end", 0);

#define logmemory(name, ptr, type, size, flags)	\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)	\
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static int ZoneMask;
static struct malloc_type *kmemstatistics;
static struct kmemusage *kmemusage;
static int32_t weirdary[16];

static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
static void kmem_slab_free(void *ptr, vm_size_t bytes);
#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#endif

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH	2		/* threshold number of zones */
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
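
/*
 * Illustration: on a 32-bit kernel with 4K pages the mask works out to
 * 0xfffff007, keeping the page-frame bits plus the low MIN_CHUNK_MASK
 * bits.  Two properly aligned chunks compare equal under the mask only
 * when they live in the same page, e.g. 0xc0001008 and 0xc0001040 match
 * while 0xc0002008 does not.  The DIAGNOSTIC check in malloc() relies
 * on this to validate c_Next pointers.
 */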

/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
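
/*
 * Typical usage of a malloc bucket (M_FOO is illustrative only):
 *
 *	MALLOC_DEFINE(M_FOO, "foo", "hypothetical foo subsystem buffers");
 *	...
 *	p = malloc(len, M_FOO, M_WAITOK | M_ZERO);
 *	...
 *	free(p, M_FOO);
 *
 * The per-bucket ks_memuse/ks_limit accounting below hangs off the
 * malloc_type passed in each call.
 */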

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);

SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
static int use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
	   &use_malloc_pattern, 0, "");
#endif

static void
kmeminit(void *dummy)
{
    vm_poff_t limsize;
    int usesize;
    int i;
    vm_pindex_t npg;

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
	limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;

    usesize = (int)(limsize / 1024);	/* convert to KB */

    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
	ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
	ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ZoneSize - 1;
    ZonePageCount = ZoneSize / PAGE_SIZE;

    npg = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE;
    kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage),
				PAGE_SIZE, M_WAITOK|M_ZERO);

    for (i = 0; i < arysize(weirdary); ++i)
	weirdary[i] = WEIRD_ADDR;

    if (bootverbose)
	printf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}

/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    vm_poff_t limsize;

    if (type->ks_magic != M_MAGIC)
	panic("malloc type lacks magic");

    if (type->ks_limit != 0)
	return;

    if (vmstats.v_page_count == 0)
	panic("malloc_init not allowed before vm init");

    limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)
	limsize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
    type->ks_limit = limsize / 10;

    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}

void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
#ifdef INVARIANTS
    int i;
    long ttl;
#endif

    if (type->ks_magic != M_MAGIC)
	panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
	panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
	panic("malloc_uninit on uninitialized type");

#ifdef INVARIANTS
    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
	ttl += type->ks_memuse[i];
    if (ttl) {
	printf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
	    ttl, type->ks_shortdesc, i);
    }
#endif
    if (type == kmemstatistics) {
	kmemstatistics = type->ks_next;
    } else {
	for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
	    if (t->ks_next == type) {
		t->ks_next = type->ks_next;
		break;
	    }
	}
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
}

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes)
{
    unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
    if (n < 128) {
	*bytes = n = (n + 7) & ~7;
	return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
	*bytes = n = (n + 15) & ~15;
	return(n / 16 + 7);
    }
    if (n < 8192) {
	if (n < 512) {
	    *bytes = n = (n + 31) & ~31;
	    return(n / 32 + 15);
	}
	if (n < 1024) {
	    *bytes = n = (n + 63) & ~63;
	    return(n / 64 + 23);
	}
	if (n < 2048) {
	    *bytes = n = (n + 127) & ~127;
	    return(n / 128 + 31);
	}
	if (n < 4096) {
	    *bytes = n = (n + 255) & ~255;
	    return(n / 256 + 39);
	}
	*bytes = n = (n + 511) & ~511;
	return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
	*bytes = n = (n + 1023) & ~1023;
	return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
	*bytes = n = (n + 2047) & ~2047;
	return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}
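
/*
 * Worked example: a 100-byte request falls in the n < 128 branch, is
 * rounded up to 104 bytes, and maps to zone index 104/8 - 1 == 12; a
 * 300-byte request is rounded up to 320 bytes and maps to zone index
 * 320/32 + 15 == 25.  The per-branch +7/+15/+23/... offsets simply stack
 * each chunking range's zones after the previous range's.
 */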

/*
 * malloc()	(SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 *
 * MPSAFE
 */
void *
malloc(unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int zi;
#ifdef INVARIANTS
    int i;
#endif

    logmemory_quick(malloc_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
	crit_enter();
	if (type->ks_limit == 0)
	    malloc_init(type);
	crit_exit();
    }
    ++type->ks_calls;

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  The original malloc code looped, but this tended to
     * simply deadlock the computer.
     *
     * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
     * to determine if a more complete limit check should be done.  The
     * actual memory use is tracked via ks_memuse[cpu].
     */
    while (type->ks_loosememuse >= type->ks_limit) {
	int i;
	long ttl;

	for (i = ttl = 0; i < ncpus; ++i)
	    ttl += type->ks_memuse[i];
	type->ks_loosememuse = ttl;	/* not MP synchronized */
	if (ttl >= type->ks_limit) {
	    if (flags & M_NULLOK) {
		logmemory(malloc, NULL, type, size, flags);
		return(NULL);
	    }
	    panic("%s: malloc limit exceeded", type->ks_shortdesc);
	}
    }

    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0) {
	logmemory(malloc, ZERO_LENGTH_PTR, type, size, flags);
	return(ZERO_LENGTH_PTR);
    }

    /*
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
     */
    while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
	crit_enter();
	if (slgd->NFreeZones > ZONE_RELS_THRESH) {	/* crit sect race */
	    z = slgd->FreeZones;
	    slgd->FreeZones = z->z_Next;
	    --slgd->NFreeZones;
	    kmem_slab_free(z, ZoneSize);	/* may block */
	}
	crit_exit();
    }
    /*
     * XXX handle oversized frees that were queued from free().
     */
    while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
	crit_enter();
	if ((z = slgd->FreeOvZones) != NULL) {
	    KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
	    slgd->FreeOvZones = z->z_Next;
	    kmem_slab_free(z, z->z_ChunkSize);	/* may block */
	}
	crit_exit();
    }

    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * Guarantee page alignment for allocations in multiples of PAGE_SIZE
     */
    if (size >= ZoneLimit || (size & PAGE_MASK) == 0) {
	struct kmemusage *kup;

	size = round_page(size);
	chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
	if (chunk == NULL) {
	    logmemory(malloc, NULL, type, size, flags);
	    return(NULL);
	}
	flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
	flags |= M_PASSIVE_ZERO;
	kup = btokup(chunk);
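	/*
	 * Note: btokup() maps the address to its per-page kmemusage
	 * record in the kmemusage[] array sized in kmeminit(); stashing
	 * the page count there is how free() later recovers the size of
	 * these oversized, zone-less allocations.
	 */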
	kup->ku_pagecnt = size / PAGE_SIZE;
	kup->ku_cpu = gd->gd_cpuid;
	crit_enter();
	goto done;
    }

    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    KKASSERT(zi < NZONES);
    crit_enter();
    if ((z = slgd->ZoneAry[zi]) != NULL) {
	KKASSERT(z->z_NFree > 0);

	/*
	 * Remove us from the ZoneAry[] when we become empty
	 */
	if (--z->z_NFree == 0) {
	    slgd->ZoneAry[zi] = z->z_Next;
	    z->z_Next = NULL;
	}

	/*
	 * Locate a chunk in a free page.  This attempts to localize
	 * reallocations into earlier pages without us having to sort
	 * the chunk list.  A chunk may still overlap a page boundary.
	 */
	while (z->z_FirstFreePg < ZonePageCount) {
	    if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
#ifdef DIAGNOSTIC
		/*
		 * Diagnostic: c_Next is not total garbage.
		 */
		KKASSERT(chunk->c_Next == NULL ||
			((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
			((intptr_t)chunk & IN_SAME_PAGE_MASK));
#endif
#ifdef INVARIANTS
		if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
		    panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount);
		if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
		    panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount);
		chunk_mark_allocated(z, chunk);
#endif
		z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
		goto done;
	    }
	    ++z->z_FirstFreePg;
	}

	/*
	 * No chunks are available but NFree said we had some memory, so
	 * it must be available in the never-before-used-memory area
	 * governed by UIndex.  The consequences are very serious if our zone
	 * got corrupted so we use an explicit panic rather than a KASSERT.
	 */
	if (z->z_UIndex + 1 != z->z_NMax)
	    z->z_UIndex = z->z_UIndex + 1;
	else
	    z->z_UIndex = 0;
	if (z->z_UIndex == z->z_UEndIndex)
	    panic("slaballoc: corrupted zone");
	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
	    flags &= ~M_ZERO;
	    flags |= M_PASSIVE_ZERO;
	}
#if defined(INVARIANTS)
	chunk_mark_allocated(z, chunk);
#endif
	goto done;
    }

    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zerod pages.  Also see
     * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
     * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
     * we do not pre-zero it because we do not want to mess up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND) expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    {
	int off;

	if ((z = slgd->FreeZones) != NULL) {
	    slgd->FreeZones = z->z_Next;
	    --slgd->NFreeZones;
	    bzero(z, sizeof(SLZone));
	    z->z_Flags |= SLZF_UNOTZEROD;
	} else {
	    z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
	    if (z == NULL)
		goto fail;
	}

	/*
	 * How big is the base structure?
	 */
#if defined(INVARIANTS)
	/*
	 * Make room for z_Bitmap.  An exact calculation is somewhat more
	 * complicated so don't make an exact calculation.
	 */
	off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
	bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
#else
	off = sizeof(SLZone);
#endif

	/*
	 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
	 * Otherwise just 8-byte align the data.
	 */
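	/*
	 * The test below is a standard power-of-2 check: for size == 2^k,
	 * (size | (size - 1)) + 1 equals size << 1, e.g. size 256:
	 * (256 | 255) + 1 == 512.  Any other size has a gap in its low
	 * bits and fails the comparison.
	 */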
	if ((size | (size - 1)) + 1 == (size << 1))
	    off = (off + size - 1) & ~(size - 1);
	else
	    off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
	z->z_Magic = ZALLOC_SLAB_MAGIC;
	z->z_ZoneIndex = zi;
	z->z_NMax = (ZoneSize - off) / size;
	z->z_NFree = z->z_NMax - 1;
	z->z_BasePtr = (char *)z + off;
	z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
	z->z_ChunkSize = size;
	z->z_FirstFreePg = ZonePageCount;
	z->z_CpuGd = gd;
	z->z_Cpu = gd->gd_cpuid;
	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
	z->z_Next = slgd->ZoneAry[zi];
	slgd->ZoneAry[zi] = z;
	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
	    flags &= ~M_ZERO;	/* already zero'd */
	    flags |= M_PASSIVE_ZERO;
	}
#if defined(INVARIANTS)
	chunk_mark_allocated(z, chunk);
#endif

	/*
	 * Slide the base index for initial allocations out of the next
	 * zone we create so we do not over-weight the lower part of the
	 * cpu memory caches.
	 */
	slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
			& (ZALLOC_MAX_ZONE_SIZE - 1);
    }
done:
    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;	/* not MP synchronized */
    crit_exit();
    if (flags & M_ZERO)
	bzero(chunk, size);
#ifdef INVARIANTS
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
	if (use_malloc_pattern) {
	    for (i = 0; i < size; i += sizeof(int)) {
		*(int *)((char *)chunk + i) = -1;
	    }
	}
	chunk->c_Next = (void *)-1; /* avoid accidental double-free check */
    }
#endif
    logmemory(malloc, chunk, type, size, flags);
    return(chunk);
fail:
    crit_exit();
    logmemory(malloc, NULL, type, size, flags);
    return(NULL);
}

/*
 * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
 *
 * Generally speaking this routine is not called very often and we do
 * not attempt to optimize it beyond reusing the same pointer if the
 * new size fits within the chunking of the old pointer's zone.
 */
void *
realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    void *nptr;
    unsigned long osize;

    KKASSERT((flags & M_ZERO) == 0);	/* not supported */

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
	return(malloc(size, type, flags));
    if (size == 0) {
	free(ptr, type);
	return(NULL);
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     */
    {
	struct kmemusage *kup;

	kup = btokup(ptr);
	if (kup->ku_pagecnt) {
	    osize = kup->ku_pagecnt << PAGE_SHIFT;
	    if (osize == round_page(size))
		return(ptr);
	    if ((nptr = malloc(size, type, flags)) == NULL)
		return(NULL);
	    bcopy(ptr, nptr, min(size, osize));
	    free(ptr, type);
	    return(nptr);
	}
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
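
    /*
     * The lookup above works because zones are allocated with ZoneSize
     * alignment: masking off the low ZoneMask bits of any chunk pointer
     * recovers the SLZone header (with a 128K zone, the low 17 address
     * bits).  free() uses the same trick.
     */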

    zoneindex(&size);
    if (z->z_ChunkSize == size)
	return(ptr);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     */
    if ((nptr = malloc(size, type, flags)) == NULL)
	return(NULL);
    bcopy(ptr, nptr, min(size, z->z_ChunkSize));
    free(ptr, type);
    return(nptr);
}

/*
 * Allocate a copy of the specified string.
 *
 * (MP SAFE) (MAY BLOCK)
 */
char *
kstrdup(const char *str, struct malloc_type *type)
{
    int zlen;	/* length inclusive of terminating NUL */
    char *nstr;

    if (str == NULL)
	return(NULL);
    zlen = strlen(str) + 1;
    nstr = malloc(zlen, type, M_WAITOK);
    bcopy(str, nstr, zlen);
    return(nstr);
}

#ifdef SMP
/*
 * free()	(SLAB ALLOCATOR)
 *
 *	Free the specified chunk of memory.
 */
static
void
free_remote(void *ptr)
{
    logmemory(free_remote, ptr, *(struct malloc_type **)ptr, -1, 0);
    free(ptr, *(struct malloc_type **)ptr);
}

#endif

/*
 * free (SLAB ALLOCATOR)
 *
 * Free a memory block previously allocated by malloc.  Note that we do not
 * attempt to update ks_loosememuse as MP races could prevent us from
 * checking memory limits in malloc.
 *
 * MPSAFE
 */
void
free(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int pgno;

    logmemory_quick(free_beg);
    gd = mycpu;
    slgd = &gd->gd_slab;

    if (ptr == NULL)
	panic("trying to free NULL pointer");

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR) {
	logmemory(free_zero, ptr, type, -1, 0);
	logmemory_quick(free_end);
	return;
    }

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    {
	struct kmemusage *kup;
	unsigned long size;

	kup = btokup(ptr);
	if (kup->ku_pagecnt) {
	    size = kup->ku_pagecnt << PAGE_SHIFT;
	    kup->ku_pagecnt = 0;
#ifdef INVARIANTS
	    KKASSERT(sizeof(weirdary) <= size);
	    bcopy(weirdary, ptr, sizeof(weirdary));
#endif
	    /*
	     * note: we always adjust our cpu's slot, not the originating
	     * cpu (kup->ku_cpuid).  The statistics are in aggregate.
	     *
	     * note: XXX we have still inherited the interrupts-can't-block
	     * assumption.  An interrupt thread does not bump
	     * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
	     * primarily until we can fix softupdate's assumptions about free().
	     */
	    crit_enter();
	    --type->ks_inuse[gd->gd_cpuid];
	    type->ks_memuse[gd->gd_cpuid] -= size;
	    if (mycpu->gd_intr_nesting_level || (gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
		logmemory(free_ovsz_delayed, ptr, type, size, 0);
		z = (SLZone *)ptr;
		z->z_Magic = ZALLOC_OVSZ_MAGIC;
		z->z_Next = slgd->FreeOvZones;
		z->z_ChunkSize = size;
		slgd->FreeOvZones = z;
		crit_exit();
	    } else {
		crit_exit();
		logmemory(free_ovsz, ptr, type, size, 0);
		kmem_slab_free(ptr, size);	/* may block */
	    }
	    logmemory_quick(free_end);
	    return;
	}
    }

    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * If we do not own the zone then forward the request to the
     * cpu that does.  Since the timing is non-critical, a passive
     * message is sent.
     */
    if (z->z_CpuGd != gd) {
	*(struct malloc_type **)ptr = type;
#ifdef SMP
	logmemory(free_request, ptr, type, z->z_ChunkSize, 0);
	lwkt_send_ipiq_passive(z->z_CpuGd, free_remote, ptr);
#else
	panic("Corrupt SLZone");
#endif
	logmemory_quick(free_end);
	return;
    }

    logmemory(free_chunk, ptr, type, z->z_ChunkSize, 0);

    if (type->ks_magic != M_MAGIC)
	panic("free: malloc type lacks magic");

    crit_enter();
    pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
    chunk = ptr;

#ifdef INVARIANTS
    /*
     * Attempt to detect a double-free.  To reduce overhead we only check
     * if there appears to be a link pointer at the base of the data.
     */
    if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
	SLChunk *scan;
	for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
	    if (scan == chunk)
		panic("Double free at %p", chunk);
	}
    }
    chunk_mark_free(z, chunk);
#endif

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
#ifdef INVARIANTS
    if (z->z_ChunkSize < sizeof(weirdary))
	bcopy(weirdary, chunk, z->z_ChunkSize);
    else
	bcopy(weirdary, chunk, sizeof(weirdary));
#endif

    /*
     * Add this free non-zero'd chunk to a linked list for reuse, adjust
     * z_FirstFreePg.
     */
#ifdef INVARIANTS
    if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
	panic("BADFREE %p", chunk);
#endif
    chunk->c_Next = z->z_PageAry[pgno];
    z->z_PageAry[pgno] = chunk;
#ifdef INVARIANTS
    if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
	panic("BADFREE2");
#endif
    if (z->z_FirstFreePg > pgno)
	z->z_FirstFreePg = pgno;

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
	z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
	slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_NFree == z->z_NMax &&
	(z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
    ) {
	SLZone **pz;

	for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
	    ;
	*pz = z->z_Next;
	z->z_Magic = -1;
	z->z_Next = slgd->FreeZones;
	slgd->FreeZones = z;
	++slgd->NFreeZones;
    }
    logmemory_quick(free_end);
    crit_exit();
}

#if defined(INVARIANTS)
/*
 * Helper routines for sanity checks
 */
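/*
 * Note: z_Bitmap carries one bit per chunk: bit n covers the chunk at
 * z_BasePtr + n * z_ChunkSize, with 32 chunks packed per __uint32_t word
 * (hence the >> 5 word index and & 31 bit index below).
 */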
static
void
chunk_mark_allocated(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KASSERT(bitdex >= 0 && bitdex < z->z_NMax, ("memory chunk %p bit index %d is illegal", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) == 0, ("memory chunk %p is already allocated!", chunk));
    *bitptr |= 1 << bitdex;
}

static
void
chunk_mark_free(SLZone *z, void *chunk)
{
    int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
    __uint32_t *bitptr;

    KASSERT(bitdex >= 0 && bitdex < z->z_NMax, ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
    bitptr = &z->z_Bitmap[bitdex >> 5];
    bitdex &= 31;
    KASSERT((*bitptr & (1 << bitdex)) != 0, ("memory chunk %p is already free!", chunk));
    *bitptr &= ~(1 << bitdex);
}

#endif

/*
 * kmem_slab_alloc()
 *
 *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
 *	specified alignment.  M_* flags are expected in the flags field.
 *
 *	Alignment must be a multiple of PAGE_SIZE.
 *
 *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
 *	but when we move zalloc() over to use this function as its backend
 *	we will have to switch to kreserve/krelease and call reserve(0)
 *	after the new space is made available.
 *
 *	Interrupt code which has preempted other code is not allowed to
 *	use PQ_CACHE pages.  However, if an interrupt thread is run
 *	non-preemptively or blocks and then runs non-preemptively, then
 *	it is free to use PQ_CACHE pages.
 *
 *	This routine will currently obtain the BGL.
 *
 * MPALMOSTSAFE - acquires mplock
 */
static void *
kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
{
    vm_size_t i;
    vm_offset_t addr;
    vm_offset_t offset;
    int count, vmflags, base_vmflags;
    thread_t td;
    vm_map_t map = kernel_map;

    size = round_page(size);
    addr = vm_map_min(map);

    /*
     * Reserve properly aligned space from kernel_map.  RNOWAIT allocations
     * cannot block.
     */
    if (flags & M_RNOWAIT) {
	if (try_mplock() == 0)
	    return(NULL);
    } else {
	get_mplock();
    }
    count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
    crit_enter();
    vm_map_lock(map);
    if (vm_map_findspace(map, vm_map_min(map), size, align, &addr)) {
	vm_map_unlock(map);
	if ((flags & M_NULLOK) == 0)
	    panic("kmem_slab_alloc(): kernel_map ran out of space!");
	crit_exit();
	vm_map_entry_release(count);
	rel_mplock();
	return(NULL);
    }
    offset = addr - VM_MIN_KERNEL_ADDRESS;
    vm_object_reference(kernel_object);
    vm_map_insert(map, &count,
		  kernel_object, offset, addr, addr + size,
		  VM_PROT_ALL, VM_PROT_ALL, 0);

    td = curthread;

    base_vmflags = 0;
    if (flags & M_ZERO)
	base_vmflags |= VM_ALLOC_ZERO;
    if (flags & M_USE_RESERVE)
	base_vmflags |= VM_ALLOC_SYSTEM;
    if (flags & M_USE_INTERRUPT_RESERVE)
	base_vmflags |= VM_ALLOC_INTERRUPT;
    if ((flags & (M_RNOWAIT|M_WAITOK)) == 0)
	panic("kmem_slab_alloc: bad flags %08x (%p)", flags, ((int **)&size)[-1]);

    /*
     * Allocate the pages.  Do not mess with the PG_ZERO flag yet.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
	vm_page_t m;
	vm_pindex_t idx = OFF_TO_IDX(offset + i);

	/*
	 * VM_ALLOC_NORMAL can only be set if we are not preempting.
	 *
	 * VM_ALLOC_SYSTEM is automatically set if we are preempting and
	 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
	 * implied in this case), though I'm not sure if we really need to
	 * do that.
	 */
	vmflags = base_vmflags;
	if (flags & M_WAITOK) {
	    if (td->td_preempted)
		vmflags |= VM_ALLOC_SYSTEM;
	    else
		vmflags |= VM_ALLOC_NORMAL;
	}

	m = vm_page_alloc(kernel_object, idx, vmflags);

	/*
	 * If the allocation failed we either return NULL or we retry.
	 *
	 * If M_WAITOK is specified we wait for more memory and retry.
	 * If M_WAITOK is specified from a preemption we yield instead of
	 * wait.  Livelock will not occur because the interrupt thread
	 * will not be preempting anyone the second time around after the
	 * yield.
	 */
	if (m == NULL) {
	    if (flags & M_WAITOK) {
		if (td->td_preempted) {
		    vm_map_unlock(map);
		    lwkt_yield();
		    vm_map_lock(map);
		} else {
		    vm_map_unlock(map);
		    vm_wait();
		    vm_map_lock(map);
		}
		i -= PAGE_SIZE;	/* retry */
		continue;
	    }

	    /*
	     * We were unable to recover, cleanup and return NULL
	     */
	    while (i != 0) {
		i -= PAGE_SIZE;
		m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
		vm_page_free(m);
	    }
	    vm_map_delete(map, addr, addr + size, &count);
	    vm_map_unlock(map);
	    crit_exit();
	    vm_map_entry_release(count);
	    rel_mplock();
	    return(NULL);
	}
    }

    /*
     * Success!
     *
     * Mark the map entry as non-pageable using a routine that allows us to
     * populate the underlying pages.
     */
    vm_map_set_wired_quick(map, addr, size, &count);
    crit_exit();

    /*
     * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
     */
    for (i = 0; i < size; i += PAGE_SIZE) {
	vm_page_t m;

	m = vm_page_lookup(kernel_object, OFF_TO_IDX(offset + i));
	m->valid = VM_PAGE_BITS_ALL;
	vm_page_wire(m);
	vm_page_wakeup(m);
	pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
	if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
	    bzero((char *)addr + i, PAGE_SIZE);
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
    }
    vm_map_unlock(map);
    vm_map_entry_release(count);
    rel_mplock();
    return((void *)addr);
}

/*
 * kmem_slab_free()
 *
 * MPALMOSTSAFE - acquires mplock
 */
static void
kmem_slab_free(void *ptr, vm_size_t size)
{
    get_mplock();
    crit_enter();
    vm_map_remove(kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
    crit_exit();
    rel_mplock();
}