1/*
2 * KERN_SLABALLOC.C - Kernel SLAB memory allocator
3 *
4 * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved.
5 *
6 * This code is derived from software contributed to The DragonFly Project
7 * by Matthew Dillon <dillon@backplane.com>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
18 * distribution.
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.51 2007/11/18 09:53:19 sephe Exp $
37 *
38 * This module implements a slab allocator drop-in replacement for the
39 * kernel malloc().
40 *
41 * A slab allocator reserves a ZONE for each chunk size, then lays the
42 * chunks out in an array within the zone. Allocation and deallocation
43 * are nearly instantaneous, and fragmentation/overhead losses are limited
44 * to a fixed worst-case amount.
45 *
46 * The downside of this slab implementation is the zone size
47 * multiplied by the number of zones: ~80 zones * 128K = ~10MB of VM per cpu.
48 * In a kernel implementation all this memory will be physical, so
49 * the zone size is adjusted downward on machines with less physical
50 * memory. The upside is that overhead is bounded... this is the *worst*
51 * case overhead.
52 *
53 * Slab management is done on a per-cpu basis and no locking or mutexes
54 * are required, only a critical section. When one cpu frees memory
55 * belonging to another cpu's slab manager, an asynchronous IPI message
56 * will be queued to execute the operation. In addition, both the
57 * high level slab allocator and the low level zone allocator optimize
58 * M_ZERO requests, and the slab allocator does not have to pre-initialize
59 * the linked list of chunks.
60 *
61 * XXX Balancing is needed between cpus. Balance will be handled through
62 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
63 *
64 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
65 * the new zone should be restricted to M_USE_RESERVE requests only.
66 *
67 * Alloc Size Chunking Number of zones
68 * 0-127 8 16
69 * 128-255 16 8
70 * 256-511 32 8
71 * 512-1023 64 8
72 * 1024-2047 128 8
73 * 2048-4095 256 8
74 * 4096-8191 512 8
75 * 8192-16383 1024 8
76 * 16384-32767 2048 8
77 * (if PAGE_SIZE is 4K the maximum zone allocation is 16383)
78 *
79 * Allocations >= ZoneLimit go directly to kmem.
80 *
81 * API REQUIREMENTS AND SIDE EFFECTS
82 *
83 * To operate as a drop-in replacement for the FreeBSD-4.x malloc() we
84 * have remained compatible with the following API requirements:
85 *
86 * + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
87 * + all power-of-2 sized allocations are power-of-2 aligned (twe)
88 * + malloc(0) is allowed and returns non-NULL (ahc driver)
89 * + ability to allocate arbitrarily large chunks of memory
90 */
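
/*
 * A minimal sketch of the guarantees listed above. This block is
 * illustrative only and deliberately kept out of the build; M_EXAMPLE is
 * a hypothetical malloc type, not one defined by this file.
 */
#if 0
MALLOC_DEFINE(M_EXAMPLE, "example", "illustrative slab allocations");

static void
slab_api_example(void)
{
	void *p;

	/* power-of-2 sized allocations come back power-of-2 aligned */
	p = kmalloc(256, M_EXAMPLE, M_WAITOK);
	KKASSERT(((uintptr_t)p & 255) == 0);
	kfree(p, M_EXAMPLE);

	/* malloc(0) is legal and returns a distinct non-NULL token */
	p = kmalloc(0, M_EXAMPLE, M_WAITOK);
	KKASSERT(p != NULL);
	kfree(p, M_EXAMPLE);
}
#endif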
91
92#include "opt_vm.h"
93
94#include <sys/param.h>
95#include <sys/systm.h>
96#include <sys/kernel.h>
97#include <sys/slaballoc.h>
98#include <sys/mbuf.h>
99#include <sys/vmmeter.h>
100#include <sys/lock.h>
101#include <sys/thread.h>
102#include <sys/globaldata.h>
103#include <sys/sysctl.h>
104#include <sys/ktr.h>
105
106#include <vm/vm.h>
107#include <vm/vm_param.h>
108#include <vm/vm_kern.h>
109#include <vm/vm_extern.h>
110#include <vm/vm_object.h>
111#include <vm/pmap.h>
112#include <vm/vm_map.h>
113#include <vm/vm_page.h>
114#include <vm/vm_pageout.h>
115
116#include <machine/cpu.h>
117
118#include <sys/thread2.h>
119
120#define arysize(ary) (sizeof(ary)/sizeof((ary)[0]))
121
122#define MEMORY_STRING "ptr=%p type=%p size=%d flags=%04x"
123#define MEMORY_ARG_SIZE (sizeof(void *) * 2 + sizeof(unsigned long) + \
124 sizeof(int))
125
126#if !defined(KTR_MEMORY)
127#define KTR_MEMORY KTR_ALL
128#endif
129KTR_INFO_MASTER(memory);
130KTR_INFO(KTR_MEMORY, memory, malloc, 0, MEMORY_STRING, MEMORY_ARG_SIZE);
131KTR_INFO(KTR_MEMORY, memory, free_zero, 1, MEMORY_STRING, MEMORY_ARG_SIZE);
132KTR_INFO(KTR_MEMORY, memory, free_ovsz, 2, MEMORY_STRING, MEMORY_ARG_SIZE);
133KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 3, MEMORY_STRING, MEMORY_ARG_SIZE);
134KTR_INFO(KTR_MEMORY, memory, free_chunk, 4, MEMORY_STRING, MEMORY_ARG_SIZE);
135#ifdef SMP
136KTR_INFO(KTR_MEMORY, memory, free_request, 5, MEMORY_STRING, MEMORY_ARG_SIZE);
137KTR_INFO(KTR_MEMORY, memory, free_remote, 6, MEMORY_STRING, MEMORY_ARG_SIZE);
138#endif
139KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin", 0);
140KTR_INFO(KTR_MEMORY, memory, free_beg, 0, "free begin", 0);
141KTR_INFO(KTR_MEMORY, memory, free_end, 0, "free end", 0);
142
143#define logmemory(name, ptr, type, size, flags) \
144 KTR_LOG(memory_ ## name, ptr, type, size, flags)
145#define logmemory_quick(name) \
146 KTR_LOG(memory_ ## name)
147
148/*
149 * Fixed globals (not per-cpu)
150 */
151static int ZoneSize;
152static int ZoneLimit;
153static int ZonePageCount;
154static int ZoneMask;
155struct malloc_type *kmemstatistics; /* exported to vmstat */
156static struct kmemusage *kmemusage;
157static int32_t weirdary[16];
158
159static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags);
160static void kmem_slab_free(void *ptr, vm_size_t bytes);
161#if defined(INVARIANTS)
162static void chunk_mark_allocated(SLZone *z, void *chunk);
163static void chunk_mark_free(SLZone *z, void *chunk);
164#endif
165
166/*
167 * Misc constants. Note that allocations that are exact multiples of
168 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
169 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
170 */
171#define MIN_CHUNK_SIZE 8 /* in bytes */
172#define MIN_CHUNK_MASK (MIN_CHUNK_SIZE - 1)
173#define ZONE_RELS_THRESH 2 /* threshold number of zones */
174#define IN_SAME_PAGE_MASK (~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
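
/*
 * Example, assuming 4K pages: IN_SAME_PAGE_MASK is then ~0xfff | 0x7,
 * i.e. it keeps the page-frame bits plus the low alignment bits. Two
 * 8-byte aligned chunks at ...d010 and ...d968 both mask to ...d000
 * (same page, properly aligned), while a corrupted link that crosses a
 * page boundary or is misaligned masks to a different value.
 */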
175
176/*
177 * The WEIRD_ADDR is used as known text to copy into free objects to
178 * try to create deterministic failure cases if the data is accessed after
179 * free.
180 */
181#define WEIRD_ADDR 0xdeadc0de
182#define MAX_COPY sizeof(weirdary)
183#define ZERO_LENGTH_PTR ((void *)-8)
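
/*
 * Note on ZERO_LENGTH_PTR (illustrative): (void *)-8 makes a convenient
 * malloc(0) sentinel because it is non-NULL (callers that test for NULL
 * keep working), it is MIN_CHUNK_SIZE aligned, and it sits at the very
 * top of the address space where a stray dereference should fault.
 * kfree() and krealloc() check for it explicitly before touching memory.
 */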
184
185/*
186 * Misc global malloc buckets
187 */
188
189MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
190MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
191MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
192
193MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
194MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
195
196/*
197 * Initialize the slab memory allocator. We have to choose a zone size based
198 * on available physical memory. We choose a zone size which is approximately
199 * 1/1024th of our memory, so if we have 128MB of RAM we have a zone size of
200 * 128K. The zone size is limited to the bounds set in slaballoc.h
201 * (typically 32K min, 128K max).
202 */
203static void kmeminit(void *dummy);
204
205SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL)
206
207#ifdef INVARIANTS
208/*
209 * If enabled any memory allocated without M_ZERO is initialized to -1.
210 */
211static int use_malloc_pattern;
212SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
213 &use_malloc_pattern, 0, "");
214#endif
215
216static void
217kmeminit(void *dummy)
218{
219 vm_poff_t limsize;
220 int usesize;
221 int i;
222 vm_pindex_t npg;
223
224 limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
225 if (limsize > KvaSize)
226 limsize = KvaSize;
227
228 usesize = (int)(limsize / 1024); /* convert to KB */
229
230 ZoneSize = ZALLOC_MIN_ZONE_SIZE;
231 while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
232 ZoneSize <<= 1;
233 ZoneLimit = ZoneSize / 4;
234 if (ZoneLimit > ZALLOC_ZONE_LIMIT)
235 ZoneLimit = ZALLOC_ZONE_LIMIT;
236 ZoneMask = ZoneSize - 1;
237 ZonePageCount = ZoneSize / PAGE_SIZE;
238
239 npg = KvaSize / PAGE_SIZE;
240 kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage),
241 PAGE_SIZE, M_WAITOK|M_ZERO);
242
243 for (i = 0; i < arysize(weirdary); ++i)
244 weirdary[i] = WEIRD_ADDR;
245
246 if (bootverbose)
247 kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
248}
249
250/*
251 * Initialize a malloc type tracking structure.
252 */
253void
254malloc_init(void *data)
255{
256 struct malloc_type *type = data;
257 vm_poff_t limsize;
258
259 if (type->ks_magic != M_MAGIC)
260 panic("malloc type lacks magic");
261
262 if (type->ks_limit != 0)
263 return;
264
265 if (vmstats.v_page_count == 0)
266 panic("malloc_init not allowed before vm init");
267
268 limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE;
269 if (limsize > KvaSize)
270 limsize = KvaSize;
271 type->ks_limit = limsize / 10;
272
273 type->ks_next = kmemstatistics;
274 kmemstatistics = type;
275}
276
277void
278malloc_uninit(void *data)
279{
280 struct malloc_type *type = data;
281 struct malloc_type *t;
282#ifdef INVARIANTS
283 int i;
284 long ttl;
285#endif
286
287 if (type->ks_magic != M_MAGIC)
288 panic("malloc type lacks magic");
289
290 if (vmstats.v_page_count == 0)
291 panic("malloc_uninit not allowed before vm init");
292
293 if (type->ks_limit == 0)
294 panic("malloc_uninit on uninitialized type");
295
296#ifdef SMP
297 /* Make sure that all pending kfree()s are finished. */
298 lwkt_synchronize_ipiqs("muninit");
299#endif
300
301#ifdef INVARIANTS
302 /*
303 * memuse is only correct in aggregation. Due to memory being allocated
304 * on one cpu and freed on another, individual array entries may be
305 * negative or positive (canceling each other out).
306 */
307 for (i = ttl = 0; i < ncpus; ++i)
308 ttl += type->ks_memuse[i];
309 if (ttl) {
310 kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
311 ttl, type->ks_shortdesc, i);
312 }
313#endif
314 if (type == kmemstatistics) {
315 kmemstatistics = type->ks_next;
316 } else {
317 for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
318 if (t->ks_next == type) {
319 t->ks_next = type->ks_next;
320 break;
321 }
322 }
323 }
324 type->ks_next = NULL;
325 type->ks_limit = 0;
326}
327
328/*
329 * Calculate the zone index for the allocation request size and set the
330 * allocation request size to that particular zone's chunk size.
331 */
332static __inline int
333zoneindex(unsigned long *bytes)
334{
335 unsigned int n = (unsigned int)*bytes; /* unsigned for shift opt */
336 if (n < 128) {
337 *bytes = n = (n + 7) & ~7;
338 return(n / 8 - 1); /* 8 byte chunks, 16 zones */
339 }
340 if (n < 256) {
341 *bytes = n = (n + 15) & ~15;
342 return(n / 16 + 7);
343 }
344 if (n < 8192) {
345 if (n < 512) {
346 *bytes = n = (n + 31) & ~31;
347 return(n / 32 + 15);
348 }
349 if (n < 1024) {
350 *bytes = n = (n + 63) & ~63;
351 return(n / 64 + 23);
352 }
353 if (n < 2048) {
354 *bytes = n = (n + 127) & ~127;
355 return(n / 128 + 31);
356 }
357 if (n < 4096) {
358 *bytes = n = (n + 255) & ~255;
359 return(n / 256 + 39);
360 }
361 *bytes = n = (n + 511) & ~511;
362 return(n / 512 + 47);
363 }
364#if ZALLOC_ZONE_LIMIT > 8192
365 if (n < 16384) {
366 *bytes = n = (n + 1023) & ~1023;
367 return(n / 1024 + 55);
368 }
369#endif
370#if ZALLOC_ZONE_LIMIT > 16384
371 if (n < 32768) {
372 *bytes = n = (n + 2047) & ~2047;
373 return(n / 2048 + 63);
374 }
375#endif
376 panic("Unexpected byte count %d", n);
377 return(0);
378}
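
/*
 * Worked examples of the mapping above (illustrative): zoneindex(&n)
 * with n = 100 rounds n up to 104 and returns 104/8 - 1 = 12; n = 200
 * rounds to 208 and returns 208/16 + 7 = 20; n = 3000 rounds to 3072
 * and returns 3072/256 + 39 = 51. The +7, +15, ... offsets splice each
 * band's zones directly after the previous band's, matching the
 * chunking table in the file header.
 */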
379
380/*
381 * malloc() (SLAB ALLOCATOR)
382 *
383 * Allocate memory via the slab allocator. If the request is too large,
384 * or if it is page-aligned beyond a certain size, we fall back to the
385 * KMEM subsystem. A SLAB tracking descriptor must be specified, use
386 * &SlabMisc if you don't care.
387 *
388 * M_RNOWAIT - don't block.
389 * M_NULLOK - return NULL instead of blocking.
390 * M_ZERO - zero the returned memory.
391 * M_USE_RESERVE - allow greater drawdown of the free list
392 * M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
393 *
394 * MPSAFE
395 */
396
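/*
 * Minimal usage sketch for the flag combinations above. Illustrative
 * only and kept out of the build; M_EXAMPLE is the hypothetical malloc
 * type from the sketch near the top of this file.
 */
#if 0
static void
kmalloc_flags_example(void)
{
	char *p;

	/* common case: may block until memory is available, returns zeroed */
	p = kmalloc(128, M_EXAMPLE, M_WAITOK | M_ZERO);
	kfree(p, M_EXAMPLE);

	/* no-block case: the caller must be prepared for a NULL return */
	p = kmalloc(128, M_EXAMPLE, M_RNOWAIT | M_NULLOK);
	if (p != NULL)
		kfree(p, M_EXAMPLE);
}
#endif
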
397void *
398kmalloc(unsigned long size, struct malloc_type *type, int flags)
399{
400 SLZone *z;
401 SLChunk *chunk;
402 SLGlobalData *slgd;
403 struct globaldata *gd;
404 int zi;
405#ifdef INVARIANTS
406 int i;
407#endif
408
409 logmemory_quick(malloc_beg);
410 gd = mycpu;
411 slgd = &gd->gd_slab;
412
413 /*
414 * XXX silly to have this in the critical path.
415 */
416 if (type->ks_limit == 0) {
417 crit_enter();
418 if (type->ks_limit == 0)
419 malloc_init(type);
420 crit_exit();
421 }
422 ++type->ks_calls;
423
424 /*
425 * Handle the case where the limit is reached. Panic if we can't return
426 * NULL. The original malloc code looped, but this tended to
427 * simply deadlock the computer.
428 *
429 * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
430 * to determine if a more complete limit check should be done. The
431 * actual memory use is tracked via ks_memuse[cpu].
432 */
433 while (type->ks_loosememuse >= type->ks_limit) {
434 int i;
435 long ttl;
436
437 for (i = ttl = 0; i < ncpus; ++i)
438 ttl += type->ks_memuse[i];
439 type->ks_loosememuse = ttl; /* not MP synchronized */
440 if (ttl >= type->ks_limit) {
441 if (flags & M_NULLOK) {
442 logmemory(malloc, NULL, type, size, flags);
443 return(NULL);
444 }
445 panic("%s: malloc limit exceeded", type->ks_shortdesc);
446 }
447 }
448
449 /*
450 * Handle the degenerate size == 0 case. Yes, this does happen.
451 * Return a special pointer. This is to maintain compatibility with
452 * the original malloc implementation. Certain devices, such as the
453 * adaptec driver, not only allocate 0 bytes, they check for NULL and
454 * also realloc() later on. Joy.
455 */
456 if (size == 0) {
457 logmemory(malloc, ZERO_LENGTH_PTR, type, size, flags);
458 return(ZERO_LENGTH_PTR);
459 }
460
461 /*
462 * Handle hysteresis from prior frees here in malloc(). We cannot
463 * safely manipulate the kernel_map in free() due to free() possibly
464 * being called via an IPI message or from sensitive interrupt code.
465 */
466 while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) {
467 crit_enter();
468 if (slgd->NFreeZones > ZONE_RELS_THRESH) { /* crit sect race */
469 z = slgd->FreeZones;
470 slgd->FreeZones = z->z_Next;
471 --slgd->NFreeZones;
472 kmem_slab_free(z, ZoneSize); /* may block */
473 }
474 crit_exit();
475 }
476 /*
477 * XXX handle oversized frees that were queued from free().
478 */
479 while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) {
480 crit_enter();
481 if ((z = slgd->FreeOvZones) != NULL) {
482 KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
483 slgd->FreeOvZones = z->z_Next;
484 kmem_slab_free(z, z->z_ChunkSize); /* may block */
485 }
486 crit_exit();
487 }
488
489 /*
490 * Handle large allocations directly. There should not be very many of
491 * these so performance is not a big issue.
492 *
493 * The backend allocator is pretty nasty on an SMP system. Use the
494 * slab allocator for one and two page-sized chunks even though we lose
495 * some efficiency. XXX maybe fix mmio and the elf loader instead.
496 */
497 if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
498 struct kmemusage *kup;
499
500 size = round_page(size);
501 chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
502 if (chunk == NULL) {
503 logmemory(malloc, NULL, type, size, flags);
504 return(NULL);
505 }
506 flags &= ~M_ZERO; /* result already zero'd if M_ZERO was set */
507 flags |= M_PASSIVE_ZERO;
508 kup = btokup(chunk);
509 kup->ku_pagecnt = size / PAGE_SIZE;
510 kup->ku_cpu = gd->gd_cpuid;
511 crit_enter();
512 goto done;
513 }
514
515 /*
516 * Attempt to allocate out of an existing zone. First try the free list,
517 * then allocate out of unallocated space. If we find a good zone move
518 * it to the head of the list so later allocations find it quickly
519 * (we might have thousands of zones in the list).
520 *
521 * Note: zoneindex() will panic if size is too large.
522 */
523 zi = zoneindex(&size);
524 KKASSERT(zi < NZONES);
525 crit_enter();
526 if ((z = slgd->ZoneAry[zi]) != NULL) {
527 KKASSERT(z->z_NFree > 0);
528
529 /*
530 * Remove us from the ZoneAry[] when we become empty
531 */
532 if (--z->z_NFree == 0) {
533 slgd->ZoneAry[zi] = z->z_Next;
534 z->z_Next = NULL;
535 }
536
537 /*
538 * Locate a chunk in a free page. This attempts to localize
539 * reallocations into earlier pages without us having to sort
540 * the chunk list. A chunk may still overlap a page boundary.
541 */
542 while (z->z_FirstFreePg < ZonePageCount) {
543 if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
544#ifdef DIAGNOSTIC
545 /*
546 * Diagnostic: c_Next is not total garbage.
547 */
548 KKASSERT(chunk->c_Next == NULL ||
549 ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
550 ((intptr_t)chunk & IN_SAME_PAGE_MASK));
551#endif
552#ifdef INVARIANTS
553 if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
554 panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount);
555 if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
556 panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount);
557 chunk_mark_allocated(z, chunk);
558#endif
559 z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;
560 goto done;
561 }
562 ++z->z_FirstFreePg;
563 }
564
565 /*
566 * No chunks are available but NFree said we had some memory, so
567 * it must be available in the never-before-used-memory area
568 * governed by UIndex. The consequences are very serious if our zone
569 * got corrupted, so we use an explicit panic rather than a KASSERT.
570 */
571 if (z->z_UIndex + 1 != z->z_NMax)
572 z->z_UIndex = z->z_UIndex + 1;
573 else
574 z->z_UIndex = 0;
575 if (z->z_UIndex == z->z_UEndIndex)
576 panic("slaballoc: corrupted zone");
577 chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
578 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
579 flags &= ~M_ZERO;
580 flags |= M_PASSIVE_ZERO;
581 }
582#if defined(INVARIANTS)
583 chunk_mark_allocated(z, chunk);
584#endif
585 goto done;
586 }
587
588 /*
589 * If all zones are exhausted we need to allocate a new zone for this
590 * index. Use M_ZERO to take advantage of pre-zero'd pages. Also see
591 * UAlloc use above in regards to M_ZERO. Note that when we are reusing
592 * a zone from the FreeZones list, UAlloc'd data will not be zero'd, and
593 * we do not pre-zero it because we do not want to mess up the L1 cache.
594 *
595 * At least one subsystem, the tty code (see CROUND), expects power-of-2
596 * allocations to be power-of-2 aligned. We maintain compatibility by
597 * adjusting the base offset below.
598 */
599 {
600 int off;
601
602 if ((z = slgd->FreeZones) != NULL) {
603 slgd->FreeZones = z->z_Next;
604 --slgd->NFreeZones;
605 bzero(z, sizeof(SLZone));
606 z->z_Flags |= SLZF_UNOTZEROD;
607 } else {
608 z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
609 if (z == NULL)
610 goto fail;
611 }
612
613 /*
614 * How big is the base structure?
615 */
616#if defined(INVARIANTS)
617 /*
618 * Make room for z_Bitmap. An exact calculation is somewhat more
619 * complicated than it is worth, so we use a conservative estimate.
620 */
621 off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
622 bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
623#else
624 off = sizeof(SLZone);
625#endif
626
627 /*
628 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
629 * Otherwise just 8-byte align the data.
630 */
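	/*
	 * Worked example of the test below (illustrative): for size 64,
	 * (64 | 63) + 1 == 128 == (64 << 1), so 64 is a power of 2 and the
	 * base offset is rounded up to a 64-byte boundary. For size 96,
	 * (96 | 95) + 1 == 128 != 192, so only MIN_CHUNK_SIZE alignment
	 * is applied.
	 */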
631 if ((size | (size - 1)) + 1 == (size << 1))
632 off = (off + size - 1) & ~(size - 1);
633 else
634 off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
635 z->z_Magic = ZALLOC_SLAB_MAGIC;
636 z->z_ZoneIndex = zi;
637 z->z_NMax = (ZoneSize - off) / size;
638 z->z_NFree = z->z_NMax - 1;
639 z->z_BasePtr = (char *)z + off;
640 z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
641 z->z_ChunkSize = size;
642 z->z_FirstFreePg = ZonePageCount;
643 z->z_CpuGd = gd;
644 z->z_Cpu = gd->gd_cpuid;
645 chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
646 z->z_Next = slgd->ZoneAry[zi];
647 slgd->ZoneAry[zi] = z;
648 if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
649 flags &= ~M_ZERO; /* already zero'd */
650 flags |= M_PASSIVE_ZERO;
651 }
652#if defined(INVARIANTS)
653 chunk_mark_allocated(z, chunk);
654#endif
655
656 /*
657 * Slide the base index for initial allocations out of the next
658 * zone we create so we do not over-weight the lower part of the
659 * cpu memory caches.
660 */
661 slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
662 & (ZALLOC_MAX_ZONE_SIZE - 1);
663 }
664done:
665 ++type->ks_inuse[gd->gd_cpuid];
666 type->ks_memuse[gd->gd_cpuid] += size;
667 type->ks_loosememuse += size; /* not MP synchronized */
668 crit_exit();
669 if (flags & M_ZERO)
670 bzero(chunk, size);
671#ifdef INVARIANTS
672 else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
673 if (use_malloc_pattern) {
674 for (i = 0; i < size; i += sizeof(int)) {
675 *(int *)((char *)chunk + i) = -1;
676 }
677 }
678 chunk->c_Next = (void *)-1; /* avoid accidental double-free check */
679 }
680#endif
681 logmemory(malloc, chunk, type, size, flags);
682 return(chunk);
683fail:
684 crit_exit();
685 logmemory(malloc, NULL, type, size, flags);
686 return(NULL);
687}
688
689/*
690 * kernel realloc. (SLAB ALLOCATOR) (MP SAFE)
691 *
692 * Generally speaking this routine is not called very often and we do
693 * not attempt to optimize it beyond reusing the same pointer if the
694 * new size fits within the chunking of the old pointer's zone.
695 */
696void *
697krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
698{
699 SLZone *z;
700 void *nptr;
701 unsigned long osize;
702
703 KKASSERT((flags & M_ZERO) == 0); /* not supported */
704
705 if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
706 return(kmalloc(size, type, flags));
707 if (size == 0) {
708 kfree(ptr, type);
709 return(NULL);
710 }
711
712 /*
713 * Handle oversized allocations. XXX we really should require that a
714 * size be passed to free() instead of this nonsense.
715 */
716 {
717 struct kmemusage *kup;
718
719 kup = btokup(ptr);
720 if (kup->ku_pagecnt) {
721 osize = kup->ku_pagecnt << PAGE_SHIFT;
722 if (osize == round_page(size))
723 return(ptr);
724 if ((nptr = kmalloc(size, type, flags)) == NULL)
725 return(NULL);
726 bcopy(ptr, nptr, min(size, osize));
727 kfree(ptr, type);
728 return(nptr);
729 }
730 }
731
732 /*
733 * Get the original allocation's zone. If the new request winds up
734 * using the same chunk size we do not have to do anything.
735 */
736 z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
737 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
738
739 zoneindex(&size);
740 if (z->z_ChunkSize == size)
741 return(ptr);
742
743 /*
744 * Allocate memory for the new request size. Note that zoneindex has
745 * already adjusted the request size to the appropriate chunk size, which
746 * should optimize our bcopy(). Then copy and return the new pointer.
747 */
748 if ((nptr = kmalloc(size, type, flags)) == NULL)
749 return(NULL);
750 bcopy(ptr, nptr, min(size, z->z_ChunkSize));
751 kfree(ptr, type);
752 return(nptr);
753}
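
/*
 * Example of the reuse rule above (illustrative): kmalloc(100, ...) lands
 * in the 104-byte chunk class, so krealloc(ptr, 103, ...) returns ptr
 * unchanged, while krealloc(ptr, 96, ...) copies the data into a 96-byte
 * chunk and frees the original.
 */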
754
755/*
756 * Allocate a copy of the specified string.
757 *
758 * (MP SAFE) (MAY BLOCK)
759 */
760char *
761kstrdup(const char *str, struct malloc_type *type)
762{
763 int zlen; /* length inclusive of terminating NUL */
764 char *nstr;
765
766 if (str == NULL)
767 return(NULL);
768 zlen = strlen(str) + 1;
769 nstr = kmalloc(zlen, type, M_WAITOK);
770 bcopy(str, nstr, zlen);
771 return(nstr);
772}
773
774#ifdef SMP
775/*
776 * free() (SLAB ALLOCATOR)
777 *
778 * Free the specified chunk of memory.
779 */
780static
781void
782free_remote(void *ptr)
783{
784 logmemory(free_remote, ptr, *(struct malloc_type **)ptr, -1, 0);
785 kfree(ptr, *(struct malloc_type **)ptr);
786}
787
788#endif
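
/*
 * Cross-cpu free flow, summarized: kfree() below detects that a zone
 * belongs to another cpu, stores the malloc type pointer in the first
 * word of the chunk itself, and sends a passive IPI; free_remote()
 * above then re-issues kfree() on the owning cpu, where the local
 * fast path applies.
 */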
789
790/*
791 * free (SLAB ALLOCATOR)
792 *
793 * Free a memory block previously allocated by malloc. Note that we do not
794 * attempt to update ks_loosememuse as MP races could prevent us from
795 * checking memory limits in malloc.
796 *
797 * MPSAFE
798 */
799void
800kfree(void *ptr, struct malloc_type *type)
801{
802 SLZone *z;
803 SLChunk *chunk;
804 SLGlobalData *slgd;
805 struct globaldata *gd;
806 int pgno;
807
808 logmemory_quick(free_beg);
809 gd = mycpu;
810 slgd = &gd->gd_slab;
811
812 if (ptr == NULL)
813 panic("trying to free NULL pointer");
814
815 /*
816 * Handle special 0-byte allocations
817 */
818 if (ptr == ZERO_LENGTH_PTR) {
819 logmemory(free_zero, ptr, type, -1, 0);
820 logmemory_quick(free_end);
821 return;
822 }
823
824 /*
825 * Handle oversized allocations. XXX we really should require that a
826 * size be passed to free() instead of this nonsense.
827 *
828 * This code is never called via an ipi.
829 */
830 {
831 struct kmemusage *kup;
832 unsigned long size;
833
834 kup = btokup(ptr);
835 if (kup->ku_pagecnt) {
836 size = kup->ku_pagecnt << PAGE_SHIFT;
837 kup->ku_pagecnt = 0;
838#ifdef INVARIANTS
839 KKASSERT(sizeof(weirdary) <= size);
840 bcopy(weirdary, ptr, sizeof(weirdary));
841#endif
842 /*
843 * note: we always adjust our cpu's slot, not the originating
844 * cpu (kup->ku_cpu). The statistics are in aggregate.
845 *
846 * note: XXX we have still inherited the interrupts-can't-block
847 * assumption. An interrupt thread does not bump
848 * gd_intr_nesting_level so check TDF_INTTHREAD. This is
849 * primarily until we can fix softupdate's assumptions about free().
850 */
851 crit_enter();
852 --type->ks_inuse[gd->gd_cpuid];
853 type->ks_memuse[gd->gd_cpuid] -= size;
854 if (mycpu->gd_intr_nesting_level || (gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
855 logmemory(free_ovsz_delayed, ptr, type, size, 0);
856 z = (SLZone *)ptr;
857 z->z_Magic = ZALLOC_OVSZ_MAGIC;
858 z->z_Next = slgd->FreeOvZones;
859 z->z_ChunkSize = size;
860 slgd->FreeOvZones = z;
861 crit_exit();
862 } else {
863 crit_exit();
864 logmemory(free_ovsz, ptr, type, size, 0);
865 kmem_slab_free(ptr, size); /* may block */
866 }
867 logmemory_quick(free_end);
868 return;
869 }
870 }
871
872 /*
873 * Zone case. Figure out the zone based on the fact that it is
874 * ZoneSize aligned.
875 */
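	/*
	 * Example, assuming a 128K ZoneSize (ZoneMask == 0x1ffff): for
	 * ptr == 0xffffffbfc0a12345 the masking below yields the zone
	 * header at 0xffffffbfc0a00000, the containing 128K boundary.
	 */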
876 z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
877 KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
878
879 /*
880 * If we do not own the zone then forward the request to the
881 * cpu that does. Since the timing is non-critical, a passive
882 * message is sent.
883 */
884 if (z->z_CpuGd != gd) {
885 *(struct malloc_type **)ptr = type;
886#ifdef SMP
887 logmemory(free_request, ptr, type, z->z_ChunkSize, 0);
888 lwkt_send_ipiq_passive(z->z_CpuGd, free_remote, ptr);
889#else
890 panic("Corrupt SLZone");
891#endif
892 logmemory_quick(free_end);
893 return;
894 }
895
896 logmemory(free_chunk, ptr, type, z->z_ChunkSize, 0);
897
898 if (type->ks_magic != M_MAGIC)
899 panic("free: malloc type lacks magic");
900
901 crit_enter();
902 pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
903 chunk = ptr;
904
905#ifdef INVARIANTS
906 /*
907 * Attempt to detect a double-free. To reduce overhead we only check
908 * if there appears to be a link pointer at the base of the data.
909 */
910 if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
911 SLChunk *scan;
912 for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
913 if (scan == chunk)
914 panic("Double free at %p", chunk);
915 }
916 }
917 chunk_mark_free(z, chunk);
918#endif
919
920 /*
921 * Put weird data into the memory to detect modifications after freeing,
922 * illegal pointer use after freeing (we should fault on the odd address),
923 * and so forth. XXX needs more work, see the old malloc code.
924 */
925#ifdef INVARIANTS
926 if (z->z_ChunkSize < sizeof(weirdary))
927 bcopy(weirdary, chunk, z->z_ChunkSize);
928 else
929 bcopy(weirdary, chunk, sizeof(weirdary));
930#endif
931
932 /*
933 * Add this free non-zero'd chunk to a linked list for reuse, adjust
934 * z_FirstFreePg.
935 */
936#ifdef INVARIANTS
937 if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
938 panic("BADFREE %p", chunk);
939#endif
940 chunk->c_Next = z->z_PageAry[pgno];
941 z->z_PageAry[pgno] = chunk;
942#ifdef INVARIANTS
943 if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
944 panic("BADFREE2");
945#endif
946 if (z->z_FirstFreePg > pgno)
947 z->z_FirstFreePg = pgno;
948
949 /*
950 * Bump the number of free chunks. If it becomes non-zero the zone
951 * must be added back onto the appropriate list.
952 */
953 if (z->z_NFree++ == 0) {
954 z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
955 slgd->ZoneAry[z->z_ZoneIndex] = z;
956 }
957
958 --type->ks_inuse[z->z_Cpu];
959 type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;
960
961 /*
962 * If the zone becomes totally free, and there are other zones we
963 * can allocate from, move this zone to the FreeZones list. Since
964 * this code can be called from an IPI callback, do *NOT* try to mess
965 * with kernel_map here. Hysteresis will be performed at malloc() time.
966 */
967 if (z->z_NFree == z->z_NMax &&
968 (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
969 ) {
970 SLZone **pz;
971
972 for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
973 ;
974 *pz = z->z_Next;
975 z->z_Magic = -1;
976 z->z_Next = slgd->FreeZones;
977 slgd->FreeZones = z;
978 ++slgd->NFreeZones;
979 }
980 logmemory_quick(free_end);
981 crit_exit();
982}
983
984#if defined(INVARIANTS)
985/*
986 * Helper routines for sanity checks
987 */
988static
989void
990chunk_mark_allocated(SLZone *z, void *chunk)
991{
992 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
993 __uint32_t *bitptr;
994
995 KASSERT(bitdex >= 0 && bitdex < z->z_NMax, ("memory chunk %p bit index %d is illegal", chunk, bitdex));
996 bitptr = &z->z_Bitmap[bitdex >> 5];
997 bitdex &= 31;
998 KASSERT((*bitptr & (1 << bitdex)) == 0, ("memory chunk %p is already allocated!", chunk));
999 *bitptr |= 1 << bitdex;
1000}
1001
1002static
1003void
1004chunk_mark_free(SLZone *z, void *chunk)
1005{
1006 int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1007 __uint32_t *bitptr;
1008
1009 KASSERT(bitdex >= 0 && bitdex < z->z_NMax, ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
1010 bitptr = &z->z_Bitmap[bitdex >> 5];
1011 bitdex &= 31;
1012 KASSERT((*bitptr & (1 << bitdex)) != 0, ("memory chunk %p is already free!", chunk));
1013 *bitptr &= ~(1 << bitdex);
1014}
1015
1016#endif
1017
1018/*
1019 * kmem_slab_alloc()
1020 *
1021 * Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
1022 * specified alignment. M_* flags are expected in the flags field.
1023 *
1024 * Alignment must be a multiple of PAGE_SIZE.
1025 *
1026 * NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
1027 * but when we move zalloc() over to use this function as its backend
1028 * we will have to switch to kreserve/krelease and call reserve(0)
1029 * after the new space is made available.
1030 *
1031 * Interrupt code which has preempted other code is not allowed to
1032 * use PQ_CACHE pages. However, if an interrupt thread is run
1033 * non-preemptively or blocks and then runs non-preemptively, then
1034 * it is free to use PQ_CACHE pages.
1035 *
1036 * This routine will currently obtain the BGL.
1037 *
1038 * MPALMOSTSAFE - acquires mplock
1039 */
1040static void *
1041kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
1042{
1043 vm_size_t i;
1044 vm_offset_t addr;
1045 int count, vmflags, base_vmflags;
1046 thread_t td;
1047
1048 size = round_page(size);
1049 addr = vm_map_min(&kernel_map);
1050
1051 /*
1052 * Reserve properly aligned space from kernel_map. RNOWAIT allocations
1053 * cannot block.
1054 */
1055 if (flags & M_RNOWAIT) {
1056 if (try_mplock() == 0)
1057 return(NULL);
1058 } else {
1059 get_mplock();
1060 }
1061 count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1062 crit_enter();
1063 vm_map_lock(&kernel_map);
1064 if (vm_map_findspace(&kernel_map, addr, size, align, &addr)) {
1065 vm_map_unlock(&kernel_map);
1066 if ((flags & M_NULLOK) == 0)
1067 panic("kmem_slab_alloc(): kernel_map ran out of space!");
1068 crit_exit();
1069 vm_map_entry_release(count);
1070 rel_mplock();
1071 return(NULL);
1072 }
1073
1074 /*
1075 * kernel_object maps 1:1 to kernel_map.
1076 */
1077 vm_object_reference(&kernel_object);
1078 vm_map_insert(&kernel_map, &count,
1079 &kernel_object, addr, addr, addr + size,
1080 VM_MAPTYPE_NORMAL,
1081 VM_PROT_ALL, VM_PROT_ALL,
1082 0);
1083
1084 td = curthread;
1085
1086 base_vmflags = 0;
1087 if (flags & M_ZERO)
1088 base_vmflags |= VM_ALLOC_ZERO;
1089 if (flags & M_USE_RESERVE)
1090 base_vmflags |= VM_ALLOC_SYSTEM;
1091 if (flags & M_USE_INTERRUPT_RESERVE)
1092 base_vmflags |= VM_ALLOC_INTERRUPT;
1093 if ((flags & (M_RNOWAIT|M_WAITOK)) == 0)
1094 panic("kmem_slab_alloc: bad flags %08x (%p)", flags, ((int **)&size)[-1]);
1095
1096
1097 /*
1098 * Allocate the pages. Do not mess with the PG_ZERO flag yet.
1099 */
1100 for (i = 0; i < size; i += PAGE_SIZE) {
1101 vm_page_t m;
1102
1103 /*
1104 * VM_ALLOC_NORMAL can only be set if we are not preempting.
1105 *
1106 * VM_ALLOC_SYSTEM is automatically set if we are preempting and
1107 * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
1108 * implied in this case), though I'm not sure if we really need to do
1109 * that.
1110 */
1111 vmflags = base_vmflags;
1112 if (flags & M_WAITOK) {
1113 if (td->td_preempted)
1114 vmflags |= VM_ALLOC_SYSTEM;
1115 else
1116 vmflags |= VM_ALLOC_NORMAL;
1117 }
1118
1119 m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags);
1120
1121 /*
1122 * If the allocation failed we either return NULL or we retry.
1123 *
1124 * If M_WAITOK is specified we wait for more memory and retry.
1125 * If M_WAITOK is specified from a preemption we yield instead of
1126 * wait. Livelock will not occur because the interrupt thread
1127 * will not be preempting anyone the second time around after the
1128 * yield.
1129 */
1130 if (m == NULL) {
1131 if (flags & M_WAITOK) {
1132 if (td->td_preempted) {
1133 vm_map_unlock(&kernel_map);
1134 lwkt_yield();
1135 vm_map_lock(&kernel_map);
1136 } else {
1137 vm_map_unlock(&kernel_map);
1138 vm_wait();
1139 vm_map_lock(&kernel_map);
1140 }
1141 i -= PAGE_SIZE; /* retry */
1142 continue;
1143 }
1144
1145 /*
1146 * We were unable to recover; clean up and return NULL.
1147 */
1148 while (i != 0) {
1149 i -= PAGE_SIZE;
1150 m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
1151 vm_page_free(m);
1152 }
1153 vm_map_delete(&kernel_map, addr, addr + size, &count);
1154 vm_map_unlock(&kernel_map);
1155 crit_exit();
1156 vm_map_entry_release(count);
1157 rel_mplock();
1158 return(NULL);
1159 }
1160 }
1161
1162 /*
1163 * Success!
1164 *
1165 * Mark the map entry as non-pageable using a routine that allows us to
1166 * populate the underlying pages.
1167 */
1168 vm_map_set_wired_quick(&kernel_map, addr, size, &count);
1169 crit_exit();
1170
1171 /*
1172 * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO.
1173 */
1174 for (i = 0; i < size; i += PAGE_SIZE) {
1175 vm_page_t m;
1176
1177 m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i));
1178 m->valid = VM_PAGE_BITS_ALL;
1179 vm_page_wire(m);
1180 vm_page_wakeup(m);
1181 pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
1182 if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO))
1183 bzero((char *)addr + i, PAGE_SIZE);
1184 vm_page_flag_clear(m, PG_ZERO);
1185 vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
1186 }
1187 vm_map_unlock(&kernel_map);
1188 vm_map_entry_release(count);
1189 rel_mplock();
1190 return((void *)addr);
1191}
1192
1193/*
1194 * kmem_slab_free()
1195 *
1196 * MPALMOSTSAFE - acquires mplock
1197 */
1198static void
1199kmem_slab_free(void *ptr, vm_size_t size)
1200{
1201 get_mplock();
1202 crit_enter();
1203 vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
1204 crit_exit();
1205 rel_mplock();
1206}
1207