Commit | Line | Data |
---|---|---|
a108bf71 | 1 | /* |
5b287bba | 2 | * KERN_SLABALLOC.C - Kernel SLAB memory allocator |
8c10bfcf MD |
3 | * |
4 | * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved. | |
5 | * | |
6 | * This code is derived from software contributed to The DragonFly Project | |
7 | * by Matthew Dillon <dillon@backplane.com> | |
8 | * | |
a108bf71 MD |
9 | * Redistribution and use in source and binary forms, with or without |
10 | * modification, are permitted provided that the following conditions | |
11 | * are met: | |
8c10bfcf | 12 | * |
a108bf71 MD |
13 | * 1. Redistributions of source code must retain the above copyright |
14 | * notice, this list of conditions and the following disclaimer. | |
15 | * 2. Redistributions in binary form must reproduce the above copyright | |
8c10bfcf MD |
16 | * notice, this list of conditions and the following disclaimer in |
17 | * the documentation and/or other materials provided with the | |
18 | * distribution. | |
19 | * 3. Neither the name of The DragonFly Project nor the names of its | |
20 | * contributors may be used to endorse or promote products derived | |
21 | * from this software without specific, prior written permission. | |
22 | * | |
23 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
24 | * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
25 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | |
26 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE | |
27 | * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, | |
28 | * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, | |
29 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |
30 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED | |
31 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, | |
32 | * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT | |
33 | * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |
a108bf71 | 34 | * SUCH DAMAGE. |
8c10bfcf | 35 | * |
45d2b1d8 | 36 | * $DragonFly: src/sys/kern/kern_slaballoc.c,v 1.53 2008/06/30 03:00:36 dillon Exp $ |
a108bf71 MD |
37 | * |
38 | * This module implements a slab allocator drop-in replacement for the | |
39 | * kernel malloc(). | |
40 | * | |
41 | * A slab allocator reserves a ZONE for each chunk size, then lays the | |
42 | * chunks out in an array within the zone. Allocation and deallocation | |
43 | * is nearly instantaneous, and fragmentation/overhead losses are limited | |
44 | * to a fixed worst-case amount (a standalone sketch of this zone/chunk layout follows this header comment). | |
45 | * | |
46 | * The downside of this slab implementation is in the chunk size | |
47 | * multiplied by the number of zones. ~80 zones * 128K = 10MB of VM per cpu. | |
48 | * In a kernel implementation all this memory will be physical so | |
49 | * the zone size is adjusted downward on machines with less physical | |
50 | * memory. The upside is that overhead is bounded... this is the *worst* | |
51 | * case overhead. | |
52 | * | |
53 | * Slab management is done on a per-cpu basis and no locking or mutexes | |
54 | * are required, only a critical section. When one cpu frees memory | |
55 | * belonging to another cpu's slab manager an asynchronous IPI message | |
56 | * will be queued to execute the operation. In addition, both the | |
57 | * high level slab allocator and the low level zone allocator optimize | |
58 | * M_ZERO requests, and the slab allocator does not have to pre-initialize | |
59 | * the linked list of chunks. | |
60 | * | |
61 | * XXX Balancing is needed between cpus. Balance will be handled through | |
62 | * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks. | |
63 | * | |
64 | * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of | |
65 | * the new zone should be restricted to M_USE_RESERVE requests only. | |
66 | * | |
67 | * Alloc Size Chunking Number of zones | |
68 | * 0-127 8 16 | |
69 | * 128-255 16 8 | |
70 | * 256-511 32 8 | |
71 | * 512-1023 64 8 | |
72 | * 1024-2047 128 8 | |
73 | * 2048-4095 256 8 | |
74 | * 4096-8191 512 8 | |
75 | * 8192-16383 1024 8 | |
76 | * 16384-32767 2048 8 | |
77 | * (if PAGE_SIZE is 4K the maximum zone allocation is 16383) | |
78 | * | |
46a3f46d | 79 | * Allocations >= ZoneLimit go directly to kmem. |
a108bf71 MD |
80 | * |
81 | * API REQUIREMENTS AND SIDE EFFECTS | |
82 | * | |
83 | * To operate as a drop-in replacement to the FreeBSD-4.x malloc() we | |
84 | * have remained compatible with the following API requirements: | |
85 | * | |
86 | * + small power-of-2 sized allocations are power-of-2 aligned (kern_tty) | |
3d177b31 | 87 | * + all power-of-2 sized allocations are power-of-2 aligned (twe) |
a108bf71 MD |
88 | * + malloc(0) is allowed and returns non-NULL (ahc driver) |
89 | * + ability to allocate arbitrarily large chunks of memory | |
90 | */ | |
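/*
 * Illustrative aside (not part of the original source): a minimal user-space
 * sketch of the zone layout described in the header above -- one zone per
 * chunk size, the chunks laid out in an array behind the zone header, and a
 * free list threaded through the free chunks so allocation and deallocation
 * are O(1).  All toy_* names are hypothetical, and the real allocator also
 * defers linking never-used chunks (z_UIndex), which this sketch omits.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct toy_chunk { struct toy_chunk *next; } toy_chunk;

typedef struct toy_zone {
	size_t	   chunk_size;	/* fixed chunk size for this zone */
	toy_chunk *free_list;	/* LIFO list threaded through free chunks */
} toy_zone;

static toy_zone *
toy_zone_create(size_t chunk_size, int nchunks)
{
	toy_zone *z = malloc(sizeof(*z) + chunk_size * nchunks);
	char *base;
	int i;

	if (z == NULL)
		return (NULL);
	base = (char *)(z + 1);
	z->chunk_size = chunk_size;
	z->free_list = NULL;
	for (i = 0; i < nchunks; ++i) {		/* thread the free list */
		toy_chunk *c = (toy_chunk *)(base + i * chunk_size);
		c->next = z->free_list;
		z->free_list = c;
	}
	return (z);
}

static void *
toy_alloc(toy_zone *z)				/* O(1): pop the free list */
{
	toy_chunk *c = z->free_list;

	if (c)
		z->free_list = c->next;
	return (c);
}

static void
toy_free(toy_zone *z, void *ptr)		/* O(1): push onto the free list */
{
	toy_chunk *c = ptr;

	c->next = z->free_list;
	z->free_list = c;
}

int
main(void)
{
	toy_zone *z = toy_zone_create(64, 16);
	void *a;

	if (z == NULL)
		return (1);
	a = toy_alloc(z);
	toy_free(z, a);
	printf("64-byte chunk %p allocated and freed in O(1)\n", a);
	free(z);
	return (0);
}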
91 | ||
92 | #include "opt_vm.h" | |
93 | ||
a108bf71 MD |
94 | #include <sys/param.h> |
95 | #include <sys/systm.h> | |
96 | #include <sys/kernel.h> | |
97 | #include <sys/slaballoc.h> | |
98 | #include <sys/mbuf.h> | |
99 | #include <sys/vmmeter.h> | |
100 | #include <sys/lock.h> | |
101 | #include <sys/thread.h> | |
102 | #include <sys/globaldata.h> | |
d2182dc1 | 103 | #include <sys/sysctl.h> |
f2b5daf9 | 104 | #include <sys/ktr.h> |
a108bf71 MD |
105 | |
106 | #include <vm/vm.h> | |
107 | #include <vm/vm_param.h> | |
108 | #include <vm/vm_kern.h> | |
109 | #include <vm/vm_extern.h> | |
110 | #include <vm/vm_object.h> | |
111 | #include <vm/pmap.h> | |
112 | #include <vm/vm_map.h> | |
113 | #include <vm/vm_page.h> | |
114 | #include <vm/vm_pageout.h> | |
115 | ||
116 | #include <machine/cpu.h> | |
117 | ||
118 | #include <sys/thread2.h> | |
119 | ||
120 | #define arysize(ary) (sizeof(ary)/sizeof((ary)[0])) | |
121 | ||
f2b5daf9 MD |
122 | #define MEMORY_STRING "ptr=%p type=%p size=%d flags=%04x" |
123 | #define MEMORY_ARG_SIZE (sizeof(void *) * 2 + sizeof(unsigned long) + \ | |
124 | sizeof(int)) | |
125 | ||
126 | #if !defined(KTR_MEMORY) | |
127 | #define KTR_MEMORY KTR_ALL | |
128 | #endif | |
129 | KTR_INFO_MASTER(memory); | |
130 | KTR_INFO(KTR_MEMORY, memory, malloc, 0, MEMORY_STRING, MEMORY_ARG_SIZE); | |
131 | KTR_INFO(KTR_MEMORY, memory, free_zero, 1, MEMORY_STRING, MEMORY_ARG_SIZE); | |
017ba73b MD |
132 | KTR_INFO(KTR_MEMORY, memory, free_ovsz, 2, MEMORY_STRING, MEMORY_ARG_SIZE); |
133 | KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 3, MEMORY_STRING, MEMORY_ARG_SIZE); | |
134 | KTR_INFO(KTR_MEMORY, memory, free_chunk, 4, MEMORY_STRING, MEMORY_ARG_SIZE); | |
f2b5daf9 | 135 | #ifdef SMP |
017ba73b MD |
136 | KTR_INFO(KTR_MEMORY, memory, free_request, 5, MEMORY_STRING, MEMORY_ARG_SIZE); |
137 | KTR_INFO(KTR_MEMORY, memory, free_remote, 6, MEMORY_STRING, MEMORY_ARG_SIZE); | |
f2b5daf9 | 138 | #endif |
b68ad50c MD |
139 | KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin", 0); |
140 | KTR_INFO(KTR_MEMORY, memory, free_beg, 0, "free begin", 0); | |
141 | KTR_INFO(KTR_MEMORY, memory, free_end, 0, "free end", 0); | |
f2b5daf9 MD |
142 | |
143 | #define logmemory(name, ptr, type, size, flags) \ | |
144 | KTR_LOG(memory_ ## name, ptr, type, size, flags) | |
b68ad50c MD |
145 | #define logmemory_quick(name) \ |
146 | KTR_LOG(memory_ ## name) | |
f2b5daf9 | 147 | |
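/*
 * Illustrative aside (not part of the original source): logmemory() above
 * relies on preprocessor token pasting -- "memory_ ## name" expands to the
 * specific KTR info point, so logmemory(malloc, ...) logs through
 * memory_malloc.  A hedged user-space sketch of the same dispatch pattern
 * follows; the toy_* names are hypothetical and KTR itself is not reproduced.
 */
#include <stdio.h>

/* one format per event, standing in for the KTR_INFO() declarations above */
static const char *toy_event_malloc = "malloc ptr=%p size=%lu\n";
static const char *toy_event_free   = "free   ptr=%p size=%lu\n";

/* toy_log(malloc, ...) pastes into toy_event_malloc, as logmemory() does */
#define toy_log(name, ptr, size) \
	printf(toy_event_ ## name, (void *)(ptr), (unsigned long)(size))

int
main(void)
{
	char buf[64];

	toy_log(malloc, buf, sizeof(buf));
	toy_log(free, buf, sizeof(buf));
	return (0);
}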
a108bf71 MD |
148 | /* |
149 | * Fixed globals (not per-cpu) | |
150 | */ | |
151 | static int ZoneSize; | |
46a3f46d | 152 | static int ZoneLimit; |
a108bf71 | 153 | static int ZonePageCount; |
a108bf71 | 154 | static int ZoneMask; |
460426e6 | 155 | struct malloc_type *kmemstatistics; /* exported to vmstat */ |
a108bf71 MD |
156 | static struct kmemusage *kmemusage; |
157 | static int32_t weirdary[16]; | |
158 | ||
159 | static void *kmem_slab_alloc(vm_size_t bytes, vm_offset_t align, int flags); | |
160 | static void kmem_slab_free(void *ptr, vm_size_t bytes); | |
10cc6608 MD |
161 | #if defined(INVARIANTS) |
162 | static void chunk_mark_allocated(SLZone *z, void *chunk); | |
163 | static void chunk_mark_free(SLZone *z, void *chunk); | |
164 | #endif | |
a108bf71 MD |
165 | |
166 | /* | |
167 | * Misc constants. Note that allocations that are exact multiples of | |
168 | * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module. | |
169 | * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists. | |
170 | */ | |
171 | #define MIN_CHUNK_SIZE 8 /* in bytes */ | |
172 | #define MIN_CHUNK_MASK (MIN_CHUNK_SIZE - 1) | |
173 | #define ZONE_RELS_THRESH 2 /* threshold number of zones */ | |
174 | #define IN_SAME_PAGE_MASK (~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK) | |
175 | ||
176 | /* | |
177 | * The WEIRD_ADDR is used as known text to copy into free objects to | |
178 | * try to create deterministic failure cases if the data is accessed after | |
179 | * free. | |
180 | */ | |
181 | #define WEIRD_ADDR 0xdeadc0de | |
182 | #define MAX_COPY sizeof(weirdary) | |
183 | #define ZERO_LENGTH_PTR ((void *)-8) | |
184 | ||
185 | /* | |
186 | * Misc global malloc buckets | |
187 | */ | |
188 | ||
189 | MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches"); | |
190 | MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory"); | |
191 | MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers"); | |
192 | ||
193 | MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options"); | |
194 | MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery"); | |
195 | ||
196 | /* | |
197 | * Initialize the slab memory allocator. We have to choose a zone size based | |
198 | * on available physical memory. We choose a zone size which is approximately | |
199 | * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of | |
200 | * 128K. The zone size is limited to the bounds set in slaballoc.h | |
201 | * (typically 32K min, 128K max). | |
202 | */ | |
203 | static void kmeminit(void *dummy); | |
204 | ||
ba39e2e0 | 205 | SYSINIT(kmem, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL) |
a108bf71 | 206 | |
d2182dc1 MD |
207 | #ifdef INVARIANTS |
208 | /* | |
209 | * If enabled any memory allocated without M_ZERO is initialized to -1. | |
210 | */ | |
211 | static int use_malloc_pattern; | |
212 | SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW, | |
213 | &use_malloc_pattern, 0, ""); | |
214 | #endif | |
215 | ||
a108bf71 MD |
216 | static void |
217 | kmeminit(void *dummy) | |
218 | { | |
219 | vm_poff_t limsize; | |
220 | int usesize; | |
221 | int i; | |
222 | vm_pindex_t npg; | |
223 | ||
224 | limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE; | |
c439ad8f MD |
225 | if (limsize > KvaSize) |
226 | limsize = KvaSize; | |
a108bf71 MD |
227 | |
228 | usesize = (int)(limsize / 1024); /* convert to KB */ | |
229 | ||
230 | ZoneSize = ZALLOC_MIN_ZONE_SIZE; | |
231 | while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize) | |
232 | ZoneSize <<= 1; | |
46a3f46d MD |
233 | ZoneLimit = ZoneSize / 4; |
234 | if (ZoneLimit > ZALLOC_ZONE_LIMIT) | |
235 | ZoneLimit = ZALLOC_ZONE_LIMIT; | |
a108bf71 | 236 | ZoneMask = ZoneSize - 1; |
a108bf71 MD |
237 | ZonePageCount = ZoneSize / PAGE_SIZE; |
238 | ||
c439ad8f MD |
239 | npg = KvaSize / PAGE_SIZE; |
240 | kmemusage = kmem_slab_alloc(npg * sizeof(struct kmemusage), | |
241 | PAGE_SIZE, M_WAITOK|M_ZERO); | |
a108bf71 MD |
242 | |
243 | for (i = 0; i < arysize(weirdary); ++i) | |
244 | weirdary[i] = WEIRD_ADDR; | |
245 | ||
246 | if (bootverbose) | |
6ea70f76 | 247 | kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024); |
a108bf71 MD |
248 | } |
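/*
 * Illustrative aside (not part of the original source): the comment above
 * kmeminit() says the zone size is roughly 1/1024th of physical memory,
 * clamped to the slaballoc.h bounds.  A hedged user-space sketch of that
 * sizing rule follows; the TOY_* bounds and the exact rounding are
 * assumptions -- the kernel loop above is the authoritative version.
 */
#include <stdio.h>

#define TOY_MIN_ZONE	(32 * 1024)	/* assumed ZALLOC_MIN_ZONE_SIZE */
#define TOY_MAX_ZONE	(128 * 1024)	/* assumed ZALLOC_MAX_ZONE_SIZE */

static long
toy_zone_size(long long physmem_bytes)
{
	long target = (long)(physmem_bytes / 1024);	/* ~1/1024th of memory */
	long zsize = TOY_MIN_ZONE;

	while (zsize < TOY_MAX_ZONE && (zsize << 1) <= target)
		zsize <<= 1;			/* stay at a power of 2 */
	return (zsize);
}

int
main(void)
{
	long long mems[] = { 32LL << 20, 128LL << 20, 1LL << 30, 8LL << 30 };
	int i;

	for (i = 0; i < 4; ++i)
		printf("%5lld MB RAM -> %3ld KB zone\n",
		       mems[i] >> 20, toy_zone_size(mems[i]) / 1024);
	return (0);
}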
249 | ||
250 | /* | |
bba6a44d | 251 | * Initialize a malloc type tracking structure. |
a108bf71 MD |
252 | */ |
253 | void | |
254 | malloc_init(void *data) | |
255 | { | |
256 | struct malloc_type *type = data; | |
257 | vm_poff_t limsize; | |
258 | ||
259 | if (type->ks_magic != M_MAGIC) | |
260 | panic("malloc type lacks magic"); | |
261 | ||
262 | if (type->ks_limit != 0) | |
263 | return; | |
264 | ||
265 | if (vmstats.v_page_count == 0) | |
266 | panic("malloc_init not allowed before vm init"); | |
267 | ||
268 | limsize = (vm_poff_t)vmstats.v_page_count * PAGE_SIZE; | |
c439ad8f MD |
269 | if (limsize > KvaSize) |
270 | limsize = KvaSize; | |
a108bf71 MD |
271 | type->ks_limit = limsize / 10; |
272 | ||
273 | type->ks_next = kmemstatistics; | |
274 | kmemstatistics = type; | |
275 | } | |
276 | ||
277 | void | |
278 | malloc_uninit(void *data) | |
279 | { | |
280 | struct malloc_type *type = data; | |
281 | struct malloc_type *t; | |
bba6a44d MD |
282 | #ifdef INVARIANTS |
283 | int i; | |
1d712609 | 284 | long ttl; |
bba6a44d | 285 | #endif |
a108bf71 MD |
286 | |
287 | if (type->ks_magic != M_MAGIC) | |
288 | panic("malloc type lacks magic"); | |
289 | ||
290 | if (vmstats.v_page_count == 0) | |
291 | panic("malloc_uninit not allowed before vm init"); | |
292 | ||
293 | if (type->ks_limit == 0) | |
294 | panic("malloc_uninit on uninitialized type"); | |
295 | ||
6c92c1f2 SZ |
296 | #ifdef SMP |
297 | /* Make sure that all pending kfree()s are finished. */ | |
298 | lwkt_synchronize_ipiqs("muninit"); | |
299 | #endif | |
300 | ||
a108bf71 | 301 | #ifdef INVARIANTS |
1d712609 MD |
302 | /* |
303 | * memuse is only correct in aggregation. Due to memory being allocated | |
304 | * on one cpu and freed on another individual array entries may be | |
305 | * negative or positive (canceling each other out). | |
306 | */ | |
307 | for (i = ttl = 0; i < ncpus; ++i) | |
308 | ttl += type->ks_memuse[i]; | |
309 | if (ttl) { | |
6ea70f76 | 310 | kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n", |
1d712609 | 311 | ttl, type->ks_shortdesc, i); |
a108bf71 MD |
312 | } |
313 | #endif | |
314 | if (type == kmemstatistics) { | |
315 | kmemstatistics = type->ks_next; | |
316 | } else { | |
317 | for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) { | |
318 | if (t->ks_next == type) { | |
319 | t->ks_next = type->ks_next; | |
320 | break; | |
321 | } | |
322 | } | |
323 | } | |
324 | type->ks_next = NULL; | |
325 | type->ks_limit = 0; | |
326 | } | |
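/*
 * Illustrative aside (not part of the original source): the INVARIANTS check
 * above sums ks_memuse[] across all cpus because only the aggregate is
 * meaningful -- a chunk allocated on one cpu and freed on another leaves one
 * per-cpu slot positive and another negative.  A small hedged sketch of that
 * cancellation (TOY_NCPUS and the counters are hypothetical):
 */
#include <stdio.h>

#define TOY_NCPUS 4

int
main(void)
{
	long memuse[TOY_NCPUS] = { 0 };	/* per-cpu byte counters for one type */
	long ttl = 0;
	int i;

	memuse[0] += 512;	/* 512-byte chunk allocated on cpu 0 ... */
	memuse[2] -= 512;	/* ... and accounted on cpu 2 when freed  */

	for (i = 0; i < TOY_NCPUS; ++i)
		ttl += memuse[i];

	/* individual slots are skewed (+512 / -512) but the total is 0 */
	printf("cpu0=%ld cpu2=%ld aggregate=%ld\n", memuse[0], memuse[2], ttl);
	return (0);
}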
327 | ||
328 | /* | |
329 | * Calculate the zone index for the allocation request size and set the | |
330 | * allocation request size to that particular zone's chunk size. | |
331 | */ | |
332 | static __inline int | |
333 | zoneindex(unsigned long *bytes) | |
334 | { | |
335 | unsigned int n = (unsigned int)*bytes; /* unsigned for shift opt */ | |
336 | if (n < 128) { | |
337 | *bytes = n = (n + 7) & ~7; | |
338 | return(n / 8 - 1); /* 8 byte chunks, 16 zones */ | |
339 | } | |
340 | if (n < 256) { | |
341 | *bytes = n = (n + 15) & ~15; | |
342 | return(n / 16 + 7); | |
343 | } | |
344 | if (n < 8192) { | |
345 | if (n < 512) { | |
346 | *bytes = n = (n + 31) & ~31; | |
347 | return(n / 32 + 15); | |
348 | } | |
349 | if (n < 1024) { | |
350 | *bytes = n = (n + 63) & ~63; | |
351 | return(n / 64 + 23); | |
352 | } | |
353 | if (n < 2048) { | |
354 | *bytes = n = (n + 127) & ~127; | |
355 | return(n / 128 + 31); | |
356 | } | |
357 | if (n < 4096) { | |
358 | *bytes = n = (n + 255) & ~255; | |
359 | return(n / 256 + 39); | |
360 | } | |
361 | *bytes = n = (n + 511) & ~511; | |
362 | return(n / 512 + 47); | |
363 | } | |
364 | #if ZALLOC_ZONE_LIMIT > 8192 | |
365 | if (n < 16384) { | |
366 | *bytes = n = (n + 1023) & ~1023; | |
367 | return(n / 1024 + 55); | |
368 | } | |
369 | #endif | |
370 | #if ZALLOC_ZONE_LIMIT > 16384 | |
371 | if (n < 32768) { | |
372 | *bytes = n = (n + 2047) & ~2047; | |
373 | return(n / 2048 + 63); | |
374 | } | |
375 | #endif | |
376 | panic("Unexpected byte count %d", n); | |
377 | return(0); | |
378 | } | |
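/*
 * Illustrative aside (not part of the original source): a hedged user-space
 * copy of the zoneindex() rounding above, used only to spot-check the size
 * class table from the header comment -- the rounded chunk never drops below
 * the request and the index stays within the 64 classes covering sizes below
 * 8K.  toy_zoneindex() is a hypothetical name and reproduces only the
 * below-8192 cases.
 */
#include <assert.h>
#include <stdio.h>

static int
toy_zoneindex(unsigned long *bytes)		/* valid for *bytes < 8192 only */
{
	unsigned int n = (unsigned int)*bytes;

	if (n < 128)  { *bytes = n = (n + 7)   & ~7;   return (n / 8   - 1);  }
	if (n < 256)  { *bytes = n = (n + 15)  & ~15;  return (n / 16  + 7);  }
	if (n < 512)  { *bytes = n = (n + 31)  & ~31;  return (n / 32  + 15); }
	if (n < 1024) { *bytes = n = (n + 63)  & ~63;  return (n / 64  + 23); }
	if (n < 2048) { *bytes = n = (n + 127) & ~127; return (n / 128 + 31); }
	if (n < 4096) { *bytes = n = (n + 255) & ~255; return (n / 256 + 39); }
	*bytes = n = (n + 511) & ~511;
	return (n / 512 + 47);
}

int
main(void)
{
	unsigned long req;

	for (req = 1; req < 8192; ++req) {
		unsigned long chunk = req;
		int zi = toy_zoneindex(&chunk);

		assert(chunk >= req);		/* never rounds below the request */
		assert(zi >= 0 && zi < 64);	/* 16 + 6*8 classes below 8K      */
	}
	printf("size classes check out for requests 1..8191\n");
	return (0);
}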
379 | ||
380 | /* | |
5b287bba | 381 | * malloc() (SLAB ALLOCATOR) |
a108bf71 MD |
382 | * |
383 | * Allocate memory via the slab allocator. If the request is too large, | |
384 | * or if it is page-aligned beyond a certain size, we fall back to the | |
385 | * KMEM subsystem. A SLAB tracking descriptor must be specified, use | |
386 | * &SlabMisc if you don't care. | |
387 | * | |
8cb2bf45 JS |
388 | * M_RNOWAIT - don't block. |
389 | * M_NULLOK - return NULL instead of blocking. | |
a108bf71 | 390 | * M_ZERO - zero the returned memory. |
dc1fd4b3 MD |
391 | * M_USE_RESERVE - allow greater drawdown of the free list |
392 | * M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted | |
5b287bba MD |
393 | * |
394 | * MPSAFE | |
a108bf71 | 395 | */ |
8aca2bd4 | 396 | |
a108bf71 | 397 | void * |
8aca2bd4 | 398 | kmalloc(unsigned long size, struct malloc_type *type, int flags) |
a108bf71 MD |
399 | { |
400 | SLZone *z; | |
401 | SLChunk *chunk; | |
402 | SLGlobalData *slgd; | |
bba6a44d | 403 | struct globaldata *gd; |
a108bf71 | 404 | int zi; |
d2182dc1 MD |
405 | #ifdef INVARIANTS |
406 | int i; | |
407 | #endif | |
a108bf71 | 408 | |
b68ad50c | 409 | logmemory_quick(malloc_beg); |
bba6a44d MD |
410 | gd = mycpu; |
411 | slgd = &gd->gd_slab; | |
a108bf71 MD |
412 | |
413 | /* | |
414 | * XXX silly to have this in the critical path. | |
415 | */ | |
416 | if (type->ks_limit == 0) { | |
417 | crit_enter(); | |
418 | if (type->ks_limit == 0) | |
419 | malloc_init(type); | |
420 | crit_exit(); | |
421 | } | |
422 | ++type->ks_calls; | |
423 | ||
424 | /* | |
38e34349 MD |
425 | * Handle the case where the limit is reached. Panic if we can't return |
426 | * NULL. The original malloc code looped, but this tended to | |
a108bf71 | 427 | * simply deadlock the computer. |
38e34349 MD |
428 | * |
429 | * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used | |
430 | * to determine if a more complete limit check should be done. The | |
431 | * actual memory use is tracked via ks_memuse[cpu]. | |
a108bf71 | 432 | */ |
bba6a44d MD |
433 | while (type->ks_loosememuse >= type->ks_limit) { |
434 | int i; | |
435 | long ttl; | |
436 | ||
437 | for (i = ttl = 0; i < ncpus; ++i) | |
438 | ttl += type->ks_memuse[i]; | |
38e34349 | 439 | type->ks_loosememuse = ttl; /* not MP synchronized */ |
bba6a44d | 440 | if (ttl >= type->ks_limit) { |
f2b5daf9 MD |
441 | if (flags & M_NULLOK) { |
442 | logmemory(malloc, NULL, type, size, flags); | |
bba6a44d | 443 | return(NULL); |
f2b5daf9 | 444 | } |
bba6a44d MD |
445 | panic("%s: malloc limit exceeded", type->ks_shortdesc); |
446 | } | |
a108bf71 MD |
447 | } |
448 | ||
449 | /* | |
450 | * Handle the degenerate size == 0 case. Yes, this does happen. | |
451 | * Return a special pointer. This is to maintain compatibility with | |
452 | * the original malloc implementation. Certain devices, such as the | |
453 | * adaptec driver, not only allocate 0 bytes, they check for NULL and | |
454 | * also realloc() later on. Joy. | |
455 | */ | |
f2b5daf9 MD |
456 | if (size == 0) { |
457 | logmemory(malloc, ZERO_LENGTH_PTR, type, size, flags); | |
a108bf71 | 458 | return(ZERO_LENGTH_PTR); |
f2b5daf9 | 459 | } |
a108bf71 | 460 | |
a7cf0021 MD |
461 | /* |
462 | * Handle hysteresis from prior frees here in malloc(). We cannot | |
463 | * safely manipulate the kernel_map in free() due to free() possibly | |
464 | * being called via an IPI message or from sensitive interrupt code. | |
465 | */ | |
dc1fd4b3 | 466 | while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_RNOWAIT) == 0) { |
46a3f46d MD |
467 | crit_enter(); |
468 | if (slgd->NFreeZones > ZONE_RELS_THRESH) { /* crit sect race */ | |
469 | z = slgd->FreeZones; | |
470 | slgd->FreeZones = z->z_Next; | |
471 | --slgd->NFreeZones; | |
472 | kmem_slab_free(z, ZoneSize); /* may block */ | |
473 | } | |
474 | crit_exit(); | |
475 | } | |
476 | /* | |
477 | * XXX handle oversized frees that were queued from free(). | |
478 | */ | |
dc1fd4b3 | 479 | while (slgd->FreeOvZones && (flags & M_RNOWAIT) == 0) { |
46a3f46d MD |
480 | crit_enter(); |
481 | if ((z = slgd->FreeOvZones) != NULL) { | |
482 | KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC); | |
483 | slgd->FreeOvZones = z->z_Next; | |
484 | kmem_slab_free(z, z->z_ChunkSize); /* may block */ | |
485 | } | |
486 | crit_exit(); | |
a7cf0021 MD |
487 | } |
488 | ||
a108bf71 MD |
489 | /* |
490 | * Handle large allocations directly. There should not be very many of | |
491 | * these so performance is not a big issue. | |
492 | * | |
b543eeed MD |
493 | * The backend allocator is pretty nasty on an SMP system. Use the | |
494 | * slab allocator for one and two page-sized chunks even though we lose | |
495 | * some efficiency. XXX maybe fix mmio and the elf loader instead. | |
a108bf71 | 496 | */ |
b543eeed | 497 | if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) { |
a108bf71 MD |
498 | struct kmemusage *kup; |
499 | ||
500 | size = round_page(size); | |
501 | chunk = kmem_slab_alloc(size, PAGE_SIZE, flags); | |
f2b5daf9 MD |
502 | if (chunk == NULL) { |
503 | logmemory(malloc, NULL, type, size, flags); | |
a108bf71 | 504 | return(NULL); |
f2b5daf9 | 505 | } |
a108bf71 | 506 | flags &= ~M_ZERO; /* result already zero'd if M_ZERO was set */ |
8f1d5415 | 507 | flags |= M_PASSIVE_ZERO; |
a108bf71 MD |
508 | kup = btokup(chunk); |
509 | kup->ku_pagecnt = size / PAGE_SIZE; | |
bba6a44d | 510 | kup->ku_cpu = gd->gd_cpuid; |
a108bf71 MD |
511 | crit_enter(); |
512 | goto done; | |
513 | } | |
514 | ||
515 | /* | |
516 | * Attempt to allocate out of an existing zone. First try the free list, | |
517 | * then allocate out of unallocated space. If we find a good zone move | |
518 | * it to the head of the list so later allocations find it quickly | |
519 | * (we might have thousands of zones in the list). | |
520 | * | |
521 | * Note: zoneindex() will panic if the size is too large. | |
522 | */ | |
523 | zi = zoneindex(&size); | |
524 | KKASSERT(zi < NZONES); | |
525 | crit_enter(); | |
526 | if ((z = slgd->ZoneAry[zi]) != NULL) { | |
527 | KKASSERT(z->z_NFree > 0); | |
528 | ||
529 | /* | |
530 | * Remove us from the ZoneAry[] when we become empty | |
531 | */ | |
532 | if (--z->z_NFree == 0) { | |
533 | slgd->ZoneAry[zi] = z->z_Next; | |
534 | z->z_Next = NULL; | |
535 | } | |
536 | ||
537 | /* | |
538 | * Locate a chunk in a free page. This attempts to localize | |
539 | * reallocations into earlier pages without us having to sort | |
540 | * the chunk list. A chunk may still overlap a page boundary. | |
541 | */ | |
542 | while (z->z_FirstFreePg < ZonePageCount) { | |
543 | if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) { | |
544 | #ifdef DIAGNOSTIC | |
545 | /* | |
546 | * Diagnostic: c_Next is not total garbage. | |
547 | */ | |
548 | KKASSERT(chunk->c_Next == NULL || | |
549 | ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) == | |
550 | ((intptr_t)chunk & IN_SAME_PAGE_MASK)); | |
551 | #endif | |
6ab8e1da | 552 | #ifdef INVARIANTS |
c439ad8f | 553 | if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd) |
a108bf71 | 554 | panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount); |
c439ad8f | 555 | if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart) |
a108bf71 | 556 | panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount); |
10cc6608 | 557 | chunk_mark_allocated(z, chunk); |
6ab8e1da | 558 | #endif |
a108bf71 MD |
559 | z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next; |
560 | goto done; | |
561 | } | |
562 | ++z->z_FirstFreePg; | |
563 | } | |
564 | ||
565 | /* | |
1c5ca4f3 MD |
566 | * No chunks are available but NFree said we had some memory, so |
567 | * it must be available in the never-before-used-memory area | |
568 | * governed by UIndex. The consequences are very serious if our zone | |
569 | * got corrupted so we use an explicit panic rather than a KASSERT. | |
a108bf71 | 570 | */ |
1c5ca4f3 MD |
571 | if (z->z_UIndex + 1 != z->z_NMax) |
572 | z->z_UIndex = z->z_UIndex + 1; | |
573 | else | |
574 | z->z_UIndex = 0; | |
575 | if (z->z_UIndex == z->z_UEndIndex) | |
576 | panic("slaballoc: corrupted zone"); | |
577 | chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size); | |
8f1d5415 | 578 | if ((z->z_Flags & SLZF_UNOTZEROD) == 0) { |
6ab8e1da | 579 | flags &= ~M_ZERO; |
8f1d5415 MD |
580 | flags |= M_PASSIVE_ZERO; |
581 | } | |
10cc6608 MD |
582 | #if defined(INVARIANTS) |
583 | chunk_mark_allocated(z, chunk); | |
584 | #endif | |
a108bf71 MD |
585 | goto done; |
586 | } | |
587 | ||
588 | /* | |
589 | * If all zones are exhausted we need to allocate a new zone for this | |
590 | * index. Use M_ZERO to take advantage of pre-zeroed pages. Also see | |
6ab8e1da MD |
591 | * UAlloc use above in regards to M_ZERO. Note that when we are reusing |
592 | * a zone from the FreeZones list UAlloc'd data will not be zero'd, and | |
593 | * we do not pre-zero it because we do not want to mess up the L1 cache. | |
a108bf71 MD |
594 | * |
595 | * At least one subsystem, the tty code (see CROUND) expects power-of-2 | |
596 | * allocations to be power-of-2 aligned. We maintain compatibility by | |
597 | * adjusting the base offset below. | |
598 | */ | |
599 | { | |
600 | int off; | |
601 | ||
602 | if ((z = slgd->FreeZones) != NULL) { | |
603 | slgd->FreeZones = z->z_Next; | |
604 | --slgd->NFreeZones; | |
605 | bzero(z, sizeof(SLZone)); | |
6ab8e1da | 606 | z->z_Flags |= SLZF_UNOTZEROD; |
a108bf71 MD |
607 | } else { |
608 | z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO); | |
609 | if (z == NULL) | |
610 | goto fail; | |
611 | } | |
612 | ||
10cc6608 MD |
613 | /* |
614 | * How big is the base structure? | |
615 | */ | |
616 | #if defined(INVARIANTS) | |
617 | /* | |
618 | * Make room for z_Bitmap. An exact calculation is somewhat more | |
619 | * complicated, so a safe over-estimate is used instead. | |
620 | */ | |
621 | off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]); | |
622 | bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8); | |
623 | #else | |
624 | off = sizeof(SLZone); | |
625 | #endif | |
626 | ||
a108bf71 MD |
627 | /* |
628 | * Guarantee power-of-2 alignment for power-of-2-sized chunks. | |
629 | * Otherwise just 8-byte align the data. | |
630 | */ | |
631 | if ((size | (size - 1)) + 1 == (size << 1)) | |
10cc6608 | 632 | off = (off + size - 1) & ~(size - 1); |
a108bf71 | 633 | else |
10cc6608 | 634 | off = (off + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK; |
a108bf71 MD |
635 | z->z_Magic = ZALLOC_SLAB_MAGIC; |
636 | z->z_ZoneIndex = zi; | |
637 | z->z_NMax = (ZoneSize - off) / size; | |
638 | z->z_NFree = z->z_NMax - 1; | |
1c5ca4f3 MD |
639 | z->z_BasePtr = (char *)z + off; |
640 | z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax; | |
a108bf71 MD |
641 | z->z_ChunkSize = size; |
642 | z->z_FirstFreePg = ZonePageCount; | |
2db3b277 | 643 | z->z_CpuGd = gd; |
bba6a44d | 644 | z->z_Cpu = gd->gd_cpuid; |
1c5ca4f3 | 645 | chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size); |
a108bf71 MD |
646 | z->z_Next = slgd->ZoneAry[zi]; |
647 | slgd->ZoneAry[zi] = z; | |
8f1d5415 | 648 | if ((z->z_Flags & SLZF_UNOTZEROD) == 0) { |
6ab8e1da | 649 | flags &= ~M_ZERO; /* already zero'd */ |
8f1d5415 MD |
650 | flags |= M_PASSIVE_ZERO; |
651 | } | |
10cc6608 MD |
652 | #if defined(INVARIANTS) |
653 | chunk_mark_allocated(z, chunk); | |
654 | #endif | |
1c5ca4f3 MD |
655 | |
656 | /* | |
657 | * Slide the base index for initial allocations out of the next | |
658 | * zone we create so we do not over-weight the lower part of the | |
659 | * cpu memory caches. | |
660 | */ | |
661 | slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE) | |
662 | & (ZALLOC_MAX_ZONE_SIZE - 1); | |
a108bf71 MD |
663 | } |
664 | done: | |
bba6a44d MD |
665 | ++type->ks_inuse[gd->gd_cpuid]; |
666 | type->ks_memuse[gd->gd_cpuid] += size; | |
38e34349 | 667 | type->ks_loosememuse += size; /* not MP synchronized */ |
a108bf71 MD |
668 | crit_exit(); |
669 | if (flags & M_ZERO) | |
670 | bzero(chunk, size); | |
bba6a44d | 671 | #ifdef INVARIANTS |
d2182dc1 MD |
672 | else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) { |
673 | if (use_malloc_pattern) { | |
674 | for (i = 0; i < size; i += sizeof(int)) { | |
675 | *(int *)((char *)chunk + i) = -1; | |
676 | } | |
677 | } | |
bba6a44d | 678 | chunk->c_Next = (void *)-1; /* avoid accidental double-free check */ |
d2182dc1 | 679 | } |
bba6a44d | 680 | #endif |
f2b5daf9 | 681 | logmemory(malloc, chunk, type, size, flags); |
a108bf71 MD |
682 | return(chunk); |
683 | fail: | |
684 | crit_exit(); | |
f2b5daf9 | 685 | logmemory(malloc, NULL, type, size, flags); |
a108bf71 MD |
686 | return(NULL); |
687 | } | |
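/*
 * Illustrative aside (not part of the original source): the zone setup above
 * keeps power-of-2 chunks power-of-2 aligned (an API requirement noted in the
 * header) by testing (size | (size - 1)) + 1 == (size << 1) -- true only for
 * powers of two -- and then rounding the header size "off" up to that
 * boundary.  A hedged standalone check of the same bit trick; toy_* names are
 * hypothetical.
 */
#include <assert.h>
#include <stdio.h>

static int
toy_is_pow2(unsigned long size)		/* same test kmalloc() uses above */
{
	return ((size | (size - 1)) + 1 == (size << 1));
}

static unsigned long
toy_base_off(unsigned long off, unsigned long size)
{
	if (toy_is_pow2(size))
		return ((off + size - 1) & ~(size - 1));  /* pow2 alignment */
	return ((off + 7) & ~7UL);			  /* else 8-byte align */
}

int
main(void)
{
	unsigned long s;

	for (s = 8; s <= 4096; s += 8)
		assert(toy_is_pow2(s) == ((s & (s - 1)) == 0));

	/* e.g. a 200-byte zone header followed by 256-byte chunks starts at 256 */
	assert(toy_base_off(200, 256) == 256);
	printf("first 256-byte chunk lands at offset %lu\n", toy_base_off(200, 256));
	return (0);
}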
688 | ||
38e34349 MD |
689 | /* |
690 | * kernel realloc. (SLAB ALLOCATOR) (MP SAFE) | |
691 | * | |
692 | * Generally speaking this routine is not called very often and we do | |
693 | * not attempt to optimize it beyond reusing the same pointer if the | |
694 | * new size fits within the chunking of the old pointer's zone. | |
695 | */ | |
a108bf71 | 696 | void * |
8aca2bd4 | 697 | krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags) |
a108bf71 MD |
698 | { |
699 | SLZone *z; | |
700 | void *nptr; | |
701 | unsigned long osize; | |
702 | ||
eb7f3e3c MD |
703 | KKASSERT((flags & M_ZERO) == 0); /* not supported */ |
704 | ||
a108bf71 | 705 | if (ptr == NULL || ptr == ZERO_LENGTH_PTR) |
efda3bd0 | 706 | return(kmalloc(size, type, flags)); |
a108bf71 | 707 | if (size == 0) { |
efda3bd0 | 708 | kfree(ptr, type); |
a108bf71 MD |
709 | return(NULL); |
710 | } | |
711 | ||
712 | /* | |
713 | * Handle oversized allocations. XXX we really should require that a | |
714 | * size be passed to free() instead of this nonsense. | |
715 | */ | |
716 | { | |
717 | struct kmemusage *kup; | |
718 | ||
719 | kup = btokup(ptr); | |
720 | if (kup->ku_pagecnt) { | |
721 | osize = kup->ku_pagecnt << PAGE_SHIFT; | |
722 | if (osize == round_page(size)) | |
723 | return(ptr); | |
efda3bd0 | 724 | if ((nptr = kmalloc(size, type, flags)) == NULL) |
a108bf71 MD |
725 | return(NULL); |
726 | bcopy(ptr, nptr, min(size, osize)); | |
efda3bd0 | 727 | kfree(ptr, type); |
a108bf71 MD |
728 | return(nptr); |
729 | } | |
730 | } | |
731 | ||
732 | /* | |
733 | * Get the original allocation's zone. If the new request winds up | |
734 | * using the same chunk size we do not have to do anything. | |
735 | */ | |
736 | z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask); | |
737 | KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC); | |
738 | ||
739 | zoneindex(&size); | |
740 | if (z->z_ChunkSize == size) | |
741 | return(ptr); | |
742 | ||
743 | /* | |
744 | * Allocate memory for the new request size. Note that zoneindex has | |
745 | * already adjusted the request size to the appropriate chunk size, which | |
746 | * should optimize our bcopy(). Then copy and return the new pointer. | |
747 | */ | |
efda3bd0 | 748 | if ((nptr = kmalloc(size, type, flags)) == NULL) |
a108bf71 MD |
749 | return(NULL); |
750 | bcopy(ptr, nptr, min(size, z->z_ChunkSize)); | |
efda3bd0 | 751 | kfree(ptr, type); |
a108bf71 MD |
752 | return(nptr); |
753 | } | |
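/*
 * Illustrative aside (not part of the original source): krealloc() above
 * keeps the old pointer whenever the new request rounds to the same chunk
 * size, so growing a 130-byte allocation to 140 bytes inside a 144-byte
 * chunk costs nothing.  A hedged sketch of that decision using the 16-byte
 * chunking of the 128-255 class; toy_chunk_128_255() is hypothetical.
 */
#include <stdio.h>

static unsigned long
toy_chunk_128_255(unsigned long n)	/* 16-byte chunking, as in zoneindex() */
{
	return ((n + 15) & ~15UL);
}

int
main(void)
{
	unsigned long oldsz = 130, newsz = 140;

	if (toy_chunk_128_255(oldsz) == toy_chunk_128_255(newsz))
		printf("reuse: both round to %lu-byte chunks, pointer unchanged\n",
		       toy_chunk_128_255(newsz));
	else
		printf("different chunk size: allocate, copy, free the old block\n");
	return (0);
}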
754 | ||
45d2b1d8 MD |
755 | /* |
756 | * Return the kmalloc limit for this type, in bytes. | |
757 | */ | |
758 | long | |
759 | kmalloc_limit(struct malloc_type *type) | |
760 | { | |
761 | if (type->ks_limit == 0) { | |
762 | crit_enter(); | |
763 | if (type->ks_limit == 0) | |
764 | malloc_init(type); | |
765 | crit_exit(); | |
766 | } | |
767 | return(type->ks_limit); | |
768 | } | |
769 | ||
38e34349 MD |
770 | /* |
771 | * Allocate a copy of the specified string. | |
772 | * | |
773 | * (MP SAFE) (MAY BLOCK) | |
774 | */ | |
1ac06773 | 775 | char * |
59302080 | 776 | kstrdup(const char *str, struct malloc_type *type) |
1ac06773 MD |
777 | { |
778 | int zlen; /* length inclusive of terminating NUL */ | |
779 | char *nstr; | |
780 | ||
781 | if (str == NULL) | |
782 | return(NULL); | |
783 | zlen = strlen(str) + 1; | |
efda3bd0 | 784 | nstr = kmalloc(zlen, type, M_WAITOK); |
1ac06773 MD |
785 | bcopy(str, nstr, zlen); |
786 | return(nstr); | |
787 | } | |
788 | ||
1d712609 | 789 | #ifdef SMP |
a108bf71 MD |
790 | /* |
791 | * free() (SLAB ALLOCATOR) | |
792 | * | |
bba6a44d | 793 | * Free the specified chunk of memory. |
a108bf71 MD |
794 | */ |
795 | static | |
796 | void | |
797 | free_remote(void *ptr) | |
798 | { | |
f2b5daf9 | 799 | logmemory(free_remote, ptr, *(struct malloc_type **)ptr, -1, 0); |
efda3bd0 | 800 | kfree(ptr, *(struct malloc_type **)ptr); |
a108bf71 MD |
801 | } |
802 | ||
1d712609 MD |
803 | #endif |
804 | ||
38e34349 | 805 | /* |
5b287bba | 806 | * free (SLAB ALLOCATOR) |
38e34349 MD |
807 | * |
808 | * Free a memory block previously allocated by malloc. Note that we do not | |
809 | * attempt to update ks_loosememuse as MP races could prevent us from | |
810 | * checking memory limits in malloc. | |
5b287bba MD |
811 | * |
812 | * MPSAFE | |
38e34349 | 813 | */ |
a108bf71 | 814 | void |
8aca2bd4 | 815 | kfree(void *ptr, struct malloc_type *type) |
a108bf71 MD |
816 | { |
817 | SLZone *z; | |
818 | SLChunk *chunk; | |
819 | SLGlobalData *slgd; | |
bba6a44d | 820 | struct globaldata *gd; |
a108bf71 MD |
821 | int pgno; |
822 | ||
b68ad50c | 823 | logmemory_quick(free_beg); |
bba6a44d MD |
824 | gd = mycpu; |
825 | slgd = &gd->gd_slab; | |
a108bf71 | 826 | |
d39911d9 JS |
827 | if (ptr == NULL) |
828 | panic("trying to free NULL pointer"); | |
829 | ||
a108bf71 MD |
830 | /* |
831 | * Handle special 0-byte allocations | |
832 | */ | |
f2b5daf9 MD |
833 | if (ptr == ZERO_LENGTH_PTR) { |
834 | logmemory(free_zero, ptr, type, -1, 0); | |
b68ad50c | 835 | logmemory_quick(free_end); |
a108bf71 | 836 | return; |
f2b5daf9 | 837 | } |
a108bf71 MD |
838 | |
839 | /* | |
840 | * Handle oversized allocations. XXX we really should require that a | |
841 | * size be passed to free() instead of this nonsense. | |
bba6a44d MD |
842 | * |
843 | * This code is never called via an ipi. | |
a108bf71 MD |
844 | */ |
845 | { | |
846 | struct kmemusage *kup; | |
847 | unsigned long size; | |
848 | ||
849 | kup = btokup(ptr); | |
850 | if (kup->ku_pagecnt) { | |
851 | size = kup->ku_pagecnt << PAGE_SHIFT; | |
852 | kup->ku_pagecnt = 0; | |
a108bf71 MD |
853 | #ifdef INVARIANTS |
854 | KKASSERT(sizeof(weirdary) <= size); | |
855 | bcopy(weirdary, ptr, sizeof(weirdary)); | |
856 | #endif | |
bba6a44d MD |
857 | /* |
858 | * note: we always adjust our cpu's slot, not the originating | |
859 | * cpu (kup->ku_cpu). The statistics are in aggregate. | |
81f5fc99 MD |
860 | * |
861 | * note: XXX we have still inherited the interrupts-can't-block | |
862 | * assumption. An interrupt thread does not bump | |
863 | * gd_intr_nesting_level so check TDF_INTTHREAD. This is | |
864 | * primarily until we can fix softupdate's assumptions about free(). | |
bba6a44d MD |
865 | */ |
866 | crit_enter(); | |
867 | --type->ks_inuse[gd->gd_cpuid]; | |
868 | type->ks_memuse[gd->gd_cpuid] -= size; | |
81f5fc99 | 869 | if (mycpu->gd_intr_nesting_level || (gd->gd_curthread->td_flags & TDF_INTTHREAD)) { |
f2b5daf9 | 870 | logmemory(free_ovsz_delayed, ptr, type, size, 0); |
46a3f46d MD |
871 | z = (SLZone *)ptr; |
872 | z->z_Magic = ZALLOC_OVSZ_MAGIC; | |
873 | z->z_Next = slgd->FreeOvZones; | |
874 | z->z_ChunkSize = size; | |
875 | slgd->FreeOvZones = z; | |
876 | crit_exit(); | |
877 | } else { | |
bba6a44d | 878 | crit_exit(); |
f2b5daf9 | 879 | logmemory(free_ovsz, ptr, type, size, 0); |
46a3f46d MD |
880 | kmem_slab_free(ptr, size); /* may block */ |
881 | } | |
b68ad50c | 882 | logmemory_quick(free_end); |
a108bf71 MD |
883 | return; |
884 | } | |
885 | } | |
886 | ||
887 | /* | |
888 | * Zone case. Figure out the zone based on the fact that it is | |
889 | * ZoneSize aligned. | |
890 | */ | |
891 | z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask); | |
892 | KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC); | |
893 | ||
894 | /* | |
895 | * If we do not own the zone then forward the request to the | |
4c9f5a7f MD |
896 | * cpu that does. Since the timing is non-critical, a passive |
897 | * message is sent. | |
a108bf71 | 898 | */ |
2db3b277 | 899 | if (z->z_CpuGd != gd) { |
a108bf71 | 900 | *(struct malloc_type **)ptr = type; |
75c7ffea | 901 | #ifdef SMP |
f2b5daf9 | 902 | logmemory(free_request, ptr, type, z->z_ChunkSize, 0); |
4c9f5a7f | 903 | lwkt_send_ipiq_passive(z->z_CpuGd, free_remote, ptr); |
75c7ffea MD |
904 | #else |
905 | panic("Corrupt SLZone"); | |
906 | #endif | |
b68ad50c | 907 | logmemory_quick(free_end); |
a108bf71 MD |
908 | return; |
909 | } | |
910 | ||
f2b5daf9 MD |
911 | logmemory(free_chunk, ptr, type, z->z_ChunkSize, 0); |
912 | ||
a108bf71 MD |
913 | if (type->ks_magic != M_MAGIC) |
914 | panic("free: malloc type lacks magic"); | |
915 | ||
916 | crit_enter(); | |
917 | pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT; | |
918 | chunk = ptr; | |
919 | ||
bba6a44d | 920 | #ifdef INVARIANTS |
a108bf71 | 921 | /* |
bba6a44d MD |
922 | * Attempt to detect a double-free. To reduce overhead we only check |
923 | * if there appears to be link pointer at the base of the data. | |
a108bf71 MD |
924 | */ |
925 | if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) { | |
926 | SLChunk *scan; | |
927 | for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) { | |
928 | if (scan == chunk) | |
929 | panic("Double free at %p", chunk); | |
930 | } | |
931 | } | |
10cc6608 | 932 | chunk_mark_free(z, chunk); |
a108bf71 MD |
933 | #endif |
934 | ||
935 | /* | |
936 | * Put weird data into the memory to detect modifications after freeing, | |
937 | * illegal pointer use after freeing (we should fault on the odd address), | |
938 | * and so forth. XXX needs more work, see the old malloc code. | |
939 | */ | |
940 | #ifdef INVARIANTS | |
941 | if (z->z_ChunkSize < sizeof(weirdary)) | |
942 | bcopy(weirdary, chunk, z->z_ChunkSize); | |
943 | else | |
944 | bcopy(weirdary, chunk, sizeof(weirdary)); | |
945 | #endif | |
946 | ||
947 | /* | |
948 | * Add this free non-zero'd chunk to a linked list for reuse, adjust | |
949 | * z_FirstFreePg. | |
950 | */ | |
6ab8e1da | 951 | #ifdef INVARIANTS |
c439ad8f | 952 | if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd) |
fc92d4aa | 953 | panic("BADFREE %p", chunk); |
a108bf71 MD |
954 | #endif |
955 | chunk->c_Next = z->z_PageAry[pgno]; | |
956 | z->z_PageAry[pgno] = chunk; | |
6ab8e1da | 957 | #ifdef INVARIANTS |
c439ad8f | 958 | if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart) |
a108bf71 | 959 | panic("BADFREE2"); |
6ab8e1da | 960 | #endif |
a108bf71 MD |
961 | if (z->z_FirstFreePg > pgno) |
962 | z->z_FirstFreePg = pgno; | |
963 | ||
964 | /* | |
965 | * Bump the number of free chunks. If it becomes non-zero the zone | |
966 | * must be added back onto the appropriate list. | |
967 | */ | |
968 | if (z->z_NFree++ == 0) { | |
969 | z->z_Next = slgd->ZoneAry[z->z_ZoneIndex]; | |
970 | slgd->ZoneAry[z->z_ZoneIndex] = z; | |
971 | } | |
972 | ||
bba6a44d MD |
973 | --type->ks_inuse[z->z_Cpu]; |
974 | type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize; | |
a108bf71 MD |
975 | |
976 | /* | |
977 | * If the zone becomes totally free, and there are other zones we | |
a7cf0021 MD |
978 | * can allocate from, move this zone to the FreeZones list. Since |
979 | * this code can be called from an IPI callback, do *NOT* try to mess | |
980 | * with kernel_map here. Hysteresis will be performed at malloc() time. | |
a108bf71 MD |
981 | */ |
982 | if (z->z_NFree == z->z_NMax && | |
983 | (z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z) | |
984 | ) { | |
985 | SLZone **pz; | |
986 | ||
987 | for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next) | |
988 | ; | |
989 | *pz = z->z_Next; | |
990 | z->z_Magic = -1; | |
a7cf0021 MD |
991 | z->z_Next = slgd->FreeZones; |
992 | slgd->FreeZones = z; | |
993 | ++slgd->NFreeZones; | |
a108bf71 | 994 | } |
b68ad50c | 995 | logmemory_quick(free_end); |
a108bf71 MD |
996 | crit_exit(); |
997 | } | |
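/*
 * Illustrative aside (not part of the original source): kfree() above
 * recovers the owning zone purely from the pointer because every zone is
 * allocated ZoneSize-aligned (kmem_slab_alloc(ZoneSize, ZoneSize, ...)), so
 * masking off the low bits lands on the SLZone header and its z_Cpu/z_CpuGd
 * ownership fields.  A hedged user-space analog using posix_memalign() for
 * the aligned backing store; the toy_* names and the 128K zone size are
 * assumptions.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_ZONE_SIZE	(128 * 1024)
#define TOY_ZONE_MASK	(TOY_ZONE_SIZE - 1)

typedef struct toy_zone {
	uint32_t magic;			/* stands in for z_Magic */
	int	 owner_cpu;		/* stands in for z_Cpu   */
} toy_zone;

int
main(void)
{
	void *mem;
	toy_zone *z, *found;
	char *chunk;

	/* zone storage is ZoneSize-aligned, like the kernel's zones */
	if (posix_memalign(&mem, TOY_ZONE_SIZE, TOY_ZONE_SIZE) != 0)
		return (1);
	z = mem;
	z->magic = 0x5ab5ab5a;
	z->owner_cpu = 0;

	/* pretend some chunk in the middle of the zone was handed out */
	chunk = (char *)mem + 4096 + 24;

	/* free side: mask the low bits to find the zone header again */
	found = (toy_zone *)((uintptr_t)chunk & ~(uintptr_t)TOY_ZONE_MASK);
	assert(found == z && found->magic == 0x5ab5ab5a);
	printf("chunk %p belongs to zone %p owned by cpu %d\n",
	       (void *)chunk, (void *)found, found->owner_cpu);

	free(mem);
	return (0);
}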
998 | ||
10cc6608 MD |
999 | #if defined(INVARIANTS) |
1000 | /* | |
1001 | * Helper routines for sanity checks | |
1002 | */ | |
1003 | static | |
1004 | void | |
1005 | chunk_mark_allocated(SLZone *z, void *chunk) | |
1006 | { | |
1007 | int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize; | |
1008 | __uint32_t *bitptr; | |
1009 | ||
1010 | KASSERT(bitdex >= 0 && bitdex < z->z_NMax, ("memory chunk %p bit index %d is illegal", chunk, bitdex)); | |
1011 | bitptr = &z->z_Bitmap[bitdex >> 5]; | |
1012 | bitdex &= 31; | |
1013 | KASSERT((*bitptr & (1 << bitdex)) == 0, ("memory chunk %p is already allocated!", chunk)); | |
1014 | *bitptr |= 1 << bitdex; | |
1015 | } | |
1016 | ||
1017 | static | |
1018 | void | |
1019 | chunk_mark_free(SLZone *z, void *chunk) | |
1020 | { | |
1021 | int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize; | |
1022 | __uint32_t *bitptr; | |
1023 | ||
1024 | KASSERT(bitdex >= 0 && bitdex < z->z_NMax, ("memory chunk %p bit index %d is illegal!", chunk, bitdex)); | |
1025 | bitptr = &z->z_Bitmap[bitdex >> 5]; | |
1026 | bitdex &= 31; | |
1027 | KASSERT((*bitptr & (1 << bitdex)) != 0, ("memory chunk %p is already free!", chunk)); | |
1028 | *bitptr &= ~(1 << bitdex); | |
1029 | } | |
1030 | ||
1031 | #endif | |
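/*
 * Illustrative aside (not part of the original source): the INVARIANTS
 * helpers above track each chunk in a one-bit-per-chunk bitmap, using
 * bitdex >> 5 to select the 32-bit word and bitdex & 31 to select the bit,
 * which is how double allocations and double frees are caught.  A hedged
 * standalone version of the same bit math; the toy_* names are hypothetical.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_NCHUNKS 100

static uint32_t toy_bitmap[(TOY_NCHUNKS + 31) / 32];

static void
toy_mark_allocated(int bitdex)
{
	uint32_t *bitptr = &toy_bitmap[bitdex >> 5];	/* which 32-bit word */
	int bit = bitdex & 31;				/* which bit in it   */

	assert((*bitptr & (1u << bit)) == 0);		/* catches double-alloc */
	*bitptr |= 1u << bit;
}

static void
toy_mark_free(int bitdex)
{
	uint32_t *bitptr = &toy_bitmap[bitdex >> 5];
	int bit = bitdex & 31;

	assert((*bitptr & (1u << bit)) != 0);		/* catches double-free */
	*bitptr &= ~(1u << bit);
}

int
main(void)
{
	toy_mark_allocated(37);		/* word 1, bit 5 */
	toy_mark_free(37);
	toy_mark_allocated(37);		/* legal again after the free */
	printf("bitmap word 1 = %#x\n", (unsigned)toy_bitmap[1]);
	return (0);
}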
1032 | ||
a108bf71 | 1033 | /* |
5b287bba | 1034 | * kmem_slab_alloc() |
a108bf71 MD |
1035 | * |
1036 | * Directly allocate and wire kernel memory in PAGE_SIZE chunks with the | |
1037 | * specified alignment. M_* flags are expected in the flags field. | |
1038 | * | |
1039 | * Alignment must be a multiple of PAGE_SIZE. | |
1040 | * | |
1041 | * NOTE! XXX For the moment we use vm_map_entry_reserve/release(), | |
1042 | * but when we move zalloc() over to use this function as its backend | |
1043 | * we will have to switch to kreserve/krelease and call reserve(0) | |
1044 | * after the new space is made available. | |
dc1fd4b3 MD |
1045 | * |
1046 | * Interrupt code which has preempted other code is not allowed to | |
c397c465 MD |
1047 | * use PQ_CACHE pages. However, if an interrupt thread is run |
1048 | * non-preemptively or blocks and then runs non-preemptively, then | |
1049 | * it is free to use PQ_CACHE pages. | |
38e34349 MD |
1050 | * |
1051 | * This routine will currently obtain the BGL. | |
5b287bba MD |
1052 | * |
1053 | * MPALMOSTSAFE - acquires mplock | |
a108bf71 MD |
1054 | */ |
1055 | static void * | |
1056 | kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags) | |
1057 | { | |
1058 | vm_size_t i; | |
1059 | vm_offset_t addr; | |
1de1e800 | 1060 | int count, vmflags, base_vmflags; |
dc1fd4b3 | 1061 | thread_t td; |
a108bf71 MD |
1062 | |
1063 | size = round_page(size); | |
e4846942 | 1064 | addr = vm_map_min(&kernel_map); |
a108bf71 MD |
1065 | |
1066 | /* | |
5c39c498 MD |
1067 | * Reserve properly aligned space from kernel_map. RNOWAIT allocations |
1068 | * cannot block. | |
a108bf71 | 1069 | */ |
5c39c498 MD |
1070 | if (flags & M_RNOWAIT) { |
1071 | if (try_mplock() == 0) | |
1072 | return(NULL); | |
1073 | } else { | |
1074 | get_mplock(); | |
1075 | } | |
a108bf71 MD |
1076 | count = vm_map_entry_reserve(MAP_RESERVE_COUNT); |
1077 | crit_enter(); | |
e4846942 MD |
1078 | vm_map_lock(&kernel_map); |
1079 | if (vm_map_findspace(&kernel_map, addr, size, align, &addr)) { | |
1080 | vm_map_unlock(&kernel_map); | |
8cb2bf45 | 1081 | if ((flags & M_NULLOK) == 0) |
a108bf71 MD |
1082 | panic("kmem_slab_alloc(): kernel_map ran out of space!"); |
1083 | crit_exit(); | |
1084 | vm_map_entry_release(count); | |
38e34349 | 1085 | rel_mplock(); |
a108bf71 MD |
1086 | return(NULL); |
1087 | } | |
e4846942 MD |
1088 | |
1089 | /* | |
1090 | * kernel_object maps 1:1 to kernel_map. | |
1091 | */ | |
c439ad8f | 1092 | vm_object_reference(&kernel_object); |
e4846942 MD |
1093 | vm_map_insert(&kernel_map, &count, |
1094 | &kernel_object, addr, addr, addr + size, | |
1b874851 MD |
1095 | VM_MAPTYPE_NORMAL, |
1096 | VM_PROT_ALL, VM_PROT_ALL, | |
1097 | 0); | |
a108bf71 | 1098 | |
dc1fd4b3 | 1099 | td = curthread; |
dc1fd4b3 | 1100 | |
1de1e800 JS |
1101 | base_vmflags = 0; |
1102 | if (flags & M_ZERO) | |
1103 | base_vmflags |= VM_ALLOC_ZERO; | |
1104 | if (flags & M_USE_RESERVE) | |
1105 | base_vmflags |= VM_ALLOC_SYSTEM; | |
1106 | if (flags & M_USE_INTERRUPT_RESERVE) | |
1107 | base_vmflags |= VM_ALLOC_INTERRUPT; | |
1108 | if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) | |
1109 | panic("kmem_slab_alloc: bad flags %08x (%p)", flags, ((int **)&size)[-1]); | |
1110 | ||
1111 | ||
a108bf71 MD |
1112 | /* |
1113 | * Allocate the pages. Do not mess with the PG_ZERO flag yet. | |
1114 | */ | |
1115 | for (i = 0; i < size; i += PAGE_SIZE) { | |
1116 | vm_page_t m; | |
fe1e98d0 MD |
1117 | |
1118 | /* | |
c397c465 MD |
1119 | * VM_ALLOC_NORMAL can only be set if we are not preempting. |
1120 | * | |
1121 | * VM_ALLOC_SYSTEM is automatically set if we are preempting and | |
1122 | * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is | |
1123 | * implied in this case), though I'm not sure if we really need to do | |
1124 | * that. | |
fe1e98d0 | 1125 | */ |
1de1e800 | 1126 | vmflags = base_vmflags; |
c397c465 | 1127 | if (flags & M_WAITOK) { |
1de1e800 | 1128 | if (td->td_preempted) |
fe1e98d0 | 1129 | vmflags |= VM_ALLOC_SYSTEM; |
1de1e800 | 1130 | else |
dc1fd4b3 | 1131 | vmflags |= VM_ALLOC_NORMAL; |
dc1fd4b3 | 1132 | } |
a108bf71 | 1133 | |
e4846942 | 1134 | m = vm_page_alloc(&kernel_object, OFF_TO_IDX(addr + i), vmflags); |
dc1fd4b3 MD |
1135 | |
1136 | /* | |
1137 | * If the allocation failed we either return NULL or we retry. | |
1138 | * | |
c397c465 MD |
1139 | * If M_WAITOK is specified we wait for more memory and retry. |
1140 | * If M_WAITOK is specified from a preemption we yield instead of | |
1141 | * wait. Livelock will not occur because the interrupt thread | |
1142 | * will not be preempting anyone the second time around after the | |
1143 | * yield. | |
dc1fd4b3 | 1144 | */ |
a108bf71 | 1145 | if (m == NULL) { |
c397c465 | 1146 | if (flags & M_WAITOK) { |
fe1e98d0 | 1147 | if (td->td_preempted) { |
e4846942 | 1148 | vm_map_unlock(&kernel_map); |
dc1fd4b3 | 1149 | lwkt_yield(); |
e4846942 | 1150 | vm_map_lock(&kernel_map); |
dc1fd4b3 | 1151 | } else { |
e4846942 | 1152 | vm_map_unlock(&kernel_map); |
dc1fd4b3 | 1153 | vm_wait(); |
e4846942 | 1154 | vm_map_lock(&kernel_map); |
dc1fd4b3 | 1155 | } |
a108bf71 MD |
1156 | i -= PAGE_SIZE; /* retry */ |
1157 | continue; | |
1158 | } | |
dc1fd4b3 MD |
1159 | |
1160 | /* | |
1161 | * We were unable to recover, cleanup and return NULL | |
1162 | */ | |
a108bf71 MD |
1163 | while (i != 0) { |
1164 | i -= PAGE_SIZE; | |
e4846942 | 1165 | m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i)); |
17cde63e | 1166 | /* page should already be busy */ |
a108bf71 MD |
1167 | vm_page_free(m); |
1168 | } | |
e4846942 MD |
1169 | vm_map_delete(&kernel_map, addr, addr + size, &count); |
1170 | vm_map_unlock(&kernel_map); | |
a108bf71 MD |
1171 | crit_exit(); |
1172 | vm_map_entry_release(count); | |
38e34349 | 1173 | rel_mplock(); |
a108bf71 MD |
1174 | return(NULL); |
1175 | } | |
1176 | } | |
1177 | ||
1178 | /* | |
dc1fd4b3 MD |
1179 | * Success! |
1180 | * | |
a108bf71 MD |
1181 | * Mark the map entry as non-pageable using a routine that allows us to |
1182 | * populate the underlying pages. | |
17cde63e MD |
1183 | * |
1184 | * The pages were busied by the allocations above. | |
a108bf71 | 1185 | */ |
e4846942 | 1186 | vm_map_set_wired_quick(&kernel_map, addr, size, &count); |
a108bf71 MD |
1187 | crit_exit(); |
1188 | ||
1189 | /* | |
1190 | * Enter the pages into the pmap and deal with PG_ZERO and M_ZERO. | |
1191 | */ | |
1192 | for (i = 0; i < size; i += PAGE_SIZE) { | |
1193 | vm_page_t m; | |
1194 | ||
e4846942 | 1195 | m = vm_page_lookup(&kernel_object, OFF_TO_IDX(addr + i)); |
a108bf71 | 1196 | m->valid = VM_PAGE_BITS_ALL; |
17cde63e | 1197 | /* page should already be busy */ |
a108bf71 MD |
1198 | vm_page_wire(m); |
1199 | vm_page_wakeup(m); | |
fbbaeba3 | 1200 | pmap_enter(&kernel_pmap, addr + i, m, VM_PROT_ALL, 1); |
a108bf71 MD |
1201 | if ((m->flags & PG_ZERO) == 0 && (flags & M_ZERO)) |
1202 | bzero((char *)addr + i, PAGE_SIZE); | |
1203 | vm_page_flag_clear(m, PG_ZERO); | |
17cde63e MD |
1204 | KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED)); |
1205 | vm_page_flag_set(m, PG_REFERENCED); | |
a108bf71 | 1206 | } |
e4846942 | 1207 | vm_map_unlock(&kernel_map); |
a108bf71 | 1208 | vm_map_entry_release(count); |
38e34349 | 1209 | rel_mplock(); |
a108bf71 MD |
1210 | return((void *)addr); |
1211 | } | |
1212 | ||
38e34349 | 1213 | /* |
5b287bba MD |
1214 | * kmem_slab_free() |
1215 | * | |
1216 | * MPALMOSTSAFE - acquires mplock | |
38e34349 | 1217 | */ |
a108bf71 MD |
1218 | static void |
1219 | kmem_slab_free(void *ptr, vm_size_t size) | |
1220 | { | |
38e34349 | 1221 | get_mplock(); |
a108bf71 | 1222 | crit_enter(); |
e4846942 | 1223 | vm_map_remove(&kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size); |
a108bf71 | 1224 | crit_exit(); |
38e34349 | 1225 | rel_mplock(); |
a108bf71 MD |
1226 | } |
1227 |