2 * Copyright (c) 2006-2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
31 #include <sys/kernel.h>
32 #include <sys/systm.h>
33 #include <sys/malloc.h>
35 #include <sys/debug.h>
36 #include <sys/mutex.h>
37 #include <sys/vmmeter.h>
39 #include <vm/vm_page.h>
40 #include <vm/vm_object.h>
41 #include <vm/vm_kern.h>
42 #include <vm/vm_map.h>
45 #include <sys/queue.h>
46 #include <sys/stack.h>
/* malloc(9) type under which all Solaris-compat allocations are accounted. */
50 MALLOC_DEFINE(M_SOLARIS, "solaris", "Solaris");
/*
 * Shim the kernel malloc(size, type, flags)/free(addr, type) signatures
 * onto plain malloc(size)/free(addr) by discarding the extra arguments.
 * NOTE(review): this chunk has gaps (stray original line numbers, missing
 * lines) -- presumably these #defines live in a !_KERNEL branch; confirm
 * against the unmangled file.
 */
52 #define malloc(size, type, flags) malloc(size)
53 #define free(addr, type) free(addr)
/*
 * KMEM_DEBUG leak tracking: each allocation is prefixed with a
 * struct kmem_item carrying list linkage and a saved call stack
 * (see stack_save() in zfs_kmem_alloc()).  The struct declaration's
 * opening is not visible in this chunk; only its linkage member is.
 */
59 LIST_ENTRY(kmem_item) next;
/* Global list of live allocations, protected by kmem_items_mtx. */
61 static LIST_HEAD(, kmem_item) kmem_items;
62 static struct mtx kmem_items_mtx;
/* Initialize the tracking-list mutex automatically at boot. */
63 MTX_SYSINIT(kmem_items_mtx, &kmem_items_mtx, "kmem_items", MTX_DEF);
64 #endif /* KMEM_DEBUG */
/*
 * zfs_kmem_alloc: Solaris kmem_alloc() workalike on top of malloc(9).
 * Under KMEM_DEBUG the request is enlarged by sizeof(struct kmem_item);
 * the prefix records the caller's stack and is linked onto the global
 * kmem_items list so leaks can be reported at shutdown (kmem_show()).
 * NOTE(review): lines are missing from this chunk (local declarations,
 * the KM_SLEEP assertion body, #ifdef structure); comments below hedge
 * accordingly -- verify against the original file.
 */
69 zfs_kmem_alloc(size_t size, int kmflags)
/* Reserve room for the debug header in front of the caller's buffer. */
75 size += sizeof(struct kmem_item);
77 p = malloc(size, M_SOLARIS, kmflags);
/* KM_SLEEP callers must never see NULL -- presumably asserted on the
 * (missing) following line; confirm against the original. */
79 if (kmflags & KM_SLEEP)
/* Advance past the debug header; the caller gets the payload address. */
85 p = (u_char *)p + sizeof(struct kmem_item);
/* Record the allocating call stack for the leak report. */
86 stack_save(&i->stack);
87 mtx_lock(&kmem_items_mtx);
88 LIST_INSERT_HEAD(&kmem_items, i, next);
89 mtx_unlock(&kmem_items_mtx);
/*
 * zfs_kmem_free: Solaris kmem_free() workalike.  Under KMEM_DEBUG the
 * header prepended by zfs_kmem_alloc() is located, unlinked from the
 * tracking list, and the original malloc(9) pointer is freed.
 * NOTE(review): lines are missing here (NULL-check body, the
 * LIST_FOREACH match condition); comments hedge accordingly.
 */
96 zfs_kmem_free(void *buf, size_t size __unused)
/* Freeing NULL is reported loudly rather than silently ignored. */
100 printf("%s: attempt to free NULL\n", __func__);
/* Step back over the debug header to the true allocation start. */
105 buf = (u_char *)buf - sizeof(struct kmem_item);
106 mtx_lock(&kmem_items_mtx);
/* Locate this allocation's tracking entry (match test not visible). */
107 LIST_FOREACH(i, &kmem_items, next) {
112 LIST_REMOVE(i, next);
113 mtx_unlock(&kmem_items_mtx);
115 free(buf, M_SOLARIS);
/* Cached system memory size, computed once at boot by kmem_size_init(). */
118 static uint64_t kmem_size_val;
121 kmem_size_init(void *unused __unused)
/* Total physical RAM in bytes, clamped to the kernel memory arena. */
124 kmem_size_val = (uint64_t)vm_cnt.v_page_count * PAGE_SIZE;
125 if (kmem_size_val > vm_kmem_size)
126 kmem_size_val = vm_kmem_size;
/* Run once the VM page counters are available (SI_SUB_KMEM). */
128 SYSINIT(kmem_size_init, SI_SUB_KMEM, SI_ORDER_ANY, kmem_size_init, NULL);
/* kmem_size() accessor body -- its header lines are not visible here. */
134 return (kmem_size_val);
138 kmem_std_constructor(void *mem, int size __unused, void *private, int flags)
140 struct kmem_cache *cache = private;
142 return (cache->kc_constructor(mem, cache->kc_private, flags));
146 kmem_std_destructor(void *mem, int size __unused, void *private)
148 struct kmem_cache *cache = private;
150 cache->kc_destructor(mem, cache->kc_private);
/*
 * kmem_cache_create: Solaris object-cache constructor.  Allocates a
 * cache descriptor, records name/constructor/destructor/private cookie,
 * and on kernel non-debug builds backs it with a UMA zone; otherwise
 * (per the #if below) it presumably falls back to plain kmem_alloc --
 * the #else branch and return are not visible in this chunk.
 * The reclaim callback and vmem arena (vmp) are accepted for source
 * compatibility but unused here.
 */
154 kmem_cache_create(char *name, size_t bufsize, size_t align,
155 int (*constructor)(void *, void *, int), void (*destructor)(void *, void *),
156 void (*reclaim)(void *) __unused, void *private, vmem_t *vmp, int cflags)
/* Descriptor allocation sleeps; cache creation cannot fail here. */
162 cache = kmem_alloc(sizeof(*cache), KM_SLEEP);
163 strlcpy(cache->kc_name, name, sizeof(cache->kc_name));
164 cache->kc_constructor = constructor;
165 cache->kc_destructor = destructor;
166 cache->kc_private = private;
167 #if defined(_KERNEL) && !defined(KMEM_DEBUG)
/* Wrap the Solaris ctor/dtor in UMA-signature adapters; UMA takes the
 * alignment as a mask, hence align - 1. */
168 cache->kc_zone = uma_zcreate(cache->kc_name, bufsize,
169 constructor != NULL ? kmem_std_constructor : NULL,
170 destructor != NULL ? kmem_std_destructor : NULL,
171 NULL, NULL, align > 0 ? align - 1 : 0, cflags);
/* Non-UMA path: remember the object size for kmem_cache_alloc/free. */
173 cache->kc_size = bufsize;
180 kmem_cache_destroy(kmem_cache_t *cache)
182 #if defined(_KERNEL) && !defined(KMEM_DEBUG)
183 uma_zdestroy(cache->kc_zone);
185 kmem_free(cache, sizeof(*cache));
/*
 * kmem_cache_alloc: get one object from the cache.  Kernel non-debug
 * builds delegate to the UMA zone (which runs the ctor adapter via the
 * uma_zalloc_arg cookie); otherwise fall back to kmem_alloc plus an
 * explicit constructor call.  The trailing return of p and the #else
 * line are not visible in this chunk.
 */
189 kmem_cache_alloc(kmem_cache_t *cache, int flags)
191 #if defined(_KERNEL) && !defined(KMEM_DEBUG)
/* cache is passed as the arg so kmem_std_constructor can find it. */
192 return (uma_zalloc_arg(cache->kc_zone, cache, flags));
196 p = kmem_alloc(cache->kc_size, flags);
/* Run the constructor only on success and only if one was registered. */
197 if (p != NULL && cache->kc_constructor != NULL)
198 kmem_std_constructor(p, cache->kc_size, cache, flags);
/*
 * kmem_cache_free: return one object to the cache.  Kernel non-debug
 * builds hand it back to UMA (destructor adapter runs via the arg
 * cookie); otherwise run the destructor explicitly, then kmem_free.
 * The #else line and closing brace are not visible in this chunk.
 */
204 kmem_cache_free(kmem_cache_t *cache, void *buf)
206 #if defined(_KERNEL) && !defined(KMEM_DEBUG)
207 uma_zfree_arg(cache->kc_zone, buf, cache);
209 if (cache->kc_destructor != NULL)
210 kmem_std_destructor(buf, cache->kc_size, cache);
211 kmem_free(buf, cache->kc_size);
216 * Allow our caller to determine if there are running reaps.
218 * This call is very conservative and may return B_TRUE even when
219 * reaping activity isn't active. If it returns B_FALSE, then reaping
220 * activity is definitely inactive.
/* Function body is not visible in this chunk -- presumably a constant
 * boolean on this platform; confirm against the original file. */
223 kmem_cache_reap_active(void)
230 * Reap (almost) everything soon.
232 * Note: this does not wait for the reap-tasks to complete. Caller
233 * should use kmem_cache_reap_active() (above) and/or moderation to
234 * avoid scheduling too many reap-tasks.
/* Kernel variant: ask UMA to drain this cache's zone asynchronously. */
238 kmem_cache_reap_soon(kmem_cache_t *cache)
241 uma_zone_reclaim(cache->kc_zone, UMA_RECLAIM_DRAIN);
/* NOTE(review): the line below likely belongs to a separate kmem_reap()
 * (global trim) whose header is missing from this chunk -- verify. */
248 uma_reclaim(UMA_RECLAIM_TRIM);
/* Userland/debug stub variant of kmem_cache_reap_soon (body missing). */
252 kmem_cache_reap_soon(kmem_cache_t *cache __unused)
269 calloc(size_t n, size_t s)
271 return (kmem_zalloc(n * s, KM_NOSLEEP));
/* Forward declaration so the SYSUNINIT reference below resolves. */
275 void kmem_show(void *);
/*
 * kmem_show: KMEM_DEBUG leak report.  Walks the global kmem_items list
 * and prints the address and allocating stack of every element still
 * live; run automatically at shutdown via the SYSUNINIT below.
 * Opening brace, locals, else/closing braces are not visible in this
 * chunk.
 */
277 kmem_show(void *dummy __unused)
281 mtx_lock(&kmem_items_mtx);
282 if (LIST_EMPTY(&kmem_items))
283 printf("KMEM_DEBUG: No leaked elements.\n");
285 printf("KMEM_DEBUG: Leaked elements:\n\n");
286 LIST_FOREACH(i, &kmem_items, next) {
287 printf("address=%p\n", i);
/* Print the call stack captured by stack_save() at allocation time. */
288 stack_print_ddb(&i->stack);
292 mtx_unlock(&kmem_items_mtx);
/* Dump leaks late in shutdown, before the allocator itself goes away. */
295 SYSUNINIT(sol_kmem, SI_SUB_CPU, SI_ORDER_FIRST, kmem_show, NULL);
296 #endif /* KMEM_DEBUG */