/*
 * KERN_KMALLOC.C - Kernel memory allocator
 *
 * Copyright (c) 2021 The DragonFly Project, All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements the kmalloc_obj allocator.  This is a type-stable
 * allocator that uses the same base structures (e.g. malloc_type) plus
 * some extensions to efficiently implement single-type zones.
 *
 * All memory management is zone based.  When a zone is destroyed, all of
 * its memory is returned to the system with no fragmentation.
 *
 * A mini-slab allocator hangs directly off the zone structure (malloc_type).
 * Since the object zones are single-size-only, the slab allocator is very
 * simple and currently utilizes just two per-zone/per-cpu slabs (active and
 * alternate) before kicking up to the per-zone cache.  Beyond that we just
 * have the per-cpu globaldata-based 'free slab' cache to avoid unnecessary
 * kernel_map mappings and unmappings.
 *
 * The advantage of this scheme is that zones don't stomp over each other
 * and cause excessive fragmentation in the slabs.  For example, when you
 * umount a large tmpfs filesystem, most of its memory (all of its
 * kmalloc_obj memory) is returned to the system.
 */
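
/*
 * Illustrative usage sketch (hypothetical consumer; the declaration macro
 * and exact argument order live in sys/malloc.h and are only approximated
 * here, they are not defined in this file):
 *
 *	MALLOC_DEFINE_OBJ(M_FOONODE, sizeof(struct foonode),
 *			  "foonode", "foonode structures");
 *	...
 *	struct foonode *fn;
 *
 *	fn = kmalloc_obj(sizeof(*fn), M_FOONODE, M_WAITOK | M_ZERO);
 *	...
 *	kfree_obj(fn, M_FOONODE);
 *
 * Every allocation in such a zone is exactly type->ks_objsize bytes
 * (asserted in _kmalloc_obj() below), which is what allows the
 * single-size mini-slab to remain so simple.
 */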
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/vmmeter.h>
#include <sys/spinlock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/spinlock2.h>
#include <sys/thread2.h>
#include <sys/exislock2.h>
#include <vm/vm_page2.h>
#define MEMORY_STRING	"ptr=%p type=%p size=%lu flags=%04x"
#define MEMORY_ARGS	void *ptr, void *type, unsigned long size, int flags

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(mem_obj);
KTR_INFO(KTR_MEMORY, mem_obj, malloc_beg, 0, "kmalloc_obj begin");
KTR_INFO(KTR_MEMORY, mem_obj, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_beg, 9, "kfree_obj begin");
KTR_INFO(KTR_MEMORY, mem_obj, free_end, 10, "kfree_obj end");

#define logmemory(name, ptr, type, size, flags)			\
	KTR_LOG(mem_obj_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)					\
	KTR_LOG(mem_obj_ ## name)
__read_frequently static int KMGDMaxFreeSlabs = KMGD_MAXFREESLABS;
SYSCTL_INT(_kern, OID_AUTO, kzone_cache, CTLFLAG_RW, &KMGDMaxFreeSlabs, 0, "");
__read_frequently static int kzone_debug;
SYSCTL_INT(_kern, OID_AUTO, kzone_debug, CTLFLAG_RW, &kzone_debug, 0, "");

__read_frequently struct kmalloc_slab kslab_dummy;

static void malloc_slab_destroy(struct malloc_type *type,
			struct kmalloc_slab **slabp);
/*
 * Cache a chain of slabs onto their respective cpu slab caches.  Any slabs
 * which we cannot cache will be returned.
 *
 * free_slabs	     - Current structure may only be accessed by current cpu
 * remote_free_slabs - Only atomic swap operations are allowed.
 * free_count	     - Only atomic operations are allowed.
 *
 * If the count is sufficient to cache the entire list, NULL is returned.
 * Otherwise the portion that was not cached is returned.
 */
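/*
 * For example (illustrative numbers only): if a chain of five slabs is
 * passed in, all originating on cpu N, and cpu N's cache currently has
 * room for two more slabs, the first two slabs are pushed onto cpu N's
 * remote_free_slabs list and the remaining three are returned to the
 * caller for disposal.
 */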
struct kmalloc_slab *
gslab_cache(struct kmalloc_slab *slab)
	struct kmalloc_slab *save;
	struct kmalloc_slab *next;
	struct kmalloc_slab *res;
	struct kmalloc_slab **resp;
	struct kmalloc_slab **slabp;

	KKASSERT(((uintptr_t)slab & KMALLOC_SLAB_MASK) == 0);

	/*
	 * Given the slab list, get the cpuid and clip off as many matching
	 * elements as fit in the cache.
	 */
	cpuid = slab->orig_cpuid;
	rgd = globaldata_find(cpuid);

	KKASSERT(((uintptr_t)slab & KMALLOC_SLAB_MASK) == 0);

	/*
	 * Doesn't fit in cache, put on return list.
	 */
	if (rgd->gd_kmslab.free_count >= KMGDMaxFreeSlabs) {

	/*
	 * Collect.  We aren't required to match-up the original cpu
	 * with the disposal cpu, but it's a good idea to retain
	 * the cpu association.
	 *
	 * The slabs we collect are going into the global cache,
	 * remove the type association.
	 */
	KKASSERT(((uintptr_t)slab & KMALLOC_SLAB_MASK) == 0);

	while ((next = *slabp) != NULL &&
	       next->orig_cpuid == cpuid &&
	       rgd->gd_kmslab.free_count + count < KMGDMaxFreeSlabs)
		KKASSERT(((uintptr_t)next & KMALLOC_SLAB_MASK) == 0);

		/*
		 * Safety: unhook before next.  next is not included in
		 * the list (starting with slab) that is being pre-pended
		 * to remote_free_slabs.
		 */

	/*
	 * Now atomically pre-pend slab...*slabp to remote_free_slabs.
	 * Pump the count first (it's ok if the actual chain length
	 * races the count update).
	 *
	 * NOTE: In the loop, (save) is updated by fcmpset.
	 */
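	/*
	 * Sketch of the lock-free push this implements (generic pattern,
	 * not verbatim code): the observed head is loaded into (save),
	 * the new chain's tail is pointed at it, and atomic_fcmpset_ptr()
	 * publishes the new head.  On failure fcmpset reloads (save) with
	 * the head it actually found, so the loop simply re-links the
	 * tail and retries.
	 */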
	atomic_add_long(&rgd->gd_kmslab.free_count, count);
	save = rgd->gd_kmslab.remote_free_slabs;

	KKASSERT(((uintptr_t)save & KMALLOC_SLAB_MASK) == 0);
	*slabp = save;		/* end of slab list chain to... */

	if (atomic_fcmpset_ptr(
		&rgd->gd_kmslab.remote_free_slabs,

	/*
	 * Setup for next loop
	 */

	/*
	 * Terminate the result list and return it
	 */

/*
 * May only be called on current cpu.  Pull a free slab from the
 * pcpu cache.  If we run out, move any slabs that have built-up
 * on the remote_free_slabs list into the local free_slabs list.
 *
 * We are only allowed to swap the remote_free_slabs head, we cannot
 * manipulate any next pointers while structures are sitting on that list.
 */
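/*
 * In other words, remote cpus only ever push whole chains onto
 * remote_free_slabs with an atomic compare-and-swap of the head (see
 * gslab_cache() above), and the owning cpu only ever takes the entire
 * list with a single atomic swap to NULL (below).  Since nobody edits
 * next pointers while a slab is sitting on the shared list, no further
 * locking is required.
 */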
struct kmalloc_slab *
gslab_alloc(globaldata_t gd)
	struct kmalloc_slab *slab;

	slab = gd->gd_kmslab.free_slabs;

	slab = atomic_swap_ptr(
			(volatile void **)&gd->gd_kmslab.remote_free_slabs,
			NULL);
	KKASSERT(((uintptr_t)slab & KMALLOC_SLAB_MASK) == 0);

	gd->gd_kmslab.free_slabs = slab->next;

	atomic_add_long(&gd->gd_kmslab.free_count, -1);
	KKASSERT(((uintptr_t)slab & KMALLOC_SLAB_MASK) == 0);
malloc_mgt_init(struct malloc_type *type __unused,
		struct kmalloc_mgt *mgt, size_t size)
	bzero(mgt, sizeof(*mgt));
	spin_init(&mgt->spin, "kmmgt");

	/*
	 * Allows us to avoid a conditional.  The dummy slabs are empty
	 * and have no objects.
	 */
	mgt->active = &kslab_dummy;
	mgt->alternate = &kslab_dummy;
	mgt->empty_tailp = &mgt->empty;

	/*
	 * Figure out the count, taking into account the size of the fobjs[]
	 * array by adding it to the object size.
	 */
	offset = offsetof(struct kmalloc_slab, fobjs[0]);
	offset = __VM_CACHELINE_ALIGN(offset);
	count = (KMALLOC_SLAB_SIZE - offset) / (size + sizeof(void *));

	/*
	 * However, the fobjs[] array itself must be aligned, so we might
	 * have to reduce the count by 1.  (We can do this because 'size'
	 * is already aligned as well).
	 */
	offset = offsetof(struct kmalloc_slab, fobjs[count]);
	offset = __VM_CACHELINE_ALIGN(offset);

	if (offset + size * count > KMALLOC_SLAB_SIZE) {
		--count;
		offset = offsetof(struct kmalloc_slab, fobjs[count]);
		offset = __VM_CACHELINE_ALIGN(offset);
		KKASSERT(offset + size * count <= KMALLOC_SLAB_SIZE);
	}

	mgt->slab_offset = offset;
	mgt->slab_count = count;
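
	/*
	 * Worked example of the computation above (illustrative values
	 * only; the real KMALLOC_SLAB_SIZE and header size come from
	 * sys/_malloc.h): assume a 128KB slab, size = 256, and a 64-byte
	 * aligned header before fobjs[].
	 *
	 *	count  = (131072 - 64) / (256 + 8)	= 496
	 *	offset = ALIGN(64 + 496 * 8)		= 4032
	 *	4032 + 496 * 256 = 131008 <= 131072
	 *
	 * so the count stands; otherwise count would have been reduced
	 * by one and the offset recomputed.
	 */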
malloc_mgt_relocate(struct kmalloc_mgt *src, struct kmalloc_mgt *dst)
	struct kmalloc_slab **slabp;

	spin_init(&dst->spin, "kmmgt");

		slabp = &(*slabp)->next;
	dst->empty_tailp = slabp;

malloc_mgt_uninit(struct malloc_type *type, struct kmalloc_mgt *mgt)
	if (mgt->active != &kslab_dummy)
		malloc_slab_destroy(type, &mgt->active);

	if (mgt->alternate != &kslab_dummy)
		malloc_slab_destroy(type, &mgt->alternate);
	mgt->alternate = NULL;

	malloc_slab_destroy(type, &mgt->partial);
	malloc_slab_destroy(type, &mgt->full);
	malloc_slab_destroy(type, &mgt->empty);

	mgt->empty_tailp = &mgt->empty;

	spin_uninit(&mgt->spin);
/*
 * Destroy a list of slabs.  Attempt to cache the slabs on the specified
 * (possibly remote) cpu.  This allows slabs that were operating on a
 * particular cpu to be disposed of back to that same cpu.
 */
static void
malloc_slab_destroy(struct malloc_type *type, struct kmalloc_slab **slabp)
{
	struct kmalloc_slab *slab;
	struct kmalloc_slab *base;
	struct kmalloc_slab **basep;
	size_t delta;

	base = NULL;
	basep = &base;

	/*
	 * Collect all slabs that can actually be destroyed, complain
	 * about any which cannot (they still have objects allocated).
	 */
	while ((slab = *slabp) != NULL) {
		KKASSERT(((uintptr_t)slab & KMALLOC_SLAB_MASK) == 0);

		delta = slab->findex - slab->aindex;
		if (delta == slab->ncount) {
			*slabp = slab->next;	/* unlink */
			*basep = slab;		/* link into base list */
			basep = &slab->next;
		} else {
			kprintf("%s: slab %p %zd objects "
				"were still allocated\n",
				type->ks_shortdesc, slab,
				slab->ncount - delta);
			/* leave link intact and iterate */
			slabp = &slab->next;
		}
	}

	/*
	 * Terminate the base list of slabs that can be destroyed,
	 * then cache as many of them as possible.
	 */
	*basep = NULL;
	base = gslab_cache(base);

	/*
	 * Destroy the remainder
	 */
	while ((slab = base) != NULL) {
		base = slab->next;
		slab->next = (void *)(uintptr_t)-1;
		kmem_slab_free(slab, KMALLOC_SLAB_SIZE);
	}
}
/*
 * Poll a limited number of slabs on the empty list and move them
 * to the appropriate full or partial list.  Slabs left on the empty
 * list are rotated to the tail.
 *
 * If gcache is non-zero this function will try to place full slabs into
 * the globaldata cache, if it isn't already too full.
 *
 * The mgt is spin-locked.
 *
 * Returns non-zero if the ggm updates possibly made slabs available for
 * allocation.
 */
malloc_mgt_poll_empty_locked(struct kmalloc_mgt *ggm, int count)
	struct kmalloc_slab *marker;
	struct kmalloc_slab *slab;
	size_t delta;
	int got_something = 0;

	if (ggm->empty == NULL)
		return(0);

	marker = ggm->empty;

	while (count-- && (slab = ggm->empty) != NULL) {
		ggm->empty = slab->next;
		slab->next = NULL;
		if (ggm->empty_tailp == &slab->next)
			ggm->empty_tailp = &ggm->empty;

		/*
		 * Check partial, full, and empty.  We rotate
		 * empty entries to the end of the empty list.
		 *
		 * NOTE: For a fully-freeable slab we also have
		 */
		delta = slab->findex - slab->aindex;
		if (delta == slab->ncount) {
			/*
			 * Stuff into the full list.  This requires setting
			 * the exis sequence number via exis_terminate().
			 */
			KKASSERT(slab->next == NULL);
			exis_terminate(&slab->exis);
			slab->next = ggm->full;
			ggm->full = slab;
			++got_something;
		} else if (delta) {
			/*
			 * Partially populated, stuff onto the partial list.
			 */
			KKASSERT(slab->next == NULL);
			slab->next = ggm->partial;
			ggm->partial = slab;
			++got_something;
		} else {
			/*
			 * Still empty, rotate it to the end of the
			 * empty list.
			 */
			KKASSERT(slab->next == NULL);
			*ggm->empty_tailp = slab;
			ggm->empty_tailp = &slab->next;
		}
		if (ggm->empty == marker)
			break;
	}
	return got_something;
/*
 * Called once a second with the zone interlocked against destruction.
 *
 * Returns non-zero to tell the caller to iterate to the next type,
 * else the caller should stay on the current type.
 */
malloc_mgt_poll(struct malloc_type *type)
	struct kmalloc_mgt *ggm;
	struct kmalloc_slab *slab;
	struct kmalloc_slab **slabp;
	struct kmalloc_slab *base;
	struct kmalloc_slab **basep;
	size_t delta;
	size_t count;
	int retired;
	int donext;

	if ((type->ks_flags & KSF_OBJSIZE) == 0)
		return(1);

	/*
	 * Check the partial, full, and empty lists for full freeable slabs
	 * in excess of desired caching count.
	 */
	spin_lock(&ggm->spin);

	/*
	 * Move empty slabs to partial or full as appropriate.  We
	 * don't bother checking partial slabs to see if they are full
	 */
	malloc_mgt_poll_empty_locked(ggm, 16);

	/*
	 * Ok, clean out some of the full mags from the full list
	 */
	if (count > KMALLOC_MAXFREEMAGS) {
		count -= KMALLOC_MAXFREEMAGS;

		while (count && (slab = *slabp) != NULL) {
			delta = slab->findex - slab->aindex;
			if (delta == slab->ncount &&
			    slab->xindex == slab->findex &&
			    exis_freeable(&slab->exis))
			{
				/*
				 * (1) No allocated entries in the structure,
				 *     this should always be the case from
				 *     the full list.
				 *
				 * (2) kfree_obj() has fully completed.  Just
				 *     checking findex is not sufficient since
				 *     it is incremented to reserve the slot
				 *     before the element is loaded into it.
				 *
				 * (3) The slab has been on the full list for
				 *     a sufficient number of EXIS
				 *     pseudo_ticks, for type-safety.
				 */
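				/*
				 * Illustration of (3), under the EXIS
				 * semantics implied above:
				 * exis_terminate() stamps the slab when
				 * it is placed on the full list, and
				 * exis_freeable() only returns true once
				 * enough pseudo_ticks have elapsed that
				 * no type-stable lookup begun before the
				 * slab emptied can still be dereferencing
				 * its objects.  Until then the slab stays
				 * cached here rather than being retired.
				 */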
		*basep = NULL;		/* terminate the retirement list */
		donext = (*slabp == NULL);
	}
	spin_unlock(&ggm->spin);

	/*
	 * Clean out any slabs that we couldn't stow in the globaldata cache.
	 */
	if (kzone_debug) {
		kprintf("kmalloc_poll: %s retire %d\n",
			type->ks_shortdesc, retired);
	}
	base = gslab_cache(base);
	while ((slab = base) != NULL) {
		base = slab->next;
		kmem_slab_free(slab, KMALLOC_SLAB_SIZE);
	}

/*
 * Optional bitmap double-free check.  This is typically turned on by
 * default for safety (sys/_malloc.h)
 */
#ifdef KMALLOC_CHECK_DOUBLE_FREE

bmap_set(struct kmalloc_slab *slab, void *obj)
{
	uint64_t *ptr;
	uint64_t mask;
	size_t i = (((uintptr_t)obj & KMALLOC_SLAB_MASK) - slab->offset) /
		   slab->objsize;

	ptr = &slab->bmap[i >> 6];
	mask = (uint64_t)1U << (i & 63);
	KKASSERT(i < slab->ncount && (*ptr & mask) == 0);
	atomic_set_64(ptr, mask);
}

bmap_clr(struct kmalloc_slab *slab, void *obj)
{
	uint64_t *ptr;
	uint64_t mask;
	size_t i = (((uintptr_t)obj & KMALLOC_SLAB_MASK) - slab->offset) /
		   slab->objsize;

	ptr = &slab->bmap[i >> 6];
	mask = (uint64_t)1U << (i & 63);
	KKASSERT(i < slab->ncount && (*ptr & mask) != 0);
	atomic_clear_64(ptr, mask);
}

#endif
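/*
 * Illustrative index math for the bitmap above (hypothetical values): an
 * object whose computed index is i = 75 maps to bit (75 & 63) = 11 of
 * word bmap[75 >> 6] = bmap[1].  Setting a bit that is already set, or
 * clearing a bit that is already clear, trips the KKASSERT and flags a
 * double free (or a corrupted fobjs[] ring).
 */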
/*
 * Cleanup a mgt structure.
 *
 * Always called from the current cpu, so we can manipulate the various
 * lists directly.
 *
 * WARNING: findex can race, fobjs[n] is updated after findex is incremented,
 *	    so the pointer stored at fobjs[n] may not yet be valid when
 *	    findex is observed.
 */
mgt_cleanup(struct kmalloc_mgt *mgt)
	struct kmalloc_slab **slabp;
	struct kmalloc_slab *slab;
#ifdef SLAB_DEBUG
void *
_kmalloc_obj_debug(unsigned long size, struct malloc_type *type, int flags,
		   const char *file, int line)
#else
void *
_kmalloc_obj(unsigned long size, struct malloc_type *type, int flags)
#endif
{
	struct kmalloc_slab *slab;
	struct kmalloc_use *use;
	struct kmalloc_mgt *mgt;
	struct kmalloc_mgt *ggm;

	while (__predict_false(type->ks_loosememuse >= type->ks_limit)) {
		long ttl;
		int n;

		for (n = ttl = 0; n < ncpus; ++n)
			ttl += type->ks_use[n].memuse;
		type->ks_loosememuse = ttl;	/* not MP synchronized */
		if ((ssize_t)ttl < 0)		/* deal with occasional race */
			ttl = 0;
		if (ttl >= type->ks_limit) {
			if (flags & M_NULLOK)
				return(NULL);
			panic("%s: malloc limit exceeded",
			      type->ks_shortdesc);
		}
	}

	logmemory_quick(malloc_beg);
	KKASSERT(size == type->ks_objsize);

	use = &type->ks_use[gd->gd_cpuid];

	/*
	 * NOTE: obj can be NULL if racing a _kfree_obj().
	 */
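	/*
	 * fobjs[] is used as a ring: aindex (allocation) and findex (free)
	 * only ever increase, delta = findex - aindex is the number of
	 * object pointers currently available, and the actual array slot
	 * is the index modulo ncount.  E.g. (illustrative numbers) with
	 * ncount = 496, aindex = 1000 and findex = 1003 there are three
	 * objects available and the next one is taken from
	 * fobjs[1000 % 496] = fobjs[8].
	 */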
	slab = mgt->active;			/* Might be dummy */
	delta = slab->findex - slab->aindex;
	if (__predict_true(delta != 0)) {	/* Cannot be dummy */
		i = slab->aindex % slab->ncount;
		obj = slab->fobjs[i];
		if (__predict_true(obj != NULL)) {
			slab->fobjs[i] = NULL;
			++slab->aindex;
#ifdef KMALLOC_CHECK_DOUBLE_FREE
			bmap_clr(slab, obj);
#endif

	/*
	 * Check alternate.  If we find something, swap it with
	 * the active slab.
	 *
	 * NOTE: It is possible for exhausted slabs to recover entries
	 *	 via _kfree_obj(), so we just keep swapping until both
	 *	 are exhausted.
	 *
	 * NOTE: obj can be NULL if racing a _kfree_obj().
	 */
	slab = mgt->alternate;			/* Might be dummy */
	delta = slab->findex - slab->aindex;
	if (__predict_true(delta != 0)) {	/* Cannot be dummy */
		mgt->alternate = mgt->active;
		mgt->active = slab;
		i = slab->aindex % slab->ncount;
		obj = slab->fobjs[i];
		if (__predict_true(obj != NULL)) {
			slab->fobjs[i] = NULL;
			++slab->aindex;
#ifdef KMALLOC_CHECK_DOUBLE_FREE
			bmap_clr(slab, obj);
#endif

	/*
	 * Rotate a slab from the global mgt into the pcpu mgt.
	 *
	 *	G(partial, full) -> active -> alternate -> G(empty)
	 *
	 * We try to exhaust partials first to reduce fragmentation, then
	 * dig into the fulls.
	 */
	spin_lock(&ggm->spin);

	slab = mgt->alternate;			/* Might be dummy */
	mgt->alternate = mgt->active;		/* Might be dummy */
	mgt->active = ggm->partial;
	ggm->partial = ggm->partial->next;
	mgt->active->next = NULL;

	if (slab != &kslab_dummy) {
		KKASSERT(slab->next == NULL);
		*ggm->empty_tailp = slab;
		ggm->empty_tailp = &slab->next;
	}
	spin_unlock(&ggm->spin);

	slab = mgt->alternate;			/* Might be dummy */
	mgt->alternate = mgt->active;		/* Might be dummy */
	mgt->active = ggm->full;
	ggm->full = ggm->full->next;
	mgt->active->next = NULL;

	exis_setlive(&mgt->active->exis);
	if (slab != &kslab_dummy) {
		KKASSERT(slab->next == NULL);
		*ggm->empty_tailp = slab;
		ggm->empty_tailp = &slab->next;
	}
	spin_unlock(&ggm->spin);

	/*
	 * We couldn't find anything, scan a limited number of empty entries
	 * looking for something with objects.  This will also free excess
	 * full lists that meet requirements.
	 */
	if (malloc_mgt_poll_empty_locked(ggm, 16))

	/*
	 * Absolutely nothing is available, allocate a new slab and
	 * rotate it in.
	 *
	 * Try to get a slab from the global pcpu slab cache (very cheap).
	 * If that fails, allocate a new slab (very expensive).
	 */
	spin_unlock(&ggm->spin);
	if (gd->gd_kmslab.free_count == 0 || (slab = gslab_alloc(gd)) == NULL) {
		slab = kmem_slab_alloc(KMALLOC_SLAB_SIZE, KMALLOC_SLAB_SIZE,

	bzero(slab, sizeof(*slab));
	KKASSERT(offsetof(struct kmalloc_slab, fobjs[use->mgt.slab_count]) <=
		 use->mgt.slab_offset);

	obj = (char *)slab + use->mgt.slab_offset;
	slab->orig_cpuid = gd->gd_cpuid;
	slab->ncount = use->mgt.slab_count;
	slab->offset = use->mgt.slab_offset;
	slab->objsize = type->ks_objsize;
	slab->findex = slab->ncount;
	slab->xindex = slab->ncount;
	for (delta = 0; delta < slab->ncount; ++delta) {
		slab->fobjs[delta] = obj;
		obj = (char *)obj + type->ks_objsize;
	}

	/*
	 * Sanity check, assert that the last byte of the last object is
	 * still within the slab.
	 */
	KKASSERT(((((uintptr_t)obj - 1) ^ (uintptr_t)slab) &
		  ~KMALLOC_SLAB_MASK) == 0);
	KASSERT(((((uintptr_t)obj - 1) ^ (uintptr_t)slab) &
		 ~KMALLOC_SLAB_MASK) == 0,
		("SLAB %p ncount %zd objsize %zd obj=%p\n",
		 slab, slab->ncount, slab->objsize, obj));

	slab->magic = KMALLOC_SLAB_MAGIC;
	spin_init(&slab->spin, "kmslb");
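
	/*
	 * Resulting layout of the new slab (schematic; the exact offsets
	 * are whatever malloc_mgt_init() computed above):
	 *
	 *	slab ----> +----------------------------+ KMALLOC_SLAB_SIZE
	 *	           | struct kmalloc_slab header | aligned base
	 *	           | fobjs[ncount] pointer ring |
	 *	offset --> +----------------------------+ (cache line aligned)
	 *	           | object 0  (objsize bytes)  |
	 *	           | object 1                   |
	 *	           | ...                        |
	 *	           | object ncount-1            |
	 *	           +----------------------------+ <= slab + slab size
	 */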
	/*
	 * Rotate it in, then retry.
	 *
	 *	(NEW)slab -> active -> alternate -> G(empty)
	 */
	spin_lock(&ggm->spin);
	if (mgt->alternate != &kslab_dummy) {
		struct kmalloc_slab *slab_tmp;

		slab_tmp = mgt->alternate;
		slab_tmp->next = NULL;
		*ggm->empty_tailp = slab_tmp;
		ggm->empty_tailp = &slab_tmp->next;
	}
	mgt->alternate = mgt->active;		/* Might be dummy */
	mgt->active = slab;
	spin_unlock(&ggm->spin);

	/*
	 * Found object, adjust statistics and return
	 */
	use->loosememuse += size;
	if (__predict_false(use->loosememuse >= KMALLOC_LOOSE_SIZE)) {
		/* not MP synchronized */
		type->ks_loosememuse += use->loosememuse;
		use->loosememuse = 0;
	}

	/*
	 * Handle remaining flags.  M_ZERO is typically not set because
	 * the inline macro deals with zeroing for constant sizes.
	 */
	if (__predict_false(flags & M_ZERO))
		bzero(obj, size);

	logmemory(malloc_end, NULL, type, size, flags);

/*
 * Free a type-stable object.  We have the base structure and can
 * calculate the slab, but from this direction we don't know which
 * mgt structure or list the slab might be on.
 */
_kfree_obj(void *obj, struct malloc_type *type)
	struct kmalloc_slab *slab;
	struct kmalloc_use *use;

	logmemory_quick(free_beg);

	/*
	 * Calculate the slab from the pointer
	 */
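	/*
	 * Because every slab is allocated KMALLOC_SLAB_SIZE-aligned,
	 * masking off the low bits of any object pointer recovers the
	 * slab header.  E.g. (assuming, for illustration, a 128KB slab
	 * so that KMALLOC_SLAB_MASK == 0x1ffff) an object pointer of
	 * 0x....2a761c40 masks down to the slab base at 0x....2a760000.
	 */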
	slab = (void *)((uintptr_t)obj & ~KMALLOC_SLAB_MASK);
	delta = slab->findex - slab->aindex;
	KKASSERT(slab->magic == KMALLOC_SLAB_MAGIC && delta != slab->ncount);

	/*
	 * We can only safely adjust the statistics for the current cpu.
	 * Don't try to track down the original cpu.  The statistics will
	 * be collected and fixed up by vmstat -m (etc).
	 */
	use = &slab->type->ks_use[gd->gd_cpuid];
	use->memuse -= slab->objsize;

	/*
	 * There MUST be free space in the slab since we are returning
	 * the obj to the same slab it was allocated from.
	 */
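	/*
	 * The atomic_fetchadd_long() below reserves a ring slot by bumping
	 * findex first; the object pointer is stored into fobjs[] after
	 * that.  This is why the retirement test in malloc_mgt_poll() also
	 * requires xindex == findex (criterion (2) there): xindex is only
	 * advanced after the store completes, so a slab is never torn down
	 * while a slot is reserved but not yet filled.
	 */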
	i = atomic_fetchadd_long(&slab->findex, 1);
	i = i % slab->ncount;
	if (slab->fobjs[i] != NULL) {
		kprintf("_kfree_obj failure %zd/%zd/%zd\n",
			slab->aindex, slab->findex, slab->ncount);
	}
#ifdef KMALLOC_CHECK_DOUBLE_FREE
	bmap_set(slab, obj);
#endif
	KKASSERT(slab->fobjs[i] == NULL);
	slab->fobjs[i] = obj;
	atomic_add_long(&slab->xindex, 1);	/* synchronizer */

	logmemory_quick(free_end);