/*
 * KERN_KMALLOC.C - Kernel memory allocator
 *
 * Copyright (c) 2021 The DragonFly Project, All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements the kmalloc_obj allocator.  This is a type-stable
 * allocator that uses the same base structures (e.g. malloc_type) plus
 * some extensions to efficiently implement single-type zones.
 *
 * All memory management is zone based.  When a zone is destroyed, all of
 * its memory is returned to the system with no fragmentation.
 *
 * A mini-slab allocator hangs directly off the zone structure (malloc_type).
 * Since the object zones are single-size-only, the slab allocator is very
 * simple and currently utilizes just two per-zone/per-cpu slabs (active and
 * alternate) before kicking up to the per-zone cache.  Beyond that we just
 * have the per-cpu globaldata-based 'free slab' cache to avoid unnecessary
 * kernel_map mappings and unmappings.
 *
 * The advantage of this is that zones don't stomp over each other and cause
 * excessive fragmentation in the slabs.  For example, when you umount a
 * large tmpfs filesystem, most of its memory (all of its kmalloc_obj memory)
 * is returned to the system.
 */
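
/*
 * Example usage, as a rough sketch only: a consumer defines a fixed-size
 * object zone and then allocates/frees type-stable objects from it.  The
 * macro and wrapper names below (MALLOC_DEFINE_OBJ, kmalloc_obj, kfree_obj)
 * and their argument order are assumed here for illustration; sys/malloc.h
 * is the authoritative reference for the real interface.
 *
 *	MALLOC_DEFINE_OBJ(M_FOO, sizeof(struct foo), "foo", "foo objects");
 *
 *	struct foo *fp;
 *
 *	fp = kmalloc_obj(sizeof(*fp), M_FOO, M_WAITOK | M_ZERO);
 *	...
 *	kfree_obj(fp, M_FOO);
 */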
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>

#include <sys/vmmeter.h>
#include <sys/spinlock.h>

#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>

#include <sys/malloc.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>

#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/spinlock2.h>
#include <sys/thread2.h>
#include <sys/exislock2.h>
#include <vm/vm_page2.h>
#define MEMORY_STRING	"ptr=%p type=%p size=%lu flags=%04x"
#define MEMORY_ARGS	void *ptr, void *type, unsigned long size, int flags

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(mem_obj);
KTR_INFO(KTR_MEMORY, mem_obj, malloc_beg, 0, "kmalloc_obj begin");
KTR_INFO(KTR_MEMORY, mem_obj, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_beg, 9, "kfree_obj begin");
KTR_INFO(KTR_MEMORY, mem_obj, free_end, 10, "kfree_obj end");

#define logmemory(name, ptr, type, size, flags)			\
	KTR_LOG(mem_obj_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)					\
	KTR_LOG(mem_obj_ ## name)
__read_frequently static int KMGDMaxFreeSlabs = KMGD_MAXFREESLABS;
SYSCTL_INT(_kern, OID_AUTO, kzone_cache, CTLFLAG_RW, &KMGDMaxFreeSlabs, 0, "");
__read_frequently static int kzone_bretire = 4;
SYSCTL_INT(_kern, OID_AUTO, kzone_bretire, CTLFLAG_RW, &kzone_bretire, 0, "");
__read_frequently static int kzone_debug;
SYSCTL_INT(_kern, OID_AUTO, kzone_debug, CTLFLAG_RW, &kzone_debug, 0, "");
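
/*
 * These knobs are exported read-write under kern.*, so the free-slab cache
 * depth, the batch retirement count, and debug printing can be tuned at
 * run-time, e.g. (illustrative invocation) "sysctl kern.kzone_bretire=8".
 */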
__read_frequently struct kmalloc_slab kslab_dummy;
static void malloc_slab_destroy(struct malloc_type *type,
                                struct kmalloc_slab **slabp);
/*
 * Cache a chain of slabs onto their respective cpu slab caches.  Any slabs
 * which we cannot cache will be returned.
 *
 * free_slabs	     - Current structure may only be accessed by current cpu
 * remote_free_slabs - Only atomic swap operations are allowed.
 * free_count	     - Only atomic operations are allowed.
 *
 * If the count is sufficient to cache the entire list, NULL is returned.
 * Otherwise the portion that was not cached is returned.
 */
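/*
 * Concurrency note (summarizing the rules above as they play out in the
 * code below and in gslab_alloc): remote_free_slabs is only ever modified
 * by atomically swapping or compare-and-swapping its head pointer, so a
 * remote cpu can prepend a pre-linked chain without taking any lock, while
 * free_slabs and the next pointers of slabs already on these lists are
 * only touched by the owning cpu.
 */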
struct kmalloc_slab *
gslab_cache(struct kmalloc_slab *slab)
{
        struct kmalloc_slab *save;
        struct kmalloc_slab *next;
        struct kmalloc_slab *res;
        struct kmalloc_slab **resp;
        struct kmalloc_slab **slabp;
        globaldata_t rgd;
        size_t count;
        int cpuid;

        KKASSERT(((uintptr_t)slab & KMALLOC_SLAB_MASK) == 0);

        /*
         * Given the slab list, get the cpuid and clip off as many matching
         * elements as fits in the cache.
         */
        cpuid = slab->orig_cpuid;
        rgd = globaldata_find(cpuid);

        KKASSERT(((uintptr_t)slab & KMALLOC_SLAB_MASK) == 0);

        /*
         * Doesn't fit in cache, put on return list.
         */
        if (rgd->gd_kmslab.free_count >= KMGDMaxFreeSlabs) {

        /*
         * Collect.  We aren't required to match-up the original cpu
         * with the disposal cpu, but it's a good idea to retain
         * locality.
         *
         * The slabs we collect are going into the global cache,
         * remove the type association.
         */
        KKASSERT(((uintptr_t)slab & KMALLOC_SLAB_MASK) == 0);

        while ((next = *slabp) != NULL &&
               next->orig_cpuid == cpuid &&
               rgd->gd_kmslab.free_count + count < KMGDMaxFreeSlabs)
                KKASSERT(((uintptr_t)next & KMALLOC_SLAB_MASK) == 0);

        /*
         * Safety, unhook before next, next is not included in the
         * list starting with slab that is being pre-pended
         * to remote_free_slabs.
         */

        /*
         * Now atomically pre-pend slab...*slabp to remote_free_slabs.
         * Pump the count first (it's ok if the actual chain length
         * races the count update).
         *
         * NOTE: In the loop, (save) is updated by fcmpset.
         */
        atomic_add_long(&rgd->gd_kmslab.free_count, count);
        save = rgd->gd_kmslab.remote_free_slabs;
        KKASSERT(((uintptr_t)save & KMALLOC_SLAB_MASK) == 0);
        *slabp = save;          /* end of slab list chain to... */
        if (atomic_fcmpset_ptr(
                &rgd->gd_kmslab.remote_free_slabs,

        /*
         * Setup for next loop
         */

        /*
         * Terminate the result list and return it
         */
/*
 * May only be called on current cpu.  Pull a free slab from the
 * pcpu cache.  If we run out, move any slabs that have built-up
 * from the remote_free_slabs list to free_slabs.
 *
 * We are only allowed to swap the remote_free_slabs head, we cannot
 * manipulate any next pointers while structures are sitting on that list.
 */
struct kmalloc_slab *
gslab_alloc(globaldata_t gd)
{
        struct kmalloc_slab *slab;

        slab = gd->gd_kmslab.free_slabs;
        if (slab == NULL) {
                slab = atomic_swap_ptr(
                        (volatile void **)&gd->gd_kmslab.remote_free_slabs,
                        NULL);
                KKASSERT(((uintptr_t)slab & KMALLOC_SLAB_MASK) == 0);
        }
        gd->gd_kmslab.free_slabs = slab->next;
        atomic_add_long(&gd->gd_kmslab.free_count, -1);
        KKASSERT(((uintptr_t)slab & KMALLOC_SLAB_MASK) == 0);
void
malloc_mgt_init(struct malloc_type *type __unused,
                struct kmalloc_mgt *mgt, size_t size)
{
        size_t offset;
        size_t count;

        bzero(mgt, sizeof(*mgt));
        spin_init(&mgt->spin, "kmmgt");

        /*
         * Allows us to avoid a conditional.  The dummy slabs are empty
         * and have no objects.
         */
        mgt->active = &kslab_dummy;
        mgt->alternate = &kslab_dummy;
        mgt->empty_tailp = &mgt->empty;

        /*
         * Figure out the count by taking into account the size of the fobjs[]
         * array by adding it to the object size.  This initial calculation
         * ignores alignment edge-cases that might require the count to be
         * reduced.
         */
        offset = offsetof(struct kmalloc_slab, fobjs[0]);
        count = (KMALLOC_SLAB_SIZE - offset) / (size + sizeof(void *));

        /*
         * Recalculate the offset of the first object, this time including
         * the required alignment.  (size) should already be aligned.  This
         * may push the last object beyond the slab so check and loop with
         * a reduced count as necessary.
         *
         * Ok, theoretically the count should not actually change since the
         * division above rounds-down (that is, any mis-alignment is already
         * not included in the count calculation).  But I'm not going to take
         * any chances and check anyway as a safety in case some programmer
         * changes the code above later.  This is not a time-critical code
         * path.
         */
        offset = offsetof(struct kmalloc_slab, fobjs[count]);
        offset = __VM_CACHELINE_ALIGN(offset);

        while (offset + size * count > KMALLOC_SLAB_SIZE) {
                --count;
                offset = offsetof(struct kmalloc_slab, fobjs[count]);
                offset = __VM_CACHELINE_ALIGN(offset);
                KKASSERT(offset + size * count <= KMALLOC_SLAB_SIZE);
        }

        mgt->slab_offset = offset;
        mgt->slab_count = count;
}
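
/*
 * Worked example with assumed numbers (for illustration only; the real
 * KMALLOC_SLAB_SIZE and fobjs[] header offset come from sys/_malloc.h):
 * with a 128KB slab, a 64-byte header before fobjs[], and 256-byte objects,
 * the first pass gives count = (131072 - 64) / (256 + 8) = 496.  The
 * aligned offset of fobjs[496] is 64 + 496 * 8 = 4032 (already cache-line
 * aligned), and 4032 + 256 * 496 = 131008 <= 131072, so the count stands;
 * had it overflowed the slab, the loop above would drop count by one.
 */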
void
malloc_mgt_relocate(struct kmalloc_mgt *src, struct kmalloc_mgt *dst)
{
        struct kmalloc_slab **slabp;

        spin_init(&dst->spin, "kmmgt");

                slabp = &(*slabp)->next;
        dst->empty_tailp = slabp;
void
malloc_mgt_uninit(struct malloc_type *type, struct kmalloc_mgt *mgt)
{
        if (mgt->active != &kslab_dummy)
                malloc_slab_destroy(type, &mgt->active);

        if (mgt->alternate != &kslab_dummy)
                malloc_slab_destroy(type, &mgt->alternate);
        mgt->alternate = NULL;

        malloc_slab_destroy(type, &mgt->partial);
        malloc_slab_destroy(type, &mgt->full);
        malloc_slab_destroy(type, &mgt->empty);
        mgt->empty_tailp = &mgt->empty;

        spin_uninit(&mgt->spin);
}
/*
 * Destroy a list of slabs.  Attempt to cache the slabs on the specified
 * (possibly remote) cpu.  This allows slabs that were operating on a
 * particular cpu to be disposed of back to that same cpu.
 */
static void
malloc_slab_destroy(struct malloc_type *type, struct kmalloc_slab **slabp)
{
        struct kmalloc_slab *slab;
        struct kmalloc_slab *base;
        struct kmalloc_slab **basep;
        size_t delta;

        base = NULL;
        basep = &base;

        /*
         * Collect all slabs that can actually be destroyed, complain
         * about the rest.
         */
        while ((slab = *slabp) != NULL) {
                KKASSERT(((uintptr_t)slab & KMALLOC_SLAB_MASK) == 0);

                delta = slab->findex - slab->aindex;
                if (delta == slab->ncount) {
                        *slabp = slab->next;    /* unlink */
                        *basep = slab;          /* link into base list */
                        basep = &slab->next;
                } else {
                        kprintf("%s: slab %p %zd objects "
                                "were still allocated\n",
                                type->ks_shortdesc, slab,
                                slab->ncount - delta);
                        /* leave link intact and iterate */
                        slabp = &slab->next;
                }
        }

        /*
         * Terminate the base list of slabs that can be destroyed,
         * then cache as many of them as possible.
         */
        *basep = NULL;
        base = gslab_cache(base);

        /*
         * Destroy the remainder
         */
        while ((slab = base) != NULL) {
                base = slab->next;
                slab->next = (void *)(uintptr_t)-1;
                kmem_slab_free(slab, KMALLOC_SLAB_SIZE);
        }
}
/*
 * Objects can be freed to an empty slab at any time, causing it to no
 * longer be empty.  To improve performance, we do not try to pro-actively
 * move such slabs to the appropriate partial or full list upon kfree_obj().
 * Instead, a poller comes along and tests the slabs on the empty list
 * periodically, and moves slabs that are no longer empty to the appropriate
 * list.
 */

/*
 * Poll a limited number of slabs on the empty list and move them
 * to the appropriate full or partial list.  Slabs left on the empty
 * list are rotated to the tail.
 *
 * If gcache is non-zero this function will try to place full slabs into
 * the globaldata cache, if it isn't already too full.
 *
 * The mgt is spin-locked.
 *
 * Returns non-zero if the ggm updates possibly made slabs available for
 * allocation.
 */
static int
malloc_mgt_poll_empty_locked(struct kmalloc_mgt *ggm, int count)
{
        struct kmalloc_slab *marker;
        struct kmalloc_slab *slab;
        size_t delta;
        int got_something = 0;

        if (ggm->empty == NULL)
                return 0;

        while (count-- && (slab = ggm->empty) != NULL) {
                ggm->empty = slab->next;
                slab->next = NULL;
                if (ggm->empty_tailp == &slab->next)
                        ggm->empty_tailp = &ggm->empty;

                /*
                 * Check partial, full, and empty.  We rotate
                 * empty entries to the end of the empty list.
                 *
                 * NOTE: For a fully-freeable slab we also have
                 *	 to check xindex.
                 */
                delta = slab->findex - slab->aindex;
                if (delta == slab->ncount) {
                        /*
                         * Stuff into the full list.  This requires setting
                         * the exis sequence number via exis_terminate().
                         */
                        KKASSERT(slab->next == NULL);
                        exis_terminate(&slab->exis);
                        slab->next = ggm->full;
                        ggm->full = slab;
                        got_something = 1;
                } else if (delta) {
                        KKASSERT(slab->next == NULL);
                        slab->next = ggm->partial;
                        ggm->partial = slab;
                        got_something = 1;
                } else {
                        KKASSERT(slab->next == NULL);
                        *ggm->empty_tailp = slab;
                        ggm->empty_tailp = &slab->next;
                }
                if (ggm->empty == marker)
                        break;
        }
        return got_something;
/*
 * Called once a second with the zone interlocked against destruction.
 *
 * Returns non-zero to tell the caller to iterate to the next type,
 * else the caller should stay on the current type.
 */
int
malloc_mgt_poll(struct malloc_type *type)
{
        struct kmalloc_mgt *ggm;
        struct kmalloc_slab *slab;
        struct kmalloc_slab **slabp;
        struct kmalloc_slab *base;
        struct kmalloc_slab **basep;
        size_t delta;
        size_t count;
        int retired;
        int donext;

        if ((type->ks_flags & KSF_OBJSIZE) == 0)
                return 1;

        /*
         * Check the partial, full, and empty lists for full freeable slabs
         * in excess of desired caching count.
         */
        spin_lock(&ggm->spin);

        /*
         * Move empty slabs to partial or full as appropriate.  We
         * don't bother checking partial slabs to see if they are full.
         */
        malloc_mgt_poll_empty_locked(ggm, 16);

        /*
         * Ok, cleanout some of the full mags from the full list
         */
        if (count > KMALLOC_MAXFREEMAGS) {
                count -= KMALLOC_MAXFREEMAGS;

                while (count && (slab = *slabp) != NULL) {
                        delta = slab->findex - slab->aindex;
                        if (delta == slab->ncount &&
                            slab->xindex == slab->findex &&
                            exis_freeable(&slab->exis)) {
                                /*
                                 * (1) No allocated entries in the structure,
                                 *     this should always be the case from the
                                 *     full list.
                                 *
                                 * (2) kfree_obj() has fully completed.  Just
                                 *     checking findex is not sufficient since
                                 *     it is incremented to reserve the slot
                                 *     before the element is loaded into it.
                                 *
                                 * (3) The slab has been on the full list for
                                 *     a sufficient number of EXIS
                                 *     pseudo_ticks, for type-safety.
                                 */
                                if (++retired == kzone_bretire)
                                        break;
                        }
                }
                *basep = NULL;          /* terminate the retirement list */
                donext = (*slabp == NULL);
        }
        spin_unlock(&ggm->spin);

        /*
         * Clean out any slabs that we couldn't stow in the globaldata cache.
         */
        if (kzone_debug) {
                kprintf("kmalloc_poll: %s retire %d\n",
                        type->ks_shortdesc, retired);
        }
        base = gslab_cache(base);
        while ((slab = base) != NULL) {
                base = base->next;
                kmem_slab_free(slab, KMALLOC_SLAB_SIZE);
        }
        return donext;
}
/*
 * Optional bitmap double-free check.  This is typically turned on by
 * default for safety (sys/_malloc.h).
 */
#ifdef KMALLOC_CHECK_DOUBLE_FREE

static __inline void
bmap_set(struct kmalloc_slab *slab, void *obj)
{
        uint64_t *ptr;
        uint64_t mask;
        size_t i = (((uintptr_t)obj & KMALLOC_SLAB_MASK) - slab->offset) /
                   slab->objsize;

        ptr = &slab->bmap[i >> 6];
        mask = (uint64_t)1U << (i & 63);
        KKASSERT(i < slab->ncount && (*ptr & mask) == 0);
        atomic_set_64(ptr, mask);
}
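
/*
 * Worked example with assumed numbers: for a slab with offset 4032 and
 * objsize 256, an object at byte offset 36544 within the slab-aligned
 * region maps to index i = (36544 - 4032) / 256 = 127, which lands in
 * bitmap word i >> 6 = 1 as bit i & 63 = 63.
 */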
static __inline void
bmap_clr(struct kmalloc_slab *slab, void *obj)
{
        uint64_t *ptr;
        uint64_t mask;
        size_t i = (((uintptr_t)obj & KMALLOC_SLAB_MASK) - slab->offset) /
                   slab->objsize;

        ptr = &slab->bmap[i >> 6];
        mask = (uint64_t)1U << (i & 63);
        KKASSERT(i < slab->ncount && (*ptr & mask) != 0);
        atomic_clear_64(ptr, mask);
}

#endif
/*
 * Cleanup a mgt structure.
 *
 * Always called from the current cpu, so we can manipulate the various
 * lists without interlocks.
 *
 * WARNING: findex can race, fobjs[n] is updated after findex is incremented,
 *	    so the object pointer may not be present yet when observed.
 */
static void
mgt_cleanup(struct kmalloc_mgt *mgt)
{
        struct kmalloc_slab **slabp;
        struct kmalloc_slab *slab;
void *
_kmalloc_obj_debug(unsigned long size, struct malloc_type *type, int flags,
                   const char *file, int line)
#else
void *
_kmalloc_obj(unsigned long size, struct malloc_type *type, int flags)
#endif
{
        struct kmalloc_slab *slab;
        struct kmalloc_use *use;
        struct kmalloc_mgt *mgt;
        struct kmalloc_mgt *ggm;
        globaldata_t gd;
        void *obj;
        size_t delta;
        size_t i;
        while (__predict_false(type->ks_loosememuse >= type->ks_limit)) {
                long ttl;
                int n;

                for (n = ttl = 0; n < ncpus; ++n)
                        ttl += type->ks_use[n].memuse;
                type->ks_loosememuse = ttl;     /* not MP synchronized */
                if ((ssize_t)ttl < 0)           /* deal with occasional race */
                        ttl = 0;
                if (ttl >= type->ks_limit) {
                        if (flags & M_NULLOK)
                                return(NULL);
                        panic("%s: malloc limit exceeded", type->ks_shortdesc);
                }

        logmemory_quick(malloc_beg);
        KKASSERT(size == type->ks_objsize);
        gd = mycpu;
        use = &type->ks_use[gd->gd_cpuid];
        /*
         * NOTE: obj can be NULL if racing a _kfree_obj().
         */
        slab = mgt->active;                     /* Might be dummy */
        delta = slab->findex - slab->aindex;
        if (__predict_true(delta != 0)) {       /* Cannot be dummy */
                i = slab->aindex % slab->ncount;
                obj = slab->fobjs[i];
                if (__predict_true(obj != NULL)) {
                        slab->fobjs[i] = NULL;
#ifdef KMALLOC_CHECK_DOUBLE_FREE
                        bmap_set(slab, obj);
#endif

        /*
         * Check alternate.  If we find something, swap it with
         * the active slab.
         *
         * NOTE: It is possible for exhausted slabs to recover entries
         *	 via _kfree_obj(), so we just keep swapping until both
         *	 are exhausted.
         *
         * NOTE: obj can be NULL if racing a _kfree_obj().
         */
        slab = mgt->alternate;                  /* Might be dummy */
        delta = slab->findex - slab->aindex;
        if (__predict_true(delta != 0)) {       /* Cannot be dummy */
                mgt->alternate = mgt->active;
                mgt->active = slab;
                i = slab->aindex % slab->ncount;
                obj = slab->fobjs[i];
                if (__predict_true(obj != NULL)) {
                        slab->fobjs[i] = NULL;
#ifdef KMALLOC_CHECK_DOUBLE_FREE
                        bmap_set(slab, obj);
#endif
        /*
         * Rotate a slab from the global mgt into the pcpu mgt.
         *
         *	G(partial, full) -> active -> alternate -> G(empty)
         *
         * We try to exhaust partials first to reduce fragmentation, then
         * dig into the fulls.
         */
        spin_lock(&ggm->spin);

                slab = mgt->alternate;          /* Might be dummy */
                mgt->alternate = mgt->active;   /* Might be dummy */
                mgt->active = ggm->partial;
                ggm->partial = ggm->partial->next;
                mgt->active->next = NULL;

                if (slab != &kslab_dummy) {
                        KKASSERT(slab->next == NULL);
                        *ggm->empty_tailp = slab;
                        ggm->empty_tailp = &slab->next;
                }
                spin_unlock(&ggm->spin);

                slab = mgt->alternate;          /* Might be dummy */
                mgt->alternate = mgt->active;   /* Might be dummy */
                mgt->active = ggm->full;
                ggm->full = ggm->full->next;
                mgt->active->next = NULL;

                exis_setlive(&mgt->active->exis);
                if (slab != &kslab_dummy) {
                        KKASSERT(slab->next == NULL);
                        *ggm->empty_tailp = slab;
                        ggm->empty_tailp = &slab->next;
                }
                spin_unlock(&ggm->spin);

        /*
         * We couldn't find anything, scan a limited number of empty entries
         * looking for something with objects.  This will also free excess
         * full lists that meet requirements.
         */
        if (malloc_mgt_poll_empty_locked(ggm, 16))

        /*
         * Absolutely nothing is available, allocate a new slab and
         * retry.
         *
         * Try to get a slab from the global pcpu slab cache (very cheap).
         * If that fails, allocate a new slab (very expensive).
         */
        spin_unlock(&ggm->spin);
        if (gd->gd_kmslab.free_count == 0 || (slab = gslab_alloc(gd)) == NULL) {
                slab = kmem_slab_alloc(KMALLOC_SLAB_SIZE, KMALLOC_SLAB_SIZE,

        bzero(slab, sizeof(*slab));
        KKASSERT(offsetof(struct kmalloc_slab, fobjs[use->mgt.slab_count]) <=
                 use->mgt.slab_offset);

        obj = (char *)slab + use->mgt.slab_offset;
        slab->orig_cpuid = gd->gd_cpuid;
        slab->ncount = use->mgt.slab_count;
        slab->offset = use->mgt.slab_offset;
        slab->objsize = type->ks_objsize;
        slab->findex = slab->ncount;
        slab->xindex = slab->ncount;
        for (delta = 0; delta < slab->ncount; ++delta) {
                slab->fobjs[delta] = obj;
                obj = (char *)obj + type->ks_objsize;
        }

        /*
         * Sanity check, assert that the last byte of last object is still
         * entirely contained within the slab.
         */
        KKASSERT(((((uintptr_t)obj - 1) ^ (uintptr_t)slab) &
                  ~KMALLOC_SLAB_MASK) == 0);
        KASSERT(((((uintptr_t)obj - 1) ^ (uintptr_t)slab) &
                 ~KMALLOC_SLAB_MASK) == 0,
                ("SLAB %p ncount %zd objsize %zd obj=%p\n",
                 slab, slab->ncount, slab->objsize, obj));
        slab->magic = KMALLOC_SLAB_MAGIC;
        spin_init(&slab->spin, "kmslb");
        /*
         * Rotate it in, then retry.
         *
         *	(NEW)slab -> active -> alternate -> G(empty)
         */
        spin_lock(&ggm->spin);
        if (mgt->alternate != &kslab_dummy) {
                struct kmalloc_slab *slab_tmp;

                slab_tmp = mgt->alternate;
                slab_tmp->next = NULL;
                *ggm->empty_tailp = slab_tmp;
                ggm->empty_tailp = &slab_tmp->next;
        }
        mgt->alternate = mgt->active;           /* Might be dummy */
        mgt->active = slab;
        spin_unlock(&ggm->spin);
        /*
         * Found object, adjust statistics and return
         */
        use->loosememuse += size;
        if (__predict_false(use->loosememuse >= KMALLOC_LOOSE_SIZE)) {
                /* not MP synchronized */
                type->ks_loosememuse += use->loosememuse;
                use->loosememuse = 0;
        }

        /*
         * Handle remaining flags.  M_ZERO is typically not set because
         * the inline macro deals with zeroing for constant sizes.
         */
        if (__predict_false(flags & M_ZERO))
                bzero(obj, size);

        logmemory(malloc_end, NULL, type, size, flags);
        return(obj);
}
/*
 * Free a type-stable object.  We have the base structure and can
 * calculate the slab, but from this direction we don't know which
 * mgt structure or list the slab might be on.
 */
void
_kfree_obj(void *obj, struct malloc_type *type)
{
        struct kmalloc_slab *slab;
        struct kmalloc_use *use;
        globaldata_t gd;
        size_t delta;
        size_t i;

        logmemory_quick(free_beg);
        gd = mycpu;

        /*
         * Calculate the slab from the pointer
         */
        slab = (void *)((uintptr_t)obj & ~KMALLOC_SLAB_MASK);
        delta = slab->findex - slab->aindex;
        KKASSERT(slab->magic == KMALLOC_SLAB_MAGIC && delta != slab->ncount);

        /*
         * We can only safely adjust the statistics for the current cpu.
         * Don't try to track down the original cpu.  The statistics will
         * be collected and fixed up by vmstat -m (etc).
         */
        use = &slab->type->ks_use[gd->gd_cpuid];
        use->memuse -= slab->objsize;

        /*
         * There MUST be free space in the slab since we are returning
         * the obj to the same slab it was allocated from.
         */
        i = atomic_fetchadd_long(&slab->findex, 1);
        i = i % slab->ncount;
        if (slab->fobjs[i] != NULL) {
                kprintf("_kfree_obj failure %zd/%zd/%zd\n",
                        slab->aindex, slab->findex, slab->ncount);
        }
#ifdef KMALLOC_CHECK_DOUBLE_FREE
        bmap_clr(slab, obj);
#endif
        KKASSERT(slab->fobjs[i] == NULL);
        slab->fobjs[i] = obj;
        atomic_add_long(&slab->xindex, 1);      /* synchronizer */
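        /*
         * Ordering note: findex is bumped first to reserve the slot, the
         * object pointer is stored second, and xindex is bumped last.  The
         * poller only treats a slab as fully quiesced once xindex catches
         * up to findex, i.e. once every racing kfree_obj() has finished
         * storing its pointer (see the checks in malloc_mgt_poll()).
         */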
        logmemory_quick(free_end);
}