/*
 * KERN_KMALLOC.C	- Kernel memory allocator
 *
 * Copyright (c) 2021 The DragonFly Project, All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements the kmalloc_obj allocator.  This is a type-stable
 * allocator that uses the same base structures (e.g. malloc_type) plus
 * some extensions to efficiently implement single-type zones.
 *
 * All memory management is zone based.  When a zone is destroyed, all of
 * its memory is returned to the system with no fragmentation.
 *
 * A mini-slab allocator hangs directly off the zone structure (malloc_type).
 * Since the object zones are single-size-only, the slab allocator is very
 * simple and currently utilizes just two per-zone/per-cpu slabs (active and
 * alternate) before kicking up to the per-zone cache.  Beyond that we just
 * have the per-cpu globaldata-based 'free slab' cache to avoid unnecessary
 * kernel_map mappings and unmappings.
 *
 * The advantage of this is that zones don't stomp over each other and cause
 * excessive fragmentation in the slabs.  For example, when you umount a
 * large tmpfs filesystem, most of its memory (all of its kmalloc_obj memory)
 * is returned to the system.
 */
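
/*
 * Illustrative usage sketch (not part of the original file; the exact
 * entry points live in sys/malloc.h, and the names below assume the
 * kmalloc_create_obj()/kmalloc_obj()/kfree_obj() interface):
 *
 *	static struct malloc_type *foo_zone;
 *
 *	kmalloc_create_obj(&foo_zone, "foo zone", sizeof(struct foo));
 *	p = kmalloc_obj(sizeof(struct foo), foo_zone, M_WAITOK | M_ZERO);
 *	...
 *	kfree_obj(p, foo_zone);
 *	kmalloc_destroy(&foo_zone);	destroying the zone returns all of
 *					its slab memory to the system at once
 */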
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/spinlock.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/spinlock2.h>
#include <sys/thread2.h>
#include <sys/exislock2.h>
#include <vm/vm_page2.h>
#define MEMORY_STRING	"ptr=%p type=%p size=%lu flags=%04x"
#define MEMORY_ARGS	void *ptr, void *type, unsigned long size, int flags

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(mem_obj);
KTR_INFO(KTR_MEMORY, mem_obj, malloc_beg, 0, "kmalloc_obj begin");
KTR_INFO(KTR_MEMORY, mem_obj, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, mem_obj, free_beg, 9, "kfree_obj begin");
KTR_INFO(KTR_MEMORY, mem_obj, free_end, 10, "kfree_obj end");

#define logmemory(name, ptr, type, size, flags)			\
	KTR_LOG(mem_obj_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)					\
	KTR_LOG(mem_obj_ ## name)
__read_frequently static int KMGDMaxFreeSlabs = KMGD_MAXFREESLABS;
SYSCTL_INT(_kern, OID_AUTO, kzone_cache, CTLFLAG_RW, &KMGDMaxFreeSlabs, 0, "");
__read_frequently static int kzone_bretire = 4;
SYSCTL_INT(_kern, OID_AUTO, kzone_bretire, CTLFLAG_RW, &kzone_bretire, 0, "");
__read_frequently static int kzone_debug;
SYSCTL_INT(_kern, OID_AUTO, kzone_debug, CTLFLAG_RW, &kzone_debug, 0, "");

__read_frequently struct kmalloc_slab kslab_dummy;

static void malloc_slab_destroy(struct malloc_type *type,
			struct kmalloc_slab **slabp);
/*
 * Cache a chain of slabs onto their respective cpu slab caches.  Any slabs
 * which we cannot cache will be returned.
 *
 * free_slabs	     - Current structure may only be accessed by current cpu
 * remote_free_slabs - Only atomic swap operations are allowed.
 * free_count	     - Only atomic operations are allowed.
 *
 * If the count is sufficient to cache the entire list, NULL is returned.
 * Otherwise the portion that was not cached is returned.
 */
static __noinline
struct kmalloc_slab *
gslab_cache(struct kmalloc_slab *slab)
{
	struct kmalloc_slab *save;
	struct kmalloc_slab *next;
	struct kmalloc_slab *res;
	struct kmalloc_slab **resp;
	struct kmalloc_slab **slabp;
	globaldata_t rgd;
	size_t count;
	int cpuid;

	res = NULL;
	resp = &res;
	KKASSERT(((uintptr_t)slab & KMALLOC_SLAB_MASK) == 0);

	/*
	 * Given the slab list, get the cpuid and clip off as many matching
	 * elements as fits in the cache.
	 */
	while (slab) {
		cpuid = slab->orig_cpuid;
		rgd = globaldata_find(cpuid);
		KKASSERT(((uintptr_t)slab & KMALLOC_SLAB_MASK) == 0);

		/*
		 * Doesn't fit in cache, put on return list.
		 */
		if (rgd->gd_kmslab.free_count >= KMGDMaxFreeSlabs) {
			*resp = slab;
			resp = &slab->next;
			slab = slab->next;
			continue;
		}

		/*
		 * Collect.  We aren't required to match-up the original cpu
		 * with the disposal cpu, but it's a good idea to retain
		 * cpu locality.
		 *
		 * The slabs we collect are going into the global cache,
		 * remove the type association.
		 */
		KKASSERT(((uintptr_t)slab & KMALLOC_SLAB_MASK) == 0);
		slabp = &slab->next;
		count = 1;
		slab->type = NULL;

		while ((next = *slabp) != NULL &&
		       next->orig_cpuid == cpuid &&
		       rgd->gd_kmslab.free_count + count < KMGDMaxFreeSlabs)
		{
			KKASSERT(((uintptr_t)next & KMALLOC_SLAB_MASK) == 0);
			next->type = NULL;
			++count;
			slabp = &next->next;
		}

		/*
		 * Safety, unhook before next, next is not included in the
		 * list starting with slab that is being pre-pended
		 * to remote_free_slabs.
		 */
		*slabp = NULL;

		/*
		 * Now atomically pre-pend slab...*slabp to remote_free_slabs.
		 * Pump the count first (it's ok if the actual chain length
		 * races the count update).
		 *
		 * NOTE: In the loop, (save) is updated by fcmpset.
		 */
		atomic_add_long(&rgd->gd_kmslab.free_count, count);
		save = rgd->gd_kmslab.remote_free_slabs;
		for (;;) {
			KKASSERT(((uintptr_t)save & KMALLOC_SLAB_MASK) == 0);
			*slabp = save;	/* end of slab list chain to... */
					/* ...head of remote_free_slabs */
			if (atomic_fcmpset_ptr(
				&rgd->gd_kmslab.remote_free_slabs,
				&save, slab))
			{
				break;
			}
		}

		/*
		 * Setup for next loop
		 */
		slab = next;
	}

	/*
	 * Terminate the result list and return it
	 */
	*resp = NULL;

	return res;
}
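
/*
 * Illustrative note: the remote-free hand-off above is the standard
 * lock-free "push a chain" idiom.  A minimal sketch of the pattern
 * (illustrative only, not additional API):
 *
 *	save = head;
 *	for (;;) {
 *		tail->next = save;	chain tail points at old head
 *		if (atomic_fcmpset_ptr(&head, &save, chain))
 *			break;		chain published, done
 *		(on failure fcmpset reloads save and we retry)
 *	}
 */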
/*
 * May only be called on current cpu.  Pull a free slab from the
 * pcpu cache.  If we run out, move any slabs that have built-up
 * from remote cpus.
 *
 * We are only allowed to swap the remote_free_slabs head, we cannot
 * manipulate any next pointers while structures are sitting on that list.
 */
static __inline
struct kmalloc_slab *
gslab_alloc(globaldata_t gd)
{
	struct kmalloc_slab *slab;

	slab = gd->gd_kmslab.free_slabs;
	if (slab == NULL) {
		slab = atomic_swap_ptr(
			(volatile void **)&gd->gd_kmslab.remote_free_slabs,
			NULL);
		KKASSERT(((uintptr_t)slab & KMALLOC_SLAB_MASK) == 0);
	}
	if (slab) {
		gd->gd_kmslab.free_slabs = slab->next;
		slab->next = NULL;
		atomic_add_long(&gd->gd_kmslab.free_count, -1);
		KKASSERT(((uintptr_t)slab & KMALLOC_SLAB_MASK) == 0);
	}
	return slab;
}
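
/*
 * Illustrative note: the consumer above never walks remote_free_slabs
 * in place.  It atomically swaps the entire list head with NULL and
 * then owns the whole chain outright, which is exactly why the
 * gslab_cache() producers may only prepend with an atomic fcmpset and
 * never touch next pointers of slabs already on that list.
 */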
void
malloc_mgt_init(struct malloc_type *type __unused,
		struct kmalloc_mgt *mgt, size_t size)
{
	size_t offset;
	size_t count;

	bzero(mgt, sizeof(*mgt));
	spin_init(&mgt->spin, "kmmgt");

	/*
	 * Allows us to avoid a conditional.  The dummy slabs are empty
	 * and have no objects.
	 */
	mgt->active = &kslab_dummy;
	mgt->alternate = &kslab_dummy;
	mgt->empty_tailp = &mgt->empty;

	/*
	 * Figure out the count by taking into account the size of the fobjs[]
	 * array by adding it to the object size.  This initial calculation
	 * ignores alignment edge-cases that might require the count to be
	 * reduced.
	 */
	offset = offsetof(struct kmalloc_slab, fobjs[0]);
	count = (KMALLOC_SLAB_SIZE - offset) / (size + sizeof(void *));

	/*
	 * Recalculate the offset of the first object, this time including
	 * the required alignment.  (size) should already be aligned.  This
	 * may push the last object beyond the slab so check and loop with
	 * a reduced count as necessary.
	 *
	 * Ok, theoretically the count should not actually change since the
	 * division above rounds-down (that is, any mis-alignment is already
	 * not included in the count calculation).  But I'm not going to take
	 * any chances and check anyway as a safety in case some programmer
	 * changes the code above later.  This is not a time-critical code
	 * path.
	 */
	offset = offsetof(struct kmalloc_slab, fobjs[count]);
	offset = __VM_CACHELINE_ALIGN(offset);

	while (offset + size * count > KMALLOC_SLAB_SIZE) {
		--count;
		offset = offsetof(struct kmalloc_slab, fobjs[count]);
		offset = __VM_CACHELINE_ALIGN(offset);
		KKASSERT(offset + size * count <= KMALLOC_SLAB_SIZE);
	}

	mgt->slab_offset = offset;
	mgt->slab_count	 = count;
}
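
/*
 * Illustrative worked example (assumed numbers only: a 128KB
 * KMALLOC_SLAB_SIZE, a 64-byte header before fobjs[], 8-byte pointers,
 * and size=512 -- the real constants live in the slab allocator
 * headers):
 *
 *	count  = (131072 - 64) / (512 + 8)	-> 251
 *	offset = 64 + 251 * 8 = 2072, cache-line aligned -> 2112
 *	check:	2112 + 512 * 251 = 130624 <= 131072, so count stands.
 */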
void
malloc_mgt_relocate(struct kmalloc_mgt *src, struct kmalloc_mgt *dst)
{
	struct kmalloc_slab **slabp;

	spin_init(&dst->spin, "kmmgt");
	slabp = &dst->empty;

	while (*slabp)
		slabp = &(*slabp)->next;
	dst->empty_tailp = slabp;
}
void
malloc_mgt_uninit(struct malloc_type *type, struct kmalloc_mgt *mgt)
{
	if (mgt->active != &kslab_dummy)
		malloc_slab_destroy(type, &mgt->active);
	mgt->active = NULL;

	if (mgt->alternate != &kslab_dummy)
		malloc_slab_destroy(type, &mgt->alternate);
	mgt->alternate = NULL;

	malloc_slab_destroy(type, &mgt->partial);
	malloc_slab_destroy(type, &mgt->full);
	malloc_slab_destroy(type, &mgt->empty);
	mgt->npartial = 0;
	mgt->nfull = 0;
	mgt->nempty = 0;
	mgt->empty_tailp = &mgt->empty;

	spin_uninit(&mgt->spin);
}
/*
 * Destroy a list of slabs.  Attempt to cache the slabs on the specified
 * (possibly remote) cpu.  This allows slabs that were operating on a
 * particular cpu to be disposed of back to that same cpu.
 */
static void
malloc_slab_destroy(struct malloc_type *type, struct kmalloc_slab **slabp)
{
	struct kmalloc_slab *slab;
	struct kmalloc_slab *base;
	struct kmalloc_slab **basep;
	size_t delta;

	if (*slabp == NULL)
		return;

	/*
	 * Collect all slabs that can actually be destroyed, complain
	 * about the rest.
	 */
	base = NULL;
	basep = &base;
	while ((slab = *slabp) != NULL) {
		KKASSERT(((uintptr_t)slab & KMALLOC_SLAB_MASK) == 0);

		delta = slab->findex - slab->aindex;
		if (delta == slab->ncount) {
			*slabp = slab->next;	/* unlink */
			*basep = slab;		/* link into base list */
			basep = &slab->next;
		} else {
			kprintf("%s: slab %p %zd objects "
				"were still allocated\n",
				type->ks_shortdesc, slab,
				slab->ncount - delta);
			/* leave link intact and iterate */
			slabp = &slab->next;
		}
	}

	/*
	 * Terminate the base list of slabs that can be destroyed,
	 * then cache as many of them as possible.
	 */
	*basep = NULL;
	if (base == NULL)
		return;
	base = gslab_cache(base);

	/*
	 * Destroy the remainder
	 */
	while ((slab = base) != NULL) {
		base = slab->next;
		slab->next = (void *)(uintptr_t)-1;
		kmem_slab_free(slab, KMALLOC_SLAB_SIZE);
	}
}
/*
 * Objects can be freed to an empty slab at any time, causing it to no
 * longer be empty.  To improve performance, we do not try to pro-actively
 * move such slabs to the appropriate partial or full list upon kfree_obj().
 * Instead, a poller comes along and tests the slabs on the empty list
 * periodically, and moves slabs that are no longer empty to the appropriate
 * list.
 */

/*
 * Poll a limited number of slabs on the empty list and move them
 * to the appropriate full or partial list.  Slabs left on the empty
 * list are rotated to the tail.
 *
 * If gcache is non-zero this function will try to place full slabs into
 * the globaldata cache, if it isn't already too full.
 *
 * The mgt is spin-locked by the caller.
 *
 * Returns non-zero if the ggm updates possibly made slabs available for
 * allocation.
 */
static int
malloc_mgt_poll_empty_locked(struct kmalloc_mgt *ggm, int count)
{
	struct kmalloc_slab *marker;
	struct kmalloc_slab *slab;
	size_t delta;
	int got_something;

	if (ggm->empty == NULL)
		return 0;

	got_something = 0;
	marker = ggm->empty;

	while (count-- && (slab = ggm->empty) != NULL) {
		/*
		 * Unlink from the empty list
		 */
		ggm->empty = slab->next;
		slab->next = NULL;
		--ggm->nempty;
		if (ggm->empty_tailp == &slab->next)
			ggm->empty_tailp = &ggm->empty;

		/*
		 * Check partial, full, and empty.  We rotate
		 * empty entries to the end of the empty list.
		 *
		 * NOTE: For a fully-freeable slab we also have
		 *	 to check xindex.
		 */
		delta = slab->findex - slab->aindex;
		if (delta == slab->ncount) {
			/*
			 * Stuff into the full list.  This requires setting
			 * the exis sequence number via exis_terminate().
			 */
			KKASSERT(slab->next == NULL);
			exis_terminate(&slab->exis);
			slab->next = ggm->full;
			ggm->full = slab;
			++ggm->nfull;
			got_something = 1;
		} else if (delta) {
			/*
			 * Partially allocated, stuff into the partial list.
			 */
			KKASSERT(slab->next == NULL);
			slab->next = ggm->partial;
			ggm->partial = slab;
			++ggm->npartial;
			got_something = 1;
		} else {
			/*
			 * Still empty, rotate it to the end of the
			 * empty list.
			 */
			KKASSERT(slab->next == NULL);
			*ggm->empty_tailp = slab;
			ggm->empty_tailp = &slab->next;
			++ggm->nempty;
			if (ggm->empty == marker)
				break;
		}
	}
	return got_something;
}
/*
 * Called once a second with the zone interlocked against destruction.
 *
 * Returns non-zero to tell the caller to iterate to the next type,
 * else the caller should stay on the current type.
 */
int
malloc_mgt_poll(struct malloc_type *type)
{
	struct kmalloc_mgt *ggm;
	struct kmalloc_slab *slab;
	struct kmalloc_slab **slabp;
	struct kmalloc_slab *base;
	struct kmalloc_slab **basep;
	size_t delta;
	int donext;
	int count;
	int retired;

	if ((type->ks_flags & KSF_OBJSIZE) == 0)
		return 1;

	/*
	 * Check the partial, full, and empty lists for full freeable slabs
	 * in excess of desired caching count.
	 */
	ggm = &type->ks_mgt;
	spin_lock(&ggm->spin);

	/*
	 * Move empty slabs to partial or full as appropriate.  We
	 * don't bother checking partial slabs to see if they are full.
	 */
	malloc_mgt_poll_empty_locked(ggm, 16);

	/*
	 * Ok, cleanout some of the full mags from the full list
	 */
	base = NULL;
	basep = &base;
	count = ggm->nfull;
	retired = 0;

	if (count > KMALLOC_MAXFREEMAGS) {
		slabp = &ggm->full;
		count -= KMALLOC_MAXFREEMAGS;
		if (count > 16)
			count = 16;

		while (count && (slab = *slabp) != NULL) {
			delta = slab->findex - slab->aindex;
			if (delta == slab->ncount &&
			    slab->xindex == slab->findex &&
			    exis_freeable(&slab->exis))
			{
				/*
				 * (1) No allocated entries in the structure,
				 *     this should always be the case from the
				 *     full list.
				 *
				 * (2) kfree_obj() has fully completed.  Just
				 *     checking findex is not sufficient since
				 *     it is incremented to reserve the slot
				 *     before the element is loaded into it.
				 *
				 * (3) The slab has been on the full list for
				 *     a sufficient number of EXIS
				 *     pseudo_ticks, for type-safety.
				 */
				*slabp = slab->next;
				*basep = slab;
				basep = &slab->next;
				--ggm->nfull;
				if (++retired == kzone_bretire)
					break;
			} else {
				slabp = &slab->next;
			}
			--count;
		}
		*basep = NULL;	/* terminate the retirement list */
		donext = (*slabp == NULL);
	} else {
		donext = 1;
	}
	spin_unlock(&ggm->spin);

	/*
	 * Clean out any slabs that we couldn't stow in the globaldata cache.
	 */
	if (retired) {
		if (kzone_debug) {
			kprintf("kmalloc_poll: %s retire %d\n",
				type->ks_shortdesc, retired);
		}
		base = gslab_cache(base);
		while ((slab = base) != NULL) {
			base = base->next;
			slab->next = NULL;
			kmem_slab_free(slab, KMALLOC_SLAB_SIZE);
		}
	}
	return donext;
}
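
/*
 * Illustrative note on condition (3): the EXIS delay is what makes the
 * zone type-stable.  A fully-free slab must sit on the full list for
 * several pseudo_ticks before retirement, so any code still
 * optimistically dereferencing one of its freed objects sees memory
 * that is mapped and still typed correctly, rather than unmapped or
 * recycled into a different zone.
 */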
/*
 * Optional bitmap double-free check.  This is typically turned on by
 * default for safety (sys/_malloc.h)
 */
#ifdef KMALLOC_CHECK_DOUBLE_FREE

static __inline void
bmap_set(struct kmalloc_slab *slab, void *obj)
{
	uint64_t *ptr;
	uint64_t mask;
	size_t i = (((uintptr_t)obj & KMALLOC_SLAB_MASK) - slab->offset) /
		   slab->objsize;

	ptr = &slab->bmap[i >> 6];
	mask = (uint64_t)1U << (i & 63);
	KKASSERT(i < slab->ncount && (*ptr & mask) == 0);
	atomic_set_64(ptr, mask);
}

static __inline void
bmap_clr(struct kmalloc_slab *slab, void *obj)
{
	uint64_t *ptr;
	uint64_t mask;
	size_t i = (((uintptr_t)obj & KMALLOC_SLAB_MASK) - slab->offset) /
		   slab->objsize;

	ptr = &slab->bmap[i >> 6];
	mask = (uint64_t)1U << (i & 63);
	KKASSERT(i < slab->ncount && (*ptr & mask) != 0);
	atomic_clear_64(ptr, mask);
}

#endif
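
/*
 * Illustrative worked example (assumes slab->offset = 2112 and
 * slab->objsize = 512 as in the sizing example above): an object at
 * slab-relative byte 4160 yields i = (4160 - 2112) / 512 = 4, which is
 * bit (4 & 63) = 4 of bmap word (4 >> 6) = 0.  bmap_set() runs at
 * allocation time and asserts the bit was clear; bmap_clr() runs at
 * free time and asserts the bit was set, so a double kfree_obj() of
 * the same object trips the assertion immediately.
 */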
/*
 * Cleanup a mgt structure.
 *
 * Always called from the current cpu, so we can manipulate the various
 * lists freely.
 *
 * WARNING: findex can race, fobjs[n] is updated after findex is incremented,
 *	    so the object array may transiently contain NULL entries.
 */
static void
mgt_cleanup(struct kmalloc_mgt *mgt)
{
	struct kmalloc_slab **slabp;
	struct kmalloc_slab *slab;
}
#ifdef KMALLOC_DEBUG
void *
_kmalloc_obj_debug(unsigned long size, struct malloc_type *type, int flags,
		   const char *file, int line)
#else
void *
_kmalloc_obj(unsigned long size, struct malloc_type *type, int flags)
#endif
{
	struct kmalloc_slab *slab;
	struct kmalloc_use *use;
	struct kmalloc_mgt *mgt;
	struct kmalloc_mgt *ggm;
	globaldata_t gd;
	void *obj;
	size_t delta;

	/*
	 * Check limits
	 */
	while (__predict_false(type->ks_loosememuse >= type->ks_limit)) {
		long ttl;
		int n;

		for (n = ttl = 0; n < ncpus; ++n)
			ttl += type->ks_use[n].memuse;
		type->ks_loosememuse = ttl;	/* not MP synchronized */
		if ((ssize_t)ttl < 0)		/* deal with occasional race */
			ttl = 0;
		if (ttl >= type->ks_limit) {
			if (flags & M_NULLOK)
				return NULL;
			panic("%s: malloc limit exceeded",
			      type->ks_shortdesc);
		}
	}

	/*
	 * Setup
	 */
	gd = mycpu;
	crit_enter();
	logmemory_quick(malloc_beg);
	KKASSERT(size == type->ks_objsize);
	use = &type->ks_use[gd->gd_cpuid];
retry:
	/*
	 * Check active
	 *
	 * NOTE: obj can be NULL if racing a _kfree_obj().
	 */
	mgt = &use->mgt;
	slab = mgt->active;			/* Might be dummy */
	delta = slab->findex - slab->aindex;
	if (__predict_true(delta != 0)) {	/* Cannot be dummy */
		size_t i;

		i = slab->aindex % slab->ncount;
		obj = slab->fobjs[i];
		if (__predict_true(obj != NULL)) {
			slab->fobjs[i] = NULL;
			++slab->aindex;
#ifdef KMALLOC_CHECK_DOUBLE_FREE
			bmap_set(slab, obj);
#endif
			goto found;
		}
	}

	/*
	 * Check alternate.  If we find something, swap it with
	 * the active.
	 *
	 * NOTE: It is possible for exhausted slabs to recover entries
	 *	 via _kfree_obj(), so we just keep swapping until both
	 *	 are exhausted.
	 *
	 * NOTE: obj can be NULL if racing a _kfree_obj().
	 */
	slab = mgt->alternate;			/* Might be dummy */
	delta = slab->findex - slab->aindex;
	if (__predict_true(delta != 0)) {	/* Cannot be dummy */
		size_t i;

		mgt->alternate = mgt->active;
		mgt->active = slab;
		i = slab->aindex % slab->ncount;
		obj = slab->fobjs[i];
		if (__predict_true(obj != NULL)) {
			slab->fobjs[i] = NULL;
			++slab->aindex;
#ifdef KMALLOC_CHECK_DOUBLE_FREE
			bmap_set(slab, obj);
#endif
			goto found;
		}
	}
	/*
	 * Rotate a slab from the global mgt into the pcpu mgt.
	 *
	 *	G(partial, full) -> active -> alternate -> G(empty)
	 *
	 * We try to exhaust partials first to reduce fragmentation, then
	 * dig into the fulls.
	 */
	ggm = &type->ks_mgt;
	spin_lock(&ggm->spin);

rerotate:
	if (ggm->partial) {
		slab = mgt->alternate;		/* Might be dummy */
		mgt->alternate = mgt->active;	/* Might be dummy */
		mgt->active = ggm->partial;
		ggm->partial = ggm->partial->next;
		mgt->active->next = NULL;
		--ggm->npartial;
		if (slab != &kslab_dummy) {
			KKASSERT(slab->next == NULL);
			*ggm->empty_tailp = slab;
			ggm->empty_tailp = &slab->next;
			++ggm->nempty;
		}
		spin_unlock(&ggm->spin);
		goto retry;
	}

	if (ggm->full) {
		slab = mgt->alternate;		/* Might be dummy */
		mgt->alternate = mgt->active;	/* Might be dummy */
		mgt->active = ggm->full;
		ggm->full = ggm->full->next;
		mgt->active->next = NULL;
		--ggm->nfull;
		exis_setlive(&mgt->active->exis);
		if (slab != &kslab_dummy) {
			KKASSERT(slab->next == NULL);
			*ggm->empty_tailp = slab;
			ggm->empty_tailp = &slab->next;
			++ggm->nempty;
		}
		spin_unlock(&ggm->spin);
		goto retry;
	}

	/*
	 * We couldn't find anything, scan a limited number of empty entries
	 * looking for something with objects.  This will also free excess
	 * full lists that meet requirements.
	 */
	if (malloc_mgt_poll_empty_locked(ggm, 16))
		goto rerotate;

	/*
	 * Absolutely nothing is available, allocate a new slab and
	 * rotate it in.
	 *
	 * Try to get a slab from the global pcpu slab cache (very cheap).
	 * If that fails, allocate a new slab (very expensive).
	 */
	spin_unlock(&ggm->spin);
	if (gd->gd_kmslab.free_count == 0 || (slab = gslab_alloc(gd)) == NULL) {
		slab = kmem_slab_alloc(KMALLOC_SLAB_SIZE, KMALLOC_SLAB_SIZE,
				       M_WAITOK);
	}

	bzero(slab, sizeof(*slab));
	KKASSERT(offsetof(struct kmalloc_slab, fobjs[use->mgt.slab_count]) <=
		 use->mgt.slab_offset);

	obj = (char *)slab + use->mgt.slab_offset;
	slab->type = type;
	slab->orig_cpuid = gd->gd_cpuid;
	slab->ncount = use->mgt.slab_count;
	slab->offset = use->mgt.slab_offset;
	slab->objsize = type->ks_objsize;
	slab->aindex = 0;
	slab->findex = slab->ncount;
	slab->xindex = slab->ncount;
	for (delta = 0; delta < slab->ncount; ++delta) {
		slab->fobjs[delta] = obj;
		obj = (char *)obj + type->ks_objsize;
	}

	/*
	 * Sanity check, assert that the last byte of the last object is
	 * still inside the slab.
	 */
	KASSERT(((((uintptr_t)obj - 1) ^ (uintptr_t)slab) &
		 ~KMALLOC_SLAB_MASK) == 0,
		("SLAB %p ncount %zd objsize %zd obj=%p\n",
		 slab, slab->ncount, slab->objsize, obj));

	slab->magic = KMALLOC_SLAB_MAGIC;
	spin_init(&slab->spin, "kmslb");
	/*
	 * Rotate it in, then retry.
	 *
	 *	(NEW)slab -> active -> alternate -> G(empty)
	 */
	spin_lock(&ggm->spin);
	if (mgt->alternate != &kslab_dummy) {
		struct kmalloc_slab *slab_tmp;

		slab_tmp = mgt->alternate;
		slab_tmp->next = NULL;
		*ggm->empty_tailp = slab_tmp;
		ggm->empty_tailp = &slab_tmp->next;
		++ggm->nempty;
	}
	mgt->alternate = mgt->active;		/* Might be dummy */
	mgt->active = slab;
	spin_unlock(&ggm->spin);

	goto retry;
	/*
	 * Found object, adjust statistics and return
	 */
found:
	++use->inuse;
	++use->calls;
	use->memuse += size;
	use->loosememuse += size;
	if (__predict_false(use->loosememuse >= KMALLOC_LOOSE_SIZE)) {
		/* not MP synchronized */
		type->ks_loosememuse += use->loosememuse;
		use->loosememuse = 0;
	}

	/*
	 * Handle remaining flags.  M_ZERO is typically not set because
	 * the inline macro deals with zeroing for constant sizes.
	 */
	if (__predict_false(flags & M_ZERO))
		bzero(obj, size);

	crit_exit();
	logmemory(malloc_end, NULL, type, size, flags);

	return obj;
}
/*
 * Free a type-stable object.  We have the base structure and can
 * calculate the slab, but from this direction we don't know which
 * mgt structure or list the slab might be on.
 */
void
_kfree_obj(void *obj, struct malloc_type *type)
{
	struct kmalloc_slab *slab;
	struct kmalloc_use *use;
	globaldata_t gd;
	size_t delta;
	size_t i;

	logmemory_quick(free_beg);
	gd = mycpu;

	/*
	 * Calculate the slab from the pointer
	 */
	slab = (void *)((uintptr_t)obj & ~KMALLOC_SLAB_MASK);
	delta = slab->findex - slab->aindex;
	KKASSERT(slab->magic == KMALLOC_SLAB_MAGIC && delta != slab->ncount);

	/*
	 * We can only safely adjust the statistics for the current cpu.
	 * Don't try to track down the original cpu.  The statistics will
	 * be collected and fixed up by vmstat -m (etc).
	 */
	use = &slab->type->ks_use[gd->gd_cpuid];
	--use->inuse;
	use->memuse -= slab->objsize;

	/*
	 * There MUST be free space in the slab since we are returning
	 * the obj to the same slab it was allocated from.
	 */
	i = atomic_fetchadd_long(&slab->findex, 1);
	i = i % slab->ncount;
	if (slab->fobjs[i] != NULL) {
		kprintf("_kfree_obj failure %zd/%zd/%zd\n",
			slab->aindex, slab->findex, slab->ncount);
	}
#ifdef KMALLOC_CHECK_DOUBLE_FREE
	bmap_clr(slab, obj);
#endif
	KKASSERT(slab->fobjs[i] == NULL);
	slab->fobjs[i] = obj;
	atomic_add_long(&slab->xindex, 1);	/* synchronizer */

	logmemory_quick(free_end);
}
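
/*
 * Illustrative note: fobjs[] behaves as a ring.  With ncount=5,
 * aindex=7, findex=9 the slab holds 9-7=2 free objects, at
 * fobjs[7%5]=fobjs[2] and fobjs[8%5]=fobjs[3].  _kfree_obj() reserves
 * a slot by atomically bumping findex, stores the object, then bumps
 * xindex, so xindex == findex guarantees that all reserved stores
 * have completed.
 */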