2 * Copyright (c) 2019-2021 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
19 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
20 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
22 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
24 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
26 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #ifndef _SYS__MALLOC_H_
31 #define _SYS__MALLOC_H_
34 * Do not include this header outside _KERNEL or _KERNEL_STRUCTURES scopes.
35 * Used in <sys/user.h>.
38 #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
39 #include <sys/cdefs.h> /* for __cache_align */
40 #include <sys/spinlock.h> /* for spinlock */
41 #include <machine/stdint.h> /* for __* types */
42 #include <machine/param.h> /* for SMP_MAXCPU */
45 * A kmalloc slab (used with KSF_OBJSIZE) holds N fixed-size objects
46 * in a fixed (typically 32KB) block of memory prefaced by the structure.
48 #define KMALLOC_SLAB_SIZE (size_t)(128 * 1024)
49 #define KMALLOC_SLAB_MASK ((size_t)(KMALLOC_SLAB_SIZE - 1))
51 #define KMALLOC_SLAB_MAXOBJS (KMALLOC_SLAB_SIZE / __VM_CACHELINE_SIZE)
52 #define KMALLOC_LOOSE_SIZE (KMALLOC_SLAB_SIZE * 4)
54 #define KMALLOC_SLAB_MAGIC 0x6b736c62
55 #define KMALLOC_MAXFREEMAGS 4
57 #define KMALLOC_CHECK_DOUBLE_FREE
61 struct kmalloc_slab *next; /* next mag in list */
62 struct malloc_type *type; /* who does this belong to */
64 uint32_t orig_cpuid; /* originally allocated on */
65 size_t offset; /* copied from kmalloc_mgt */
66 size_t objsize; /* copied from malloc_type */
67 size_t ncount; /* copied from kmalloc_mgt */
68 size_t aindex; /* start of allocations */
69 size_t findex; /* end of frees */
70 size_t xindex; /* synchronizer */
71 struct kmalloc_mgt *mgt;
72 uint64_t bmap[(KMALLOC_SLAB_MAXOBJS + 63) / 64];
73 void *fobjs[1]; /* list of free objects */
77 * pcpu slab management structure for kmalloc zone.
79 * The intent is to try to improve cache characteristics and to reduce
80 * fragmentation by keeping collections localized. The curmag list
81 * used for allocations is loosely sorted by fullness, with the most-full
82 * magazine at the head and the least-full magazine at the tail.
84 * Loosely speaking we want to allocate from the most-full magazine to best
85 * reduce fragmentation.
87 * The kmalloc zone also uses one of these as a global management structure;
88 * excess emptymags are regularly moved to the global structure.
92 struct kmalloc_slab *active; /* pcpu */
93 struct kmalloc_slab *alternate; /* pcpu */
94 struct kmalloc_slab *partial; /* global */
95 struct kmalloc_slab *full; /* global */
96 struct kmalloc_slab *empty; /* global */
97 struct kmalloc_slab **empty_tailp; /* global */
98 size_t slab_offset; /* first object in slab */
99 size_t slab_count; /* objects per slab */
100 size_t npartial; /* counts */
106 * The malloc tracking structure. Note that per-cpu entries must be
107 * aggregated for accurate statistics, they do not actually break the
108 * stats down by cpu (e.g. the cpu freeing memory will subtract from
109 * its slot, not the originating cpu's slot).
111 * SMP_MAXCPU is used so modules which use malloc remain compatible
112 * between UP and SMP.
114 * WARNING: __cachealign typically represents 64 byte alignment, so
115 * this structure may be larger than expected.
117 * WARNING: loosememuse is transferred to ks_loosememuse and zeroed
118 * often (e.g. uses atomic_swap_long()). It allows pcpu
119 * updates to be taken into account without causing lots
120 * of cache ping-pongs
125 __int64_t calls; /* allocations counter (total) */
126 __size_t loosememuse;
127 struct kmalloc_mgt mgt; /* pcpu object store */
131 struct malloc_type *ks_next; /* next in list */
132 __size_t ks_loosememuse; /* (inaccurate) aggregate memuse */
133 __size_t ks_limit; /* most that are allowed to exist */
134 __uint64_t ks_unused0;
135 __uint32_t ks_flags; /* KSF_x flags */
136 __uint32_t ks_magic; /* if it's not magic, don't touch it */
137 const char *ks_shortdesc; /* short description */
138 __size_t ks_objsize; /* single size if non-zero */
139 struct kmalloc_use *ks_use;
140 struct kmalloc_use ks_use0; /* dummy prior to SMP startup */
141 struct kmalloc_mgt ks_mgt; /* rollup object store */
144 typedef struct malloc_type *malloc_type_t;
146 #define MALLOC_DECLARE(type) \
147 extern struct malloc_type type[1] /* ref as ptr */
149 #define KSF_OBJSIZE 0x00000001 /* zone used for one object type/size */
150 #define KSF_POLLING 0x00000002 /* poll in progress */
152 #define KMGD_MAXFREESLABS 128
154 typedef struct KMGlobalData {
155 struct kmalloc_slab *free_slabs;
156 struct kmalloc_slab *remote_free_slabs;
163 #endif /* !_SYS__MALLOC_H_ */