/*
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/kern_mpipe.c,v 1.3 2004/03/29 14:06:31 joerg Exp $
 */
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/kernel.h>
32 #include <sys/slaballoc.h>
34 #include <sys/vmmeter.h>
36 #include <sys/thread.h>
37 #include <sys/globaldata.h>
38 #include <sys/mpipe.h>
40 #include <sys/thread2.h>
/*
 * Forward declaration: mpipe_rebalance() is defined at the bottom of this
 * file and is called from the alloc/free paths above its definition.
 */
42 void mpipe_rebalance(malloc_pipe_t mpipe);
/*
 * Header overlaid on each free buffer while it sits on an mpipe free
 * queue; only the TAILQ linkage is visible in this view.
 * NOTE(review): the closing brace and typedef name (presumably
 * *mpipe_buf_t, used below) are not visible in this chunk -- confirm
 * against the full file.
 */
44 typedef struct mpipe_buf {
45 TAILQ_ENTRY(mpipe_buf) entry;
/*
 * mpipe_init() - initialize a malloc pipeline for the specified malloc
 * type and allocation size, preallocating buffers for the per-cpu queues
 * and the shared global queue.
 *
 * Visible behavior: queue[0] is the global queue and queue[1..ncpus] are
 * the per-cpu queues.  Per-cpu queues are filled to cpu_max up front;
 * the remaining global_nnom buffers are placed on queue[0].
 *
 * NOTE(review): this view of the file is missing lines (return type,
 * part of the parameter list, local declarations, closing braces);
 * comments below describe only what is shown.
 */
54 mpipe_init(malloc_pipe_t mpipe, malloc_type_t type, int bytes,
55 int global_nnom, int global_nmax, int cpu_nmax,
58 struct mpipe_buf *buf;
/*
 * Each free buffer is overlaid with a struct mpipe_buf for queueing,
 * so the allocation size must be at least that large.
 */
62 if (bytes < sizeof(struct mpipe_buf))
63 bytes = sizeof(struct mpipe_buf);
/*
 * Raise the nominal preallocation so every per-cpu queue can be filled
 * to cpu_nmax, and make sure the global maximum covers the nominal
 * count.
 */
65 if (global_nnom < cpu_nmax * ncpus)
66 global_nnom = cpu_nmax * ncpus;
67 if (global_nnom > global_nmax)
68 global_nmax = global_nnom;
/* Start from a zeroed pipeline structure, then fill in the knobs. */
70 bzero(mpipe, sizeof(struct malloc_pipe));
73 mpipe->max_count = global_nmax;
74 mpipe->cpu_max = cpu_nmax;
75 mpipe->mpflags = mpflags;
/* Unless MPF_NO_ZERO is set, buffers are handed out zeroed (see allocs). */
78 if ((mpflags & MPF_NO_ZERO) == 0)
/*
 * NOTE(review): this initializes queues 0..SMP_MAXCPU inclusive while
 * mpipe_done() below iterates i < SMP_MAXCPU -- one of the two bounds
 * looks off by one.  Confirm the queue[] array size in <sys/mpipe.h>.
 */
81 for (i = 0; i <= SMP_MAXCPU; i++)
82 TAILQ_INIT(&mpipe->queue[i]);
/* Fill each per-cpu queue (indices 1..ncpus) up to cpu_max buffers. */
84 for (i = 1; i <= ncpus; i++) {
85 while (mpipe->queue_len[i] < mpipe->cpu_max) {
86 buf = malloc(mpipe->bytes, mpipe->type, mflags);
87 TAILQ_INSERT_TAIL(&mpipe->queue[i], buf, entry);
89 ++mpipe->queue_len[i];
/* Remaining nominal buffers go onto the shared global queue[0]. */
94 TAILQ_INIT(&mpipe->queue[0]);
95 while (--global_nnom >= 0) {
96 buf = malloc(mpipe->bytes, mpipe->type, mflags);
97 TAILQ_INSERT_TAIL(&mpipe->queue[0], buf, entry);
99 ++mpipe->queue_len[0];
/*
 * mpipe_done() - tear down a pipeline, freeing every queued buffer back
 * to the kernel allocator.  Runs under the mpipe token.
 *
 * NOTE(review): lines are missing from this view (return type, locals,
 * closing braces, final reltoken); comments describe only what is shown.
 */
104 mpipe_done(malloc_pipe_t mpipe)
110 lwkt_gettoken(&ilock, &mpipe->mpipe_token);
/*
 * Every outstanding buffer must already be back on the global queue.
 * NOTE(review): the assertion implies the per-cpu queues are expected
 * to be empty at teardown, yet the loop below still drains all queues
 * -- presumably defensive; confirm intended teardown protocol.
 */
111 KKASSERT(mpipe->queue_len[0] == mpipe->total_count);
/*
 * NOTE(review): bound is i < SMP_MAXCPU while mpipe_init() initializes
 * i <= SMP_MAXCPU -- the last queue may be skipped here.  Confirm
 * against the queue[] declaration in <sys/mpipe.h>.
 */
112 for (i = 0; i < SMP_MAXCPU; i++) {
113 while(! TAILQ_EMPTY(&mpipe->queue[i])) {
114 buf = TAILQ_FIRST(&mpipe->queue[i]);
115 KKASSERT(buf != NULL);
116 TAILQ_REMOVE(&mpipe->queue[i], buf, entry);
117 --mpipe->queue_len[i];
118 --mpipe->total_count;
119 free(buf, mpipe->type);
121 KKASSERT(mpipe->queue_len[i] == 0);
/* All counters must net out to zero once every queue is drained. */
123 KKASSERT(mpipe->total_count == 0);
/*
 * mpipe_alloc_waitok() - allocate a buffer from the pipeline, allowed to
 * block.  Only the global queue[0] is drained here (the per-cpu queues
 * are left for the nowait path); if the pipeline is below max_count a
 * fresh buffer is malloc'd with M_WAITOK, otherwise the caller sleeps
 * on the mpipe until a buffer is freed.
 *
 * NOTE(review): lines are missing from this view (return type, loop
 * structure around the retry/tsleep, returns, closing braces); comments
 * describe only what is shown.
 */
130 mpipe_alloc_waitok(malloc_pipe_t mpipe)
132 mpipe_buf_t buf = NULL;
134 int mflags = M_WAITOK;
136 lwkt_gettoken(&ilock, &mpipe->mpipe_token);
/* Fast path: pop a buffer off the shared global queue. */
140 if (mpipe->queue_len[0] > 0) {
141 buf = TAILQ_FIRST(&mpipe->queue[0]);
142 KKASSERT(buf != NULL);
143 TAILQ_REMOVE(&mpipe->queue[0], buf, entry);
144 --mpipe->queue_len[0];
/* Hand out zeroed memory unless the pipeline opted out via MPF_NO_ZERO. */
145 if ((mpipe->mpflags & MPF_NO_ZERO) == 0)
146 bzero(buf, mpipe->bytes);
/* Drop the token before rebalancing (rebalance retakes it itself). */
148 lwkt_reltoken(&ilock);
149 mpipe_rebalance(mpipe);
/* Under the global cap: account for and allocate a brand new buffer. */
153 if (mpipe->total_count < mpipe->max_count) {
154 if ((mpipe->mpflags & MPF_NO_ZERO) == 0)
157 mpipe->total_count++;
159 lwkt_reltoken(&ilock);
/* M_WAITOK cannot fail, hence the assertion rather than a NULL check. */
160 buf = malloc(mpipe->bytes, mpipe->type, mflags);
161 KKASSERT(buf != NULL);
/* At the cap: sleep until mpipe_free() wakes us, then presumably retry. */
164 tsleep(mpipe, 0, "mpipe", 0);
/*
 * mpipe_alloc_nowait() - allocate a buffer from the pipeline without
 * blocking.  Order of attempts: the local cpu queue (token-free fast
 * path), then the global queue under the token, then the local queue
 * again (an interrupt may have refilled it), and finally a fresh
 * M_NOWAIT allocation if the pipeline is below max_count.
 *
 * NOTE(review): lines are missing from this view (return type, returns,
 * crit/interrupt handling hinted at by the comments, closing braces);
 * comments describe only what is shown.
 */
175 mpipe_alloc_nowait(malloc_pipe_t mpipe)
177 globaldata_t gd = mycpu;
178 mpipe_buf_t buf = NULL;
/* queue[0] is global; per-cpu queues start at index 1. */
180 int my_queue = gd->gd_cpuid + 1;
181 int mflags = M_NOWAIT;
183 /*
 * First check the local CPU queue to avoid token acquisition.
 * NOTE(review): this touches queue[my_queue] without the token --
 * presumably safe because only the owning cpu manipulates its queue;
 * confirm against the locking rules in <sys/mpipe.h>.
 */
185 if (mpipe->queue_len[my_queue] > 0) {
186 buf = TAILQ_FIRST(&mpipe->queue[my_queue]);
187 KKASSERT(buf != NULL);
188 TAILQ_REMOVE(&mpipe->queue[my_queue], buf, entry);
189 --mpipe->queue_len[my_queue];
190 if ((mpipe->mpflags & MPF_NO_ZERO) == 0)
191 bzero(buf, mpipe->bytes);
/* Top the local queue back up from the global queue. */
192 mpipe_rebalance(mpipe);
196 /* We have to acquire the token, unblock interrupts and get it. */
199 lwkt_gettoken(&ilock, &mpipe->mpipe_token);
/* Second attempt: drain the shared global queue under the token. */
202 if (mpipe->queue_len[0] > 0) {
203 buf = TAILQ_FIRST(&mpipe->queue[0]);
204 KKASSERT(buf != NULL);
205 TAILQ_REMOVE(&mpipe->queue[0], buf, entry);
206 --mpipe->queue_len[0];
207 if ((mpipe->mpflags & MPF_NO_ZERO) == 0)
208 bzero(buf, mpipe->bytes);
210 lwkt_reltoken(&ilock);
214 /* Recheck the local CPU queue again in case an interrupt freed something. */
215 if (mpipe->queue_len[my_queue] > 0) {
216 buf = TAILQ_FIRST(&mpipe->queue[my_queue]);
217 KKASSERT(buf != NULL);
218 TAILQ_REMOVE(&mpipe->queue[my_queue], buf, entry);
219 --mpipe->queue_len[my_queue];
220 if ((mpipe->mpflags & MPF_NO_ZERO) == 0)
221 bzero(buf, mpipe->bytes);
223 lwkt_reltoken(&ilock);
/*
 * Last resort: a fresh allocation, but only below the global cap and
 * with M_NOWAIT so this path never sleeps (may return NULL).
 */
227 if (mpipe->total_count < mpipe->max_count) {
228 if ((mpipe->mpflags & MPF_NO_ZERO) == 0)
231 buf = malloc(mpipe->bytes, mpipe->type, mflags);
233 mpipe->total_count++;
237 lwkt_reltoken(&ilock);
/*
 * mpipe_free() - return a buffer to the pipeline and wake any sleepers.
 * The buffer first tries to refill the caller's per-cpu queue, then the
 * global queue; if both are at capacity it is freed back to the kernel
 * allocator and total_count is dropped.
 *
 * NOTE(review): lines are missing from this view -- in particular buf
 * is initialized to NULL and never visibly assigned from vbuf, so the
 * missing lines presumably contain `buf = vbuf` (and a NULL early
 * return); confirm against the full file.
 */
245 mpipe_free(malloc_pipe_t mpipe, void *vbuf)
247 globaldata_t gd = mycpu;
248 mpipe_buf_t buf = NULL;
/* queue[0] is global; per-cpu queues start at index 1. */
250 int my_queue = gd->gd_cpuid + 1;
255 lwkt_gettoken(&ilock, &mpipe->mpipe_token);
258 /* first try to refill the current CPU queue */
259 if (mpipe->queue_len[my_queue] < mpipe->cpu_max) {
260 TAILQ_INSERT_TAIL(&mpipe->queue[my_queue], buf, entry);
261 ++mpipe->queue_len[my_queue];
/* A waiter in mpipe_alloc_waitok() may be sleeping on the mpipe. */
263 if (mpipe->pending) {
267 lwkt_reltoken(&ilock);
268 mpipe_rebalance(mpipe);
/* Local queue full: park the buffer on the global queue if under cap. */
272 if (mpipe->total_count < mpipe->max_count) {
273 TAILQ_INSERT_TAIL(&mpipe->queue[0], buf, entry);
274 ++mpipe->queue_len[0];
276 if (mpipe->pending) {
280 lwkt_reltoken(&ilock);
281 mpipe_rebalance(mpipe);
/* Both queues saturated: give the buffer back to the kernel allocator. */
285 --mpipe->total_count;
287 lwkt_reltoken(&ilock);
288 free(buf, mpipe->type);
/*
 * mpipe_rebalance() - size the calling cpu's local queue to exactly
 * cpu_max entries, pulling buffers from (or pushing excess back to) the
 * global queue[0].  Runs entirely under the mpipe token.
 *
 * NOTE(review): lines are missing from this view (return type, buf
 * declaration, closing braces); comments describe only what is shown.
 */
296 mpipe_rebalance(malloc_pipe_t mpipe)
298 globaldata_t gd = mycpu;
/* queue[0] is global; per-cpu queues start at index 1. */
301 int my_queue = gd->gd_cpuid + 1;
303 lwkt_gettoken(&ilock, &mpipe->mpipe_token);
/* Top up the local queue from the global queue while both conditions hold. */
305 while (mpipe->queue_len[my_queue] < mpipe->cpu_max &&
306 mpipe->queue_len[0] > 0) {
307 buf = TAILQ_FIRST(&mpipe->queue[0]);
308 TAILQ_REMOVE(&mpipe->queue[0], buf, entry);
309 TAILQ_INSERT_TAIL(&mpipe->queue[my_queue], buf, entry);
310 ++mpipe->queue_len[my_queue];
311 --mpipe->queue_len[0];
/* Shed any excess above cpu_max back onto the global queue. */
313 while (mpipe->queue_len[my_queue] > mpipe->cpu_max) {
314 buf = TAILQ_FIRST(&mpipe->queue[my_queue]);
315 TAILQ_REMOVE(&mpipe->queue[my_queue], buf, entry);
316 TAILQ_INSERT_TAIL(&mpipe->queue[0], buf, entry);
317 ++mpipe->queue_len[0];
318 --mpipe->queue_len[my_queue];
321 lwkt_reltoken(&ilock);