2 * Copyright (c) 2000-2002 Sendmail, Inc. and its suppliers.
5 * By using this file, you agree to the terms and conditions set
6 * forth in the LICENSE file which can be found at the top level of
7 * the sendmail distribution.
11 SM_RCSID("@(#)$Id: rpool.c,v 1.24 2002/01/11 21:54:43 ca Exp $")
15 ** For documentation, see rpool.html
21 #include <sm/varargs.h>
25 #endif /* _FFR_PERF_RPOOL */
27 const char SmRpoolMagic[] = "sm_rpool";
32 char align[SM_ALIGN_SIZE];
40 #define BIG_OBJECT_RATIO 10
43 ** SM_RPOOL_ALLOCBLOCK_X -- allocate a new block for an rpool.
46 ** rpool -- rpool to which the block should be added.
47 ** size -- size of block.
53 ** F:sm_heap -- out of memory
57 sm_rpool_allocblock_x(rpool, size)
63 p = sm_malloc_x(sizeof(SM_POOLHDR_T) + size);
64 p->sm_pnext = rpool->sm_pools;
66 return (char*) p + sizeof(SM_POOLHDR_T);
70 ** SM_RPOOL_ALLOCBLOCK -- allocate a new block for an rpool.
73 ** rpool -- rpool to which the block should be added.
74 ** size -- size of block.
77 ** Pointer to block, NULL on failure.
81 sm_rpool_allocblock(rpool, size)
87 p = sm_malloc(sizeof(SM_POOLHDR_T) + size);
90 p->sm_pnext = rpool->sm_pools;
92 return (char*) p + sizeof(SM_POOLHDR_T);
96 ** SM_RPOOL_MALLOC_TAGGED_X -- allocate memory from rpool
99 ** rpool -- rpool from which memory should be allocated;
100 ** can be NULL, use sm_malloc() then.
101 ** size -- size of block.
103 ** line -- line number in file.
104 ** group -- heap group for debugging.
110 ** F:sm_heap -- out of memory
113 ** if size == 0 and the rpool is new (no memory
114 ** allocated yet) NULL is returned!
115 ** We could solve this by
116 ** - wasting 1 byte (size < avail)
117 ** - checking for rpool->sm_poolptr != NULL
118 ** - not asking for 0 sized buffer
/*
**  NOTE(review): this extract is missing interior lines (parameter
**  declarations, braces, the SM_HEAP_CHECK prototype arm, and the
**  rpool == NULL test) -- the fused line numbers jump.  Comments below
**  describe only what the visible lines establish.
*/
123 sm_rpool_malloc_tagged_x(rpool, size, file, line, group)
129 #else /* SM_HEAP_CHECK */
130 sm_rpool_malloc_x(rpool, size)
133 #endif /* SM_HEAP_CHECK */
/* no rpool supplied: fall back to the plain heap allocator (raises
** F:sm_heap on out-of-memory, like the rest of this function) */
138 return sm_malloc_tagged_x(size, file, line, group);
140 /* Ensure that size is properly aligned. */
/* round size up to the next multiple of SM_ALIGN_SIZE */
141 if (size & SM_ALIGN_BITS)
142 size = (size & ~SM_ALIGN_BITS) + SM_ALIGN_SIZE;
144 /* The common case. This is optimized for speed. */
/* carve the request out of the current pool block: bump the cursor
** and shrink the remaining space -- no malloc needed */
145 if (size <= rpool->sm_poolavail)
147 ptr = rpool->sm_poolptr;
148 rpool->sm_poolptr += size;
149 rpool->sm_poolavail -= size;
154 ** The slow case: we need to call malloc.
155 ** The SM_REQUIRE assertion is deferred until now, for speed.
156 ** That's okay: we set rpool->sm_poolavail to 0 when we free an rpool,
157 ** so the common case code won't be triggered on a dangling pointer.
160 SM_REQUIRE(rpool->sm_magic == SmRpoolMagic);
163 ** If size > sm_poolsize, then malloc a new block especially for
164 ** this request. Future requests will be allocated from the
167 ** What if the current pool is mostly unallocated, and the current
168 ** request is larger than the available space, but < sm_poolsize?
169 ** If we discard the current pool, and start allocating from a new
170 ** pool, then we will be wasting a lot of space. For this reason,
171 ** we malloc a block just for the current request if size >
172 ** sm_bigobjectsize, where sm_bigobjectsize <= sm_poolsize.
173 ** Thus, the most space that we will waste at the end of a pool
174 ** is sm_bigobjectsize - 1.
/* big object: dedicated block; the current pool block and its
** remaining free space are left untouched */
177 if (size > rpool->sm_bigobjectsize)
180 ++rpool->sm_nbigblocks;	/* _FFR_PERF_RPOOL statistics only */
181 #endif /* _FFR_PERF_RPOOL */
182 return sm_rpool_allocblock_x(rpool, size);
184 SM_ASSERT(rpool->sm_bigobjectsize <= rpool->sm_poolsize);
/* normal slow path: start a fresh full-size pool block, satisfy this
** request from its front, and make the remainder the new carve space */
185 ptr = sm_rpool_allocblock_x(rpool, rpool->sm_poolsize);
186 rpool->sm_poolptr = ptr + size;
187 rpool->sm_poolavail = rpool->sm_poolsize - size;
190 #endif /* _FFR_PERF_RPOOL */
195 ** SM_RPOOL_MALLOC_TAGGED -- allocate memory from rpool
198 ** rpool -- rpool from which memory should be allocated;
199 ** can be NULL, use sm_malloc() then.
200 ** size -- size of block.
202 ** line -- line number in file.
203 ** group -- heap group for debugging.
206 ** Pointer to block, NULL on failure.
209 ** if size == 0 and the rpool is new (no memory
210 ** allocated yet) NULL is returned!
211 ** We could solve this by
212 ** - wasting 1 byte (size < avail)
213 ** - checking for rpool->sm_poolptr != NULL
214 ** - not asking for 0 sized buffer
/*
**  NOTE(review): this extract is missing interior lines (parameter
**  declarations, braces, the SM_HEAP_CHECK prototype arm, and the
**  rpool == NULL test).  Comments describe only the visible lines.
**  Unlike the _x variant, this function reports failure by returning
**  NULL instead of raising F:sm_heap.
*/
219 sm_rpool_malloc_tagged(rpool, size, file, line, group)
225 #else /* SM_HEAP_CHECK */
226 sm_rpool_malloc(rpool, size)
229 #endif /* SM_HEAP_CHECK */
/* no rpool supplied: fall back to the plain heap allocator, which may
** return NULL on failure */
234 return sm_malloc_tagged(size, file, line, group);
236 /* Ensure that size is properly aligned. */
/* round size up to the next multiple of SM_ALIGN_SIZE */
237 if (size & SM_ALIGN_BITS)
238 size = (size & ~SM_ALIGN_BITS) + SM_ALIGN_SIZE;
240 /* The common case. This is optimized for speed. */
/* carve the request out of the current pool block -- no malloc */
241 if (size <= rpool->sm_poolavail)
243 ptr = rpool->sm_poolptr;
244 rpool->sm_poolptr += size;
245 rpool->sm_poolavail -= size;
250 ** The slow case: we need to call malloc.
251 ** The SM_REQUIRE assertion is deferred until now, for speed.
252 ** That's okay: we set rpool->sm_poolavail to 0 when we free an rpool,
253 ** so the common case code won't be triggered on a dangling pointer.
256 SM_REQUIRE(rpool->sm_magic == SmRpoolMagic);
259 ** If size > sm_poolsize, then malloc a new block especially for
260 ** this request. Future requests will be allocated from the
263 ** What if the current pool is mostly unallocated, and the current
264 ** request is larger than the available space, but < sm_poolsize?
265 ** If we discard the current pool, and start allocating from a new
266 ** pool, then we will be wasting a lot of space. For this reason,
267 ** we malloc a block just for the current request if size >
268 ** sm_bigobjectsize, where sm_bigobjectsize <= sm_poolsize.
269 ** Thus, the most space that we will waste at the end of a pool
270 ** is sm_bigobjectsize - 1.
/* big object: dedicated block (may be NULL on allocation failure) */
273 if (size > rpool->sm_bigobjectsize)
276 ++rpool->sm_nbigblocks;	/* _FFR_PERF_RPOOL statistics only */
277 #endif /* _FFR_PERF_RPOOL */
278 return sm_rpool_allocblock(rpool, size);
280 SM_ASSERT(rpool->sm_bigobjectsize <= rpool->sm_poolsize);
281 ptr = sm_rpool_allocblock(rpool, rpool->sm_poolsize);
/* NOTE(review): the fused line numbers jump 281 -> 284 here, so a
** `ptr == NULL` failure check appears to have been lost from this
** extract -- as shown, a failed allocation would be dereferenced;
** verify against the upstream source */
284 rpool->sm_poolptr = ptr + size;
285 rpool->sm_poolavail = rpool->sm_poolsize - size;
288 #endif /* _FFR_PERF_RPOOL */
293 ** SM_RPOOL_NEW_X -- create a new rpool.
296 ** parent -- pointer to parent rpool, can be NULL.
299 ** Pointer to new rpool.
/*
**  NOTE(review): this extract is missing interior lines (declarations,
**  braces, the parent == NULL test, and the remaining arguments of the
**  sm_rpool_attach_x call).  Comments describe only the visible lines.
*/
303 sm_rpool_new_x(parent)
/* allocate the rpool header itself from the plain heap; sm_malloc_x
** raises F:sm_heap on out-of-memory */
308 rpool = sm_malloc_x(sizeof(SM_RPOOL_T));
310 rpool->sm_parentlink = NULL;
/* register this rpool as a resource of its parent so that freeing the
** parent also frees this rpool */
314 rpool->sm_parentlink = sm_rpool_attach_x(parent,
315 (SM_RPOOL_RFREE_T) sm_rpool_free,
322 rpool->sm_magic = SmRpoolMagic;
/* default sizing: a pool block fills POOLSIZE bytes including its
** header; requests above 1/BIG_OBJECT_RATIO of that get their own
** dedicated blocks */
324 rpool->sm_poolsize = POOLSIZE - sizeof(SM_POOLHDR_T);
325 rpool->sm_bigobjectsize = rpool->sm_poolsize / BIG_OBJECT_RATIO;
/* start with no pool blocks: the first allocation takes the slow path */
326 rpool->sm_poolptr = NULL;
327 rpool->sm_poolavail = 0;
328 rpool->sm_pools = NULL;
/* start with an empty resource list as well */
330 rpool->sm_rptr = NULL;
331 rpool->sm_ravail = 0;
332 rpool->sm_rlists = NULL;
334 rpool->sm_nbigblocks = 0;	/* _FFR_PERF_RPOOL statistics */
335 rpool->sm_npools = 0;
336 #endif /* _FFR_PERF_RPOOL */
342 ** SM_RPOOL_SETSIZES -- set sizes for rpool.
345 ** poolsize -- size of a single rpool block.
346 ** bigobjectsize -- if this size is exceeded, an individual
347 ** block is allocated (must be less or equal poolsize).
/*
**  NOTE(review): this extract is missing interior lines (the rpool and
**  poolsize parameter declarations, braces).  Comments describe only
**  the visible lines.
*/
354 sm_rpool_setsizes(rpool, poolsize, bigobjectsize)
357 size_t bigobjectsize;
/* big objects must never exceed a pool block */
359 SM_REQUIRE(poolsize >= bigobjectsize);
/* NOTE(review): the fused numbers jump 359 -> 361, so a guard such as
** `if (poolsize == 0)` appears to have been lost here -- as shown, the
** next line would unconditionally override the caller's poolsize with
** the default; verify against the upstream source */
361 poolsize = POOLSIZE - sizeof(SM_POOLHDR_T);
/* 0 means "use the default ratio of the (possibly defaulted) poolsize" */
362 if (bigobjectsize == 0)
363 bigobjectsize = poolsize / BIG_OBJECT_RATIO;
364 rpool->sm_poolsize = poolsize;
365 rpool->sm_bigobjectsize = bigobjectsize;
369 ** SM_RPOOL_FREE -- free an rpool and release all of its resources.
372 ** rpool -- rpool to free.
/*
**  NOTE(review): this extract is missing interior lines (the function
**  header, braces, loop bodies, sm_free calls).  Comments describe only
**  the visible lines.
*/
382 SM_RLIST_T *rl, *rnext;
383 SM_RESOURCE_T *r, *rmax;
384 SM_POOLLINK_T *pp, *pnext;
390 ** It's important to free the resources before the memory pools,
391 ** because the resource free functions might modify the contents
392 ** of the memory pools.
/* walk the resource lists: the head list is only partially filled
** (entries up to sm_rptr), later lists are full (SM_RLIST_MAX) */
395 rl = rpool->sm_rlists;
398 rmax = rpool->sm_rptr;
401 for (r = rl->sm_rvec; r < rmax; ++r)
/* a NULL sm_rfree marks a slot whose resource needs no cleanup */
403 if (r->sm_rfree != NULL)
404 r->sm_rfree(r->sm_rcontext);
/* save the next pointer before the list node itself is released */
406 rnext = rl->sm_rnext;
411 rmax = &rl->sm_rvec[SM_RLIST_MAX];
416 ** Now free the memory pools.
419 for (pp = rpool->sm_pools; pp != NULL; pp = pnext)
/* save the link before freeing the block it lives inside */
421 pnext = pp->sm_pnext;
426 ** Disconnect rpool from its parent.
/* clear the parent's slot so the parent will not free us again */
429 if (rpool->sm_parentlink != NULL)
430 *rpool->sm_parentlink = NULL;
433 ** Setting these fields to zero means that any future attempt
434 ** to use the rpool after it is freed will cause an assertion failure.
437 rpool->sm_magic = NULL;
438 rpool->sm_poolavail = 0;
439 rpool->sm_ravail = 0;
/* _FFR_PERF_RPOOL: log pool-usage statistics, then reset counters */
442 if (rpool->sm_nbigblocks > 0 || rpool->sm_npools > 1)
444 "perf: rpool=%lx, sm_nbigblocks=%d, sm_npools=%d",
445 (long) rpool, rpool->sm_nbigblocks, rpool->sm_npools);
446 rpool->sm_nbigblocks = 0;
447 rpool->sm_npools = 0;
448 #endif /* _FFR_PERF_RPOOL */
453 ** SM_RPOOL_ATTACH_X -- attach a resource to an rpool.
456 ** rpool -- rpool to which resource should be attached.
457 ** rfree -- function to call when rpool is freed.
458 ** rcontext -- argument for function to call when rpool is freed.
461 ** Pointer to allocated function.
464 ** F:sm_heap -- out of memory
468 sm_rpool_attach_x(rpool, rfree, rcontext)
470 SM_RPOOL_RFREE_T rfree;
476 SM_REQUIRE_ISA(rpool, SmRpoolMagic);
478 if (rpool->sm_ravail == 0)
480 rl = sm_malloc_x(sizeof(SM_RLIST_T));
481 rl->sm_rnext = rpool->sm_rlists;
482 rpool->sm_rlists = rl;
483 rpool->sm_rptr = rl->sm_rvec;
484 rpool->sm_ravail = SM_RLIST_MAX;
487 a = &rpool->sm_rptr->sm_rfree;
488 rpool->sm_rptr->sm_rfree = rfree;
489 rpool->sm_rptr->sm_rcontext = rcontext;