2 * Copyright (C) 2004 Internet Systems Consortium, Inc. ("ISC")
3 * Copyright (C) 1997-2003 Internet Software Consortium.
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
10 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
11 * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
12 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
13 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
14 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
15 * PERFORMANCE OF THIS SOFTWARE.
18 /* $Id: mem.c,v 1.98.2.9 2004/03/09 06:11:48 marka Exp $ */
28 #include <isc/magic.h>
31 #include <isc/ondestroy.h>
32 #include <isc/string.h>
34 #include <isc/mutex.h>
/*
 * NOTE(review): this file is an elided excerpt of ISC's mem.c; lines are
 * missing between most statements (the closing #endif lines for these
 * conditionals are not visible here).
 */
/* Default the runtime debugging flags to "off" unless overridden at build. */
37 #ifndef ISC_MEM_DEBUGGING
38 #define ISC_MEM_DEBUGGING 0
40 LIBISC_EXTERNAL_DATA unsigned int isc_mem_debugging = ISC_MEM_DEBUGGING;
43 * Define ISC_MEM_USE_INTERNAL_MALLOC=1 to use the internal malloc()
44 * implementation in preference to the system one. The internal malloc()
45 * is very space-efficient, and quite fast on uniprocessor systems. It
46 * performs poorly on multiprocessor machines.
48 #ifndef ISC_MEM_USE_INTERNAL_MALLOC
49 #define ISC_MEM_USE_INTERNAL_MALLOC 0
/*
 * Tuning constants for the internal allocator and debug tracking.
 * DEF_MAX_SIZE: requests >= this bypass the size-class free lists.
 * DEF_MEM_TARGET: default basic-block size carved into fragments.
 */
56 #define DEF_MAX_SIZE 1100
57 #define DEF_MEM_TARGET 4096
58 #define ALIGNMENT_SIZE 8 /* must be a power of 2 */
59 #define NUM_BASIC_BLOCKS 64 /* must be > 1 */
60 #define TABLE_INCREMENT 1024
61 #define DEBUGLIST_COUNT 1024
/*
 * NOTE(review): fragments of three declarations follow (interior lines
 * elided): the debuglink_t record used for file/line allocation tracking,
 * the FLARG macros that thread file/line through the API, and pieces of
 * the free-list "element" and per-size "stats" structures.
 */
66 #if ISC_MEM_TRACKLINES
67 typedef struct debuglink debuglink_t;
/* Parallel arrays: slot i tracks one live allocation (ptr/file/line). */
69 ISC_LINK(debuglink_t) link;
70 const void *ptr[DEBUGLIST_COUNT];
71 const char *file[DEBUGLIST_COUNT];
72 unsigned int line[DEBUGLIST_COUNT];
/* FLARG adds hidden file/line parameters; FLARG_PASS forwards them. */
76 #define FLARG_PASS , file, line
77 #define FLARG , const char *file, int line
83 typedef struct element element;
90 * This structure must be ALIGNMENT_SIZE bytes.
94 char bytes[ALIGNMENT_SIZE];
/* Per-size-class statistics; blocks/freefrags only for internal malloc. */
100 unsigned long totalgets;
101 #if ISC_MEM_USE_INTERNAL_MALLOC
102 unsigned long blocks;
103 unsigned long freefrags;
104 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/*
 * NOTE(review): fragments of the isc_mem (memory context) structure and
 * the isc_mempool structure; many members are elided from this view.
 */
107 #define MEM_MAGIC ISC_MAGIC('M', 'e', 'm', 'C')
108 #define VALID_CONTEXT(c) ISC_MAGIC_VALID(c, MEM_MAGIC)
/* Pluggable low-level allocator pair used by this context. */
112 isc_ondestroy_t ondestroy;
114 isc_memalloc_t memalloc;
115 isc_memfree_t memfree;
118 isc_boolean_t checkfree;
119 struct stats * stats;
120 unsigned int references;
/* High/low water callback state (see isc_mem_setwater). */
127 isc_boolean_t hi_called;
128 isc_mem_water_t water;
130 ISC_LIST(isc_mempool_t) pools;
/* Internal-malloc bookkeeping: size-class free lists and the table of
 * basic blocks obtained from the underlying allocator. lowest/highest
 * bound the address range covered by basic blocks. */
132 #if ISC_MEM_USE_INTERNAL_MALLOC
134 element ** freelists;
135 element * basic_blocks;
136 unsigned char ** basic_table;
137 unsigned int basic_table_count;
138 unsigned int basic_table_size;
139 unsigned char * lowest;
140 unsigned char * highest;
141 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
143 #if ISC_MEM_TRACKLINES
144 ISC_LIST(debuglink_t) debuglist;
145 unsigned int debugging;
148 unsigned int memalloc_failures;
/* Memory-pool structure: a cache of fixed-size items on top of an mctx. */
151 #define MEMPOOL_MAGIC ISC_MAGIC('M', 'E', 'M', 'p')
152 #define VALID_MEMPOOL(c) ISC_MAGIC_VALID(c, MEMPOOL_MAGIC)
155 /* always unlocked */
156 unsigned int magic; /* magic number */
157 isc_mutex_t *lock; /* optional lock */
158 isc_mem_t *mctx; /* our memory context */
159 /* locked via the memory context's lock */
160 ISC_LINK(isc_mempool_t) link; /* next pool in this mem context */
161 /* optionally locked from here down */
162 element *items; /* low water item list */
163 size_t size; /* size of each item on this pool */
164 unsigned int maxalloc; /* max number of items allowed */
165 unsigned int allocated; /* # of items currently given out */
166 unsigned int freecount; /* # of items on reserved list */
167 unsigned int freemax; /* # of items allowed on free list */
168 unsigned int fillcount; /* # of items to fetch on each fill */
170 unsigned int gets; /* # of requests to this pool */
171 /* Debugging only. */
172 #if ISC_MEMPOOL_NAMES
173 char name[16]; /* printed name in stats reports */
178 * Private Inline-able.
/*
 * Trace macros compile to nothing unless ISC_MEM_TRACKLINES is set;
 * ADD_TRACE skips NULL pointers (failed allocations are not recorded).
 * NOTE(review): the #else/#endif lines are elided in this excerpt.
 */
181 #if ! ISC_MEM_TRACKLINES
182 #define ADD_TRACE(a, b, c, d, e)
183 #define DELETE_TRACE(a, b, c, d, e)
185 #define ADD_TRACE(a, b, c, d, e) \
186 do { if (b != NULL) add_trace_entry(a, b, c, d, e); } while (0)
187 #define DELETE_TRACE(a, b, c, d, e) delete_trace_entry(a, b, c, d, e)
189 #define MEM_TRACE ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0)
190 #define MEM_RECORD ((mctx->debugging & ISC_MEM_DEBUGRECORD) != 0)
193 print_active(isc_mem_t *ctx, FILE *out);
/*
 * add_trace_entry(): record ptr/size/file/line in the context's debug
 * list. Walks existing debuglink pages looking for a free slot, and
 * mallocs a fresh page if all are full. mctx must be locked.
 * NOTE(review): most of the body is elided in this excerpt.
 */
196 * mctx must be locked.
199 add_trace_entry(isc_mem_t *mctx, const void *ptr, unsigned int size
206 fprintf(stderr, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
209 "file %s line %u mctx %p\n"),
210 ptr, size, file, line, mctx);
215 dl = ISC_LIST_HEAD(mctx->debuglist);
/* A full page (count == DEBUGLIST_COUNT) has no free slot; skip it. */
217 if (dl->count == DEBUGLIST_COUNT)
219 for (i = 0 ; i < DEBUGLIST_COUNT ; i++) {
220 if (dl->ptr[i] == NULL) {
229 dl = ISC_LIST_NEXT(dl, link);
/* No slot found: allocate a new page with raw malloc (not the mctx,
 * to avoid recursing into the allocator being traced). */
232 dl = malloc(sizeof(debuglink_t));
235 ISC_LINK_INIT(dl, link);
236 for (i = 1 ; i < DEBUGLIST_COUNT ; i++) {
247 ISC_LIST_PREPEND(mctx->debuglist, dl, link);
/*
 * delete_trace_entry(): remove the record for ptr, unlinking the page
 * when its count drops to zero. mctx must be locked.
 */
251 delete_trace_entry(isc_mem_t *mctx, const void *ptr, unsigned int size,
252 const char *file, unsigned int line)
258 fprintf(stderr, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
261 "file %s line %u mctx %p\n"),
262 ptr, size, file, line, mctx);
267 dl = ISC_LIST_HEAD(mctx->debuglist);
269 for (i = 0 ; i < DEBUGLIST_COUNT ; i++) {
270 if (dl->ptr[i] == ptr) {
275 INSIST(dl->count > 0);
277 if (dl->count == 0) {
278 ISC_LIST_UNLINK(mctx->debuglist,
285 dl = ISC_LIST_NEXT(dl, link);
289 * If we get here, we didn't find the item on the list. We're
294 #endif /* ISC_MEM_TRACKLINES */
296 #if ISC_MEM_USE_INTERNAL_MALLOC
/* rmsize(): round size DOWN to a multiple of ALIGNMENT_SIZE. */
298 rmsize(size_t size) {
300 * round down to ALIGNMENT_SIZE
302 return (size & (~(ALIGNMENT_SIZE - 1)));
/* quantize(): round size UP to a multiple of ALIGNMENT_SIZE; a zero
 * request (line elided here) maps to one full alignment unit. */
306 quantize(size_t size) {
308 * Round up the result in order to get a size big
309 * enough to satisfy the request and be aligned on ALIGNMENT_SIZE
314 return (ALIGNMENT_SIZE);
315 return ((size + ALIGNMENT_SIZE - 1) & (~(ALIGNMENT_SIZE - 1)));
/*
 * more_basic_blocks(): grab NUM_BASIC_BLOCKS * mem_target bytes from the
 * underlying allocator, chain them as a singly linked list of basic
 * blocks, and record the chunk in basic_table (growing the table by
 * TABLE_INCREMENT entries when full). Returns a boolean; fails when
 * the quota would be exceeded or the allocator returns no memory.
 * Caller must hold the context lock.
 * NOTE(review): several body lines are elided in this excerpt.
 */
318 static inline isc_boolean_t
319 more_basic_blocks(isc_mem_t *ctx) {
321 unsigned char *curr, *next;
322 unsigned char *first, *last;
323 unsigned char **table;
324 unsigned int table_size;
328 /* Require: we hold the context lock. */
331 * Did we hit the quota for this context?
333 increment = NUM_BASIC_BLOCKS * ctx->mem_target;
334 if (ctx->quota != 0 && ctx->total + increment > ctx->quota)
/* Grow the basic-block pointer table when it is full. */
337 INSIST(ctx->basic_table_count <= ctx->basic_table_size);
338 if (ctx->basic_table_count == ctx->basic_table_size) {
339 table_size = ctx->basic_table_size + TABLE_INCREMENT;
340 table = (ctx->memalloc)(ctx->arg,
341 table_size * sizeof (unsigned char *));
343 ctx->memalloc_failures++;
346 if (ctx->basic_table_size != 0) {
347 memcpy(table, ctx->basic_table,
348 ctx->basic_table_size *
349 sizeof (unsigned char *));
350 (ctx->memfree)(ctx->arg, ctx->basic_table);
352 ctx->basic_table = table;
353 ctx->basic_table_size = table_size;
356 new = (ctx->memalloc)(ctx->arg, NUM_BASIC_BLOCKS * ctx->mem_target);
358 ctx->memalloc_failures++;
361 ctx->total += increment;
362 ctx->basic_table[ctx->basic_table_count] = new;
363 ctx->basic_table_count++;
/* Thread the blocks together into a free chain of basic blocks. */
366 next = curr + ctx->mem_target;
367 for (i = 0; i < (NUM_BASIC_BLOCKS - 1); i++) {
368 ((element *)curr)->next = (element *)next;
370 next += ctx->mem_target;
373 * curr is now pointing at the last block in the
376 ((element *)curr)->next = NULL;
/* Track the overall address range for pointer-validity checks. */
378 last = first + NUM_BASIC_BLOCKS * ctx->mem_target - 1;
379 if (first < ctx->lowest || ctx->lowest == NULL)
381 if (last > ctx->highest)
383 ctx->basic_blocks = new;
/*
 * more_frags(): take one basic block off ctx->basic_blocks and chop it
 * into fragments of new_size bytes, installing them on the free list for
 * that size class. Any remainder smaller than new_size (but still
 * alignment-sized) goes onto the free list matching its own size.
 * Caller must hold the context lock.
 * NOTE(review): several body lines are elided in this excerpt.
 */
388 static inline isc_boolean_t
389 more_frags(isc_mem_t *ctx, size_t new_size) {
393 unsigned char *curr, *next;
396 * Try to get more fragments by chopping up a basic block.
399 if (ctx->basic_blocks == NULL) {
400 if (!more_basic_blocks(ctx)) {
402 * We can't get more memory from the OS, or we've
403 * hit the quota for this context.
406 * XXXRTH "At quota" notification here.
412 total_size = ctx->mem_target;
413 new = ctx->basic_blocks;
414 ctx->basic_blocks = ctx->basic_blocks->next;
415 frags = total_size / new_size;
416 ctx->stats[new_size].blocks++;
417 ctx->stats[new_size].freefrags += frags;
419 * Set up a linked-list of blocks of size
423 next = curr + new_size;
424 total_size -= new_size;
425 for (i = 0; i < (frags - 1); i++) {
426 ((element *)curr)->next = (element *)next;
429 total_size -= new_size;
432 * Add the remaining fragment of the basic block to a free list.
434 total_size = rmsize(total_size);
435 if (total_size > 0) {
436 ((element *)next)->next = ctx->freelists[total_size];
437 ctx->freelists[total_size] = (element *)next;
438 ctx->stats[total_size].freefrags++;
441 * curr is now pointing at the last block in the
444 ((element *)curr)->next = NULL;
445 ctx->freelists[new_size] = new;
/*
 * mem_getunlocked(): internal-malloc allocation path. Oversized requests
 * (>= max_size, before or after rounding) go straight to the underlying
 * allocator and are accounted under the max_size stats bucket; normal
 * requests are served from the size-class free list, refilled via
 * more_frags() when empty. Caller must hold the context lock.
 * NOTE(review): several body lines are elided in this excerpt.
 */
451 mem_getunlocked(isc_mem_t *ctx, size_t size) {
452 size_t new_size = quantize(size);
455 if (size >= ctx->max_size || new_size >= ctx->max_size) {
457 * memget() was called on something beyond our upper limit.
459 if (ctx->quota != 0 && ctx->total + size > ctx->quota) {
463 ret = (ctx->memalloc)(ctx->arg, size);
465 ctx->memalloc_failures++;
470 ctx->stats[ctx->max_size].gets++;
471 ctx->stats[ctx->max_size].totalgets++;
473 * If we don't set new_size to size, then the
474 * ISC_MEM_FILL code might write over bytes we
482 * If there are no blocks in the free list for this size, get a chunk
483 * of memory and then break it up into "new_size"-sized blocks, adding
484 * them to the free list.
486 if (ctx->freelists[new_size] == NULL && !more_frags(ctx, new_size))
490 * The free list uses the "rounded-up" size "new_size".
492 ret = ctx->freelists[new_size];
493 ctx->freelists[new_size] = ctx->freelists[new_size]->next;
496 * The stats[] uses the _actual_ "size" requested by the
497 * caller, with the caveat (in the code above) that "size" >= the
498 * max. size (max_size) ends up getting recorded as a call to
501 ctx->stats[size].gets++;
502 ctx->stats[size].totalgets++;
503 ctx->stats[new_size].freefrags--;
504 ctx->inuse += new_size;
/* ISC_MEM_FILL: poison freshly handed-out memory with 0xbe. */
510 memset(ret, 0xbe, new_size); /* Mnemonic for "beef". */
/*
 * check_overrun(): verify the fill pattern in the padding between the
 * requested size and the rounded size is intact (detects overruns).
 * NOTE(review): the loop body is elided in this excerpt.
 */
516 #if ISC_MEM_FILL && ISC_MEM_CHECKOVERRUN
518 check_overrun(void *mem, size_t size, size_t new_size) {
521 cp = (unsigned char *)mem;
523 while (size < new_size) {
/*
 * mem_putunlocked(): internal-malloc free path, mirror of
 * mem_getunlocked(). Oversized chunks go back to the underlying
 * allocator; normal chunks are poisoned (0xde) and pushed onto the
 * size-class free list. Caller must hold the context lock.
 */
532 mem_putunlocked(isc_mem_t *ctx, void *mem, size_t size) {
533 size_t new_size = quantize(size);
535 if (size == ctx->max_size || new_size >= ctx->max_size) {
537 * memput() called on something beyond our upper limit.
540 memset(mem, 0xde, size); /* Mnemonic for "dead". */
542 (ctx->memfree)(ctx->arg, mem);
543 INSIST(ctx->stats[ctx->max_size].gets != 0);
544 ctx->stats[ctx->max_size].gets--;
545 INSIST(size <= ctx->total);
552 #if ISC_MEM_CHECKOVERRUN
553 check_overrun(mem, size, new_size);
555 memset(mem, 0xde, new_size); /* Mnemonic for "dead". */
559 * The free list uses the "rounded-up" size "new_size".
561 ((element *)mem)->next = ctx->freelists[new_size];
562 ctx->freelists[new_size] = (element *)mem;
565 * The stats[] uses the _actual_ "size" requested by the
566 * caller, with the caveat (in the code above) that "size" >= the
567 * max. size (max_size) ends up getting recorded as a call to
570 INSIST(ctx->stats[size].gets != 0);
571 ctx->stats[size].gets--;
572 ctx->stats[new_size].freefrags++;
573 ctx->inuse -= new_size;
576 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
579 * Perform a malloc, doing memory filling and overrun detection as necessary.
/*
 * mem_get(): system-malloc allocation path; optionally fills new memory
 * with 0xbe and (with CHECKOVERRUN) reserves a guard byte past the end.
 * NOTE(review): several lines are elided in this excerpt.
 */
582 mem_get(isc_mem_t *ctx, size_t size) {
585 #if ISC_MEM_CHECKOVERRUN
589 ret = (ctx->memalloc)(ctx->arg, size);
591 ctx->memalloc_failures++;
595 memset(ret, 0xbe, size); /* Mnemonic for "beef". */
597 # if ISC_MEM_CHECKOVERRUN
607 * Perform a free, doing memory filling and overrun detection as necessary.
/* mem_put(): system-malloc free path; checks the 0xbe guard byte, then
 * poisons with 0xde before releasing. */
610 mem_put(isc_mem_t *ctx, void *mem, size_t size) {
611 #if ISC_MEM_CHECKOVERRUN
612 INSIST(((unsigned char *)mem)[size] == 0xbe);
615 memset(mem, 0xde, size); /* Mnemonic for "dead". */
619 (ctx->memfree)(ctx->arg, mem);
623 * Update internal counters after a memory get.
/* mem_getstats(): bump gets/totalgets; oversized requests are pooled
 * into the max_size bucket. */
626 mem_getstats(isc_mem_t *ctx, size_t size) {
630 if (size > ctx->max_size) {
631 ctx->stats[ctx->max_size].gets++;
632 ctx->stats[ctx->max_size].totalgets++;
634 ctx->stats[size].gets++;
635 ctx->stats[size].totalgets++;
640 * Update internal counters after a memory put.
/* mem_putstats(): inverse of mem_getstats(); INSISTs guard against
 * double-frees showing up as negative counters. */
643 mem_putstats(isc_mem_t *ctx, void *ptr, size_t size) {
646 INSIST(ctx->inuse >= size);
649 if (size > ctx->max_size) {
650 INSIST(ctx->stats[ctx->max_size].gets > 0U);
651 ctx->stats[ctx->max_size].gets--;
653 INSIST(ctx->stats[size].gets > 0U);
654 ctx->stats[size].gets--;
658 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* Default allocator pair used by isc_mem_create(): plain malloc/free;
 * arg is unused. NOTE(review): bodies are partially elided here. */
665 default_memalloc(void *arg, size_t size) {
669 return (malloc(size));
673 default_memfree(void *arg, void *ptr) {
/*
 * isc_mem_createx(): construct a memory context using the supplied
 * allocator pair (memalloc/memfree/arg). Allocates the context itself,
 * the per-size stats array, and (for internal malloc) the free lists;
 * on any failure it unwinds via the cleanup labels (elided here) and
 * returns the error. Returns ISC_R_SUCCESS and fills *ctxp on success.
 * NOTE(review): many body lines are elided in this excerpt.
 */
683 isc_mem_createx(size_t init_max_size, size_t target_size,
684 isc_memalloc_t memalloc, isc_memfree_t memfree, void *arg,
690 REQUIRE(ctxp != NULL && *ctxp == NULL);
691 REQUIRE(memalloc != NULL);
692 REQUIRE(memfree != NULL);
694 INSIST((ALIGNMENT_SIZE & (ALIGNMENT_SIZE - 1)) == 0);
696 #if !ISC_MEM_USE_INTERNAL_MALLOC
700 ctx = (memalloc)(arg, sizeof *ctx);
702 return (ISC_R_NOMEMORY);
704 if (init_max_size == 0U)
705 ctx->max_size = DEF_MAX_SIZE;
707 ctx->max_size = init_max_size;
715 ctx->hi_called = ISC_FALSE;
717 ctx->water_arg = NULL;
718 ctx->magic = MEM_MAGIC;
719 isc_ondestroy_init(&ctx->ondestroy);
720 ctx->memalloc = memalloc;
721 ctx->memfree = memfree;
724 ctx->checkfree = ISC_TRUE;
725 ISC_LIST_INIT(ctx->pools);
727 #if ISC_MEM_USE_INTERNAL_MALLOC
728 ctx->freelists = NULL;
729 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* stats is indexed 0..max_size inclusive, hence the +1. */
731 ctx->stats = (memalloc)(arg,
732 (ctx->max_size+1) * sizeof (struct stats));
733 if (ctx->stats == NULL) {
734 result = ISC_R_NOMEMORY;
737 memset(ctx->stats, 0, (ctx->max_size + 1) * sizeof (struct stats));
739 #if ISC_MEM_USE_INTERNAL_MALLOC
740 if (target_size == 0)
741 ctx->mem_target = DEF_MEM_TARGET;
743 ctx->mem_target = target_size;
744 ctx->freelists = (memalloc)(arg, ctx->max_size * sizeof (element *));
745 if (ctx->freelists == NULL) {
746 result = ISC_R_NOMEMORY;
749 memset(ctx->freelists, 0,
750 ctx->max_size * sizeof (element *));
751 ctx->basic_blocks = NULL;
752 ctx->basic_table = NULL;
753 ctx->basic_table_count = 0;
754 ctx->basic_table_size = 0;
757 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
759 if (isc_mutex_init(&ctx->lock) != ISC_R_SUCCESS) {
760 UNEXPECTED_ERROR(__FILE__, __LINE__,
761 "isc_mutex_init() %s",
762 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
763 ISC_MSG_FAILED, "failed"));
764 result = ISC_R_UNEXPECTED;
768 #if ISC_MEM_TRACKLINES
769 ISC_LIST_INIT(ctx->debuglist);
770 ctx->debugging = isc_mem_debugging;
773 ctx->memalloc_failures = 0;
776 return (ISC_R_SUCCESS);
/* Error-path cleanup: release whatever was allocated before failing. */
781 (memfree)(arg, ctx->stats);
782 #if ISC_MEM_USE_INTERNAL_MALLOC
784 (memfree)(arg, ctx->freelists);
785 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* isc_mem_create(): convenience wrapper using the default malloc/free. */
793 isc_mem_create(size_t init_max_size, size_t target_size,
796 return (isc_mem_createx(init_max_size, target_size,
797 default_memalloc, default_memfree, NULL,
/*
 * destroy(): tear down a memory context once its refcount hits zero.
 * When checkfree is set, dumps and INSISTs on any outstanding
 * allocations (leak detection). Frees stats, debug pages, internal
 * basic blocks/tables, the lock, and finally the context itself, then
 * fires the ondestroy notification.
 * NOTE(review): several body lines are elided in this excerpt.
 */
802 destroy(isc_mem_t *ctx) {
804 isc_ondestroy_t ondest;
808 #if ISC_MEM_USE_INTERNAL_MALLOC
809 INSIST(ISC_LIST_EMPTY(ctx->pools));
810 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
812 #if ISC_MEM_TRACKLINES
813 if (ctx->checkfree) {
814 if (!ISC_LIST_EMPTY(ctx->debuglist))
815 print_active(ctx, stderr);
816 INSIST(ISC_LIST_EMPTY(ctx->debuglist));
/* Release all debuglink pages (allocated with raw malloc). */
820 for (dl = ISC_LIST_HEAD(ctx->debuglist);
822 dl = ISC_LIST_HEAD(ctx->debuglist)) {
823 ISC_LIST_UNLINK(ctx->debuglist, dl, link);
828 INSIST(ctx->references == 0);
830 if (ctx->checkfree) {
831 for (i = 0; i <= ctx->max_size; i++) {
832 #if ISC_MEM_TRACKLINES
833 if (ctx->stats[i].gets != 0)
834 print_active(ctx, stderr);
836 INSIST(ctx->stats[i].gets == 0U);
840 (ctx->memfree)(ctx->arg, ctx->stats);
842 #if ISC_MEM_USE_INTERNAL_MALLOC
843 for (i = 0; i < ctx->basic_table_count; i++)
844 (ctx->memfree)(ctx->arg, ctx->basic_table[i]);
845 (ctx->memfree)(ctx->arg, ctx->freelists);
846 (ctx->memfree)(ctx->arg, ctx->basic_table);
847 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* Copy ondestroy out before freeing ctx so we can notify afterwards. */
849 ondest = ctx->ondestroy;
851 DESTROYLOCK(&ctx->lock);
852 (ctx->memfree)(ctx->arg, ctx);
854 isc_ondestroy_notify(&ondest, ctx);
/*
 * isc_mem_attach(): take a new reference on source and store it in
 * *targetp. isc_mem_detach(): drop a reference; the last detach
 * triggers destroy() (call elided in this excerpt).
 */
858 isc_mem_attach(isc_mem_t *source, isc_mem_t **targetp) {
859 REQUIRE(VALID_CONTEXT(source));
860 REQUIRE(targetp != NULL && *targetp == NULL);
863 source->references++;
864 UNLOCK(&source->lock);
870 isc_mem_detach(isc_mem_t **ctxp) {
872 isc_boolean_t want_destroy = ISC_FALSE;
874 REQUIRE(ctxp != NULL);
876 REQUIRE(VALID_CONTEXT(ctx));
879 INSIST(ctx->references > 0);
/* Defer destroy() until after the lock is released (elided below). */
881 if (ctx->references == 0)
882 want_destroy = ISC_TRUE;
892 * isc_mem_putanddetach() is the equivalent of:
895 * isc_mem_attach(ptr->mctx, &mctx);
896 * isc_mem_detach(&ptr->mctx);
897 * isc_mem_put(mctx, ptr, sizeof(*ptr);
898 * isc_mem_detach(&mctx);
/*
 * isc__mem_putanddetach(): free ptr back to *ctxp and drop the caller's
 * reference in one operation — needed when ctxp lives inside the object
 * being freed. NOTE(review): several body lines are elided here.
 */
902 isc__mem_putanddetach(isc_mem_t **ctxp, void *ptr, size_t size FLARG) {
904 isc_boolean_t want_destroy = ISC_FALSE;
906 REQUIRE(ctxp != NULL);
908 REQUIRE(VALID_CONTEXT(ctx));
909 REQUIRE(ptr != NULL);
912 * Must be before mem_putunlocked() as ctxp is usually within
917 #if ISC_MEM_USE_INTERNAL_MALLOC
919 mem_putunlocked(ctx, ptr, size);
920 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
921 mem_put(ctx, ptr, size);
923 mem_putstats(ctx, ptr, size);
924 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
926 DELETE_TRACE(ctx, ptr, size, file, line);
927 INSIST(ctx->references > 0);
929 if (ctx->references == 0)
930 want_destroy = ISC_TRUE;
/*
 * isc_mem_destroy(): legacy single-reference teardown for callers that
 * never used attach/detach; requires exactly one reference.
 */
939 isc_mem_destroy(isc_mem_t **ctxp) {
943 * This routine provides legacy support for callers who use mctxs
944 * without attaching/detaching.
947 REQUIRE(ctxp != NULL);
949 REQUIRE(VALID_CONTEXT(ctx));
952 #if ISC_MEM_TRACKLINES
953 if (ctx->references != 1)
954 print_active(ctx, stderr);
956 REQUIRE(ctx->references == 1);
/* isc_mem_ondestroy(): register an event to fire when ctx is destroyed. */
966 isc_mem_ondestroy(isc_mem_t *ctx, isc_task_t *task, isc_event_t **event) {
970 res = isc_ondestroy_register(&ctx->ondestroy, task, event);
/*
 * isc__mem_get(): public allocation entry point. Dispatches to the
 * internal or system allocator, records stats/trace, and fires the
 * hi-water callback (outside the lock) when inuse first crosses
 * hi_water. NOTE(review): several body lines are elided here.
 */
978 isc__mem_get(isc_mem_t *ctx, size_t size FLARG) {
980 isc_boolean_t call_water = ISC_FALSE;
982 REQUIRE(VALID_CONTEXT(ctx));
984 #if ISC_MEM_USE_INTERNAL_MALLOC
986 ptr = mem_getunlocked(ctx, size);
987 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
988 ptr = mem_get(ctx, size);
991 mem_getstats(ctx, size);
992 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
994 ADD_TRACE(ctx, ptr, size, file, line);
/* hi_called latches so the callback fires once per crossing. */
995 if (ctx->hi_water != 0U && !ctx->hi_called &&
996 ctx->inuse > ctx->hi_water) {
997 ctx->hi_called = ISC_TRUE;
998 call_water = ISC_TRUE;
1000 if (ctx->inuse > ctx->maxinuse) {
1001 ctx->maxinuse = ctx->inuse;
1002 if (ctx->hi_water != 0U && ctx->inuse > ctx->hi_water &&
1003 (isc_mem_debugging & ISC_MEM_DEBUGUSAGE) != 0)
1004 fprintf(stderr, "maxinuse = %lu\n",
1005 (unsigned long)ctx->inuse);
1010 (ctx->water)(ctx->water_arg, ISC_MEM_HIWATER);
/*
 * isc__mem_put(): public free entry point; mirror of isc__mem_get(),
 * firing the lo-water callback when usage drops below lo_water.
 */
1017 isc__mem_put(isc_mem_t *ctx, void *ptr, size_t size FLARG)
1019 isc_boolean_t call_water = ISC_FALSE;
1021 REQUIRE(VALID_CONTEXT(ctx));
1022 REQUIRE(ptr != NULL);
1024 #if ISC_MEM_USE_INTERNAL_MALLOC
1026 mem_putunlocked(ctx, ptr, size);
1027 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1028 mem_put(ctx, ptr, size);
1030 mem_putstats(ctx, ptr, size);
1031 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1033 DELETE_TRACE(ctx, ptr, size, file, line);
1036 * The check against ctx->lo_water == 0 is for the condition
1037 * when the context was pushed over hi_water but then had
1038 * isc_mem_setwater() called with 0 for hi_water and lo_water.
1040 if (ctx->hi_called &&
1041 (ctx->inuse < ctx->lo_water || ctx->lo_water == 0U)) {
1042 ctx->hi_called = ISC_FALSE;
1044 if (ctx->water != NULL)
1045 call_water = ISC_TRUE;
1050 (ctx->water)(ctx->water_arg, ISC_MEM_LOWATER);
1054 #if ISC_MEM_TRACKLINES
/*
 * print_active(): dump every live (ptr, file, line) record in the
 * context's debuglist to "out" — used for leak reports at destroy time.
 * NOTE(review): several lines are elided in this excerpt.
 */
1056 print_active(isc_mem_t *mctx, FILE *out) {
1061 fprintf(out, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1063 "Dump of all outstanding "
1064 "memory allocations:\n"));
1065 dl = ISC_LIST_HEAD(mctx->debuglist);
1067 fprintf(out, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1070 while (dl != NULL) {
1071 for (i = 0 ; i < DEBUGLIST_COUNT ; i++)
1072 if (dl->ptr[i] != NULL)
1074 isc_msgcat_get(isc_msgcat,
1076 ISC_MSG_PTRFILELINE,
1080 dl->ptr[i], dl->file[i],
1082 dl = ISC_LIST_NEXT(dl, link);
1089 * Print the stats[] on the stream "out" with suitable formatting.
/*
 * isc_mem_stats(): print per-size allocation counters, then one line
 * per attached mempool, then (with TRACKLINES) the active-allocation
 * dump. NOTE(review): several lines are elided in this excerpt.
 */
1092 isc_mem_stats(isc_mem_t *ctx, FILE *out) {
1094 const struct stats *s;
1095 const isc_mempool_t *pool;
1097 REQUIRE(VALID_CONTEXT(ctx));
1100 for (i = 0; i <= ctx->max_size; i++) {
/* Skip size classes that were never used. */
1103 if (s->totalgets == 0U && s->gets == 0U)
1105 fprintf(out, "%s%5lu: %11lu gets, %11lu rem",
1106 (i == ctx->max_size) ? ">=" : " ",
1107 (unsigned long) i, s->totalgets, s->gets);
1108 #if ISC_MEM_USE_INTERNAL_MALLOC
1109 if (s->blocks != 0 || s->freefrags != 0)
1110 fprintf(out, " (%lu bl, %lu ff)",
1111 s->blocks, s->freefrags);
1112 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1117 * Note that since a pool can be locked now, these stats might be
1118 * somewhat off if the pool is in active use at the time the stats
1119 * are dumped. The link fields are protected by the isc_mem_t's
1120 * lock, however, so walking this list and extracting integers from
1121 * stats fields is always safe.
1123 pool = ISC_LIST_HEAD(ctx->pools);
1125 fprintf(out, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1127 "[Pool statistics]\n"));
1128 fprintf(out, "%15s %10s %10s %10s %10s %10s %10s %10s %1s\n",
1129 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1130 ISC_MSG_POOLNAME, "name"),
1131 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1132 ISC_MSG_POOLSIZE, "size"),
1133 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1134 ISC_MSG_POOLMAXALLOC, "maxalloc"),
1135 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1136 ISC_MSG_POOLALLOCATED, "allocated"),
1137 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1138 ISC_MSG_POOLFREECOUNT, "freecount"),
1139 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1140 ISC_MSG_POOLFREEMAX, "freemax"),
1141 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1142 ISC_MSG_POOLFILLCOUNT, "fillcount"),
1143 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1144 ISC_MSG_POOLGETS, "gets"),
1147 while (pool != NULL) {
1148 fprintf(out, "%15s %10lu %10u %10u %10u %10u %10u %10u %s\n",
1149 pool->name, (unsigned long) pool->size, pool->maxalloc,
1150 pool->allocated, pool->freecount, pool->freemax,
1151 pool->fillcount, pool->gets,
1152 (pool->lock == NULL ? "N" : "Y"));
1153 pool = ISC_LIST_NEXT(pool, link);
1156 #if ISC_MEM_TRACKLINES
1157 print_active(ctx, out);
1164 * Replacements for malloc() and free() -- they implicitly remember the
1165 * size of the object allocated (with some additional overhead).
/*
 * isc__mem_allocateunlocked(): allocate size + ALIGNMENT_SIZE bytes and
 * stash the size in the prefix "size_info" header; callers receive the
 * pointer just past the header (return lines elided here).
 */
1169 isc__mem_allocateunlocked(isc_mem_t *ctx, size_t size) {
1172 size += ALIGNMENT_SIZE;
1173 #if ISC_MEM_USE_INTERNAL_MALLOC
1174 si = mem_getunlocked(ctx, size);
1175 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1176 si = mem_get(ctx, size);
1177 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* isc__mem_allocate(): locked wrapper that also records stats/trace. */
1185 isc__mem_allocate(isc_mem_t *ctx, size_t size FLARG) {
1188 REQUIRE(VALID_CONTEXT(ctx));
1190 #if ISC_MEM_USE_INTERNAL_MALLOC
1192 si = isc__mem_allocateunlocked(ctx, size);
1193 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1194 si = isc__mem_allocateunlocked(ctx, size);
/* si[-1].u.size is the stored total size, set by the unlocked helper. */
1197 mem_getstats(ctx, si[-1].u.size);
1198 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1200 #if ISC_MEM_TRACKLINES
1202 ADD_TRACE(ctx, si, si[-1].u.size, file, line);
/*
 * isc__mem_free(): release memory obtained from isc__mem_allocate();
 * recovers the stored size from the header preceding ptr.
 */
1211 isc__mem_free(isc_mem_t *ctx, void *ptr FLARG) {
1215 REQUIRE(VALID_CONTEXT(ctx));
1216 REQUIRE(ptr != NULL);
1218 si = &(((size_info *)ptr)[-1]);
1221 #if ISC_MEM_USE_INTERNAL_MALLOC
1223 mem_putunlocked(ctx, si, size);
1224 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1225 mem_put(ctx, si, size);
1227 mem_putstats(ctx, si, size);
1228 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1230 DELETE_TRACE(ctx, ptr, size, file, line);
1237 * Other useful things.
/*
 * isc__mem_strdup(): duplicate a NUL-terminated string into mctx-owned
 * memory; free with isc_mem_free(). NOTE(review): lines elided here.
 */
1241 isc__mem_strdup(isc_mem_t *mctx, const char *s FLARG) {
1245 REQUIRE(VALID_CONTEXT(mctx));
1250 ns = isc__mem_allocate(mctx, len + 1 FLARG_PASS);
1253 strncpy(ns, s, len + 1);
/* isc_mem_setdestroycheck(): toggle leak checking at context teardown. */
1259 isc_mem_setdestroycheck(isc_mem_t *ctx, isc_boolean_t flag) {
1260 REQUIRE(VALID_CONTEXT(ctx));
1263 ctx->checkfree = flag;
/* Quota and usage accessors (bodies largely elided in this excerpt). */
1273 isc_mem_setquota(isc_mem_t *ctx, size_t quota) {
1274 REQUIRE(VALID_CONTEXT(ctx));
1283 isc_mem_getquota(isc_mem_t *ctx) {
1286 REQUIRE(VALID_CONTEXT(ctx));
1297 isc_mem_inuse(isc_mem_t *ctx) {
1300 REQUIRE(VALID_CONTEXT(ctx));
/*
 * isc_mem_setwater(): install (or clear, when water == NULL) the
 * hi/lo-water callback; resets the hi_called latch in both paths.
 */
1311 isc_mem_setwater(isc_mem_t *ctx, isc_mem_water_t water, void *water_arg,
1312 size_t hiwater, size_t lowater)
1314 REQUIRE(VALID_CONTEXT(ctx));
1315 REQUIRE(hiwater >= lowater);
1318 if (water == NULL) {
1320 ctx->water_arg = NULL;
1323 ctx->hi_called = ISC_FALSE;
1326 ctx->water_arg = water_arg;
1327 ctx->hi_water = hiwater;
1328 ctx->lo_water = lowater;
1329 ctx->hi_called = ISC_FALSE;
/*
 * isc_mempool_create(): allocate a fixed-size-item pool on top of mctx,
 * initialize its counters, and link it into the context's pool list.
 * NOTE(review): several initializer lines are elided in this excerpt.
 */
1339 isc_mempool_create(isc_mem_t *mctx, size_t size, isc_mempool_t **mpctxp) {
1340 isc_mempool_t *mpctx;
1342 REQUIRE(VALID_CONTEXT(mctx));
1344 REQUIRE(mpctxp != NULL && *mpctxp == NULL);
1347 * Allocate space for this pool, initialize values, and if all works
1348 * well, attach to the memory context.
1350 mpctx = isc_mem_get(mctx, sizeof(isc_mempool_t));
1352 return (ISC_R_NOMEMORY);
1354 mpctx->magic = MEMPOOL_MAGIC;
1358 mpctx->maxalloc = UINT_MAX;
1359 mpctx->allocated = 0;
1360 mpctx->freecount = 0;
1362 mpctx->fillcount = 1;
1364 #if ISC_MEMPOOL_NAMES
1367 mpctx->items = NULL;
1372 ISC_LIST_INITANDAPPEND(mctx->pools, mpctx, link);
1373 UNLOCK(&mctx->lock);
1375 return (ISC_R_SUCCESS);
/*
 * isc_mempool_setname(): copy a short display name into the pool
 * (truncated to 15 chars + NUL), honoring the optional pool lock.
 */
1379 isc_mempool_setname(isc_mempool_t *mpctx, const char *name) {
1380 REQUIRE(name != NULL);
1382 #if ISC_MEMPOOL_NAMES
1383 if (mpctx->lock != NULL)
1386 strncpy(mpctx->name, name, sizeof(mpctx->name) - 1);
1387 mpctx->name[sizeof(mpctx->name) - 1] = '\0';
1389 if (mpctx->lock != NULL)
1390 UNLOCK(mpctx->lock);
/*
 * isc_mempool_destroy(): require all items returned (allocated == 0),
 * drain the free list back to the mctx, unlink from the context's pool
 * list, and free the pool structure itself.
 */
1398 isc_mempool_destroy(isc_mempool_t **mpctxp) {
1399 isc_mempool_t *mpctx;
1404 REQUIRE(mpctxp != NULL);
1406 REQUIRE(VALID_MEMPOOL(mpctx));
1407 #if ISC_MEMPOOL_NAMES
1408 if (mpctx->allocated > 0)
1409 UNEXPECTED_ERROR(__FILE__, __LINE__,
1410 "isc_mempool_destroy(): mempool %s "
1414 REQUIRE(mpctx->allocated == 0);
1424 * Return any items on the free list
1426 while (mpctx->items != NULL) {
1427 INSIST(mpctx->freecount > 0);
1429 item = mpctx->items;
1430 mpctx->items = item->next;
1432 #if ISC_MEM_USE_INTERNAL_MALLOC
1434 mem_putunlocked(mctx, item, mpctx->size);
1435 UNLOCK(&mctx->lock);
1436 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1437 mem_put(mctx, item, mpctx->size);
1438 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1442 * Remove our linked list entry from the memory context.
1445 ISC_LIST_UNLINK(mctx->pools, mpctx, link);
1446 UNLOCK(&mctx->lock);
1450 isc_mem_put(mpctx->mctx, mpctx, sizeof(isc_mempool_t));
/* isc_mempool_associatelock(): attach an external lock; must be done
 * before the pool is used concurrently (pool must not yet have one). */
1459 isc_mempool_associatelock(isc_mempool_t *mpctx, isc_mutex_t *lock) {
1460 REQUIRE(VALID_MEMPOOL(mpctx));
1461 REQUIRE(mpctx->lock == NULL);
1462 REQUIRE(lock != NULL);
/*
 * isc__mempool_get(): hand out one item. Fast path pops the pool's free
 * list; otherwise it refills the list with up to fillcount items from
 * the underlying mctx before popping. Enforces the maxalloc quota.
 * NOTE(review): several body lines are elided in this excerpt.
 */
1468 isc__mempool_get(isc_mempool_t *mpctx FLARG) {
1473 REQUIRE(VALID_MEMPOOL(mpctx));
1477 if (mpctx->lock != NULL)
1481 * Don't let the caller go over quota
1483 if (mpctx->allocated >= mpctx->maxalloc) {
1489 * if we have a free list item, return the first here
1491 item = mpctx->items;
1493 mpctx->items = item->next;
1494 INSIST(mpctx->freecount > 0);
1502 * We need to dip into the well. Lock the memory context here and
1503 * fill up our free list.
1505 for (i = 0 ; i < mpctx->fillcount ; i++) {
1506 #if ISC_MEM_USE_INTERNAL_MALLOC
1508 item = mem_getunlocked(mctx, mpctx->size);
1509 UNLOCK(&mctx->lock);
1510 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1511 item = mem_get(mctx, mpctx->size);
1512 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1515 item->next = mpctx->items;
1516 mpctx->items = item;
1521 * If we didn't get any items, return NULL.
1523 item = mpctx->items;
1527 mpctx->items = item->next;
1533 if (mpctx->lock != NULL)
1534 UNLOCK(mpctx->lock);
1536 #if ISC_MEM_TRACKLINES
/* Trace under the mctx lock since the debuglist belongs to the mctx. */
1539 ADD_TRACE(mctx, item, mpctx->size, file, line);
1540 UNLOCK(&mctx->lock);
1542 #endif /* ISC_MEM_TRACKLINES */
/*
 * isc__mempool_put(): return one item. Goes back to the mctx directly
 * when the pool's free list is at freemax; otherwise it is pushed onto
 * the pool's free list.
 */
1548 isc__mempool_put(isc_mempool_t *mpctx, void *mem FLARG) {
1552 REQUIRE(VALID_MEMPOOL(mpctx));
1553 REQUIRE(mem != NULL);
1557 if (mpctx->lock != NULL)
1560 INSIST(mpctx->allocated > 0);
1563 #if ISC_MEM_TRACKLINES
1565 DELETE_TRACE(mctx, mem, mpctx->size, file, line);
1566 UNLOCK(&mctx->lock);
1567 #endif /* ISC_MEM_TRACKLINES */
1570 * If our free list is full, return this to the mctx directly.
1572 if (mpctx->freecount >= mpctx->freemax) {
1573 #if ISC_MEM_USE_INTERNAL_MALLOC
1575 mem_putunlocked(mctx, mem, mpctx->size);
1576 UNLOCK(&mctx->lock);
1577 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1578 mem_put(mctx, mem, mpctx->size);
1579 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1580 if (mpctx->lock != NULL)
1581 UNLOCK(mpctx->lock);
1586 * Otherwise, attach it to our free list and bump the counter.
1589 item = (element *)mem;
1590 item->next = mpctx->items;
1591 mpctx->items = item;
1593 if (mpctx->lock != NULL)
1594 UNLOCK(mpctx->lock);
/*
 * Mempool accessors: each setter/getter takes the optional pool lock,
 * reads or writes one field, and unlocks. All follow the same pattern.
 * NOTE(review): LOCK lines and returns are elided in this excerpt.
 */
1602 isc_mempool_setfreemax(isc_mempool_t *mpctx, unsigned int limit) {
1603 REQUIRE(VALID_MEMPOOL(mpctx));
1605 if (mpctx->lock != NULL)
1608 mpctx->freemax = limit;
1610 if (mpctx->lock != NULL)
1611 UNLOCK(mpctx->lock);
1615 isc_mempool_getfreemax(isc_mempool_t *mpctx) {
1616 unsigned int freemax;
1618 REQUIRE(VALID_MEMPOOL(mpctx));
1620 if (mpctx->lock != NULL)
1623 freemax = mpctx->freemax;
1625 if (mpctx->lock != NULL)
1626 UNLOCK(mpctx->lock);
1632 isc_mempool_getfreecount(isc_mempool_t *mpctx) {
1633 unsigned int freecount;
1635 REQUIRE(VALID_MEMPOOL(mpctx));
1637 if (mpctx->lock != NULL)
1640 freecount = mpctx->freecount;
1642 if (mpctx->lock != NULL)
1643 UNLOCK(mpctx->lock);
1649 isc_mempool_setmaxalloc(isc_mempool_t *mpctx, unsigned int limit) {
1652 REQUIRE(VALID_MEMPOOL(mpctx));
1654 if (mpctx->lock != NULL)
1657 mpctx->maxalloc = limit;
1659 if (mpctx->lock != NULL)
1660 UNLOCK(mpctx->lock);
1664 isc_mempool_getmaxalloc(isc_mempool_t *mpctx) {
1665 unsigned int maxalloc;
1667 REQUIRE(VALID_MEMPOOL(mpctx));
1669 if (mpctx->lock != NULL)
1672 maxalloc = mpctx->maxalloc;
1674 if (mpctx->lock != NULL)
1675 UNLOCK(mpctx->lock);
1681 isc_mempool_getallocated(isc_mempool_t *mpctx) {
1682 unsigned int allocated;
1684 REQUIRE(VALID_MEMPOOL(mpctx));
1686 if (mpctx->lock != NULL)
1689 allocated = mpctx->allocated;
1691 if (mpctx->lock != NULL)
1692 UNLOCK(mpctx->lock);
1698 isc_mempool_setfillcount(isc_mempool_t *mpctx, unsigned int limit) {
1700 REQUIRE(VALID_MEMPOOL(mpctx));
1702 if (mpctx->lock != NULL)
1705 mpctx->fillcount = limit;
1707 if (mpctx->lock != NULL)
1708 UNLOCK(mpctx->lock);
1712 isc_mempool_getfillcount(isc_mempool_t *mpctx) {
1713 unsigned int fillcount;
1715 REQUIRE(VALID_MEMPOOL(mpctx));
1717 if (mpctx->lock != NULL)
1720 fillcount = mpctx->fillcount;
1722 if (mpctx->lock != NULL)
1723 UNLOCK(mpctx->lock);