2 * Copyright (C) 2004, 2005 Internet Systems Consortium, Inc. ("ISC")
3 * Copyright (C) 1997-2003 Internet Software Consortium.
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
10 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
11 * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
12 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
13 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
14 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
15 * PERFORMANCE OF THIS SOFTWARE.
18 /* $Id: mem.c,v 1.98.2.7.2.7 2005/03/17 03:58:32 marka Exp $ */
28 #include <isc/magic.h>
31 #include <isc/ondestroy.h>
32 #include <isc/string.h>
34 #include <isc/mutex.h>
/*
 * NOTE(review): this chunk is a partial extraction of ISC BIND's
 * lib/isc/mem.c -- many lines are missing and each surviving line
 * still carries its original line number as a stray leading token.
 * All comments below are hedged accordingly; do not treat this text
 * as compilable C.
 */
/* Runtime debug flag word; default off unless overridden at build time. */
37 #ifndef ISC_MEM_DEBUGGING
38 #define ISC_MEM_DEBUGGING 0
40 LIBISC_EXTERNAL_DATA unsigned int isc_mem_debugging = ISC_MEM_DEBUGGING;
43 * Define ISC_MEM_USE_INTERNAL_MALLOC=1 to use the internal malloc()
44 * implementation in preference to the system one. The internal malloc()
45 * is very space-efficient, and quite fast on uniprocessor systems. It
46 * performs poorly on multiprocessor machines.
48 #ifndef ISC_MEM_USE_INTERNAL_MALLOC
49 #define ISC_MEM_USE_INTERNAL_MALLOC 0
/*
 * Tunables: requests of DEF_MAX_SIZE bytes or more bypass the per-size
 * free lists and go straight to the underlying allocator (see
 * mem_getunlocked below).  ALIGNMENT_SIZE must stay a power of two --
 * quantize()/rmsize() mask with (ALIGNMENT_SIZE - 1).
 */
56 #define DEF_MAX_SIZE 1100
57 #define DEF_MEM_TARGET 4096
58 #define ALIGNMENT_SIZE 8 /* must be a power of 2 */
59 #define NUM_BASIC_BLOCKS 64 /* must be > 1 */
60 #define TABLE_INCREMENT 1024
61 #define DEBUGLIST_COUNT 1024
66 #if ISC_MEM_TRACKLINES
/*
 * Allocation-tracking record: one debuglink_t holds up to
 * DEBUGLIST_COUNT outstanding allocations (parallel ptr/size/file/line
 * arrays).  NOTE(review): the struct's opening brace, the `count`
 * member used elsewhere, and the closing line are missing from this
 * extraction.
 */
67 typedef struct debuglink debuglink_t;
69 ISC_LINK(debuglink_t) link;
70 const void *ptr[DEBUGLIST_COUNT];
71 unsigned int size[DEBUGLIST_COUNT];
72 const char *file[DEBUGLIST_COUNT];
73 unsigned int line[DEBUGLIST_COUNT];
/* FLARG / FLARG_PASS splice file/line arguments into the isc__mem_* API
 * when line tracking is compiled in. */
77 #define FLARG_PASS , file, line
78 #define FLARG , const char *file, int line
/* Free-list node; the comment at original line 91 requires it to be
 * exactly ALIGNMENT_SIZE bytes. */
84 typedef struct element element;
91 * This structure must be ALIGNMENT_SIZE bytes.
95 char bytes[ALIGNMENT_SIZE];
/* Per-size-class statistics; internal-malloc builds additionally count
 * basic blocks and free fragments. */
101 unsigned long totalgets;
102 #if ISC_MEM_USE_INTERNAL_MALLOC
103 unsigned long blocks;
104 unsigned long freefrags;
105 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* Magic-number validation for isc_mem_t ("MemC"). */
108 #define MEM_MAGIC ISC_MAGIC('M', 'e', 'm', 'C')
109 #define VALID_CONTEXT(c) ISC_MAGIC_VALID(c, MEM_MAGIC)
111 #if ISC_MEM_TRACKLINES
112 typedef ISC_LIST(debuglink_t) debuglist_t;
/*
 * Fragment of struct isc_mem: reference-counted, mutex-protected
 * memory context.  NOTE(review): the struct header, lock, quota/total/
 * inuse counters and several members referenced elsewhere in this file
 * are missing from this extraction.
 */
117 isc_ondestroy_t ondestroy;
119 isc_memalloc_t memalloc;
120 isc_memfree_t memfree;
123 isc_boolean_t checkfree;
124 struct stats * stats;
125 unsigned int references;
132 isc_boolean_t hi_called;
133 isc_mem_water_t water;
135 ISC_LIST(isc_mempool_t) pools;
/* Internal-malloc state: per-size free lists plus a growable table of
 * NUM_BASIC_BLOCKS-sized slabs, with lowest/highest address bounds. */
137 #if ISC_MEM_USE_INTERNAL_MALLOC
139 element ** freelists;
140 element * basic_blocks;
141 unsigned char ** basic_table;
142 unsigned int basic_table_count;
143 unsigned int basic_table_size;
144 unsigned char * lowest;
145 unsigned char * highest;
146 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
148 #if ISC_MEM_TRACKLINES
149 debuglist_t * debuglist;
152 unsigned int memalloc_failures;
/* Magic-number validation for isc_mempool_t ("MEMp"). */
155 #define MEMPOOL_MAGIC ISC_MAGIC('M', 'E', 'M', 'p')
156 #define VALID_MEMPOOL(c) ISC_MAGIC_VALID(c, MEMPOOL_MAGIC)
/*
 * Fragment of struct isc_mempool: a free-list cache of fixed-size items
 * carved out of a parent isc_mem_t.  Locking discipline is documented
 * inline per member group.  NOTE(review): the struct's opening and
 * closing lines are missing from this extraction.
 */
159 /* always unlocked */
160 unsigned int magic; /* magic number */
161 isc_mutex_t *lock; /* optional lock */
162 isc_mem_t *mctx; /* our memory context */
163 /* locked via the memory context's lock */
164 ISC_LINK(isc_mempool_t) link; /* next pool in this mem context */
165 /* optionally locked from here down */
166 element *items; /* low water item list */
167 size_t size; /* size of each item on this pool */
168 unsigned int maxalloc; /* max number of items allowed */
169 unsigned int allocated; /* # of items currently given out */
170 unsigned int freecount; /* # of items on reserved list */
171 unsigned int freemax; /* # of items allowed on free list */
172 unsigned int fillcount; /* # of items to fetch on each fill */
174 unsigned int gets; /* # of requests to this pool */
175 /* Debugging only. */
176 #if ISC_MEMPOOL_NAMES
177 char name[16]; /* printed name in stats reports */
182 * Private Inline-able.
/*
 * ADD_TRACE/DELETE_TRACE compile away entirely unless line tracking is
 * built in; otherwise ADD_TRACE only records when the runtime
 * trace/record debugging bits are set.  NOTE(review): parts of the
 * macro bodies (do/while wrapper, condition tail) are missing here.
 */
185 #if ! ISC_MEM_TRACKLINES
186 #define ADD_TRACE(a, b, c, d, e)
187 #define DELETE_TRACE(a, b, c, d, e)
189 #define ADD_TRACE(a, b, c, d, e) \
191 if ((isc_mem_debugging & (ISC_MEM_DEBUGTRACE | \
192 ISC_MEM_DEBUGRECORD)) != 0 && \
194 add_trace_entry(a, b, c, d, e); \
196 #define DELETE_TRACE(a, b, c, d, e) delete_trace_entry(a, b, c, d, e)
/* Forward declaration for the allocation dump used in error paths. */
199 print_active(isc_mem_t *ctx, FILE *out);
/*
 * add_trace_entry(): record an allocation (ptr/size/file/line) in the
 * context's debuglist, clamping oversize requests into the max_size
 * bucket.  Tries to reuse a slot in an existing debuglink before
 * malloc()ing a fresh one.  Caller must hold the mctx lock (original
 * line 202).  NOTE(review): large parts of the body are missing from
 * this extraction.
 */
202 * mctx must be locked.
205 add_trace_entry(isc_mem_t *mctx, const void *ptr, unsigned int size
211 if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0)
212 fprintf(stderr, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
215 "file %s line %u mctx %p\n"),
216 ptr, size, file, line, mctx);
/* Recording disabled for this context -- trace output above still ran. */
218 if (mctx->debuglist == NULL)
/* Clamp: everything >= max_size shares the last bucket. */
221 if (size > mctx->max_size)
222 size = mctx->max_size;
224 dl = ISC_LIST_HEAD(mctx->debuglist[size]);
226 if (dl->count == DEBUGLIST_COUNT)
/* Scan for a free slot (ptr == NULL) in this debuglink. */
228 for (i = 0; i < DEBUGLIST_COUNT; i++) {
229 if (dl->ptr[i] == NULL) {
239 dl = ISC_LIST_NEXT(dl, link);
/* No slot found: allocate a new debuglink and prepend it. */
242 dl = malloc(sizeof(debuglink_t));
245 ISC_LINK_INIT(dl, link);
246 for (i = 1; i < DEBUGLIST_COUNT; i++) {
259 ISC_LIST_PREPEND(mctx->debuglist[size], dl, link);
/*
 * delete_trace_entry(): remove a previously recorded allocation from
 * the debuglist (mirror of add_trace_entry; same size clamping).
 * Unlinks and frees a debuglink once its count drops to zero.
 * NOTE(review): the "not found" failure path after original line 305
 * is missing from this extraction.
 */
263 delete_trace_entry(isc_mem_t *mctx, const void *ptr, unsigned int size,
264 const char *file, unsigned int line)
269 if ((isc_mem_debugging & ISC_MEM_DEBUGTRACE) != 0)
270 fprintf(stderr, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
273 "file %s line %u mctx %p\n"),
274 ptr, size, file, line, mctx);
276 if (mctx->debuglist == NULL)
279 if (size > mctx->max_size)
280 size = mctx->max_size;
282 dl = ISC_LIST_HEAD(mctx->debuglist[size]);
/* Linear search of every debuglink in this size bucket. */
284 for (i = 0; i < DEBUGLIST_COUNT; i++) {
285 if (dl->ptr[i] == ptr) {
291 INSIST(dl->count > 0);
293 if (dl->count == 0) {
294 ISC_LIST_UNLINK(mctx->debuglist[size],
301 dl = ISC_LIST_NEXT(dl, link);
305 * If we get here, we didn't find the item on the list. We're
310 #endif /* ISC_MEM_TRACKLINES */
312 #if ISC_MEM_USE_INTERNAL_MALLOC
/* rmsize(): round size DOWN to a multiple of ALIGNMENT_SIZE (masks off
 * the low bits; relies on ALIGNMENT_SIZE being a power of two). */
314 rmsize(size_t size) {
316 * round down to ALIGNMENT_SIZE
318 return (size & (~(ALIGNMENT_SIZE - 1)));
/* quantize(): round size UP to a multiple of ALIGNMENT_SIZE, mapping a
 * zero-ish request to one full ALIGNMENT_SIZE unit.  NOTE(review): the
 * guard condition before original line 330 is missing here. */
322 quantize(size_t size) {
324 * Round up the result in order to get a size big
325 * enough to satisfy the request and be aligned on ALIGNMENT_SIZE
330 return (ALIGNMENT_SIZE);
331 return ((size + ALIGNMENT_SIZE - 1) & (~(ALIGNMENT_SIZE - 1)));
/*
 * more_basic_blocks(): grab NUM_BASIC_BLOCKS * mem_target bytes from
 * the underlying allocator, growing the basic_table pointer array by
 * TABLE_INCREMENT when full, then thread the new slab into a linked
 * list of mem_target-sized blocks and update the lowest/highest address
 * bounds.  Returns an isc_boolean_t; fails when the quota would be
 * exceeded or the allocator returns NULL.  Caller holds the context
 * lock (original line 344).  NOTE(review): several body lines (returns,
 * closing braces, `first = curr` style assignments) are missing.
 */
334 static inline isc_boolean_t
335 more_basic_blocks(isc_mem_t *ctx) {
337 unsigned char *curr, *next;
338 unsigned char *first, *last;
339 unsigned char **table;
340 unsigned int table_size;
344 /* Require: we hold the context lock. */
347 * Did we hit the quota for this context?
349 increment = NUM_BASIC_BLOCKS * ctx->mem_target;
350 if (ctx->quota != 0 && ctx->total + increment > ctx->quota)
/* Grow the slab-pointer table when it is full. */
353 INSIST(ctx->basic_table_count <= ctx->basic_table_size);
354 if (ctx->basic_table_count == ctx->basic_table_size) {
355 table_size = ctx->basic_table_size + TABLE_INCREMENT;
356 table = (ctx->memalloc)(ctx->arg,
357 table_size * sizeof(unsigned char *));
359 ctx->memalloc_failures++;
362 if (ctx->basic_table_size != 0) {
363 memcpy(table, ctx->basic_table,
364 ctx->basic_table_size *
365 sizeof(unsigned char *));
366 (ctx->memfree)(ctx->arg, ctx->basic_table);
368 ctx->basic_table = table;
369 ctx->basic_table_size = table_size;
/* Allocate the slab itself and record it. */
372 new = (ctx->memalloc)(ctx->arg, NUM_BASIC_BLOCKS * ctx->mem_target);
374 ctx->memalloc_failures++;
377 ctx->total += increment;
378 ctx->basic_table[ctx->basic_table_count] = new;
379 ctx->basic_table_count++;
/* Chain the slab into mem_target-sized blocks. */
382 next = curr + ctx->mem_target;
383 for (i = 0; i < (NUM_BASIC_BLOCKS - 1); i++) {
384 ((element *)curr)->next = (element *)next;
386 next += ctx->mem_target;
389 * curr is now pointing at the last block in the
392 ((element *)curr)->next = NULL;
/* Track the address range covered by all slabs. */
394 last = first + NUM_BASIC_BLOCKS * ctx->mem_target - 1;
395 if (first < ctx->lowest || ctx->lowest == NULL)
397 if (last > ctx->highest)
399 ctx->basic_blocks = new;
/*
 * more_frags(): refill the free list for size class `new_size` by
 * chopping one basic block into new_size-sized fragments; any
 * left-over tail (rounded down via rmsize) is donated to the free list
 * of its own size.  Calls more_basic_blocks() when no basic block is
 * available.  NOTE(review): the failure return and a few loop-body
 * lines are missing from this extraction.
 */
404 static inline isc_boolean_t
405 more_frags(isc_mem_t *ctx, size_t new_size) {
409 unsigned char *curr, *next;
412 * Try to get more fragments by chopping up a basic block.
415 if (ctx->basic_blocks == NULL) {
416 if (!more_basic_blocks(ctx)) {
418 * We can't get more memory from the OS, or we've
419 * hit the quota for this context.
422 * XXXRTH "At quota" notification here.
/* Pop one basic block and account for the new fragments. */
428 total_size = ctx->mem_target;
429 new = ctx->basic_blocks;
430 ctx->basic_blocks = ctx->basic_blocks->next;
431 frags = total_size / new_size;
432 ctx->stats[new_size].blocks++;
433 ctx->stats[new_size].freefrags += frags;
435 * Set up a linked-list of blocks of size
439 next = curr + new_size;
440 total_size -= new_size;
441 for (i = 0; i < (frags - 1); i++) {
442 ((element *)curr)->next = (element *)next;
445 total_size -= new_size;
448 * Add the remaining fragment of the basic block to a free list.
450 total_size = rmsize(total_size);
451 if (total_size > 0) {
452 ((element *)next)->next = ctx->freelists[total_size];
453 ctx->freelists[total_size] = (element *)next;
454 ctx->stats[total_size].freefrags++;
457 * curr is now pointing at the last block in the
460 ((element *)curr)->next = NULL;
461 ctx->freelists[new_size] = new;
/*
 * mem_getunlocked(): internal-malloc allocation path.  Requests at or
 * above max_size go straight to the underlying allocator (subject to
 * quota) and are accounted in the max_size stats bucket; smaller
 * requests are served from the quantized free list, refilled via
 * more_frags() on demand.  Caller holds the context lock.
 * NOTE(review): declarations, returns, and the `done:` tail are missing
 * from this extraction.
 */
467 mem_getunlocked(isc_mem_t *ctx, size_t size) {
468 size_t new_size = quantize(size);
471 if (size >= ctx->max_size || new_size >= ctx->max_size) {
473 * memget() was called on something beyond our upper limit.
475 if (ctx->quota != 0 && ctx->total + size > ctx->quota) {
479 ret = (ctx->memalloc)(ctx->arg, size);
481 ctx->memalloc_failures++;
486 ctx->stats[ctx->max_size].gets++;
487 ctx->stats[ctx->max_size].totalgets++;
489 * If we don't set new_size to size, then the
490 * ISC_MEM_FILL code might write over bytes we
498 * If there are no blocks in the free list for this size, get a chunk
499 * of memory and then break it up into "new_size"-sized blocks, adding
500 * them to the free list.
502 if (ctx->freelists[new_size] == NULL && !more_frags(ctx, new_size))
506 * The free list uses the "rounded-up" size "new_size".
508 ret = ctx->freelists[new_size];
509 ctx->freelists[new_size] = ctx->freelists[new_size]->next;
512 * The stats[] uses the _actual_ "size" requested by the
513 * caller, with the caveat (in the code above) that "size" >= the
514 * max. size (max_size) ends up getting recorded as a call to
517 ctx->stats[size].gets++;
518 ctx->stats[size].totalgets++;
519 ctx->stats[new_size].freefrags--;
520 ctx->inuse += new_size;
/* ISC_MEM_FILL poison pattern for freshly handed-out memory. */
526 memset(ret, 0xbe, new_size); /* Mnemonic for "beef". */
532 #if ISC_MEM_FILL && ISC_MEM_CHECKOVERRUN
/*
 * check_overrun(): verify the padding between the requested size and
 * the quantized size still holds the 0xbe fill pattern -- a mismatch
 * indicates a buffer overrun.  NOTE(review): the INSIST inside the loop
 * is missing from this extraction.
 */
534 check_overrun(void *mem, size_t size, size_t new_size) {
537 cp = (unsigned char *)mem;
539 while (size < new_size) {
/*
 * mem_putunlocked(): internal-malloc free path, mirror of
 * mem_getunlocked().  Oversize blocks go back to the underlying
 * allocator; small blocks are poisoned (0xde) and pushed onto the
 * quantized free list, with stats/inuse adjusted.  Caller holds the
 * context lock.  NOTE(review): several accounting lines and the early
 * return are missing from this extraction.
 */
548 mem_putunlocked(isc_mem_t *ctx, void *mem, size_t size) {
549 size_t new_size = quantize(size);
551 if (size == ctx->max_size || new_size >= ctx->max_size) {
553 * memput() called on something beyond our upper limit.
556 memset(mem, 0xde, size); /* Mnemonic for "dead". */
558 (ctx->memfree)(ctx->arg, mem);
559 INSIST(ctx->stats[ctx->max_size].gets != 0);
560 ctx->stats[ctx->max_size].gets--;
561 INSIST(size <= ctx->total);
568 #if ISC_MEM_CHECKOVERRUN
569 check_overrun(mem, size, new_size);
571 memset(mem, 0xde, new_size); /* Mnemonic for "dead". */
575 * The free list uses the "rounded-up" size "new_size".
577 ((element *)mem)->next = ctx->freelists[new_size];
578 ctx->freelists[new_size] = (element *)mem;
581 * The stats[] uses the _actual_ "size" requested by the
582 * caller, with the caveat (in the code above) that "size" >= the
583 * max. size (max_size) ends up getting recorded as a call to
586 INSIST(ctx->stats[size].gets != 0);
587 ctx->stats[size].gets--;
588 ctx->stats[new_size].freefrags++;
589 ctx->inuse -= new_size;
592 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
595 * Perform a malloc, doing memory filling and overrun detection as necessary.
/*
 * mem_get(): system-malloc allocation path.  With ISC_MEM_CHECKOVERRUN
 * one guard byte is appended (set to 0xbe) and checked on free.
 * NOTE(review): the size adjustment before the allocation and the
 * return are missing from this extraction.
 */
598 mem_get(isc_mem_t *ctx, size_t size) {
601 #if ISC_MEM_CHECKOVERRUN
605 ret = (ctx->memalloc)(ctx->arg, size);
607 ctx->memalloc_failures++;
611 memset(ret, 0xbe, size); /* Mnemonic for "beef". */
613 # if ISC_MEM_CHECKOVERRUN
623 * Perform a free, doing memory filling and overrun detection as necessary.
/* mem_put(): system-malloc free path; verifies the 0xbe guard byte,
 * poisons the block with 0xde, then frees it. */
626 mem_put(isc_mem_t *ctx, void *mem, size_t size) {
627 #if ISC_MEM_CHECKOVERRUN
628 INSIST(((unsigned char *)mem)[size] == 0xbe);
631 memset(mem, 0xde, size); /* Mnemonic for "dead". */
635 (ctx->memfree)(ctx->arg, mem);
639 * Update internal counters after a memory get.
/* mem_getstats(): bump gets/totalgets, clamping oversize requests into
 * the max_size bucket (same convention as the internal-malloc path). */
642 mem_getstats(isc_mem_t *ctx, size_t size) {
646 if (size > ctx->max_size) {
647 ctx->stats[ctx->max_size].gets++;
648 ctx->stats[ctx->max_size].totalgets++;
650 ctx->stats[size].gets++;
651 ctx->stats[size].totalgets++;
656 * Update internal counters after a memory put.
/* mem_putstats(): inverse of mem_getstats(); INSISTs guard against
 * double-free / wrong-size accounting.  The `ptr` parameter appears
 * unused in the surviving lines. */
659 mem_putstats(isc_mem_t *ctx, void *ptr, size_t size) {
662 INSIST(ctx->inuse >= size);
665 if (size > ctx->max_size) {
666 INSIST(ctx->stats[ctx->max_size].gets > 0U);
667 ctx->stats[ctx->max_size].gets--;
669 INSIST(ctx->stats[size].gets > 0U);
670 ctx->stats[size].gets--;
674 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* Default backing allocators used by isc_mem_create(): thin wrappers
 * around malloc()/free(); `arg` is unused.  NOTE(review): the
 * free(ptr) body of default_memfree is missing from this extraction. */
681 default_memalloc(void *arg, size_t size) {
685 return (malloc(size));
689 default_memfree(void *arg, void *ptr) {
/*
 * isc_mem_createx(): construct a memory context with caller-supplied
 * backing allocator/free functions.  Initializes the lock, statistics
 * table (max_size + 1 buckets), the internal-malloc free lists when
 * compiled in, and the optional debuglist when ISC_MEM_DEBUGRECORD is
 * set at runtime.  On any failure the error path (original lines
 * 809-819) releases whatever was allocated.  NOTE(review): many lines
 * -- `*ctxp = ctx`, gotos, closing braces -- are missing from this
 * extraction.
 */
699 isc_mem_createx(size_t init_max_size, size_t target_size,
700 isc_memalloc_t memalloc, isc_memfree_t memfree, void *arg,
706 REQUIRE(ctxp != NULL && *ctxp == NULL);
707 REQUIRE(memalloc != NULL);
708 REQUIRE(memfree != NULL);
/* Sanity: ALIGNMENT_SIZE must be a power of two for quantize/rmsize. */
710 INSIST((ALIGNMENT_SIZE & (ALIGNMENT_SIZE - 1)) == 0);
712 #if !ISC_MEM_USE_INTERNAL_MALLOC
/* The context itself comes from the caller's allocator. */
716 ctx = (memalloc)(arg, sizeof(*ctx));
718 return (ISC_R_NOMEMORY);
720 if (isc_mutex_init(&ctx->lock) != ISC_R_SUCCESS) {
721 UNEXPECTED_ERROR(__FILE__, __LINE__,
722 "isc_mutex_init() %s",
723 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
724 ISC_MSG_FAILED, "failed"));
726 return (ISC_R_UNEXPECTED);
729 if (init_max_size == 0U)
730 ctx->max_size = DEF_MAX_SIZE;
732 ctx->max_size = init_max_size;
740 ctx->hi_called = ISC_FALSE;
742 ctx->water_arg = NULL;
743 ctx->magic = MEM_MAGIC;
744 isc_ondestroy_init(&ctx->ondestroy);
745 ctx->memalloc = memalloc;
746 ctx->memfree = memfree;
749 ctx->checkfree = ISC_TRUE;
750 #if ISC_MEM_TRACKLINES
751 ctx->debuglist = NULL;
753 ISC_LIST_INIT(ctx->pools);
755 #if ISC_MEM_USE_INTERNAL_MALLOC
756 ctx->freelists = NULL;
757 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* One stats bucket per size 0..max_size inclusive. */
759 ctx->stats = (memalloc)(arg,
760 (ctx->max_size+1) * sizeof(struct stats));
761 if (ctx->stats == NULL) {
762 result = ISC_R_NOMEMORY;
765 memset(ctx->stats, 0, (ctx->max_size + 1) * sizeof(struct stats));
767 #if ISC_MEM_USE_INTERNAL_MALLOC
768 if (target_size == 0)
769 ctx->mem_target = DEF_MEM_TARGET;
771 ctx->mem_target = target_size;
772 ctx->freelists = (memalloc)(arg, ctx->max_size * sizeof(element *));
773 if (ctx->freelists == NULL) {
774 result = ISC_R_NOMEMORY;
777 memset(ctx->freelists, 0,
778 ctx->max_size * sizeof(element *));
779 ctx->basic_blocks = NULL;
780 ctx->basic_table = NULL;
781 ctx->basic_table_count = 0;
782 ctx->basic_table_size = 0;
785 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
787 #if ISC_MEM_TRACKLINES
/* Only allocate the debuglist when recording is requested at runtime. */
788 if ((isc_mem_debugging & ISC_MEM_DEBUGRECORD) != 0) {
791 ctx->debuglist = (memalloc)(arg,
792 (ctx->max_size+1) * sizeof(debuglist_t));
793 if (ctx->debuglist == NULL) {
794 result = ISC_R_NOMEMORY;
797 for (i = 0; i <= ctx->max_size; i++)
798 ISC_LIST_INIT(ctx->debuglist[i]);
802 ctx->memalloc_failures = 0;
805 return (ISC_R_SUCCESS);
/* Error path: unwind partial allocations with the caller's free fn. */
809 if (ctx->stats != NULL)
810 (memfree)(arg, ctx->stats);
811 #if ISC_MEM_USE_INTERNAL_MALLOC
812 if (ctx->freelists != NULL)
813 (memfree)(arg, ctx->freelists);
814 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
815 #if ISC_MEM_TRACKLINES
816 if (ctx->debuglist != NULL)
817 (ctx->memfree)(ctx->arg, ctx->debuglist);
818 #endif /* ISC_MEM_TRACKLINES */
819 DESTROYLOCK(&ctx->lock);
/* isc_mem_create(): convenience wrapper using malloc/free defaults. */
827 isc_mem_create(size_t init_max_size, size_t target_size,
830 return (isc_mem_createx(init_max_size, target_size,
831 default_memalloc, default_memfree, NULL,
/*
 * destroy(): tear down a memory context once its reference count hits
 * zero.  With checkfree enabled it INSISTs that nothing is still
 * outstanding (dumping the debuglist via print_active first), then
 * frees the debuglist, stats table, internal-malloc slabs, the lock,
 * and finally the context itself, firing any ondestroy notification.
 * NOTE(review): declarations and several closing braces are missing
 * from this extraction.
 */
836 destroy(isc_mem_t *ctx) {
838 isc_ondestroy_t ondest;
842 #if ISC_MEM_USE_INTERNAL_MALLOC
843 INSIST(ISC_LIST_EMPTY(ctx->pools));
844 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
846 #if ISC_MEM_TRACKLINES
847 if (ctx->debuglist != NULL) {
848 if (ctx->checkfree) {
/* Leak check: every size bucket's debuglist must be empty. */
849 for (i = 0; i <= ctx->max_size; i++) {
850 if (!ISC_LIST_EMPTY(ctx->debuglist[i]))
851 print_active(ctx, stderr);
852 INSIST(ISC_LIST_EMPTY(ctx->debuglist[i]));
/* Drain and free every remaining debuglink. */
857 for (i = 0; i <= ctx->max_size; i++)
858 for (dl = ISC_LIST_HEAD(ctx->debuglist[i]);
860 dl = ISC_LIST_HEAD(ctx->debuglist[i])) {
861 ISC_LIST_UNLINK(ctx->debuglist[i],
866 (ctx->memfree)(ctx->arg, ctx->debuglist);
869 INSIST(ctx->references == 0);
871 if (ctx->checkfree) {
872 for (i = 0; i <= ctx->max_size; i++) {
873 #if ISC_MEM_TRACKLINES
874 if (ctx->stats[i].gets != 0U)
875 print_active(ctx, stderr);
877 INSIST(ctx->stats[i].gets == 0U);
881 (ctx->memfree)(ctx->arg, ctx->stats);
883 #if ISC_MEM_USE_INTERNAL_MALLOC
884 for (i = 0; i < ctx->basic_table_count; i++)
885 (ctx->memfree)(ctx->arg, ctx->basic_table[i]);
886 (ctx->memfree)(ctx->arg, ctx->freelists);
887 (ctx->memfree)(ctx->arg, ctx->basic_table);
888 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* Copy the ondestroy handle out before the ctx memory is released. */
890 ondest = ctx->ondestroy;
892 DESTROYLOCK(&ctx->lock);
893 (ctx->memfree)(ctx->arg, ctx);
895 isc_ondestroy_notify(&ondest, ctx);
/*
 * isc_mem_attach(): take a new reference on a context under its lock.
 * NOTE(review): the LOCK() call and the `*targetp = source` assignment
 * are missing from this extraction.
 */
899 isc_mem_attach(isc_mem_t *source, isc_mem_t **targetp) {
900 REQUIRE(VALID_CONTEXT(source));
901 REQUIRE(targetp != NULL && *targetp == NULL);
904 source->references++;
905 UNLOCK(&source->lock);
/*
 * isc_mem_detach(): drop a reference; the last detacher destroys the
 * context (outside the lock, via the want_destroy flag).
 * NOTE(review): the `*ctxp = NULL` and destroy() call are missing.
 */
911 isc_mem_detach(isc_mem_t **ctxp) {
913 isc_boolean_t want_destroy = ISC_FALSE;
915 REQUIRE(ctxp != NULL);
917 REQUIRE(VALID_CONTEXT(ctx));
920 INSIST(ctx->references > 0);
922 if (ctx->references == 0)
923 want_destroy = ISC_TRUE;
/*
 * isc__mem_putanddetach(): free `ptr` and drop a context reference in
 * one atomic-with-respect-to-the-lock operation; needed because `ptr`
 * often contains the caller's mctx pointer (so the put must happen
 * before the potential destroy).  NOTE(review): LOCK/UNLOCK lines and
 * the trailing destroy() are missing from this extraction.
 */
933 * isc_mem_putanddetach() is the equivalent of:
936 * isc_mem_attach(ptr->mctx, &mctx);
937 * isc_mem_detach(&ptr->mctx);
938 * isc_mem_put(mctx, ptr, sizeof(*ptr);
939 * isc_mem_detach(&mctx);
943 isc__mem_putanddetach(isc_mem_t **ctxp, void *ptr, size_t size FLARG) {
945 isc_boolean_t want_destroy = ISC_FALSE;
947 REQUIRE(ctxp != NULL);
949 REQUIRE(VALID_CONTEXT(ctx));
950 REQUIRE(ptr != NULL);
953 * Must be before mem_putunlocked() as ctxp is usually within
/* Free via whichever allocator flavor this build uses. */
958 #if ISC_MEM_USE_INTERNAL_MALLOC
960 mem_putunlocked(ctx, ptr, size);
961 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
962 mem_put(ctx, ptr, size);
964 mem_putstats(ctx, ptr, size);
965 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
967 DELETE_TRACE(ctx, ptr, size, file, line);
968 INSIST(ctx->references > 0);
970 if (ctx->references == 0)
971 want_destroy = ISC_TRUE;
/*
 * isc_mem_destroy(): legacy single-owner teardown -- requires exactly
 * one outstanding reference (dumping active allocations first when
 * tracking is on).  NOTE(review): the actual destroy() call and
 * `*ctxp = NULL` are missing from this extraction.
 */
980 isc_mem_destroy(isc_mem_t **ctxp) {
984 * This routine provides legacy support for callers who use mctxs
985 * without attaching/detaching.
988 REQUIRE(ctxp != NULL);
990 REQUIRE(VALID_CONTEXT(ctx));
993 #if ISC_MEM_TRACKLINES
994 if (ctx->references != 1)
995 print_active(ctx, stderr);
997 REQUIRE(ctx->references == 1);
/* isc_mem_ondestroy(): register a task event to fire at destroy time. */
1007 isc_mem_ondestroy(isc_mem_t *ctx, isc_task_t *task, isc_event_t **event) {
1011 res = isc_ondestroy_register(&ctx->ondestroy, task, event);
/*
 * isc__mem_get(): public allocation entry point.  Allocates via the
 * build-selected backend, records the trace entry, updates
 * maxinuse/hi_water bookkeeping, and fires the hi-water callback
 * outside the lock when usage first crosses hi_water.  NOTE(review):
 * LOCK/UNLOCK lines, the ptr NULL check, and the return are missing
 * from this extraction.
 */
1019 isc__mem_get(isc_mem_t *ctx, size_t size FLARG) {
1021 isc_boolean_t call_water = ISC_FALSE;
1023 REQUIRE(VALID_CONTEXT(ctx));
1025 #if ISC_MEM_USE_INTERNAL_MALLOC
1027 ptr = mem_getunlocked(ctx, size);
1028 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1029 ptr = mem_get(ctx, size);
1032 mem_getstats(ctx, size);
1033 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1035 ADD_TRACE(ctx, ptr, size, file, line);
/* Latch hi_called so the hi-water callback fires only once per crossing. */
1036 if (ctx->hi_water != 0U && !ctx->hi_called &&
1037 ctx->inuse > ctx->hi_water) {
1038 ctx->hi_called = ISC_TRUE;
1039 call_water = ISC_TRUE;
1041 if (ctx->inuse > ctx->maxinuse) {
1042 ctx->maxinuse = ctx->inuse;
1043 if (ctx->hi_water != 0U && ctx->inuse > ctx->hi_water &&
1044 (isc_mem_debugging & ISC_MEM_DEBUGUSAGE) != 0)
1045 fprintf(stderr, "maxinuse = %lu\n",
1046 (unsigned long)ctx->inuse);
/* Callback runs without the context lock held. */
1051 (ctx->water)(ctx->water_arg, ISC_MEM_HIWATER);
/*
 * isc__mem_put(): public free entry point, mirror of isc__mem_get().
 * Frees via the build-selected backend, removes the trace entry, and
 * fires the lo-water callback (outside the lock) when usage drops back
 * below lo_water after a hi-water notification.  NOTE(review):
 * LOCK/UNLOCK lines are missing from this extraction.
 */
1057 isc__mem_put(isc_mem_t *ctx, void *ptr, size_t size FLARG)
1059 isc_boolean_t call_water = ISC_FALSE;
1061 REQUIRE(VALID_CONTEXT(ctx));
1062 REQUIRE(ptr != NULL);
1064 #if ISC_MEM_USE_INTERNAL_MALLOC
1066 mem_putunlocked(ctx, ptr, size);
1067 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1068 mem_put(ctx, ptr, size);
1070 mem_putstats(ctx, ptr, size);
1071 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1073 DELETE_TRACE(ctx, ptr, size, file, line);
1076 * The check against ctx->lo_water == 0 is for the condition
1077 * when the context was pushed over hi_water but then had
1078 * isc_mem_setwater() called with 0 for hi_water and lo_water.
1080 if (ctx->hi_called &&
1081 (ctx->inuse < ctx->lo_water || ctx->lo_water == 0U)) {
1082 ctx->hi_called = ISC_FALSE;
1084 if (ctx->water != NULL)
1085 call_water = ISC_TRUE;
/* Callback runs without the context lock held. */
1090 (ctx->water)(ctx->water_arg, ISC_MEM_LOWATER);
1093 #if ISC_MEM_TRACKLINES
/*
 * print_active(): dump every outstanding allocation recorded in the
 * debuglist to `out` (ptr/size/file/line per entry), or "None." when
 * the lists are empty.  Messages come from the isc_msgcat catalog.
 * NOTE(review): the `found` bookkeeping and several loop braces are
 * missing from this extraction.
 */
1095 print_active(isc_mem_t *mctx, FILE *out) {
1096 if (mctx->debuglist != NULL) {
1100 isc_boolean_t found;
1102 fprintf(out, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1104 "Dump of all outstanding "
1105 "memory allocations:\n"));
1107 format = isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1108 ISC_MSG_PTRFILELINE,
1109 "\tptr %p size %u file %s line %u\n");
1110 for (i = 0; i <= mctx->max_size; i++) {
1111 dl = ISC_LIST_HEAD(mctx->debuglist[i]);
1116 while (dl != NULL) {
1117 for (j = 0; j < DEBUGLIST_COUNT; j++)
1118 if (dl->ptr[j] != NULL)
1119 fprintf(out, format,
1124 dl = ISC_LIST_NEXT(dl, link);
1128 fprintf(out, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1129 ISC_MSG_NONE, "\tNone.\n"));
/*
 * isc_mem_stats(): print per-size-class get counters (skipping classes
 * never used), then a table of every attached mempool's settings and
 * counters, and finally (when tracking) the active-allocation dump.
 * Column labels come from the isc_msgcat catalog with English
 * fallbacks.  NOTE(review): LOCK/UNLOCK lines and some fprintf
 * continuation lines are missing from this extraction.
 */
1135 * Print the stats[] on the stream "out" with suitable formatting.
1138 isc_mem_stats(isc_mem_t *ctx, FILE *out) {
1140 const struct stats *s;
1141 const isc_mempool_t *pool;
1143 REQUIRE(VALID_CONTEXT(ctx));
1146 for (i = 0; i <= ctx->max_size; i++) {
/* Skip size classes that have never been used. */
1149 if (s->totalgets == 0U && s->gets == 0U)
1151 fprintf(out, "%s%5lu: %11lu gets, %11lu rem",
1152 (i == ctx->max_size) ? ">=" : " ",
1153 (unsigned long) i, s->totalgets, s->gets);
1154 #if ISC_MEM_USE_INTERNAL_MALLOC
1155 if (s->blocks != 0 || s->freefrags != 0)
1156 fprintf(out, " (%lu bl, %lu ff)",
1157 s->blocks, s->freefrags);
1158 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1163 * Note that since a pool can be locked now, these stats might be
1164 * somewhat off if the pool is in active use at the time the stats
1165 * are dumped. The link fields are protected by the isc_mem_t's
1166 * lock, however, so walking this list and extracting integers from
1167 * stats fields is always safe.
1169 pool = ISC_LIST_HEAD(ctx->pools);
1171 fprintf(out, isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1173 "[Pool statistics]\n"));
1174 fprintf(out, "%15s %10s %10s %10s %10s %10s %10s %10s %1s\n",
1175 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1176 ISC_MSG_POOLNAME, "name"),
1177 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1178 ISC_MSG_POOLSIZE, "size"),
1179 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1180 ISC_MSG_POOLMAXALLOC, "maxalloc"),
1181 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1182 ISC_MSG_POOLALLOCATED, "allocated"),
1183 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1184 ISC_MSG_POOLFREECOUNT, "freecount"),
1185 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1186 ISC_MSG_POOLFREEMAX, "freemax"),
1187 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1188 ISC_MSG_POOLFILLCOUNT, "fillcount"),
1189 isc_msgcat_get(isc_msgcat, ISC_MSGSET_MEM,
1190 ISC_MSG_POOLGETS, "gets"),
/* One row per attached pool; last column flags whether it has a lock. */
1193 while (pool != NULL) {
1194 fprintf(out, "%15s %10lu %10u %10u %10u %10u %10u %10u %s\n",
1195 pool->name, (unsigned long) pool->size, pool->maxalloc,
1196 pool->allocated, pool->freecount, pool->freemax,
1197 pool->fillcount, pool->gets,
1198 (pool->lock == NULL ? "N" : "Y"));
1199 pool = ISC_LIST_NEXT(pool, link);
1202 #if ISC_MEM_TRACKLINES
1203 print_active(ctx, out);
/*
 * malloc()/free()-style interface: the requested size is stored in a
 * size_info prefix of ALIGNMENT_SIZE bytes in front of the returned
 * pointer, so isc__mem_free() can recover it.  NOTE(review): the lines
 * storing si[0].u.size and advancing the pointer are missing from this
 * extraction.
 */
1210 * Replacements for malloc() and free() -- they implicitly remember the
1211 * size of the object allocated (with some additional overhead).
1215 isc__mem_allocateunlocked(isc_mem_t *ctx, size_t size) {
/* Reserve room for the hidden size header. */
1218 size += ALIGNMENT_SIZE;
1219 #if ISC_MEM_USE_INTERNAL_MALLOC
1220 si = mem_getunlocked(ctx, size);
1221 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1222 si = mem_get(ctx, size);
1223 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
/* Public wrapper: locks, allocates, accounts, and records the trace. */
1231 isc__mem_allocate(isc_mem_t *ctx, size_t size FLARG) {
1234 REQUIRE(VALID_CONTEXT(ctx));
1236 #if ISC_MEM_USE_INTERNAL_MALLOC
1238 si = isc__mem_allocateunlocked(ctx, size);
1239 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1240 si = isc__mem_allocateunlocked(ctx, size);
1243 mem_getstats(ctx, si[-1].u.size);
1244 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1246 #if ISC_MEM_TRACKLINES
1247 ADD_TRACE(ctx, si, si[-1].u.size, file, line);
/*
 * isc__mem_free(): read the stored size from the hidden header and
 * release header + payload together.  NOTE(review): the `size = ...`
 * assignment and LOCK/UNLOCK lines are missing from this extraction.
 */
1256 isc__mem_free(isc_mem_t *ctx, void *ptr FLARG) {
1260 REQUIRE(VALID_CONTEXT(ctx));
1261 REQUIRE(ptr != NULL);
1263 si = &(((size_info *)ptr)[-1]);
1266 #if ISC_MEM_USE_INTERNAL_MALLOC
1268 mem_putunlocked(ctx, si, size);
1269 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1270 mem_put(ctx, si, size);
1272 mem_putstats(ctx, si, size);
1273 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1275 DELETE_TRACE(ctx, ptr, size, file, line);
1282 * Other useful things.
/*
 * isc__mem_strdup(): duplicate a NUL-terminated string with
 * isc__mem_allocate().  NOTE(review): the strlen() call and the NULL
 * check on `ns` are missing from this extraction -- the surviving
 * strncpy relies on len + 1 covering the terminator.
 */
1286 isc__mem_strdup(isc_mem_t *mctx, const char *s FLARG) {
1290 REQUIRE(VALID_CONTEXT(mctx));
1295 ns = isc__mem_allocate(mctx, len + 1 FLARG_PASS);
1298 strncpy(ns, s, len + 1);
/* Enable/disable the leak INSISTs that run at context destroy time. */
1304 isc_mem_setdestroycheck(isc_mem_t *ctx, isc_boolean_t flag) {
1305 REQUIRE(VALID_CONTEXT(ctx));
1308 ctx->checkfree = flag;
/* Quota setter/getter fragments; bodies largely missing from extraction. */
1318 isc_mem_setquota(isc_mem_t *ctx, size_t quota) {
1319 REQUIRE(VALID_CONTEXT(ctx));
1328 isc_mem_getquota(isc_mem_t *ctx) {
1331 REQUIRE(VALID_CONTEXT(ctx));
/* Report bytes currently in use (locked read of ctx->inuse, presumably). */
1342 isc_mem_inuse(isc_mem_t *ctx) {
1345 REQUIRE(VALID_CONTEXT(ctx));
/*
 * isc_mem_setwater(): install or clear the hi/lo water callback.
 * Passing water == NULL resets the watermarks and hi_called latch.
 */
1356 isc_mem_setwater(isc_mem_t *ctx, isc_mem_water_t water, void *water_arg,
1357 size_t hiwater, size_t lowater)
1359 REQUIRE(VALID_CONTEXT(ctx));
1360 REQUIRE(hiwater >= lowater);
1363 if (water == NULL) {
1365 ctx->water_arg = NULL;
1368 ctx->hi_called = ISC_FALSE;
1371 ctx->water_arg = water_arg;
1372 ctx->hi_water = hiwater;
1373 ctx->lo_water = lowater;
1374 ctx->hi_called = ISC_FALSE;
/*
 * isc_mempool_create(): allocate a pool from `mctx`, initialize its
 * counters/limits to defaults (unbounded maxalloc, fillcount 1), and
 * link it onto the context's pool list under the context lock.
 * NOTE(review): some initializers (size, freemax, lock, name) and the
 * `*mpctxp = mpctx` assignment are missing from this extraction.
 */
1384 isc_mempool_create(isc_mem_t *mctx, size_t size, isc_mempool_t **mpctxp) {
1385 isc_mempool_t *mpctx;
1387 REQUIRE(VALID_CONTEXT(mctx));
1389 REQUIRE(mpctxp != NULL && *mpctxp == NULL);
1392 * Allocate space for this pool, initialize values, and if all works
1393 * well, attach to the memory context.
1395 mpctx = isc_mem_get(mctx, sizeof(isc_mempool_t));
1397 return (ISC_R_NOMEMORY);
1399 mpctx->magic = MEMPOOL_MAGIC;
1403 mpctx->maxalloc = UINT_MAX;
1404 mpctx->allocated = 0;
1405 mpctx->freecount = 0;
1407 mpctx->fillcount = 1;
1409 #if ISC_MEMPOOL_NAMES
1412 mpctx->items = NULL;
1417 ISC_LIST_INITANDAPPEND(mctx->pools, mpctx, link);
1418 UNLOCK(&mctx->lock);
1420 return (ISC_R_SUCCESS);
/*
 * isc_mempool_setname(): copy a display name into the pool (truncated
 * to the 16-byte name buffer, always NUL-terminated), taking the
 * pool's optional lock.  No-op name storage unless ISC_MEMPOOL_NAMES.
 */
1424 isc_mempool_setname(isc_mempool_t *mpctx, const char *name) {
1425 REQUIRE(name != NULL);
1427 #if ISC_MEMPOOL_NAMES
1428 if (mpctx->lock != NULL)
1431 strncpy(mpctx->name, name, sizeof(mpctx->name) - 1);
1432 mpctx->name[sizeof(mpctx->name) - 1] = '\0';
1434 if (mpctx->lock != NULL)
1435 UNLOCK(mpctx->lock);
/*
 * isc_mempool_destroy(): require that no items are outstanding
 * (allocated == 0), return every free-list item to the parent context,
 * unlink the pool from the context's pool list, and free the pool
 * struct itself.  NOTE(review): LOCK lines, the magic reset, and
 * `*mpctxp = NULL` are missing from this extraction.
 */
1443 isc_mempool_destroy(isc_mempool_t **mpctxp) {
1444 isc_mempool_t *mpctx;
1449 REQUIRE(mpctxp != NULL);
1451 REQUIRE(VALID_MEMPOOL(mpctx));
1452 #if ISC_MEMPOOL_NAMES
/* Destroying a pool with live items is a hard error. */
1453 if (mpctx->allocated > 0)
1454 UNEXPECTED_ERROR(__FILE__, __LINE__,
1455 "isc_mempool_destroy(): mempool %s "
1459 REQUIRE(mpctx->allocated == 0);
1469 * Return any items on the free list
1472 while (mpctx->items != NULL) {
1473 INSIST(mpctx->freecount > 0);
1475 item = mpctx->items;
1476 mpctx->items = item->next;
1478 #if ISC_MEM_USE_INTERNAL_MALLOC
1479 mem_putunlocked(mctx, item, mpctx->size);
1480 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1481 mem_put(mctx, item, mpctx->size);
1482 mem_putstats(mctx, item, mpctx->size);
1483 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1485 UNLOCK(&mctx->lock);
1488 * Remove our linked list entry from the memory context.
1491 ISC_LIST_UNLINK(mctx->pools, mpctx, link);
1492 UNLOCK(&mctx->lock);
1496 isc_mem_put(mpctx->mctx, mpctx, sizeof(isc_mempool_t));
/* isc_mempool_associatelock(): attach an external lock to serialize
 * pool operations; only legal before a lock has been set. */
1505 isc_mempool_associatelock(isc_mempool_t *mpctx, isc_mutex_t *lock) {
1506 REQUIRE(VALID_MEMPOOL(mpctx));
1507 REQUIRE(mpctx->lock == NULL);
1508 REQUIRE(lock != NULL);
/*
 * isc__mempool_get(): hand out one item.  Fast path pops the pool's
 * free list; on a miss it locks the parent context and fetches up to
 * `fillcount` fresh items, then retries.  Returns NULL when maxalloc
 * is reached or the context cannot supply memory.  NOTE(review): LOCK
 * lines, counter updates, and the final return are missing from this
 * extraction.
 */
1514 isc__mempool_get(isc_mempool_t *mpctx FLARG) {
1519 REQUIRE(VALID_MEMPOOL(mpctx));
1523 if (mpctx->lock != NULL)
1527 * Don't let the caller go over quota
1529 if (mpctx->allocated >= mpctx->maxalloc) {
1535 * if we have a free list item, return the first here
1537 item = mpctx->items;
1539 mpctx->items = item->next;
1540 INSIST(mpctx->freecount > 0);
1548 * We need to dip into the well. Lock the memory context here and
1549 * fill up our free list.
1552 for (i = 0; i < mpctx->fillcount; i++) {
1553 #if ISC_MEM_USE_INTERNAL_MALLOC
1554 item = mem_getunlocked(mctx, mpctx->size);
1555 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1556 item = mem_get(mctx, mpctx->size);
1558 mem_getstats(mctx, mpctx->size);
1559 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1562 item->next = mpctx->items;
1563 mpctx->items = item;
1566 UNLOCK(&mctx->lock);
1569 * If we didn't get any items, return NULL.
1571 item = mpctx->items;
1575 mpctx->items = item->next;
1581 if (mpctx->lock != NULL)
1582 UNLOCK(mpctx->lock);
/* Trace recording takes the context lock separately. */
1584 #if ISC_MEM_TRACKLINES
1587 ADD_TRACE(mctx, item, mpctx->size, file, line);
1588 UNLOCK(&mctx->lock);
1590 #endif /* ISC_MEM_TRACKLINES */
/*
 * isc__mempool_put(): return one item.  If the pool's free list is
 * already at freemax, the item goes straight back to the parent
 * context (under the context lock); otherwise it is pushed onto the
 * pool's free list.  NOTE(review): LOCK lines and the freecount/
 * allocated counter updates are missing from this extraction.
 */
1596 isc__mempool_put(isc_mempool_t *mpctx, void *mem FLARG) {
1600 REQUIRE(VALID_MEMPOOL(mpctx));
1601 REQUIRE(mem != NULL);
1605 if (mpctx->lock != NULL)
1608 INSIST(mpctx->allocated > 0);
1611 #if ISC_MEM_TRACKLINES
1613 DELETE_TRACE(mctx, mem, mpctx->size, file, line);
1614 UNLOCK(&mctx->lock);
1615 #endif /* ISC_MEM_TRACKLINES */
1618 * If our free list is full, return this to the mctx directly.
1620 if (mpctx->freecount >= mpctx->freemax) {
1621 #if ISC_MEM_USE_INTERNAL_MALLOC
1623 mem_putunlocked(mctx, mem, mpctx->size);
1624 UNLOCK(&mctx->lock);
1625 #else /* ISC_MEM_USE_INTERNAL_MALLOC */
1626 mem_put(mctx, mem, mpctx->size);
1628 mem_putstats(mctx, mem, mpctx->size);
1629 UNLOCK(&mctx->lock);
1630 #endif /* ISC_MEM_USE_INTERNAL_MALLOC */
1631 if (mpctx->lock != NULL)
1632 UNLOCK(mpctx->lock);
1637 * Otherwise, attach it to our free list and bump the counter.
1640 item = (element *)mem;
1641 item->next = mpctx->items;
1642 mpctx->items = item;
1644 if (mpctx->lock != NULL)
1645 UNLOCK(mpctx->lock);
/*
 * Mempool limit setters/getters.  All follow the same pattern: take
 * the pool's optional lock, read or write one field, unlock, (getters)
 * return the snapshot.  NOTE(review): the LOCK() calls and the return
 * statements of the getters are missing from this extraction.
 */
/* Set the maximum number of items kept on the pool's free list. */
1653 isc_mempool_setfreemax(isc_mempool_t *mpctx, unsigned int limit) {
1654 REQUIRE(VALID_MEMPOOL(mpctx));
1656 if (mpctx->lock != NULL)
1659 mpctx->freemax = limit;
1661 if (mpctx->lock != NULL)
1662 UNLOCK(mpctx->lock);
/* Read back the free-list cap. */
1666 isc_mempool_getfreemax(isc_mempool_t *mpctx) {
1667 unsigned int freemax;
1669 REQUIRE(VALID_MEMPOOL(mpctx));
1671 if (mpctx->lock != NULL)
1674 freemax = mpctx->freemax;
1676 if (mpctx->lock != NULL)
1677 UNLOCK(mpctx->lock);
/* Read the current number of items on the free list. */
1683 isc_mempool_getfreecount(isc_mempool_t *mpctx) {
1684 unsigned int freecount;
1686 REQUIRE(VALID_MEMPOOL(mpctx));
1688 if (mpctx->lock != NULL)
1691 freecount = mpctx->freecount;
1693 if (mpctx->lock != NULL)
1694 UNLOCK(mpctx->lock);
/* Set the hard cap on simultaneously outstanding items. */
1700 isc_mempool_setmaxalloc(isc_mempool_t *mpctx, unsigned int limit) {
1703 REQUIRE(VALID_MEMPOOL(mpctx));
1705 if (mpctx->lock != NULL)
1708 mpctx->maxalloc = limit;
1710 if (mpctx->lock != NULL)
1711 UNLOCK(mpctx->lock);
/* Read back the outstanding-item cap. */
1715 isc_mempool_getmaxalloc(isc_mempool_t *mpctx) {
1716 unsigned int maxalloc;
1718 REQUIRE(VALID_MEMPOOL(mpctx));
1720 if (mpctx->lock != NULL)
1723 maxalloc = mpctx->maxalloc;
1725 if (mpctx->lock != NULL)
1726 UNLOCK(mpctx->lock);
/* Read the number of items currently handed out. */
1732 isc_mempool_getallocated(isc_mempool_t *mpctx) {
1733 unsigned int allocated;
1735 REQUIRE(VALID_MEMPOOL(mpctx));
1737 if (mpctx->lock != NULL)
1740 allocated = mpctx->allocated;
1742 if (mpctx->lock != NULL)
1743 UNLOCK(mpctx->lock);
/* Set how many items a refill fetches from the parent context. */
1749 isc_mempool_setfillcount(isc_mempool_t *mpctx, unsigned int limit) {
1751 REQUIRE(VALID_MEMPOOL(mpctx));
1753 if (mpctx->lock != NULL)
1756 mpctx->fillcount = limit;
1758 if (mpctx->lock != NULL)
1759 UNLOCK(mpctx->lock);
/* Read back the refill batch size. */
1763 isc_mempool_getfillcount(isc_mempool_t *mpctx) {
1764 unsigned int fillcount;
1766 REQUIRE(VALID_MEMPOOL(mpctx));
1768 if (mpctx->lock != NULL)
1771 fillcount = mpctx->fillcount;
1773 if (mpctx->lock != NULL)
1774 UNLOCK(mpctx->lock);