[dragonfly.git] / gnu / lib / libregex / test / g++malloc.c
1#define inline
2
3/*
4Copyright (C) 1989 Free Software Foundation
5 written by Doug Lea (dl@oswego.edu)
6
7This file is part of GNU CC.
8
9GNU CC is distributed in the hope that it will be useful,
10but WITHOUT ANY WARRANTY. No author or distributor
11accepts responsibility to anyone for the consequences of using it
12or for whether it serves any particular purpose or works at all,
13unless he says so in writing. Refer to the GNU CC General Public
14License for full details.
15
16Everyone is granted permission to copy, modify and redistribute
17GNU CC, but only under the conditions described in the
18GNU CC General Public License. A copy of this license is
19supposed to have been given to you along with GNU CC so you
20can know your rights and responsibilities. It should be in a
21file named COPYING. Among other things, the copyright notice
22and this notice must be preserved on all copies.
23*/
24
25
26
27#ifndef NO_LIBGXX_MALLOC /* ignore whole file otherwise */
28
29/* compile with -DMALLOC_STATS to collect statistics */
30/* collecting statistics slows down malloc by at least 15% */
31
32#ifdef MALLOC_STATS
33#define UPDATE_STATS(ARGS) {ARGS;}
34#else
35#define UPDATE_STATS(ARGS)
36#endif
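/* For example, UPDATE_STATS((++n_mallocs, requested_mem += bytes)) expands
   to {(++n_mallocs, requested_mem += bytes);} when MALLOC_STATS is defined,
   and to nothing at all otherwise, so the statistics code costs nothing
   in a normal build. */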
37
38/* History
39
40
41 Tue Jan 16 04:54:27 1990 Doug Lea (dl at g.oswego.edu)
42
43 version 1 released in libg++
44
45 Sun Jan 21 05:52:47 1990 Doug Lea (dl at g.oswego.edu)
46
 47 bins are now their own struct, for sanity.
48
49 new victim search strategy: scan up and consolidate.
50 Both faster and less fragmentation.
51
52 refined when to scan bins for consolidation, via consollink, etc.
53
54 realloc: always try to expand chunk, avoiding some fragmentation.
55
56 changed a few inlines into macros
57
58 hardwired SBRK_UNIT to 4096 for uniformity across systems
59
60 Tue Mar 20 14:18:23 1990 Doug Lea (dl at g.oswego.edu)
61
62 calloc and cfree now correctly parameterized.
63
64 Sun Apr 1 10:00:48 1990 Doug Lea (dl at g.oswego.edu)
65
66 added memalign and valloc.
67
68 Sun Jun 24 05:46:48 1990 Doug Lea (dl at g.oswego.edu)
69
 70 #include getpagesize.h only ifndef sun
71 cache pagesize after first call
72
73 Wed Jul 25 08:35:19 1990 Doug Lea (dl at g.oswego.edu)
74
75 No longer rely on a `designated victim':
76
77 1. It sometimes caused splits of large chunks
78 when smaller ones would do, leading to
79 bad worst-case fragmentation.
80
 81 2. Scanning through the av array is fast anyway,
82 so the overhead isn't worth it.
83
84 To compensate, several other minor changes:
85
86 1. Unusable chunks are checked for consolidation during
87 searches inside bins, better distributing chunks
88 across bins.
89
90 2. Chunks are returned when found in malloc_find_space,
91 rather than finishing cleaning everything up, to
92 avoid wasted iterations due to (1).
93*/
94
95/*
96 A version of malloc/free/realloc tuned for C++ applications.
97
98 Here's what you probably want to know first:
99
100 In various tests, this appears to be about as fast as,
 101 and usually substantially less memory-wasteful than, BSD/GNU emacs malloc.
102
103 Generally, it is slower (by perhaps 20%) than bsd-style malloc
104 only when bsd malloc would waste a great deal of space in
105 fragmented blocks, which this malloc recovers; or when, by
106 chance or design, nearly all requests are near the bsd malloc
107 power-of-2 allocation bin boundaries, and as many chunks are
108 used as are allocated.
109
110 It uses more space than bsd malloc only when, again by chance
111 or design, only bsdmalloc bin-sized requests are malloced, or when
112 little dynamic space is malloced, since this malloc may grab larger
113 chunks from the system at a time than bsd.
114
115 In other words, this malloc seems generally superior to bsd
116 except perhaps for programs that are specially tuned to
117 deal with bsdmalloc's characteristics. But even here, the
118 performance differences are slight.
119
120
 121 This malloc, like any other, is a compromise design.
122
123
124 Chunks of memory are maintained using a `boundary tag' method as
125 described in e.g., Knuth or Standish. This means that the size of
126 the chunk is stored both in the front of the chunk and at the end.
127 This makes consolidating fragmented chunks into bigger chunks very fast.
128 The size field is also used to hold bits representing whether a
129 chunk is free or in use.
130
131 Malloced chunks have space overhead of 8 bytes: The preceding
132 and trailing size fields. When they are freed, the list pointer
133 fields are also needed.
134
135 Available chunks are kept in doubly linked lists. The lists are
136 maintained in an array of bins using a power-of-two method, except
137 that instead of 32 bins (one for each 1 << i), there are 128: each
138 power of two is split in quarters. The use of very fine bin sizes
139 closely approximates the use of one bin per actually used size,
140 without necessitating the overhead of locating such bins. It is
141 especially desirable in common C++ applications where large numbers
142 of identically-sized blocks are malloced/freed in some dynamic
143 manner, and then later are all freed. The finer bin sizes make
144 finding blocks fast, with little wasted overallocation. The
145 consolidation methods ensure that once the collection of blocks is
146 no longer useful, fragments are gathered into bigger chunks awaiting new
147 roles.
148
149 The bins av[i] serve as heads of the lists. Bins contain a dummy
150 header for the chunk lists, and a `dirty' field used to indicate
151 whether the list may need to be scanned for consolidation.
152
153 On allocation, the bin corresponding to the request size is
154 scanned, and if there is a chunk with size >= requested, it
 155 is used, after being split if it is too big. Chunks on the list which are
156 too small are examined for consolidation during this traversal.
157
 158 If no chunk exists in the list, bigger bins are scanned in search of
159 a victim.
160
161 If no victim can be found, then smaller bins are examined for
162 consolidation in order to construct a victim.
163
164 Finally, if consolidation fails to come up with a usable chunk,
165 more space is obtained from the system.
166
167 After a split, the remainder is placed on
168 the back of the appropriate bin list. (All freed chunks are placed
169 on fronts of lists. All remaindered or consolidated chunks are
170 placed on the rear. Correspondingly, searching within a bin
171 starts at the front, but finding victims is from the back. All
172 of this approximates the effect of having 2 kinds of lists per
173 bin: returned chunks vs unallocated chunks, but without the overhead
174 of maintaining 2 lists.)
175
176 Deallocation (free) consists only of placing the chunk on
177 a list.
178
179 Reallocation proceeds in the usual way. If a chunk can be extended,
180 it is, else a malloc-copy-free sequence is taken.
181
182 memalign requests more than enough space from malloc, finds a
183 spot within that chunk that meets the alignment request, and
184 then possibly frees the leading and trailing space. Overreliance
185 on memalign is a sure way to fragment space.
186
187
188 Some other implementation matters:
189
190 8 byte alignment is currently hardwired into the design. Calling
191 memalign will return a chunk that is both 8-byte aligned, and
192 meets the requested alignment.
193
194 The basic overhead of a used chunk is 8 bytes: 4 at the front and
195 4 at the end.
196
197 When a chunk is free, 8 additional bytes are needed for free list
198 pointers. Thus, the minimum allocatable size is 16 bytes.
199
200 The existence of front and back overhead permits some reasonably
201 effective fence-bashing checks: The front and back fields must
202 be identical. This is checked only within free() and realloc().
203 The checks are fast enough to be made non-optional.
204
205 The overwriting of parts of freed memory with the freelist pointers
206 can also be very effective (albeit in an annoying way) in helping
207 users track down dangling pointers.
208
209 User overwriting of freed space will often result in crashes
210 within malloc or free.
211
212 These routines are also tuned to C++ in that free(0) is a noop and
213 a failed malloc automatically calls (*new_handler)().
214
215 malloc(0) returns a pointer to something of the minimum allocatable size.
216
217 Additional memory is gathered from the system (via sbrk) in a
218 way that allows chunks obtained across different sbrk calls to
219 be consolidated, but does not require contiguous memory: Thus,
220 it should be safe to intersperse mallocs with other sbrk calls.
221
222 This malloc is NOT designed to work in multiprocessing applications.
223 No semaphores or other concurrency control are provided to ensure
224 that multiple malloc or free calls don't run at the same time,
 225 which could be disastrous.
226
227 VERY heavy use of inlines is made, for clarity. If this malloc
228 is ported via a compiler without inlining capabilities, all
229 inlines should be transformed into macros -- making them non-inline
230 makes malloc at least twice as slow.
231
232
233*/
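
/*
   A sketch of the allocation strategy described above (illustrative
   outline only; the real code is in malloc() further below):

     nb = request2size(bytes);
     scan bin(nb) front-to-back, consolidating unusable chunks as we go;
       use the first chunk with size >= nb
     else scan bigger bins and take a victim from the back of the first
       non-empty one
     else malloc_find_space(nb): consolidate dirty bins, else sbrk more
     split the chosen chunk down to nb; the remainder goes to the back
       of its bin
*/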
234
235\f
236/* preliminaries */
237
238#ifdef __cplusplus
239#include <stdio.h>
240#else
241#include "//usr/include/stdio.h" /* needed for error reporting */
242#endif
243
244#ifdef __cplusplus
245extern "C" {
246#endif
247
248#ifdef USG
249extern void* memset(void*, int, int);
250extern void* memcpy(void*, const void*, int);
251/*inline void bzero(void* s, int l) { memset(s, 0, l); }*/
252#else
253/*extern void bzero(void*, unsigned int);*/
254#endif
255
256/*extern void bcopy(void*, void*, unsigned int);*/
257
258extern void* sbrk(unsigned int);
259
260/* Put these in instead of the commented-out stuff above. */
261#define bcopy(s,d,n) memcpy((d),(s),(n))
262#define bcmp(s1,s2,n) memcmp((s1),(s2),(n))
263#define bzero(s,n) memset((s),0,(n))
264
265
266#ifdef __GNUC__
267extern volatile void abort();
268#else
269extern void abort();
270#endif
271
272#ifdef __cplusplus
273}; /* end of extern "C" */
274#endif
275
276
277/* A good multiple to call sbrk with */
278
279#define SBRK_UNIT 4096
280
281
282
283/* how to die on detected error */
284
285#ifdef __GNUC__
286static volatile void malloc_user_error()
287#else
288static void malloc_user_error()
289#endif
290{
291 fputs("malloc/free/realloc: clobbered space detected\n", stderr); abort();
292}
293
294
295\f
296/* Basic overhead for each malloc'ed chunk */
297
298
299struct malloc_chunk
300{
301 unsigned int size; /* Size in bytes, including overhead. */
302 /* Or'ed with INUSE if in use. */
303
304 struct malloc_chunk* fd; /* double links -- used only if free. */
305 struct malloc_chunk* bk;
306
307};
308
309typedef struct malloc_chunk* mchunkptr;
310
311struct malloc_bin
312{
313 struct malloc_chunk hd; /* dummy list header */
314 unsigned int dirty; /* True if maybe consolidatable */
315 /* Wasting a word here makes */
316 /* sizeof(bin) a power of 2, */
317 /* which makes size2bin() faster */
318};
319
320typedef struct malloc_bin* mbinptr;
321
322
323/* sizes, alignments */
324
325
326#define SIZE_SZ (sizeof(unsigned int))
327#define MALLOC_MIN_OVERHEAD (SIZE_SZ + SIZE_SZ)
328#define MALLOC_ALIGN_MASK (MALLOC_MIN_OVERHEAD - 1)
329
330#define MINSIZE (sizeof(struct malloc_chunk) + SIZE_SZ) /* MUST == 16! */
331
332
333/* pad request bytes into a usable size */
334
335static inline unsigned int request2size(unsigned int request)
336{
337 return (request == 0) ? MINSIZE :
338 ((request + MALLOC_MIN_OVERHEAD + MALLOC_ALIGN_MASK)
339 & ~(MALLOC_ALIGN_MASK));
340}
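
#ifdef MALLOC_EXAMPLES
/* Illustrative sketch only, not part of the allocator: MALLOC_EXAMPLES is a
   macro introduced just for these examples; compile with -DMALLOC_EXAMPLES
   to include them.  The expected values below assume SIZE_SZ == 4, i.e.
   4-byte size words as on the 32-bit systems this file was written for. */
static void show_request2size()
{
  fprintf(stderr, "request2size(0)   = %u (expect 16)\n", request2size(0));
  fprintf(stderr, "request2size(1)   = %u (expect 16)\n", request2size(1));
  fprintf(stderr, "request2size(9)   = %u (expect 24)\n", request2size(9));
  fprintf(stderr, "request2size(100) = %u (expect 112)\n", request2size(100));
}
#endif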
341
342
343static inline int aligned_OK(void* m)
344{
345 return ((unsigned int)(m) & (MALLOC_ALIGN_MASK)) == 0;
346}
347
348
349/* size field or'd with INUSE when in use */
350#define INUSE 0x1
351
352\f
353
354/* the bins, initialized to have null double linked lists */
355
356#define MAXBIN 120 /* 1 more than needed for 32 bit addresses */
357
358#define FIRSTBIN (&(av[0]))
359
360static struct malloc_bin av[MAXBIN] =
361{
362 { { 0, &(av[0].hd), &(av[0].hd) }, 0 },
363 { { 0, &(av[1].hd), &(av[1].hd) }, 0 },
364 { { 0, &(av[2].hd), &(av[2].hd) }, 0 },
365 { { 0, &(av[3].hd), &(av[3].hd) }, 0 },
366 { { 0, &(av[4].hd), &(av[4].hd) }, 0 },
367 { { 0, &(av[5].hd), &(av[5].hd) }, 0 },
368 { { 0, &(av[6].hd), &(av[6].hd) }, 0 },
369 { { 0, &(av[7].hd), &(av[7].hd) }, 0 },
370 { { 0, &(av[8].hd), &(av[8].hd) }, 0 },
371 { { 0, &(av[9].hd), &(av[9].hd) }, 0 },
372
373 { { 0, &(av[10].hd), &(av[10].hd) }, 0 },
374 { { 0, &(av[11].hd), &(av[11].hd) }, 0 },
375 { { 0, &(av[12].hd), &(av[12].hd) }, 0 },
376 { { 0, &(av[13].hd), &(av[13].hd) }, 0 },
377 { { 0, &(av[14].hd), &(av[14].hd) }, 0 },
378 { { 0, &(av[15].hd), &(av[15].hd) }, 0 },
379 { { 0, &(av[16].hd), &(av[16].hd) }, 0 },
380 { { 0, &(av[17].hd), &(av[17].hd) }, 0 },
381 { { 0, &(av[18].hd), &(av[18].hd) }, 0 },
382 { { 0, &(av[19].hd), &(av[19].hd) }, 0 },
383
384 { { 0, &(av[20].hd), &(av[20].hd) }, 0 },
385 { { 0, &(av[21].hd), &(av[21].hd) }, 0 },
386 { { 0, &(av[22].hd), &(av[22].hd) }, 0 },
387 { { 0, &(av[23].hd), &(av[23].hd) }, 0 },
388 { { 0, &(av[24].hd), &(av[24].hd) }, 0 },
389 { { 0, &(av[25].hd), &(av[25].hd) }, 0 },
390 { { 0, &(av[26].hd), &(av[26].hd) }, 0 },
391 { { 0, &(av[27].hd), &(av[27].hd) }, 0 },
392 { { 0, &(av[28].hd), &(av[28].hd) }, 0 },
393 { { 0, &(av[29].hd), &(av[29].hd) }, 0 },
394
395 { { 0, &(av[30].hd), &(av[30].hd) }, 0 },
396 { { 0, &(av[31].hd), &(av[31].hd) }, 0 },
397 { { 0, &(av[32].hd), &(av[32].hd) }, 0 },
398 { { 0, &(av[33].hd), &(av[33].hd) }, 0 },
399 { { 0, &(av[34].hd), &(av[34].hd) }, 0 },
400 { { 0, &(av[35].hd), &(av[35].hd) }, 0 },
401 { { 0, &(av[36].hd), &(av[36].hd) }, 0 },
402 { { 0, &(av[37].hd), &(av[37].hd) }, 0 },
403 { { 0, &(av[38].hd), &(av[38].hd) }, 0 },
404 { { 0, &(av[39].hd), &(av[39].hd) }, 0 },
405
406 { { 0, &(av[40].hd), &(av[40].hd) }, 0 },
407 { { 0, &(av[41].hd), &(av[41].hd) }, 0 },
408 { { 0, &(av[42].hd), &(av[42].hd) }, 0 },
409 { { 0, &(av[43].hd), &(av[43].hd) }, 0 },
410 { { 0, &(av[44].hd), &(av[44].hd) }, 0 },
411 { { 0, &(av[45].hd), &(av[45].hd) }, 0 },
412 { { 0, &(av[46].hd), &(av[46].hd) }, 0 },
413 { { 0, &(av[47].hd), &(av[47].hd) }, 0 },
414 { { 0, &(av[48].hd), &(av[48].hd) }, 0 },
415 { { 0, &(av[49].hd), &(av[49].hd) }, 0 },
416
417 { { 0, &(av[50].hd), &(av[50].hd) }, 0 },
418 { { 0, &(av[51].hd), &(av[51].hd) }, 0 },
419 { { 0, &(av[52].hd), &(av[52].hd) }, 0 },
420 { { 0, &(av[53].hd), &(av[53].hd) }, 0 },
421 { { 0, &(av[54].hd), &(av[54].hd) }, 0 },
422 { { 0, &(av[55].hd), &(av[55].hd) }, 0 },
423 { { 0, &(av[56].hd), &(av[56].hd) }, 0 },
424 { { 0, &(av[57].hd), &(av[57].hd) }, 0 },
425 { { 0, &(av[58].hd), &(av[58].hd) }, 0 },
426 { { 0, &(av[59].hd), &(av[59].hd) }, 0 },
427
428 { { 0, &(av[60].hd), &(av[60].hd) }, 0 },
429 { { 0, &(av[61].hd), &(av[61].hd) }, 0 },
430 { { 0, &(av[62].hd), &(av[62].hd) }, 0 },
431 { { 0, &(av[63].hd), &(av[63].hd) }, 0 },
432 { { 0, &(av[64].hd), &(av[64].hd) }, 0 },
433 { { 0, &(av[65].hd), &(av[65].hd) }, 0 },
434 { { 0, &(av[66].hd), &(av[66].hd) }, 0 },
435 { { 0, &(av[67].hd), &(av[67].hd) }, 0 },
436 { { 0, &(av[68].hd), &(av[68].hd) }, 0 },
437 { { 0, &(av[69].hd), &(av[69].hd) }, 0 },
438
439 { { 0, &(av[70].hd), &(av[70].hd) }, 0 },
440 { { 0, &(av[71].hd), &(av[71].hd) }, 0 },
441 { { 0, &(av[72].hd), &(av[72].hd) }, 0 },
442 { { 0, &(av[73].hd), &(av[73].hd) }, 0 },
443 { { 0, &(av[74].hd), &(av[74].hd) }, 0 },
444 { { 0, &(av[75].hd), &(av[75].hd) }, 0 },
445 { { 0, &(av[76].hd), &(av[76].hd) }, 0 },
446 { { 0, &(av[77].hd), &(av[77].hd) }, 0 },
447 { { 0, &(av[78].hd), &(av[78].hd) }, 0 },
448 { { 0, &(av[79].hd), &(av[79].hd) }, 0 },
449
450 { { 0, &(av[80].hd), &(av[80].hd) }, 0 },
451 { { 0, &(av[81].hd), &(av[81].hd) }, 0 },
452 { { 0, &(av[82].hd), &(av[82].hd) }, 0 },
453 { { 0, &(av[83].hd), &(av[83].hd) }, 0 },
454 { { 0, &(av[84].hd), &(av[84].hd) }, 0 },
455 { { 0, &(av[85].hd), &(av[85].hd) }, 0 },
456 { { 0, &(av[86].hd), &(av[86].hd) }, 0 },
457 { { 0, &(av[87].hd), &(av[87].hd) }, 0 },
458 { { 0, &(av[88].hd), &(av[88].hd) }, 0 },
459 { { 0, &(av[89].hd), &(av[89].hd) }, 0 },
460
461 { { 0, &(av[90].hd), &(av[90].hd) }, 0 },
462 { { 0, &(av[91].hd), &(av[91].hd) }, 0 },
463 { { 0, &(av[92].hd), &(av[92].hd) }, 0 },
464 { { 0, &(av[93].hd), &(av[93].hd) }, 0 },
465 { { 0, &(av[94].hd), &(av[94].hd) }, 0 },
466 { { 0, &(av[95].hd), &(av[95].hd) }, 0 },
467 { { 0, &(av[96].hd), &(av[96].hd) }, 0 },
468 { { 0, &(av[97].hd), &(av[97].hd) }, 0 },
469 { { 0, &(av[98].hd), &(av[98].hd) }, 0 },
470 { { 0, &(av[99].hd), &(av[99].hd) }, 0 },
471
472 { { 0, &(av[100].hd), &(av[100].hd) }, 0 },
473 { { 0, &(av[101].hd), &(av[101].hd) }, 0 },
474 { { 0, &(av[102].hd), &(av[102].hd) }, 0 },
475 { { 0, &(av[103].hd), &(av[103].hd) }, 0 },
476 { { 0, &(av[104].hd), &(av[104].hd) }, 0 },
477 { { 0, &(av[105].hd), &(av[105].hd) }, 0 },
478 { { 0, &(av[106].hd), &(av[106].hd) }, 0 },
479 { { 0, &(av[107].hd), &(av[107].hd) }, 0 },
480 { { 0, &(av[108].hd), &(av[108].hd) }, 0 },
481 { { 0, &(av[109].hd), &(av[109].hd) }, 0 },
482
483 { { 0, &(av[110].hd), &(av[110].hd) }, 0 },
484 { { 0, &(av[111].hd), &(av[111].hd) }, 0 },
485 { { 0, &(av[112].hd), &(av[112].hd) }, 0 },
486 { { 0, &(av[113].hd), &(av[113].hd) }, 0 },
487 { { 0, &(av[114].hd), &(av[114].hd) }, 0 },
488 { { 0, &(av[115].hd), &(av[115].hd) }, 0 },
489 { { 0, &(av[116].hd), &(av[116].hd) }, 0 },
490 { { 0, &(av[117].hd), &(av[117].hd) }, 0 },
491 { { 0, &(av[118].hd), &(av[118].hd) }, 0 },
492 { { 0, &(av[119].hd), &(av[119].hd) }, 0 }
493};
494
495/*
496 indexing into bins
497*/
498
499static inline mbinptr size2bin(unsigned int sz)
500{
501 mbinptr b = av;
502 while (sz >= (MINSIZE * 2)) { b += 4; sz >>= 1; } /* find power of 2 */
503 b += (sz - MINSIZE) >> 2; /* find quadrant */
504 return b;
505}
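
#ifdef MALLOC_EXAMPLES
/* Illustrative sketch only (guarded by the example-only MALLOC_EXAMPLES
   macro): the quartered power-of-two bin mapping in action.  The expected
   indices assume MINSIZE == 16. */
static void show_size2bin()
{
  fprintf(stderr, "size  16 -> bin %d (expect 0)\n",  (int)(size2bin(16)  - av));
  fprintf(stderr, "size  24 -> bin %d (expect 2)\n",  (int)(size2bin(24)  - av));
  fprintf(stderr, "size  32 -> bin %d (expect 4)\n",  (int)(size2bin(32)  - av));
  fprintf(stderr, "size  48 -> bin %d (expect 6)\n",  (int)(size2bin(48)  - av));
  fprintf(stderr, "size 104 -> bin %d (expect 10)\n", (int)(size2bin(104) - av));
}
#endif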
506
507\f
508
509/* counts maintained if MALLOC_STATS defined */
510
511#ifdef MALLOC_STATS
512
513static unsigned int sbrked_mem;
514static unsigned int requested_mem;
515static unsigned int malloced_mem;
516static unsigned int freed_mem;
517static unsigned int max_used_mem;
518
519static unsigned int n_sbrks;
520static unsigned int n_mallocs;
521static unsigned int n_frees;
522static unsigned int n_reallocs;
523static unsigned int n_reallocs_with_copy;
524static unsigned int n_avail;
525static unsigned int max_inuse;
526
527static unsigned int n_malloc_chunks;
528static unsigned int n_malloc_bins;
529
530static unsigned int n_split;
531static unsigned int n_consol;
532
533
534static void do_malloc_stats(const mchunkptr p)
535{
536 ++n_mallocs;
537 if ((n_mallocs-n_frees) > max_inuse)
538 max_inuse = n_mallocs - n_frees;
539 malloced_mem += (p->size & ~(INUSE));
540 if (malloced_mem - freed_mem > max_used_mem)
541 max_used_mem = malloced_mem - freed_mem;
542}
543
544static void do_free_stats(const mchunkptr p)
545{
546 ++n_frees;
547 freed_mem += (p->size & ~(INUSE));
548}
549
550#endif
551
552\f
553
554/* Utilities needed below for memalign */
555/* This is redundant with libg++ support, but not if used stand-alone */
556
557static unsigned int gcd(unsigned int a, unsigned int b)
558{
559 unsigned int tmp;
560
561 if (b > a)
562 {
563 tmp = a; a = b; b = tmp;
564 }
565 for(;;)
566 {
567 if (b == 0)
568 return a;
569 else if (b == 1)
570 return b;
571 else
572 {
573 tmp = b;
574 b = a % b;
575 a = tmp;
576 }
577 }
578}
579
580static inline unsigned int lcm(unsigned int x, unsigned int y)
581{
582 return x / gcd(x, y) * y;
583}
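
#ifdef MALLOC_EXAMPLES
/* Illustrative sketch only (example-only MALLOC_EXAMPLES macro): memalign()
   below aligns to lcm(alignment, MALLOC_MIN_OVERHEAD), so the result
   satisfies both the caller's request and malloc's own 8-byte alignment. */
static void show_lcm()
{
  fprintf(stderr, "lcm(12, 8) = %u (expect 24)\n", lcm(12, 8));
  fprintf(stderr, "lcm(64, 8) = %u (expect 64)\n", lcm(64, 8));
  fprintf(stderr, "lcm(7, 8)  = %u (expect 56)\n", lcm(7, 8));
}
#endif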
584
585\f
586
587/* maintaining INUSE via size field */
588
589
590#define inuse(p) ((p)->size & INUSE)
591#define set_inuse(p) ((p)->size |= INUSE)
592#define clear_inuse(p) ((p)->size &= ~INUSE)
593
594
595/* operations on malloc_chunk addresses */
596
597
598/* return ptr to next physical malloc_chunk */
599
600#define next_chunk(p) ((mchunkptr)((char*)(p) + (p)->size))
601
602/* return ptr to previous physical malloc_chunk */
603
604#define prev_chunk(p) ((mchunkptr)((char*)(p)-((((int*)(p))[-1]) & ~(INUSE))))
605
606/* place size at front and back of chunk */
607
608
609static inline void set_size(mchunkptr p, unsigned int sz)
610{
611 p->size = *((int*)((char*)(p) + sz - SIZE_SZ)) = sz;
612}
613
614
615\f
616
617/* conversion from malloc headers to user pointers, and back */
618
619static inline void* chunk2mem(mchunkptr p)
620{
621 void *mem;
622 set_inuse(p);
 623 mem = (void*)((char*)(p) + SIZE_SZ);
624 return mem;
625}
626
627/* xxxx my own */
628mchunkptr sanity_check(void* mem)
629{
630 mchunkptr p = (mchunkptr)((char*)(mem) - SIZE_SZ);
631
632 /* a quick sanity check */
633 unsigned int sz = p->size & ~(INUSE);
634 if (p->size == sz || sz != *((int*)((char*)(p) + sz - SIZE_SZ)))
635 malloc_user_error();
636
637 return p;
638}
639
640
641
642
643static inline mchunkptr mem2chunk(void* mem)
644{
645 mchunkptr p = (mchunkptr)((char*)(mem) - SIZE_SZ);
646
647 /* a quick sanity check */
648 unsigned int sz = p->size & ~(INUSE);
649 if (p->size == sz || sz != *((int*)((char*)(p) + sz - SIZE_SZ)))
650 malloc_user_error();
651
652 p->size = sz; /* clears INUSE */
653 return p;
654}
655
656\f
657
658/* maintaining bins & pointers */
659
660
661/* maximum bin actually used */
662
663static mbinptr malloc_maxbin = FIRSTBIN;
664
665
666/* operations on lists inside bins */
667
668
669/* take a chunk off a list */
670
671static inline void unlink(mchunkptr p)
672{
673 mchunkptr b = p->bk;
674 mchunkptr f = p->fd;
675
676 f->bk = b; b->fd = f;
677
678 UPDATE_STATS (--n_avail);
679}
680
681
682
683/* split a chunk and place on the back of a list */
684
685static inline void split(mchunkptr p, unsigned int offset)
686{
687 unsigned int room = p->size - offset;
688 if (room >= MINSIZE)
689 {
690 mbinptr bn = size2bin(room); /* new bin */
691 mchunkptr h = &(bn->hd); /* its head */
692 mchunkptr b = h->bk; /* old back element */
693 mchunkptr t = (mchunkptr)((char*)(p) + offset); /* remaindered chunk */
694
695 /* set size */
696 t->size = *((int*)((char*)(t) + room - SIZE_SZ)) = room;
697
698 /* link up */
699 t->bk = b; t->fd = h; h->bk = b->fd = t;
700
701 /* adjust maxbin (h == b means was empty) */
702 if (h == b && bn > malloc_maxbin) malloc_maxbin = bn;
703
704 /* adjust size of chunk to be returned */
705 p->size = *((int*)((char*)(p) + offset - SIZE_SZ)) = offset;
706
707 UPDATE_STATS ((++n_split, ++n_avail));
708 }
709}
710
711
712
713/* place a consolidated chunk on the back of a list */
714/* like above, except no split */
715
716static inline void consollink(mchunkptr p)
717{
718 mbinptr bn = size2bin(p->size);
719 mchunkptr h = &(bn->hd);
720 mchunkptr b = h->bk;
721
722 p->bk = b; p->fd = h; h->bk = b->fd = p;
723
724 if (h == b && bn > malloc_maxbin) malloc_maxbin = bn;
725
726 UPDATE_STATS(++n_avail);
727}
728
729
730/* place a freed chunk on the front of a list */
731
732static inline void frontlink(mchunkptr p)
733{
734 mbinptr bn = size2bin(p->size);
735 mchunkptr h = &(bn->hd);
736 mchunkptr f = h->fd;
737
738 p->bk = h; p->fd = f; f->bk = h->fd = p;
739
740 if (h == f && bn > malloc_maxbin) malloc_maxbin = bn;
741
742 bn->dirty = 1;
743
744 UPDATE_STATS(++n_avail);
745}
746
747
748\f
749/* Dealing with sbrk */
750
751
752/* To link consecutive sbrk regions when possible */
753
754static int* last_sbrk_end;
755
756
757/* who to call when sbrk returns failure */
758
759#ifndef NO_NEW_HANDLER
760typedef volatile void (*vfp)();
761#ifdef __cplusplus
762extern "C" vfp __new_handler;
763#else
764extern vfp __new_handler;
765#endif
766#endif
767
768static mchunkptr malloc_from_sys(unsigned nb)
769{
770 mchunkptr p;
771 unsigned int sbrk_size;
772 int* ip;
773
774 /* Minimally, we need to pad with enough space */
 775 /* to place dummy size/use fields at the ends if needed */
776
777 sbrk_size = ((nb + SBRK_UNIT - 1 + SIZE_SZ + SIZE_SZ)
778 / SBRK_UNIT) * SBRK_UNIT;
779
780 ip = (int*)(sbrk(sbrk_size));
781 if ((char*)ip == (char*)(-1)) /* sbrk returns -1 on failure */
782 {
783#ifndef NO_NEW_HANDLER
784 (*__new_handler) ();
785#endif
786 return 0;
787 }
788
789 UPDATE_STATS ((++n_sbrks, sbrked_mem += sbrk_size));
790
791
792 if (last_sbrk_end != &ip[-1])
793 {
794 /* It's either first time through or someone else called sbrk. */
795 /* Arrange end-markers at front & back */
796
797 /* Shouldn't be necessary, but better to be safe */
798 while (!aligned_OK(ip)) { ++ip; sbrk_size -= SIZE_SZ; }
799
800
801 /* Mark the front as in use to prevent merging. */
802 /* Note we can get away with only 1 word, not MINSIZE overhead here */
803
804 *ip++ = SIZE_SZ | INUSE;
805
806 p = (mchunkptr)ip;
807 set_size(p,sbrk_size - (SIZE_SZ + SIZE_SZ));
808
809 }
810 else
811 {
812 mchunkptr l;
813
814 /* We can safely make the header start at end of prev sbrked chunk. */
815 /* We will still have space left at the end from a previous call */
816 /* to place the end marker, below */
817
818 p = (mchunkptr)(last_sbrk_end);
819 set_size(p, sbrk_size);
820
821
822 /* Even better, maybe we can merge with last fragment: */
823
824 l = prev_chunk(p);
825 if (!inuse(l))
826 {
827 unlink(l);
828 set_size(l, p->size + l->size);
829 p = l;
830 }
831
832 }
833
834 /* mark the end of sbrked space as in use to prevent merging */
835
836 last_sbrk_end = (int*)((char*)p + p->size);
837 *last_sbrk_end = SIZE_SZ | INUSE;
838
839 UPDATE_STATS((++n_avail, ++n_malloc_chunks));
840
841 /* make it safe to unlink in malloc */
842 UPDATE_STATS(++n_avail);
843 p->fd = p->bk = p;
844
845 return p;
846}
847
848\f
849
850/* Consolidate dirty bins. */
851/* Stop if found a chunk big enough to satisfy current malloc request */
852
853/* (It requires much less bookkeeping to consolidate entire bins */
854/* at once than to keep records of which chunks might be */
855/* consolidatable. So long as the lists are short, which we */
856/* try to ensure via small bin ranges, there is little wasted effort.) */
857
858static mchunkptr malloc_find_space(unsigned int nb)
859{
860 mbinptr b;
861
862 /* first, re-adjust max used bin */
863
864 while (malloc_maxbin >= FIRSTBIN &&
865 malloc_maxbin->hd.bk == &(malloc_maxbin->hd))
866 {
867 malloc_maxbin->dirty = 0;
868 --malloc_maxbin;
869 }
870
871 for (b = malloc_maxbin; b >= FIRSTBIN; --b)
872 {
873 UPDATE_STATS(++n_malloc_bins);
874
875 if (b->dirty)
876 {
877 mchunkptr h = &(b->hd); /* head of list */
878 mchunkptr p = h->fd; /* chunk traverser */
879
880 while (p != h)
881 {
882 mchunkptr nextp = p->fd; /* save, in case of relinks */
883 int consolidated = 0; /* only unlink/relink if consolidated */
884
885 mchunkptr t;
886
887 UPDATE_STATS(++n_malloc_chunks);
888
889 while (!inuse(t = prev_chunk(p))) /* consolidate backward */
890 {
891 if (!consolidated) { consolidated = 1; unlink(p); }
892 if (t == nextp) nextp = t->fd;
893 unlink(t);
894 set_size(t, t->size + p->size);
895 p = t;
896 UPDATE_STATS (++n_consol);
897 }
898
899 while (!inuse(t = next_chunk(p))) /* consolidate forward */
900 {
901 if (!consolidated) { consolidated = 1; unlink(p); }
902 if (t == nextp) nextp = t->fd;
903 unlink(t);
904 set_size(p, p->size + t->size);
905 UPDATE_STATS (++n_consol);
906 }
907
908 if (consolidated)
909 {
910 if (p->size >= nb)
911 {
912 /* make it safe to unlink in malloc */
913 UPDATE_STATS(++n_avail);
914 p->fd = p->bk = p;
915 return p;
916 }
917 else
918 consollink(p);
919 }
920
921 p = nextp;
922
923 }
924
925 b->dirty = 0;
926
927 }
928 }
929
930 /* nothing available - sbrk some more */
931
932 return malloc_from_sys(nb);
933}
934
935\f
936
937/* Finally, the user-level functions */
938
939void* malloc(unsigned int bytes)
940{
941 unsigned int nb = request2size(bytes); /* padded request size */
942 mbinptr b = size2bin(nb); /* corresponding bin */
943 mchunkptr hd = &(b->hd); /* head of its list */
944 mchunkptr p = hd->fd; /* chunk traverser */
945
946 UPDATE_STATS((requested_mem+=bytes, ++n_malloc_bins));
947
948 /* Try a (near) exact match in own bin */
949 /* clean out unusable but consolidatable chunks in bin while traversing */
950
951 while (p != hd)
952 {
953 UPDATE_STATS(++n_malloc_chunks);
954 if (p->size >= nb)
955 goto found;
956 else /* try to consolidate; same code as malloc_find_space */
957 {
958 mchunkptr nextp = p->fd; /* save, in case of relinks */
959 int consolidated = 0; /* only unlink/relink if consolidated */
960
961 mchunkptr t;
962
963 while (!inuse(t = prev_chunk(p))) /* consolidate backward */
964 {
965 if (!consolidated) { consolidated = 1; unlink(p); }
966 if (t == nextp) nextp = t->fd;
967 unlink(t);
968 set_size(t, t->size + p->size);
969 p = t;
970 UPDATE_STATS (++n_consol);
971 }
972
973 while (!inuse(t = next_chunk(p))) /* consolidate forward */
974 {
975 if (!consolidated) { consolidated = 1; unlink(p); }
976 if (t == nextp) nextp = t->fd;
977 unlink(t);
978 set_size(p, p->size + t->size);
979 UPDATE_STATS (++n_consol);
980 }
981
982 if (consolidated)
983 {
984 if (p->size >= nb)
985 {
986 /* make it safe to unlink again below */
987 UPDATE_STATS(++n_avail);
988 p->fd = p->bk = p;
989 goto found;
990 }
991 else
992 consollink(p);
993 }
994
995 p = nextp;
996
997 }
998 }
999
 1000 b->dirty = 0; /* whole bin was scanned, so it is clean now */
1001
1002 /* Scan bigger bins for a victim */
1003
1004 while (++b <= malloc_maxbin)
1005 {
1006 UPDATE_STATS(++n_malloc_bins);
1007 if ((p = b->hd.bk) != &(b->hd)) /* no need to check size */
1008 goto found;
1009 }
1010
1011 /* Consolidate or sbrk */
1012
1013 p = malloc_find_space(nb);
1014
1015 if (p == 0) return 0; /* allocation failure */
1016
1017 found: /* Use what we found */
1018
1019 unlink(p);
1020 split(p, nb);
1021 UPDATE_STATS(do_malloc_stats(p));
1022 return chunk2mem(p);
1023}
1024
1025
1026\f
1027
1028void free(void* mem)
1029{
1030 if (mem != 0)
1031 {
1032 mchunkptr p = mem2chunk(mem);
1033 UPDATE_STATS(do_free_stats(p));
1034 frontlink(p);
1035 }
1036}
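
#ifdef MALLOC_EXAMPLES
/* Illustrative sketch only (example-only MALLOC_EXAMPLES macro): peek at the
   boundary tags of a live chunk.  The layout matches the description at the
   top of the file: a size word at each end, with the INUSE bit kept in the
   front copy only, and user pointers always 8-byte aligned. */
static void show_boundary_tags()
{
  void* mem = malloc(20);
  mchunkptr p;
  unsigned int front;
  unsigned int back;
  if (mem == 0) return;
  p = (mchunkptr)((char*)mem - SIZE_SZ);
  front = p->size & ~INUSE;
  back  = *((unsigned int*)((char*)p + front - SIZE_SZ));
  fprintf(stderr, "front tag = %u, back tag = %u (should match)\n", front, back);
  fprintf(stderr, "user pointer 8-byte aligned: %d (expect 1)\n", aligned_OK(mem));
  free(mem);
}
#endif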
1037
1038
1039void* calloc(unsigned int n, unsigned int elem_size)
1040{
1041 unsigned int sz = n * elem_size;
 1042 void* p = malloc(sz);
 1043 if (p != 0) bzero(p, sz); /* malloc may fail; don't clear a null pointer */
 1044 return p;
 1045}
1046
1047/* This is here for compatibility with older systems */
1048void cfree(void *mem)
1049{
1050 free(mem);
1051}
1052
1053
1054unsigned int malloc_usable_size(void* mem)
1055{
1056 if (mem == 0)
1057 return 0;
1058 else
1059 {
1060 mchunkptr p = (mchunkptr)((char*)(mem) - SIZE_SZ);
1061 unsigned int sz = p->size & ~(INUSE);
1062 if (p->size == sz || sz != *((int*)((char*)(p) + sz - SIZE_SZ)))
1063 return 0;
1064 else
1065 return sz - MALLOC_MIN_OVERHEAD;
1066 }
1067}
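
#ifdef MALLOC_EXAMPLES
/* Illustrative sketch only (example-only MALLOC_EXAMPLES macro): malloc(10)
   is padded to a 24-byte chunk (with 4-byte size words), so at least 16
   bytes are usable; the figure can be larger if the chunk handed out was
   not split down to the padded size. */
static void show_usable_size()
{
  void* mem = malloc(10);
  if (mem != 0)
  {
    fprintf(stderr, "usable size = %u (at least 16)\n", malloc_usable_size(mem));
    free(mem);
  }
}
#endif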
1068
1069\f
1070
1071void* realloc(void* mem, unsigned int bytes)
1072{
1073 if (mem == 0)
1074 return malloc(bytes);
1075 else
1076 {
1077 unsigned int nb = request2size(bytes);
1078 mchunkptr p = mem2chunk(mem);
1079 unsigned int oldsize = p->size;
1080 int room;
1081 mchunkptr nxt;
1082
1083 UPDATE_STATS((++n_reallocs, requested_mem += bytes-oldsize));
1084
1085 /* try to expand (even if already big enough), to clean up chunk */
1086
1087 while (!inuse(nxt = next_chunk(p)))
1088 {
1089 UPDATE_STATS ((malloced_mem += nxt->size, ++n_consol));
1090 unlink(nxt);
1091 set_size(p, p->size + nxt->size);
1092 }
1093
1094 room = p->size - nb;
1095 if (room >= 0)
1096 {
1097 split(p, nb);
1098 UPDATE_STATS(malloced_mem -= room);
1099 return chunk2mem(p);
1100 }
1101 else /* do the obvious */
1102 {
1103 void* newmem;
1104 set_inuse(p); /* don't let malloc consolidate us yet! */
1105 newmem = malloc(nb);
 1106 if (newmem != 0) /* on failure, return 0 and leave the original block intact */
 1107 { bcopy(mem, newmem, oldsize - SIZE_SZ); free(mem); }
1108 UPDATE_STATS(++n_reallocs_with_copy);
1109 return newmem;
1110 }
1111 }
1112}
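
#ifdef MALLOC_EXAMPLES
/* Illustrative sketch only (example-only MALLOC_EXAMPLES macro): growing a
   block.  realloc first tries to absorb the following free chunk; if that
   is not enough it falls back to malloc-copy-free, so the old contents
   survive either way. */
static void show_realloc()
{
  int i;
  char* s = (char*)malloc(8);
  char* t;
  if (s == 0) return;
  for (i = 0; i < 7; ++i) s[i] = 'a' + i;
  s[7] = '\0';
  t = (char*)realloc(s, 1000);
  if (t != 0)
  {
    fprintf(stderr, "after realloc: %s\n", t); /* still "abcdefg" */
    free(t);
  }
}
#endif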
1113
1114\f
1115
1116/* return a pointer to space with at least the alignment requested */
1117
1118void* memalign(unsigned int alignment, unsigned int bytes)
1119{
1120 mchunkptr p;
1121 unsigned int nb = request2size(bytes);
1122
1123 /* find an alignment that both we and the user can live with: */
1124 /* least common multiple guarantees mutual happiness */
1125 unsigned int align = lcm(alignment, MALLOC_MIN_OVERHEAD);
1126 unsigned int mask = align - 1;
1127
1128 /* call malloc with worst case padding to hit alignment; */
1129 /* we will give back extra */
1130
1131 unsigned int req = nb + align + MINSIZE;
1132 void* m = malloc(req);
1133
1134 if (m == 0) return m;
1135
1136 p = mem2chunk(m);
1137
1138 /* keep statistics on track */
1139
1140 UPDATE_STATS(--n_mallocs);
1141 UPDATE_STATS(malloced_mem -= p->size);
1142 UPDATE_STATS(requested_mem -= req);
1143 UPDATE_STATS(requested_mem += bytes);
1144
1145 if (((int)(m) & (mask)) != 0) /* misaligned */
1146 {
1147
1148 /* find an aligned spot inside chunk */
1149
1150 mchunkptr ap = (mchunkptr)(( ((int)(m) + mask) & -align) - SIZE_SZ);
1151
1152 unsigned int gap = (unsigned int)(ap) - (unsigned int)(p);
1153 unsigned int room;
1154
1155 /* we need to give back leading space in a chunk of at least MINSIZE */
1156
1157 if (gap < MINSIZE)
1158 {
1159 /* This works since align >= MINSIZE */
1160 /* and we've malloc'd enough total room */
1161
1162 ap = (mchunkptr)( (int)(ap) + align );
1163 gap += align;
1164 }
1165
1166 if (gap + nb > p->size) /* can't happen unless chunk sizes corrupted */
1167 malloc_user_error();
1168
1169 room = p->size - gap;
1170
1171 /* give back leader */
1172 set_size(p, gap);
1173 consollink(p);
1174
1175 /* use the rest */
1176 p = ap;
1177 set_size(p, room);
1178 }
1179
1180 /* also give back spare room at the end */
1181
1182 split(p, nb);
1183 UPDATE_STATS(do_malloc_stats(p));
1184 return chunk2mem(p);
1185
1186}
1187
1188#ifndef sun
1189#include "getpagesize.h"
1190#endif
1191
1192static unsigned int malloc_pagesize = 0;
1193
1194void* valloc(unsigned int bytes)
1195{
1196 if (malloc_pagesize == 0) malloc_pagesize = getpagesize();
1197 return memalign (malloc_pagesize, bytes);
1198}
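
#ifdef MALLOC_EXAMPLES
/* Illustrative sketch only (example-only MALLOC_EXAMPLES macro): typical
   memalign()/valloc() usage.  The returned blocks are ordinary chunks carved
   out of malloc'ed space, so they can be handed straight back to free(). */
static void show_memalign()
{
  void* a = memalign(64, 1000);   /* at least 1000 bytes, 64-byte aligned */
  void* b = valloc(512);          /* page aligned */
  if (a != 0)
  {
    fprintf(stderr, "64-byte aligned: %s\n",
            (((unsigned int)a & 63) == 0) ? "yes" : "no");
    free(a);
  }
  if (b != 0) free(b);
}
#endif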
1199
1200\f
1201void malloc_stats()
1202{
1203#ifndef MALLOC_STATS
1204}
1205#else
1206 int i;
1207 mchunkptr p;
1208 double nm = (double)(n_mallocs + n_reallocs);
1209
1210 fprintf(stderr, "\nmalloc statistics\n\n");
1211
1212 if (n_mallocs != 0)
1213 fprintf(stderr, "requests = %10u total size = %10u\tave = %10u\n",
1214 n_mallocs, requested_mem, requested_mem/n_mallocs);
1215
1216 if (n_mallocs != 0)
1217 fprintf(stderr, "mallocs = %10u total size = %10u\tave = %10u\n",
1218 n_mallocs, malloced_mem, malloced_mem/n_mallocs);
1219
1220 if (n_frees != 0)
1221 fprintf(stderr, "frees = %10u total size = %10u\tave = %10u\n",
1222 n_frees, freed_mem, freed_mem/n_frees);
1223
1224 if (n_mallocs-n_frees != 0)
1225 fprintf(stderr, "in use = %10u total size = %10u\tave = %10u\n",
1226 n_mallocs-n_frees, malloced_mem-freed_mem,
1227 (malloced_mem-freed_mem) / (n_mallocs-n_frees));
1228
1229 if (max_inuse != 0)
1230 fprintf(stderr, "max in use= %10u total size = %10u\tave = %10u\n",
1231 max_inuse, max_used_mem, max_used_mem / max_inuse);
1232
1233 if (n_avail != 0)
1234 fprintf(stderr, "available = %10u total size = %10u\tave = %10u\n",
1235 n_avail, sbrked_mem - (malloced_mem-freed_mem),
1236 (sbrked_mem - (malloced_mem-freed_mem)) / n_avail);
1237
1238 if (n_sbrks != 0)
1239 fprintf(stderr, "sbrks = %10u total size = %10u\tave = %10u\n\n",
1240 n_sbrks, sbrked_mem, sbrked_mem/ n_sbrks);
1241
1242 if (n_reallocs != 0)
1243 fprintf(stderr, "reallocs = %10u with copy = %10u\n\n",
1244 n_reallocs, n_reallocs_with_copy);
1245
1246
1247 if (nm != 0)
1248 {
1249 fprintf(stderr, "chunks scanned per malloc = %6.3f\n",
1250 n_malloc_chunks / nm);
1251 fprintf(stderr, "bins scanned per malloc = %6.3f\n",
1252 n_malloc_bins / nm);
1253 fprintf(stderr, "splits per malloc = %6.3f\n",
1254 n_split / nm);
1255 fprintf(stderr, "consolidations per malloc = %6.3f\n",
1256 n_consol / nm);
1257 }
1258
1259 fprintf(stderr, "\nfree chunks:\n");
1260 for (i = 0; i < MAXBIN; ++i)
1261 {
1262 p = av[i].hd.fd;
1263 if (p != &(av[i].hd))
1264 {
1265 unsigned int count = 1;
1266 unsigned int sz = p->size;
1267 for (p = p->fd; p != &(av[i].hd); p = p->fd)
1268 {
1269 if (p->size == sz)
1270 ++count;
1271 else
1272 {
1273 fprintf(stderr, "\tsize = %10u count = %5u\n", sz, count);
1274 count = 1;
1275 sz = p->size;
1276 }
1277 }
1278
1279 fprintf(stderr, "\tsize = %10u count = %5u\n", sz, count);
1280
1281 }
1282 }
1283}
1284#endif /* MALLOC_STATS */
1285
1286#endif /* NO_LIBGXX_MALLOC */
1287
1288