/*
 * SLABALLOC.C	- Userland SLAB memory allocator
 *
 * Copyright (c) 2003 Matthew Dillon <dillon@backplane.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/lib/libcaps/slaballoc.c,v 1.3 2004/03/06 19:48:22 dillon Exp $
 *
 * This module implements a thread-safe slab allocator for userland.
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is the chunk size
 * multiplied by the number of zones: ~80 zones * 128K = 10MB of VM per cpu.
 * To mitigate this we attempt to select a reasonable zone size based on
 * available system memory, e.g. 32K instead of 128K.  Also, since the
 * slab allocator is operating out of virtual memory in userland, the actual
 * physical memory use is not as bad as it might otherwise be.
 *
 * The upside is that overhead is bounded... waste goes down as use goes up.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.  In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 *	Alloc Size	Chunking	Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 * Allocations >= ZoneLimit go directly to kmem.
 *
 *			API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + small power-of-2 sized allocations are power-of-2 aligned (kern_tty)
 *    + all power-of-2 sized allocations are power-of-2 aligned (twe)
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */
#include <sys/types.h>
#include <sys/stdint.h>
#include <sys/malloc.h>
#include <sys/thread.h>
#include <sys/msgport.h>
#include <sys/errno.h>
#include "globaldata.h"
#include <sys/sysctl.h>
#include <sys/thread2.h>
#include <sys/msgport2.h>

#define arysize(ary)	(sizeof(ary)/sizeof((ary)[0]))
#define slab_min(a,b)	(((a)<(b)) ? (a) : (b))
/*
 * Fixed globals (not per-cpu)
 */
static int ZoneSize;
static int ZoneLimit;
static int ZonePageCount;
static int ZonePageLimit;
static int ZoneMask;
static struct malloc_type *kmemstatistics;
static int32_t weirdary[16];

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 * IN_SAME_PAGE_MASK is used to sanity-check the per-page free lists.
 */
#define MIN_CHUNK_SIZE		8		/* in bytes */
#define MIN_CHUNK_MASK		(MIN_CHUNK_SIZE - 1)
#define ZONE_RELS_THRESH	2		/* threshold number of zones */
#define IN_SAME_PAGE_MASK	(~(intptr_t)PAGE_MASK | MIN_CHUNK_MASK)
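/*
 * Illustrative note, assuming 4K pages: PAGE_MASK is 0xfff, so
 * IN_SAME_PAGE_MASK is ~0xfff | 0x7.  Two chunk pointers compare equal
 * under this mask only when they lie in the same page and agree in their
 * low three bits (i.e. both are consistently 8-byte aligned), which is
 * what the per-page free list diagnostics below rely on.
 */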
#define SLOVERSZ_HSIZE		8192
#define SLOVERSZ_HMASK		(SLOVERSZ_HSIZE - 1)

#define SLOVERSZ_HASH(ptr)	((((uintptr_t)ptr >> PAGE_SHIFT) ^	\
				  ((uintptr_t)ptr >> (PAGE_SHIFT * 2))) & \
				 SLOVERSZ_HMASK)

SLOversized *SLOvHash[SLOVERSZ_HSIZE];
/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * it has been freed.
 */
#define WEIRD_ADDR	0xdeadc0de
#define MAX_COPY	sizeof(weirdary)
#define ZERO_LENGTH_PTR	((void *)-8)
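/*
 * ZERO_LENGTH_PTR is the non-NULL sentinel returned for 0-byte requests
 * (see the malloc(0) API note above).  slab_free() and slab_realloc()
 * check for it explicitly, so it is never dereferenced or looked up in
 * a zone.
 */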
/*
 * Misc global malloc buckets
 */
MALLOC_DEFINE(M_OVERSIZED, "overszinfo", "Oversized Info Blocks");
static SLOversized **
get_oversized(void *ptr)
{
    SLOversized **slovpp;
    SLOversized *slov;

    for (slovpp = &SLOvHash[SLOVERSZ_HASH(ptr)];
	 (slov = *slovpp) != NULL;
	 slovpp = &slov->ov_Next
    ) {
	if (slov->ov_Ptr == ptr)
	    return(slovpp);
    }
    return(NULL);
}
/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
    int pagecnt;
    int pagecnt_size = sizeof(pagecnt);

    error = sysctlbyname("vm.stats.vm.v_page_count",
			 &pagecnt, &pagecnt_size, NULL, 0);
    if (error == 0) {
	limsize = pagecnt * (vm_poff_t)PAGE_SIZE;
	usesize = (int)(limsize / 1024);	/* convert to KB */

	ZoneSize = ZALLOC_MIN_ZONE_SIZE;
	while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
	    ZoneSize <<= 1;
    } else {
	ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    }
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
	ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ZoneSize - 1;
    ZonePageLimit = PAGE_SIZE * 4;
    ZonePageCount = ZoneSize / PAGE_SIZE;

    for (i = 0; i < arysize(weirdary); ++i)
	weirdary[i] = WEIRD_ADDR;
    slab_malloc_init(M_OVERSIZED);
}
/*
 * Initialize a malloc type tracking structure.
 */
void
slab_malloc_init(void *data)
{
    struct malloc_type *type = data;
    vm_poff_t limsize;

    /*
     * Skip if already initialized
     */
    if (type->ks_limit != 0)
	return;

    type->ks_magic = M_MAGIC;
    limsize = (vm_poff_t)-1;	/* unlimited */
    type->ks_limit = limsize / 10;
    type->ks_next = kmemstatistics;
    kmemstatistics = type;
}
void
slab_malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
    long ttl;
    int i;

    if (type->ks_magic != M_MAGIC)
	panic("malloc type lacks magic");
    if (type->ks_limit == 0)
	panic("malloc_uninit on uninitialized type");

    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another, individual array entries may be
     * negative or positive (canceling each other out).
     */
    for (i = ttl = 0; i < ncpus; ++i)
	ttl += type->ks_memuse[i];
    if (ttl) {
	printf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
	    ttl, type->ks_shortdesc, i);
    }
    if (type == kmemstatistics) {
	kmemstatistics = type->ks_next;
    } else {
	for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
	    if (t->ks_next == type) {
		t->ks_next = type->ks_next;
		break;
	    }
	}
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
}
/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
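/*
 * Worked example (illustrative): a 100 byte request falls in the smallest
 * band, is rounded up to the next 8 byte boundary (104), and maps to zone
 * index 104/8 - 1 = 12; a 1000 byte request is rounded up to 1024 and maps
 * to zone index 1024/128 + 31 = 39.
 */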
static __inline int
zoneindex(unsigned long *bytes)
{
    unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */
    if (n < 128) {
	*bytes = n = (n + 7) & ~7;
	return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
	*bytes = n = (n + 15) & ~15;
	return(n / 16 + 7);
    }
    if (n < 8192) {
	if (n < 512) {
	    *bytes = n = (n + 31) & ~31;
	    return(n / 32 + 15);
	}
	if (n < 1024) {
	    *bytes = n = (n + 63) & ~63;
	    return(n / 64 + 23);
	}
	if (n < 2048) {
	    *bytes = n = (n + 127) & ~127;
	    return(n / 128 + 31);
	}
	if (n < 4096) {
	    *bytes = n = (n + 255) & ~255;
	    return(n / 256 + 39);
	}
	*bytes = n = (n + 511) & ~511;
	return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
	*bytes = n = (n + 1023) & ~1023;
	return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
	*bytes = n = (n + 2047) & ~2047;
	return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}
/*
 * slab_malloc()	(SLAB ALLOCATOR)
 *
 * Allocate memory via the slab allocator.  If the request is too large,
 * or if it is page-aligned beyond a certain size, we fall back to the
 * KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 * &SlabMisc if you don't care.
 *
 * M_NOWAIT	- return NULL instead of blocking.
 * M_ZERO	- zero the returned memory.
 */
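/*
 * Illustrative usage sketch (M_EXAMPLE is a hypothetical bucket, not one
 * defined in this file):
 *
 *	MALLOC_DEFINE(M_EXAMPLE, "example", "example allocations");
 *
 *	void *p = slab_malloc(128, M_EXAMPLE, M_ZERO);
 *	if (p != NULL) {
 *		p = slab_realloc(p, 256, M_EXAMPLE, 0);
 *		slab_free(p, M_EXAMPLE);
 *	}
 */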
void *
slab_malloc(unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int zi;

    /*
     * XXX silly to have this in the critical path.
     */
    if (type->ks_limit == 0) {
	crit_enter();
	if (type->ks_limit == 0)
	    slab_malloc_init(type);
	crit_exit();
    }

    /*
     * Handle the case where the limit is reached.  Panic if we can't return
     * NULL.  XXX the original malloc code looped, but this tended to
     * simply deadlock the computer.
     */
    while (type->ks_loosememuse >= type->ks_limit) {
	int i;
	long ttl;

	for (i = ttl = 0; i < ncpus; ++i)
	    ttl += type->ks_memuse[i];
	type->ks_loosememuse = ttl;
	if (ttl >= type->ks_limit) {
	    if (flags & (M_NOWAIT|M_NULLOK))
		return(NULL);
	    panic("%s: malloc limit exceeded", type->ks_shortdesc);
	}
    }

    /*
     * Handle the degenerate size == 0 case.  Yes, this does happen.
     * Return a special pointer.  This is to maintain compatibility with
     * the original malloc implementation.  Certain devices, such as the
     * adaptec driver, not only allocate 0 bytes, they check for NULL and
     * also realloc() later on.  Joy.
     */
    if (size == 0)
	return(ZERO_LENGTH_PTR);
    /*
     * Handle hysteresis from prior frees here in malloc().  We cannot
     * safely manipulate the kernel_map in free() due to free() possibly
     * being called via an IPI message or from sensitive interrupt code.
     */
    while (slgd->NFreeZones > ZONE_RELS_THRESH && (flags & M_NOWAIT) == 0) {
	crit_enter();
	if (slgd->NFreeZones > ZONE_RELS_THRESH) {	/* crit sect race */
	    z = slgd->FreeZones;
	    slgd->FreeZones = z->z_Next;
	    --slgd->NFreeZones;
	    munmap(z, ZoneSize);
	}
	crit_exit();
    }

    /*
     * XXX handle oversized frees that were queued from free().
     */
    while (slgd->FreeOvZones && (flags & M_NOWAIT) == 0) {
	crit_enter();
	if ((z = slgd->FreeOvZones) != NULL) {
	    KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
	    slgd->FreeOvZones = z->z_Next;
	    munmap(z, z->z_ChunkSize);
	}
	crit_exit();
    }
    /*
     * Handle large allocations directly.  There should not be very many of
     * these so performance is not a big issue.
     *
     * Guarantee page alignment for allocations in multiples of PAGE_SIZE
     */
    if (size >= ZoneLimit || (size & PAGE_MASK) == 0) {
	SLOversized **slovpp;
	SLOversized *slov;

	slov = slab_malloc(sizeof(SLOversized), M_OVERSIZED, M_ZERO);

	size = round_page(size);
	chunk = mmap(NULL, size, PROT_READ|PROT_WRITE,
		     MAP_ANON|MAP_PRIVATE, -1, 0);
	if (chunk == MAP_FAILED) {
	    slab_free(slov, M_OVERSIZED);
	    return(NULL);
	}
	flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
	flags |= M_PASSIVE_ZERO;

	slov->ov_Ptr = chunk;
	slov->ov_Bytes = size;
	slovpp = &SLOvHash[SLOVERSZ_HASH(chunk)];
	slov->ov_Next = *slovpp;
	*slovpp = slov;
    /*
     * Attempt to allocate out of an existing zone.  First try the free list,
     * then allocate out of unallocated space.  If we find a good zone move
     * it to the head of the list so later allocations find it quickly
     * (we might have thousands of zones in the list).
     *
     * Note: zoneindex() will panic if size is too large.
     */
    zi = zoneindex(&size);
    KKASSERT(zi < NZONES);

    if ((z = slgd->ZoneAry[zi]) != NULL) {
	KKASSERT(z->z_NFree > 0);

	/*
	 * Remove us from the ZoneAry[] when we become empty
	 */
	if (--z->z_NFree == 0) {
	    slgd->ZoneAry[zi] = z->z_Next;
	    z->z_Next = NULL;
	}

	/*
	 * Locate a chunk in a free page.  This attempts to localize
	 * reallocations into earlier pages without us having to sort
	 * the chunk list.  A chunk may still overlap a page boundary.
	 */
	while (z->z_FirstFreePg < ZonePageCount) {
	    if ((chunk = z->z_PageAry[z->z_FirstFreePg]) != NULL) {
		/*
		 * Diagnostic: c_Next is not total garbage.
		 */
		KKASSERT(chunk->c_Next == NULL ||
			 ((intptr_t)chunk->c_Next & IN_SAME_PAGE_MASK) ==
			 ((intptr_t)chunk & IN_SAME_PAGE_MASK));
		if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
		    panic("chunk %p FFPG %d/%d", chunk, z->z_FirstFreePg, ZonePageCount);
		if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
		    panic("chunkNEXT %p %p FFPG %d/%d", chunk, chunk->c_Next, z->z_FirstFreePg, ZonePageCount);
		z->z_PageAry[z->z_FirstFreePg] = chunk->c_Next;

	/*
	 * No chunks are available but NFree said we had some memory, so
	 * it must be available in the never-before-used-memory area
	 * governed by UIndex.  The consequences are very serious if our zone
	 * got corrupted so we use an explicit panic rather than a KASSERT.
	 */
	if (z->z_UIndex + 1 != z->z_NMax)
	    z->z_UIndex = z->z_UIndex + 1;
	else
	    z->z_UIndex = 0;
	if (z->z_UIndex == z->z_UEndIndex)
	    panic("slaballoc: corrupted zone");
	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
	    flags &= ~M_ZERO;
	    flags |= M_PASSIVE_ZERO;
    /*
     * If all zones are exhausted we need to allocate a new zone for this
     * index.  Use M_ZERO to take advantage of pre-zeroed pages.  Also see
     * UAlloc use above in regards to M_ZERO.  Note that when we are reusing
     * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
     * we do not pre-zero it because we do not want to mess up the L1 cache.
     *
     * At least one subsystem, the tty code (see CROUND), expects power-of-2
     * allocations to be power-of-2 aligned.  We maintain compatibility by
     * adjusting the base offset below.
     */
    if ((z = slgd->FreeZones) != NULL) {
	slgd->FreeZones = z->z_Next;
	--slgd->NFreeZones;
	bzero(z, sizeof(SLZone));
	z->z_Flags |= SLZF_UNOTZEROD;
    } else {
	z = mmap(NULL, ZoneSize, PROT_READ|PROT_WRITE,
		 MAP_ANON|MAP_PRIVATE, -1, 0);
    }

    /*
     * Guarantee power-of-2 alignment for power-of-2-sized chunks.
     * Otherwise just 8-byte align the data.
     */
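    /*
     * Illustrative note: (size | (size - 1)) + 1 == (size << 1) holds only
     * for powers of 2, e.g. for size 64: (64 | 63) + 1 == 128 == 64 << 1,
     * whereas for size 48: (48 | 47) + 1 == 64 != 96.
     */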
    if ((size | (size - 1)) + 1 == (size << 1))
	off = (sizeof(SLZone) + size - 1) & ~(size - 1);
    else
	off = (sizeof(SLZone) + MIN_CHUNK_MASK) & ~MIN_CHUNK_MASK;
    z->z_Magic = ZALLOC_SLAB_MAGIC;
    z->z_ZoneIndex = zi;
    z->z_NMax = (ZoneSize - off) / size;
    z->z_NFree = z->z_NMax - 1;
    z->z_BasePtr = (char *)z + off;
    z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
    z->z_ChunkSize = size;
    z->z_FirstFreePg = ZonePageCount;
    z->z_Cpu = gd->gd_cpuid;

    chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
    z->z_Next = slgd->ZoneAry[zi];
    slgd->ZoneAry[zi] = z;
    if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
	flags &= ~M_ZERO;	/* already zero'd */
	flags |= M_PASSIVE_ZERO;
    }

    /*
     * Slide the base index for initial allocations out of the next
     * zone we create so we do not over-weight the lower part of the
     * zone.
     */
    slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
		    & (ZALLOC_MAX_ZONE_SIZE - 1);

    ++type->ks_inuse[gd->gd_cpuid];
    type->ks_memuse[gd->gd_cpuid] += size;
    type->ks_loosememuse += size;

    if (flags & M_ZERO)
	bzero(chunk, size);
    else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0)
	chunk->c_Next = (void *)-1;	/* avoid accidental double-free check */
    return(chunk);
}
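/*
 * slab_realloc()	(SLAB ALLOCATOR)
 *
 * Resize an existing allocation, copying the lesser of the old and new
 * sizes into the new block.  A NULL or ZERO_LENGTH_PTR old pointer simply
 * degenerates into slab_malloc().
 */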
void *
slab_realloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
{
    SLZone *z;
    SLOversized **slovpp;
    SLOversized *slov;
    void *nptr;
    unsigned long osize;

    if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
	return(slab_malloc(size, type, flags));
    if (size == 0) {
	slab_free(ptr, type);
	return(NULL);
    }

    /*
     * Handle oversized allocations.
     */
    if ((slovpp = get_oversized(ptr)) != NULL) {
	slov = *slovpp;
	osize = slov->ov_Bytes;
	if (osize == round_page(size))
	    return(ptr);
	if ((nptr = slab_malloc(size, type, flags)) == NULL)
	    return(NULL);
	bcopy(ptr, nptr, slab_min(size, osize));
	slab_free(ptr, type);
	return(nptr);
    }

    /*
     * Get the original allocation's zone.  If the new request winds up
     * using the same chunk size we do not have to do anything.
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    zoneindex(&size);
    if (z->z_ChunkSize == size)
	return(ptr);

    /*
     * Allocate memory for the new request size.  Note that zoneindex has
     * already adjusted the request size to the appropriate chunk size, which
     * should optimize our bcopy().  Then copy and return the new pointer.
     */
    if ((nptr = slab_malloc(size, type, flags)) == NULL)
	return(NULL);
    bcopy(ptr, nptr, slab_min(size, z->z_ChunkSize));
    slab_free(ptr, type);
    return(nptr);
}
/*
 * slab_free()	(SLAB ALLOCATOR)
 *
 * Free the specified chunk of memory.
 */
static void
slab_free_remote(void *ptr)
{
    slab_free(ptr, *(struct malloc_type **)ptr);
}
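/*
 * slab_free_remote() is the target of the cross-cpu free protocol: when
 * slab_free() below sees a chunk whose zone is owned by another cpu, it
 * stores the malloc_type pointer in the first word of the chunk and queues
 * this handler to the owning cpu via an asynchronous IPI; the handler
 * recovers the type from the chunk and re-enters slab_free() locally.
 */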
void
slab_free(void *ptr, struct malloc_type *type)
{
    SLZone *z;
    SLOversized **slovpp;
    SLOversized *slov;
    SLChunk *chunk;
    SLGlobalData *slgd;
    struct globaldata *gd;
    int pgno;

    /*
     * Handle special 0-byte allocations
     */
    if (ptr == ZERO_LENGTH_PTR)
	return;

    /*
     * Handle oversized allocations.  XXX we really should require that a
     * size be passed to slab_free() instead of this nonsense.
     *
     * This code is never called via an ipi.
     */
    if ((slovpp = get_oversized(ptr)) != NULL) {
	slov = *slovpp;
	*slovpp = slov->ov_Next;

	KKASSERT(sizeof(weirdary) <= slov->ov_Bytes);
	bcopy(weirdary, ptr, sizeof(weirdary));

	/*
	 * note: we always adjust our cpu's slot, not the originating
	 * cpu (kup->ku_cpuid).  The statistics are in aggregate.
	 *
	 * note: XXX we have still inherited the interrupts-can't-block
	 * assumption.  An interrupt thread does not bump
	 * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
	 * primarily until we can fix softupdate's assumptions about
	 * blocking.
	 */
	--type->ks_inuse[gd->gd_cpuid];
	type->ks_memuse[gd->gd_cpuid] -= slov->ov_Bytes;
	if (mycpu->gd_intr_nesting_level || (gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
	    z = (SLZone *)ptr;
	    z->z_Magic = ZALLOC_OVSZ_MAGIC;
	    z->z_Next = slgd->FreeOvZones;
	    z->z_ChunkSize = slov->ov_Bytes;
	    slgd->FreeOvZones = z;
	} else {
	    munmap(ptr, slov->ov_Bytes);
	}
	slab_free(slov, M_OVERSIZED);
	return;
    }
    /*
     * Zone case.  Figure out the zone based on the fact that it is
     * ZoneSize aligned (e.g. with a 128K ZoneSize, masking off the low
     * 17 bits of the chunk address recovers the zone header).
     */
    z = (SLZone *)((uintptr_t)ptr & ~(uintptr_t)ZoneMask);
    KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);

    /*
     * If we do not own the zone then forward the request to the
     * cpu that does.  The freeing code does not need the byte count
     * unless DIAGNOSTIC is set.
     */
    if (z->z_CpuGd != gd) {
	*(struct malloc_type **)ptr = type;
#ifdef SMP
	lwkt_send_ipiq(z->z_CpuGd, slab_free_remote, ptr);
#else
	panic("Corrupt SLZone");
#endif
	return;
    }

    if (type->ks_magic != M_MAGIC)
	panic("slab_free: malloc type lacks magic");

    pgno = ((char *)ptr - (char *)z) >> PAGE_SHIFT;
    chunk = ptr;

    /*
     * Attempt to detect a double-free.  To reduce overhead we only check
     * if there appears to be a link pointer at the base of the data.
     */
    if (((intptr_t)chunk->c_Next - (intptr_t)z) >> PAGE_SHIFT == pgno) {
	SLChunk *scan;

	for (scan = z->z_PageAry[pgno]; scan; scan = scan->c_Next) {
	    if (scan == chunk)
		panic("Double free at %p", chunk);
	}
    }

    /*
     * Put weird data into the memory to detect modifications after freeing,
     * illegal pointer use after freeing (we should fault on the odd address),
     * and so forth.  XXX needs more work, see the old malloc code.
     */
    if (z->z_ChunkSize < sizeof(weirdary))
	bcopy(weirdary, chunk, z->z_ChunkSize);
    else
	bcopy(weirdary, chunk, sizeof(weirdary));

    /*
     * Add this free non-zero'd chunk to a linked list for reuse, adjust
     * the z_FirstFreePg index.
     */
    if ((uintptr_t)chunk < VM_MIN_KERNEL_ADDRESS)
	panic("BADFREE %p\n", chunk);
    chunk->c_Next = z->z_PageAry[pgno];
    z->z_PageAry[pgno] = chunk;
    if (chunk->c_Next && (uintptr_t)chunk->c_Next < VM_MIN_KERNEL_ADDRESS)
	panic("BADFREE %p\n", chunk->c_Next);
    if (z->z_FirstFreePg > pgno)
	z->z_FirstFreePg = pgno;

    /*
     * Bump the number of free chunks.  If it becomes non-zero the zone
     * must be added back onto the appropriate list.
     */
    if (z->z_NFree++ == 0) {
	z->z_Next = slgd->ZoneAry[z->z_ZoneIndex];
	slgd->ZoneAry[z->z_ZoneIndex] = z;
    }

    --type->ks_inuse[z->z_Cpu];
    type->ks_memuse[z->z_Cpu] -= z->z_ChunkSize;

    /*
     * If the zone becomes totally free, and there are other zones we
     * can allocate from, move this zone to the FreeZones list.  Since
     * this code can be called from an IPI callback, do *NOT* try to mess
     * with kernel_map here.  Hysteresis will be performed at malloc() time.
     */
    if (z->z_NFree == z->z_NMax &&
	(z->z_Next || slgd->ZoneAry[z->z_ZoneIndex] != z)
    ) {
	SLZone **pz;

	for (pz = &slgd->ZoneAry[z->z_ZoneIndex]; z != *pz; pz = &(*pz)->z_Next)
	    ;
	*pz = z->z_Next;
	z->z_Magic = -1;
	z->z_Next = slgd->FreeZones;