/*
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 * $DragonFly: src/sys/vm/vm_zone.c,v 1.3 2003/07/03 17:24:04 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define	ZONE_ERROR_INVALID	0
#define	ZONE_ERROR_NOTFREE	1
#define	ZONE_ERROR_ALREADYFREE	2

#define	ZONE_ROUNDING	32

#define	ZENTRY_FREE	0x12342378

/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.
 */

static __inline__ void *
_zalloc(vm_zone_t z)
{
	void *item;

#ifdef INVARIANTS
	if (z == 0)
		zerror(ZONE_ERROR_INVALID);
#endif

	if (z->zfreecnt <= z->zfreemin) {
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
		return(item);
	}

	item = z->zitems;
	z->zitems = ((void **) item)[0];
#ifdef INVARIANTS
	KASSERT(item != NULL, ("zitems unexpectedly NULL"));
	if (((void **) item)[1] != (void *) ZENTRY_FREE)
		zerror(ZONE_ERROR_NOTFREE);
	((void **) item)[1] = 0;
#endif
	z->zfreecnt--;
	z->znalloc++;
	return item;
}

static __inline__ void
_zfree(vm_zone_t z, void *item)
{
	((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
	if (((void **) item)[1] == (void *) ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
	z->zitems = item;
	z->zfreecnt++;
}

/*
 * This file implements a very simple zone allocator.  It is used in
 * lieu of the general-purpose malloc allocator where a dedicated
 * fixed-size zone is needed or more efficient.
 *
 * Note that the initial implementation included cache coloring; it
 * produced no measurable improvement (it actually degraded performance),
 * so it was removed.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of an item may be overwritten between
 * allocations (while the item is free they hold the free-list link and
 * the ZENTRY_FREE magic).  Any data that must remain stable across a
 * free/alloc cycle must therefore reside after the first two longwords;
 * see the layout example below.
 *
 * zinitna, zinit, and zbootinit are the initialization routines.
 * zalloc and zfree are the interrupt/lock unsafe allocation/free routines.
 * zalloci and zfreei are the interrupt/lock safe allocation/free routines.
 */

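/*
 * Illustrative item layout (hypothetical structure, not part of this
 * file).  While an item sits on a zone's free list, word 0 holds the
 * free-list link and word 1 holds the ZENTRY_FREE magic (under
 * INVARIANTS), so only fields after the first two pointer-sized words
 * remain stable across a zfree()/zalloc() cycle:
 *
 *	struct zexample {
 *		void	*ze_scratch0;	(clobbered while the item is free)
 *		void	*ze_scratch1;	(clobbered while the item is free)
 *		int	 ze_stable;	(type-stable across reuse)
 *	};
 */
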
static struct vm_zone *zlist;
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages, zone_kmem_kvaspace;

/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), allocate the kernel virtual space up front and allocate
 * only pages when needed.
 *
 * Arguments:
 * z		pointer to zone structure.
 * obj		pointer to VM object (opt).
 * name		name of zone.
 * size		size of zone entries.
 * nentries	number of zone entries allocated (ZONE_INTERRUPT only).
 * flags	ZONE_INTERRUPT -- items can be allocated at interrupt time.
 * zalloc	number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument; see the sizing sketch below.  The amount of
 * allocatable memory is unlimited if ZONE_INTERRUPT is not set.
 */

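/*
 * Sizing sketch (hypothetical numbers, assuming PAGE_SIZE is 4096):
 * an interrupt-safe zone of 128-byte items created with nentries = 1024
 * reserves round_page(128 * 1024) = 131072 bytes of KVA up front, so
 * zpagemax is 32 pages and the zone can never grow beyond that, while a
 * zone created without ZONE_INTERRUPT has no such cap.
 */
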
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
	int nentries, int flags, int zalloc)
{
	int totsize;

	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
		simple_lock_init(&z->zlock);
		z->zname = name;
		z->zitems = NULL;
		z->zfreecnt = z->ztotal = z->znalloc = 0;
		z->zmax = 0;
		z->znext = zlist;
		zlist = z;
	}
	z->zflags |= flags;

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page(z->zsize * nentries);
		zone_kmem_kvaspace += totsize;

		z->zkva = kmem_alloc_pageable(kernel_map, totsize);
		if (z->zkva == 0)
			return 0;

		z->zpagemax = totsize / PAGE_SIZE;
		if (obj == NULL) {
			z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
		} else {
			z->zobj = obj;
			_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
		}
		z->zallocflag = VM_ALLOC_INTERRUPT;
		z->zmax += nentries;
	} else {
		z->zallocflag = VM_ALLOC_SYSTEM;
		z->zmax = 0;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;

	z->zpagecount = 0;
	z->zalloc = (zalloc > 0) ? zalloc : 1;

	return 1;
}

/*
 * Subroutine same as zinitna, except the zone data structure is allocated
 * automatically by malloc.  This routine should normally be used, except
 * in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  zinit is the standard zone
 * initialization call.
 */

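/*
 * Typical use (illustrative only; the zone and structure names are
 * hypothetical and not defined in this file):
 *
 *	static vm_zone_t foo_zone;
 *
 *	foo_zone = zinit("FOOZONE", sizeof(struct foo), 0, 0, 1);
 *	if (foo_zone == NULL)
 *		panic("cannot create foo zone");
 *
 * zinit() returns NULL if the zone header or its KVA cannot be set up;
 * ZONE_PANICFAIL only affects later zalloc() failures, not zinit() itself.
 */
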
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
	vm_zone_t z;

	z = (vm_zone_t) malloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, NULL, name, size, nentries, flags, zalloc) == 0) {
		free(z, M_ZONE);
		return NULL;
	}

	return z;
}

/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 */

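/*
 * Bootstrap sketch (hypothetical names): before the VM system is up, a
 * zone can be seeded from a statically allocated array instead of from
 * kernel_map/kmem_map:
 *
 *	static struct vm_zone foo_zone_store;
 *	static struct foo foo_boot_items[FOO_BOOT_ITEMS];
 *
 *	zbootinit(&foo_zone_store, "FOOZONE", sizeof(struct foo),
 *	    foo_boot_items, FOO_BOOT_ITEMS);
 *
 * Because zbootinit() sets ZONE_BOOT, a later zinitna() call on the same
 * zone skips the basic setup and only finishes the VM-backed parts.
 */
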
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
	int i;

	z->zname = name;
	z->zsize = size;
	z->zflags = ZONE_BOOT;
	simple_lock_init(&z->zlock);

	bzero(item, nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (char *) item + z->zsize;
	}
	z->zfreecnt = nitems;
	z->zmax = nitems;
	z->ztotal = nitems;

	z->znext = zlist;
	zlist = z;
}

/*
 * Zone critical region locks.
 */
static __inline__ int
zlock(vm_zone_t z)
{
	int s;

	s = splhigh();
	simple_lock(&z->zlock);
	return s;
}

static __inline__ void
zunlock(vm_zone_t z, int s)
{
	simple_unlock(&z->zlock);
	splx(s);
}

/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.
 *
 * void *zalloci(vm_zone_t zone) --
 *	Returns an item from a specified zone, interrupt safe.
 *
 * void zfreei(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone, interrupt safe.
 */

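/*
 * Usage sketch (hypothetical zone and structure names):
 *
 *	struct foo *fp;
 *
 *	fp = zalloc(foo_zone);
 *	if (fp == NULL)		(possible unless ZONE_PANICFAIL was set)
 *		return (ENOMEM);
 *	...
 *	zfree(foo_zone, fp);
 *
 * zalloci() and zfreei() take the same arguments and are the variants to
 * use when the zone may also be accessed from interrupt context.
 */
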
void
zfree(vm_zone_t z, void *item)
{
	_zfree(z, item);
}

/*
 * Zone allocator/deallocator.  These are interrupt (and potentially SMP)
 * safe.  The raw zalloc/zfree routines are not interrupt safe, but are fast.
 */

void
zfreei(vm_zone_t z, void *item)
{
	int s;

	s = zlock(z);
	_zfree(z, item);
	zunlock(z, s);
}

/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 */
static void *
zget(vm_zone_t z)
{
	int i;
	vm_page_t m;
	int nitems, nbytes;
	void *item;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		nbytes = z->zpagecount * PAGE_SIZE;
		nbytes -= nbytes % z->zsize;
		item = (char *) z->zkva + nbytes;
		for (i = 0; ((i < z->zalloc) && (z->zpagecount < z->zpagemax));
		     i++) {
			vm_offset_t zkva;

			m = vm_page_alloc(z->zobj, z->zpagecount,
					  z->zallocflag);
			if (m == NULL)
				break;

			zkva = z->zkva + z->zpagecount * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
			bzero((caddr_t) zkva, PAGE_SIZE);
			z->zpagecount++;
			zone_kmem_pages++;
			vmstats.v_wire_count++;
		}
		nitems = ((z->zpagecount * PAGE_SIZE) - nbytes) / z->zsize;
	} else {
		nbytes = z->zalloc * PAGE_SIZE;

		/*
		 * Check to see if the kernel map is already locked.  We could allow
		 * for recursive locks, but that eliminates a valuable debugging
		 * mechanism, and opens up the kernel map for potential corruption
		 * by inconsistent data structure manipulation.  We could also use
		 * the interrupt allocation mechanism, but that has size limitations.
		 * Luckily, we have kmem_map that is a submap of kernel map available
		 * for memory allocation, and manipulation of that map doesn't affect
		 * the kernel map structures themselves.
		 *
		 * We can wait, so just do normal map allocation in the appropriate
		 * map.
		 */
		if (lockstatus(&kernel_map->lock, NULL)) {
			simple_unlock(&z->zlock);
			item = (void *) kmem_malloc(kmem_map, nbytes, M_WAITOK);
			simple_lock(&z->zlock);
			if (item != NULL)
				zone_kmem_pages += z->zalloc;
		} else {
			simple_unlock(&z->zlock);
			item = (void *) kmem_alloc(kernel_map, nbytes);
			simple_lock(&z->zlock);
			if (item != NULL)
				zone_kern_pages += z->zalloc;
		}
		if (item == NULL)
			nitems = 0;
		else
			nitems = nbytes / z->zsize;
	}
	z->ztotal += nitems;

	/*
	 * Save one for immediate allocation
	 */
	if (nitems != 0) {
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (char *) item + z->zsize;
		}
		z->zfreecnt += nitems;
		z->znalloc++;
	} else if (z->zfreecnt > 0) {
		item = z->zitems;
		z->zitems = ((void **) item)[0];
#ifdef INVARIANTS
		if (((void **) item)[1] != (void *) ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = 0;
#endif
		z->zfreecnt--;
		z->znalloc++;
	} else {
		item = NULL;
	}

	return item;
}

static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	vm_zone_t curzone, nextzone;
	char tmpbuf[128];
	char tmpname[14];

	snprintf(tmpbuf, sizeof(tmpbuf),
	    "\nITEM SIZE LIMIT USED FREE REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	for (curzone = zlist; curzone; curzone = nextzone) {
		int i;
		int len;
		int offset;

		nextzone = curzone->znext;
		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for(i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == zlist) {
			offset = 1;
			tmpbuf[0] = '\n';
		}

		snprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
		    "%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
		    tmpname, curzone->zsize, curzone->zmax,
		    (curzone->ztotal - curzone->zfreecnt),
		    curzone->zfreecnt, curzone->znalloc);

		len = strlen((char *)tmpbuf);
		if (nextzone == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);
		if (error)
			return (error);
	}
	return (0);
}

#ifdef INVARIANT_SUPPORT
void
zerror(int error)
{
	char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic(msg);
}
#endif

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
	NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages, CTLFLAG_RD,
	&zone_kmem_pages, 0, "Number of interrupt safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kmem_kvaspace, CTLFLAG_RD,
	&zone_kmem_kvaspace, 0, "KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages, CTLFLAG_RD,
	&zone_kern_pages, 0, "Number of non-interrupt safe pages allocated by zone");
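
/*
 * Example (command lines only, not compiled): the statistics above can be
 * read from userland with sysctl(8), e.g.
 *
 *	sysctl vm.zone
 *	sysctl vm.zone_kmem_pages vm.zone_kern_pages vm.zone_kmem_kvaspace
 *
 * vm.zone is formatted by sysctl_vm_zone() above.
 */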