/*
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 * $DragonFly: src/sys/vm/vm_zone.c,v 1.5 2003/07/28 04:41:37 hmp Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define ZONE_ERROR_INVALID	0
#define ZONE_ERROR_NOTFREE	1
#define ZONE_ERROR_ALREADYFREE	2

#define ZONE_ROUNDING	32

#define ZENTRY_FREE	0x12342378

static void *zget(vm_zone_t z);
/*
 * Return an item from the specified zone.  This function is interrupt/MP
 * thread safe, but might block.
 */
void *
zalloc(vm_zone_t z)
{
	void *item;

#ifdef INVARIANTS
	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);
#endif
	lwkt_gettoken(&z->zlock);
	if (z->zfreecnt <= z->zfreemin) {
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	} else {
		/* Pop the head of the free list. */
		item = z->zitems;
		z->zitems = ((void **) item)[0];
#ifdef INVARIANTS
		KASSERT(item != NULL, ("zitems unexpectedly NULL"));
		if (((void **) item)[1] != (void *) ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = 0;
#endif
		z->zfreecnt--;
		z->znalloc++;
	}
	lwkt_reltoken(&z->zlock);
	return item;
}
/*
 * Free an item to the specified zone.  This function is interrupt/MP
 * thread safe, but might block.
 */
void
zfree(vm_zone_t z, void *item)
{
	lwkt_gettoken(&z->zlock);
	((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
	if (((void **) item)[1] == (void *) ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
	z->zitems = item;
	z->zfreecnt++;
	lwkt_reltoken(&z->zlock);
}
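
/*
 * Illustrative sketch (not part of the original source): the free list
 * used by zalloc()/zfree() above is intrusive -- a free item's first word
 * links to the next free item, and under INVARIANTS its second word holds
 * the ZENTRY_FREE magic.  A minimal stand-alone model of the same
 * push/pop scheme, with hypothetical names:
 */
#if 0
static void *freelist;			/* analogue of z->zitems */

static void
model_push(void *item)
{
	((void **)item)[0] = freelist;	/* link item to the old head */
	freelist = item;
}

static void *
model_pop(void)
{
	void *item = freelist;

	if (item != NULL)
		freelist = ((void **)item)[0];	/* unlink the head */
	return item;
}
#endif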
/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually a performance degradation) occurred.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna, zinit, and zbootinit are the initialization routines.
 * zalloc and zfree are the allocation/free routines; as noted in their
 * comments above, they are interrupt/MP thread safe but might block.
 * zalloci and zfreei are the interrupt safe allocation/free routines.
 */
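
/*
 * Illustrative sketch (not part of the original source): because the
 * allocator reuses the first two longwords of each free item for the
 * free-list link and the ZENTRY_FREE magic, a type-stable structure must
 * keep any data it expects to survive free/realloc cycles out of those
 * two words.  The structure below is hypothetical.
 */
#if 0
struct mything {
	void	*mt_spare[2];	/* clobbered while on the zone free list */
	int	mt_stable;	/* safe: past the first two longwords */
};
#endif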
static struct vm_zone *zlist;
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages, zone_kmem_kvaspace;
/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), a-priori allocate the kernel virtual space, and allocate
 * only pages when needed.
 *
 * Arguments:
 * z		pointer to zone structure.
 * obj		pointer to VM object (opt).
 * name		name of zone.
 * size		size of zone entries.
 * nentries	number of zone entries allocated (only ZONE_INTERRUPT.)
 * flags	ZONE_INTERRUPT -- items can be allocated at interrupt time.
 * zalloc	number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The size of the memory allocatable is
 * unlimited if ZONE_INTERRUPT is not set.
 */
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
	int nentries, int flags, int zalloc)
{
	int totsize;

	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
		lwkt_inittoken(&z->zlock);
		z->zfreecnt = 0;
		z->ztotal = 0;
		z->znalloc = 0;
		z->zname = name;
		z->zitems = NULL;

		z->znext = zlist;
		zlist = z;
	}
	z->zflags |= flags;

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page(z->zsize * nentries);
		zone_kmem_kvaspace += totsize;

		z->zkva = kmem_alloc_pageable(kernel_map, totsize);
		if (z->zkva == 0)
			return 0;

		z->zpagemax = totsize / PAGE_SIZE;
		if (obj == NULL) {
			z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
		} else {
			z->zobj = obj;
			_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
		}
		z->zallocflag = VM_ALLOC_INTERRUPT;
		z->zmax = nentries;
	} else {
		z->zallocflag = VM_ALLOC_SYSTEM;
		z->zmax = 0;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;
	z->zpagecount = 0;
	z->zalloc = zalloc ? zalloc : 1;

	return 1;
}
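
/*
 * Illustrative sketch (not part of the original source): creating an
 * interrupt-safe zone with a caller-owned zone structure via zinitna().
 * All names and the entry count below are hypothetical.
 */
#if 0
struct pv_model { void *spare[2]; int datum; };
static struct vm_zone pvzone;

static void
pv_zone_setup(void)
{
	/* KVA reserved for at most 10000 entries; refill 2 pages at a time */
	zinitna(&pvzone, NULL, "pvmodel", sizeof(struct pv_model),
	    10000, ZONE_INTERRUPT, 2);
}
#endif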
/*
 * Subroutine same as zinitna, except zone data structure is allocated
 * automatically by malloc.  This routine should normally be used, except
 * in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  Zinit is the standard zone
 * initialization call.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
	vm_zone_t z;

	z = (vm_zone_t) malloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, NULL, name, size, nentries, flags, zalloc) == 0) {
		free(z, M_ZONE);
		return NULL;
	}
	return z;
}
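
/*
 * Illustrative sketch (not part of the original source): the normal way
 * to create a zone, using zinit() with a malloc'd zone structure.  The
 * zone and helper names are hypothetical; "struct mything" is from the
 * sketch above.
 */
#if 0
static vm_zone_t mything_zone;

static void
mything_zone_init(void)
{
	/* unbounded zone, refill one page at a time */
	mything_zone = zinit("mything", sizeof(struct mything), 0, 0, 1);
	if (mything_zone == NULL)
		panic("mything_zone_init");
}
#endif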
/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
	int i;

	z->zname = name;
	z->zsize = size;
	z->zflags = ZONE_BOOT;
	z->zfreemin = 0;
	z->zallocflag = 0;
	z->zpagecount = 0;
	z->zalloc = 0;
	z->znalloc = 0;
	lwkt_inittoken(&z->zlock);

	/* Thread the statically supplied items onto the free list. */
	bzero(item, nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (char *) item + z->zsize;
	}
	z->zfreecnt = nitems;
	z->ztotal = nitems;

	z->znext = zlist;
	zlist = z;
}
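
/*
 * Illustrative sketch (not part of the original source): bootstrapping a
 * zone from a static array before VM startup; zinitna() may be called on
 * the same zone later to enable dynamic refills.  Names and the item
 * count are hypothetical.
 */
#if 0
static struct vm_zone bootzone;
static struct mything bootitems[64];

static void
mything_bootinit(void)
{
	zbootinit(&bootzone, "mything", sizeof(struct mything),
	    bootitems, 64);
}
#endif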
/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.
 *
 * void *zalloci(vm_zone_t zone) --
 *	Returns an item from a specified zone, interrupt safe.
 *
 * void zfreei(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone, interrupt safe.
 */
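
/*
 * Illustrative sketch (not part of the original source): the usual
 * allocate/use/free pattern against one of the hypothetical zones above.
 * The NULL check may be omitted only for zones created with
 * ZONE_PANICFAIL.
 */
#if 0
	struct mything *mt;

	mt = zalloc(mything_zone);
	if (mt == NULL)
		return (ENOMEM);
	mt->mt_stable = 1;	/* past the two reserved longwords */
	zfree(mything_zone, mt);
#endif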
/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 */
static void *
zget(vm_zone_t z)
{
	int i;
	vm_page_t m;
	int nitems, nbytes;
	void *item;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		nbytes = z->zpagecount * PAGE_SIZE;
		nbytes -= nbytes % z->zsize;
		item = (char *) z->zkva + nbytes;
		for (i = 0; ((i < z->zalloc) && (z->zpagecount < z->zpagemax));
		     i++) {
			vm_offset_t zkva;

			m = vm_page_alloc(z->zobj, z->zpagecount,
					  z->zallocflag);
			if (m == NULL)
				break;
			lwkt_regettoken(&z->zlock);

			zkva = z->zkva + z->zpagecount * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m)); /* YYY */
			bzero((caddr_t) zkva, PAGE_SIZE);
			z->zpagecount++;
			zone_kmem_pages++;
			vmstats.v_wire_count++;
		}
		nitems = ((z->zpagecount * PAGE_SIZE) - nbytes) / z->zsize;
	} else {
		nbytes = z->zalloc * PAGE_SIZE;

		/*
		 * Check to see if the kernel map is already locked.
		 * We could allow for recursive locks, but that eliminates
		 * a valuable debugging mechanism, and opens up the kernel
		 * map for potential corruption by inconsistent data structure
		 * manipulation.  We could also use the interrupt allocation
		 * mechanism, but that has size limitations.  Luckily, we
		 * have kmem_map that is a submap of kernel map available
		 * for memory allocation, and manipulation of that map doesn't
		 * affect the kernel map structures themselves.
		 *
		 * We can wait, so just do normal map allocation in the
		 * appropriate map.
		 */
		if (lockstatus(&kernel_map->lock, NULL)) {
			item = (void *) kmem_malloc(kmem_map, nbytes, M_WAITOK);
			lwkt_regettoken(&z->zlock);
			if (item != NULL)
				zone_kmem_pages += z->zalloc;
		} else {
			item = (void *) kmem_alloc(kernel_map, nbytes);
			lwkt_regettoken(&z->zlock);
			if (item != NULL)
				zone_kern_pages += z->zalloc;
		}
		if (item != NULL)
			bzero(item, nbytes);
		else
			nbytes = 0;
		nitems = nbytes / z->zsize;
	}
	z->ztotal += nitems;

	/*
	 * Save one for immediate allocation
	 */
	if (nitems != 0) {
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (char *) item + z->zsize;
		}
		z->zfreecnt += nitems;
		z->znalloc++;
	} else if (z->zfreecnt > 0) {
		item = z->zitems;
		z->zitems = ((void **) item)[0];
#ifdef INVARIANTS
		if (((void **) item)[1] != (void *) ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = 0;
#endif
		z->zfreecnt--;
		z->znalloc++;
	} else {
		item = NULL;
	}

	return item;
}
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	vm_zone_t curzone, nextzone;
	char tmpbuf[128];
	char tmpname[14];

	snprintf(tmpbuf, sizeof(tmpbuf),
	    "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	for (curzone = zlist; curzone; curzone = nextzone) {
		int i;
		int len;
		int offset;

		nextzone = curzone->znext;
		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for (i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == zlist) {
			offset = 1;
			tmpbuf[0] = '\n';
		}

		snprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
		    "%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
		    tmpname, curzone->zsize, curzone->zmax,
		    (curzone->ztotal - curzone->zfreecnt),
		    curzone->zfreecnt, curzone->znalloc);

		len = strlen((char *)tmpbuf);
		if (nextzone == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);
		if (error)
			return (error);
	}
	return (0);
}
#if defined(INVARIANTS) && defined(INVARIANT_SUPPORT)
void
zerror(int error)
{
	char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic(msg);
}
#endif
SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
	NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages,
	CTLFLAG_RD, &zone_kmem_pages, 0, "Number of interrupt safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kmem_kvaspace,
	CTLFLAG_RD, &zone_kmem_kvaspace, 0, "KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages,
	CTLFLAG_RD, &zone_kern_pages, 0, "Number of non-interrupt safe pages allocated by zone");
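
/*
 * Illustrative sketch (not part of the original source): reading the
 * vm.zone string registered above from a userland program with
 * sysctlbyname(3).
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	size_t len;
	char *buf;

	/* First call sizes the buffer, second call fetches the report. */
	if (sysctlbyname("vm.zone", NULL, &len, NULL, 0) == -1)
		return 1;
	buf = malloc(len);
	if (buf == NULL || sysctlbyname("vm.zone", buf, &len, NULL, 0) == -1)
		return 1;
	printf("%s\n", buf);
	free(buf);
	return 0;
}
#endif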