/*
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 * $DragonFly: src/sys/vm/vm_zone.c,v 1.17 2004/10/26 04:33:11 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define	ZONE_ERROR_INVALID	0
#define	ZONE_ERROR_NOTFREE	1
#define	ZONE_ERROR_ALREADYFREE	2

#define	ZONE_ROUNDING	32

#define	ZENTRY_FREE	0x12342378

static void *zget(vm_zone_t z);
/*
 * Return an item from the specified zone.  This function is interrupt/MP
 * thread safe, but might block.
 */
void *
zalloc(vm_zone_t z)
{
	void *item;
	lwkt_tokref ilock;

#ifdef INVARIANTS
	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);
#endif
	lwkt_gettoken(&ilock, &z->zlock);
	if (z->zfreecnt <= z->zfreemin) {
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	} else {
		item = z->zitems;
#ifdef INVARIANTS
		KASSERT(item != NULL, ("zitems unexpectedly NULL"));
		if (((void **) item)[1] != (void *) ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = 0;
#endif
		z->zitems = ((void **) item)[0];
		z->zfreecnt--;
		z->znalloc++;
	}
	lwkt_reltoken(&ilock);
	return item;
}
/*
 * Free an item to the specified zone.  This function is interrupt/MP
 * thread safe, but might block.
 */
void
zfree(vm_zone_t z, void *item)
{
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &z->zlock);
	((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
	if (((void **) item)[1] == (void *) ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
	z->zitems = item;
	z->zfreecnt++;
	lwkt_reltoken(&ilock);
}
/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually a performance degradation) occurred.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna, zinit, zbootinit are the initialization routines.
 * zalloc, zfree, are the allocation/free routines.
 */
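/*
 * Illustrative sketch, not part of the original file: the zone free list
 * reuses the first two longwords of each item (a next pointer plus the
 * ZENTRY_FREE magic under INVARIANTS) while the item is free.  A
 * hypothetical zone-backed structure therefore keeps any data that must
 * remain type stable past those first two words:
 */
#if 0
struct foo {				/* hypothetical example structure */
	void	*f_linkage[2];		/* overlaid by the zone free list */
	int	f_stable_state;		/* survives free/alloc cycles */
};
#endif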
static struct vm_zone *zlist;
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages, zone_kmem_kvaspace;
/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), a-priori allocate the kernel virtual space, and allocate
 * only pages when needed.
 *
 * Arguments:
 *	z		pointer to zone structure.
 *	obj		pointer to VM object (opt).
 *	name		name of zone.
 *	size		size of zone entries.
 *	nentries	number of zone entries allocated (only ZONE_INTERRUPT.)
 *	flags		ZONE_INTERRUPT -- items can be allocated at interrupt time.
 *	zalloc		number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The size of the memory allocatable is
 * unlimited if ZONE_INTERRUPT is not set.  An illustrative calling
 * sketch follows the function below.
 */
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
	int nentries, int flags, int zalloc)
{
	int totsize;

	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
		lwkt_token_init(&z->zlock);
		z->zname = name;
		z->znext = zlist;
		zlist = z;
	}
	z->zflags |= flags;

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.  This is particularly required when creating
	 * an allocation space for map entries in kernel_map, because we
	 * do not want to go into a recursion deadlock with
	 * vm_map_entry_reserve().
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page(z->zsize * nentries);
		zone_kmem_kvaspace += totsize;

		z->zkva = kmem_alloc_pageable(kernel_map, totsize);
		if (z->zkva == 0)
			return 0;

		z->zpagemax = totsize / PAGE_SIZE;
		if (obj == NULL) {
			z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
		} else {
			z->zobj = obj;
			_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
		}
		z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT;
		z->zmax += nentries;
	} else {
		z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
		z->zmax = 0;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;

	z->zpagecount = 0;
	z->zalloc = (zalloc > 0) ? zalloc : 1;

	return 1;
}
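/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller with a statically allocated zone header and VM object, creating
 * an interrupt-safe zone whose item count is capped by the nentries
 * argument.  The names and sizes below are assumptions for illustration.
 */
#if 0
static struct vm_zone	foo_zone_store;
static struct vm_object	foo_obj_store;

static void
foo_zone_setup(void)
{
	/* 512-byte items, at most 1024 of them, 4 pages per expansion */
	zinitna(&foo_zone_store, &foo_obj_store, "FOOINT", 512,
		1024, ZONE_INTERRUPT, 4);
}
#endif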
/*
 * Subroutine same as zinitna, except zone data structure is allocated
 * automatically by malloc.  This routine should normally be used, except
 * in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  Zinit is the standard zone
 * initialization call.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
	vm_zone_t z;

	z = (vm_zone_t) malloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, NULL, name, size, nentries, flags, zalloc) == 0) {
		free(z, M_ZONE);
		return NULL;
	}

	return z;
}
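/*
 * Illustrative sketch, not part of the original file: typical use of the
 * zinit()/zalloc()/zfree() interface by a hypothetical subsystem.  The
 * structure and zone name are assumptions for illustration only.
 */
#if 0
struct bar {
	void		*b_reserved[2];	/* overlaid by the zone free list */
	int		b_value;
};

static vm_zone_t bar_zone;

static void
bar_init(void)
{
	/* unlimited zone, grown one page at a time */
	bar_zone = zinit("BAR", sizeof(struct bar), 0, 0, 1);
}

static void
bar_use(void)
{
	struct bar *bp;

	bp = zalloc(bar_zone);		/* may block; may return NULL */
	if (bp == NULL)
		return;
	bp->b_value = 1;
	zfree(bar_zone, bp);
}
#endif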
/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
	int i;

	z->zname = name;
	z->zsize = size;
	z->zflags = ZONE_BOOT;

	lwkt_token_init(&z->zlock);

	bzero(item, nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (uint8_t *)item + z->zsize;
	}
	z->zfreecnt = nitems;

	z->znext = zlist;
	zlist = z;
}
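/*
 * Illustrative sketch, not part of the original file: how early VM code
 * might seed a zone from a static array before kernel memory allocation
 * works, then upgrade it with zinitna() once the VM system is up.  The
 * structure, array size and zone name are assumptions for illustration.
 */
#if 0
static struct vm_zone	early_zone_store;
static struct early_obj	early_objs[64];		/* boot-time backing store */

static void
early_zone_bootstrap(void)
{
	zbootinit(&early_zone_store, "EARLYOBJ", sizeof(struct early_obj),
		  early_objs, 64);
	/* ... later, after full VM startup ... */
	zinitna(&early_zone_store, NULL, "EARLYOBJ", sizeof(struct early_obj),
		0, 0, 1);
}
#endif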
/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */
/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 */
static void *
zget(vm_zone_t z)
{
	int i;
	vm_page_t m;
	int nitems, nbytes;
	void *item;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Interrupt zones do not mess with the kernel_map, they
		 * simply populate an existing mapping.
		 */
		nbytes = z->zpagecount * PAGE_SIZE;
		nbytes -= nbytes % z->zsize;
		item = (char *) z->zkva + nbytes;
		for (i = 0; ((i < z->zalloc) && (z->zpagecount < z->zpagemax));
		     i++) {
			vm_offset_t zkva;

			m = vm_page_alloc(z->zobj, z->zpagecount,
					  z->zallocflag);
			/* note: z might be modified due to blocking */
			if (m == NULL)
				break;

			zkva = z->zkva + z->zpagecount * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m)); /* YYY */
			bzero((caddr_t) zkva, PAGE_SIZE);
			z->zpagecount++;
			zone_kmem_pages++;
			vmstats.v_wire_count++;
		}
		nitems = ((z->zpagecount * PAGE_SIZE) - nbytes) / z->zsize;
	} else if (z->zflags & ZONE_SPECIAL) {
		/*
		 * The special zone is the one used for vm_map_entry_t's.
		 * We have to avoid an infinite recursion in
		 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
		 * instead.  The map entries are pre-reserved by the kernel
		 * by vm_map_entry_reserve_cpu_init().
		 */
		nbytes = z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(kernel_map, nbytes, KM_KRESERVE);

		/* note: z might be modified due to blocking */
		if (item != NULL)
			zone_kern_pages += z->zalloc;
		else
			nbytes = 0;
		nitems = nbytes / z->zsize;
	} else {
		/*
		 * Otherwise allocate KVA from the kernel_map.
		 */
		nbytes = z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(kernel_map, nbytes, 0);

		/* note: z might be modified due to blocking */
		if (item != NULL)
			zone_kern_pages += z->zalloc;
		else
			nbytes = 0;
		nitems = nbytes / z->zsize;
	}
	z->ztotal += nitems;

	/*
	 * Save one for immediate allocation
	 */
	if (nitems != 0) {
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (uint8_t *)item + z->zsize;
		}
		z->zfreecnt += nitems;
		z->znalloc++;
	} else if (z->zfreecnt > 0) {
		item = z->zitems;
		z->zitems = ((void **) item)[0];
#ifdef INVARIANTS
		if (((void **) item)[1] != (void *) ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = 0;
#endif
		z->zfreecnt--;
		z->znalloc++;
	} else {
		item = NULL;
	}

	/*
	 * A special zone may have used a kernel-reserved vm_map_entry.  If
	 * so we have to be sure to recover our reserve so we don't run out.
	 * We will panic if we run out.
	 */
	if (z->zflags & ZONE_SPECIAL)
		vm_map_entry_reserve(0);

	return item;
}
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	vm_zone_t curzone, nextzone;
	char tmpbuf[128];
	char tmpname[14];

	snprintf(tmpbuf, sizeof(tmpbuf),
	    "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	for (curzone = zlist; curzone; curzone = nextzone) {
		int i;
		int len;
		int offset;

		nextzone = curzone->znext;
		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for (i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == zlist) {
			offset = 1;
			tmpbuf[0] = '\n';
		}

		snprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
		    "%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
		    tmpname, curzone->zsize, curzone->zmax,
		    (curzone->ztotal - curzone->zfreecnt),
		    curzone->zfreecnt, curzone->znalloc);

		len = strlen((char *)tmpbuf);
		if (nextzone == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);
		if (error)
			return (error);
	}
	return (0);
}
#if defined(INVARIANTS)
void
zerror(int error)
{
	char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic(msg);
}
#endif
SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
	NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages,
	CTLFLAG_RD, &zone_kmem_pages, 0, "Number of interrupt safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kmem_kvaspace,
	CTLFLAG_RD, &zone_kmem_kvaspace, 0, "KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages,
	CTLFLAG_RD, &zone_kern_pages, 0, "Number of non-interrupt safe pages allocated by zone");
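/*
 * Illustrative note, not part of the original file: the statistics above
 * can be inspected from userland, e.g.:
 *
 *	$ sysctl vm.zone
 *	$ sysctl vm.zone_kmem_pages vm.zone_kmem_kvaspace vm.zone_kern_pages
 *
 * where vm.zone prints one line per zone in the format produced by
 * sysctl_vm_zone() above.
 */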