/*
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 * $DragonFly: src/sys/vm/vm_zone.c,v 1.28 2008/01/23 17:35:48 nth Exp $
 */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>
static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define	ZONE_ERROR_INVALID	0
#define	ZONE_ERROR_NOTFREE	1
#define	ZONE_ERROR_ALREADYFREE	2

#define	ZONE_ROUNDING	32

#define	ZENTRY_FREE	0x12342378
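/*
 * Layout sketch (hypothetical struct, for exposition only): a free item's
 * first two longwords carry the free-list linkage that zalloc()/zfree()
 * maintain; ZENTRY_FREE is the magic stored in the second longword while
 * the item sits on the free list:
 *
 *	struct zfree_item {
 *		void	*zf_next;	next item on the z->zitems chain
 *		void	*zf_magic;	ZENTRY_FREE while the item is free
 *	};
 */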
static void *zget(vm_zone_t z);
/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 */
void *
zalloc(vm_zone_t z)
{
	void *item;

#ifdef INVARIANTS
	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);
#endif
	spin_lock(&z->zlock);
	if (z->zfreecnt > z->zfreemin) {
		/*
		 * Fast path: pop an item off the zone's free list.
		 */
		item = z->zitems;
#ifdef INVARIANTS
		KASSERT(item != NULL, ("zitems unexpectedly NULL"));
		if (((void **) item)[1] != (void *) ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = 0;
#endif
		z->zitems = ((void **) item)[0];
		z->zfreecnt--;
		z->znalloc++;
		spin_unlock(&z->zlock);
	} else {
		/*
		 * Slow path: refill the zone via zget().
		 */
		spin_unlock(&z->zlock);
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	}
	return item;
}
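/*
 * Usage sketch (hypothetical zone and type, for exposition only): a zone
 * created with ZONE_PANICFAIL lets callers omit the NULL check, since a
 * failed allocation panics instead of returning NULL:
 *
 *	bar_zone = zinit("BAR", sizeof(struct bar), 0, ZONE_PANICFAIL, 1);
 *	bp = zalloc(bar_zone);		never NULL if we return at all
 */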
/*
 * Free an item to the specified zone.
 */
void
zfree(vm_zone_t z, void *item)
{
	spin_lock(&z->zlock);
	((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
	if (((void **) item)[1] == (void *) ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
	z->zitems = item;
	z->zfreecnt++;
	spin_unlock(&z->zlock);
}
/*
 * This file comprises a very simple zone allocator.  It is used
 * in lieu of the malloc allocator where it is needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually a performance degradation) occurred.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure may be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside after the first two longwords.
 *
 * zinitna, zinit and zbootinit are the initialization routines.
 * zalloc and zfree are the allocation/free routines.
 */
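/*
 * Typical life cycle (hypothetical "struct foo" and zone, for exposition
 * only): create the zone once with zinit(), then allocate and free items
 * from it as needed:
 *
 *	static vm_zone_t foo_zone;
 *
 *	foo_zone = zinit("FOO", sizeof(struct foo), 0, 0, 1);
 *	fp = zalloc(foo_zone);
 *	...
 *	zfree(foo_zone, fp);
 */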
LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);

static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages, zone_kmem_kvaspace;
/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), a priori allocate the kernel virtual space, and allocate
 * only pages when needed.
 *
 * Arguments:
 *	z		pointer to zone structure.
 *	obj		pointer to VM object (opt).
 *	name		name of zone.
 *	size		size of zone entries.
 *	nentries	number of zone entries allocated (only ZONE_INTERRUPT.)
 *	flags		ZONE_INTERRUPT -- items can be allocated at interrupt time.
 *	zalloc		number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The size of the memory allocatable is
 * unlimited if ZONE_INTERRUPT is not set.
 */
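/*
 * Worked sizing example (values assumed for exposition): with zsize = 128
 * and nentries = 1000, an interrupt zone reserves
 * totsize = round_page(128 * 1000) = 131072 bytes of KVA on a machine
 * with 4K pages, i.e. zpagemax = 32 pages.  Physical pages are wired
 * into that range later, only as zget() needs them.
 */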
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
	int nentries, int flags, int zalloc)
{
	int totsize;
	/*
	 * Only zones created with zinit() are destroyable.
	 */
	if (z->zflags & ZONE_DESTROYABLE)
		panic("zinitna: can't create destroyable zone");

	/*
	 * NOTE: We can only adjust zsize if we previously did not
	 *	 use zbootinit().
	 */
	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
		spin_init(&z->zlock);
		z->zname = name;
		z->zitems = NULL;
		z->zfreecnt = 0;

		lwkt_gettoken(&vm_token);
		LIST_INSERT_HEAD(&zlist, z, zlink);
		lwkt_reltoken(&vm_token);
	}

	z->zkmvec = NULL;
	z->zkmcur = z->zkmmax = 0;
	z->zflags |= flags;
	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.  This is particularly required when creating
	 * an allocation space for map entries in kernel_map, because we
	 * do not want to go into a recursion deadlock with
	 * vm_map_entry_reserve().
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page(z->zsize * nentries);
		zone_kmem_kvaspace += totsize;

		z->zkva = kmem_alloc_pageable(&kernel_map, totsize);
		if (z->zkva == 0) {
			LIST_REMOVE(z, zlink);
			return 0;
		}

		z->zpagemax = totsize / PAGE_SIZE;
		if (obj == NULL) {
			z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
		} else {
			z->zobj = obj;
			_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
		}
		z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT;
	} else {
		z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;

	z->zpagecount = 0;
	if (zalloc)
		z->zalloc = zalloc;
	else
		z->zalloc = 1;

	/*
	 * Populate the interrupt zone at creation time rather than
	 * on first allocation, as this is a potentially long operation.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		void *buf;

		buf = zget(z);
		if (buf != NULL)
			zfree(z, buf);
	}

	return 1;
}
/*
 * Subroutine same as zinitna, except the zone data structure is allocated
 * automatically by malloc.  This routine should normally be used, except
 * in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  zinit is the standard zone
 * initialization call.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
	vm_zone_t z;

	z = (vm_zone_t) kmalloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, NULL, name, size, nentries,
		    flags & ~ZONE_DESTROYABLE, zalloc) == 0) {
		kfree(z, M_ZONE);
		return NULL;
	}

	if (flags & ZONE_DESTROYABLE)
		z->zflags |= ZONE_DESTROYABLE;

	return z;
}
/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 *
 * Called from the low level boot code only.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
	int i;

	z->zname = name;
	z->zsize = size;
	z->zflags = ZONE_BOOT;
	z->zalloc = 0;
	spin_init(&z->zlock);

	bzero(item, nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (uint8_t *)item + z->zsize;
	}
	z->zfreecnt = nitems;

	lwkt_gettoken(&vm_token);
	LIST_INSERT_HEAD(&zlist, z, zlink);
	lwkt_reltoken(&vm_token);
}
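/*
 * Boot-time usage sketch (hypothetical structure and storage, for
 * exposition only): the caller supplies both the zone header and a static
 * array of items, since kmalloc() is not available this early:
 *
 *	static struct vm_zone foo_zone_store;
 *	static struct foo foo_init[NFOO];
 *
 *	zbootinit(&foo_zone_store, "FOO", sizeof(struct foo),
 *		  foo_init, NFOO);
 */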
/*
 * Release all resources owned by a zone created with zinit().
 */
void
zdestroy(vm_zone_t z)
{
	int i;

	if (z == NULL)
		panic("zdestroy: null zone");
	if ((z->zflags & ZONE_DESTROYABLE) == 0)
		panic("zdestroy: undestroyable zone");

	lwkt_gettoken(&vm_token);
	LIST_REMOVE(z, zlink);
	lwkt_reltoken(&vm_token);

	/*
	 * Release virtual mappings, physical memory and update sysctl stats.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Pages mapped via pmap_kenter() must be removed from the
		 * kernel_pmap() before calling kmem_free() to avoid issues
		 * with kernel_pmap.pm_stats.resident_count.
		 */
		pmap_qremove(z->zkva, z->zpagemax);

		/*
		 * Free the mapping.
		 */
		kmem_free(&kernel_map, z->zkva, z->zpagemax*PAGE_SIZE);
		atomic_subtract_int(&zone_kmem_kvaspace, z->zpagemax*PAGE_SIZE);

		/*
		 * Free the backing object and physical pages.
		 */
		vm_object_deallocate(z->zobj);
		atomic_subtract_int(&zone_kmem_pages, z->zpagecount);
	} else {
		for (i = 0; i < z->zkmcur; i++) {
			kmem_free(&kernel_map, z->zkmvec[i],
				  z->zalloc*PAGE_SIZE);
			atomic_subtract_int(&zone_kern_pages, z->zalloc);
		}
		if (z->zkmvec != NULL)
			kfree(z->zkmvec, M_ZONE);
	}

	spin_uninit(&z->zlock);
	kfree(z, M_ZONE);
}
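/*
 * Teardown sketch (hypothetical zone, for exposition only): only zones
 * created by zinit() with ZONE_DESTROYABLE may be released again:
 *
 *	tmp_zone = zinit("TMP", sizeof(struct tmp), 0, ZONE_DESTROYABLE, 4);
 *	...
 *	zdestroy(tmp_zone);
 */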
/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */
/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 */
static void *
zget(vm_zone_t z)
{
	vm_page_t m;
	int i;
	int nitems, nbytes;
	int savezpc;
	void *item;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Interrupt zones do not mess with the kernel_map, they
		 * simply populate an existing mapping.
		 */
		vm_object_hold(z->zobj);
		savezpc = z->zpagecount;
		nbytes = z->zpagecount * PAGE_SIZE;
		nbytes -= nbytes % z->zsize;
		item = (char *) z->zkva + nbytes;
		for (i = 0; ((i < z->zalloc) && (z->zpagecount < z->zpagemax));
		     i++) {
			vm_offset_t zkva;

			m = vm_page_alloc(z->zobj, z->zpagecount,
					  z->zallocflag);
			/* note: z might be modified due to blocking */
			if (m == NULL)
				break;

			/*
			 * Unbusy the page so it can be freed in zdestroy().
			 * Make sure it is not on any queue and so can not be
			 * recycled under our feet.
			 */
			KKASSERT(m->queue == PQ_NONE);
			vm_page_flag_clear(m, PG_BUSY);

			zkva = z->zkva + z->zpagecount * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m)); /* YYY */
			bzero((void *)zkva, PAGE_SIZE);
			KKASSERT(savezpc == z->zpagecount);
			savezpc++;
			z->zpagecount++;
			zone_kmem_pages++;
			vmstats.v_wire_count++;
		}
		nitems = ((z->zpagecount * PAGE_SIZE) - nbytes) / z->zsize;
		vm_object_drop(z->zobj);
	} else if (z->zflags & ZONE_SPECIAL) {
		/*
		 * The special zone is the one used for vm_map_entry_t's.
		 * We have to avoid an infinite recursion in
		 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
		 * instead.  The map entries are pre-reserved by the kernel
		 * by vm_map_entry_reserve_cpu_init().
		 */
		nbytes = z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes, KM_KRESERVE);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			zone_kern_pages += z->zalloc;	/* not MP-safe XXX */
			bzero(item, nbytes);
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	} else {
		/*
		 * Otherwise allocate KVA from the kernel_map.
		 */
		nbytes = z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes, 0);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			zone_kern_pages += z->zalloc;	/* not MP-safe XXX */
			bzero(item, nbytes);

			if (z->zflags & ZONE_DESTROYABLE) {
				if (z->zkmcur == z->zkmmax) {
					z->zkmmax =
						z->zkmmax==0 ? 1 : z->zkmmax*2;
					z->zkmvec = krealloc(z->zkmvec,
					    z->zkmmax * sizeof(z->zkmvec[0]),
					    M_ZONE, M_WAITOK);
				}
				z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
			}
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	}
	spin_lock(&z->zlock);
	z->ztotal += nitems;

	/*
	 * Save one for immediate allocation
	 */
	if (nitems != 0) {
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **) item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **) item)[1] = (void *) ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (uint8_t *)item + z->zsize;
		}
		z->zfreecnt += nitems;
		z->znalloc++;
	} else if (z->zfreecnt > 0) {
		item = z->zitems;
		z->zitems = ((void **) item)[0];
#ifdef INVARIANTS
		if (((void **) item)[1] != (void *) ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = 0;
#endif
		z->zfreecnt--;
		z->znalloc++;
	} else {
		item = NULL;
	}
	spin_unlock(&z->zlock);

	/*
	 * A special zone may have used a kernel-reserved vm_map_entry.  If
	 * so we have to be sure to recover our reserve so we don't run out.
	 * We will panic if we run out.
	 */
	if (z->zflags & ZONE_SPECIAL)
		vm_map_entry_reserve(0);

	return item;
}
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	vm_zone_t curzone;
	char tmpbuf[128];
	char tmpname[14];
	int error = 0;

	ksnprintf(tmpbuf, sizeof(tmpbuf),
	    "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	lwkt_gettoken(&vm_token);
	LIST_FOREACH(curzone, &zlist, zlink) {
		int offset;
		int len;
		int i;

		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for (i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == LIST_FIRST(&zlist)) {
			offset = 1;
			tmpbuf[0] = '\n';
		}

		ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
			  "%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
			  tmpname, curzone->zsize, curzone->zmax,
			  (curzone->ztotal - curzone->zfreecnt),
			  curzone->zfreecnt, curzone->znalloc);

		len = strlen((char *)tmpbuf);
		if (LIST_NEXT(curzone, zlink) == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);
		if (error)
			break;
	}
	lwkt_reltoken(&vm_token);
	return (error);
}
#if defined(INVARIANTS)

/*
 * Debugging aid: report an inconsistent zone condition and panic.
 */
void
zerror(int error)
{
	char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic("%s", msg);
}

#endif
SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
	   NULL, 0, sysctl_vm_zone, "A", "Zone Info");
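/*
 * Usage note (illustrative, assuming the standard sysctl(8) utility):
 * the handler above backs the read-only string node "vm.zone", so the
 * per-zone table can be inspected from userland with:
 *
 *	$ sysctl vm.zone
 */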
SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages, CTLFLAG_RD,
	   &zone_kmem_pages, 0,
	   "Number of interrupt safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kmem_kvaspace, CTLFLAG_RD,
	   &zone_kmem_kvaspace, 0,
	   "KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages, CTLFLAG_RD,
	   &zone_kern_pages, 0,
	   "Number of non-interrupt safe pages allocated by zone");