/*
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>
#include <vm/vm_page2.h>
static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define	ZONE_ERROR_INVALID	0
#define	ZONE_ERROR_NOTFREE	1
#define	ZONE_ERROR_ALREADYFREE	2

#define	ZONE_ROUNDING	32

#define	ZENTRY_FREE	0x12342378

static int zone_burst = 32;
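/*
 * Free items are chained through their own storage: word [0] of a free
 * item holds the next-free pointer and word [1] holds the ZENTRY_FREE
 * magic verified by the INVARIANTS checks.  A rough sketch (this struct
 * is illustrative only and is not declared or used in this file):
 *
 *	struct free_item {
 *		void	*next;		-- ((void **)item)[0]
 *		void	*magic;		-- ((void **)item)[1], ZENTRY_FREE
 *	};
 */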
static void *zget(vm_zone_t z);
/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 */
void *
zalloc(vm_zone_t z)
{
	globaldata_t gd = mycpu;
	void *item;
	int n;

	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);

retry:
	/*
	 * Avoid spinlock contention by allocating from a per-cpu queue
	 */
	if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
		crit_enter_gd(gd);
		if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
			item = z->zitems_pcpu[gd->gd_cpuid];
			KASSERT(item != NULL,
				("zitems_pcpu unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
			((void **)item)[1] = NULL;
			z->zitems_pcpu[gd->gd_cpuid] = ((void **)item)[0];
			--z->zfreecnt_pcpu[gd->gd_cpuid];
			crit_exit_gd(gd);
			return(item);
		}
		crit_exit_gd(gd);
	}
	/*
	 * Per-zone spinlock for the remainder.  Always load at least one
	 * item.
	 */
	spin_lock(&z->zlock);
	if (z->zfreecnt > z->zfreemin) {
		n = zone_burst;
		do {
			item = z->zitems;
			KASSERT(item != NULL, ("zitems unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
			((void **)item)[1] = NULL;
			z->zitems = ((void **)item)[0];
			--z->zfreecnt;
			((void **)item)[0] = z->zitems_pcpu[gd->gd_cpuid];
			z->zitems_pcpu[gd->gd_cpuid] = item;
			++z->zfreecnt_pcpu[gd->gd_cpuid];
		} while (--n > 0 && z->zfreecnt > z->zfreemin);
		spin_unlock(&z->zlock);
		goto retry;
	} else {
		spin_unlock(&z->zlock);
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	}
	return(item);
}
/*
 * Free an item to the specified zone.
 */
void
zfree(vm_zone_t z, void *item)
{
	globaldata_t gd = mycpu;
	void *tail_item;
	int count;
	int zmax;

	/*
	 * Avoid spinlock contention by freeing into a per-cpu queue
	 */
	if ((zmax = z->zmax) != 0)
		zmax = zmax / ncpus / 16;
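	/*
	 * Illustrative numbers (not from this file): with z->zmax = 65536
	 * and ncpus = 8, the per-cpu cache is capped at
	 * 65536 / 8 / 16 = 512 free items before zfree() overflows into
	 * the global depot below.
	 */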
	crit_enter_gd(gd);
	((void **)item)[0] = z->zitems_pcpu[gd->gd_cpuid];
	if (((void **)item)[1] == (void *)ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **)item)[1] = (void *)ZENTRY_FREE;
	z->zitems_pcpu[gd->gd_cpuid] = item;
	++z->zfreecnt_pcpu[gd->gd_cpuid];

	if (z->zfreecnt_pcpu[gd->gd_cpuid] < zmax) {
		crit_exit_gd(gd);
		return;
	}
	/*
	 * Hysteresis, move (zmax) (calculated below) items to the pool.
	 */
	if (zmax > zone_burst)
		zmax = zone_burst;
	tail_item = item;
	count = 1;
	while (count < zmax) {
		tail_item = ((void **)tail_item)[0];
		++count;
	}
	z->zitems_pcpu[gd->gd_cpuid] = ((void **)tail_item)[0];
	z->zfreecnt_pcpu[gd->gd_cpuid] -= count;

	/*
	 * Per-zone spinlock for the remainder.
	 *
	 * Also implement hysteresis by freeing a number of pcpu
	 * entries at once.
	 */
	spin_lock(&z->zlock);
	((void **)tail_item)[0] = z->zitems;
	z->zitems = item;
	z->zfreecnt += count;
	spin_unlock(&z->zlock);
	crit_exit_gd(gd);
}
/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually a performance degradation)
 * occurred.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure may be changed
 * between allocations (the allocator uses them for the free-list link
 * and magic number while an item is free).  Any data that must be
 * stable between allocations must reside after the first two longwords.
 *
 * zinitna, zinit, and zbootinit are the initialization routines.
 * zalloc and zfree are the allocation/free routines.
 */
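/*
 * Example of the rule above (hypothetical structure, not from this
 * file): any field that must survive a zfree()/zalloc() cycle has to
 * live past the first two longwords:
 *
 *	struct foo {
 *		void	*f_scratch1;	-- may be clobbered while free
 *		void	*f_scratch2;	-- may be clobbered while free
 *		int	 f_stable;	-- preserved across free/alloc
 *	};
 */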
LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);

static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages;
static long zone_kmem_kvaspace;
/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), a priori allocate the kernel virtual space, and allocate
 * only pages when needed.
 *
 * Arguments:
 *	z		pointer to zone structure.
 *	obj		pointer to VM object (opt).
 *	name		name of the zone.
 *	size		size of zone entries.
 *	nentries	number of zone entries allocated (only ZONE_INTERRUPT.)
 *	flags		ZONE_INTERRUPT -- items can be allocated at interrupt
 *			time.
 *
 * The zone's zalloc field determines the number of pages allocated each
 * time more memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The size of the memory allocatable is
 * unlimited if ZONE_INTERRUPT is not set.
 */
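/*
 * Illustrative sketch (hypothetical zone and counts, not from this
 * file): an interrupt-safe zone with a fixed 1024-entry ceiling might
 * be created as
 *
 *	zinitna(&bar_zone, NULL, "BAR", sizeof(struct bar),
 *		1024, ZONE_INTERRUPT);
 *
 * which reserves KVA for all 1024 entries up front and fills in the
 * backing pages only as the zone grows.
 */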
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
	int nentries, int flags)
{
	/*
	 * Only zones created with zinit() are destroyable.
	 */
	if (z->zflags & ZONE_DESTROYABLE)
		panic("zinitna: can't create destroyable zone");

	/*
	 * NOTE: We can only adjust zsize if we previously did not
	 *	 use zbootinit().
	 */
	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = roundup2(size, ZONE_ROUNDING);
		spin_init(&z->zlock, "zinitna");
		z->zname = name;

		lwkt_gettoken(&vm_token);
		LIST_INSERT_HEAD(&zlist, z, zlink);
		lwkt_reltoken(&vm_token);

		bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
		bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));
	}

	z->zkmcur = z->zkmmax = 0;
	z->zkmvec = NULL;
	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.  This is particularly required when creating
	 * an allocation space for map entries in kernel_map, because we
	 * do not want to go into a recursion deadlock with
	 * vm_map_entry_reserve().
	 */
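	/*
	 * Illustrative arithmetic (values are hypothetical): a 192-byte
	 * entry zone created with nentries = 16384 reserves
	 * round_page(192 * 16384) = 3 MB of KVA here, while physical
	 * pages are only allocated as zget() populates the mapping.
	 */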
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page((size_t)z->zsize * nentries);
		atomic_add_long(&zone_kmem_kvaspace, totsize);

		z->zkva = kmem_alloc_pageable(&kernel_map, totsize,
					      VM_SUBSYS_ZALLOC);
		if (z->zkva == 0) {
			LIST_REMOVE(z, zlink);
			return 0;
		}

		z->zpagemax = totsize / PAGE_SIZE;
		if (obj == NULL) {
			z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
		} else {
			z->zobj = obj;
			_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
		}
		z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT |
				VM_ALLOC_NORMAL | VM_ALLOC_RETRY;
	} else {
		z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;
	/*
	 * Reduce kernel_map spam by allocating in chunks of 4 pages.
	 */
	if (z->zalloc == 0)
		z->zalloc = 4;

	/*
	 * Populate the interrupt zone at creation time rather than
	 * on first allocation, as this is a potentially long operation.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		void *buf;

		buf = zget(z);
		if (buf)
			zfree(z, buf);
	}

	return 1;
}
/*
 * Subroutine same as zinitna, except the zone data structure is allocated
 * automatically by malloc.  This routine should normally be used, except
 * in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  zinit() is the standard zone
 * initialization call.
 */
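/*
 * Typical usage (sketch; "foo_zone" and "struct foo" are hypothetical):
 *
 *	static vm_zone_t foo_zone;
 *
 *	foo_zone = zinit("FOO", sizeof(struct foo), 0, 0);
 *	if (foo_zone == NULL)
 *		panic("foo_zone");
 */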
vm_zone_t
zinit(char *name, int size, int nentries, int flags)
{
	vm_zone_t z;

	z = (vm_zone_t)kmalloc(sizeof(struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, NULL, name, size, nentries,
		    flags & ~ZONE_DESTROYABLE) == 0) {
		kfree(z, M_ZONE);
		return NULL;
	}

	if (flags & ZONE_DESTROYABLE)
		z->zflags |= ZONE_DESTROYABLE;

	return z;
}
/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 *
 * Called from the low level boot code only.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
	int i;

	bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
	bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));

	z->zname = name;
	z->zsize = size;
	z->zflags = ZONE_BOOT;

	spin_init(&z->zlock, "zbootinit");

	bzero(item, (size_t)nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **)item)[0] = z->zitems;
		((void **)item)[1] = (void *)ZENTRY_FREE;
		z->zitems = item;
		item = (uint8_t *)item + z->zsize;
	}
	z->zfreecnt = nitems;

	lwkt_gettoken(&vm_token);
	LIST_INSERT_HEAD(&zlist, z, zlink);
	lwkt_reltoken(&vm_token);
}
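/*
 * Boot-time usage sketch (names and sizes are hypothetical): the caller
 * must supply static storage for both the zone and the initial items,
 * since nothing can be allocated this early:
 *
 *	static struct vm_zone foo_zone_store;
 *	static struct foo foo_boot_items[64];
 *
 *	zbootinit(&foo_zone_store, "FOO", sizeof(struct foo),
 *		  foo_boot_items, 64);
 */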
/*
 * Release all resources owned by zone created with zinit().
 */
void
zdestroy(vm_zone_t z)
{
	vm_page_t m;
	int i;

	if (z == NULL)
		panic("zdestroy: null zone");
	if ((z->zflags & ZONE_DESTROYABLE) == 0)
		panic("zdestroy: undestroyable zone");

	lwkt_gettoken(&vm_token);
	LIST_REMOVE(z, zlink);
	lwkt_reltoken(&vm_token);

	/*
	 * Release virtual mappings, physical memory and update sysctl stats.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Pages mapped via pmap_kenter() must be removed from the
		 * kernel_pmap() before calling kmem_free() to avoid issues
		 * with kernel_pmap.pm_stats.resident_count.
		 */
		pmap_qremove(z->zkva, z->zpagemax);
		vm_object_hold(z->zobj);
		for (i = 0; i < z->zpagecount; ++i) {
			m = vm_page_lookup_busy_wait(z->zobj, i, TRUE, "vmzd");
			vm_page_unwire(m, 0);
			vm_page_free(m);
		}

		kmem_free(&kernel_map, z->zkva,
			  (size_t)z->zpagemax * PAGE_SIZE);
		atomic_subtract_long(&zone_kmem_kvaspace,
				     (size_t)z->zpagemax * PAGE_SIZE);

		/*
		 * Free the backing object and physical pages.
		 */
		vm_object_deallocate(z->zobj);
		vm_object_drop(z->zobj);
		atomic_subtract_int(&zone_kmem_pages, z->zpagecount);
	} else {
		for (i = 0; i < z->zkmcur; i++) {
			kmem_free(&kernel_map, z->zkmvec[i],
				  (size_t)z->zalloc * PAGE_SIZE);
			atomic_subtract_int(&zone_kern_pages, z->zalloc);
		}
		if (z->zkmvec != NULL)
			kfree(z->zkmvec, M_ZONE);
	}

	spin_uninit(&z->zlock);
	kfree(z, M_ZONE);
}
/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */
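/*
 * Usage sketch (hypothetical zone, not from this file):
 *
 *	struct foo *fp;
 *
 *	fp = zalloc(foo_zone);
 *	if (fp != NULL) {
 *		...
 *		zfree(foo_zone, fp);
 *	}
 */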
/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 */
static void *
zget(vm_zone_t z)
{
	vm_page_t m;
	int i, npages, savezpc;
	size_t nitems, nbytes, noffset;
	void *item;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Interrupt zones do not mess with the kernel_map, they
		 * simply populate an existing mapping.
		 *
		 * First reserve the required space.
		 */
		vm_object_hold(z->zobj);
		noffset = (size_t)z->zpagecount * PAGE_SIZE;
		noffset -= noffset % z->zsize;
		savezpc = z->zpagecount;
		if (z->zpagecount + z->zalloc > z->zpagemax)
			z->zpagecount = z->zpagemax;
		else
			z->zpagecount += z->zalloc;
		item = (char *)z->zkva + noffset;
		npages = z->zpagecount - savezpc;
		nitems = ((size_t)(savezpc + npages) * PAGE_SIZE - noffset) /
			 z->zsize;
		atomic_add_int(&zone_kmem_pages, npages);
		/*
		 * Now allocate the pages.  Note that we can block in the
		 * loop, so we've already done all the necessary calculations
		 * and reservations above.
		 */
		for (i = 0; i < npages; ++i) {
			vm_offset_t zkva;

			m = vm_page_alloc(z->zobj, savezpc + i, z->zallocflag);
			/* note: z might be modified due to blocking */
			KKASSERT(m->queue == PQ_NONE);
			m->valid = VM_PAGE_BITS_ALL;

			zkva = z->zkva + (size_t)(savezpc + i) * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
			bzero((void *)zkva, PAGE_SIZE);
		}
		vm_object_drop(z->zobj);
	} else if (z->zflags & ZONE_SPECIAL) {
		/*
		 * The special zone is the one used for vm_map_entry_t's.
		 * We have to avoid an infinite recursion in
		 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
		 * instead.  The map entries are pre-reserved by the kernel
		 * by vm_map_entry_reserve_cpu_init().
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes,
					   VM_SUBSYS_ZALLOC, KM_KRESERVE);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			atomic_add_int(&zone_kern_pages, z->zalloc);
			bzero(item, nbytes);
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	} else {
		/*
		 * Otherwise allocate KVA from the kernel_map.
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes,
					   VM_SUBSYS_ZALLOC, 0);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			atomic_add_int(&zone_kern_pages, z->zalloc);
			bzero(item, nbytes);

			if (z->zflags & ZONE_DESTROYABLE) {
				if (z->zkmcur == z->zkmmax) {
					z->zkmmax =
						z->zkmmax == 0 ? 1 : z->zkmmax * 2;
					z->zkmvec = krealloc(z->zkmvec,
						z->zkmmax * sizeof(z->zkmvec[0]),
						M_ZONE, M_WAITOK);
				}
				z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
			}
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	}
	spin_lock(&z->zlock);

	/*
	 * Save one for immediate allocation
	 */
	if (nitems != 0) {
		--nitems;
		for (i = 0; i < nitems; i++) {
			((void **)item)[0] = z->zitems;
			((void **)item)[1] = (void *)ZENTRY_FREE;
			z->zitems = item;
			item = (uint8_t *)item + z->zsize;
		}
		z->zfreecnt += nitems;
	} else if (z->zfreecnt > 0) {
		item = z->zitems;
		z->zitems = ((void **)item)[0];
		if (((void **)item)[1] != (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **)item)[1] = NULL;
		--z->zfreecnt;
	} else {
		item = NULL;
	}
	spin_unlock(&z->zlock);
	/*
	 * A special zone may have used a kernel-reserved vm_map_entry.  If
	 * so we have to be sure to recover our reserve so we don't run out.
	 * We will panic if we run out.
	 */
	if (z->zflags & ZONE_SPECIAL)
		vm_map_entry_reserve(0);

	return(item);
}
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	vm_zone_t curzone;
	char tmpbuf[128];
	char tmpname[14];
	u_int freecnt;
	int error = 0;
	int i, n, len, offset;

	ksnprintf(tmpbuf, sizeof(tmpbuf),
	    "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	lwkt_gettoken(&vm_token);
	LIST_FOREACH(curzone, &zlist, zlink) {
		/*
		 * Blank-pad the name column so the output lines up.
		 */
		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for (i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);

		offset = 0;
		if (curzone == LIST_FIRST(&zlist)) {
			offset = 1;
			tmpbuf[0] = '\n';
		}

		freecnt = curzone->zfreecnt;
		for (n = 0; n < ncpus; ++n)
			freecnt += curzone->zfreecnt_pcpu[n];

		ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
		    "%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
		    tmpname, curzone->zsize, curzone->zmax,
		    (curzone->ztotal - freecnt),
		    freecnt, curzone->znalloc);

		len = strlen((char *)tmpbuf);
		if (LIST_NEXT(curzone, zlink) == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);
		if (error)
			break;
	}
	lwkt_reltoken(&vm_token);
	return (error);
}
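/*
 * Sample "sysctl vm.zone" output (values are illustrative only; note
 * that the %m.mu conversions above zero-pad each column):
 *
 *	ITEM            SIZE     LIMIT    USED    FREE  REQUESTS
 *	MAP ENTRY     000112, 00000000, 002724, 000571, 00442028
 */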
#if defined(INVARIANTS)

static void
zerror(int error)
{
	char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic("%s", msg);
}
#endif
SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING | CTLFLAG_RD,
	   NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages, CTLFLAG_RD, &zone_kmem_pages, 0,
	   "Number of interrupt safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_burst, CTLFLAG_RW, &zone_burst, 0,
	   "Burst from depot to pcpu cache");
SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_kvaspace, CTLFLAG_RD,
	   &zone_kmem_kvaspace, 0, "KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages, CTLFLAG_RD, &zone_kern_pages, 0,
	   "Number of non-interrupt safe pages allocated by zone");