kernel - Fix overflows in vm_zone.c
[dragonfly.git] / sys / vm / vm_zone.c
/*
 * (MPSAFE)
 *
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 * $DragonFly: src/sys/vm/vm_zone.c,v 1.28 2008/01/23 17:35:48 nth Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define ZONE_ERROR_INVALID	0
#define ZONE_ERROR_NOTFREE	1
#define ZONE_ERROR_ALREADYFREE	2

#define ZONE_ROUNDING		32

#define ZENTRY_FREE		0x12342378

static void *zget(vm_zone_t z);

/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 *
 * No requirements.
 */
void *
zalloc(vm_zone_t z)
{
	globaldata_t gd = mycpu;
	void *item;

#ifdef INVARIANTS
	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);
#endif
	/*
	 * Avoid spinlock contention by allocating from a per-cpu queue
	 */
	if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
		crit_enter_gd(gd);
		if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
			item = z->zitems_pcpu[gd->gd_cpuid];
#ifdef INVARIANTS
			KASSERT(item != NULL,
				("zitems_pcpu unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
			((void **)item)[1] = 0;
#endif
			z->zitems_pcpu[gd->gd_cpuid] = ((void **)item)[0];
			--z->zfreecnt_pcpu[gd->gd_cpuid];
			z->znalloc++;
			crit_exit_gd(gd);
			return item;
		}
		crit_exit_gd(gd);
	}

	/*
	 * Per-zone spinlock for the remainder.
	 */
	spin_lock(&z->zlock);
	if (z->zfreecnt > z->zfreemin) {
		item = z->zitems;
#ifdef INVARIANTS
		KASSERT(item != NULL, ("zitems unexpectedly NULL"));
		if (((void **)item)[1] != (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **)item)[1] = 0;
#endif
		z->zitems = ((void **)item)[0];
		z->zfreecnt--;
		z->znalloc++;
		spin_unlock(&z->zlock);
	} else {
		spin_unlock(&z->zlock);
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	}
	return item;
}

/*
 * Free an item to the specified zone.
 *
 * No requirements.
 */
void
zfree(vm_zone_t z, void *item)
{
	globaldata_t gd = mycpu;
	int zmax;

	/*
	 * Avoid spinlock contention by freeing into a per-cpu queue
	 */
	if ((zmax = z->zmax) != 0)
		zmax = zmax / ncpus / 16;
	if (zmax < 64)
		zmax = 64;

	if (z->zfreecnt_pcpu[gd->gd_cpuid] < zmax) {
		crit_enter_gd(gd);
		((void **)item)[0] = z->zitems_pcpu[gd->gd_cpuid];
#ifdef INVARIANTS
		if (((void **)item)[1] == (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_ALREADYFREE);
		((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
		z->zitems_pcpu[gd->gd_cpuid] = item;
		++z->zfreecnt_pcpu[gd->gd_cpuid];
		crit_exit_gd(gd);
		return;
	}

	/*
	 * Per-zone spinlock for the remainder.
	 */
	spin_lock(&z->zlock);
	((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
	if (((void **)item)[1] == (void *)ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
	z->zitems = item;
	z->zfreecnt++;
	spin_unlock(&z->zlock);
}

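/*
 * Illustrative note (an addition, not part of the original source): the
 * per-cpu cap computed in zfree() scales with the zone's limit.  For
 * example, a zone with zmax = 65536 items on an 8-cpu system caches at
 * most 65536 / 8 / 16 = 512 items per cpu, while any zone whose computed
 * cap falls below 64 items, including unlimited zones (zmax == 0, where
 * the division is skipped entirely), is clamped up to a cap of 64.
 */
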
/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually perf degradation) occurred.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna, zinit, zbootinit are the initialization routines.
 * zalloc, zfree, are the allocation/free routines.
 */

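/*
 * Illustrative sketch (an addition; the structure below is hypothetical):
 * while an item sits on a free list the allocator overwrites its first
 * two pointer-sized words with the free-list link and, under INVARIANTS,
 * the ZENTRY_FREE magic.  A consumer relying on type stability must
 * therefore keep persistent data out of those words:
 *
 *	struct myobj {
 *		void	*mo_scratch[2];	(clobbered while on the free list)
 *		int	mo_stable;	(survives zfree/zalloc cycles)
 *	};
 */
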
LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages;
static long zone_kmem_kvaspace;

/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone was previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), allocate the kernel virtual space up front and allocate
 * pages only as needed.
 *
 * Arguments:
 *	z		pointer to zone structure.
 *	obj		pointer to VM object (opt).
 *	name		name of zone.
 *	size		size of zone entries.
 *	nentries	number of zone entries allocated (ZONE_INTERRUPT only).
 *	flags		ZONE_INTERRUPT -- items can be allocated at interrupt time.
 *	zalloc		number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The amount of memory allocatable is
 * unlimited if ZONE_INTERRUPT is not set.
 *
 * No requirements.
 */
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
	int nentries, int flags, int zalloc)
{
	size_t totsize;

	/*
	 * Only zones created with zinit() are destroyable.
	 */
	if (z->zflags & ZONE_DESTROYABLE)
		panic("zinitna: can't create destroyable zone");

	/*
	 * NOTE: We can only adjust zsize if we previously did not
	 *	 use zbootinit().
	 */
	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
		spin_init(&z->zlock);
		z->zfreecnt = 0;
		z->ztotal = 0;
		z->zmax = 0;
		z->zname = name;
		z->znalloc = 0;
		z->zitems = NULL;

		lwkt_gettoken(&vm_token);
		LIST_INSERT_HEAD(&zlist, z, zlink);
		lwkt_reltoken(&vm_token);

		bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
		bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));
	}

	z->zkmvec = NULL;
	z->zkmcur = z->zkmmax = 0;
	z->zflags |= flags;

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.  This is particularly required when creating
	 * an allocation space for map entries in kernel_map, because we
	 * do not want to go into a recursion deadlock with
	 * vm_map_entry_reserve().
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page((size_t)z->zsize * nentries);
		atomic_add_long(&zone_kmem_kvaspace, totsize);

		z->zkva = kmem_alloc_pageable(&kernel_map, totsize);
		if (z->zkva == 0) {
			LIST_REMOVE(z, zlink);
			return 0;
		}

		z->zpagemax = totsize / PAGE_SIZE;
		if (obj == NULL) {
			z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
		} else {
			z->zobj = obj;
			_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
		}
		z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT;
		z->zmax += nentries;
	} else {
		z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
		z->zmax = 0;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;

	z->zpagecount = 0;
	if (zalloc)
		z->zalloc = zalloc;
	else
		z->zalloc = 1;

	/*
	 * Populate the interrupt zone at creation time rather than
	 * on first allocation, as this is a potentially long operation.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		void *buf;

		buf = zget(z);
		zfree(z, buf);
	}

	return 1;
}

/*
 * Subroutine same as zinitna, except zone data structure is allocated
 * automatically by malloc.  This routine should normally be used, except
 * in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  Zinit is the standard zone
 * initialization call.
 *
 * No requirements.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
	vm_zone_t z;

	z = (vm_zone_t)kmalloc(sizeof(struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, NULL, name, size, nentries,
		    flags & ~ZONE_DESTROYABLE, zalloc) == 0) {
		kfree(z, M_ZONE);
		return NULL;
	}

	if (flags & ZONE_DESTROYABLE)
		z->zflags |= ZONE_DESTROYABLE;

	return z;
}

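/*
 * Hypothetical usage sketch (an addition, not from the original file):
 * a typical consumer creates its zone once at initialization time and
 * then allocates and frees items from it:
 *
 *	static vm_zone_t foo_zone;
 *
 *	foo_zone = zinit("FOOZONE", sizeof(struct foo), 0, 0, 4);
 *	if (foo_zone == NULL)
 *		panic("cannot create foo zone");
 *	fp = zalloc(foo_zone);
 *	...
 *	zfree(foo_zone, fp);
 *
 * With nentries 0 and no ZONE_INTERRUPT flag the zone grows on demand,
 * zalloc pages (here 4) at a time.
 */
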
/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 *
 * Called from the low level boot code only.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
	int i;

	bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
	bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));

	z->zname = name;
	z->zsize = size;
	z->zpagemax = 0;
	z->zobj = NULL;
	z->zflags = ZONE_BOOT;
	z->zfreemin = 0;
	z->zallocflag = 0;
	z->zpagecount = 0;
	z->zalloc = 0;
	z->znalloc = 0;
	spin_init(&z->zlock);

	bzero(item, (size_t)nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (uint8_t *)item + z->zsize;
	}
	z->zfreecnt = nitems;
	z->zmax = nitems;
	z->ztotal = nitems;

	lwkt_gettoken(&vm_token);
	LIST_INSERT_HEAD(&zlist, z, zlink);
	lwkt_reltoken(&vm_token);
}

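/*
 * Illustrative note (an addition): zbootinit() serves zones that are
 * needed before the kernel memory system is usable, with backing storage
 * carved from a static array rather than kernel_map.  The VM system's
 * own map-entry zone is primed this way, along the lines of:
 *
 *	static struct vm_map_entry map_entry_init[MAX_MAPENT];
 *
 *	zbootinit(mapentzone, "MAP ENTRY", sizeof(struct vm_map_entry),
 *		  map_entry_init, MAX_MAPENT);
 */
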
/*
 * Release all resources owned by zone created with zinit().
 *
 * No requirements.
 */
void
zdestroy(vm_zone_t z)
{
	int i;

	if (z == NULL)
		panic("zdestroy: null zone");
	if ((z->zflags & ZONE_DESTROYABLE) == 0)
		panic("zdestroy: undestroyable zone");

	lwkt_gettoken(&vm_token);
	LIST_REMOVE(z, zlink);
	lwkt_reltoken(&vm_token);

	/*
	 * Release virtual mappings, physical memory and update sysctl stats.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Pages mapped via pmap_kenter() must be removed from the
		 * kernel_pmap before calling kmem_free() to avoid issues
		 * with kernel_pmap.pm_stats.resident_count.
		 */
		pmap_qremove(z->zkva, z->zpagemax);

		/*
		 * Free the mapping.
		 */
		kmem_free(&kernel_map, z->zkva,
			  (size_t)z->zpagemax * PAGE_SIZE);
		atomic_subtract_long(&zone_kmem_kvaspace,
				     (size_t)z->zpagemax * PAGE_SIZE);

		/*
		 * Free the backing object and physical pages.
		 */
		vm_object_deallocate(z->zobj);
		atomic_subtract_int(&zone_kmem_pages, z->zpagecount);
	} else {
		for (i = 0; i < z->zkmcur; i++) {
			kmem_free(&kernel_map, z->zkmvec[i],
				  (size_t)z->zalloc * PAGE_SIZE);
			atomic_subtract_int(&zone_kern_pages, z->zalloc);
		}
		if (z->zkmvec != NULL)
			kfree(z->zkmvec, M_ZONE);
	}

	spin_uninit(&z->zlock);
	kfree(z, M_ZONE);
}

/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */

/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 *
 * No requirements.
 */
static void *
zget(vm_zone_t z)
{
	int i;
	vm_page_t m;
	int nitems;
	int savezpc;
	size_t nbytes;
	void *item;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Interrupt zones do not mess with the kernel_map, they
		 * simply populate an existing mapping.
		 */
		vm_object_hold(z->zobj);
		savezpc = z->zpagecount;
		nbytes = (size_t)z->zpagecount * PAGE_SIZE;
		nbytes -= nbytes % z->zsize;
		item = (char *)z->zkva + nbytes;
		for (i = 0; ((i < z->zalloc) && (z->zpagecount < z->zpagemax));
		     i++) {
			vm_offset_t zkva;

			m = vm_page_alloc(z->zobj, z->zpagecount,
					  z->zallocflag);
			/* note: z might be modified due to blocking */
			if (m == NULL)
				break;

			/*
			 * Unbusy page so it can be freed in zdestroy().  Make
			 * sure it is not on any queue and so cannot be
			 * recycled under our feet.
			 */
			KKASSERT(m->queue == PQ_NONE);
			vm_page_flag_clear(m, PG_BUSY);

			zkva = z->zkva + z->zpagecount * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m)); /* YYY */
			bzero((void *)zkva, PAGE_SIZE);
			KKASSERT(savezpc == z->zpagecount);
			++savezpc;
			z->zpagecount++;
			zone_kmem_pages++;
			vmstats.v_wire_count++;
		}
		nitems = (((size_t)z->zpagecount * PAGE_SIZE) - nbytes) /
			 z->zsize;
		vm_object_drop(z->zobj);
	} else if (z->zflags & ZONE_SPECIAL) {
		/*
		 * The special zone is the one used for vm_map_entry_t's.
		 * We have to avoid an infinite recursion in
		 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
		 * instead.  The map entries are pre-reserved by the kernel
		 * by vm_map_entry_reserve_cpu_init().
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes, KM_KRESERVE);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			zone_kern_pages += z->zalloc;	/* not MP-safe XXX */
			bzero(item, nbytes);
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	} else {
		/*
		 * Otherwise allocate KVA from the kernel_map.
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes, 0);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			zone_kern_pages += z->zalloc;	/* not MP-safe XXX */
			bzero(item, nbytes);

			if (z->zflags & ZONE_DESTROYABLE) {
				if (z->zkmcur == z->zkmmax) {
					z->zkmmax =
					    z->zkmmax == 0 ? 1 : z->zkmmax * 2;
					z->zkmvec = krealloc(z->zkmvec,
					    z->zkmmax * sizeof(z->zkmvec[0]),
					    M_ZONE, M_WAITOK);
				}
				z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
			}
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	}

	spin_lock(&z->zlock);
	z->ztotal += nitems;
	/*
	 * Save one for immediate allocation
	 */
	if (nitems != 0) {
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (uint8_t *)item + z->zsize;
		}
		z->zfreecnt += nitems;
		z->znalloc++;
	} else if (z->zfreecnt > 0) {
		item = z->zitems;
		z->zitems = ((void **)item)[0];
#ifdef INVARIANTS
		if (((void **)item)[1] != (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **)item)[1] = 0;
#endif
		z->zfreecnt--;
		z->znalloc++;
	} else {
		item = NULL;
	}
	spin_unlock(&z->zlock);

	/*
	 * A special zone may have used a kernel-reserved vm_map_entry.  If
	 * so we have to be sure to recover our reserve so we don't run out.
	 * We will panic if we run out.
	 */
	if (z->zflags & ZONE_SPECIAL)
		vm_map_entry_reserve(0);

	return item;
}

/*
 * No requirements.
 */
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	vm_zone_t curzone;
	char tmpbuf[128];
	char tmpname[14];

	ksnprintf(tmpbuf, sizeof(tmpbuf),
	    "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	lwkt_gettoken(&vm_token);
	LIST_FOREACH(curzone, &zlist, zlink) {
		int i;
		int len;
		int offset;

		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for (i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == LIST_FIRST(&zlist)) {
			offset = 1;
			tmpbuf[0] = '\n';
		}

		ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
		    "%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
		    tmpname, curzone->zsize, curzone->zmax,
		    (curzone->ztotal - curzone->zfreecnt),
		    curzone->zfreecnt, curzone->znalloc);

		len = strlen((char *)tmpbuf);
		if (LIST_NEXT(curzone, zlink) == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);

		if (error)
			break;
	}
	lwkt_reltoken(&vm_token);
	return (error);
}

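/*
 * Illustrative note (an addition; output shown is approximate): the
 * handler above backs the vm.zone sysctl, so the zone statistics can be
 * inspected from userland:
 *
 *	$ sysctl vm.zone
 *	vm.zone:
 *	ITEM            SIZE     LIMIT    USED    FREE  REQUESTS
 *	...
 *
 * The columns map to zsize, zmax, (ztotal - zfreecnt), zfreecnt and
 * znalloc, in that order.
 */
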
#if defined(INVARIANTS)

/*
 * Debugging only.
 */
void
zerror(int error)
{
	char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic("%s", msg);
}
#endif

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
	   NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages,
	   CTLFLAG_RD, &zone_kmem_pages, 0,
	   "Number of interrupt-safe pages allocated by zone");
SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_kvaspace,
	   CTLFLAG_RD, &zone_kmem_kvaspace, 0,
	   "KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages,
	   CTLFLAG_RD, &zone_kern_pages, 0,
	   "Number of non-interrupt-safe pages allocated by zone");