kernel - Fix overflows and races in zalloc()
sys/vm/vm_zone.c

/*
 * (MPSAFE)
 *
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 * $DragonFly: src/sys/vm/vm_zone.c,v 1.28 2008/01/23 17:35:48 nth Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define ZONE_ERROR_INVALID 0
#define ZONE_ERROR_NOTFREE 1
#define ZONE_ERROR_ALREADYFREE 2

#define ZONE_ROUNDING	32

#define ZENTRY_FREE	0x12342378

static void *zget(vm_zone_t z);

/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 *
 * No requirements.
 */
void *
zalloc(vm_zone_t z)
{
	globaldata_t gd = mycpu;
	void *item;

#ifdef INVARIANTS
	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);
#endif
	/*
	 * Avoid spinlock contention by allocating from a per-cpu queue
	 */
	if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
		crit_enter_gd(gd);
		if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
			item = z->zitems_pcpu[gd->gd_cpuid];
#ifdef INVARIANTS
			KASSERT(item != NULL,
				("zitems_pcpu unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
			((void **)item)[1] = 0;
#endif
			z->zitems_pcpu[gd->gd_cpuid] = ((void **) item)[0];
			--z->zfreecnt_pcpu[gd->gd_cpuid];
			z->znalloc++;
			crit_exit_gd(gd);
			return item;
		}
		crit_exit_gd(gd);
	}

	/*
	 * Per-zone spinlock for the remainder.
	 */
	spin_lock(&z->zlock);
	if (z->zfreecnt > z->zfreemin) {
		item = z->zitems;
#ifdef INVARIANTS
		KASSERT(item != NULL, ("zitems unexpectedly NULL"));
		if (((void **)item)[1] != (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **)item)[1] = 0;
#endif
		z->zitems = ((void **)item)[0];
		z->zfreecnt--;
		z->znalloc++;
		spin_unlock(&z->zlock);
	} else {
		spin_unlock(&z->zlock);
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	}
	return item;
}

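/*
 * Illustrative sketch (not part of the original file): the two ways a
 * caller typically consumes zalloc().  The zone handle "foo_zone" and
 * struct "foo" are hypothetical names used only for this example;
 * zfree() is the matching release routine defined below.
 */
#if 0	/* example only, compiled out */
static vm_zone_t foo_zone;		/* hypothetical, created with zinit() */

struct foo *
foo_alloc(void)
{
	struct foo *fp;

	/*
	 * Without ZONE_PANICFAIL the caller must handle a NULL return
	 * when the zone cannot be grown; with ZONE_PANICFAIL the kernel
	 * panics instead and the check could be omitted.
	 */
	fp = zalloc(foo_zone);
	if (fp == NULL)
		return (NULL);
	bzero(fp, sizeof(*fp));		/* recycled items are not re-zeroed */
	return (fp);
}

void
foo_free(struct foo *fp)
{
	zfree(foo_zone, fp);
}
#endif
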
/*
 * Free an item to the specified zone.
 *
 * No requirements.
 */
void
zfree(vm_zone_t z, void *item)
{
	globaldata_t gd = mycpu;
	int zmax;

	/*
	 * Avoid spinlock contention by freeing into a per-cpu queue
	 */
	if ((zmax = z->zmax) != 0)
		zmax = zmax / ncpus / 16;
	if (zmax < 64)
		zmax = 64;

	if (z->zfreecnt_pcpu[gd->gd_cpuid] < zmax) {
		crit_enter_gd(gd);
		((void **)item)[0] = z->zitems_pcpu[gd->gd_cpuid];
#ifdef INVARIANTS
		if (((void **)item)[1] == (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_ALREADYFREE);
		((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
		z->zitems_pcpu[gd->gd_cpuid] = item;
		++z->zfreecnt_pcpu[gd->gd_cpuid];
		crit_exit_gd(gd);
		return;
	}

	/*
	 * Per-zone spinlock for the remainder.
	 */
	spin_lock(&z->zlock);
	((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
	if (((void **)item)[1] == (void *)ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
	z->zitems = item;
	z->zfreecnt++;
	spin_unlock(&z->zlock);
}

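/*
 * Worked example of the per-cpu cache cap computed in zfree() above
 * (illustrative numbers only): with z->zmax = 65536 and ncpus = 4 each
 * cpu may hold up to 65536 / 4 / 16 = 1024 free items locally before
 * zfree() falls back to the spinlock-protected zone-wide list; an
 * unlimited zone (zmax == 0) still gets the minimum per-cpu cap of 64.
 */
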
/*
 * This file comprises a very simple zone allocator.  This is used
 * in lieu of the malloc allocator, where needed or more optimal.
 *
 * Note that the initial implementation of this allocator had page
 * coloring, which yielded no improvement (it actually degraded
 * performance).
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure can be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside in areas after the first two longwords.
 *
 * zinitna, zinit, and zbootinit are the initialization routines.
 * zalloc and zfree are the allocation/free routines.
 */

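/*
 * Illustrative sketch (not part of the original file) of the layout
 * restriction described above: while an item sits on a free list the
 * allocator overlays its first two longwords with the free-list link
 * and, under INVARIANTS, the ZENTRY_FREE magic, so only fields placed
 * after those two words remain valid across a zfree()/zalloc() cycle.
 * The structure name and fields are hypothetical.
 */
#if 0	/* example only, compiled out */
struct foo_typestable {
	void		*ft_scratch[2];	/* clobbered while item is free */
	struct spinlock	ft_spin;	/* type-stable, survives free/alloc */
	int		ft_generation;	/* type-stable, survives free/alloc */
};
#endif
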
LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages;
static long zone_kmem_kvaspace;

/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), a priori allocate the kernel virtual space, and allocate
 * only pages when needed.
 *
 * Arguments:
 *	z		pointer to zone structure.
 *	obj		pointer to VM object (opt).
 *	name		name of zone.
 *	size		size of zone entries.
 *	nentries	number of zone entries allocated (ZONE_INTERRUPT only).
 *	flags		ZONE_INTERRUPT -- items can be allocated at interrupt time.
 *	zalloc		number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The size of the memory allocatable is
 * unlimited if ZONE_INTERRUPT is not set.
 *
 * No requirements.
 */
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
	int nentries, int flags, int zalloc)
{
	size_t totsize;

	/*
	 * Only zones created with zinit() are destroyable.
	 */
	if (z->zflags & ZONE_DESTROYABLE)
		panic("zinitna: can't create destroyable zone");

	/*
	 * NOTE: We can only adjust zsize if we previously did not
	 *	 use zbootinit().
	 */
	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = (size + ZONE_ROUNDING - 1) & ~(ZONE_ROUNDING - 1);
		spin_init(&z->zlock);
		z->zfreecnt = 0;
		z->ztotal = 0;
		z->zmax = 0;
		z->zname = name;
		z->znalloc = 0;
		z->zitems = NULL;

		lwkt_gettoken(&vm_token);
		LIST_INSERT_HEAD(&zlist, z, zlink);
		lwkt_reltoken(&vm_token);

		bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
		bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));
	}

	z->zkmvec = NULL;
	z->zkmcur = z->zkmmax = 0;
	z->zflags |= flags;

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.  This is particularly required when creating
	 * an allocation space for map entries in kernel_map, because we
	 * do not want to go into a recursion deadlock with
	 * vm_map_entry_reserve().
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page((size_t)z->zsize * nentries);
		atomic_add_long(&zone_kmem_kvaspace, totsize);

		z->zkva = kmem_alloc_pageable(&kernel_map, totsize);
		if (z->zkva == 0) {
			LIST_REMOVE(z, zlink);
			return 0;
		}

		z->zpagemax = totsize / PAGE_SIZE;
		if (obj == NULL) {
			z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
		} else {
			z->zobj = obj;
			_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
		}
		z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT |
				VM_ALLOC_NORMAL | VM_ALLOC_RETRY;
		z->zmax += nentries;
	} else {
		z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
		z->zmax = 0;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;

	z->zpagecount = 0;
	if (zalloc)
		z->zalloc = zalloc;
	else
		z->zalloc = 1;

	/*
	 * Populate the interrupt zone at creation time rather than
	 * on first allocation, as this is a potentially long operation.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		void *buf;

		buf = zget(z);
		zfree(z, buf);
	}

	return 1;
}

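/*
 * Illustrative sketch (not part of the original file): creating an
 * interrupt-safe zone with zinitna() on caller-owned storage.  The
 * names (struct bar, bar_zone_store, BAR_MAXENTRIES) are hypothetical;
 * passing obj == NULL lets zinitna() allocate the backing VM object.
 */
#if 0	/* example only, compiled out */
#define BAR_MAXENTRIES	1024		/* hard limit for a ZONE_INTERRUPT zone */

struct bar {
	void	*b_scratch[2];		/* free-list overlay area */
	int	b_data;
};

static struct vm_zone bar_zone_store;	/* static storage is pre-zeroed */

static void
bar_zone_init(void)
{
	if (zinitna(&bar_zone_store, NULL, "barpl", sizeof(struct bar),
		    BAR_MAXENTRIES, ZONE_INTERRUPT, 1) == 0)
		panic("bar_zone_init: zinitna failed");
}
#endif
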
/*
 * Same as zinitna(), except the zone data structure is allocated
 * automatically by malloc.  This routine should normally be used,
 * except in certain tricky startup conditions in the VM system -- then
 * zbootinit and zinitna can be used.  zinit is the standard zone
 * initialization call.
 *
 * No requirements.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags, int zalloc)
{
	vm_zone_t z;

	z = (vm_zone_t) kmalloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, NULL, name, size, nentries,
		    flags & ~ZONE_DESTROYABLE, zalloc) == 0) {
		kfree(z, M_ZONE);
		return NULL;
	}

	if (flags & ZONE_DESTROYABLE)
		z->zflags |= ZONE_DESTROYABLE;

	return z;
}

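/*
 * Illustrative sketch (not part of the original file): the common
 * non-interrupt case via zinit().  nentries is ignored without
 * ZONE_INTERRUPT and the zone grows on demand, zalloc pages at a time.
 * All names are hypothetical.
 */
#if 0	/* example only, compiled out */
struct baz {
	void	*bz_scratch[2];
	int	bz_refs;
};

static vm_zone_t baz_zone;

static void
baz_zone_init(void)
{
	/* grow by 4 pages of fresh KVA whenever the zone runs dry */
	baz_zone = zinit("bazpl", sizeof(struct baz), 0, 0, 4);
	if (baz_zone == NULL)
		panic("baz_zone_init: zinit failed");
}
#endif
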
/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 *
 * Called from the low level boot code only.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
	int i;

	bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
	bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));

	z->zname = name;
	z->zsize = size;
	z->zpagemax = 0;
	z->zobj = NULL;
	z->zflags = ZONE_BOOT;
	z->zfreemin = 0;
	z->zallocflag = 0;
	z->zpagecount = 0;
	z->zalloc = 0;
	z->znalloc = 0;
	spin_init(&z->zlock);

	bzero(item, (size_t)nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (uint8_t *)item + z->zsize;
	}
	z->zfreecnt = nitems;
	z->zmax = nitems;
	z->ztotal = nitems;

	lwkt_gettoken(&vm_token);
	LIST_INSERT_HEAD(&zlist, z, zlink);
	lwkt_reltoken(&vm_token);
}

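/*
 * Illustrative sketch (not part of the original file): bootstrapping a
 * zone from a static array before the VM system is up, after which
 * zinitna() can be called on the same zone to finish initialization.
 * The names and element count are hypothetical.
 */
#if 0	/* example only, compiled out */
#define QUX_BOOT_ITEMS	64

struct qux {
	void	*qx_scratch[2];
	int	qx_state;
};

static struct vm_zone	qux_zone_store;
static struct qux	qux_boot_items[QUX_BOOT_ITEMS];

static void
qux_zone_bootinit(void)
{
	zbootinit(&qux_zone_store, "quxpl", sizeof(struct qux),
		  qux_boot_items, QUX_BOOT_ITEMS);
}
#endif
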
/*
 * Release all resources owned by zone created with zinit().
 *
 * No requirements.
 */
void
zdestroy(vm_zone_t z)
{
	vm_page_t m;
	int i;

	if (z == NULL)
		panic("zdestroy: null zone");
	if ((z->zflags & ZONE_DESTROYABLE) == 0)
		panic("zdestroy: undestroyable zone");

	lwkt_gettoken(&vm_token);
	LIST_REMOVE(z, zlink);
	lwkt_reltoken(&vm_token);

	/*
	 * Release virtual mappings, physical memory and update sysctl stats.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Pages mapped via pmap_kenter() must be removed from the
		 * kernel_pmap() before calling kmem_free() to avoid issues
		 * with kernel_pmap.pm_stats.resident_count.
		 */
		pmap_qremove(z->zkva, z->zpagemax);
		vm_object_hold(z->zobj);
		for (i = 0; i < z->zpagecount; ++i) {
			m = vm_page_lookup_busy_wait(z->zobj, i, TRUE, "vmzd");
			vm_page_unwire(m, 0);
			vm_page_free(m);
		}

		/*
		 * Free the mapping.
		 */
		kmem_free(&kernel_map, z->zkva,
			  (size_t)z->zpagemax * PAGE_SIZE);
		atomic_subtract_long(&zone_kmem_kvaspace,
				     (size_t)z->zpagemax * PAGE_SIZE);

		/*
		 * Free the backing object and physical pages.
		 */
		vm_object_deallocate(z->zobj);
		vm_object_drop(z->zobj);
		atomic_subtract_int(&zone_kmem_pages, z->zpagecount);
	} else {
		for (i = 0; i < z->zkmcur; i++) {
			kmem_free(&kernel_map, z->zkmvec[i],
				  (size_t)z->zalloc * PAGE_SIZE);
			atomic_subtract_int(&zone_kern_pages, z->zalloc);
		}
		if (z->zkmvec != NULL)
			kfree(z->zkmvec, M_ZONE);
	}

	spin_uninit(&z->zlock);
	kfree(z, M_ZONE);
}

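/*
 * Illustrative sketch (not part of the original file): only zones
 * created through zinit() with ZONE_DESTROYABLE may be torn down again
 * with zdestroy().  Names are hypothetical.
 */
#if 0	/* example only, compiled out */
struct quux {
	void	*qq_scratch[2];
	int	qq_unit;
};

static vm_zone_t quux_zone;

static void
quux_attach(void)
{
	quux_zone = zinit("quuxpl", sizeof(struct quux), 0,
			  ZONE_DESTROYABLE, 2);
}

static void
quux_detach(void)
{
	if (quux_zone != NULL) {
		zdestroy(quux_zone);
		quux_zone = NULL;
	}
}
#endif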

/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */

/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 *
 * No requirements.
 */
static void *
zget(vm_zone_t z)
{
	int i;
	vm_page_t m;
	int nitems;
	int npages;
	int savezpc;
	size_t nbytes;
	size_t noffset;
	void *item;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Interrupt zones do not mess with the kernel_map, they
		 * simply populate an existing mapping.
		 *
		 * First reserve the required space.
		 */
		vm_object_hold(z->zobj);
		noffset = (size_t)z->zpagecount * PAGE_SIZE;
		noffset -= noffset % z->zsize;
		savezpc = z->zpagecount;
		if (z->zpagecount + z->zalloc > z->zpagemax)
			z->zpagecount = z->zpagemax;
		else
			z->zpagecount += z->zalloc;
		item = (char *)z->zkva + noffset;
		npages = z->zpagecount - savezpc;
		nitems = ((size_t)(savezpc + npages) * PAGE_SIZE - noffset) /
			 z->zsize;
		atomic_add_int(&zone_kmem_pages, npages);

		/*
		 * Now allocate the pages.  Note that we can block in the
		 * loop, so we've already done all the necessary calculations
		 * and reservations above.
		 */
		for (i = 0; i < npages; ++i) {
			vm_offset_t zkva;

			m = vm_page_alloc(z->zobj, savezpc + i, z->zallocflag);
			KKASSERT(m != NULL);
			/* note: z might be modified due to blocking */

			KKASSERT(m->queue == PQ_NONE);
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_wire(m);
			vm_page_wakeup(m);

			zkva = z->zkva + (size_t)(savezpc + i) * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
			bzero((void *)zkva, PAGE_SIZE);
		}
		vm_object_drop(z->zobj);
	} else if (z->zflags & ZONE_SPECIAL) {
		/*
		 * The special zone is the one used for vm_map_entry_t's.
		 * We have to avoid an infinite recursion in
		 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
		 * instead.  The map entries are pre-reserved by the kernel
		 * by vm_map_entry_reserve_cpu_init().
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes, KM_KRESERVE);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			zone_kern_pages += z->zalloc;	/* not MP-safe XXX */
			bzero(item, nbytes);
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	} else {
		/*
		 * Otherwise allocate KVA from the kernel_map.
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes, 0);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			zone_kern_pages += z->zalloc;	/* not MP-safe XXX */
			bzero(item, nbytes);

			if (z->zflags & ZONE_DESTROYABLE) {
				if (z->zkmcur == z->zkmmax) {
					z->zkmmax =
						z->zkmmax==0 ? 1 : z->zkmmax*2;
					z->zkmvec = krealloc(z->zkmvec,
						z->zkmmax * sizeof(z->zkmvec[0]),
						M_ZONE, M_WAITOK);
				}
				z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
			}
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	}

	spin_lock(&z->zlock);
	z->ztotal += nitems;
	/*
	 * Save one for immediate allocation
	 */
	if (nitems != 0) {
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (uint8_t *)item + z->zsize;
		}
		z->zfreecnt += nitems;
		z->znalloc++;
	} else if (z->zfreecnt > 0) {
		item = z->zitems;
		z->zitems = ((void **)item)[0];
#ifdef INVARIANTS
		if (((void **)item)[1] != (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = 0;
#endif
		z->zfreecnt--;
		z->znalloc++;
	} else {
		item = NULL;
	}
	spin_unlock(&z->zlock);

	/*
	 * A special zone may have used a kernel-reserved vm_map_entry.  If
	 * so we have to be sure to recover our reserve so we don't run out.
	 * We will panic if we run out.
	 */
	if (z->zflags & ZONE_SPECIAL)
		vm_map_entry_reserve(0);

	return item;
}

/*
 * No requirements.
 */
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	vm_zone_t curzone;
	char tmpbuf[128];
	char tmpname[14];

	ksnprintf(tmpbuf, sizeof(tmpbuf),
	    "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	lwkt_gettoken(&vm_token);
	LIST_FOREACH(curzone, &zlist, zlink) {
		int i;
		int len;
		int offset;

		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for (i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == LIST_FIRST(&zlist)) {
			offset = 1;
			tmpbuf[0] = '\n';
		}

		ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
		    "%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
		    tmpname, curzone->zsize, curzone->zmax,
		    (curzone->ztotal - curzone->zfreecnt),
		    curzone->zfreecnt, curzone->znalloc);

		len = strlen((char *)tmpbuf);
		if (LIST_NEXT(curzone, zlink) == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);

		if (error)
			break;
	}
	lwkt_reltoken(&vm_token);
	return (error);
}

#if defined(INVARIANTS)

/*
 * Debugging only.
 */
void
zerror(int error)
{
	char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic(msg);
}
#endif

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
	   NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages,
	   CTLFLAG_RD, &zone_kmem_pages, 0,
	   "Number of interrupt safe pages allocated by zone");
SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_kvaspace,
	    CTLFLAG_RD, &zone_kmem_kvaspace, 0,
	    "KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages,
	   CTLFLAG_RD, &zone_kern_pages, 0,
	   "Number of non-interrupt safe pages allocated by zone");
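
/*
 * Usage note (added for illustration, not part of the original file):
 * the handler above is exported as the read-only string node vm.zone,
 * so "sysctl vm.zone" from userland prints one line per zone in the
 * ITEM/SIZE/LIMIT/USED/FREE/REQUESTS format built by sysctl_vm_zone().
 */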