/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

static void vm_page_queue_init (void);
static vm_page_t vm_page_select_cache (vm_object_t, vm_pindex_t);

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

static struct vm_page **vm_page_buckets; /* Array of buckets */
static int vm_page_bucket_count;	/* How big is array? */
static int vm_page_hash_mask;		/* Mask for hash function */
static volatile int vm_page_bucket_generation;

struct vpgqueues vm_page_queues[PQ_COUNT];

static void
vm_page_queue_init(void)
{
	int i;

	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_FREE+i].cnt = &cnt.v_free_count;

	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
	vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count;

	for (i = 0; i < PQ_L2_SIZE; i++)
		vm_page_queues[PQ_CACHE+i].cnt = &cnt.v_cache_count;

	for (i = 0; i < PQ_COUNT; i++)
		TAILQ_INIT(&vm_page_queues[i].pl);
}

vm_page_t vm_page_array = 0;
int vm_page_array_size = 0;
long first_page = 0;
int vm_page_zero_count = 0;

static __inline int vm_page_hash (vm_object_t object, vm_pindex_t pindex);
static void vm_page_free_wakeup (void);

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 */
void
vm_set_page_size(void)
{
	if (cnt.v_page_size == 0)
		cnt.v_page_size = PAGE_SIZE;
	if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}
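
/*
 * Worked example of the power-of-two test above (illustrative only):
 * a value x is a power of two iff x != 0 and (x & (x - 1)) == 0,
 * because subtracting one flips the lowest set bit and every bit
 * below it.  Assuming a 4K page:
 *
 *	0x1000 & 0x0fff == 0		-> accepted
 *	0x1800 & 0x17ff == 0x1000	-> panics
 */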

/*
 *	vm_add_new_page:
 *
 *	Add a new page to the freelist for use by the system.
 *	Must be called at splhigh().
 */
vm_page_t
vm_add_new_page(vm_offset_t pa)
{
	vm_page_t m;

	++cnt.v_page_count;
	++cnt.v_free_count;
	m = PHYS_TO_VM_PAGE(pa);
	m->phys_addr = pa;
	m->flags = 0;
	m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
	m->queue = m->pc + PQ_FREE;
	TAILQ_INSERT_HEAD(&vm_page_queues[m->queue].pl, m, pageq);
	vm_page_queues[m->queue].lcnt++;
	return (m);
}

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
vm_offset_t
vm_page_startup(vm_offset_t starta, vm_offset_t enda, vm_offset_t vaddr)
{
	vm_offset_t mapped;
	struct vm_page **bucket;
	vm_size_t npages, page_range;
	vm_offset_t new_end;
	int i;
	vm_offset_t pa;
	int nblocks;
	vm_offset_t last_pa;

	/* the biggest memory array is the second group of pages */
	vm_offset_t end;
	vm_offset_t biggestone, biggestsize;
	vm_offset_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		int size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	end = phys_avail[biggestone+1];

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */
	vm_page_queue_init();

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the next power of 2 greater than the number of physical pages in
	 * the system.
	 *
	 * We make the hash table approximately 2x the number of pages to
	 * reduce the chain length.  This is about the same size using the
	 * singly-linked list as the 1x hash table we were using before
	 * using TAILQ but the chain length will be smaller.
	 *
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct vm_page **)vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_bucket_count <<= 1;	/* approximately 2x the page count */
	vm_page_hash_mask = vm_page_bucket_count - 1;
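
	/*
	 * Illustrative sizing example (assumes a 128MB machine with 4K
	 * pages, i.e. atop(total) == 32768): the loop above leaves
	 * vm_page_bucket_count at 32768, the extra shift doubles it to
	 * 65536 buckets, and vm_page_hash_mask becomes 0xffff, so on
	 * average each bucket chains about half a page.
	 */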

	/*
	 * Validate these addresses.
	 */
	new_end = end - vm_page_bucket_count * sizeof(struct vm_page *);
	new_end = trunc_page(new_end);
	mapped = round_page(vaddr);
	vaddr = pmap_map(mapped, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vaddr = round_page(vaddr);
	bzero((caddr_t) mapped, vaddr - mapped);

	for (i = 0; i < vm_page_bucket_count; i++) {
		*bucket = NULL;
		bucket++;
	}

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = phys_avail[0] / PAGE_SIZE;
	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (end - new_end)) / PAGE_SIZE;

	end = new_end;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 * Validate these addresses.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	/*
	 * Construct the free queue(s) in descending order (by physical
	 * address) so that the first 16MB of physical memory is allocated
	 * last rather than first.  On large-memory machines, this avoids
	 * the exhaustion of low physical memory before isa_dmainit has run.
	 */
	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		pa = phys_avail[i];
		if (i == biggestone)
			last_pa = new_end;
		else
			last_pa = phys_avail[i + 1];
		while (pa < last_pa && npages-- > 0) {
			vm_add_new_page(pa);
			pa += PAGE_SIZE;
		}
	}
	return (mapped);
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
 *	This routine may not block.
 *
 *	We try to randomize the hash based on the object to spread the pages
 *	out in the hash table without it costing us too much.
 */
static __inline int
vm_page_hash(vm_object_t object, vm_pindex_t pindex)
{
	int i = ((uintptr_t)object + pindex) ^ object->hash_rand;

	return(i & vm_page_hash_mask);
}
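
/*
 * Example of the hash in action (all values illustrative): with an
 * object at 0xc1234000 whose hash_rand is 0x5eed and a mask of 0xffff,
 * pindex 7 maps to bucket
 *
 *	((0xc1234000 + 7) ^ 0x5eed) & 0xffff == 0x1eea
 *
 * Successive pindexes land in successive buckets (only the low bits
 * change), so the pages of one object do not pile up in a single
 * chain, while hash_rand decorrelates different objects.
 */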

void
vm_page_unhold(vm_page_t mem)
{
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
	if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
		vm_page_free_toq(mem);
}

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The pagetables are not updated but will presumably fault the page
 *	in if necessary, or if a kernel page the caller will at some point
 *	enter the page into the kernel's pmap.  We are not allowed to block
 *	here so we *can't* do this anyway.
 *
 *	The object and page must be locked, and must be splhigh.
 *	This routine may not block.
 */
void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	struct vm_page **bucket;

	if (m->object != NULL)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */
	m->object = object;
	m->pindex = pindex;

	/*
	 * Insert it into the object_object/offset hash table
	 */
	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	m->hnext = *bucket;
	*bucket = m;
	vm_page_bucket_generation++;

	/*
	 * Now link into the object's list of backed pages.
	 */
	TAILQ_INSERT_TAIL(&object->memq, m, listq);
	object->generation++;

	/*
	 * And show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
	 */
	if (m->flags & PG_WRITEABLE)
		vm_object_set_writeable_dirty(object);
}

/*
 *	vm_page_remove:
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list, but does not invalidate/terminate
 *	the backing store.
 *
 *	The object and page must be locked, and at splhigh.
 *	The underlying pmap entry (if any) is NOT removed here.
 *	This routine may not block.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;

	if (m->object == NULL)
		return;

	if ((m->flags & PG_BUSY) == 0) {
		panic("vm_page_remove: page not busy");
	}

	/*
	 * Basically destroy the page.
	 */
	vm_page_wakeup(m);

	object = m->object;

	/*
	 * Remove from the object_object/offset hash table.  The object
	 * must be on the hash queue, we will panic if it isn't
	 *
	 * Note: we must NULL-out m->hnext to prevent loops in detached
	 * buffers with vm_page_lookup().
	 */
	{
		struct vm_page **bucket;

		bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
		while (*bucket != m) {
			if (*bucket == NULL)
				panic("vm_page_remove(): page not found in hash");
			bucket = &(*bucket)->hnext;
		}
		*bucket = m->hnext;
		m->hnext = NULL;
		vm_page_bucket_generation++;
	}

	/*
	 * Now remove from the object's list of backed pages.
	 */
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;
	object->generation++;

	m->object = NULL;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	NOTE: the code below does not lock.  It will operate properly if
 *	an interrupt makes a change, but the generation algorithm will not
 *	operate properly in an SMP environment where both cpu's are able to run
 *	kernel code simultaneously.
 *
 *	The object must be locked.  No side effects.
 *	This routine may not block.
 *	This is a critical path routine
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;
	struct vm_page **bucket;
	int generation;

	/*
	 * Search the hash table for this object/offset pair
	 */
retry:
	generation = vm_page_bucket_generation;
	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	for (m = *bucket; m != NULL; m = m->hnext) {
		if ((m->object == object) && (m->pindex == pindex)) {
			if (vm_page_bucket_generation != generation)
				goto retry;
			return (m);
		}
	}
	if (vm_page_bucket_generation != generation)
		goto retry;
	return (NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 *	This routine may not block.
 *
 *	Note: this routine will raise itself to splvm(), the caller need not.
 *
 *	Note: swap associated with the page must be invalidated by the move.  We
 *	      have to do this for several reasons: (1) we aren't freeing the
 *	      page, (2) we are dirtying the page, (3) the VM system is probably
 *	      moving the page from object A to B, and will then later move
 *	      the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	      fact that we moved it, and because we may be invalidating
 *	      swap.  If the page is on the cache, we have to deactivate it
 *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
 *	      on the cache queue.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
	int s;

	s = splvm();
	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	if (m->queue - m->pc == PQ_CACHE)
		vm_page_deactivate(m);
	vm_page_dirty(m);
	splx(s);
}

/*
 *	vm_page_unqueue_nowakeup:
 *
 *	vm_page_unqueue() without any wakeup
 *
 *	This routine must be called at splhigh().
 *	This routine may not block.
 */
void
vm_page_unqueue_nowakeup(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		pq = &vm_page_queues[queue];
		m->queue = PQ_NONE;
		TAILQ_REMOVE(&pq->pl, m, pageq);
		(*pq->cnt)--;
		pq->lcnt--;
	}
}

/*
 *	vm_page_unqueue:
 *
 *	Remove a page from its queue.
 *
 *	This routine must be called at splhigh().
 *	This routine may not block.
 */
void
vm_page_unqueue(vm_page_t m)
{
	int queue = m->queue;
	struct vpgqueues *pq;

	if (queue != PQ_NONE) {
		m->queue = PQ_NONE;
		pq = &vm_page_queues[queue];
		TAILQ_REMOVE(&pq->pl, m, pageq);
		(*pq->cnt)--;
		pq->lcnt--;
		if ((queue - m->pc) == PQ_CACHE) {
			if (vm_paging_needed())
				pagedaemon_wakeup();
		}
	}
}

/*
 *	vm_page_list_find:
 *
 *	Find a page on the specified queue with color optimization.
 *
 *	The page coloring optimization attempts to locate a page
 *	that does not overload other nearby pages in the object in
 *	the cpu's L1 or L2 caches.  We need this optimization because
 *	cpu caches tend to be physical caches, while object spaces tend
 *	to be virtual.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 *
 *	This routine may only be called from the vm_page_list_find() macro
 *	in vm_page.h
 */
vm_page_t
_vm_page_list_find(int basequeue, int index)
{
	int i;
	vm_page_t m = NULL;
	struct vpgqueues *pq;

	pq = &vm_page_queues[basequeue];

	/*
	 * Note that for the first loop, index+i and index-i wind up at the
	 * same place.  Even though this is not totally optimal, we've already
	 * blown it by missing the cache case so we do not care.
	 */
	for (i = PQ_L2_SIZE / 2; i > 0; --i) {
		if ((m = TAILQ_FIRST(&pq[(index + i) & PQ_L2_MASK].pl)) != NULL)
			break;

		if ((m = TAILQ_FIRST(&pq[(index - i) & PQ_L2_MASK].pl)) != NULL)
			break;
	}
	return(m);
}
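
/*
 * Search-order sketch (illustrative, assuming PQ_L2_SIZE == 16 so
 * PQ_L2_MASK == 15): for index 5 the loop probes colors
 *
 *	13, 13, 12, 14, 11, 15, ..., 6, 4	(i = 8 first, then 7, ...)
 *
 * i.e. (5+8)&15 == 13 and (5-8)&15 == 13 coincide on the first pass,
 * after which the probes walk outward symmetrically until some color
 * queue is non-empty.  The masked subtraction wraps negative values,
 * e.g. (5-7)&15 == 14, keeping every probe inside the color array.
 */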

/*
 *	vm_page_select_cache:
 *
 *	Find a page on the cache queue with color optimization.  As pages
 *	might be found, but not applicable, they are deactivated.  This
 *	keeps us from using potentially busy cached pages.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 */
static vm_page_t
vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	while (TRUE) {
		m = vm_page_list_find(
		    PQ_CACHE,
		    (pindex + object->pg_color) & PQ_L2_MASK,
		    FALSE
		);
		if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
		    m->hold_count || m->wire_count)) {
			vm_page_deactivate(m);
			continue;
		}
		return m;
	}
}

/*
 *	vm_page_select_free:
 *
 *	Find a free or zero page, with specified preference.  We attempt to
 *	inline the nominal case and fall back to _vm_page_select_free()
 *	otherwise.
 *
 *	This routine must be called at splvm().
 *	This routine may not block.
 */
static __inline vm_page_t
vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
{
	vm_page_t m;

	m = vm_page_list_find(
	    PQ_FREE,
	    (pindex + object->pg_color) & PQ_L2_MASK,
	    prefer_zero
	);
	return(m);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	page_req classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *	VM_ALLOC_ZERO		zero page
 *
 *	Object must be locked.
 *	This routine may not block.
 *
 *	Additional special handling is required when called from an
 *	interrupt (VM_ALLOC_INTERRUPT).  We are not allowed to mess with
 *	the page cache in this case.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
{
	vm_page_t m = NULL;
	int s;

	KASSERT(!vm_page_lookup(object, pindex),
		("vm_page_alloc: page already allocated"));

	/*
	 * The pager is allowed to eat deeper into the free page list.
	 */
	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
		page_req = VM_ALLOC_SYSTEM;
	}

	s = splvm();

loop:
	if (cnt.v_free_count > cnt.v_free_reserved) {
		/*
		 * Allocate from the free queue if there are plenty of pages
		 * in it.
		 */
		if (page_req == VM_ALLOC_ZERO)
			m = vm_page_select_free(object, pindex, TRUE);
		else
			m = vm_page_select_free(object, pindex, FALSE);
	} else if (
	    (page_req == VM_ALLOC_SYSTEM &&
	     cnt.v_cache_count == 0 &&
	     cnt.v_free_count > cnt.v_interrupt_free_min) ||
	    (page_req == VM_ALLOC_INTERRUPT && cnt.v_free_count > 0)
	) {
		/*
		 * Interrupt or system, dig deeper into the free list.
		 */
		m = vm_page_select_free(object, pindex, FALSE);
	} else if (page_req != VM_ALLOC_INTERRUPT) {
		/*
		 * Allocatable from cache (non-interrupt only).  On success,
		 * we must free the page and try again, thus ensuring that
		 * cnt.v_*_free_min counters are replenished.
		 */
		m = vm_page_select_cache(object, pindex);
		if (m == NULL) {
			splx(s);
#if defined(DIAGNOSTIC)
			if (cnt.v_cache_count > 0)
				printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
#endif
			vm_pageout_deficit++;
			pagedaemon_wakeup();
			return (NULL);
		}
		KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
		vm_page_busy(m);
		vm_page_protect(m, VM_PROT_NONE);
		vm_page_free(m);
		goto loop;
	} else {
		/*
		 * Not allocatable from cache from interrupt, give up.
		 */
		splx(s);
		vm_pageout_deficit++;
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 * At this point we had better have found a good page.
	 */
	KASSERT(
	    m != NULL,
	    ("vm_page_alloc(): missing page on free queue\n")
	);

	/*
	 * Remove from free queue
	 */
	vm_page_unqueue_nowakeup(m);

	/*
	 * Initialize structure.  Only the PG_ZERO flag is inherited.
	 */
	if (m->flags & PG_ZERO) {
		vm_page_zero_count--;
		m->flags = PG_ZERO | PG_BUSY;
	} else {
		m->flags = PG_BUSY;
	}
	m->wire_count = 0;
	m->hold_count = 0;
	m->act_count = 0;
	m->busy = 0;
	m->valid = 0;
	KASSERT(m->dirty == 0, ("vm_page_alloc: free/cache page %p was dirty", m));

	/*
	 * vm_page_insert() is safe prior to the splx().  Note also that
	 * inserting a page here does not insert it into the pmap (which
	 * could cause us to block allocating memory).  We cannot block
	 * anywhere.
	 */
	vm_page_insert(m, object, pindex);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (vm_paging_needed())
		pagedaemon_wakeup();

	splx(s);

	return (m);
}

/*
 *	vm_wait:	(also see VM_WAIT macro)
 *
 *	Block until free pages are available for allocation
 *	- Called in various places before memory allocations.
 */
void
vm_wait(void)
{
	int s;

	s = splvm();
	if (curproc == pageproc) {
		vm_pageout_pages_needed = 1;
		tsleep(&vm_pageout_pages_needed, PSWP, "VMWait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed = 1;
			wakeup(&vm_pages_needed);
		}
		tsleep(&cnt.v_free_count, PVM, "vmwait", 0);
	}
	splx(s);
}

/*
 *	vm_waitpfault:	(also see VM_WAITPFAULT macro)
 *
 *	Block until free pages are available for allocation
 *	- Called only in vm_fault so that processes page faulting
 *	  can be easily tracked.
 *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 *	  processes will be able to grab memory first.  Do not change
 *	  this balance without careful testing first.
 */
void
vm_waitpfault(void)
{
	int s;

	s = splvm();
	if (!vm_pages_needed) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
	tsleep(&cnt.v_free_count, PUSER, "pfault", 0);
	splx(s);
}

/*
 *	vm_await:	(also see VM_AWAIT macro)
 *
 *	asleep on an event that will signal when free pages are available
 *	for allocation.
 */
void
vm_await(void)
{
	int s;

	s = splvm();
	if (curproc == pageproc) {
		vm_pageout_pages_needed = 1;
		asleep(&vm_pageout_pages_needed, PSWP, "vmwait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed = 1;
			wakeup(&vm_pages_needed);
		}
		asleep(&cnt.v_free_count, PVM, "vmwait", 0);
	}
	splx(s);
}

/*
 *	vm_page_sleep:
 *
 *	Block until page is no longer busy.
 */
int
vm_page_sleep(vm_page_t m, char *msg, char *busy)
{
	int slept = 0;

	if ((busy && *busy) || (m->flags & PG_BUSY)) {
		int s;

		s = splvm();
		if ((busy && *busy) || (m->flags & PG_BUSY)) {
			vm_page_flag_set(m, PG_WANTED);
			tsleep(m, PVM, msg, 0);
			slept = 1;
		}
		splx(s);
	}
	return slept;
}

/*
 *	vm_page_asleep:
 *
 *	Similar to vm_page_sleep(), but does not block.  Returns 0 if
 *	the page is not busy, or 1 if the page is busy.
 *
 *	This routine has the side effect of calling asleep() if the page
 *	was busy (1 returned).
 */
int
vm_page_asleep(vm_page_t m, char *msg, char *busy)
{
	int slept = 0;

	if ((busy && *busy) || (m->flags & PG_BUSY)) {
		int s;

		s = splvm();
		if ((busy && *busy) || (m->flags & PG_BUSY)) {
			vm_page_flag_set(m, PG_WANTED);
			asleep(m, PVM, msg, 0);
			slept = 1;
		}
		splx(s);
	}
	return slept;
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *	Ensure that act_count is at least ACT_INIT but do not otherwise
 *	mess with it.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_activate(vm_page_t m)
{
	int s;

	s = splvm();
	if (m->queue != PQ_ACTIVE) {
		if ((m->queue - m->pc) == PQ_CACHE)
			cnt.v_reactivated++;

		vm_page_unqueue(m);

		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
			m->queue = PQ_ACTIVE;
			vm_page_queues[PQ_ACTIVE].lcnt++;
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			cnt.v_active_count++;
		}
	} else {
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
	}

	splx(s);
}

/*
 *	vm_page_free_wakeup:
 *
 *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
 *	routine is called when a page has been added to the cache or free
 *	queues.
 *
 *	This routine may not block.
 *	This routine must be called at splvm()
 */
static __inline void
vm_page_free_wakeup(void)
{
	/*
	 * if pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed &&
	    cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}
	/*
	 * wakeup processes that are waiting on memory if we hit a
	 * high water mark.  And wakeup scheduler process if we have
	 * lots of memory.  this process will swapin processes.
	 */
	if (vm_pages_needed && !vm_page_count_min()) {
		vm_pages_needed = 0;
		wakeup(&cnt.v_free_count);
	}
}

/*
 *	vm_page_free_toq:
 *
 *	Returns the given page to the PQ_FREE list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 *	This routine may not block.
 */
void
vm_page_free_toq(vm_page_t m)
{
	int s;
	struct vpgqueues *pq;
	vm_object_t object = m->object;

	s = splvm();

	cnt.v_tfree++;

	if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
		printf(
		"vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
		    (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
		    m->hold_count);
		if ((m->queue - m->pc) == PQ_FREE)
			panic("vm_page_free: freeing free page");
		else
			panic("vm_page_free: freeing busy page");
	}

	/*
	 * unqueue, then remove page.  Note that we cannot destroy
	 * the page here because we do not want to call the pager's
	 * callback routine until after we've put the page on the
	 * appropriate free queue.
	 */
	vm_page_unqueue_nowakeup(m);
	vm_page_remove(m);

	/*
	 * If fictitious remove object association and
	 * return, otherwise delay object association removal.
	 */
	if ((m->flags & PG_FICTITIOUS) != 0) {
		splx(s);
		return;
	}

	m->valid = 0;
	vm_page_undirty(m);

	if (m->wire_count != 0) {
		if (m->wire_count > 1) {
			panic("vm_page_free: invalid wire count (%d), pindex: 0x%lx",
			    m->wire_count, (long)m->pindex);
		}
		panic("vm_page_free: freeing wired page\n");
	}

	/*
	 * If we've exhausted the object's resident pages we want to free
	 * it up.
	 */
	if (object &&
	    (object->type == OBJT_VNODE) &&
	    ((object->flags & OBJ_DEAD) == 0)
	) {
		struct vnode *vp = (struct vnode *)object->handle;

		if (vp && VSHOULDFREE(vp))
			vfree(vp);
	}

	/*
	 * Clear the UNMANAGED flag when freeing an unmanaged page.
	 */
	if (m->flags & PG_UNMANAGED) {
		m->flags &= ~PG_UNMANAGED;
	} else {
#ifdef __alpha__
		pmap_page_is_free(m);
#endif
	}

	if (m->hold_count != 0) {
		m->flags &= ~PG_ZERO;
		m->queue = PQ_HOLD;
	} else {
		m->queue = PQ_FREE + m->pc;
	}
	pq = &vm_page_queues[m->queue];
	pq->lcnt++;
	++(*pq->cnt);

	/*
	 * Put zero'd pages on the end ( where we look for zero'd pages
	 * first ) and non-zero'd pages at the head.
	 */
	if (m->flags & PG_ZERO) {
		TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
		++vm_page_zero_count;
	} else {
		TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
	}
	vm_page_free_wakeup();
	splx(s);
}

/*
 *	vm_page_unmanage:
 *
 *	Prevent PV management from being done on the page.  The page is
 *	removed from the paging queues as if it were wired, and as a
 *	consequence of no longer being managed the pageout daemon will not
 *	touch it (since there is no way to locate the pte mappings for the
 *	page).  madvise() calls that mess with the pmap will also no longer
 *	operate on the page.
 *
 *	Beyond that the page is still reasonably 'normal'.  Freeing the page
 *	will clear the flag.
 *
 *	This routine is used by OBJT_PHYS objects - objects using unswappable
 *	physical memory as backing store rather than swap-backed memory and
 *	will eventually be extended to support 4MB unmanaged physical
 *	mappings.
 */
void
vm_page_unmanage(vm_page_t m)
{
	int s;

	s = splvm();
	if ((m->flags & PG_UNMANAGED) == 0) {
		if (m->wire_count == 0)
			vm_page_unqueue(m);
	}
	vm_page_flag_set(m, PG_UNMANAGED);
	splx(s);
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_wire(vm_page_t m)
{
	int s;

	/*
	 * Only bump the wire statistics if the page is not already wired,
	 * and only unqueue the page if it is on some queue (if it is unmanaged
	 * it is already off the queues).
	 */
	s = splvm();
	if (m->wire_count == 0) {
		if ((m->flags & PG_UNMANAGED) == 0)
			vm_page_unqueue(m);
		cnt.v_wire_count++;
	}
	m->wire_count++;
	KASSERT(m->wire_count != 0,
	    ("vm_page_wire: wire_count overflow m=%p", m));

	splx(s);
	vm_page_flag_set(m, PG_MAPPED);
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	Many pages placed on the inactive queue should actually go
 *	into the cache, but it is difficult to figure out which.  What
 *	we do instead, if the inactive target is well met, is to put
 *	clean pages at the head of the inactive queue instead of the tail.
 *	This will cause them to be moved to the cache more quickly and
 *	if not actively re-referenced, freed more quickly.  If we just
 *	stick these pages at the end of the inactive queue, heavy filesystem
 *	meta-data accesses can cause an unnecessary paging load on memory bound
 *	processes.  This optimization causes one-time-use metadata to be
 *	reused more quickly.
 *
 *	BUT, if we are in a low-memory situation we have no choice but to
 *	put clean pages on the cache queue.
 *
 *	A number of routines use vm_page_unwire() to guarantee that the page
 *	will go into either the inactive or active queues, and will NEVER
 *	be placed in the cache - for example, just after dirtying a page.
 *	Dirty pages in the cache are not allowed.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
void
vm_page_unwire(vm_page_t m, int activate)
{
	int s;

	s = splvm();

	if (m->wire_count > 0) {
		m->wire_count--;
		if (m->wire_count == 0) {
			cnt.v_wire_count--;
			if (m->flags & PG_UNMANAGED) {
				;
			} else if (activate) {
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl, m, pageq);
				m->queue = PQ_ACTIVE;
				vm_page_queues[PQ_ACTIVE].lcnt++;
				cnt.v_active_count++;
			} else {
				vm_page_flag_clear(m, PG_WINATCFLS);
				TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
				m->queue = PQ_INACTIVE;
				vm_page_queues[PQ_INACTIVE].lcnt++;
				cnt.v_inactive_count++;
			}
		}
	} else {
		panic("vm_page_unwire: invalid wire count: %d\n", m->wire_count);
	}
	splx(s);
}

/*
 *	vm_page_deactivate:
 *
 *	Move the specified page to the inactive queue.  If the page has
 *	any associated swap, the swap is deallocated.
 *
 *	Normally athead is 0 resulting in LRU operation.  athead is set
 *	to 1 if we want this page to be 'as if it were placed in the cache',
 *	except without unmapping it from the process address space.
 *
 *	This routine may not block.
 */
static __inline void
_vm_page_deactivate(vm_page_t m, int athead)
{
	int s;

	/*
	 * Ignore if already inactive.
	 */
	if (m->queue == PQ_INACTIVE)
		return;

	s = splvm();
	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
		if ((m->queue - m->pc) == PQ_CACHE)
			cnt.v_reactivated++;
		vm_page_flag_clear(m, PG_WINATCFLS);
		vm_page_unqueue(m);
		if (athead)
			TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
		else
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
		m->queue = PQ_INACTIVE;
		vm_page_queues[PQ_INACTIVE].lcnt++;
		cnt.v_inactive_count++;
	}
	splx(s);
}

void
vm_page_deactivate(vm_page_t m)
{
	_vm_page_deactivate(m, 0);
}

/*
 *	vm_page_try_to_cache:
 *
 *	Returns 0 on failure, 1 on success
 */
int
vm_page_try_to_cache(vm_page_t m)
{
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
		return(0);
	}
	vm_page_test_dirty(m);
	if (m->dirty)
		return(0);
	vm_page_cache(m);
	return(1);
}

/*
 *	vm_page_try_to_free:
 *
 *	Attempt to free the page.  If we cannot free it, we do nothing.
 *	1 is returned on success, 0 on failure.
 */
int
vm_page_try_to_free(vm_page_t m)
{
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
		return(0);
	}
	vm_page_test_dirty(m);
	if (m->dirty)
		return(0);
	vm_page_busy(m);
	vm_page_protect(m, VM_PROT_NONE);
	vm_page_free(m);
	return(1);
}

/*
 *	vm_page_cache:
 *
 *	Put the specified page onto the page cache queue (if appropriate).
 *
 *	This routine may not block.
 */
void
vm_page_cache(vm_page_t m)
{
	int s;

	if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy || m->wire_count) {
		printf("vm_page_cache: attempting to cache busy page\n");
		return;
	}
	if ((m->queue - m->pc) == PQ_CACHE)
		return;

	/*
	 * Remove all pmaps and indicate that the page is not
	 * writeable or mapped.
	 */
	vm_page_protect(m, VM_PROT_NONE);
	if (m->dirty != 0) {
		panic("vm_page_cache: caching a dirty page, pindex: %ld",
		    (long)m->pindex);
	}
	s = splvm();
	vm_page_unqueue_nowakeup(m);
	m->queue = PQ_CACHE + m->pc;
	vm_page_queues[m->queue].lcnt++;
	TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m, pageq);
	cnt.v_cache_count++;
	vm_page_free_wakeup();
	splx(s);
}

/*
 *	vm_page_dontneed:
 *
 *	Cache, deactivate, or do nothing as appropriate.  This routine
 *	is typically used by madvise() MADV_DONTNEED.
 *
 *	Generally speaking we want to move the page into the cache so
 *	it gets reused quickly.  However, this can result in a silly syndrome
 *	due to the page recycling too quickly.  Small objects will not be
 *	fully cached.  On the other hand, if we move the page to the inactive
 *	queue we wind up with a problem whereby very large objects
 *	unnecessarily blow away our inactive and cache queues.
 *
 *	The solution is to move the pages based on a fixed weighting.  We
 *	either leave them alone, deactivate them, or move them to the cache,
 *	where moving them to the cache has the highest weighting.
 *	By forcing some pages into other queues we eventually force the
 *	system to balance the queues, potentially recovering other unrelated
 *	space from active.  The idea is to not force this to happen too
 *	often.
 */
void
vm_page_dontneed(vm_page_t m)
{
	static int dnweight;
	int dnw;
	int head;

	dnw = ++dnweight;

	/*
	 * occasionally leave the page alone
	 */
	if ((dnw & 0x01F0) == 0 ||
	    m->queue == PQ_INACTIVE ||
	    m->queue - m->pc == PQ_CACHE
	) {
		if (m->act_count >= ACT_INIT)
			--m->act_count;
		return;
	}

	if (m->dirty == 0)
		vm_page_test_dirty(m);

	if (m->dirty || (dnw & 0x0070) == 0) {
		/*
		 * Deactivate the page 3 times out of 32.
		 */
		head = 0;
	} else {
		/*
		 * Cache the page 28 times out of every 32.  Note that
		 * the page is deactivated instead of cached, but placed
		 * at the head of the queue instead of the tail.
		 */
		head = 1;
	}
	_vm_page_deactivate(m, head);
}
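
/*
 * How the weights above fall out of the bit masks (illustrative):
 * (dnw & 0x01F0) == 0 selects dnw values 0-15 out of every 512, i.e.
 * 16/512 == 1/32 of calls leave the page alone.  Of the remainder,
 * (dnw & 0x0070) == 0 holds for 64/512 == 4/32 of calls, and since
 * 0x01F0 covers every bit of 0x0070 the leave-alone 1/32 is a subset
 * of that, leaving 3/32 that deactivate normally and 28/32 that take
 * the head-of-inactive ("as if cached") path for a clean page.
 */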

/*
 *	vm_page_grab:
 *
 *	Grab a page, waiting until we are woken up due to the page
 *	changing state.  We keep on waiting, if the page continues
 *	to be in the object.  If the page doesn't exist, allocate it.
 *
 *	This routine may block.
 */
vm_page_t
vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
	vm_page_t m;
	int s, generation;

retrylookup:
	if ((m = vm_page_lookup(object, pindex)) != NULL) {
		if (m->busy || (m->flags & PG_BUSY)) {
			generation = object->generation;

			s = splvm();
			while ((object->generation == generation) &&
			    (m->busy || (m->flags & PG_BUSY))) {
				vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
				tsleep(m, PVM, "pgrbwt", 0);
				if ((allocflags & VM_ALLOC_RETRY) == 0) {
					splx(s);
					return NULL;
				}
			}
			splx(s);
			goto retrylookup;
		} else {
			vm_page_busy(m);
			return m;
		}
	}

	m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
	if (m == NULL) {
		VM_WAIT;
		if ((allocflags & VM_ALLOC_RETRY) == 0)
			return NULL;
		goto retrylookup;
	}

	return m;
}
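
/*
 * Typical usage sketch (hypothetical caller, for illustration only):
 * a filesystem that wants page `idx' of `obj' busied and is willing
 * to sleep would do
 *
 *	m = vm_page_grab(obj, idx, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 *
 * With VM_ALLOC_RETRY the call sleeps and retries until it can return
 * a busied page; without it, a single sleep ends in a NULL return and
 * the caller must be prepared to retry on its own.
 */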

/*
 *	vm_page_bits:
 *
 *	Mapping function for valid bits or for dirty bits in
 *	a page.  May not block.
 *
 *	Inputs are required to range within a page.
 */
__inline int
vm_page_bits(int base, int size)
{
	int first_bit;
	int last_bit;

	KASSERT(
	    base + size <= PAGE_SIZE,
	    ("vm_page_bits: illegal base/size %d/%d", base, size)
	);

	if (size == 0)		/* handle degenerate case */
		return(0);

	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;

	return ((2 << last_bit) - (1 << first_bit));
}
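
/*
 * Worked example (illustrative, assuming DEV_BSIZE == 512 so
 * DEV_BSHIFT == 9): vm_page_bits(512, 1024) covers byte offsets
 * 512-1535, i.e. disk blocks 1 and 2 of the page:
 *
 *	first_bit = 512 >> 9  = 1
 *	last_bit  = 1535 >> 9 = 2
 *	(2 << 2) - (1 << 1)   = 8 - 2 = 6 = binary 110
 *
 * so bits 1 and 2 of the valid/dirty bitmap are set.  (2 << last_bit)
 * is just a branch-free way of writing (1 << (last_bit + 1)).
 */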

/*
 *	vm_page_set_validclean:
 *
 *	Sets portions of a page valid and clean.  The arguments are expected
 *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
 *	of any partial chunks touched by the range.  The invalid portion of
 *	such chunks will be zero'd.
 *
 *	This routine may not block.
 *
 *	(base + size) must be less than or equal to PAGE_SIZE.
 */
void
vm_page_set_validclean(vm_page_t m, int base, int size)
{
	int pagebits;
	int frag;
	int endoff;

	if (size == 0)	/* handle degenerate case */
		return;

	/*
	 * If the base is not DEV_BSIZE aligned and the valid
	 * bit is clear, we have to zero out a portion of the
	 * first block.
	 */
	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
	) {
		pmap_zero_page_area(
		    VM_PAGE_TO_PHYS(m),
		    frag,
		    base - frag
		);
	}

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the
	 * valid bit is clear, we have to zero out a portion of
	 * the last block.
	 */
	endoff = base + size;

	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
	) {
		pmap_zero_page_area(
		    VM_PAGE_TO_PHYS(m),
		    endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
		);
	}

	/*
	 * Set valid, clear dirty bits.  If validating the entire
	 * page we can safely clear the pmap modify bit.  We also
	 * use this opportunity to clear the PG_NOSYNC flag.  If a process
	 * takes a write fault on a MAP_NOSYNC memory area the flag will
	 * be cleared.
	 *
	 * We set valid bits inclusive of any overlap, but we can only
	 * clear dirty bits for DEV_BSIZE chunks that are fully within
	 * the range.
	 */
	pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
#if 0	/* NOT YET */
	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
		frag = DEV_BSIZE - frag;
		base += frag;
		size -= frag;
		if (size < 0)
			size = 0;
	}
	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
#endif
	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE) {
		pmap_clear_modify(m);
		vm_page_flag_clear(m, PG_NOSYNC);
	}
}
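
/*
 * Alignment example (illustrative, DEV_BSIZE == 512 assumed): a call
 * vm_page_set_validclean(m, 100, 200) touches only part of block 0.
 * If block 0 was previously invalid, bytes 0-99 are zeroed by the
 * base-alignment test above (and bytes 300-511 by the endoff test),
 * so the whole block can honestly be marked valid even though the
 * caller supplied only bytes 100-299.
 */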

void
vm_page_set_dirty(vm_page_t m, int base, int size)
{
	m->dirty |= vm_page_bits(base, size);
}

void
vm_page_clear_dirty(vm_page_t m, int base, int size)
{
	m->dirty &= ~vm_page_bits(base, size);
}

/*
 *	vm_page_set_invalid:
 *
 *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
 *	valid and dirty bits for the affected areas are cleared.
 *
 *	May not block.
 */
void
vm_page_set_invalid(vm_page_t m, int base, int size)
{
	int bits;

	bits = vm_page_bits(base, size);
	m->valid &= ~bits;
	m->dirty &= ~bits;
	m->object->generation++;
}

/*
 *	vm_page_zero_invalid()
 *
 *	The kernel assumes that the invalid portions of a page contain
 *	garbage, but such pages can be mapped into memory by user code.
 *	When this occurs, we must zero out the non-valid portions of the
 *	page so user code sees what it expects.
 *
 *	Pages are most often semi-valid when the end of a file is mapped
 *	into memory and the file's size is not page aligned.
 */
void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
	int b;
	int i;

	/*
	 * Scan the valid bits looking for invalid sections that
	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas ( where the
	 * valid bit may be set ) have already been zeroed by
	 * vm_page_set_validclean().
	 */
	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
		if (i == (PAGE_SIZE / DEV_BSIZE) ||
		    (m->valid & (1 << i))
		) {
			if (i > b) {
				pmap_zero_page_area(
				    VM_PAGE_TO_PHYS(m),
				    b << DEV_BSHIFT,
				    (i - b) << DEV_BSHIFT
				);
			}
			b = i + 1;
		}
	}

	/*
	 * setvalid is TRUE when we can safely set the zero'd areas
	 * as being valid.  We can do this if there are no cache consistency
	 * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
	 */
	if (setvalid)
		m->valid = VM_PAGE_BITS_ALL;
}

/*
 *	vm_page_is_valid:
 *
 *	Is (partial) page valid?  Note that the case where size == 0
 *	will return FALSE in the degenerate case where the page is
 *	entirely invalid, and TRUE otherwise.
 *
 *	May not block.
 */
int
vm_page_is_valid(vm_page_t m, int base, int size)
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}

/*
 *	vm_page_test_dirty:
 *
 *	Update dirty bits from pmap/mmu.  May not block.
 */
void
vm_page_test_dirty(vm_page_t m)
{
	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
		vm_page_dirty(m);
	}
}

/*
 * This interface is for merging with malloc() someday.
 * Even if we never implement compaction so that contiguous allocation
 * works after initialization time, malloc()'s data structures are good
 * for statistics and for allocations of less than a page.
 */
void *
contigmalloc1(
	unsigned long size,	/* should be size_t here and for malloc() */
	struct malloc_type *type,
	int flags,
	unsigned long low,
	unsigned long high,
	unsigned long alignment,
	unsigned long boundary,
	vm_map_t map)
{
	int i, s, start;
	vm_offset_t addr, phys, tmp_addr;
	int pass;
	vm_page_t pga = vm_page_array;

	size = round_page(size);
	if (size == 0)
		panic("contigmalloc1: size must not be 0");
	if ((alignment & (alignment - 1)) != 0)
		panic("contigmalloc1: alignment must be a power of 2");
	if ((boundary & (boundary - 1)) != 0)
		panic("contigmalloc1: boundary must be a power of 2");

	start = 0;
	for (pass = 0; pass <= 1; pass++) {
		s = splvm();
again:
		/*
		 * Find first page in array that is free, within range,
		 * aligned, and such that the boundary won't be crossed.
		 */
		for (i = start; i < cnt.v_page_count; i++) {
			int pqtype;

			phys = VM_PAGE_TO_PHYS(&pga[i]);
			pqtype = pga[i].queue - pga[i].pc;
			if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
			    (phys >= low) && (phys < high) &&
			    ((phys & (alignment - 1)) == 0) &&
			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0))
				break;
		}
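
		/*
		 * Boundary-test example (illustrative): the XOR leaves set
		 * only the bits in which the first and last byte of the run
		 * differ, so masking with ~(boundary - 1) is non-zero exactly
		 * when the run straddles a boundary multiple.  With
		 * boundary == 0x100000 (1MB) and size == 0x3000, phys ==
		 * 0xff000 gives 0xff000 ^ 0x101fff == 0x1fefff, whose high
		 * bits survive the mask -> rejected, while phys == 0x100000
		 * gives 0x100000 ^ 0x102fff == 0x2fff -> accepted.
		 */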

		/*
		 * If the above failed or we will exceed the upper bound, fail.
		 */
		if ((i == cnt.v_page_count) ||
		    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
			vm_page_t m, next;

again1:
			for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
			    m != NULL;
			    m = next) {

				KASSERT(m->queue == PQ_INACTIVE,
				    ("contigmalloc1: page %p is not PQ_INACTIVE", m));

				next = TAILQ_NEXT(m, pageq);
				if (vm_page_sleep_busy(m, TRUE, "vpctw0"))
					goto again1;
				vm_page_test_dirty(m);
				if (m->dirty) {
					if (m->object->type == OBJT_VNODE) {
						vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
						vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
						VOP_UNLOCK(m->object->handle, 0, curproc);
						goto again1;
					} else if (m->object->type == OBJT_SWAP ||
					    m->object->type == OBJT_DEFAULT) {
						vm_pageout_flush(&m, 1, 0);
						goto again1;
					}
				}
				if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
					vm_page_cache(m);
			}

			for (m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
			    m != NULL;
			    m = next) {

				KASSERT(m->queue == PQ_ACTIVE,
				    ("contigmalloc1: page %p is not PQ_ACTIVE", m));

				next = TAILQ_NEXT(m, pageq);
				if (vm_page_sleep_busy(m, TRUE, "vpctw1"))
					goto again1;
				vm_page_test_dirty(m);
				if (m->dirty) {
					if (m->object->type == OBJT_VNODE) {
						vn_lock(m->object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
						vm_object_page_clean(m->object, 0, 0, OBJPC_SYNC);
						VOP_UNLOCK(m->object->handle, 0, curproc);
						goto again1;
					} else if (m->object->type == OBJT_SWAP ||
					    m->object->type == OBJT_DEFAULT) {
						vm_pageout_flush(&m, 1, 0);
						goto again1;
					}
				}
				if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
					vm_page_cache(m);
			}

			splx(s);
			continue;
		}
		start = i;

		/*
		 * Check successive pages for contiguous and free.
		 */
		for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
			int pqtype;

			pqtype = pga[i].queue - pga[i].pc;
			if ((VM_PAGE_TO_PHYS(&pga[i]) !=
			    (VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
			    ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE))) {
				start++;
				goto again;
			}
		}

		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			int pqtype;
			vm_page_t m = &pga[i];

			pqtype = m->queue - m->pc;
			if (pqtype == PQ_CACHE) {
				vm_page_busy(m);
				vm_page_free(m);
			}
			vm_page_unqueue_nowakeup(m);
			m->valid = VM_PAGE_BITS_ALL;
			if (m->flags & PG_ZERO)
				vm_page_zero_count--;
			m->flags = 0;
			KASSERT(m->dirty == 0, ("contigmalloc1: page %p was dirty", m));
			m->wire_count = 0;
			m->busy = 0;
			m->object = NULL;
		}

		/*
		 * We've found a contiguous chunk that meets our requirements.
		 * Allocate kernel VM, unfree and assign the physical pages to
		 * it and return kernel VM pointer.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) !=
		    KERN_SUCCESS) {
			/*
			 * XXX We almost never run out of kernel virtual
			 * space, so we don't make the allocated memory
			 * above available.
			 */
			vm_map_unlock(map);
			splx(s);
			return (NULL);
		}
		vm_object_reference(kernel_object);
		vm_map_insert(map, kernel_object, addr - VM_MIN_KERNEL_ADDRESS,
		    addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
		vm_map_unlock(map);

		tmp_addr = addr;
		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			vm_page_t m = &pga[i];
			vm_page_insert(m, kernel_object,
			    OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
			tmp_addr += PAGE_SIZE;
		}
		vm_map_pageable(map, addr, addr + size, FALSE);

		splx(s);
		return ((void *)addr);
	}
	return NULL;
}

void *
contigmalloc(
	unsigned long size,	/* should be size_t here and for malloc() */
	struct malloc_type *type,
	int flags,
	unsigned long low,
	unsigned long high,
	unsigned long alignment,
	unsigned long boundary)
{
	return contigmalloc1(size, type, flags, low, high, alignment, boundary,
	    kernel_map);
}

void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{
	kmem_free(kernel_map, (vm_offset_t)addr, size);
}

vm_offset_t
vm_page_alloc_contig(
	vm_offset_t size,
	vm_offset_t low,
	vm_offset_t high,
	vm_offset_t alignment)
{
	return ((vm_offset_t)contigmalloc1(size, M_DEVBUF, M_NOWAIT, low, high,
	    alignment, 0ul, kernel_map));
}
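
/*
 * Usage sketch (hypothetical driver, for illustration only): a device
 * needing a 64KB DMA buffer below 16MB, 64KB-aligned and not crossing
 * a 64KB boundary, could call
 *
 *	buf = contigmalloc(65536, M_DEVBUF, M_NOWAIT,
 *	    0ul, 0xfffffful, 65536ul, 65536ul);
 *	...
 *	contigfree(buf, 65536, M_DEVBUF);
 *
 * A 64KB run aligned to 64KB can never cross a 64KB boundary, so the
 * boundary argument here is redundant but harmless.
 */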

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int i;

	db_printf("PQ_FREE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_CACHE:");
	for (i = 0; i < PQ_L2_SIZE; i++) {
		db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
	}
	db_printf("\n");

	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
	    vm_page_queues[PQ_ACTIVE].lcnt,
	    vm_page_queues[PQ_INACTIVE].lcnt);
}
#endif /* DDB */