kernel - MPSAFE work - Finish tokenizing vm_page.c
[dragonfly.git] / sys / vm / vm_page.c
/*
 * (MPSAFE)
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
 * $FreeBSD: src/sys/vm/vm_page.c,v 1.147.2.18 2002/03/10 05:03:19 alc Exp $
 * $DragonFly: src/sys/vm/vm_page.c,v 1.40 2008/08/25 17:01:42 dillon Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * Resident memory management module.  The module manipulates 'VM pages'.
 * A VM page is the core building block for memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/swap_pager.h>

#include <machine/md_var.h>

#include <vm/vm_page2.h>
#include <sys/mplock2.h>

static void vm_page_queue_init(void);
static void vm_page_free_wakeup(void);
static vm_page_t vm_page_select_cache(vm_object_t, vm_pindex_t);
static vm_page_t _vm_page_list_find2(int basequeue, int index);

struct vpgqueues vm_page_queues[PQ_COUNT]; /* Array of tailq lists */

#define ASSERT_IN_CRIT_SECTION()        KKASSERT(crit_test(curthread));

RB_GENERATE2(vm_page_rb_tree, vm_page, rb_entry, rb_vm_page_compare,
             vm_pindex_t, pindex);

static void
vm_page_queue_init(void)
{
        int i;

        for (i = 0; i < PQ_L2_SIZE; i++)
                vm_page_queues[PQ_FREE+i].cnt = &vmstats.v_free_count;
        for (i = 0; i < PQ_L2_SIZE; i++)
                vm_page_queues[PQ_CACHE+i].cnt = &vmstats.v_cache_count;

        vm_page_queues[PQ_INACTIVE].cnt = &vmstats.v_inactive_count;
        vm_page_queues[PQ_ACTIVE].cnt = &vmstats.v_active_count;
        vm_page_queues[PQ_HOLD].cnt = &vmstats.v_active_count;
        /* PQ_NONE has no queue */

        for (i = 0; i < PQ_COUNT; i++)
                TAILQ_INIT(&vm_page_queues[i].pl);
}

/*
 * note: place in initialized data section?  Is this necessary?
 */
long first_page = 0;
int vm_page_array_size = 0;
int vm_page_zero_count = 0;
vm_page_t vm_page_array = 0;

/*
 * (low level boot)
 *
 * Sets the page size, perhaps based upon the memory size.
 * Must be called before any use of page-size dependent functions.
 */
void
vm_set_page_size(void)
{
        if (vmstats.v_page_size == 0)
                vmstats.v_page_size = PAGE_SIZE;
        if (((vmstats.v_page_size - 1) & vmstats.v_page_size) != 0)
                panic("vm_set_page_size: page size not a power of two");
}
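
/*
 * Illustrative arithmetic (not part of the original source): the check
 * above uses the standard bit trick that a power of two has exactly one
 * set bit, so (x - 1) & x == 0.  E.g. 4096 is 0x1000 and 4095 is 0x0fff,
 * which AND to 0; a non-power-of-two such as 4097 gives
 * 4096 & 4097 == 0x1000 != 0 and triggers the panic.
 */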

/*
 * (low level boot)
 *
 * Add a new page to the freelist for use by the system.  New pages
 * are added to both the head and tail of the associated free page
 * queue in a bottom-up fashion, so both zero'd and non-zero'd page
 * requests pull 'recent' adds (higher physical addresses) first.
 *
 * Must be called in a critical section.
 */
vm_page_t
vm_add_new_page(vm_paddr_t pa)
{
        struct vpgqueues *vpq;
        vm_page_t m;

        ++vmstats.v_page_count;
        ++vmstats.v_free_count;
        m = PHYS_TO_VM_PAGE(pa);
        m->phys_addr = pa;
        m->flags = 0;
        m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
        m->queue = m->pc + PQ_FREE;
        KKASSERT(m->dirty == 0);

        vpq = &vm_page_queues[m->queue];
        if (vpq->flipflop)
                TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
        else
                TAILQ_INSERT_HEAD(&vpq->pl, m, pageq);
        vpq->flipflop = 1 - vpq->flipflop;

        vm_page_queues[m->queue].lcnt++;
        return (m);
}

/*
 * (low level boot)
 *
 * Initializes the resident memory module.
 *
 * Allocates memory for the page cells, and for the object/offset-to-page
 * hash table headers.  Each page cell is initialized and placed on the
 * free list.
 *
 * starta/enda represents the range of physical memory addresses available
 * for use (skipping memory already used by the kernel), subject to
 * phys_avail[].  Note that phys_avail[] has already mapped out memory
 * already in use by the kernel.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
        vm_offset_t mapped;
        vm_size_t npages;
        vm_paddr_t page_range;
        vm_paddr_t new_end;
        int i;
        vm_paddr_t pa;
        int nblocks;
        vm_paddr_t last_pa;
        vm_paddr_t end;
        vm_paddr_t biggestone, biggestsize;
        vm_paddr_t total;

        total = 0;
        biggestsize = 0;
        biggestone = 0;
        nblocks = 0;
        vaddr = round_page(vaddr);

        for (i = 0; phys_avail[i + 1]; i += 2) {
                phys_avail[i] = round_page64(phys_avail[i]);
                phys_avail[i + 1] = trunc_page64(phys_avail[i + 1]);
        }

        for (i = 0; phys_avail[i + 1]; i += 2) {
                vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

                if (size > biggestsize) {
                        biggestone = i;
                        biggestsize = size;
                }
                ++nblocks;
                total += size;
        }

        end = phys_avail[biggestone+1];
        end = trunc_page(end);

        /*
         * Initialize the queue headers for the free queue, the active queue
         * and the inactive queue.
         */
        vm_page_queue_init();

        /* VKERNELs don't support minidumps and as such don't need vm_page_dump */
#if !defined(_KERNEL_VIRTUAL)
        /*
         * Allocate a bitmap to indicate that a random physical page
         * needs to be included in a minidump.
         *
         * The amd64 port needs this to indicate which direct map pages
         * need to be dumped, via calls to dump_add_page()/dump_drop_page().
         *
         * However, i386 still needs this workspace internally within the
         * minidump code.  In theory, they are not needed on i386, but are
         * included should the sf_buf code decide to use them.
         */
        page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;
        vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
        end -= vm_page_dump_size;
        vm_page_dump = (void *)pmap_map(&vaddr, end, end + vm_page_dump_size,
            VM_PROT_READ | VM_PROT_WRITE);
        bzero((void *)vm_page_dump, vm_page_dump_size);
#endif

        /*
         * Compute the number of pages of memory that will be available for
         * use (taking into account the overhead of a page structure per
         * page).
         */
        first_page = phys_avail[0] / PAGE_SIZE;
        page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
        npages = (total - (page_range * sizeof(struct vm_page))) / PAGE_SIZE;

        /*
         * Initialize the mem entry structures now, and put them in the free
         * queue.
         */
        new_end = trunc_page(end - page_range * sizeof(struct vm_page));
        mapped = pmap_map(&vaddr, new_end, end,
            VM_PROT_READ | VM_PROT_WRITE);
        vm_page_array = (vm_page_t)mapped;

#if defined(__x86_64__) && !defined(_KERNEL_VIRTUAL)
        /*
         * since pmap_map on amd64 returns stuff out of a direct-map region,
         * we have to manually add these pages to the minidump tracking so
         * that they can be dumped, including the vm_page_array.
         */
        for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
                dump_add_page(pa);
#endif

        /*
         * Clear all of the page structures
         */
        bzero((caddr_t)vm_page_array, page_range * sizeof(struct vm_page));
        vm_page_array_size = page_range;

        /*
         * Construct the free queue(s) in ascending order (by physical
         * address) so that the first 16MB of physical memory is allocated
         * last rather than first.  On large-memory machines, this avoids
         * the exhaustion of low physical memory before isa_dmainit has run.
         */
        vmstats.v_page_count = 0;
        vmstats.v_free_count = 0;
        for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
                pa = phys_avail[i];
                if (i == biggestone)
                        last_pa = new_end;
                else
                        last_pa = phys_avail[i + 1];
                while (pa < last_pa && npages-- > 0) {
                        vm_add_new_page(pa);
                        pa += PAGE_SIZE;
                }
        }
        return (vaddr);
}

/*
 * Scan comparison function for Red-Black tree scans.  An inclusive
 * (start,end) is expected.  Other fields are not used.
 */
int
rb_vm_page_scancmp(struct vm_page *p, void *data)
{
        struct rb_vm_page_scan_info *info = data;

        if (p->pindex < info->start_pindex)
                return(-1);
        if (p->pindex > info->end_pindex)
                return(1);
        return(0);
}

int
rb_vm_page_compare(struct vm_page *p1, struct vm_page *p2)
{
        if (p1->pindex < p2->pindex)
                return(-1);
        if (p1->pindex > p2->pindex)
                return(1);
        return(0);
}
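
/*
 * Illustrative sketch (not part of the original source): the RB_GENERATE2()
 * above emits a vm_page_rb_tree_RB_SCAN() helper, and rb_vm_page_scancmp()
 * is written to drive it over an inclusive pindex range.  A hypothetical
 * caller (the names my_callback and info are for illustration only) would
 * look like:
 *
 *      struct rb_vm_page_scan_info info;
 *
 *      info.start_pindex = start;
 *      info.end_pindex = end;
 *      vm_page_rb_tree_RB_SCAN(&object->rb_memq, rb_vm_page_scancmp,
 *                              my_callback, &info);
 *
 * where my_callback(p, &info) is invoked once for each vm_page whose
 * pindex lies within [start, end].
 */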

/*
 * Holding a page keeps it from being reused.  Other parts of the system
 * can still disassociate the page from its current object and free it, or
 * perform read or write I/O on it and/or otherwise manipulate the page,
 * but if the page is held the VM system will leave the page and its data
 * intact and not reuse the page for other purposes until the last hold
 * reference is released.  (see vm_page_wire() if you want to prevent the
 * page from being disassociated from its object too).
 *
 * The caller must hold vm_token.
 *
 * The caller must still validate the contents of the page and, if necessary,
 * wait for any pending I/O (e.g. vm_page_sleep_busy() loop) to complete
 * before manipulating the page.
 */
void
vm_page_hold(vm_page_t m)
{
        ASSERT_LWKT_TOKEN_HELD(&vm_token);
        ++m->hold_count;
}

/*
 * The opposite of vm_page_hold().  A page can be freed while being held,
 * which places it on the PQ_HOLD queue.  We must call vm_page_free_toq()
 * in this case to actually free it once the hold count drops to 0.
 *
 * The caller must hold vm_token if non-blocking operation is desired,
 * but otherwise does not need to.
 */
void
vm_page_unhold(vm_page_t m)
{
        lwkt_gettoken(&vm_token);
        --m->hold_count;
        KASSERT(m->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
        if (m->hold_count == 0 && m->queue == PQ_HOLD) {
                vm_page_busy(m);
                vm_page_free_toq(m);
        }
        lwkt_reltoken(&vm_token);
}
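
/*
 * Illustrative sketch (not part of the original source): a caller that
 * needs the page's data preserved across a blocking operation typically
 * brackets the work with a hold:
 *
 *      lwkt_gettoken(&vm_token);
 *      vm_page_hold(m);
 *      lwkt_reltoken(&vm_token);
 *      ... possibly-blocking access to the page's data ...
 *      vm_page_unhold(m);
 *
 * The hold keeps the page and its contents from being reused for another
 * purpose but does not pin its object association; use vm_page_wire()
 * when that association must also be preserved.
 */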

/*
 * Inserts the given vm_page into the object and object list.
 *
 * The pagetables are not updated but will presumably fault the page
 * in if necessary, or if a kernel page the caller will at some point
 * enter the page into the kernel's pmap.  We are not allowed to block
 * here so we *can't* do this anyway.
 *
 * This routine may not block.
 * This routine must be called with the vm_token held.
 * This routine must be called with a critical section held.
 */
void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
        ASSERT_IN_CRIT_SECTION();
        ASSERT_LWKT_TOKEN_HELD(&vm_token);
        if (m->object != NULL)
                panic("vm_page_insert: already inserted");

        /*
         * Record the object/offset pair in this page
         */
        m->object = object;
        m->pindex = pindex;

        /*
         * Insert it into the object.
         */
        ASSERT_MP_LOCK_HELD(curthread);
        vm_page_rb_tree_RB_INSERT(&object->rb_memq, m);
        object->generation++;

        /*
         * show that the object has one more resident page.
         */
        object->resident_page_count++;

        /*
         * Since we are inserting a new and possibly dirty page,
         * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
         */
        if ((m->valid & m->dirty) || (m->flags & PG_WRITEABLE))
                vm_object_set_writeable_dirty(object);

        /*
         * Checks for a swap assignment and sets PG_SWAPPED if appropriate.
         */
        swap_pager_page_inserted(m);
}

/*
 * Removes the given vm_page_t from the global (object,index) hash table
 * and from the object's memq.
 *
 * The underlying pmap entry (if any) is NOT removed here.
 * This routine may not block.
 *
 * The page must be BUSY and will remain BUSY on return.
 * No other requirements.
 *
 * NOTE: FreeBSD side effect was to unbusy the page on return.  We leave
 *       it busy.
 */
void
vm_page_remove(vm_page_t m)
{
        vm_object_t object;

        crit_enter();
        lwkt_gettoken(&vm_token);
        if (m->object == NULL) {
                lwkt_reltoken(&vm_token);
                crit_exit();
                return;
        }

        if ((m->flags & PG_BUSY) == 0)
                panic("vm_page_remove: page not busy");

        object = m->object;

        /*
         * Remove the page from the object and update the object.
         */
        ASSERT_MP_LOCK_HELD(curthread);
        vm_page_rb_tree_RB_REMOVE(&object->rb_memq, m);
        object->resident_page_count--;
        object->generation++;
        m->object = NULL;

        lwkt_reltoken(&vm_token);
        crit_exit();
}

/*
 * Locate and return the page at (object, pindex), or NULL if the
 * page could not be found.
 *
 * The caller must hold vm_token if non-blocking operation is desired.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
        vm_page_t m;

        /*
         * Search the hash table for this object/offset pair
         */
        ASSERT_MP_LOCK_HELD(curthread);
        crit_enter();
        lwkt_gettoken(&vm_token);
        m = vm_page_rb_tree_RB_LOOKUP(&object->rb_memq, pindex);
        lwkt_reltoken(&vm_token);
        crit_exit();
        KKASSERT(m == NULL || (m->object == object && m->pindex == pindex));
        return(m);
}

/*
 * vm_page_rename()
 *
 * Move the given memory entry from its current object to the specified
 * target object/offset.
 *
 * The object must be locked.
 * This routine may not block.
 *
 * Note: This routine enters a critical section itself, the caller need not.
 *
 * Note: Swap associated with the page must be invalidated by the move.  We
 *       have to do this for several reasons: (1) we aren't freeing the
 *       page, (2) we are dirtying the page, (3) the VM system is probably
 *       moving the page from object A to B, and will then later move
 *       the backing store from A to B and we can't have a conflict.
 *
 * Note: We *always* dirty the page.  It is necessary both for the
 *       fact that we moved it, and because we may be invalidating
 *       swap.  If the page is on the cache, we have to deactivate it
 *       or vm_page_dirty() will panic.  Dirty pages are not allowed
 *       on the cache.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{
        crit_enter();
        lwkt_gettoken(&vm_token);
        vm_page_remove(m);
        vm_page_insert(m, new_object, new_pindex);
        if (m->queue - m->pc == PQ_CACHE)
                vm_page_deactivate(m);
        vm_page_dirty(m);
        vm_page_wakeup(m);
        lwkt_reltoken(&vm_token);
        crit_exit();
}

/*
 * vm_page_unqueue() without any wakeup.  This routine is used when a page
 * is being moved between queues or otherwise is to remain BUSYied by the
 * caller.
 *
 * The caller must hold vm_token
 * This routine may not block.
 */
void
vm_page_unqueue_nowakeup(vm_page_t m)
{
        int queue = m->queue;
        struct vpgqueues *pq;

        ASSERT_LWKT_TOKEN_HELD(&vm_token);
        if (queue != PQ_NONE) {
                pq = &vm_page_queues[queue];
                m->queue = PQ_NONE;
                TAILQ_REMOVE(&pq->pl, m, pageq);
                (*pq->cnt)--;
                pq->lcnt--;
        }
}

/*
 * vm_page_unqueue() - Remove a page from its queue, wakeup the pagedaemon
 * if necessary.
 *
 * The caller must hold vm_token
 * This routine may not block.
 */
void
vm_page_unqueue(vm_page_t m)
{
        int queue = m->queue;
        struct vpgqueues *pq;

        ASSERT_LWKT_TOKEN_HELD(&vm_token);
        if (queue != PQ_NONE) {
                m->queue = PQ_NONE;
                pq = &vm_page_queues[queue];
                TAILQ_REMOVE(&pq->pl, m, pageq);
                (*pq->cnt)--;
                pq->lcnt--;
                if ((queue - m->pc) == PQ_CACHE || (queue - m->pc) == PQ_FREE)
                        pagedaemon_wakeup();
        }
}

/*
 * vm_page_list_find()
 *
 * Find a page on the specified queue with color optimization.
 *
 * The page coloring optimization attempts to locate a page that does
 * not overload other nearby pages in the object in the cpu's L1 or L2
 * caches.  We need this optimization because cpu caches tend to be
 * physical caches, while object spaces tend to be virtual.
 *
 * Must be called with vm_token held.
 * This routine may not block.
 *
 * Note that this routine is carefully inlined.  A non-inlined version
 * is available for outside callers but the only critical path is
 * from within this source file.
 */
static __inline
vm_page_t
_vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
        vm_page_t m;

        if (prefer_zero)
                m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, pglist);
        else
                m = TAILQ_FIRST(&vm_page_queues[basequeue+index].pl);
        if (m == NULL)
                m = _vm_page_list_find2(basequeue, index);
        return(m);
}

static vm_page_t
_vm_page_list_find2(int basequeue, int index)
{
        int i;
        vm_page_t m = NULL;
        struct vpgqueues *pq;

        pq = &vm_page_queues[basequeue];

        /*
         * Note that for the first loop, index+i and index-i wind up at the
         * same place.  Even though this is not totally optimal, we've already
         * blown it by missing the cache case so we do not care.
         */
        for (i = PQ_L2_SIZE / 2; i > 0; --i) {
                if ((m = TAILQ_FIRST(&pq[(index + i) & PQ_L2_MASK].pl)) != NULL)
                        break;
                if ((m = TAILQ_FIRST(&pq[(index - i) & PQ_L2_MASK].pl)) != NULL)
                        break;
        }
        return(m);
}

/*
 * Must be called with vm_token held if the caller desired non-blocking
 * operation and a stable result.
 */
vm_page_t
vm_page_list_find(int basequeue, int index, boolean_t prefer_zero)
{
        return(_vm_page_list_find(basequeue, index, prefer_zero));
}
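
/*
 * Illustrative aside (not part of the original source): with, say,
 * PQ_L2_SIZE == 256 and a preferred color index of 10, the fallback
 * search in _vm_page_list_find2() probes the queues in the order
 *
 *      (10 + 128) & 255 = 138,  (10 - 128) & 255 = 138,
 *      (10 + 127) & 255 = 137,  (10 - 127) & 255 = 139,  ...
 *
 * i.e. it starts half the color space away (where +i and -i coincide,
 * as the loop comment notes) and walks inward toward the preferred
 * color, taking the first non-empty queue.  PQ_L2_MASK makes the +/-
 * arithmetic wrap modulo the number of colors.
 */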

/*
 * Find a page on the cache queue with color optimization.  As pages
 * might be found, but not applicable, they are deactivated.  This
 * keeps us from using potentially busy cached pages.
 *
 * This routine may not block.
 * Must be called with vm_token held.
 */
vm_page_t
vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
{
        vm_page_t m;

        ASSERT_LWKT_TOKEN_HELD(&vm_token);
        while (TRUE) {
                m = _vm_page_list_find(
                    PQ_CACHE,
                    (pindex + object->pg_color) & PQ_L2_MASK,
                    FALSE
                );
                if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
                          m->hold_count || m->wire_count)) {
                        vm_page_deactivate(m);
                        continue;
                }
                return m;
        }
        /* not reached */
}

/*
 * Find a free or zero page, with specified preference.  We attempt to
 * inline the nominal case and fall back to _vm_page_list_find2()
 * otherwise.
 *
 * This routine must be called with a critical section held.
 * This routine may not block.
 */
static __inline vm_page_t
vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
{
        vm_page_t m;

        m = _vm_page_list_find(
            PQ_FREE,
            (pindex + object->pg_color) & PQ_L2_MASK,
            prefer_zero
        );
        return(m);
}

/*
 * vm_page_alloc()
 *
 * Allocate and return a memory cell associated with this VM object/offset
 * pair.
 *
 *      page_req classes:
 *
 *      VM_ALLOC_NORMAL         allow use of cache pages, nominal free drain
 *      VM_ALLOC_QUICK          like normal but cannot use cache
 *      VM_ALLOC_SYSTEM         greater free drain
 *      VM_ALLOC_INTERRUPT      allow free list to be completely drained
 *      VM_ALLOC_ZERO           advisory request for pre-zero'd page
 *
 * The object must be locked.
 * This routine may not block.
 * The returned page will be marked PG_BUSY
 *
 * Additional special handling is required when called from an interrupt
 * (VM_ALLOC_INTERRUPT).  We are not allowed to mess with the page cache
 * in this case.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
{
        vm_page_t m = NULL;

        crit_enter();
        lwkt_gettoken(&vm_token);

        KKASSERT(object != NULL);
        KASSERT(!vm_page_lookup(object, pindex),
                ("vm_page_alloc: page already allocated"));
        KKASSERT(page_req &
                (VM_ALLOC_NORMAL|VM_ALLOC_QUICK|
                 VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));

        /*
         * Certain system threads (pageout daemon, buf_daemon's) are
         * allowed to eat deeper into the free page list.
         */
        if (curthread->td_flags & TDF_SYSTHREAD)
                page_req |= VM_ALLOC_SYSTEM;

loop:
        if (vmstats.v_free_count > vmstats.v_free_reserved ||
            ((page_req & VM_ALLOC_INTERRUPT) && vmstats.v_free_count > 0) ||
            ((page_req & VM_ALLOC_SYSTEM) && vmstats.v_cache_count == 0 &&
                vmstats.v_free_count > vmstats.v_interrupt_free_min)
        ) {
                /*
                 * The free queue has sufficient free pages to take one out.
                 */
                if (page_req & VM_ALLOC_ZERO)
                        m = vm_page_select_free(object, pindex, TRUE);
                else
                        m = vm_page_select_free(object, pindex, FALSE);
        } else if (page_req & VM_ALLOC_NORMAL) {
                /*
                 * Allocatable from the cache (non-interrupt only).  On
                 * success, we must free the page and try again, thus
                 * ensuring that vmstats.v_*_free_min counters are replenished.
                 */
#ifdef INVARIANTS
                if (curthread->td_preempted) {
                        kprintf("vm_page_alloc(): warning, attempt to allocate"
                                " cache page from preempting interrupt\n");
                        m = NULL;
                } else {
                        m = vm_page_select_cache(object, pindex);
                }
#else
                m = vm_page_select_cache(object, pindex);
#endif
                /*
                 * On success move the page into the free queue and loop.
                 */
                if (m != NULL) {
                        KASSERT(m->dirty == 0,
                                ("Found dirty cache page %p", m));
                        vm_page_busy(m);
                        vm_page_protect(m, VM_PROT_NONE);
                        vm_page_free(m);
                        goto loop;
                }

                /*
                 * On failure return NULL
                 */
                lwkt_reltoken(&vm_token);
                crit_exit();
#if defined(DIAGNOSTIC)
                if (vmstats.v_cache_count > 0)
                        kprintf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", vmstats.v_cache_count);
#endif
                vm_pageout_deficit++;
                pagedaemon_wakeup();
                return (NULL);
        } else {
                /*
                 * No pages available, wakeup the pageout daemon and give up.
                 */
                lwkt_reltoken(&vm_token);
                crit_exit();
                vm_pageout_deficit++;
                pagedaemon_wakeup();
                return (NULL);
        }

        /*
         * Good page found.  The page has not yet been busied.  We are in
         * a critical section.
         */
        KASSERT(m != NULL, ("vm_page_alloc(): missing page on free queue\n"));
        KASSERT(m->dirty == 0,
                ("vm_page_alloc: free/cache page %p was dirty", m));

        /*
         * Remove from free queue
         */
        vm_page_unqueue_nowakeup(m);

        /*
         * Initialize structure.  Only the PG_ZERO flag is inherited.  Set
         * the page PG_BUSY
         */
        if (m->flags & PG_ZERO) {
                vm_page_zero_count--;
                m->flags = PG_ZERO | PG_BUSY;
        } else {
                m->flags = PG_BUSY;
        }
        m->wire_count = 0;
        m->hold_count = 0;
        m->act_count = 0;
        m->busy = 0;
        m->valid = 0;

        /*
         * vm_page_insert() is safe prior to the crit_exit().  Note also that
         * inserting a page here does not insert it into the pmap (which
         * could cause us to block allocating memory).  We cannot block
         * anywhere.
         */
        vm_page_insert(m, object, pindex);

        /*
         * Don't wakeup too often - wakeup the pageout daemon when
         * we would be nearly out of memory.
         */
        pagedaemon_wakeup();

        lwkt_reltoken(&vm_token);
        crit_exit();

        /*
         * A PG_BUSY page is returned.
         */
        return (m);
}
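
/*
 * Illustrative sketch (not part of the original source): a typical caller
 * allocates, initializes the page, then unbusies it.  VM_ALLOC_ZERO is
 * advisory only, so PG_ZERO must still be checked:
 *
 *      m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
 *      if (m == NULL) {
 *              vm_wait(0);             (block, then retry the allocation)
 *      } else {
 *              if ((m->flags & PG_ZERO) == 0)
 *                      pmap_zero_page(VM_PAGE_TO_PHYS(m));
 *              m->valid = VM_PAGE_BITS_ALL;
 *              vm_page_wakeup(m);      (clears PG_BUSY)
 *      }
 */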

/*
 * Wait for sufficient free memory for nominal heavy memory use kernel
 * operations.
 */
void
vm_wait_nominal(void)
{
        while (vm_page_count_min(0))
                vm_wait(0);
}

/*
 * Test if vm_wait_nominal() would block.
 */
int
vm_test_nominal(void)
{
        if (vm_page_count_min(0))
                return(1);
        return(0);
}

/*
 * Block until free pages are available for allocation, called in various
 * places before memory allocations.
 */
void
vm_wait(int timo)
{
        crit_enter();
        lwkt_gettoken(&vm_token);
        if (curthread == pagethread) {
                vm_pageout_pages_needed = 1;
                tsleep(&vm_pageout_pages_needed, 0, "VMWait", timo);
        } else {
                if (vm_pages_needed == 0) {
                        vm_pages_needed = 1;
                        wakeup(&vm_pages_needed);
                }
                tsleep(&vmstats.v_free_count, 0, "vmwait", timo);
        }
        lwkt_reltoken(&vm_token);
        crit_exit();
}

/*
 * Block until free pages are available for allocation
 *
 * Called only in vm_fault so that processes page faulting can be
 * easily tracked.
 */
void
vm_waitpfault(void)
{
        crit_enter();
        lwkt_gettoken(&vm_token);
        if (vm_pages_needed == 0) {
                vm_pages_needed = 1;
                wakeup(&vm_pages_needed);
        }
        tsleep(&vmstats.v_free_count, 0, "pfault", 0);
        lwkt_reltoken(&vm_token);
        crit_exit();
}

/*
 * Put the specified page on the active list (if appropriate).  Ensure
 * that act_count is at least ACT_INIT but do not otherwise mess with it.
 *
 * The page queues must be locked.
 * This routine may not block.
 */
void
vm_page_activate(vm_page_t m)
{
        crit_enter();
        lwkt_gettoken(&vm_token);
        if (m->queue != PQ_ACTIVE) {
                if ((m->queue - m->pc) == PQ_CACHE)
                        mycpu->gd_cnt.v_reactivated++;

                vm_page_unqueue(m);

                if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
                        m->queue = PQ_ACTIVE;
                        vm_page_queues[PQ_ACTIVE].lcnt++;
                        TAILQ_INSERT_TAIL(&vm_page_queues[PQ_ACTIVE].pl,
                                          m, pageq);
                        if (m->act_count < ACT_INIT)
                                m->act_count = ACT_INIT;
                        vmstats.v_active_count++;
                }
        } else {
                if (m->act_count < ACT_INIT)
                        m->act_count = ACT_INIT;
        }
        lwkt_reltoken(&vm_token);
        crit_exit();
}

/*
 * Helper routine for vm_page_free_toq() and vm_page_cache().  This
 * routine is called when a page has been added to the cache or free
 * queues.
 *
 * This routine may not block.
 * This routine must be called within a critical section.
 */
static __inline void
vm_page_free_wakeup(void)
{
        /*
         * if pageout daemon needs pages, then tell it that there are
         * some free.
         */
        if (vm_pageout_pages_needed &&
            vmstats.v_cache_count + vmstats.v_free_count >=
            vmstats.v_pageout_free_min
        ) {
                wakeup(&vm_pageout_pages_needed);
                vm_pageout_pages_needed = 0;
        }

        /*
         * wakeup processes that are waiting on memory if we hit a
         * high water mark.  And wakeup scheduler process if we have
         * lots of memory.  this process will swapin processes.
         */
        if (vm_pages_needed && !vm_page_count_min(0)) {
                vm_pages_needed = 0;
                wakeup(&vmstats.v_free_count);
        }
}

/*
 * vm_page_free_toq:
 *
 * Returns the given page to the PQ_FREE list, disassociating it with
 * any VM object.
 *
 * The vm_page must be PG_BUSY on entry.  PG_BUSY will be released on
 * return (the page will have been freed).  No particular critical
 * section state is required on entry.
 *
 * This routine may not block.
 */
void
vm_page_free_toq(vm_page_t m)
{
        struct vpgqueues *pq;

        crit_enter();
        lwkt_gettoken(&vm_token);
        mycpu->gd_cnt.v_tfree++;

        KKASSERT((m->flags & PG_MAPPED) == 0);

        if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
                kprintf(
                "vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
                        (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
                        m->hold_count);
                if ((m->queue - m->pc) == PQ_FREE)
                        panic("vm_page_free: freeing free page");
                else
                        panic("vm_page_free: freeing busy page");
        }

        /*
         * unqueue, then remove page.  Note that we cannot destroy
         * the page here because we do not want to call the pager's
         * callback routine until after we've put the page on the
         * appropriate free queue.
         */
        vm_page_unqueue_nowakeup(m);
        vm_page_remove(m);

        /*
         * No further management of fictitious pages occurs beyond object
         * and queue removal.
         */
        if ((m->flags & PG_FICTITIOUS) != 0) {
                vm_page_wakeup(m);
                lwkt_reltoken(&vm_token);
                crit_exit();
                return;
        }

        m->valid = 0;
        vm_page_undirty(m);

        if (m->wire_count != 0) {
                if (m->wire_count > 1) {
                        panic(
                            "vm_page_free: invalid wire count (%d), pindex: 0x%lx",
                            m->wire_count, (long)m->pindex);
                }
                panic("vm_page_free: freeing wired page");
        }

        /*
         * Clear the UNMANAGED flag when freeing an unmanaged page.
         */
        if (m->flags & PG_UNMANAGED) {
                m->flags &= ~PG_UNMANAGED;
        }

        if (m->hold_count != 0) {
                m->flags &= ~PG_ZERO;
                m->queue = PQ_HOLD;
        } else {
                m->queue = PQ_FREE + m->pc;
        }
        pq = &vm_page_queues[m->queue];
        pq->lcnt++;
        ++(*pq->cnt);

        /*
         * Put zero'd pages on the end ( where we look for zero'd pages
         * first ) and non-zerod pages at the head.
         */
        if (m->flags & PG_ZERO) {
                TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
                ++vm_page_zero_count;
        } else {
                TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
        }
        vm_page_wakeup(m);
        vm_page_free_wakeup();
        lwkt_reltoken(&vm_token);
        crit_exit();
}

/*
 * vm_page_free_fromq_fast()
 *
 * Remove a non-zero page from one of the free queues; the page is removed for
 * zeroing, so do not issue a wakeup.
 *
 * MPUNSAFE
 */
vm_page_t
vm_page_free_fromq_fast(void)
{
        static int qi;
        vm_page_t m;
        int i;

        crit_enter();
        lwkt_gettoken(&vm_token);
        for (i = 0; i < PQ_L2_SIZE; ++i) {
                m = vm_page_list_find(PQ_FREE, qi, FALSE);
                qi = (qi + PQ_PRIME2) & PQ_L2_MASK;
                if (m && (m->flags & PG_ZERO) == 0) {
                        vm_page_unqueue_nowakeup(m);
                        vm_page_busy(m);
                        break;
                }
                m = NULL;
        }
        lwkt_reltoken(&vm_token);
        crit_exit();
        return (m);
}

/*
 * vm_page_unmanage()
 *
 * Prevent PV management from being done on the page.  The page is
 * removed from the paging queues as if it were wired, and as a
 * consequence of no longer being managed the pageout daemon will not
 * touch it (since there is no way to locate the pte mappings for the
 * page).  madvise() calls that mess with the pmap will also no longer
 * operate on the page.
 *
 * Beyond that the page is still reasonably 'normal'.  Freeing the page
 * will clear the flag.
 *
 * This routine is used by OBJT_PHYS objects - objects using unswappable
 * physical memory as backing store rather than swap-backed memory and
 * will eventually be extended to support 4MB unmanaged physical
 * mappings.
 *
 * Must be called with a critical section held.
 * Must be called with vm_token held.
 */
void
vm_page_unmanage(vm_page_t m)
{
        ASSERT_IN_CRIT_SECTION();
        ASSERT_LWKT_TOKEN_HELD(&vm_token);
        if ((m->flags & PG_UNMANAGED) == 0) {
                if (m->wire_count == 0)
                        vm_page_unqueue(m);
        }
        vm_page_flag_set(m, PG_UNMANAGED);
}

/*
 * Mark this page as wired down by yet another map, removing it from
 * paging queues as necessary.
 *
 * The page queues must be locked.
 * This routine may not block.
 */
void
vm_page_wire(vm_page_t m)
{
        /*
         * Only bump the wire statistics if the page is not already wired,
         * and only unqueue the page if it is on some queue (if it is unmanaged
         * it is already off the queues).  Don't do anything with fictitious
         * pages because they are always wired.
         */
        crit_enter();
        lwkt_gettoken(&vm_token);
        if ((m->flags & PG_FICTITIOUS) == 0) {
                if (m->wire_count == 0) {
                        if ((m->flags & PG_UNMANAGED) == 0)
                                vm_page_unqueue(m);
                        vmstats.v_wire_count++;
                }
                m->wire_count++;
                KASSERT(m->wire_count != 0,
                        ("vm_page_wire: wire_count overflow m=%p", m));
        }
        lwkt_reltoken(&vm_token);
        crit_exit();
}

/*
 * Release one wiring of this page, potentially enabling it to be paged again.
 *
 * Many pages placed on the inactive queue should actually go
 * into the cache, but it is difficult to figure out which.  What
 * we do instead, if the inactive target is well met, is to put
 * clean pages at the head of the inactive queue instead of the tail.
 * This will cause them to be moved to the cache more quickly and
 * if not actively re-referenced, freed more quickly.  If we just
 * stick these pages at the end of the inactive queue, heavy filesystem
 * meta-data accesses can cause an unnecessary paging load on memory bound
 * processes.  This optimization causes one-time-use metadata to be
 * reused more quickly.
 *
 * BUT, if we are in a low-memory situation we have no choice but to
 * put clean pages on the cache queue.
 *
 * A number of routines use vm_page_unwire() to guarantee that the page
 * will go into either the inactive or active queues, and will NEVER
 * be placed in the cache - for example, just after dirtying a page.
 * dirty pages in the cache are not allowed.
 *
 * The page queues must be locked.
 * This routine may not block.
 */
void
vm_page_unwire(vm_page_t m, int activate)
{
        crit_enter();
        lwkt_gettoken(&vm_token);
        if (m->flags & PG_FICTITIOUS) {
                /* do nothing */
        } else if (m->wire_count <= 0) {
                panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
        } else {
                if (--m->wire_count == 0) {
                        --vmstats.v_wire_count;
                        if (m->flags & PG_UNMANAGED) {
                                ;
                        } else if (activate) {
                                TAILQ_INSERT_TAIL(
                                    &vm_page_queues[PQ_ACTIVE].pl, m, pageq);
                                m->queue = PQ_ACTIVE;
                                vm_page_queues[PQ_ACTIVE].lcnt++;
                                vmstats.v_active_count++;
                        } else {
                                vm_page_flag_clear(m, PG_WINATCFLS);
                                TAILQ_INSERT_TAIL(
                                    &vm_page_queues[PQ_INACTIVE].pl, m, pageq);
                                m->queue = PQ_INACTIVE;
                                vm_page_queues[PQ_INACTIVE].lcnt++;
                                vmstats.v_inactive_count++;
                                ++vm_swapcache_inactive_heuristic;
                        }
                }
        }
        lwkt_reltoken(&vm_token);
        crit_exit();
}
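
/*
 * Illustrative sketch (not part of the original source): wiring pins a
 * page's physical storage for operations that must not lose it, e.g.
 * while a buffer or DMA transaction references the page:
 *
 *      vm_page_wire(m);                (page leaves the paging queues)
 *      ... access VM_PAGE_TO_PHYS(m) without fear of pageout ...
 *      vm_page_unwire(m, 1);           (requeue on PQ_ACTIVE when done)
 *
 * Passing activate=0 instead places the page at the tail of PQ_INACTIVE
 * so that it is reclaimed sooner.
 */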

/*
 * Move the specified page to the inactive queue.  If the page has
 * any associated swap, the swap is deallocated.
 *
 * Normally athead is 0 resulting in LRU operation.  athead is set
 * to 1 if we want this page to be 'as if it were placed in the cache',
 * except without unmapping it from the process address space.
 *
 * This routine may not block.
 * The caller must hold vm_token.
 */
static __inline void
_vm_page_deactivate(vm_page_t m, int athead)
{
        /*
         * Ignore if already inactive.
         */
        if (m->queue == PQ_INACTIVE)
                return;

        if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
                if ((m->queue - m->pc) == PQ_CACHE)
                        mycpu->gd_cnt.v_reactivated++;
                vm_page_flag_clear(m, PG_WINATCFLS);
                vm_page_unqueue(m);
                if (athead) {
                        TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl,
                                          m, pageq);
                } else {
                        TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl,
                                          m, pageq);
                        ++vm_swapcache_inactive_heuristic;
                }
                m->queue = PQ_INACTIVE;
                vm_page_queues[PQ_INACTIVE].lcnt++;
                vmstats.v_inactive_count++;
        }
}

/*
 * Attempt to deactivate a page.
 *
 * No requirements.
 */
void
vm_page_deactivate(vm_page_t m)
{
        crit_enter();
        lwkt_gettoken(&vm_token);
        _vm_page_deactivate(m, 0);
        lwkt_reltoken(&vm_token);
        crit_exit();
}

/*
 * Attempt to move a page to PQ_CACHE.
 * Returns 0 on failure, 1 on success
 *
 * No requirements.
 */
int
vm_page_try_to_cache(vm_page_t m)
{
        crit_enter();
        lwkt_gettoken(&vm_token);
        if (m->dirty || m->hold_count || m->busy || m->wire_count ||
            (m->flags & (PG_BUSY|PG_UNMANAGED))) {
                lwkt_reltoken(&vm_token);
                crit_exit();
                return(0);
        }
        vm_page_test_dirty(m);
        if (m->dirty) {
                lwkt_reltoken(&vm_token);
                crit_exit();
                return(0);
        }
        vm_page_cache(m);
        lwkt_reltoken(&vm_token);
        crit_exit();
        return(1);
}

/*
 * Attempt to free the page.  If we cannot free it, we do nothing.
 * 1 is returned on success, 0 on failure.
 *
 * No requirements.
 */
int
vm_page_try_to_free(vm_page_t m)
{
        crit_enter();
        lwkt_gettoken(&vm_token);
        if (m->dirty || m->hold_count || m->busy || m->wire_count ||
            (m->flags & (PG_BUSY|PG_UNMANAGED))) {
                lwkt_reltoken(&vm_token);
                crit_exit();
                return(0);
        }
        vm_page_test_dirty(m);
        if (m->dirty) {
                lwkt_reltoken(&vm_token);
                crit_exit();
                return(0);
        }
        vm_page_busy(m);
        vm_page_protect(m, VM_PROT_NONE);
        vm_page_free(m);
        lwkt_reltoken(&vm_token);
        crit_exit();
        return(1);
}

/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 *
 * The caller must hold vm_token.
 * This routine may not block.
 */
void
vm_page_cache(vm_page_t m)
{
        ASSERT_IN_CRIT_SECTION();
        ASSERT_LWKT_TOKEN_HELD(&vm_token);

        if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
            m->wire_count || m->hold_count) {
                kprintf("vm_page_cache: attempting to cache busy/held page\n");
                return;
        }

        /*
         * Already in the cache (and thus not mapped)
         */
        if ((m->queue - m->pc) == PQ_CACHE) {
                KKASSERT((m->flags & PG_MAPPED) == 0);
                return;
        }

        /*
         * Caller is required to test m->dirty, but note that the act of
         * removing the page from its maps can cause it to become dirty
         * on an SMP system due to another cpu running in usermode.
         */
        if (m->dirty) {
                panic("vm_page_cache: caching a dirty page, pindex: %ld",
                        (long)m->pindex);
        }

        /*
         * Remove all pmaps and indicate that the page is not
         * writeable or mapped.  Our vm_page_protect() call may
         * have blocked (especially w/ VM_PROT_NONE), so recheck
         * everything.
         */
        vm_page_busy(m);
        vm_page_protect(m, VM_PROT_NONE);
        vm_page_wakeup(m);
        if ((m->flags & (PG_BUSY|PG_UNMANAGED|PG_MAPPED)) || m->busy ||
            m->wire_count || m->hold_count) {
                /* do nothing */
        } else if (m->dirty) {
                vm_page_deactivate(m);
        } else {
                vm_page_unqueue_nowakeup(m);
                m->queue = PQ_CACHE + m->pc;
                vm_page_queues[m->queue].lcnt++;
                TAILQ_INSERT_TAIL(&vm_page_queues[m->queue].pl, m, pageq);
                vmstats.v_cache_count++;
                vm_page_free_wakeup();
        }
}

/*
 * vm_page_dontneed()
 *
 * Cache, deactivate, or do nothing as appropriate.  This routine
 * is typically used by madvise() MADV_DONTNEED.
 *
 * Generally speaking we want to move the page into the cache so
 * it gets reused quickly.  However, this can result in a silly syndrome
 * due to the page recycling too quickly.  Small objects will not be
 * fully cached.  On the other hand, if we move the page to the inactive
 * queue we wind up with a problem whereby very large objects
 * unnecessarily blow away our inactive and cache queues.
 *
 * The solution is to move the pages based on a fixed weighting.  We
 * either leave them alone, deactivate them, or move them to the cache,
 * where moving them to the cache has the highest weighting.
 * By forcing some pages into other queues we eventually force the
 * system to balance the queues, potentially recovering other unrelated
 * space from active.  The idea is to not force this to happen too
 * often.
 *
 * No requirements.
 */
void
vm_page_dontneed(vm_page_t m)
{
        static int dnweight;
        int dnw;
        int head;

        dnw = ++dnweight;

        /*
         * occasionally leave the page alone
         */
        crit_enter();
        lwkt_gettoken(&vm_token);
        if ((dnw & 0x01F0) == 0 ||
            m->queue == PQ_INACTIVE ||
            m->queue - m->pc == PQ_CACHE
        ) {
                if (m->act_count >= ACT_INIT)
                        --m->act_count;
                lwkt_reltoken(&vm_token);
                crit_exit();
                return;
        }

        if (m->dirty == 0)
                vm_page_test_dirty(m);

        if (m->dirty || (dnw & 0x0070) == 0) {
                /*
                 * Deactivate the page 3 times out of 32.
                 */
                head = 0;
        } else {
                /*
                 * Cache the page 28 times out of every 32.  Note that
                 * the page is deactivated instead of cached, but placed
                 * at the head of the queue instead of the tail.
                 */
                head = 1;
        }
        _vm_page_deactivate(m, head);
        lwkt_reltoken(&vm_token);
        crit_exit();
}
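
/*
 * Illustrative arithmetic (not part of the original source), assuming a
 * clean page and a uniformly advancing dnweight: (dnw & 0x01F0) == 0
 * holds for 16 of every 512 values, so the page is left alone 1 time in
 * 32.  (dnw & 0x0070) == 0 holds for 16 of every 128 values (4 in 32),
 * but 1 of those 4 was already consumed by the leave-alone case, so the
 * page is deactivated 3 times in 32 and pushed toward the cache (head
 * of the inactive queue) the remaining 28 times in 32, matching the
 * weights quoted in the comments above.
 */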

/*
 * Grab a page, blocking if it is busy and allocating a page if necessary.
 * A busy page is returned or NULL.
 *
 * If VM_ALLOC_RETRY is specified VM_ALLOC_NORMAL must also be specified.
 * If VM_ALLOC_RETRY is not specified the routine may return NULL after
 * having blocked.
 *
 * This routine may block, but if VM_ALLOC_RETRY is not set then NULL is
 * always returned if we had blocked.
 * This routine will never return NULL if VM_ALLOC_RETRY is set.
 * This routine may not be called from an interrupt.
 * The returned page may not be entirely valid.
 *
 * This routine may be called from mainline code without spl protection and
 * be guaranteed a busied page associated with the object at the specified
 * index.
 *
 * No requirements.
 */
vm_page_t
vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
{
        vm_page_t m;
        int generation;

        KKASSERT(allocflags &
                (VM_ALLOC_NORMAL|VM_ALLOC_INTERRUPT|VM_ALLOC_SYSTEM));
        crit_enter();
        lwkt_gettoken(&vm_token);
retrylookup:
        if ((m = vm_page_lookup(object, pindex)) != NULL) {
                if (m->busy || (m->flags & PG_BUSY)) {
                        generation = object->generation;

                        while ((object->generation == generation) &&
                                        (m->busy || (m->flags & PG_BUSY))) {
                                vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
                                tsleep(m, 0, "pgrbwt", 0);
                                if ((allocflags & VM_ALLOC_RETRY) == 0) {
                                        m = NULL;
                                        goto done;
                                }
                        }
                        goto retrylookup;
                } else {
                        vm_page_busy(m);
                        goto done;
                }
        }
        m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
        if (m == NULL) {
                vm_wait(0);
                if ((allocflags & VM_ALLOC_RETRY) == 0)
                        goto done;
                goto retrylookup;
        }
done:
        lwkt_reltoken(&vm_token);
        crit_exit();
        return(m);
}
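
/*
 * Illustrative sketch (not part of the original source): with
 * VM_ALLOC_NORMAL | VM_ALLOC_RETRY the grab never returns NULL, which
 * suits callers that must end up with a busied page:
 *
 *      m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 *      if (m->valid != VM_PAGE_BITS_ALL) {
 *              ... page is busied but not fully valid; zero or fill it ...
 *      }
 *      vm_page_wakeup(m);
 */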

/*
 * Mapping function for valid bits or for dirty bits in
 * a page.  May not block.
 *
 * Inputs are required to range within a page.
 *
 * No requirements.
 * Non blocking.
 */
int
vm_page_bits(int base, int size)
{
        int first_bit;
        int last_bit;

        KASSERT(
            base + size <= PAGE_SIZE,
            ("vm_page_bits: illegal base/size %d/%d", base, size)
        );

        if (size == 0)          /* handle degenerate case */
                return(0);

        first_bit = base >> DEV_BSHIFT;
        last_bit = (base + size - 1) >> DEV_BSHIFT;

        return ((2 << last_bit) - (1 << first_bit));
}
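
/*
 * Worked example (not part of the original source): with DEV_BSIZE 512
 * (DEV_BSHIFT 9), base = 512 and size = 1024 span device blocks 1 and 2:
 *
 *      first_bit = 512 >> 9              = 1
 *      last_bit  = (512 + 1024 - 1) >> 9 = 2
 *      result    = (2 << 2) - (1 << 1)   = 8 - 2 = 6 = 0b0110
 *
 * i.e. a mask with one bit per DEV_BSIZE chunk, here bits 1 and 2.
 */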

/*
 * Sets portions of a page valid and clean.  The arguments are expected
 * to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
 * of any partial chunks touched by the range.  The invalid portion of
 * such chunks will be zero'd.
 *
 * NOTE: When truncating a buffer vnode_pager_setsize() will automatically
 *       align base to DEV_BSIZE so as not to mark clean a partially
 *       truncated device block.  Otherwise the dirty page status might be
 *       lost.
 *
 * This routine may not block.
 *
 * (base + size) must be less than or equal to PAGE_SIZE.
 */
static void
_vm_page_zero_valid(vm_page_t m, int base, int size)
{
        int frag;
        int endoff;

        if (size == 0)  /* handle degenerate case */
                return;

        /*
         * If the base is not DEV_BSIZE aligned and the valid
         * bit is clear, we have to zero out a portion of the
         * first block.
         */
        if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
            (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
        ) {
                pmap_zero_page_area(
                    VM_PAGE_TO_PHYS(m),
                    frag,
                    base - frag
                );
        }

        /*
         * If the ending offset is not DEV_BSIZE aligned and the
         * valid bit is clear, we have to zero out a portion of
         * the last block.
         */
        endoff = base + size;

        if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
            (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
        ) {
                pmap_zero_page_area(
                    VM_PAGE_TO_PHYS(m),
                    endoff,
                    DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
                );
        }
}

/*
 * Set valid, clear dirty bits.  If validating the entire
 * page we can safely clear the pmap modify bit.  We also
 * use this opportunity to clear the PG_NOSYNC flag.  If a process
 * takes a write fault on a MAP_NOSYNC memory area the flag will
 * be set again.
 *
 * We set valid bits inclusive of any overlap, but we can only
 * clear dirty bits for DEV_BSIZE chunks that are fully within
 * the range.
 *
 * Page must be busied?
 * No other requirements.
 */
void
vm_page_set_valid(vm_page_t m, int base, int size)
{
        _vm_page_zero_valid(m, base, size);
        m->valid |= vm_page_bits(base, size);
}

/*
 * Set valid bits and clear dirty bits.
 *
 * NOTE: This function does not clear the pmap modified bit.
 *       Also note that e.g. NFS may use a byte-granular base
 *       and size.
 *
 * Page must be busied?
 * No other requirements.
 */
void
vm_page_set_validclean(vm_page_t m, int base, int size)
{
        int pagebits;

        _vm_page_zero_valid(m, base, size);
        pagebits = vm_page_bits(base, size);
        m->valid |= pagebits;
        m->dirty &= ~pagebits;
        if (base == 0 && size == PAGE_SIZE) {
                /*pmap_clear_modify(m);*/
                vm_page_flag_clear(m, PG_NOSYNC);
        }
}

/*
 * Set valid & dirty.  Used by buwrite()
 *
 * Page must be busied?
 * No other requirements.
 */
void
vm_page_set_validdirty(vm_page_t m, int base, int size)
{
        int pagebits;

        pagebits = vm_page_bits(base, size);
        m->valid |= pagebits;
        m->dirty |= pagebits;
        if (m->object)
                vm_object_set_writeable_dirty(m->object);
}

/*
 * Clear dirty bits.
 *
 * NOTE: This function does not clear the pmap modified bit.
 *       Also note that e.g. NFS may use a byte-granular base
 *       and size.
 *
 * Page must be busied?
 * No other requirements.
 */
void
vm_page_clear_dirty(vm_page_t m, int base, int size)
{
        m->dirty &= ~vm_page_bits(base, size);
        if (base == 0 && size == PAGE_SIZE) {
                /*pmap_clear_modify(m);*/
                vm_page_flag_clear(m, PG_NOSYNC);
        }
}

/*
 * Make the page all-dirty.
 *
 * Also make sure the related object and vnode reflect the fact that the
 * object may now contain a dirty page.
 *
 * Page must be busied?
 * No other requirements.
 */
void
vm_page_dirty(vm_page_t m)
{
#ifdef INVARIANTS
        int pqtype = m->queue - m->pc;
#endif
        KASSERT(pqtype != PQ_CACHE && pqtype != PQ_FREE,
                ("vm_page_dirty: page in free/cache queue!"));
        if (m->dirty != VM_PAGE_BITS_ALL) {
                m->dirty = VM_PAGE_BITS_ALL;
                if (m->object)
                        vm_object_set_writeable_dirty(m->object);
        }
}

/*
 * Invalidates DEV_BSIZE'd chunks within a page.  Both the
 * valid and dirty bits for the affected areas are cleared.
 *
 * Page must be busied?
 * Does not block.
 * No other requirements.
 */
void
vm_page_set_invalid(vm_page_t m, int base, int size)
{
        int bits;

        bits = vm_page_bits(base, size);
        m->valid &= ~bits;
        m->dirty &= ~bits;
        m->object->generation++;
}

/*
 * The kernel assumes that the invalid portions of a page contain
 * garbage, but such pages can be mapped into memory by user code.
 * When this occurs, we must zero out the non-valid portions of the
 * page so user code sees what it expects.
 *
 * Pages are most often semi-valid when the end of a file is mapped
 * into memory and the file's size is not page aligned.
 *
 * Page must be busied?
 * No other requirements.
 */
void
vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
{
        int b;
        int i;

        /*
         * Scan the valid bits looking for invalid sections that
         * must be zerod.  Invalid sub-DEV_BSIZE'd areas ( where the
         * valid bit may be set ) have already been zerod by
         * vm_page_set_validclean().
         */
        for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
                if (i == (PAGE_SIZE / DEV_BSIZE) ||
                    (m->valid & (1 << i))
                ) {
                        if (i > b) {
                                pmap_zero_page_area(
                                    VM_PAGE_TO_PHYS(m),
                                    b << DEV_BSHIFT,
                                    (i - b) << DEV_BSHIFT
                                );
                        }
                        b = i + 1;
                }
        }

        /*
         * setvalid is TRUE when we can safely set the zero'd areas
         * as being valid.  We can do this if there are no cache consistency
         * issues.  e.g. it is ok to do with UFS, but not ok to do with NFS.
         */
        if (setvalid)
                m->valid = VM_PAGE_BITS_ALL;
}

/*
 * Is a (partial) page valid?  Note that the case where size == 0
 * will return FALSE in the degenerate case where the page is entirely
 * invalid, and TRUE otherwise.
 *
 * Does not block.
 * No other requirements.
 */
int
vm_page_is_valid(vm_page_t m, int base, int size)
{
        int bits = vm_page_bits(base, size);

        if (m->valid && ((m->valid & bits) == bits))
                return 1;
        else
                return 0;
}

/*
 * update dirty bits from pmap/mmu.  May not block.
 *
 * Caller must hold vm_token if non-blocking operation desired.
 * No other requirements.
 */
void
vm_page_test_dirty(vm_page_t m)
{
        if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
                vm_page_dirty(m);
        }
}

/*
 * Issue an event on a VM page.  Corresponding action structures are
 * removed from the page's list and called.
 */
void
vm_page_event_internal(vm_page_t m, vm_page_event_t event)
{
        struct vm_page_action *scan, *next;

        LIST_FOREACH_MUTABLE(scan, &m->action_list, entry, next) {
                if (scan->event == event) {
                        scan->event = VMEVENT_NONE;
                        LIST_REMOVE(scan, entry);
                        scan->func(m, scan);
                }
        }
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{
        db_printf("vmstats.v_free_count: %d\n", vmstats.v_free_count);
        db_printf("vmstats.v_cache_count: %d\n", vmstats.v_cache_count);
        db_printf("vmstats.v_inactive_count: %d\n", vmstats.v_inactive_count);
        db_printf("vmstats.v_active_count: %d\n", vmstats.v_active_count);
        db_printf("vmstats.v_wire_count: %d\n", vmstats.v_wire_count);
        db_printf("vmstats.v_free_reserved: %d\n", vmstats.v_free_reserved);
        db_printf("vmstats.v_free_min: %d\n", vmstats.v_free_min);
        db_printf("vmstats.v_free_target: %d\n", vmstats.v_free_target);
        db_printf("vmstats.v_cache_min: %d\n", vmstats.v_cache_min);
        db_printf("vmstats.v_inactive_target: %d\n", vmstats.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
        int i;
        db_printf("PQ_FREE:");
        for (i = 0; i < PQ_L2_SIZE; i++) {
                db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
        }
        db_printf("\n");

        db_printf("PQ_CACHE:");
        for (i = 0; i < PQ_L2_SIZE; i++) {
                db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
        }
        db_printf("\n");

        db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
                vm_page_queues[PQ_ACTIVE].lcnt,
                vm_page_queues[PQ_INACTIVE].lcnt);
}
#endif /* DDB */