/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_page.h,v 1.75.2.8 2002/03/06 01:07:09 dillon Exp $
 */
/*
 *	Resident memory system definitions.
 */
#ifndef _VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

#if !defined(KLD_MODULE) && defined(_KERNEL)
#include "opt_vmpage.h"
#endif

#include <sys/types.h>

#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#include <machine/atomic.h>

#include <sys/systm.h>

#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif

#include <machine/vmparam.h>
typedef enum vm_page_event { VMEVENT_NONE, VMEVENT_COW } vm_page_event_t;
struct vm_page_action {
	LIST_ENTRY(vm_page_action) entry;
	vm_page_event_t	event;
	void		(*func)(struct vm_page *,
				struct vm_page_action *);
};

typedef struct vm_page_action *vm_page_action_t;
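
/*
 * Usage sketch (editor illustration, not part of the original API):
 * arrange for a callback when a page undergoes copy-on-write.  The
 * handler name is hypothetical; vm_page_register_action() and
 * vm_page_unregister_action() are declared later in this file.
 *
 *	static void my_cow_func(struct vm_page *m, struct vm_page_action *ap);
 *	static struct vm_page_action act;
 *
 *	act.event = VMEVENT_COW;
 *	act.func = my_cow_func;
 *	vm_page_register_action(&act, VMEVENT_COW);
 *	...
 *	vm_page_unregister_action(&act);
 */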
/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).
 *
 *	The 'valid' and 'dirty' fields are distinct.  A page may have dirty
 *	bits set without having associated valid bits set.  This is used by
 *	NFS to implement piecemeal writes.
 */
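
/*
 * Illustration (editor sketch): an NFS-style partial write can mark just
 * the written DEV_BSIZE chunks.  vm_page_bits(), declared below, converts
 * a (base, size) byte range within the page into the chunk bitmask used
 * by the 'valid' and 'dirty' fields.
 *
 *	vm_page_set_dirty(m, 0, 512);	// dirty the first chunk only...
 *	// ...without touching m->valid; the chunk is dirty but not valid,
 *	// which is exactly the piecemeal-write state described above.
 */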
TAILQ_HEAD(pglist, vm_page);
int rb_vm_page_compare(struct vm_page *, struct vm_page *);

struct vm_page_rb_tree;
RB_PROTOTYPE2(vm_page_rb_tree, vm_page, rb_entry,
	      rb_vm_page_compare, vm_pindex_t);
struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* vm_page_queues[] list (P)	*/
	RB_ENTRY(vm_page) rb_entry;	/* Red-Black tree based at object */

	struct vm_object *object;	/* which object am I in (O,P)	*/
	vm_pindex_t pindex;		/* offset into object (O,P)	*/
	vm_paddr_t phys_addr;		/* physical address of page	*/
	struct md_page md;		/* machine dependent stuff	*/
	u_short	queue;			/* page queue index		*/
	u_short	pc;			/* page color			*/
	u_char	act_count;		/* page usage count		*/
	u_char	busy;			/* page busy count		*/

	u_int32_t flags;		/* see below			*/
	u_int	wire_count;		/* wired down maps refs (P)	*/
	int	hold_count;		/* page hold count		*/

	/*
	 * NOTE that these must support one bit per DEV_BSIZE in a page!!!
	 * so, on normal X86 kernels, they must be at least 8 bits wide.
	 */
	u_char	valid;			/* map of valid DEV_BSIZE chunks */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */

	int	ku_pagecnt;		/* kmalloc helper */
#ifdef VM_PAGE_DEBUG
	const char *busy_func;
	int	busy_line;
#endif
};
#ifdef VM_PAGE_DEBUG
#define VM_PAGE_DEBUG_EXT(name)	name ## _debug
#define VM_PAGE_DEBUG_ARGS	, const char *func, int lineno
#else
#define VM_PAGE_DEBUG_EXT(name)	name
#define VM_PAGE_DEBUG_ARGS
#endif
#ifndef __VM_PAGE_T_DEFINED__
#define __VM_PAGE_T_DEFINED__
typedef struct vm_page *vm_page_t;
#endif
/*
 * Page coloring parameters.  We default to a middle of the road optimization.
 * Larger selections would not really hurt us but if a machine does not have
 * a lot of memory it could cause vm_page_alloc() to eat more cpu cycles
 * looking for free pages.
 *
 * Page coloring cannot be disabled.  Modules do not have access to most PQ
 * constants because they can change between builds.
 */
#if defined(_KERNEL) && !defined(KLD_MODULE)

#if !defined(PQ_CACHESIZE)
#define PQ_CACHESIZE 256	/* max is 1024 (MB) */
#endif

#if PQ_CACHESIZE >= 1024
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 256	/* A number of colors opt for 1M cache */

#elif PQ_CACHESIZE >= 512
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 128	/* A number of colors opt for 512K cache */

#elif PQ_CACHESIZE >= 256
#define PQ_PRIME1 13	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 7	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 64	/* A number of colors opt for 256K cache */

#elif PQ_CACHESIZE >= 128
#define PQ_PRIME1 9	/* Produces a good PQ_L2_SIZE/3 + PQ_PRIME1 */
#define PQ_PRIME2 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 32	/* A number of colors opt for 128k cache */

#else
#define PQ_PRIME1 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 3	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 16	/* A reasonable number of colors (opt for 64K cache) */
#endif
#define PQ_L2_MASK	(PQ_L2_SIZE - 1)
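
/*
 * Editor sketch (assumes only the constants above): derive a free-queue
 * color from an object offset.  The allocator can additionally stride by
 * the PQ_PRIME constants when probing nearby colors for a free page.
 */
static __inline int
vm_page_color_sketch(vm_pindex_t pindex)
{
	return ((int)pindex & PQ_L2_MASK);
}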
#endif /* KERNEL && !KLD_MODULE */
/*
 * The queue array is always based on PQ_MAXL2_SIZE regardless of the actual
 * cache size chosen in order to present a uniform interface for modules.
 */
#define PQ_MAXL2_SIZE	256	/* fixed maximum (in pages) / module compat */
#if PQ_L2_SIZE > PQ_MAXL2_SIZE
#error "Illegal PQ_L2_SIZE"
#endif
#define PQ_NONE		0
#define PQ_FREE		1
#define PQ_INACTIVE	(1 + 1*PQ_MAXL2_SIZE)
#define PQ_ACTIVE	(2 + 1*PQ_MAXL2_SIZE)
#define PQ_CACHE	(3 + 1*PQ_MAXL2_SIZE)
#define PQ_HOLD		(3 + 2*PQ_MAXL2_SIZE)
#define PQ_COUNT	(4 + 2*PQ_MAXL2_SIZE)
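
/*
 * Worked example with the fixed PQ_MAXL2_SIZE of 256: the PQ_FREE queues
 * span indices 1..256 (one queue per color), PQ_INACTIVE is 257,
 * PQ_ACTIVE is 258, the PQ_CACHE queues span 259..514 (again one per
 * color), PQ_HOLD is 515, and PQ_COUNT is 516.
 */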
struct rb_vm_page_scan_info {
	vm_pindex_t	start_pindex;
	vm_pindex_t	end_pindex;
	vm_pindex_t	backing_offset_index;
	struct vm_object *object;
	struct vm_object *backing_object;
	struct vm_page	*mpte;
};

int rb_vm_page_scancmp(struct vm_page *, void *);
struct vpgqueues {
	struct pglist pl;
	int	*cnt;
	int	lcnt;
	int	flipflop;	/* probably not the best place */
	struct spinlock spin;
	char	unused[64 - sizeof(struct pglist) -
		       sizeof(int *) - sizeof(int) * 2];
};
extern struct vpgqueues vm_page_queues[PQ_COUNT];
/*
 * These are the flags defined for vm_page.
 *
 *  PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 *  not under PV management but otherwise should be treated as a
 *  normal page.  Pages not under PV management cannot be paged out
 *  via the object/vm_page_t because there is no knowledge of their
 *  pte mappings, nor can they be removed from their objects via
 *  the object, and such pages are also not on any PQ queue.  The
 *  PG_MAPPED and PG_WRITEABLE flags are not applicable.
 *
 *  PG_MAPPED only applies to managed pages, indicating whether the page
 *  is mapped onto one or more pmaps.  A page might still be mapped to
 *  special pmaps in an unmanaged fashion, for example when mapped into a
 *  buffer cache buffer, without setting PG_MAPPED.
 *
 *  PG_WRITEABLE indicates that there may be a writeable managed pmap entry
 *  somewhere, and that the page can be dirtied by hardware at any time
 *  and may have to be tested for that.  The modified bit in unmanaged
 *  mappings or in the special clean map is not tested.
 *
 *  PG_SWAPPED indicates that the page is backed by a swap block.  Any
 *  VM object type other than OBJT_DEFAULT can have swap-backed pages now.
 *
 *  PG_SBUSY is set when m->busy != 0.  PG_SBUSY and m->busy are only
 *  modified when the page is PG_BUSY.
 */
#define PG_BUSY		0x00000001	/* page is in transit (O) */
#define PG_WANTED	0x00000002	/* someone is waiting for page (O) */
#define PG_WINATCFLS	0x00000004	/* flush dirty page on inactive q */
#define PG_FICTITIOUS	0x00000008	/* physical page doesn't exist (O) */
#define PG_WRITEABLE	0x00000010	/* page is writeable */
#define PG_MAPPED	0x00000020	/* page is mapped (managed) */
#define PG_ZERO		0x00000040	/* page is zeroed */
#define PG_REFERENCED	0x00000080	/* page has been referenced */
#define PG_CLEANCHK	0x00000100	/* page will be checked for cleaning */
#define PG_SWAPINPROG	0x00000200	/* swap I/O in progress on page */
#define PG_NOSYNC	0x00000400	/* do not collect for syncer */
#define PG_UNMANAGED	0x00000800	/* No PV management for page */
#define PG_MARKER	0x00001000	/* special queue marker page */
#define PG_RAM		0x00002000	/* read ahead mark */
#define PG_SWAPPED	0x00004000	/* backed by swap */
#define PG_NOTMETA	0x00008000	/* do not back with swap */
#define PG_ACTIONLIST	0x00010000	/* lookaside action list present */
#define PG_SBUSY	0x00020000	/* soft-busy also set */
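
/*
 * Example (editor sketch): PG_WRITEABLE only means a writeable pmap entry
 * may exist, so a cleaning pass must re-test the page before trusting
 * m->dirty.  vm_page_test_dirty() is declared below.
 *
 *	if (m->flags & PG_WRITEABLE)
 *		vm_page_test_dirty(m);	// pull pmap modified bits in
 *	if (m->dirty)
 *		...queue the page for a write...
 */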
#define ACT_DECLINE		1
#define ACT_ADVANCE		3
/*
 *	Each pageable resident page falls into one of four lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation. Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active" i.e. they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed
 */
extern int vm_page_zero_count;
extern struct vm_page *vm_page_array;	/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */
#define VM_PAGE_TO_PHYS(entry)	\
	((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa)	\
	(&vm_page_array[atop(pa) - first_page])
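
/*
 * Editor sketch: the two macros invert one another for any physical
 * address covered by vm_page_array; the result is the page-aligned base
 * of 'pa' because atop() truncates.
 */
static __inline vm_paddr_t
vm_page_phys_roundtrip_sketch(vm_paddr_t pa)
{
	return (VM_PAGE_TO_PHYS(PHYS_TO_VM_PAGE(pa)));
}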
/*
 * Functions implemented as macros
 */

static __inline void
vm_page_flag_set(vm_page_t m, unsigned int bits)
{
	atomic_set_int(&(m)->flags, bits);
}

static __inline void
vm_page_flag_clear(vm_page_t m, unsigned int bits)
{
	atomic_clear_int(&(m)->flags, bits);
}
/*
 * Wakeup anyone waiting for the page after potentially unbusying
 * (hard or soft) or doing other work on a page that might make a
 * waiter ready.  The setting of PG_WANTED is integrated into the
 * related flags and it can't be set once the flags are already
 * clear, so there should be no races here.
 */
static __inline void
vm_page_flash(vm_page_t m)
{
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
}
#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif
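
/*
 * With PAGE_SIZE 4096 and DEV_BSIZE 512 there are 4096/512 == 8 chunks
 * per page, hence the 8-bit mask 0xff when every chunk is valid/dirty.
 */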
/*
 * Note: the code will always use nominally free pages from the free list
 * before trying other flag-specified sources.
 *
 * At least one of VM_ALLOC_NORMAL|VM_ALLOC_SYSTEM|VM_ALLOC_INTERRUPT
 * must be specified.  VM_ALLOC_RETRY may only be specified if VM_ALLOC_NORMAL
 * is also specified.
 */
#define VM_ALLOC_NORMAL		0x01	/* ok to use cache pages */
#define VM_ALLOC_SYSTEM		0x02	/* ok to exhaust most of free list */
#define VM_ALLOC_INTERRUPT	0x04	/* ok to exhaust entire free list */
#define VM_ALLOC_ZERO		0x08	/* req pre-zero'd memory if avail */
#define VM_ALLOC_QUICK		0x10	/* like NORMAL but do not use cache */
#define VM_ALLOC_FORCE_ZERO	0x20	/* zero page even if already valid */
#define VM_ALLOC_NULL_OK	0x40	/* ok to return NULL on collision */
#define VM_ALLOC_RETRY		0x80	/* indefinite block (vm_page_grab()) */
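
/*
 * Usage sketch (editor illustration): block indefinitely for a busied,
 * pre-zeroed page.  VM_ALLOC_RETRY is only legal together with
 * VM_ALLOC_NORMAL; vm_page_grab() and vm_page_wakeup() are declared
 * below.
 *
 *	m = vm_page_grab(object, pindex,
 *			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 *	...operate on the PG_BUSY page...
 *	vm_page_wakeup(m);
 */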
void vm_page_queue_spin_lock(vm_page_t);
void vm_page_queues_spin_lock(u_short);
void vm_page_and_queue_spin_lock(vm_page_t);

void vm_page_queue_spin_unlock(vm_page_t);
void vm_page_queues_spin_unlock(u_short);
void vm_page_and_queue_spin_unlock(vm_page_t m);
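
/*
 * Editor sketch (hypothetical helper, not part of the original API):
 * count the pages on one page queue while holding its spinlock, using
 * only the queue-lock functions declared above and the vm_page_queues[]
 * array declared earlier.
 */
static __inline int
vm_page_queue_count_sketch(u_short queue)
{
	struct vpgqueues *vpq = &vm_page_queues[queue];
	vm_page_t m;
	int count = 0;

	vm_page_queues_spin_lock(queue);
	TAILQ_FOREACH(m, &vpq->pl, pageq)
		++count;
	vm_page_queues_spin_unlock(queue);
	return (count);
}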
void vm_page_io_finish(vm_page_t m);
void vm_page_io_start(vm_page_t m);
void vm_page_wakeup(vm_page_t m);
void vm_page_hold(vm_page_t);
void vm_page_unhold(vm_page_t);
void vm_page_activate (vm_page_t);
vm_page_t vm_page_alloc (struct vm_object *, vm_pindex_t, int);
vm_page_t vm_page_grab (struct vm_object *, vm_pindex_t, int);
void vm_page_cache (vm_page_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_dontneed (vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_deactivate_locked (vm_page_t);
int vm_page_insert (vm_page_t, struct vm_object *, vm_pindex_t);
vm_page_t vm_page_lookup (struct vm_object *, vm_pindex_t);
vm_page_t VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_wait)(
		struct vm_object *, vm_pindex_t,
		int, const char * VM_PAGE_DEBUG_ARGS);
vm_page_t VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_try)(
		struct vm_object *, vm_pindex_t,
		int, int * VM_PAGE_DEBUG_ARGS);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, struct vm_object *, vm_pindex_t);
void vm_page_startup (void);
void vm_page_unmanage (vm_page_t);
void vm_page_unwire (vm_page_t, int);
void vm_page_wire (vm_page_t);
void vm_page_unqueue (vm_page_t);
void vm_page_unqueue_nowakeup (vm_page_t);
vm_page_t vm_page_next (vm_page_t);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_set_validdirty (vm_page_t, int, int);
void vm_page_set_valid (vm_page_t, int, int);
void vm_page_set_dirty (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
int vm_page_bits (int, int);
vm_page_t vm_page_list_find(int basequeue, int index, boolean_t prefer_zero);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
vm_page_t vm_page_free_fromq_fast(void);
void vm_page_event_internal(vm_page_t, vm_page_event_t);
void vm_page_dirty(vm_page_t m);
void vm_page_register_action(vm_page_action_t action, vm_page_event_t event);
void vm_page_unregister_action(vm_page_action_t action);
void vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg);
void VM_PAGE_DEBUG_EXT(vm_page_busy_wait)(vm_page_t m,
		int also_m_busy, const char *wmsg VM_PAGE_DEBUG_ARGS);
int VM_PAGE_DEBUG_EXT(vm_page_busy_try)(vm_page_t m,
		int also_m_busy VM_PAGE_DEBUG_ARGS);
#ifdef VM_PAGE_DEBUG
#define vm_page_lookup_busy_wait(object, pindex, alsob, msg)		\
	vm_page_lookup_busy_wait_debug(object, pindex, alsob, msg,	\
					__func__, __LINE__)

#define vm_page_lookup_busy_try(object, pindex, alsob, errorp)		\
	vm_page_lookup_busy_try_debug(object, pindex, alsob, errorp,	\
					__func__, __LINE__)

#define vm_page_busy_wait(m, alsob, msg)				\
	vm_page_busy_wait_debug(m, alsob, msg, __func__, __LINE__)

#define vm_page_busy_try(m, alsob)					\
	vm_page_busy_try_debug(m, alsob, __func__, __LINE__)
#endif
/*
 * Reduce the protection of a page.  This routine never raises the
 * protection and therefore can be safely called if the page is already
 * at VM_PROT_NONE (it will effectively be a NOP).
 *
 * VM_PROT_NONE will remove all user mappings of a page.  This is often
 * necessary when a page changes state (for example, turns into a
 * copy-on-write page or needs to be frozen for write I/O) in order to
 * force a fault, or to force a page's dirty bits to be synchronized and
 * avoid hardware (modified/accessed) bit update races with pmap changes.
 *
 * Since 'prot' is usually a constant, this inline usually winds up
 * optimizing out the primary conditional.
 *
 * WARNING: VM_PROT_NONE can block, but will loop until all mappings have
 *	    been cleared.  Callers should be aware that other page related
 *	    elements might have changed, however.
 */
static __inline void
vm_page_protect(vm_page_t m, int prot)
{
	KKASSERT(m->flags & PG_BUSY);
	if (prot == VM_PROT_NONE) {
		if (m->flags & (PG_WRITEABLE|PG_MAPPED)) {
			pmap_page_protect(m, VM_PROT_NONE);
			/* PG_WRITEABLE & PG_MAPPED cleared by call */
		}
	} else if ((prot == VM_PROT_READ) && (m->flags & PG_WRITEABLE)) {
		pmap_page_protect(m, VM_PROT_READ);
		/* PG_WRITEABLE cleared by call */
	}
}
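
/*
 * Example (editor sketch): a pageout-style sequence.  Busy the page,
 * downgrade to read-only so outstanding hardware modified bits are
 * collected, then inspect the dirty mask.
 *
 *	vm_page_busy_wait(m, 0, "pgprot");
 *	vm_page_protect(m, VM_PROT_READ);
 *	vm_page_test_dirty(m);
 *	if (m->dirty)
 *		...flush the page...
 *	vm_page_wakeup(m);
 */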
/*
 * Zero-fill the specified page.  The entire contents of the page will be
 * zeroed.
 */
static __inline boolean_t
vm_page_zero_fill(vm_page_t m)
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}
/*
 * Copy the contents of src_m to dest_m.  The pages must be stable but spl
 * and other protections depend on context.
 */
static __inline void
vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
	dest_m->dirty = VM_PAGE_BITS_ALL;
}
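
/*
 * Example (editor sketch): resolving a copy-on-write fault.  Both pages
 * must be held stable (e.g. PG_BUSY) across the copy; vm_page_copy()
 * leaves the destination fully valid and fully dirty so it will be paged
 * out rather than refetched from backing store.
 *
 *	vm_page_copy(src_m, dest_m);
 *	vm_page_protect(src_m, VM_PROT_READ);	// force future write faults
 */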
/*
 * Free a page.  The page must be marked BUSY.
 *
 * Always clear PG_ZERO when freeing a page, which ensures the flag is not
 * set unless we are absolutely certain the page is zeroed.  This is
 * particularly important when the vm_page_alloc*() code moves pages from
 * PQ_CACHE to PQ_FREE.
 */
static __inline void
vm_page_free(vm_page_t m)
{
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_free_toq(m);
}
/*
 * Free a page to the zeroed-pages queue.  The caller must ensure that the
 * page has been zeroed.
 */
static __inline void
vm_page_free_zero(vm_page_t m)
{
#ifdef PMAP_DEBUG
	char *p = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	int i;

	for (i = 0; i < PAGE_SIZE; i++) {
		if (p[i] != 0)
			panic("non-zero page in vm_page_free_zero()");
	}
#endif
	vm_page_flag_set(m, PG_ZERO);
	vm_page_free_toq(m);
}
/*
 * Set page to not be dirty.  Note: does not clear pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}
#endif /* !_VM_VM_PAGE_H_ */