/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2003-2019 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory system definitions.
 */

#ifndef _VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_PMAP_H_
#include <vm/pmap.h>
#endif
#include <machine/atomic.h>

#ifdef _KERNEL

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif

#ifdef __x86_64__
#include <machine/vmparam.h>
#endif

#endif

/*
 * The vm_page structure is the heart of the entire system.  It's fairly
 * bulky, eating 3.125% of available memory (128 bytes vs 4K page size).
 * Most normal uses of the structure, representing physical memory, use
 * the type-stable vm_page_array[].  Device mappings exposed to mmap()
 * (such as GPUs) generally use temporary vm_page's outside of this array
 * and will be flagged FICTITIOUS.  Devices which use the kernel's contig
 * memory allocator get normal pages, but for convenience the pages will
 * be temporarily flagged as FICTITIOUS.
 *
 * Soft-busying or hard-busying guarantees a stable m->object, m->pindex,
 * and m->valid field.  A page cannot be validated or invalidated unless
 * hard-busied.
 *
 * The page must be hard-busied to make the following changes:
 *
 *	(1) Any change to m->object or m->pindex (also requires the
 *	    related object to be exclusively locked).
 *
 *	(2) Any transition of m->wire_count to 0 or from 0.  Other
 *	    transitions (e.g. 2->1, 1->2, etc) are allowed without
 *	    locks.
 *
 *	(3) Any change to m->valid.
 *
 *	(4) Clearing PG_MAPPED or PG_WRITEABLE (note that because of
 *	    this, these bits may be left lazily set until they can
 *	    be cleared later on).
 *
 * Most other fields of the vm_page can change at any time with certain
 * restrictions.
 *
 *	(1) PG_WRITEABLE and PG_MAPPED may be set with the page soft-busied
 *	    or hard-busied.
 *
 *	(2) m->dirty may be set to VM_PAGE_BITS_ALL by a page fault at
 *	    any time if PG_WRITEABLE is flagged.  Tests of m->dirty are
 *	    only tentative until all writeable mappings of the page are
 *	    removed.  This may occur unlocked.  A hard-busy is required
 *	    if modifying m->dirty under other conditions.
 *
 *	(3) PG_REFERENCED may be set at any time by the pmap code, unlocked,
 *	    to synchronize the [A]ccessed bit if PG_MAPPED is flagged.
 *	    A hard-busy is required at any other time.
 *
 *	(4) hold_count can be incremented or decremented at any time,
 *	    including transitions to or from 0.  Holding a page via
 *	    vm_page_hold() does NOT stop major changes from being made
 *	    to the page, but WILL prevent the page from being freed
 *	    or reallocated.  If the hold is emplaced with the page in
 *	    a known state it can prevent the underlying data from being
 *	    destroyed.
 *
 *	(5) Each individual flag may have a different behavior.  Some flags
 *	    can be set or cleared at any time, some require hard-busying,
 *	    etc.
 *
 * Moving the page between queues (aka m->pageq and m->queue) requires
 * m->spin to be exclusively locked first, and then also the spinlock related
 * to the queue.
 *
 *	(1) This is the only remaining use of m->spin.
 *
 *	(2) There is one special case: the pageout daemon is allowed to
 *	    reorder the page within the same queue while holding only the
 *	    queue's spin-lock.
 *
 * Please see the flags section below for flag documentation.
 */
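
/*
 * Example (illustrative sketch only, not part of this header's API): the
 * hard-busy discipline described above, using vm_page_lookup(),
 * vm_page_busy_wait() and vm_page_wakeup() declared later in this file.
 * The object locking shown (vm_object_hold()/vm_object_drop()) is an
 * assumption about the caller's context, taken from vm_object.h.
 *
 *	vm_page_t m;
 *
 *	vm_object_hold(object);			// caller locks the object
 *	m = vm_page_lookup(object, pindex);
 *	if (m) {
 *		vm_page_busy_wait(m, FALSE, "pgexam");	// hard-busy the page
 *		// m->object, m->pindex and m->valid are now stable and
 *		// m->valid / m->dirty may be modified here.
 *		vm_page_wakeup(m);		// release the hard-busy
 *	}
 *	vm_object_drop(object);
 *
 * A vm_page_hold()/vm_page_unhold() pair, by contrast, only prevents the
 * page from being freed or reallocated; it does not stabilize the fields
 * listed above.
 */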
TAILQ_HEAD(pglist, vm_page);

struct vm_object;

int rb_vm_page_compare(struct vm_page *, struct vm_page *);

struct vm_page_rb_tree;
RB_PROTOTYPE2(vm_page_rb_tree, vm_page, rb_entry,
	      rb_vm_page_compare, vm_pindex_t);
RB_HEAD(vm_page_rb_tree, vm_page);

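/*
 * Example (illustrative sketch): pages are indexed by pindex within their
 * object using this red-black tree.  RB_PROTOTYPE2() additionally declares
 * a lookup-by-key helper, vm_page_rb_tree_RB_LOOKUP().  The tree head used
 * below is a hypothetical local; the real head is embedded in the vm_object.
 *
 *	struct vm_page_rb_tree root = RB_INITIALIZER(&root);
 *	vm_page_t m = ...;			// m->pindex already set
 *
 *	RB_INSERT(vm_page_rb_tree, &root, m);
 *	m = vm_page_rb_tree_RB_LOOKUP(&root, pindex);
 */
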
struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* vm_page_queues[] list */
	RB_ENTRY(vm_page) rb_entry;	/* Red-Black tree based at object */
	struct spinlock	spin;
	struct md_page	md;		/* machine dependent stuff */
	uint32_t	wire_count;	/* wired down maps refs (P) */
	uint32_t	busy_count;	/* soft-busy and hard-busy */
	int		hold_count;	/* page hold count */
	int		ku_pagecnt;	/* help kmalloc() w/oversized allocs */
	struct vm_object *object;	/* which object am I in */
	vm_pindex_t	pindex;		/* offset into object */
	vm_paddr_t	phys_addr;	/* physical address of page */
	uint16_t	queue;		/* page queue index */
	uint16_t	pc;		/* page color */
	uint8_t		act_count;	/* page usage count */
	uint8_t		pat_mode;	/* hardware page attribute */
	uint8_t		valid;		/* map of valid DEV_BSIZE chunks */
	uint8_t		dirty;		/* map of dirty DEV_BSIZE chunks */
	uint32_t	flags;		/* see below */
	int		unused01;	/* available */
	/* 128 bytes */
#ifdef VM_PAGE_DEBUG
	const char	*busy_func;
	int		busy_line;
#endif
};

#define PBUSY_LOCKED		0x80000000U
#define PBUSY_WANTED		0x40000000U
#define PBUSY_SWAPINPROG	0x20000000U
#define PBUSY_MASK		0x1FFFFFFFU
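
/*
 * Example (illustrative sketch): busy_count packs the hard-busy lock bit,
 * a wanted/wakeup bit, the swap-in-progress bit, and a soft-busy count in
 * the low bits.  Whether a page is currently hard-busied or soft-busied can
 * therefore be tested roughly as follows (this helper is hypothetical, not
 * part of this header):
 *
 *	static __inline int
 *	vm_page_examine_busy(vm_page_t m)
 *	{
 *		uint32_t busy = m->busy_count;
 *
 *		if (busy & PBUSY_LOCKED)	// hard-busied (exclusive)
 *			return (1);
 *		if (busy & PBUSY_MASK)		// one or more soft-busies
 *			return (2);
 *		return (0);			// not busied at all
 *	}
 */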

#ifndef __VM_PAGE_T_DEFINED__
#define __VM_PAGE_T_DEFINED__
typedef struct vm_page *vm_page_t;
#endif

/*
 * Page coloring parameters.  We use generous parameters designed to
 * statistically spread pages over available cpu cache space.  This has
 * become less important over time as cache associativity has increased
 * on modern cpus, but we still use the core algorithm to help reduce
 * lock contention between cpus.
 *
 * Page coloring cannot be disabled.
 *
 * In today's world of many-core systems, we must be able to provide enough VM
 * page queues for each logical cpu thread to cover the L1/L2/L3 cache set
 * associativity.  If we don't, the cpu caches will not be properly utilized.
 *
 * Using 2048 allows 8-way set-assoc with 256 logical cpus, but seems to
 * have a number of downsides when queues are asymmetrically starved.
 *
 * Using 1024 allows 4-way set-assoc with 256 logical cpus, and more with
 * fewer cpus.
 */
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 1024	/* Must be enough for maximal ncpus x hw set-assoc */
#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

#define PQ_NONE		0
#define PQ_FREE		(1 + 0*PQ_L2_SIZE)
#define PQ_INACTIVE	(1 + 1*PQ_L2_SIZE)
#define PQ_ACTIVE	(1 + 2*PQ_L2_SIZE)
#define PQ_CACHE	(1 + 3*PQ_L2_SIZE)
#define PQ_HOLD		(1 + 4*PQ_L2_SIZE)
#define PQ_COUNT	(1 + 5*PQ_L2_SIZE)
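
/*
 * Example (illustrative sketch): each queue type above is really an array
 * of PQ_L2_SIZE per-color queues, so a vm_page_queues[] index is formed
 * from a base queue plus the page's color.  The helper below is
 * hypothetical and only demonstrates the arithmetic:
 *
 *	static __inline struct vpgqueues *
 *	vm_page_free_queue_of(vm_page_t m)
 *	{
 *		return (&vm_page_queues[PQ_FREE + (m->pc & PQ_L2_MASK)]);
 *	}
 */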

/*
 * Scan support
 */
struct vm_map;

struct rb_vm_page_scan_info {
	vm_pindex_t	start_pindex;
	vm_pindex_t	end_pindex;
	int		limit;
	int		desired;
	int		error;
	int		pagerflags;
	int		count;
	int		unused01;
	vm_offset_t	addr;
	struct vm_map_entry *entry;
	struct vm_object *object;
	struct vm_object *dest_object;
	struct vm_page	*mpte;
	struct pmap	*pmap;
	struct vm_map	*map;
};

int rb_vm_page_scancmp(struct vm_page *, void *);

struct vpgqueues {
	struct spinlock spin;
	struct pglist pl;
	long	lcnt;
	long	adds;		/* heuristic, add operations */
	int	cnt_offset;	/* offset into vmstats structure (int) */
	int	lastq;		/* heuristic, skip empty queues */
} __aligned(64);

extern struct vpgqueues vm_page_queues[PQ_COUNT];

/*
 * The m->flags field is generally categorized as follows.  Unless otherwise
 * noted, a flag may only be updated while the page is hard-busied.
 *
 * PG_UNQUEUED	   - This prevents the page from being placed on any queue.
 *
 * PG_FICTITIOUS   - This indicates to the pmap subsystem that the
 *		     page might not be reverse-addressable via
 *		     PHYS_TO_VM_PAGE().  The vm_page_t might be
 *		     temporary and not exist in the vm_page_array[].
 *
 *		     This also generally means that the pmap subsystem
 *		     cannot synchronize the [M]odified and [A]ccessed
 *		     bits with the related vm_page_t, and in fact that
 *		     there might not even BE a related vm_page_t.
 *
 *		     Unlike the old system, the new pmap subsystem is
 *		     able to do bulk operations on virtual address ranges
 *		     containing fictitious pages, and can also pick out
 *		     specific fictitious pages by matching m->phys_addr
 *		     if you supply a fake vm_page to it.
 *
 *		     Fictitious pages can still be organized into vm_objects
 *		     if desired.
 *
 * PG_MAPPED	   - Indicates that the page MIGHT be mapped into a pmap.
 *		     If not set, guarantees that the page is not mapped.
 *
 *		     This bit can be set unlocked but only cleared while
 *		     the vm_page is hard-busied.
 *
 *		     For FICTITIOUS pages, this bit will be set automatically
 *		     via a page fault (aka pmap_enter()), but must be cleared
 *		     manually.
 *
 * PG_MAPPEDMULTI  - Possibly mapped to multiple pmaps or to multiple
 *		     locations in one pmap.
 *
 * PG_WRITEABLE	   - Indicates that the page MIGHT be writeable via a pte.
 *		     If not set, guarantees that the page is not writeable.
 *
 *		     This bit can be set unlocked but only cleared while
 *		     the vm_page is hard-busied.
 *
 *		     For FICTITIOUS pages, this bit will be set automatically
 *		     via a page fault (aka pmap_enter()), but must be cleared
 *		     manually.
 *
 * PG_SWAPPED	   - Indicates that the page is backed by a swap block.
 *		     Any VM object type other than OBJT_DEFAULT can contain
 *		     swap-backed pages now.  The bit may only be adjusted
 *		     while the page is hard-busied.
 *
 * PG_RAM	   - Heuristic read-ahead-marker.  When I/O brings pages in,
 *		     this bit is set on one of them to force a page fault on
 *		     it to proactively read-ahead additional pages.
 *
 *		     Can be set or cleared at any time unlocked.
 *
 * PG_WINATCFLS	   - This is used to give dirty pages a second chance
 *		     on the inactive queue before getting flushed by
 *		     the pageout daemon.
 *
 * PG_REFERENCED   - Indicates that the page has been accessed.  If the
 *		     page is PG_MAPPED, this bit might not reflect the
 *		     actual state of the page.  The pmap code synchronizes
 *		     the [A]ccessed bit to this flag and then clears the
 *		     [A]ccessed bit.
 *
 * PG_MARKER	   - Used by any queue-scanning code to recognize a fake
 *		     vm_page being used only as a scan marker.
 *
 * PG_NOTMETA	   - Distinguish pages representing content from pages
 *		     representing meta-data.
 *
 * PG_NEED_COMMIT  - May only be modified while the page is hard-busied.
 *		     Indicates that even if the page might not appear to
 *		     be dirty, it must still be validated against some
 *		     remote entity (e.g. NFS) before it can be thrown away.
 *
 * PG_CLEANCHK	   - Used by the vm_object subsystem to detect pages that
 *		     might have been inserted during a scan.  May be changed
 *		     at any time by the VM system (usually while holding the
 *		     related vm_object's lock).
 */
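
/*
 * Example (illustrative sketch): because PG_MAPPED and PG_WRITEABLE only
 * say that a page MIGHT be mapped or writeable, m->dirty alone is not a
 * reliable "clean" indicator while a writeable pte may still exist.  A
 * typical pattern is to hard-busy the page and refresh m->dirty from the
 * pmap (vm_page_test_dirty() is declared below) before trusting it:
 *
 *	vm_page_busy_wait(m, FALSE, "pgtdty");
 *	if (m->flags & PG_WRITEABLE)
 *		vm_page_test_dirty(m);
 *	if (m->dirty == 0) {
 *		// page can be treated as clean at this point
 *	}
 *	vm_page_wakeup(m);
 */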
#define PG_UNUSED0001	0x00000001
#define PG_UNUSED0002	0x00000002
#define PG_WINATCFLS	0x00000004	/* flush dirty page on inactive q */
#define PG_FICTITIOUS	0x00000008	/* No reverse-map or tracking */
#define PG_WRITEABLE	0x00000010	/* page may be writeable */
#define PG_MAPPED	0x00000020	/* page may be mapped (managed) */
#define PG_MAPPEDMULTI	0x00000040	/* multiple mappings */
#define PG_REFERENCED	0x00000080	/* page has been referenced */
#define PG_CLEANCHK	0x00000100	/* page will be checked for cleaning */
#define PG_UNUSED0200	0x00000200
#define PG_NOSYNC	0x00000400	/* do not collect for syncer */
#define PG_UNQUEUED	0x00000800	/* No queue management for page */
#define PG_MARKER	0x00001000	/* special queue marker page */
#define PG_RAM		0x00002000	/* read ahead mark */
#define PG_SWAPPED	0x00004000	/* backed by swap */
#define PG_NOTMETA	0x00008000	/* do not back with swap */
#define PG_UNUSED10000	0x00010000
#define PG_UNUSED20000	0x00020000
#define PG_NEED_COMMIT	0x00040000	/* clean page requires commit */

#define PG_KEEP_NEWPAGE_MASK	(0)

/*
 * Misc constants.
 */

#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef VM_PAGE_DEBUG
#define VM_PAGE_DEBUG_EXT(name)	name ## _debug
#define VM_PAGE_DEBUG_ARGS	, const char *func, int lineno
#else
#define VM_PAGE_DEBUG_EXT(name)	name
#define VM_PAGE_DEBUG_ARGS
#endif

#ifdef _KERNEL
/*
 * Each pageable resident page falls into one of the following lists:
 *
 *	free
 *		Available for allocation now.
 *
 * The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 *
 */

extern struct vm_page *vm_page_array;	/* First resident page in table */
extern vm_pindex_t vm_page_array_size;	/* number of vm_page_t's */
extern vm_pindex_t first_page;		/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	\
	((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa)	\
	(&vm_page_array[atop(pa) - first_page])

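/*
 * Example (illustrative sketch): round-tripping between a physical address
 * and its vm_page_t using the two macros above.  This only works for pages
 * that are actually part of vm_page_array[] (i.e. not FICTITIOUS pages).
 *
 *	vm_paddr_t pa = ...;			// some managed physical address
 *	vm_page_t m = PHYS_TO_VM_PAGE(pa);
 *
 *	KKASSERT(VM_PAGE_TO_PHYS(m) == trunc_page(pa));
 */
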
#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif

/*
 * Note: the code will always use nominally free pages from the free list
 * before trying other flag-specified sources.
 *
 * At least one of VM_ALLOC_NORMAL|VM_ALLOC_SYSTEM|VM_ALLOC_INTERRUPT
 * must be specified.  VM_ALLOC_RETRY may only be specified if VM_ALLOC_NORMAL
 * is also specified.
 */
#define VM_ALLOC_NORMAL		0x0001	/* ok to use cache pages */
#define VM_ALLOC_SYSTEM		0x0002	/* ok to exhaust most of free list */
#define VM_ALLOC_INTERRUPT	0x0004	/* ok to exhaust entire free list */
#define VM_ALLOC_ZERO		0x0008	/* req pre-zero'd memory if avail */
#define VM_ALLOC_QUICK		0x0010	/* like NORMAL but do not use cache */
#define VM_ALLOC_FORCE_ZERO	0x0020	/* zero page even if already valid */
#define VM_ALLOC_NULL_OK	0x0040	/* ok to return NULL on collision */
#define VM_ALLOC_RETRY		0x0080	/* indefinite block (vm_page_grab()) */
#define VM_ALLOC_USE_GD		0x0100	/* use per-gd cache */
#define VM_ALLOC_CPU_SPEC	0x0200

#define VM_ALLOC_CPU_SHIFT	16
#define VM_ALLOC_CPU(n)		(((n) << VM_ALLOC_CPU_SHIFT) | \
				 VM_ALLOC_CPU_SPEC)
#define VM_ALLOC_GETCPU(flags)	((flags) >> VM_ALLOC_CPU_SHIFT)
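
/*
 * Example (illustrative sketch): finding-or-allocating a page with
 * vm_page_grab(), declared below.  Per the note above, VM_ALLOC_RETRY
 * requires VM_ALLOC_NORMAL; with it, vm_page_grab() can block until it
 * returns a page.  The object and pindex are assumed to come from the
 * caller, and the returned page is assumed to be hard-busied and must be
 * released with vm_page_wakeup() when done.
 *
 *	vm_page_t m;
 *
 *	m = vm_page_grab(object, pindex,
 *			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 *	// ... initialize or use the page ...
 *	vm_page_wakeup(m);
 */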

void vm_page_queue_spin_lock(vm_page_t);
void vm_page_queues_spin_lock(u_short);
void vm_page_and_queue_spin_lock(vm_page_t);

void vm_page_queue_spin_unlock(vm_page_t);
void vm_page_queues_spin_unlock(u_short);
void vm_page_and_queue_spin_unlock(vm_page_t m);

void vm_page_init(vm_page_t m);
void vm_page_io_finish(vm_page_t m);
void vm_page_io_start(vm_page_t m);
void vm_page_need_commit(vm_page_t m);
void vm_page_clear_commit(vm_page_t m);
void vm_page_wakeup(vm_page_t m);
void vm_page_hold(vm_page_t);
void vm_page_unhold(vm_page_t);
void vm_page_activate (vm_page_t);
void vm_page_soft_activate (vm_page_t);

vm_size_t vm_contig_avail_pages(void);
vm_page_t vm_page_alloc (struct vm_object *, vm_pindex_t, int);
vm_page_t vm_page_alloczwq (vm_pindex_t, int);
void vm_page_freezwq (vm_page_t m);
vm_page_t vm_page_alloc_contig(vm_paddr_t low, vm_paddr_t high,
			unsigned long alignment, unsigned long boundary,
			unsigned long size, vm_memattr_t memattr);

vm_page_t vm_page_grab (struct vm_object *, vm_pindex_t, int);
void vm_page_cache (vm_page_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_dontneed (vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_deactivate_locked (vm_page_t);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
int vm_page_insert (vm_page_t, struct vm_object *, vm_pindex_t);

vm_page_t vm_page_hash_get(vm_object_t object, vm_pindex_t pindex);

vm_page_t vm_page_lookup (struct vm_object *, vm_pindex_t);
vm_page_t vm_page_lookup_sbusy_try(struct vm_object *object,
			vm_pindex_t pindex, int pgoff, int pgbytes);
vm_page_t VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_wait)(
			struct vm_object *, vm_pindex_t, int, const char *
			VM_PAGE_DEBUG_ARGS);
vm_page_t VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_try)(
			struct vm_object *, vm_pindex_t, int, int *
			VM_PAGE_DEBUG_ARGS);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, struct vm_object *, vm_pindex_t);
void vm_page_startup (void);
void vm_numa_organize(vm_paddr_t ran_beg, vm_paddr_t bytes, int physid);
void vm_numa_organize_finalize(void);
void vm_page_unwire (vm_page_t, int);
void vm_page_wire (vm_page_t);
void vm_page_unqueue (vm_page_t);
void vm_page_unqueue_nowakeup (vm_page_t);
vm_page_t vm_page_next (vm_page_t);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_set_validdirty (vm_page_t, int, int);
void vm_page_set_valid (vm_page_t, int, int);
void vm_page_set_dirty (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
int vm_page_bits (int, int);
vm_page_t vm_page_list_find(int basequeue, int index);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_free_contig(vm_page_t m, unsigned long size);
vm_page_t vm_page_free_fromq_fast(void);
void vm_page_dirty(vm_page_t m);
void vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg);
int vm_page_sbusy_try(vm_page_t m);
void VM_PAGE_DEBUG_EXT(vm_page_busy_wait)(vm_page_t m,
			int also_m_busy, const char *wmsg VM_PAGE_DEBUG_ARGS);
int VM_PAGE_DEBUG_EXT(vm_page_busy_try)(vm_page_t m,
			int also_m_busy VM_PAGE_DEBUG_ARGS);
u_short vm_get_pg_color(int cpuid, vm_object_t object, vm_pindex_t pindex);

#ifdef VM_PAGE_DEBUG

#define vm_page_lookup_busy_wait(object, pindex, alsob, msg)		\
	vm_page_lookup_busy_wait_debug(object, pindex, alsob, msg,	\
					__func__, __LINE__)

#define vm_page_lookup_busy_try(object, pindex, alsob, errorp)		\
	vm_page_lookup_busy_try_debug(object, pindex, alsob, errorp,	\
					__func__, __LINE__)

#define vm_page_busy_wait(m, alsob, msg)				\
	vm_page_busy_wait_debug(m, alsob, msg, __func__, __LINE__)

#define vm_page_busy_try(m, alsob)					\
	vm_page_busy_try_debug(m, alsob, __func__, __LINE__)

#endif

#endif	/* _KERNEL */
#endif	/* !_VM_VM_PAGE_H_ */